Bug Summary

File: llvm/lib/Target/PowerPC/PPCISelLowering.cpp
Warning: line 9657, column 31
1st function call argument is an uninitialized value

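The pattern behind this warning class: a local variable is declared without an initializer, assigned on only some paths, and then passed by value to a callee, so on the remaining paths the first argument carries garbage. A minimal, self-contained C++ sketch of that pattern (hypothetical names; an illustration of what the checker reports, not the actual code at line 9657):

    #include <cstdint>
    #include <cstdio>

    static void consume(uint64_t Imm) {   // callee reads its first argument
      std::printf("%llu\n", static_cast<unsigned long long>(Imm));
    }

    static void demo(bool HasImm) {
      uint64_t Imm;                        // declared without an initializer
      if (HasImm)
        Imm = 42;                          // assigned on only one path
      // When HasImm is false, the 1st function call argument below is an
      // uninitialized value -- the situation the analyzer reports.
      consume(Imm);
    }

    int main() { demo(true); }

The usual fix is to initialize the variable at its declaration or to ensure every path assigns it before the call.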
Annotated Source Code

clang -cc1 -cc1 -triple x86_64-pc-linux-gnu -analyze -disable-free -disable-llvm-verifier -discard-value-names -main-file-name PPCISelLowering.cpp -analyzer-store=region -analyzer-opt-analyze-nested-blocks -analyzer-checker=core -analyzer-checker=apiModeling -analyzer-checker=unix -analyzer-checker=deadcode -analyzer-checker=cplusplus -analyzer-checker=security.insecureAPI.UncheckedReturn -analyzer-checker=security.insecureAPI.getpw -analyzer-checker=security.insecureAPI.gets -analyzer-checker=security.insecureAPI.mktemp -analyzer-checker=security.insecureAPI.mkstemp -analyzer-checker=security.insecureAPI.vfork -analyzer-checker=nullability.NullPassedToNonnull -analyzer-checker=nullability.NullReturnedFromNonnull -analyzer-output plist -w -setup-static-analyzer -analyzer-config-compatibility-mode=true -mrelocation-model pic -pic-level 2 -fhalf-no-semantic-interposition -mframe-pointer=none -fmath-errno -fno-rounding-math -mconstructor-aliases -munwind-tables -target-cpu x86-64 -tune-cpu generic -debugger-tuning=gdb -ffunction-sections -fdata-sections -fcoverage-compilation-dir=/build/llvm-toolchain-snapshot-13~++20210413100635+64c24f493e5f/build-llvm/lib/Target/PowerPC -resource-dir /usr/lib/llvm-13/lib/clang/13.0.0 -D _DEBUG -D _GNU_SOURCE -D __STDC_CONSTANT_MACROS -D __STDC_FORMAT_MACROS -D __STDC_LIMIT_MACROS -I /build/llvm-toolchain-snapshot-13~++20210413100635+64c24f493e5f/build-llvm/lib/Target/PowerPC -I /build/llvm-toolchain-snapshot-13~++20210413100635+64c24f493e5f/llvm/lib/Target/PowerPC -I /build/llvm-toolchain-snapshot-13~++20210413100635+64c24f493e5f/build-llvm/include -I /build/llvm-toolchain-snapshot-13~++20210413100635+64c24f493e5f/llvm/include -U NDEBUG -internal-isystem /usr/lib/gcc/x86_64-linux-gnu/6.3.0/../../../../include/c++/6.3.0 -internal-isystem /usr/lib/gcc/x86_64-linux-gnu/6.3.0/../../../../include/x86_64-linux-gnu/c++/6.3.0 -internal-isystem /usr/lib/gcc/x86_64-linux-gnu/6.3.0/../../../../include/c++/6.3.0/backward -internal-isystem /usr/lib/llvm-13/lib/clang/13.0.0/include -internal-isystem /usr/local/include -internal-isystem /usr/lib/gcc/x86_64-linux-gnu/6.3.0/../../../../x86_64-linux-gnu/include -internal-externc-isystem /usr/include/x86_64-linux-gnu -internal-externc-isystem /include -internal-externc-isystem /usr/include -O2 -Wno-unused-parameter -Wwrite-strings -Wno-missing-field-initializers -Wno-long-long -Wno-maybe-uninitialized -Wno-comment -std=c++14 -fdeprecated-macro -fdebug-compilation-dir=/build/llvm-toolchain-snapshot-13~++20210413100635+64c24f493e5f/build-llvm/lib/Target/PowerPC -fdebug-prefix-map=/build/llvm-toolchain-snapshot-13~++20210413100635+64c24f493e5f=. -ferror-limit 19 -fvisibility hidden -fvisibility-inlines-hidden -stack-protector 2 -fgnuc-version=4.2.1 -vectorize-loops -vectorize-slp -analyzer-output=html -analyzer-config stable-report-filename=true -faddrsig -D__GCC_HAVE_DWARF2_CFI_ASM=1 -o /tmp/scan-build-2021-04-14-063029-18377-1 -x c++ /build/llvm-toolchain-snapshot-13~++20210413100635+64c24f493e5f/llvm/lib/Target/PowerPC/PPCISelLowering.cpp

/build/llvm-toolchain-snapshot-13~++20210413100635+64c24f493e5f/llvm/lib/Target/PowerPC/PPCISelLowering.cpp

1//===-- PPCISelLowering.cpp - PPC DAG Lowering Implementation -------------===//
2//
3// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
4// See https://llvm.org/LICENSE.txt for license information.
5// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
6//
7//===----------------------------------------------------------------------===//
8//
9// This file implements the PPCISelLowering class.
10//
11//===----------------------------------------------------------------------===//
12
13#include "PPCISelLowering.h"
14#include "MCTargetDesc/PPCPredicates.h"
15#include "PPC.h"
16#include "PPCCCState.h"
17#include "PPCCallingConv.h"
18#include "PPCFrameLowering.h"
19#include "PPCInstrInfo.h"
20#include "PPCMachineFunctionInfo.h"
21#include "PPCPerfectShuffle.h"
22#include "PPCRegisterInfo.h"
23#include "PPCSubtarget.h"
24#include "PPCTargetMachine.h"
25#include "llvm/ADT/APFloat.h"
26#include "llvm/ADT/APInt.h"
27#include "llvm/ADT/ArrayRef.h"
28#include "llvm/ADT/DenseMap.h"
29#include "llvm/ADT/None.h"
30#include "llvm/ADT/STLExtras.h"
31#include "llvm/ADT/SmallPtrSet.h"
32#include "llvm/ADT/SmallSet.h"
33#include "llvm/ADT/SmallVector.h"
34#include "llvm/ADT/Statistic.h"
35#include "llvm/ADT/StringRef.h"
36#include "llvm/ADT/StringSwitch.h"
37#include "llvm/CodeGen/CallingConvLower.h"
38#include "llvm/CodeGen/ISDOpcodes.h"
39#include "llvm/CodeGen/MachineBasicBlock.h"
40#include "llvm/CodeGen/MachineFrameInfo.h"
41#include "llvm/CodeGen/MachineFunction.h"
42#include "llvm/CodeGen/MachineInstr.h"
43#include "llvm/CodeGen/MachineInstrBuilder.h"
44#include "llvm/CodeGen/MachineJumpTableInfo.h"
45#include "llvm/CodeGen/MachineLoopInfo.h"
46#include "llvm/CodeGen/MachineMemOperand.h"
47#include "llvm/CodeGen/MachineModuleInfo.h"
48#include "llvm/CodeGen/MachineOperand.h"
49#include "llvm/CodeGen/MachineRegisterInfo.h"
50#include "llvm/CodeGen/RuntimeLibcalls.h"
51#include "llvm/CodeGen/SelectionDAG.h"
52#include "llvm/CodeGen/SelectionDAGNodes.h"
53#include "llvm/CodeGen/TargetInstrInfo.h"
54#include "llvm/CodeGen/TargetLowering.h"
55#include "llvm/CodeGen/TargetLoweringObjectFileImpl.h"
56#include "llvm/CodeGen/TargetRegisterInfo.h"
57#include "llvm/CodeGen/ValueTypes.h"
58#include "llvm/IR/CallingConv.h"
59#include "llvm/IR/Constant.h"
60#include "llvm/IR/Constants.h"
61#include "llvm/IR/DataLayout.h"
62#include "llvm/IR/DebugLoc.h"
63#include "llvm/IR/DerivedTypes.h"
64#include "llvm/IR/Function.h"
65#include "llvm/IR/GlobalValue.h"
66#include "llvm/IR/IRBuilder.h"
67#include "llvm/IR/Instructions.h"
68#include "llvm/IR/Intrinsics.h"
69#include "llvm/IR/IntrinsicsPowerPC.h"
70#include "llvm/IR/Module.h"
71#include "llvm/IR/Type.h"
72#include "llvm/IR/Use.h"
73#include "llvm/IR/Value.h"
74#include "llvm/MC/MCContext.h"
75#include "llvm/MC/MCExpr.h"
76#include "llvm/MC/MCRegisterInfo.h"
77#include "llvm/MC/MCSectionXCOFF.h"
78#include "llvm/MC/MCSymbolXCOFF.h"
79#include "llvm/Support/AtomicOrdering.h"
80#include "llvm/Support/BranchProbability.h"
81#include "llvm/Support/Casting.h"
82#include "llvm/Support/CodeGen.h"
83#include "llvm/Support/CommandLine.h"
84#include "llvm/Support/Compiler.h"
85#include "llvm/Support/Debug.h"
86#include "llvm/Support/ErrorHandling.h"
87#include "llvm/Support/Format.h"
88#include "llvm/Support/KnownBits.h"
89#include "llvm/Support/MachineValueType.h"
90#include "llvm/Support/MathExtras.h"
91#include "llvm/Support/raw_ostream.h"
92#include "llvm/Target/TargetMachine.h"
93#include "llvm/Target/TargetOptions.h"
94#include <algorithm>
95#include <cassert>
96#include <cstdint>
97#include <iterator>
98#include <list>
99#include <utility>
100#include <vector>
101
102using namespace llvm;
103
104#define DEBUG_TYPE "ppc-lowering"
105
106static cl::opt<bool> DisablePPCPreinc("disable-ppc-preinc",
107cl::desc("disable preincrement load/store generation on PPC"), cl::Hidden);
108
109static cl::opt<bool> DisableILPPref("disable-ppc-ilp-pref",
110cl::desc("disable setting the node scheduling preference to ILP on PPC"), cl::Hidden);
111
112static cl::opt<bool> DisablePPCUnaligned("disable-ppc-unaligned",
113cl::desc("disable unaligned load/store generation on PPC"), cl::Hidden);
114
115static cl::opt<bool> DisableSCO("disable-ppc-sco",
116cl::desc("disable sibling call optimization on ppc"), cl::Hidden);
117
118static cl::opt<bool> DisableInnermostLoopAlign32("disable-ppc-innermost-loop-align32",
119cl::desc("don't always align innermost loop to 32 bytes on ppc"), cl::Hidden);
120
121static cl::opt<bool> UseAbsoluteJumpTables("ppc-use-absolute-jumptables",
122cl::desc("use absolute jump tables on ppc"), cl::Hidden);
123
124// TODO - Remove this option once soft fp128 is fully supported.
125static cl::opt<bool>
126 EnableSoftFP128("enable-soft-fp128",
127 cl::desc("temp option to enable soft fp128"), cl::Hidden);
128
129STATISTIC(NumTailCalls, "Number of tail calls");
130STATISTIC(NumSiblingCalls, "Number of sibling calls");
131STATISTIC(ShufflesHandledWithVPERM, "Number of shuffles lowered to a VPERM");
132STATISTIC(NumDynamicAllocaProbed, "Number of dynamic stack allocation probed");
133
134static bool isNByteElemShuffleMask(ShuffleVectorSDNode *, unsigned, int);
135
136static SDValue widenVec(SelectionDAG &DAG, SDValue Vec, const SDLoc &dl);
137
138// FIXME: Remove this once the bug has been fixed!
139extern cl::opt<bool> ANDIGlueBug;
140
141PPCTargetLowering::PPCTargetLowering(const PPCTargetMachine &TM,
142 const PPCSubtarget &STI)
143 : TargetLowering(TM), Subtarget(STI) {
144 // On PPC32/64, arguments smaller than 4/8 bytes are extended, so all
145 // arguments are at least 4/8 bytes aligned.
146 bool isPPC64 = Subtarget.isPPC64();
147 setMinStackArgumentAlignment(isPPC64 ? Align(8) : Align(4));
148
149 // Set up the register classes.
150 addRegisterClass(MVT::i32, &PPC::GPRCRegClass);
151 if (!useSoftFloat()) {
152 if (hasSPE()) {
153 addRegisterClass(MVT::f32, &PPC::GPRCRegClass);
154 // EFPU2 APU only supports f32
155 if (!Subtarget.hasEFPU2())
156 addRegisterClass(MVT::f64, &PPC::SPERCRegClass);
157 } else {
158 addRegisterClass(MVT::f32, &PPC::F4RCRegClass);
159 addRegisterClass(MVT::f64, &PPC::F8RCRegClass);
160 }
161 }
162
163 // Match BITREVERSE to customized fast code sequence in the td file.
164 setOperationAction(ISD::BITREVERSE, MVT::i32, Legal);
165 setOperationAction(ISD::BITREVERSE, MVT::i64, Legal);
166
167 // Sub-word ATOMIC_CMP_SWAP need to ensure that the input is zero-extended.
168 setOperationAction(ISD::ATOMIC_CMP_SWAP, MVT::i32, Custom);
169
170 // PowerPC has an i16 but no i8 (or i1) SEXTLOAD.
171 for (MVT VT : MVT::integer_valuetypes()) {
172 setLoadExtAction(ISD::SEXTLOAD, VT, MVT::i1, Promote);
173 setLoadExtAction(ISD::SEXTLOAD, VT, MVT::i8, Expand);
174 }
175
176 if (Subtarget.isISA3_0()) {
177 setLoadExtAction(ISD::EXTLOAD, MVT::f64, MVT::f16, Legal);
178 setLoadExtAction(ISD::EXTLOAD, MVT::f32, MVT::f16, Legal);
179 setTruncStoreAction(MVT::f64, MVT::f16, Legal);
180 setTruncStoreAction(MVT::f32, MVT::f16, Legal);
181 } else {
182 // No extending loads from f16 or HW conversions back and forth.
183 setLoadExtAction(ISD::EXTLOAD, MVT::f64, MVT::f16, Expand);
184 setOperationAction(ISD::FP16_TO_FP, MVT::f64, Expand);
185 setOperationAction(ISD::FP_TO_FP16, MVT::f64, Expand);
186 setLoadExtAction(ISD::EXTLOAD, MVT::f32, MVT::f16, Expand);
187 setOperationAction(ISD::FP16_TO_FP, MVT::f32, Expand);
188 setOperationAction(ISD::FP_TO_FP16, MVT::f32, Expand);
189 setTruncStoreAction(MVT::f64, MVT::f16, Expand);
190 setTruncStoreAction(MVT::f32, MVT::f16, Expand);
191 }
192
193 setTruncStoreAction(MVT::f64, MVT::f32, Expand);
194
195 // PowerPC has pre-inc load and store's.
196 setIndexedLoadAction(ISD::PRE_INC, MVT::i1, Legal);
197 setIndexedLoadAction(ISD::PRE_INC, MVT::i8, Legal);
198 setIndexedLoadAction(ISD::PRE_INC, MVT::i16, Legal);
199 setIndexedLoadAction(ISD::PRE_INC, MVT::i32, Legal);
200 setIndexedLoadAction(ISD::PRE_INC, MVT::i64, Legal);
201 setIndexedStoreAction(ISD::PRE_INC, MVT::i1, Legal);
202 setIndexedStoreAction(ISD::PRE_INC, MVT::i8, Legal);
203 setIndexedStoreAction(ISD::PRE_INC, MVT::i16, Legal);
204 setIndexedStoreAction(ISD::PRE_INC, MVT::i32, Legal);
205 setIndexedStoreAction(ISD::PRE_INC, MVT::i64, Legal);
206 if (!Subtarget.hasSPE()) {
207 setIndexedLoadAction(ISD::PRE_INC, MVT::f32, Legal);
208 setIndexedLoadAction(ISD::PRE_INC, MVT::f64, Legal);
209 setIndexedStoreAction(ISD::PRE_INC, MVT::f32, Legal);
210 setIndexedStoreAction(ISD::PRE_INC, MVT::f64, Legal);
211 }
212
213 // PowerPC uses ADDC/ADDE/SUBC/SUBE to propagate carry.
214 const MVT ScalarIntVTs[] = { MVT::i32, MVT::i64 };
215 for (MVT VT : ScalarIntVTs) {
216 setOperationAction(ISD::ADDC, VT, Legal);
217 setOperationAction(ISD::ADDE, VT, Legal);
218 setOperationAction(ISD::SUBC, VT, Legal);
219 setOperationAction(ISD::SUBE, VT, Legal);
220 }
221
222 if (Subtarget.useCRBits()) {
223 setOperationAction(ISD::SIGN_EXTEND_INREG, MVT::i1, Expand);
224
225 if (isPPC64 || Subtarget.hasFPCVT()) {
226 setOperationAction(ISD::STRICT_SINT_TO_FP, MVT::i1, Promote);
227 AddPromotedToType(ISD::STRICT_SINT_TO_FP, MVT::i1,
228 isPPC64 ? MVT::i64 : MVT::i32);
229 setOperationAction(ISD::STRICT_UINT_TO_FP, MVT::i1, Promote);
230 AddPromotedToType(ISD::STRICT_UINT_TO_FP, MVT::i1,
231 isPPC64 ? MVT::i64 : MVT::i32);
232
233 setOperationAction(ISD::SINT_TO_FP, MVT::i1, Promote);
234 AddPromotedToType (ISD::SINT_TO_FP, MVT::i1,
235 isPPC64 ? MVT::i64 : MVT::i32);
236 setOperationAction(ISD::UINT_TO_FP, MVT::i1, Promote);
237 AddPromotedToType(ISD::UINT_TO_FP, MVT::i1,
238 isPPC64 ? MVT::i64 : MVT::i32);
239
240 setOperationAction(ISD::STRICT_FP_TO_SINT, MVT::i1, Promote);
241 AddPromotedToType(ISD::STRICT_FP_TO_SINT, MVT::i1,
242 isPPC64 ? MVT::i64 : MVT::i32);
243 setOperationAction(ISD::STRICT_FP_TO_UINT, MVT::i1, Promote);
244 AddPromotedToType(ISD::STRICT_FP_TO_UINT, MVT::i1,
245 isPPC64 ? MVT::i64 : MVT::i32);
246
247 setOperationAction(ISD::FP_TO_SINT, MVT::i1, Promote);
248 AddPromotedToType(ISD::FP_TO_SINT, MVT::i1,
249 isPPC64 ? MVT::i64 : MVT::i32);
250 setOperationAction(ISD::FP_TO_UINT, MVT::i1, Promote);
251 AddPromotedToType(ISD::FP_TO_UINT, MVT::i1,
252 isPPC64 ? MVT::i64 : MVT::i32);
253 } else {
254 setOperationAction(ISD::STRICT_SINT_TO_FP, MVT::i1, Custom);
255 setOperationAction(ISD::STRICT_UINT_TO_FP, MVT::i1, Custom);
256 setOperationAction(ISD::SINT_TO_FP, MVT::i1, Custom);
257 setOperationAction(ISD::UINT_TO_FP, MVT::i1, Custom);
258 }
259
260 // PowerPC does not support direct load/store of condition registers.
261 setOperationAction(ISD::LOAD, MVT::i1, Custom);
262 setOperationAction(ISD::STORE, MVT::i1, Custom);
263
264 // FIXME: Remove this once the ANDI glue bug is fixed:
265 if (ANDIGlueBug)
266 setOperationAction(ISD::TRUNCATE, MVT::i1, Custom);
267
268 for (MVT VT : MVT::integer_valuetypes()) {
269 setLoadExtAction(ISD::SEXTLOAD, VT, MVT::i1, Promote);
270 setLoadExtAction(ISD::ZEXTLOAD, VT, MVT::i1, Promote);
271 setTruncStoreAction(VT, MVT::i1, Expand);
272 }
273
274 addRegisterClass(MVT::i1, &PPC::CRBITRCRegClass);
275 }
276
277 // Expand ppcf128 to i32 by hand for the benefit of llvm-gcc bootstrap on
278 // PPC (the libcall is not available).
279 setOperationAction(ISD::FP_TO_SINT, MVT::ppcf128, Custom);
280 setOperationAction(ISD::FP_TO_UINT, MVT::ppcf128, Custom);
281 setOperationAction(ISD::STRICT_FP_TO_SINT, MVT::ppcf128, Custom);
282 setOperationAction(ISD::STRICT_FP_TO_UINT, MVT::ppcf128, Custom);
283
284 // We do not currently implement these libm ops for PowerPC.
285 setOperationAction(ISD::FFLOOR, MVT::ppcf128, Expand);
286 setOperationAction(ISD::FCEIL, MVT::ppcf128, Expand);
287 setOperationAction(ISD::FTRUNC, MVT::ppcf128, Expand);
288 setOperationAction(ISD::FRINT, MVT::ppcf128, Expand);
289 setOperationAction(ISD::FNEARBYINT, MVT::ppcf128, Expand);
290 setOperationAction(ISD::FREM, MVT::ppcf128, Expand);
291
292 // PowerPC has no SREM/UREM instructions unless we are on P9
293 // On P9 we may use a hardware instruction to compute the remainder.
294 // When the result of both the remainder and the division is required it is
295 // more efficient to compute the remainder from the result of the division
296 // rather than use the remainder instruction. The instructions are legalized
297 // directly because the DivRemPairsPass performs the transformation at the IR
298 // level.
299 if (Subtarget.isISA3_0()) {
300 setOperationAction(ISD::SREM, MVT::i32, Legal);
301 setOperationAction(ISD::UREM, MVT::i32, Legal);
302 setOperationAction(ISD::SREM, MVT::i64, Legal);
303 setOperationAction(ISD::UREM, MVT::i64, Legal);
304 } else {
305 setOperationAction(ISD::SREM, MVT::i32, Expand);
306 setOperationAction(ISD::UREM, MVT::i32, Expand);
307 setOperationAction(ISD::SREM, MVT::i64, Expand);
308 setOperationAction(ISD::UREM, MVT::i64, Expand);
309 }
310
311 // Don't use SMUL_LOHI/UMUL_LOHI or SDIVREM/UDIVREM to lower SREM/UREM.
312 setOperationAction(ISD::UMUL_LOHI, MVT::i32, Expand);
313 setOperationAction(ISD::SMUL_LOHI, MVT::i32, Expand);
314 setOperationAction(ISD::UMUL_LOHI, MVT::i64, Expand);
315 setOperationAction(ISD::SMUL_LOHI, MVT::i64, Expand);
316 setOperationAction(ISD::UDIVREM, MVT::i32, Expand);
317 setOperationAction(ISD::SDIVREM, MVT::i32, Expand);
318 setOperationAction(ISD::UDIVREM, MVT::i64, Expand);
319 setOperationAction(ISD::SDIVREM, MVT::i64, Expand);
320
321 // Handle constrained floating-point operations of scalar.
322 // TODO: Handle SPE specific operation.
323 setOperationAction(ISD::STRICT_FADD, MVT::f32, Legal);
324 setOperationAction(ISD::STRICT_FSUB, MVT::f32, Legal);
325 setOperationAction(ISD::STRICT_FMUL, MVT::f32, Legal);
326 setOperationAction(ISD::STRICT_FDIV, MVT::f32, Legal);
327 setOperationAction(ISD::STRICT_FMA, MVT::f32, Legal);
328 setOperationAction(ISD::STRICT_FP_ROUND, MVT::f32, Legal);
329
330 setOperationAction(ISD::STRICT_FADD, MVT::f64, Legal);
331 setOperationAction(ISD::STRICT_FSUB, MVT::f64, Legal);
332 setOperationAction(ISD::STRICT_FMUL, MVT::f64, Legal);
333 setOperationAction(ISD::STRICT_FDIV, MVT::f64, Legal);
334 setOperationAction(ISD::STRICT_FMA, MVT::f64, Legal);
335 if (Subtarget.hasVSX()) {
336 setOperationAction(ISD::STRICT_FRINT, MVT::f32, Legal);
337 setOperationAction(ISD::STRICT_FRINT, MVT::f64, Legal);
338 }
339
340 if (Subtarget.hasFSQRT()) {
341 setOperationAction(ISD::STRICT_FSQRT, MVT::f32, Legal);
342 setOperationAction(ISD::STRICT_FSQRT, MVT::f64, Legal);
343 }
344
345 if (Subtarget.hasFPRND()) {
346 setOperationAction(ISD::STRICT_FFLOOR, MVT::f32, Legal);
347 setOperationAction(ISD::STRICT_FCEIL, MVT::f32, Legal);
348 setOperationAction(ISD::STRICT_FTRUNC, MVT::f32, Legal);
349 setOperationAction(ISD::STRICT_FROUND, MVT::f32, Legal);
350
351 setOperationAction(ISD::STRICT_FFLOOR, MVT::f64, Legal);
352 setOperationAction(ISD::STRICT_FCEIL, MVT::f64, Legal);
353 setOperationAction(ISD::STRICT_FTRUNC, MVT::f64, Legal);
354 setOperationAction(ISD::STRICT_FROUND, MVT::f64, Legal);
355 }
356
357 // We don't support sin/cos/sqrt/fmod/pow
358 setOperationAction(ISD::FSIN , MVT::f64, Expand);
359 setOperationAction(ISD::FCOS , MVT::f64, Expand);
360 setOperationAction(ISD::FSINCOS, MVT::f64, Expand);
361 setOperationAction(ISD::FREM , MVT::f64, Expand);
362 setOperationAction(ISD::FPOW , MVT::f64, Expand);
363 setOperationAction(ISD::FSIN , MVT::f32, Expand);
364 setOperationAction(ISD::FCOS , MVT::f32, Expand);
365 setOperationAction(ISD::FSINCOS, MVT::f32, Expand);
366 setOperationAction(ISD::FREM , MVT::f32, Expand);
367 setOperationAction(ISD::FPOW , MVT::f32, Expand);
368 if (Subtarget.hasSPE()) {
369 setOperationAction(ISD::FMA , MVT::f64, Expand);
370 setOperationAction(ISD::FMA , MVT::f32, Expand);
371 } else {
372 setOperationAction(ISD::FMA , MVT::f64, Legal);
373 setOperationAction(ISD::FMA , MVT::f32, Legal);
374 }
375
376 if (Subtarget.hasSPE())
377 setLoadExtAction(ISD::EXTLOAD, MVT::f64, MVT::f32, Expand);
378
379 setOperationAction(ISD::FLT_ROUNDS_, MVT::i32, Custom);
380
381 // If we're enabling GP optimizations, use hardware square root
382 if (!Subtarget.hasFSQRT() &&
383 !(TM.Options.UnsafeFPMath && Subtarget.hasFRSQRTE() &&
384 Subtarget.hasFRE()))
385 setOperationAction(ISD::FSQRT, MVT::f64, Expand);
386
387 if (!Subtarget.hasFSQRT() &&
388 !(TM.Options.UnsafeFPMath && Subtarget.hasFRSQRTES() &&
389 Subtarget.hasFRES()))
390 setOperationAction(ISD::FSQRT, MVT::f32, Expand);
391
392 if (Subtarget.hasFCPSGN()) {
393 setOperationAction(ISD::FCOPYSIGN, MVT::f64, Legal);
394 setOperationAction(ISD::FCOPYSIGN, MVT::f32, Legal);
395 } else {
396 setOperationAction(ISD::FCOPYSIGN, MVT::f64, Expand);
397 setOperationAction(ISD::FCOPYSIGN, MVT::f32, Expand);
398 }
399
400 if (Subtarget.hasFPRND()) {
401 setOperationAction(ISD::FFLOOR, MVT::f64, Legal);
402 setOperationAction(ISD::FCEIL, MVT::f64, Legal);
403 setOperationAction(ISD::FTRUNC, MVT::f64, Legal);
404 setOperationAction(ISD::FROUND, MVT::f64, Legal);
405
406 setOperationAction(ISD::FFLOOR, MVT::f32, Legal);
407 setOperationAction(ISD::FCEIL, MVT::f32, Legal);
408 setOperationAction(ISD::FTRUNC, MVT::f32, Legal);
409 setOperationAction(ISD::FROUND, MVT::f32, Legal);
410 }
411
412 // PowerPC does not have BSWAP, but we can use vector BSWAP instruction xxbrd
413 // to speed up scalar BSWAP64.
414 // CTPOP or CTTZ were introduced in P8/P9 respectively
415 setOperationAction(ISD::BSWAP, MVT::i32 , Expand);
416 if (Subtarget.hasP9Vector())
417 setOperationAction(ISD::BSWAP, MVT::i64 , Custom);
418 else
419 setOperationAction(ISD::BSWAP, MVT::i64 , Expand);
420 if (Subtarget.isISA3_0()) {
421 setOperationAction(ISD::CTTZ , MVT::i32 , Legal);
422 setOperationAction(ISD::CTTZ , MVT::i64 , Legal);
423 } else {
424 setOperationAction(ISD::CTTZ , MVT::i32 , Expand);
425 setOperationAction(ISD::CTTZ , MVT::i64 , Expand);
426 }
427
428 if (Subtarget.hasPOPCNTD() == PPCSubtarget::POPCNTD_Fast) {
429 setOperationAction(ISD::CTPOP, MVT::i32 , Legal);
430 setOperationAction(ISD::CTPOP, MVT::i64 , Legal);
431 } else {
432 setOperationAction(ISD::CTPOP, MVT::i32 , Expand);
433 setOperationAction(ISD::CTPOP, MVT::i64 , Expand);
434 }
435
436 // PowerPC does not have ROTR
437 setOperationAction(ISD::ROTR, MVT::i32 , Expand);
438 setOperationAction(ISD::ROTR, MVT::i64 , Expand);
439
440 if (!Subtarget.useCRBits()) {
441 // PowerPC does not have Select
442 setOperationAction(ISD::SELECT, MVT::i32, Expand);
443 setOperationAction(ISD::SELECT, MVT::i64, Expand);
444 setOperationAction(ISD::SELECT, MVT::f32, Expand);
445 setOperationAction(ISD::SELECT, MVT::f64, Expand);
446 }
447
448 // PowerPC wants to turn select_cc of FP into fsel when possible.
449 setOperationAction(ISD::SELECT_CC, MVT::f32, Custom);
450 setOperationAction(ISD::SELECT_CC, MVT::f64, Custom);
451
452 // PowerPC wants to optimize integer setcc a bit
453 if (!Subtarget.useCRBits())
454 setOperationAction(ISD::SETCC, MVT::i32, Custom);
455
456 if (Subtarget.hasFPU()) {
457 setOperationAction(ISD::STRICT_FSETCC, MVT::f32, Legal);
458 setOperationAction(ISD::STRICT_FSETCC, MVT::f64, Legal);
459 setOperationAction(ISD::STRICT_FSETCC, MVT::f128, Legal);
460
461 setOperationAction(ISD::STRICT_FSETCCS, MVT::f32, Legal);
462 setOperationAction(ISD::STRICT_FSETCCS, MVT::f64, Legal);
463 setOperationAction(ISD::STRICT_FSETCCS, MVT::f128, Legal);
464 }
465
466 // PowerPC does not have BRCOND which requires SetCC
467 if (!Subtarget.useCRBits())
468 setOperationAction(ISD::BRCOND, MVT::Other, Expand);
469
470 setOperationAction(ISD::BR_JT, MVT::Other, Expand);
471
472 if (Subtarget.hasSPE()) {
473 // SPE has built-in conversions
474 setOperationAction(ISD::STRICT_FP_TO_SINT, MVT::i32, Legal);
475 setOperationAction(ISD::STRICT_SINT_TO_FP, MVT::i32, Legal);
476 setOperationAction(ISD::STRICT_UINT_TO_FP, MVT::i32, Legal);
477 setOperationAction(ISD::FP_TO_SINT, MVT::i32, Legal);
478 setOperationAction(ISD::SINT_TO_FP, MVT::i32, Legal);
479 setOperationAction(ISD::UINT_TO_FP, MVT::i32, Legal);
480 } else {
481 // PowerPC turns FP_TO_SINT into FCTIWZ and some load/stores.
482 setOperationAction(ISD::STRICT_FP_TO_SINT, MVT::i32, Custom);
483 setOperationAction(ISD::FP_TO_SINT, MVT::i32, Custom);
484
485 // PowerPC does not have [U|S]INT_TO_FP
486 setOperationAction(ISD::STRICT_SINT_TO_FP, MVT::i32, Expand);
487 setOperationAction(ISD::STRICT_UINT_TO_FP, MVT::i32, Expand);
488 setOperationAction(ISD::SINT_TO_FP, MVT::i32, Expand);
489 setOperationAction(ISD::UINT_TO_FP, MVT::i32, Expand);
490 }
491
492 if (Subtarget.hasDirectMove() && isPPC64) {
493 setOperationAction(ISD::BITCAST, MVT::f32, Legal);
494 setOperationAction(ISD::BITCAST, MVT::i32, Legal);
495 setOperationAction(ISD::BITCAST, MVT::i64, Legal);
496 setOperationAction(ISD::BITCAST, MVT::f64, Legal);
497 if (TM.Options.UnsafeFPMath) {
498 setOperationAction(ISD::LRINT, MVT::f64, Legal);
499 setOperationAction(ISD::LRINT, MVT::f32, Legal);
500 setOperationAction(ISD::LLRINT, MVT::f64, Legal);
501 setOperationAction(ISD::LLRINT, MVT::f32, Legal);
502 setOperationAction(ISD::LROUND, MVT::f64, Legal);
503 setOperationAction(ISD::LROUND, MVT::f32, Legal);
504 setOperationAction(ISD::LLROUND, MVT::f64, Legal);
505 setOperationAction(ISD::LLROUND, MVT::f32, Legal);
506 }
507 } else {
508 setOperationAction(ISD::BITCAST, MVT::f32, Expand);
509 setOperationAction(ISD::BITCAST, MVT::i32, Expand);
510 setOperationAction(ISD::BITCAST, MVT::i64, Expand);
511 setOperationAction(ISD::BITCAST, MVT::f64, Expand);
512 }
513
514 // We cannot sextinreg(i1). Expand to shifts.
515 setOperationAction(ISD::SIGN_EXTEND_INREG, MVT::i1, Expand);
516
517 // NOTE: EH_SJLJ_SETJMP/_LONGJMP supported here is NOT intended to support
518 // SjLj exception handling but a light-weight setjmp/longjmp replacement to
519 // support continuation, user-level threading, and etc.. As a result, no
520 // other SjLj exception interfaces are implemented and please don't build
521 // your own exception handling based on them.
522 // LLVM/Clang supports zero-cost DWARF exception handling.
523 setOperationAction(ISD::EH_SJLJ_SETJMP, MVT::i32, Custom);
524 setOperationAction(ISD::EH_SJLJ_LONGJMP, MVT::Other, Custom);
525
526 // We want to legalize GlobalAddress and ConstantPool nodes into the
527 // appropriate instructions to materialize the address.
528 setOperationAction(ISD::GlobalAddress, MVT::i32, Custom);
529 setOperationAction(ISD::GlobalTLSAddress, MVT::i32, Custom);
530 setOperationAction(ISD::BlockAddress, MVT::i32, Custom);
531 setOperationAction(ISD::ConstantPool, MVT::i32, Custom);
532 setOperationAction(ISD::JumpTable, MVT::i32, Custom);
533 setOperationAction(ISD::GlobalAddress, MVT::i64, Custom);
534 setOperationAction(ISD::GlobalTLSAddress, MVT::i64, Custom);
535 setOperationAction(ISD::BlockAddress, MVT::i64, Custom);
536 setOperationAction(ISD::ConstantPool, MVT::i64, Custom);
537 setOperationAction(ISD::JumpTable, MVT::i64, Custom);
538
539 // TRAP is legal.
540 setOperationAction(ISD::TRAP, MVT::Other, Legal);
541
542 // TRAMPOLINE is custom lowered.
543 setOperationAction(ISD::INIT_TRAMPOLINE, MVT::Other, Custom);
544 setOperationAction(ISD::ADJUST_TRAMPOLINE, MVT::Other, Custom);
545
546 // VASTART needs to be custom lowered to use the VarArgsFrameIndex
547 setOperationAction(ISD::VASTART , MVT::Other, Custom);
548
549 if (Subtarget.is64BitELFABI()) {
550 // VAARG always uses double-word chunks, so promote anything smaller.
551 setOperationAction(ISD::VAARG, MVT::i1, Promote);
552 AddPromotedToType(ISD::VAARG, MVT::i1, MVT::i64);
553 setOperationAction(ISD::VAARG, MVT::i8, Promote);
554 AddPromotedToType(ISD::VAARG, MVT::i8, MVT::i64);
555 setOperationAction(ISD::VAARG, MVT::i16, Promote);
556 AddPromotedToType(ISD::VAARG, MVT::i16, MVT::i64);
557 setOperationAction(ISD::VAARG, MVT::i32, Promote);
558 AddPromotedToType(ISD::VAARG, MVT::i32, MVT::i64);
559 setOperationAction(ISD::VAARG, MVT::Other, Expand);
560 } else if (Subtarget.is32BitELFABI()) {
561 // VAARG is custom lowered with the 32-bit SVR4 ABI.
562 setOperationAction(ISD::VAARG, MVT::Other, Custom);
563 setOperationAction(ISD::VAARG, MVT::i64, Custom);
564 } else
565 setOperationAction(ISD::VAARG, MVT::Other, Expand);
566
567 // VACOPY is custom lowered with the 32-bit SVR4 ABI.
568 if (Subtarget.is32BitELFABI())
569 setOperationAction(ISD::VACOPY , MVT::Other, Custom);
570 else
571 setOperationAction(ISD::VACOPY , MVT::Other, Expand);
572
573 // Use the default implementation.
574 setOperationAction(ISD::VAEND , MVT::Other, Expand);
575 setOperationAction(ISD::STACKSAVE , MVT::Other, Expand);
576 setOperationAction(ISD::STACKRESTORE , MVT::Other, Custom);
577 setOperationAction(ISD::DYNAMIC_STACKALLOC, MVT::i32 , Custom);
578 setOperationAction(ISD::DYNAMIC_STACKALLOC, MVT::i64 , Custom);
579 setOperationAction(ISD::GET_DYNAMIC_AREA_OFFSET, MVT::i32, Custom);
580 setOperationAction(ISD::GET_DYNAMIC_AREA_OFFSET, MVT::i64, Custom);
581 setOperationAction(ISD::EH_DWARF_CFA, MVT::i32, Custom);
582 setOperationAction(ISD::EH_DWARF_CFA, MVT::i64, Custom);
583
584 // We want to custom lower some of our intrinsics.
585 setOperationAction(ISD::INTRINSIC_WO_CHAIN, MVT::Other, Custom);
586
587 // To handle counter-based loop conditions.
588 setOperationAction(ISD::INTRINSIC_W_CHAIN, MVT::i1, Custom);
589
590 setOperationAction(ISD::INTRINSIC_VOID, MVT::i8, Custom);
591 setOperationAction(ISD::INTRINSIC_VOID, MVT::i16, Custom);
592 setOperationAction(ISD::INTRINSIC_VOID, MVT::i32, Custom);
593 setOperationAction(ISD::INTRINSIC_VOID, MVT::Other, Custom);
594
595 // Comparisons that require checking two conditions.
596 if (Subtarget.hasSPE()) {
597 setCondCodeAction(ISD::SETO, MVT::f32, Expand);
598 setCondCodeAction(ISD::SETO, MVT::f64, Expand);
599 setCondCodeAction(ISD::SETUO, MVT::f32, Expand);
600 setCondCodeAction(ISD::SETUO, MVT::f64, Expand);
601 }
602 setCondCodeAction(ISD::SETULT, MVT::f32, Expand);
603 setCondCodeAction(ISD::SETULT, MVT::f64, Expand);
604 setCondCodeAction(ISD::SETUGT, MVT::f32, Expand);
605 setCondCodeAction(ISD::SETUGT, MVT::f64, Expand);
606 setCondCodeAction(ISD::SETUEQ, MVT::f32, Expand);
607 setCondCodeAction(ISD::SETUEQ, MVT::f64, Expand);
608 setCondCodeAction(ISD::SETOGE, MVT::f32, Expand);
609 setCondCodeAction(ISD::SETOGE, MVT::f64, Expand);
610 setCondCodeAction(ISD::SETOLE, MVT::f32, Expand);
611 setCondCodeAction(ISD::SETOLE, MVT::f64, Expand);
612 setCondCodeAction(ISD::SETONE, MVT::f32, Expand);
613 setCondCodeAction(ISD::SETONE, MVT::f64, Expand);
614
615 setOperationAction(ISD::STRICT_FP_EXTEND, MVT::f32, Legal);
616 setOperationAction(ISD::STRICT_FP_EXTEND, MVT::f64, Legal);
617
618 if (Subtarget.has64BitSupport()) {
619 // They also have instructions for converting between i64 and fp.
620 setOperationAction(ISD::STRICT_FP_TO_SINT, MVT::i64, Custom);
621 setOperationAction(ISD::STRICT_FP_TO_UINT, MVT::i64, Expand);
622 setOperationAction(ISD::STRICT_SINT_TO_FP, MVT::i64, Custom);
623 setOperationAction(ISD::STRICT_UINT_TO_FP, MVT::i64, Expand);
624 setOperationAction(ISD::FP_TO_SINT, MVT::i64, Custom);
625 setOperationAction(ISD::FP_TO_UINT, MVT::i64, Expand);
626 setOperationAction(ISD::SINT_TO_FP, MVT::i64, Custom);
627 setOperationAction(ISD::UINT_TO_FP, MVT::i64, Expand);
628 // This is just the low 32 bits of a (signed) fp->i64 conversion.
629 // We cannot do this with Promote because i64 is not a legal type.
630 setOperationAction(ISD::STRICT_FP_TO_UINT, MVT::i32, Custom);
631 setOperationAction(ISD::FP_TO_UINT, MVT::i32, Custom);
632
633 if (Subtarget.hasLFIWAX() || Subtarget.isPPC64()) {
634 setOperationAction(ISD::SINT_TO_FP, MVT::i32, Custom);
635 setOperationAction(ISD::STRICT_SINT_TO_FP, MVT::i32, Custom);
636 }
637 } else {
638 // PowerPC does not have FP_TO_UINT on 32-bit implementations.
639 if (Subtarget.hasSPE()) {
640 setOperationAction(ISD::STRICT_FP_TO_UINT, MVT::i32, Legal);
641 setOperationAction(ISD::FP_TO_UINT, MVT::i32, Legal);
642 } else {
643 setOperationAction(ISD::STRICT_FP_TO_UINT, MVT::i32, Expand);
644 setOperationAction(ISD::FP_TO_UINT, MVT::i32, Expand);
645 }
646 }
647
648 // With the instructions enabled under FPCVT, we can do everything.
649 if (Subtarget.hasFPCVT()) {
650 if (Subtarget.has64BitSupport()) {
651 setOperationAction(ISD::STRICT_FP_TO_SINT, MVT::i64, Custom);
652 setOperationAction(ISD::STRICT_FP_TO_UINT, MVT::i64, Custom);
653 setOperationAction(ISD::STRICT_SINT_TO_FP, MVT::i64, Custom);
654 setOperationAction(ISD::STRICT_UINT_TO_FP, MVT::i64, Custom);
655 setOperationAction(ISD::FP_TO_SINT, MVT::i64, Custom);
656 setOperationAction(ISD::FP_TO_UINT, MVT::i64, Custom);
657 setOperationAction(ISD::SINT_TO_FP, MVT::i64, Custom);
658 setOperationAction(ISD::UINT_TO_FP, MVT::i64, Custom);
659 }
660
661 setOperationAction(ISD::STRICT_FP_TO_SINT, MVT::i32, Custom);
662 setOperationAction(ISD::STRICT_FP_TO_UINT, MVT::i32, Custom);
663 setOperationAction(ISD::STRICT_SINT_TO_FP, MVT::i32, Custom);
664 setOperationAction(ISD::STRICT_UINT_TO_FP, MVT::i32, Custom);
665 setOperationAction(ISD::FP_TO_SINT, MVT::i32, Custom);
666 setOperationAction(ISD::FP_TO_UINT, MVT::i32, Custom);
667 setOperationAction(ISD::SINT_TO_FP, MVT::i32, Custom);
668 setOperationAction(ISD::UINT_TO_FP, MVT::i32, Custom);
669 }
670
671 if (Subtarget.use64BitRegs()) {
672 // 64-bit PowerPC implementations can support i64 types directly
673 addRegisterClass(MVT::i64, &PPC::G8RCRegClass);
674 // BUILD_PAIR can't be handled natively, and should be expanded to shl/or
675 setOperationAction(ISD::BUILD_PAIR, MVT::i64, Expand);
676 // 64-bit PowerPC wants to expand i128 shifts itself.
677 setOperationAction(ISD::SHL_PARTS, MVT::i64, Custom);
678 setOperationAction(ISD::SRA_PARTS, MVT::i64, Custom);
679 setOperationAction(ISD::SRL_PARTS, MVT::i64, Custom);
680 } else {
681 // 32-bit PowerPC wants to expand i64 shifts itself.
682 setOperationAction(ISD::SHL_PARTS, MVT::i32, Custom);
683 setOperationAction(ISD::SRA_PARTS, MVT::i32, Custom);
684 setOperationAction(ISD::SRL_PARTS, MVT::i32, Custom);
685 }
686
687 // PowerPC has better expansions for funnel shifts than the generic
688 // TargetLowering::expandFunnelShift.
689 if (Subtarget.has64BitSupport()) {
690 setOperationAction(ISD::FSHL, MVT::i64, Custom);
691 setOperationAction(ISD::FSHR, MVT::i64, Custom);
692 }
693 setOperationAction(ISD::FSHL, MVT::i32, Custom);
694 setOperationAction(ISD::FSHR, MVT::i32, Custom);
695
696 if (Subtarget.hasVSX()) {
697 setOperationAction(ISD::FMAXNUM_IEEE, MVT::f64, Legal);
698 setOperationAction(ISD::FMAXNUM_IEEE, MVT::f32, Legal);
699 setOperationAction(ISD::FMINNUM_IEEE, MVT::f64, Legal);
700 setOperationAction(ISD::FMINNUM_IEEE, MVT::f32, Legal);
701 }
702
703 if (Subtarget.hasAltivec()) {
704 for (MVT VT : { MVT::v16i8, MVT::v8i16, MVT::v4i32 }) {
705 setOperationAction(ISD::SADDSAT, VT, Legal);
706 setOperationAction(ISD::SSUBSAT, VT, Legal);
707 setOperationAction(ISD::UADDSAT, VT, Legal);
708 setOperationAction(ISD::USUBSAT, VT, Legal);
709 }
710 // First set operation action for all vector types to expand. Then we
711 // will selectively turn on ones that can be effectively codegen'd.
712 for (MVT VT : MVT::fixedlen_vector_valuetypes()) {
713 // add/sub are legal for all supported vector VT's.
714 setOperationAction(ISD::ADD, VT, Legal);
715 setOperationAction(ISD::SUB, VT, Legal);
716
717 // For v2i64, these are only valid with P8Vector. This is corrected after
718 // the loop.
719 if (VT.getSizeInBits() <= 128 && VT.getScalarSizeInBits() <= 64) {
720 setOperationAction(ISD::SMAX, VT, Legal);
721 setOperationAction(ISD::SMIN, VT, Legal);
722 setOperationAction(ISD::UMAX, VT, Legal);
723 setOperationAction(ISD::UMIN, VT, Legal);
724 }
725 else {
726 setOperationAction(ISD::SMAX, VT, Expand);
727 setOperationAction(ISD::SMIN, VT, Expand);
728 setOperationAction(ISD::UMAX, VT, Expand);
729 setOperationAction(ISD::UMIN, VT, Expand);
730 }
731
732 if (Subtarget.hasVSX()) {
733 setOperationAction(ISD::FMAXNUM, VT, Legal);
734 setOperationAction(ISD::FMINNUM, VT, Legal);
735 }
736
737 // Vector instructions introduced in P8
738 if (Subtarget.hasP8Altivec() && (VT.SimpleTy != MVT::v1i128)) {
739 setOperationAction(ISD::CTPOP, VT, Legal);
740 setOperationAction(ISD::CTLZ, VT, Legal);
741 }
742 else {
743 setOperationAction(ISD::CTPOP, VT, Expand);
744 setOperationAction(ISD::CTLZ, VT, Expand);
745 }
746
747 // Vector instructions introduced in P9
748 if (Subtarget.hasP9Altivec() && (VT.SimpleTy != MVT::v1i128))
749 setOperationAction(ISD::CTTZ, VT, Legal);
750 else
751 setOperationAction(ISD::CTTZ, VT, Expand);
752
753 // We promote all shuffles to v16i8.
754 setOperationAction(ISD::VECTOR_SHUFFLE, VT, Promote);
755 AddPromotedToType (ISD::VECTOR_SHUFFLE, VT, MVT::v16i8);
756
757 // We promote all non-typed operations to v4i32.
758 setOperationAction(ISD::AND , VT, Promote);
759 AddPromotedToType (ISD::AND , VT, MVT::v4i32);
760 setOperationAction(ISD::OR , VT, Promote);
761 AddPromotedToType (ISD::OR , VT, MVT::v4i32);
762 setOperationAction(ISD::XOR , VT, Promote);
763 AddPromotedToType (ISD::XOR , VT, MVT::v4i32);
764 setOperationAction(ISD::LOAD , VT, Promote);
765 AddPromotedToType (ISD::LOAD , VT, MVT::v4i32);
766 setOperationAction(ISD::SELECT, VT, Promote);
767 AddPromotedToType (ISD::SELECT, VT, MVT::v4i32);
768 setOperationAction(ISD::VSELECT, VT, Legal);
769 setOperationAction(ISD::SELECT_CC, VT, Promote);
770 AddPromotedToType (ISD::SELECT_CC, VT, MVT::v4i32);
771 setOperationAction(ISD::STORE, VT, Promote);
772 AddPromotedToType (ISD::STORE, VT, MVT::v4i32);
773
774 // No other operations are legal.
775 setOperationAction(ISD::MUL , VT, Expand);
776 setOperationAction(ISD::SDIV, VT, Expand);
777 setOperationAction(ISD::SREM, VT, Expand);
778 setOperationAction(ISD::UDIV, VT, Expand);
779 setOperationAction(ISD::UREM, VT, Expand);
780 setOperationAction(ISD::FDIV, VT, Expand);
781 setOperationAction(ISD::FREM, VT, Expand);
782 setOperationAction(ISD::FNEG, VT, Expand);
783 setOperationAction(ISD::FSQRT, VT, Expand);
784 setOperationAction(ISD::FLOG, VT, Expand);
785 setOperationAction(ISD::FLOG10, VT, Expand);
786 setOperationAction(ISD::FLOG2, VT, Expand);
787 setOperationAction(ISD::FEXP, VT, Expand);
788 setOperationAction(ISD::FEXP2, VT, Expand);
789 setOperationAction(ISD::FSIN, VT, Expand);
790 setOperationAction(ISD::FCOS, VT, Expand);
791 setOperationAction(ISD::FABS, VT, Expand);
792 setOperationAction(ISD::FFLOOR, VT, Expand);
793 setOperationAction(ISD::FCEIL, VT, Expand);
794 setOperationAction(ISD::FTRUNC, VT, Expand);
795 setOperationAction(ISD::FRINT, VT, Expand);
796 setOperationAction(ISD::FNEARBYINT, VT, Expand);
797 setOperationAction(ISD::EXTRACT_VECTOR_ELT, VT, Expand);
798 setOperationAction(ISD::INSERT_VECTOR_ELT, VT, Expand);
799 setOperationAction(ISD::BUILD_VECTOR, VT, Expand);
800 setOperationAction(ISD::MULHU, VT, Expand);
801 setOperationAction(ISD::MULHS, VT, Expand);
802 setOperationAction(ISD::UMUL_LOHI, VT, Expand);
803 setOperationAction(ISD::SMUL_LOHI, VT, Expand);
804 setOperationAction(ISD::UDIVREM, VT, Expand);
805 setOperationAction(ISD::SDIVREM, VT, Expand);
806 setOperationAction(ISD::SCALAR_TO_VECTOR, VT, Expand);
807 setOperationAction(ISD::FPOW, VT, Expand);
808 setOperationAction(ISD::BSWAP, VT, Expand);
809 setOperationAction(ISD::SIGN_EXTEND_INREG, VT, Expand);
810 setOperationAction(ISD::ROTL, VT, Expand);
811 setOperationAction(ISD::ROTR, VT, Expand);
812
813 for (MVT InnerVT : MVT::fixedlen_vector_valuetypes()) {
814 setTruncStoreAction(VT, InnerVT, Expand);
815 setLoadExtAction(ISD::SEXTLOAD, VT, InnerVT, Expand);
816 setLoadExtAction(ISD::ZEXTLOAD, VT, InnerVT, Expand);
817 setLoadExtAction(ISD::EXTLOAD, VT, InnerVT, Expand);
818 }
819 }
820 setOperationAction(ISD::SELECT_CC, MVT::v4i32, Expand);
821 if (!Subtarget.hasP8Vector()) {
822 setOperationAction(ISD::SMAX, MVT::v2i64, Expand);
823 setOperationAction(ISD::SMIN, MVT::v2i64, Expand);
824 setOperationAction(ISD::UMAX, MVT::v2i64, Expand);
825 setOperationAction(ISD::UMIN, MVT::v2i64, Expand);
826 }
827
828 // We can custom expand all VECTOR_SHUFFLEs to VPERM, others we can handle
829 // with merges, splats, etc.
830 setOperationAction(ISD::VECTOR_SHUFFLE, MVT::v16i8, Custom);
831
832 // Vector truncates to sub-word integer that fit in an Altivec/VSX register
833 // are cheap, so handle them before they get expanded to scalar.
834 setOperationAction(ISD::TRUNCATE, MVT::v8i8, Custom);
835 setOperationAction(ISD::TRUNCATE, MVT::v4i8, Custom);
836 setOperationAction(ISD::TRUNCATE, MVT::v2i8, Custom);
837 setOperationAction(ISD::TRUNCATE, MVT::v4i16, Custom);
838 setOperationAction(ISD::TRUNCATE, MVT::v2i16, Custom);
839
840 setOperationAction(ISD::AND , MVT::v4i32, Legal);
841 setOperationAction(ISD::OR , MVT::v4i32, Legal);
842 setOperationAction(ISD::XOR , MVT::v4i32, Legal);
843 setOperationAction(ISD::LOAD , MVT::v4i32, Legal);
844 setOperationAction(ISD::SELECT, MVT::v4i32,
845 Subtarget.useCRBits() ? Legal : Expand);
846 setOperationAction(ISD::STORE , MVT::v4i32, Legal);
847 setOperationAction(ISD::STRICT_FP_TO_SINT, MVT::v4i32, Legal);
848 setOperationAction(ISD::STRICT_FP_TO_UINT, MVT::v4i32, Legal);
849 setOperationAction(ISD::STRICT_SINT_TO_FP, MVT::v4i32, Legal);
850 setOperationAction(ISD::STRICT_UINT_TO_FP, MVT::v4i32, Legal);
851 setOperationAction(ISD::FP_TO_SINT, MVT::v4i32, Legal);
852 setOperationAction(ISD::FP_TO_UINT, MVT::v4i32, Legal);
853 setOperationAction(ISD::SINT_TO_FP, MVT::v4i32, Legal);
854 setOperationAction(ISD::UINT_TO_FP, MVT::v4i32, Legal);
855 setOperationAction(ISD::FFLOOR, MVT::v4f32, Legal);
856 setOperationAction(ISD::FCEIL, MVT::v4f32, Legal);
857 setOperationAction(ISD::FTRUNC, MVT::v4f32, Legal);
858 setOperationAction(ISD::FNEARBYINT, MVT::v4f32, Legal);
859
860 // Custom lowering ROTL v1i128 to VECTOR_SHUFFLE v16i8.
861 setOperationAction(ISD::ROTL, MVT::v1i128, Custom);
862 // With hasAltivec set, we can lower ISD::ROTL to vrl(b|h|w).
863 if (Subtarget.hasAltivec())
864 for (auto VT : {MVT::v4i32, MVT::v8i16, MVT::v16i8})
865 setOperationAction(ISD::ROTL, VT, Legal);
866 // With hasP8Altivec set, we can lower ISD::ROTL to vrld.
867 if (Subtarget.hasP8Altivec())
868 setOperationAction(ISD::ROTL, MVT::v2i64, Legal);
869
870 addRegisterClass(MVT::v4f32, &PPC::VRRCRegClass);
871 addRegisterClass(MVT::v4i32, &PPC::VRRCRegClass);
872 addRegisterClass(MVT::v8i16, &PPC::VRRCRegClass);
873 addRegisterClass(MVT::v16i8, &PPC::VRRCRegClass);
874
875 setOperationAction(ISD::MUL, MVT::v4f32, Legal);
876 setOperationAction(ISD::FMA, MVT::v4f32, Legal);
877
878 if (Subtarget.hasVSX()) {
879 setOperationAction(ISD::FDIV, MVT::v4f32, Legal);
880 setOperationAction(ISD::FSQRT, MVT::v4f32, Legal);
881 }
882
883 if (Subtarget.hasP8Altivec())
884 setOperationAction(ISD::MUL, MVT::v4i32, Legal);
885 else
886 setOperationAction(ISD::MUL, MVT::v4i32, Custom);
887
888 if (Subtarget.isISA3_1()) {
889 setOperationAction(ISD::MUL, MVT::v2i64, Legal);
890 setOperationAction(ISD::MULHS, MVT::v2i64, Legal);
891 setOperationAction(ISD::MULHU, MVT::v2i64, Legal);
892 setOperationAction(ISD::MULHS, MVT::v4i32, Legal);
893 setOperationAction(ISD::MULHU, MVT::v4i32, Legal);
894 setOperationAction(ISD::UDIV, MVT::v2i64, Legal);
895 setOperationAction(ISD::SDIV, MVT::v2i64, Legal);
896 setOperationAction(ISD::UDIV, MVT::v4i32, Legal);
897 setOperationAction(ISD::SDIV, MVT::v4i32, Legal);
898 setOperationAction(ISD::UREM, MVT::v2i64, Legal);
899 setOperationAction(ISD::SREM, MVT::v2i64, Legal);
900 setOperationAction(ISD::UREM, MVT::v4i32, Legal);
901 setOperationAction(ISD::SREM, MVT::v4i32, Legal);
902 setOperationAction(ISD::UREM, MVT::v1i128, Legal);
903 setOperationAction(ISD::SREM, MVT::v1i128, Legal);
904 setOperationAction(ISD::UDIV, MVT::v1i128, Legal);
905 setOperationAction(ISD::SDIV, MVT::v1i128, Legal);
906 setOperationAction(ISD::ROTL, MVT::v1i128, Legal);
907 }
908
909 setOperationAction(ISD::MUL, MVT::v8i16, Legal);
910 setOperationAction(ISD::MUL, MVT::v16i8, Custom);
911
912 setOperationAction(ISD::SCALAR_TO_VECTOR, MVT::v4f32, Custom);
913 setOperationAction(ISD::SCALAR_TO_VECTOR, MVT::v4i32, Custom);
914
915 setOperationAction(ISD::BUILD_VECTOR, MVT::v16i8, Custom);
916 setOperationAction(ISD::BUILD_VECTOR, MVT::v8i16, Custom);
917 setOperationAction(ISD::BUILD_VECTOR, MVT::v4i32, Custom);
918 setOperationAction(ISD::BUILD_VECTOR, MVT::v4f32, Custom);
919
920 // Altivec does not contain unordered floating-point compare instructions
921 setCondCodeAction(ISD::SETUO, MVT::v4f32, Expand);
922 setCondCodeAction(ISD::SETUEQ, MVT::v4f32, Expand);
923 setCondCodeAction(ISD::SETO, MVT::v4f32, Expand);
924 setCondCodeAction(ISD::SETONE, MVT::v4f32, Expand);
925
926 if (Subtarget.hasVSX()) {
927 setOperationAction(ISD::SCALAR_TO_VECTOR, MVT::v2f64, Legal);
928 setOperationAction(ISD::EXTRACT_VECTOR_ELT, MVT::v2f64, Legal);
929 if (Subtarget.hasP8Vector()) {
930 setOperationAction(ISD::SCALAR_TO_VECTOR, MVT::v4f32, Legal);
931 setOperationAction(ISD::EXTRACT_VECTOR_ELT, MVT::v4f32, Legal);
932 }
933 if (Subtarget.hasDirectMove() && isPPC64) {
934 setOperationAction(ISD::SCALAR_TO_VECTOR, MVT::v16i8, Legal);
935 setOperationAction(ISD::SCALAR_TO_VECTOR, MVT::v8i16, Legal);
936 setOperationAction(ISD::SCALAR_TO_VECTOR, MVT::v4i32, Legal);
937 setOperationAction(ISD::SCALAR_TO_VECTOR, MVT::v2i64, Legal);
938 setOperationAction(ISD::EXTRACT_VECTOR_ELT, MVT::v16i8, Legal);
939 setOperationAction(ISD::EXTRACT_VECTOR_ELT, MVT::v8i16, Legal);
940 setOperationAction(ISD::EXTRACT_VECTOR_ELT, MVT::v4i32, Legal);
941 setOperationAction(ISD::EXTRACT_VECTOR_ELT, MVT::v2i64, Legal);
942 }
943 setOperationAction(ISD::EXTRACT_VECTOR_ELT, MVT::v2f64, Legal);
944
945 // The nearbyint variants are not allowed to raise the inexact exception
946 // so we can only code-gen them with unsafe math.
947 if (TM.Options.UnsafeFPMath) {
948 setOperationAction(ISD::FNEARBYINT, MVT::f64, Legal);
949 setOperationAction(ISD::FNEARBYINT, MVT::f32, Legal);
950 }
951
952 setOperationAction(ISD::FFLOOR, MVT::v2f64, Legal);
953 setOperationAction(ISD::FCEIL, MVT::v2f64, Legal);
954 setOperationAction(ISD::FTRUNC, MVT::v2f64, Legal);
955 setOperationAction(ISD::FNEARBYINT, MVT::v2f64, Legal);
956 setOperationAction(ISD::FRINT, MVT::v2f64, Legal);
957 setOperationAction(ISD::FROUND, MVT::v2f64, Legal);
958 setOperationAction(ISD::FROUND, MVT::f64, Legal);
959 setOperationAction(ISD::FRINT, MVT::f64, Legal);
960
961 setOperationAction(ISD::FNEARBYINT, MVT::v4f32, Legal);
962 setOperationAction(ISD::FRINT, MVT::v4f32, Legal);
963 setOperationAction(ISD::FROUND, MVT::v4f32, Legal);
964 setOperationAction(ISD::FROUND, MVT::f32, Legal);
965 setOperationAction(ISD::FRINT, MVT::f32, Legal);
966
967 setOperationAction(ISD::MUL, MVT::v2f64, Legal);
968 setOperationAction(ISD::FMA, MVT::v2f64, Legal);
969
970 setOperationAction(ISD::FDIV, MVT::v2f64, Legal);
971 setOperationAction(ISD::FSQRT, MVT::v2f64, Legal);
972
973 // Share the Altivec comparison restrictions.
974 setCondCodeAction(ISD::SETUO, MVT::v2f64, Expand);
975 setCondCodeAction(ISD::SETUEQ, MVT::v2f64, Expand);
976 setCondCodeAction(ISD::SETO, MVT::v2f64, Expand);
977 setCondCodeAction(ISD::SETONE, MVT::v2f64, Expand);
978
979 setOperationAction(ISD::LOAD, MVT::v2f64, Legal);
980 setOperationAction(ISD::STORE, MVT::v2f64, Legal);
981
982 setOperationAction(ISD::VECTOR_SHUFFLE, MVT::v2f64, Legal);
983
984 if (Subtarget.hasP8Vector())
985 addRegisterClass(MVT::f32, &PPC::VSSRCRegClass);
986
987 addRegisterClass(MVT::f64, &PPC::VSFRCRegClass);
988
989 addRegisterClass(MVT::v4i32, &PPC::VSRCRegClass);
990 addRegisterClass(MVT::v4f32, &PPC::VSRCRegClass);
991 addRegisterClass(MVT::v2f64, &PPC::VSRCRegClass);
992
993 if (Subtarget.hasP8Altivec()) {
994 setOperationAction(ISD::SHL, MVT::v2i64, Legal);
995 setOperationAction(ISD::SRA, MVT::v2i64, Legal);
996 setOperationAction(ISD::SRL, MVT::v2i64, Legal);
997
998 // 128 bit shifts can be accomplished via 3 instructions for SHL and
999 // SRL, but not for SRA because of the instructions available:
1000 // VS{RL} and VS{RL}O. However due to direct move costs, it's not worth
1001 // doing
1002 setOperationAction(ISD::SHL, MVT::v1i128, Expand);
1003 setOperationAction(ISD::SRL, MVT::v1i128, Expand);
1004 setOperationAction(ISD::SRA, MVT::v1i128, Expand);
1005
1006 setOperationAction(ISD::SETCC, MVT::v2i64, Legal);
1007 }
1008 else {
1009 setOperationAction(ISD::SHL, MVT::v2i64, Expand);
1010 setOperationAction(ISD::SRA, MVT::v2i64, Expand);
1011 setOperationAction(ISD::SRL, MVT::v2i64, Expand);
1012
1013 setOperationAction(ISD::SETCC, MVT::v2i64, Custom);
1014
1015 // VSX v2i64 only supports non-arithmetic operations.
1016 setOperationAction(ISD::ADD, MVT::v2i64, Expand);
1017 setOperationAction(ISD::SUB, MVT::v2i64, Expand);
1018 }
1019
1020 if (Subtarget.isISA3_1())
1021 setOperationAction(ISD::SETCC, MVT::v1i128, Legal);
1022 else
1023 setOperationAction(ISD::SETCC, MVT::v1i128, Expand);
1024
1025 setOperationAction(ISD::LOAD, MVT::v2i64, Promote);
1026 AddPromotedToType (ISD::LOAD, MVT::v2i64, MVT::v2f64);
1027 setOperationAction(ISD::STORE, MVT::v2i64, Promote);
1028 AddPromotedToType (ISD::STORE, MVT::v2i64, MVT::v2f64);
1029
1030 setOperationAction(ISD::VECTOR_SHUFFLE, MVT::v2i64, Legal);
1031
1032 setOperationAction(ISD::STRICT_SINT_TO_FP, MVT::v2i64, Legal);
1033 setOperationAction(ISD::STRICT_UINT_TO_FP, MVT::v2i64, Legal);
1034 setOperationAction(ISD::STRICT_FP_TO_SINT, MVT::v2i64, Legal);
1035 setOperationAction(ISD::STRICT_FP_TO_UINT, MVT::v2i64, Legal);
1036 setOperationAction(ISD::SINT_TO_FP, MVT::v2i64, Legal);
1037 setOperationAction(ISD::UINT_TO_FP, MVT::v2i64, Legal);
1038 setOperationAction(ISD::FP_TO_SINT, MVT::v2i64, Legal);
1039 setOperationAction(ISD::FP_TO_UINT, MVT::v2i64, Legal);
1040
1041 // Custom handling for partial vectors of integers converted to
1042 // floating point. We already have optimal handling for v2i32 through
1043 // the DAG combine, so those aren't necessary.
1044 setOperationAction(ISD::STRICT_UINT_TO_FP, MVT::v2i8, Custom);
1045 setOperationAction(ISD::STRICT_UINT_TO_FP, MVT::v4i8, Custom);
1046 setOperationAction(ISD::STRICT_UINT_TO_FP, MVT::v2i16, Custom);
1047 setOperationAction(ISD::STRICT_UINT_TO_FP, MVT::v4i16, Custom);
1048 setOperationAction(ISD::STRICT_SINT_TO_FP, MVT::v2i8, Custom);
1049 setOperationAction(ISD::STRICT_SINT_TO_FP, MVT::v4i8, Custom);
1050 setOperationAction(ISD::STRICT_SINT_TO_FP, MVT::v2i16, Custom);
1051 setOperationAction(ISD::STRICT_SINT_TO_FP, MVT::v4i16, Custom);
1052 setOperationAction(ISD::UINT_TO_FP, MVT::v2i8, Custom);
1053 setOperationAction(ISD::UINT_TO_FP, MVT::v4i8, Custom);
1054 setOperationAction(ISD::UINT_TO_FP, MVT::v2i16, Custom);
1055 setOperationAction(ISD::UINT_TO_FP, MVT::v4i16, Custom);
1056 setOperationAction(ISD::SINT_TO_FP, MVT::v2i8, Custom);
1057 setOperationAction(ISD::SINT_TO_FP, MVT::v4i8, Custom);
1058 setOperationAction(ISD::SINT_TO_FP, MVT::v2i16, Custom);
1059 setOperationAction(ISD::SINT_TO_FP, MVT::v4i16, Custom);
1060
1061 setOperationAction(ISD::FNEG, MVT::v4f32, Legal);
1062 setOperationAction(ISD::FNEG, MVT::v2f64, Legal);
1063 setOperationAction(ISD::FABS, MVT::v4f32, Legal);
1064 setOperationAction(ISD::FABS, MVT::v2f64, Legal);
1065 setOperationAction(ISD::FCOPYSIGN, MVT::v4f32, Legal);
1066 setOperationAction(ISD::FCOPYSIGN, MVT::v2f64, Legal);
1067
1068 if (Subtarget.hasDirectMove())
1069 setOperationAction(ISD::BUILD_VECTOR, MVT::v2i64, Custom);
1070 setOperationAction(ISD::BUILD_VECTOR, MVT::v2f64, Custom);
1071
1072 // Handle constrained floating-point operations of vector.
1073 // The predictor is `hasVSX` because altivec instruction has
1074 // no exception but VSX vector instruction has.
1075 setOperationAction(ISD::STRICT_FADD, MVT::v4f32, Legal);
1076 setOperationAction(ISD::STRICT_FSUB, MVT::v4f32, Legal);
1077 setOperationAction(ISD::STRICT_FMUL, MVT::v4f32, Legal);
1078 setOperationAction(ISD::STRICT_FDIV, MVT::v4f32, Legal);
1079 setOperationAction(ISD::STRICT_FMA, MVT::v4f32, Legal);
1080 setOperationAction(ISD::STRICT_FSQRT, MVT::v4f32, Legal);
1081 setOperationAction(ISD::STRICT_FMAXNUM, MVT::v4f32, Legal);
1082 setOperationAction(ISD::STRICT_FMINNUM, MVT::v4f32, Legal);
1083 setOperationAction(ISD::STRICT_FRINT, MVT::v4f32, Legal);
1084 setOperationAction(ISD::STRICT_FFLOOR, MVT::v4f32, Legal);
1085 setOperationAction(ISD::STRICT_FCEIL, MVT::v4f32, Legal);
1086 setOperationAction(ISD::STRICT_FTRUNC, MVT::v4f32, Legal);
1087 setOperationAction(ISD::STRICT_FROUND, MVT::v4f32, Legal);
1088
1089 setOperationAction(ISD::STRICT_FADD, MVT::v2f64, Legal);
1090 setOperationAction(ISD::STRICT_FSUB, MVT::v2f64, Legal);
1091 setOperationAction(ISD::STRICT_FMUL, MVT::v2f64, Legal);
1092 setOperationAction(ISD::STRICT_FDIV, MVT::v2f64, Legal);
1093 setOperationAction(ISD::STRICT_FMA, MVT::v2f64, Legal);
1094 setOperationAction(ISD::STRICT_FSQRT, MVT::v2f64, Legal);
1095 setOperationAction(ISD::STRICT_FMAXNUM, MVT::v2f64, Legal);
1096 setOperationAction(ISD::STRICT_FMINNUM, MVT::v2f64, Legal);
1097 setOperationAction(ISD::STRICT_FRINT, MVT::v2f64, Legal);
1098 setOperationAction(ISD::STRICT_FFLOOR, MVT::v2f64, Legal);
1099 setOperationAction(ISD::STRICT_FCEIL, MVT::v2f64, Legal);
1100 setOperationAction(ISD::STRICT_FTRUNC, MVT::v2f64, Legal);
1101 setOperationAction(ISD::STRICT_FROUND, MVT::v2f64, Legal);
1102
1103 addRegisterClass(MVT::v2i64, &PPC::VSRCRegClass);
1104 }
1105
1106 if (Subtarget.hasP8Altivec()) {
1107 addRegisterClass(MVT::v2i64, &PPC::VRRCRegClass);
1108 addRegisterClass(MVT::v1i128, &PPC::VRRCRegClass);
1109 }
1110
1111 if (Subtarget.hasP9Vector()) {
1112 setOperationAction(ISD::INSERT_VECTOR_ELT, MVT::v4i32, Custom);
1113 setOperationAction(ISD::INSERT_VECTOR_ELT, MVT::v4f32, Custom);
1114
1115 // 128 bit shifts can be accomplished via 3 instructions for SHL and
1116 // SRL, but not for SRA because of the instructions available:
1117 // VS{RL} and VS{RL}O.
1118 setOperationAction(ISD::SHL, MVT::v1i128, Legal);
1119 setOperationAction(ISD::SRL, MVT::v1i128, Legal);
1120 setOperationAction(ISD::SRA, MVT::v1i128, Expand);
1121
1122 addRegisterClass(MVT::f128, &PPC::VRRCRegClass);
1123 setOperationAction(ISD::FADD, MVT::f128, Legal);
1124 setOperationAction(ISD::FSUB, MVT::f128, Legal);
1125 setOperationAction(ISD::FDIV, MVT::f128, Legal);
1126 setOperationAction(ISD::FMUL, MVT::f128, Legal);
1127 setOperationAction(ISD::FP_EXTEND, MVT::f128, Legal);
1128 // No extending loads to f128 on PPC.
1129 for (MVT FPT : MVT::fp_valuetypes())
1130 setLoadExtAction(ISD::EXTLOAD, MVT::f128, FPT, Expand);
1131 setOperationAction(ISD::FMA, MVT::f128, Legal);
1132 setCondCodeAction(ISD::SETULT, MVT::f128, Expand);
1133 setCondCodeAction(ISD::SETUGT, MVT::f128, Expand);
1134 setCondCodeAction(ISD::SETUEQ, MVT::f128, Expand);
1135 setCondCodeAction(ISD::SETOGE, MVT::f128, Expand);
1136 setCondCodeAction(ISD::SETOLE, MVT::f128, Expand);
1137 setCondCodeAction(ISD::SETONE, MVT::f128, Expand);
1138
1139 setOperationAction(ISD::FTRUNC, MVT::f128, Legal);
1140 setOperationAction(ISD::FRINT, MVT::f128, Legal);
1141 setOperationAction(ISD::FFLOOR, MVT::f128, Legal);
1142 setOperationAction(ISD::FCEIL, MVT::f128, Legal);
1143 setOperationAction(ISD::FNEARBYINT, MVT::f128, Legal);
1144 setOperationAction(ISD::FROUND, MVT::f128, Legal);
1145
1146 setOperationAction(ISD::SELECT, MVT::f128, Expand);
1147 setOperationAction(ISD::FP_ROUND, MVT::f64, Legal);
1148 setOperationAction(ISD::FP_ROUND, MVT::f32, Legal);
1149 setTruncStoreAction(MVT::f128, MVT::f64, Expand);
1150 setTruncStoreAction(MVT::f128, MVT::f32, Expand);
1151 setOperationAction(ISD::BITCAST, MVT::i128, Custom);
1152 // No implementation for these ops for PowerPC.
1153 setOperationAction(ISD::FSIN, MVT::f128, Expand);
1154 setOperationAction(ISD::FCOS, MVT::f128, Expand);
1155 setOperationAction(ISD::FPOW, MVT::f128, Expand);
1156 setOperationAction(ISD::FPOWI, MVT::f128, Expand);
1157 setOperationAction(ISD::FREM, MVT::f128, Expand);
1158
1159 // Handle constrained floating-point operations of fp128
1160 setOperationAction(ISD::STRICT_FADD, MVT::f128, Legal);
1161 setOperationAction(ISD::STRICT_FSUB, MVT::f128, Legal);
1162 setOperationAction(ISD::STRICT_FMUL, MVT::f128, Legal);
1163 setOperationAction(ISD::STRICT_FDIV, MVT::f128, Legal);
1164 setOperationAction(ISD::STRICT_FMA, MVT::f128, Legal);
1165 setOperationAction(ISD::STRICT_FSQRT, MVT::f128, Legal);
1166 setOperationAction(ISD::STRICT_FP_EXTEND, MVT::f128, Legal);
1167 setOperationAction(ISD::STRICT_FP_ROUND, MVT::f64, Legal);
1168 setOperationAction(ISD::STRICT_FP_ROUND, MVT::f32, Legal);
1169 setOperationAction(ISD::STRICT_FRINT, MVT::f128, Legal);
1170 setOperationAction(ISD::STRICT_FNEARBYINT, MVT::f128, Legal);
1171 setOperationAction(ISD::STRICT_FFLOOR, MVT::f128, Legal);
1172 setOperationAction(ISD::STRICT_FCEIL, MVT::f128, Legal);
1173 setOperationAction(ISD::STRICT_FTRUNC, MVT::f128, Legal);
1174 setOperationAction(ISD::STRICT_FROUND, MVT::f128, Legal);
1175 setOperationAction(ISD::FP_EXTEND, MVT::v2f32, Custom);
1176 setOperationAction(ISD::BSWAP, MVT::v8i16, Legal);
1177 setOperationAction(ISD::BSWAP, MVT::v4i32, Legal);
1178 setOperationAction(ISD::BSWAP, MVT::v2i64, Legal);
1179 setOperationAction(ISD::BSWAP, MVT::v1i128, Legal);
1180 } else if (Subtarget.hasAltivec() && EnableSoftFP128) {
1181 addRegisterClass(MVT::f128, &PPC::VRRCRegClass);
1182
1183 for (MVT FPT : MVT::fp_valuetypes())
1184 setLoadExtAction(ISD::EXTLOAD, MVT::f128, FPT, Expand);
1185
1186 setOperationAction(ISD::LOAD, MVT::f128, Promote);
1187 setOperationAction(ISD::STORE, MVT::f128, Promote);
1188
1189 AddPromotedToType(ISD::LOAD, MVT::f128, MVT::v4i32);
1190 AddPromotedToType(ISD::STORE, MVT::f128, MVT::v4i32);
1191
1192    // Set FADD/FSUB as libcall to avoid having the legalizer expand the
1193 // fp_to_uint and int_to_fp.
1194 setOperationAction(ISD::FADD, MVT::f128, LibCall);
1195 setOperationAction(ISD::FSUB, MVT::f128, LibCall);
1196
1197 setOperationAction(ISD::FMUL, MVT::f128, Expand);
1198 setOperationAction(ISD::FDIV, MVT::f128, Expand);
1199 setOperationAction(ISD::FNEG, MVT::f128, Expand);
1200 setOperationAction(ISD::FABS, MVT::f128, Expand);
1201 setOperationAction(ISD::FSIN, MVT::f128, Expand);
1202 setOperationAction(ISD::FCOS, MVT::f128, Expand);
1203 setOperationAction(ISD::FPOW, MVT::f128, Expand);
1204 setOperationAction(ISD::FPOWI, MVT::f128, Expand);
1205 setOperationAction(ISD::FREM, MVT::f128, Expand);
1206 setOperationAction(ISD::FSQRT, MVT::f128, Expand);
1207 setOperationAction(ISD::FMA, MVT::f128, Expand);
1208 setOperationAction(ISD::FCOPYSIGN, MVT::f128, Expand);
1209
1210 setTruncStoreAction(MVT::f128, MVT::f64, Expand);
1211 setTruncStoreAction(MVT::f128, MVT::f32, Expand);
1212
1213 // Expand the fp_extend if the target type is fp128.
1214 setOperationAction(ISD::FP_EXTEND, MVT::f128, Expand);
1215 setOperationAction(ISD::STRICT_FP_EXTEND, MVT::f128, Expand);
1216
1217 // Expand the fp_round if the source type is fp128.
1218 for (MVT VT : {MVT::f32, MVT::f64}) {
1219 setOperationAction(ISD::FP_ROUND, VT, Custom);
1220 setOperationAction(ISD::STRICT_FP_ROUND, VT, Custom);
1221 }
1222
1223 setOperationAction(ISD::SETCC, MVT::f128, Custom);
1224 setOperationAction(ISD::STRICT_FSETCC, MVT::f128, Custom);
1225 setOperationAction(ISD::STRICT_FSETCCS, MVT::f128, Custom);
1226 setOperationAction(ISD::BR_CC, MVT::f128, Expand);
1227
1228    // Lower the following f128 select_cc pattern:
1229 // select_cc x, y, tv, fv, cc -> select_cc (setcc x, y, cc), 0, tv, fv, NE
1230 setOperationAction(ISD::SELECT_CC, MVT::f128, Custom);
1231
1232 // We need to handle f128 SELECT_CC with integer result type.
1233 setOperationAction(ISD::SELECT_CC, MVT::i32, Custom);
1234 setOperationAction(ISD::SELECT_CC, MVT::i64, Custom);
1235 }
1236
1237 if (Subtarget.hasP9Altivec()) {
1238 setOperationAction(ISD::INSERT_VECTOR_ELT, MVT::v8i16, Custom);
1239 setOperationAction(ISD::INSERT_VECTOR_ELT, MVT::v16i8, Custom);
1240
1241 setOperationAction(ISD::SIGN_EXTEND_INREG, MVT::v4i8, Legal);
1242 setOperationAction(ISD::SIGN_EXTEND_INREG, MVT::v4i16, Legal);
1243 setOperationAction(ISD::SIGN_EXTEND_INREG, MVT::v4i32, Legal);
1244 setOperationAction(ISD::SIGN_EXTEND_INREG, MVT::v2i8, Legal);
1245 setOperationAction(ISD::SIGN_EXTEND_INREG, MVT::v2i16, Legal);
1246 setOperationAction(ISD::SIGN_EXTEND_INREG, MVT::v2i32, Legal);
1247 setOperationAction(ISD::SIGN_EXTEND_INREG, MVT::v2i64, Legal);
1248 }
1249
1250 if (Subtarget.isISA3_1()) {
1251 setOperationAction(ISD::INSERT_VECTOR_ELT, MVT::v2i64, Custom);
1252 setOperationAction(ISD::INSERT_VECTOR_ELT, MVT::v2f64, Custom);
1253 }
1254 }
1255
1256 if (Subtarget.pairedVectorMemops()) {
1257 addRegisterClass(MVT::v256i1, &PPC::VSRpRCRegClass);
1258 setOperationAction(ISD::LOAD, MVT::v256i1, Custom);
1259 setOperationAction(ISD::STORE, MVT::v256i1, Custom);
1260 }
1261 if (Subtarget.hasMMA()) {
1262 addRegisterClass(MVT::v512i1, &PPC::UACCRCRegClass);
1263 setOperationAction(ISD::LOAD, MVT::v512i1, Custom);
1264 setOperationAction(ISD::STORE, MVT::v512i1, Custom);
1265 setOperationAction(ISD::BUILD_VECTOR, MVT::v512i1, Custom);
1266 }
1267
1268 if (Subtarget.has64BitSupport())
1269 setOperationAction(ISD::PREFETCH, MVT::Other, Legal);
1270
1271 if (Subtarget.isISA3_1())
1272 setOperationAction(ISD::SRA, MVT::v1i128, Legal);
1273
1274 setOperationAction(ISD::READCYCLECOUNTER, MVT::i64, isPPC64 ? Legal : Custom);
1275
1276 if (!isPPC64) {
1277 setOperationAction(ISD::ATOMIC_LOAD, MVT::i64, Expand);
1278 setOperationAction(ISD::ATOMIC_STORE, MVT::i64, Expand);
1279 }
1280
1281 setBooleanContents(ZeroOrOneBooleanContent);
1282
1283 if (Subtarget.hasAltivec()) {
1284 // Altivec instructions set fields to all zeros or all ones.
1285 setBooleanVectorContents(ZeroOrNegativeOneBooleanContent);
1286 }
1287
1288 if (!isPPC64) {
1289 // These libcalls are not available in 32-bit.
1290 setLibcallName(RTLIB::SHL_I128, nullptr);
1291 setLibcallName(RTLIB::SRL_I128, nullptr);
1292 setLibcallName(RTLIB::SRA_I128, nullptr);
1293 }
1294
1295 if (!isPPC64)
1296 setMaxAtomicSizeInBitsSupported(32);
1297
1298 setStackPointerRegisterToSaveRestore(isPPC64 ? PPC::X1 : PPC::R1);
1299
1300 // We have target-specific dag combine patterns for the following nodes:
1301 setTargetDAGCombine(ISD::ADD);
1302 setTargetDAGCombine(ISD::SHL);
1303 setTargetDAGCombine(ISD::SRA);
1304 setTargetDAGCombine(ISD::SRL);
1305 setTargetDAGCombine(ISD::MUL);
1306 setTargetDAGCombine(ISD::FMA);
1307 setTargetDAGCombine(ISD::SINT_TO_FP);
1308 setTargetDAGCombine(ISD::BUILD_VECTOR);
1309 if (Subtarget.hasFPCVT())
1310 setTargetDAGCombine(ISD::UINT_TO_FP);
1311 setTargetDAGCombine(ISD::LOAD);
1312 setTargetDAGCombine(ISD::STORE);
1313 setTargetDAGCombine(ISD::BR_CC);
1314 if (Subtarget.useCRBits())
1315 setTargetDAGCombine(ISD::BRCOND);
1316 setTargetDAGCombine(ISD::BSWAP);
1317 setTargetDAGCombine(ISD::INTRINSIC_WO_CHAIN);
1318 setTargetDAGCombine(ISD::INTRINSIC_W_CHAIN);
1319 setTargetDAGCombine(ISD::INTRINSIC_VOID);
1320
1321 setTargetDAGCombine(ISD::SIGN_EXTEND);
1322 setTargetDAGCombine(ISD::ZERO_EXTEND);
1323 setTargetDAGCombine(ISD::ANY_EXTEND);
1324
1325 setTargetDAGCombine(ISD::TRUNCATE);
1326 setTargetDAGCombine(ISD::VECTOR_SHUFFLE);
1327
1328
1329 if (Subtarget.useCRBits()) {
1330 setTargetDAGCombine(ISD::TRUNCATE);
1331 setTargetDAGCombine(ISD::SETCC);
1332 setTargetDAGCombine(ISD::SELECT_CC);
1333 }
1334
1335 if (Subtarget.hasP9Altivec()) {
1336 setTargetDAGCombine(ISD::ABS);
1337 setTargetDAGCombine(ISD::VSELECT);
1338 }
1339
1340 setLibcallName(RTLIB::LOG_F128, "logf128");
1341 setLibcallName(RTLIB::LOG2_F128, "log2f128");
1342 setLibcallName(RTLIB::LOG10_F128, "log10f128");
1343 setLibcallName(RTLIB::EXP_F128, "expf128");
1344 setLibcallName(RTLIB::EXP2_F128, "exp2f128");
1345 setLibcallName(RTLIB::SIN_F128, "sinf128");
1346 setLibcallName(RTLIB::COS_F128, "cosf128");
1347 setLibcallName(RTLIB::POW_F128, "powf128");
1348 setLibcallName(RTLIB::FMIN_F128, "fminf128");
1349 setLibcallName(RTLIB::FMAX_F128, "fmaxf128");
1350 setLibcallName(RTLIB::REM_F128, "fmodf128");
1351 setLibcallName(RTLIB::SQRT_F128, "sqrtf128");
1352 setLibcallName(RTLIB::CEIL_F128, "ceilf128");
1353 setLibcallName(RTLIB::FLOOR_F128, "floorf128");
1354 setLibcallName(RTLIB::TRUNC_F128, "truncf128");
1355 setLibcallName(RTLIB::ROUND_F128, "roundf128");
1356 setLibcallName(RTLIB::LROUND_F128, "lroundf128");
1357 setLibcallName(RTLIB::LLROUND_F128, "llroundf128");
1358 setLibcallName(RTLIB::RINT_F128, "rintf128");
1359 setLibcallName(RTLIB::LRINT_F128, "lrintf128");
1360 setLibcallName(RTLIB::LLRINT_F128, "llrintf128");
1361 setLibcallName(RTLIB::NEARBYINT_F128, "nearbyintf128");
1362 setLibcallName(RTLIB::FMA_F128, "fmaf128");
1363
1364 // With 32 condition bits, we don't need to sink (and duplicate) compares
1365 // aggressively in CodeGenPrep.
1366 if (Subtarget.useCRBits()) {
1367 setHasMultipleConditionRegisters();
1368 setJumpIsExpensive();
1369 }
1370
1371 setMinFunctionAlignment(Align(4));
1372
1373 switch (Subtarget.getCPUDirective()) {
1374 default: break;
1375 case PPC::DIR_970:
1376 case PPC::DIR_A2:
1377 case PPC::DIR_E500:
1378 case PPC::DIR_E500mc:
1379 case PPC::DIR_E5500:
1380 case PPC::DIR_PWR4:
1381 case PPC::DIR_PWR5:
1382 case PPC::DIR_PWR5X:
1383 case PPC::DIR_PWR6:
1384 case PPC::DIR_PWR6X:
1385 case PPC::DIR_PWR7:
1386 case PPC::DIR_PWR8:
1387 case PPC::DIR_PWR9:
1388 case PPC::DIR_PWR10:
1389 case PPC::DIR_PWR_FUTURE:
1390 setPrefLoopAlignment(Align(16));
1391 setPrefFunctionAlignment(Align(16));
1392 break;
1393 }
1394
1395 if (Subtarget.enableMachineScheduler())
1396 setSchedulingPreference(Sched::Source);
1397 else
1398 setSchedulingPreference(Sched::Hybrid);
1399
1400 computeRegisterProperties(STI.getRegisterInfo());
1401
1402 // The Freescale cores do better with aggressive inlining of memcpy and
1403  // friends. GCC uses the same threshold of 128 bytes (= 32 word stores).
1404 if (Subtarget.getCPUDirective() == PPC::DIR_E500mc ||
1405 Subtarget.getCPUDirective() == PPC::DIR_E5500) {
1406 MaxStoresPerMemset = 32;
1407 MaxStoresPerMemsetOptSize = 16;
1408 MaxStoresPerMemcpy = 32;
1409 MaxStoresPerMemcpyOptSize = 8;
1410 MaxStoresPerMemmove = 32;
1411 MaxStoresPerMemmoveOptSize = 8;
1412 } else if (Subtarget.getCPUDirective() == PPC::DIR_A2) {
1413 // The A2 also benefits from (very) aggressive inlining of memcpy and
1414    // friends. The overhead of the function call, even when warm, can be
1415 // over one hundred cycles.
1416 MaxStoresPerMemset = 128;
1417 MaxStoresPerMemcpy = 128;
1418 MaxStoresPerMemmove = 128;
1419 MaxLoadsPerMemcmp = 128;
1420 } else {
1421 MaxLoadsPerMemcmp = 8;
1422 MaxLoadsPerMemcmpOptSize = 4;
1423 }
1424
1425 IsStrictFPEnabled = true;
1426
1427 // Let the subtarget (CPU) decide if a predictable select is more expensive
1428 // than the corresponding branch. This information is used in CGP to decide
1429 // when to convert selects into branches.
1430 PredictableSelectIsExpensive = Subtarget.isPredictableSelectIsExpensive();
1431}
1432
1433/// getMaxByValAlign - Helper for getByValTypeAlignment to determine
1434/// the desired ByVal argument alignment.
1435static void getMaxByValAlign(Type *Ty, Align &MaxAlign, Align MaxMaxAlign) {
1436 if (MaxAlign == MaxMaxAlign)
1437 return;
1438 if (VectorType *VTy = dyn_cast<VectorType>(Ty)) {
1439 if (MaxMaxAlign >= 32 &&
1440 VTy->getPrimitiveSizeInBits().getFixedSize() >= 256)
1441 MaxAlign = Align(32);
1442 else if (VTy->getPrimitiveSizeInBits().getFixedSize() >= 128 &&
1443 MaxAlign < 16)
1444 MaxAlign = Align(16);
1445 } else if (ArrayType *ATy = dyn_cast<ArrayType>(Ty)) {
1446 Align EltAlign;
1447 getMaxByValAlign(ATy->getElementType(), EltAlign, MaxMaxAlign);
1448 if (EltAlign > MaxAlign)
1449 MaxAlign = EltAlign;
1450 } else if (StructType *STy = dyn_cast<StructType>(Ty)) {
1451 for (auto *EltTy : STy->elements()) {
1452 Align EltAlign;
1453 getMaxByValAlign(EltTy, EltAlign, MaxMaxAlign);
1454 if (EltAlign > MaxAlign)
1455 MaxAlign = EltAlign;
1456 if (MaxAlign == MaxMaxAlign)
1457 break;
1458 }
1459 }
1460}
1461
1462/// getByValTypeAlignment - Return the desired alignment for ByVal aggregate
1463/// function arguments in the caller parameter area.
1464unsigned PPCTargetLowering::getByValTypeAlignment(Type *Ty,
1465 const DataLayout &DL) const {
1466  // 16-byte and wider vectors are passed on a 16-byte boundary.
1467  // The rest are passed on an 8-byte (PPC64) or 4-byte (PPC32) boundary.
1468 Align Alignment = Subtarget.isPPC64() ? Align(8) : Align(4);
1469 if (Subtarget.hasAltivec())
1470 getMaxByValAlign(Ty, Alignment, Align(16));
1471 return Alignment.value();
1472}
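A brief worked example may help here; the values below are a minimal sketch derived from the rules above (the aggregate shapes are hypothetical and written in IR-style notation, assuming an Altivec-enabled subtarget), not part of the original file:

// Illustrative only, not in PPCISelLowering.cpp:
//   an aggregate containing a <4 x i32> member -> getByValTypeAlignment() == 16
//   an aggregate of only i64/i32 scalars       -> 8 on PPC64, 4 on PPC32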
1473
1474bool PPCTargetLowering::useSoftFloat() const {
1475 return Subtarget.useSoftFloat();
1476}
1477
1478bool PPCTargetLowering::hasSPE() const {
1479 return Subtarget.hasSPE();
1480}
1481
1482bool PPCTargetLowering::preferIncOfAddToSubOfNot(EVT VT) const {
1483 return VT.isScalarInteger();
1484}
1485
1486const char *PPCTargetLowering::getTargetNodeName(unsigned Opcode) const {
1487 switch ((PPCISD::NodeType)Opcode) {
1488 case PPCISD::FIRST_NUMBER: break;
1489 case PPCISD::FSEL: return "PPCISD::FSEL";
1490 case PPCISD::XSMAXCDP: return "PPCISD::XSMAXCDP";
1491 case PPCISD::XSMINCDP: return "PPCISD::XSMINCDP";
1492 case PPCISD::FCFID: return "PPCISD::FCFID";
1493 case PPCISD::FCFIDU: return "PPCISD::FCFIDU";
1494 case PPCISD::FCFIDS: return "PPCISD::FCFIDS";
1495 case PPCISD::FCFIDUS: return "PPCISD::FCFIDUS";
1496 case PPCISD::FCTIDZ: return "PPCISD::FCTIDZ";
1497 case PPCISD::FCTIWZ: return "PPCISD::FCTIWZ";
1498 case PPCISD::FCTIDUZ: return "PPCISD::FCTIDUZ";
1499 case PPCISD::FCTIWUZ: return "PPCISD::FCTIWUZ";
1500 case PPCISD::FP_TO_UINT_IN_VSR:
1501    return "PPCISD::FP_TO_UINT_IN_VSR";
1502 case PPCISD::FP_TO_SINT_IN_VSR:
1503 return "PPCISD::FP_TO_SINT_IN_VSR";
1504 case PPCISD::FRE: return "PPCISD::FRE";
1505 case PPCISD::FRSQRTE: return "PPCISD::FRSQRTE";
1506 case PPCISD::FTSQRT:
1507 return "PPCISD::FTSQRT";
1508 case PPCISD::FSQRT:
1509 return "PPCISD::FSQRT";
1510 case PPCISD::STFIWX: return "PPCISD::STFIWX";
1511 case PPCISD::VPERM: return "PPCISD::VPERM";
1512 case PPCISD::XXSPLT: return "PPCISD::XXSPLT";
1513 case PPCISD::XXSPLTI_SP_TO_DP:
1514 return "PPCISD::XXSPLTI_SP_TO_DP";
1515 case PPCISD::XXSPLTI32DX:
1516 return "PPCISD::XXSPLTI32DX";
1517 case PPCISD::VECINSERT: return "PPCISD::VECINSERT";
1518 case PPCISD::XXPERMDI: return "PPCISD::XXPERMDI";
1519 case PPCISD::VECSHL: return "PPCISD::VECSHL";
1520 case PPCISD::CMPB: return "PPCISD::CMPB";
1521 case PPCISD::Hi: return "PPCISD::Hi";
1522 case PPCISD::Lo: return "PPCISD::Lo";
1523 case PPCISD::TOC_ENTRY: return "PPCISD::TOC_ENTRY";
1524 case PPCISD::ATOMIC_CMP_SWAP_8: return "PPCISD::ATOMIC_CMP_SWAP_8";
1525 case PPCISD::ATOMIC_CMP_SWAP_16: return "PPCISD::ATOMIC_CMP_SWAP_16";
1526 case PPCISD::DYNALLOC: return "PPCISD::DYNALLOC";
1527 case PPCISD::DYNAREAOFFSET: return "PPCISD::DYNAREAOFFSET";
1528 case PPCISD::PROBED_ALLOCA: return "PPCISD::PROBED_ALLOCA";
1529 case PPCISD::GlobalBaseReg: return "PPCISD::GlobalBaseReg";
1530 case PPCISD::SRL: return "PPCISD::SRL";
1531 case PPCISD::SRA: return "PPCISD::SRA";
1532 case PPCISD::SHL: return "PPCISD::SHL";
1533 case PPCISD::SRA_ADDZE: return "PPCISD::SRA_ADDZE";
1534 case PPCISD::CALL: return "PPCISD::CALL";
1535 case PPCISD::CALL_NOP: return "PPCISD::CALL_NOP";
1536 case PPCISD::CALL_NOTOC: return "PPCISD::CALL_NOTOC";
1537 case PPCISD::MTCTR: return "PPCISD::MTCTR";
1538 case PPCISD::BCTRL: return "PPCISD::BCTRL";
1539 case PPCISD::BCTRL_LOAD_TOC: return "PPCISD::BCTRL_LOAD_TOC";
1540 case PPCISD::RET_FLAG: return "PPCISD::RET_FLAG";
1541 case PPCISD::READ_TIME_BASE: return "PPCISD::READ_TIME_BASE";
1542 case PPCISD::EH_SJLJ_SETJMP: return "PPCISD::EH_SJLJ_SETJMP";
1543 case PPCISD::EH_SJLJ_LONGJMP: return "PPCISD::EH_SJLJ_LONGJMP";
1544 case PPCISD::MFOCRF: return "PPCISD::MFOCRF";
1545 case PPCISD::MFVSR: return "PPCISD::MFVSR";
1546 case PPCISD::MTVSRA: return "PPCISD::MTVSRA";
1547 case PPCISD::MTVSRZ: return "PPCISD::MTVSRZ";
1548 case PPCISD::SINT_VEC_TO_FP: return "PPCISD::SINT_VEC_TO_FP";
1549 case PPCISD::UINT_VEC_TO_FP: return "PPCISD::UINT_VEC_TO_FP";
1550 case PPCISD::SCALAR_TO_VECTOR_PERMUTED:
1551 return "PPCISD::SCALAR_TO_VECTOR_PERMUTED";
1552 case PPCISD::ANDI_rec_1_EQ_BIT:
1553 return "PPCISD::ANDI_rec_1_EQ_BIT";
1554 case PPCISD::ANDI_rec_1_GT_BIT:
1555 return "PPCISD::ANDI_rec_1_GT_BIT";
1556 case PPCISD::VCMP: return "PPCISD::VCMP";
1557 case PPCISD::VCMP_rec: return "PPCISD::VCMP_rec";
1558 case PPCISD::LBRX: return "PPCISD::LBRX";
1559 case PPCISD::STBRX: return "PPCISD::STBRX";
1560 case PPCISD::LFIWAX: return "PPCISD::LFIWAX";
1561 case PPCISD::LFIWZX: return "PPCISD::LFIWZX";
1562 case PPCISD::LXSIZX: return "PPCISD::LXSIZX";
1563 case PPCISD::STXSIX: return "PPCISD::STXSIX";
1564 case PPCISD::VEXTS: return "PPCISD::VEXTS";
1565 case PPCISD::LXVD2X: return "PPCISD::LXVD2X";
1566 case PPCISD::STXVD2X: return "PPCISD::STXVD2X";
1567 case PPCISD::LOAD_VEC_BE: return "PPCISD::LOAD_VEC_BE";
1568 case PPCISD::STORE_VEC_BE: return "PPCISD::STORE_VEC_BE";
1569 case PPCISD::ST_VSR_SCAL_INT:
1570 return "PPCISD::ST_VSR_SCAL_INT";
1571 case PPCISD::COND_BRANCH: return "PPCISD::COND_BRANCH";
1572 case PPCISD::BDNZ: return "PPCISD::BDNZ";
1573 case PPCISD::BDZ: return "PPCISD::BDZ";
1574 case PPCISD::MFFS: return "PPCISD::MFFS";
1575 case PPCISD::FADDRTZ: return "PPCISD::FADDRTZ";
1576 case PPCISD::TC_RETURN: return "PPCISD::TC_RETURN";
1577 case PPCISD::CR6SET: return "PPCISD::CR6SET";
1578 case PPCISD::CR6UNSET: return "PPCISD::CR6UNSET";
1579 case PPCISD::PPC32_GOT: return "PPCISD::PPC32_GOT";
1580 case PPCISD::PPC32_PICGOT: return "PPCISD::PPC32_PICGOT";
1581 case PPCISD::ADDIS_GOT_TPREL_HA: return "PPCISD::ADDIS_GOT_TPREL_HA";
1582 case PPCISD::LD_GOT_TPREL_L: return "PPCISD::LD_GOT_TPREL_L";
1583 case PPCISD::ADD_TLS: return "PPCISD::ADD_TLS";
1584 case PPCISD::ADDIS_TLSGD_HA: return "PPCISD::ADDIS_TLSGD_HA";
1585 case PPCISD::ADDI_TLSGD_L: return "PPCISD::ADDI_TLSGD_L";
1586 case PPCISD::GET_TLS_ADDR: return "PPCISD::GET_TLS_ADDR";
1587 case PPCISD::ADDI_TLSGD_L_ADDR: return "PPCISD::ADDI_TLSGD_L_ADDR";
1588 case PPCISD::TLSGD_AIX: return "PPCISD::TLSGD_AIX";
1589 case PPCISD::ADDIS_TLSLD_HA: return "PPCISD::ADDIS_TLSLD_HA";
1590 case PPCISD::ADDI_TLSLD_L: return "PPCISD::ADDI_TLSLD_L";
1591 case PPCISD::GET_TLSLD_ADDR: return "PPCISD::GET_TLSLD_ADDR";
1592 case PPCISD::ADDI_TLSLD_L_ADDR: return "PPCISD::ADDI_TLSLD_L_ADDR";
1593 case PPCISD::ADDIS_DTPREL_HA: return "PPCISD::ADDIS_DTPREL_HA";
1594 case PPCISD::ADDI_DTPREL_L: return "PPCISD::ADDI_DTPREL_L";
1595 case PPCISD::PADDI_DTPREL:
1596 return "PPCISD::PADDI_DTPREL";
1597 case PPCISD::VADD_SPLAT: return "PPCISD::VADD_SPLAT";
1598 case PPCISD::SC: return "PPCISD::SC";
1599 case PPCISD::CLRBHRB: return "PPCISD::CLRBHRB";
1600 case PPCISD::MFBHRBE: return "PPCISD::MFBHRBE";
1601 case PPCISD::RFEBB: return "PPCISD::RFEBB";
1602 case PPCISD::XXSWAPD: return "PPCISD::XXSWAPD";
1603 case PPCISD::SWAP_NO_CHAIN: return "PPCISD::SWAP_NO_CHAIN";
1604 case PPCISD::VABSD: return "PPCISD::VABSD";
1605 case PPCISD::BUILD_FP128: return "PPCISD::BUILD_FP128";
1606 case PPCISD::BUILD_SPE64: return "PPCISD::BUILD_SPE64";
1607 case PPCISD::EXTRACT_SPE: return "PPCISD::EXTRACT_SPE";
1608 case PPCISD::EXTSWSLI: return "PPCISD::EXTSWSLI";
1609 case PPCISD::LD_VSX_LH: return "PPCISD::LD_VSX_LH";
1610 case PPCISD::FP_EXTEND_HALF: return "PPCISD::FP_EXTEND_HALF";
1611 case PPCISD::MAT_PCREL_ADDR: return "PPCISD::MAT_PCREL_ADDR";
1612 case PPCISD::TLS_DYNAMIC_MAT_PCREL_ADDR:
1613 return "PPCISD::TLS_DYNAMIC_MAT_PCREL_ADDR";
1614 case PPCISD::TLS_LOCAL_EXEC_MAT_ADDR:
1615 return "PPCISD::TLS_LOCAL_EXEC_MAT_ADDR";
1616 case PPCISD::ACC_BUILD: return "PPCISD::ACC_BUILD";
1617 case PPCISD::PAIR_BUILD: return "PPCISD::PAIR_BUILD";
1618 case PPCISD::EXTRACT_VSX_REG: return "PPCISD::EXTRACT_VSX_REG";
1619 case PPCISD::XXMFACC: return "PPCISD::XXMFACC";
1620 case PPCISD::LD_SPLAT: return "PPCISD::LD_SPLAT";
1621 case PPCISD::FNMSUB: return "PPCISD::FNMSUB";
1622 case PPCISD::STRICT_FADDRTZ:
1623 return "PPCISD::STRICT_FADDRTZ";
1624 case PPCISD::STRICT_FCTIDZ:
1625 return "PPCISD::STRICT_FCTIDZ";
1626 case PPCISD::STRICT_FCTIWZ:
1627 return "PPCISD::STRICT_FCTIWZ";
1628 case PPCISD::STRICT_FCTIDUZ:
1629 return "PPCISD::STRICT_FCTIDUZ";
1630 case PPCISD::STRICT_FCTIWUZ:
1631 return "PPCISD::STRICT_FCTIWUZ";
1632 case PPCISD::STRICT_FCFID:
1633 return "PPCISD::STRICT_FCFID";
1634 case PPCISD::STRICT_FCFIDU:
1635 return "PPCISD::STRICT_FCFIDU";
1636 case PPCISD::STRICT_FCFIDS:
1637 return "PPCISD::STRICT_FCFIDS";
1638 case PPCISD::STRICT_FCFIDUS:
1639 return "PPCISD::STRICT_FCFIDUS";
1640 case PPCISD::LXVRZX: return "PPCISD::LXVRZX";
1641 }
1642 return nullptr;
1643}
1644
1645EVT PPCTargetLowering::getSetCCResultType(const DataLayout &DL, LLVMContext &C,
1646 EVT VT) const {
1647 if (!VT.isVector())
1648 return Subtarget.useCRBits() ? MVT::i1 : MVT::i32;
1649
1650 return VT.changeVectorElementTypeToInteger();
1651}
1652
1653bool PPCTargetLowering::enableAggressiveFMAFusion(EVT VT) const {
1654  assert(VT.isFloatingPoint() && "Non-floating-point FMA?");
1655 return true;
1656}
1657
1658//===----------------------------------------------------------------------===//
1659// Node matching predicates, for use by the tblgen matching code.
1660//===----------------------------------------------------------------------===//
1661
1662/// isFloatingPointZero - Return true if this is 0.0 or -0.0.
1663static bool isFloatingPointZero(SDValue Op) {
1664 if (ConstantFPSDNode *CFP = dyn_cast<ConstantFPSDNode>(Op))
1665 return CFP->getValueAPF().isZero();
1666 else if (ISD::isEXTLoad(Op.getNode()) || ISD::isNON_EXTLoad(Op.getNode())) {
1667 // Maybe this has already been legalized into the constant pool?
1668 if (ConstantPoolSDNode *CP = dyn_cast<ConstantPoolSDNode>(Op.getOperand(1)))
1669 if (const ConstantFP *CFP = dyn_cast<ConstantFP>(CP->getConstVal()))
1670 return CFP->getValueAPF().isZero();
1671 }
1672 return false;
1673}
1674
1675/// isConstantOrUndef - Op is either an undef node or a ConstantSDNode. Return
1676/// true if Op is undef or if it matches the specified value.
1677static bool isConstantOrUndef(int Op, int Val) {
1678 return Op < 0 || Op == Val;
1679}
1680
1681/// isVPKUHUMShuffleMask - Return true if this is the shuffle mask for a
1682/// VPKUHUM instruction.
1683/// The ShuffleKind distinguishes between big-endian operations with
1684/// two different inputs (0), either-endian operations with two identical
1685/// inputs (1), and little-endian operations with two different inputs (2).
1686/// For the latter, the input operands are swapped (see PPCInstrAltivec.td).
1687bool PPC::isVPKUHUMShuffleMask(ShuffleVectorSDNode *N, unsigned ShuffleKind,
1688 SelectionDAG &DAG) {
1689 bool IsLE = DAG.getDataLayout().isLittleEndian();
1690 if (ShuffleKind == 0) {
1691 if (IsLE)
1692 return false;
1693 for (unsigned i = 0; i != 16; ++i)
1694 if (!isConstantOrUndef(N->getMaskElt(i), i*2+1))
1695 return false;
1696 } else if (ShuffleKind == 2) {
1697 if (!IsLE)
1698 return false;
1699 for (unsigned i = 0; i != 16; ++i)
1700 if (!isConstantOrUndef(N->getMaskElt(i), i*2))
1701 return false;
1702 } else if (ShuffleKind == 1) {
1703 unsigned j = IsLE ? 0 : 1;
1704 for (unsigned i = 0; i != 8; ++i)
1705 if (!isConstantOrUndef(N->getMaskElt(i), i*2+j) ||
1706 !isConstantOrUndef(N->getMaskElt(i+8), i*2+j))
1707 return false;
1708 }
1709 return true;
1710}
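As an illustration of the mask shapes these checks accept (a sketch derived from the loops above, not comments present in the original file):

// Big-endian, two inputs (ShuffleKind == 0): the odd bytes of both vectors,
//   {1, 3, 5, ..., 31}.
// Little-endian, swapped inputs (ShuffleKind == 2): the even bytes,
//   {0, 2, 4, ..., 30}.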
1711
1712/// isVPKUWUMShuffleMask - Return true if this is the shuffle mask for a
1713/// VPKUWUM instruction.
1714/// The ShuffleKind distinguishes between big-endian operations with
1715/// two different inputs (0), either-endian operations with two identical
1716/// inputs (1), and little-endian operations with two different inputs (2).
1717/// For the latter, the input operands are swapped (see PPCInstrAltivec.td).
1718bool PPC::isVPKUWUMShuffleMask(ShuffleVectorSDNode *N, unsigned ShuffleKind,
1719 SelectionDAG &DAG) {
1720 bool IsLE = DAG.getDataLayout().isLittleEndian();
1721 if (ShuffleKind == 0) {
1722 if (IsLE)
1723 return false;
1724 for (unsigned i = 0; i != 16; i += 2)
1725 if (!isConstantOrUndef(N->getMaskElt(i ), i*2+2) ||
1726 !isConstantOrUndef(N->getMaskElt(i+1), i*2+3))
1727 return false;
1728 } else if (ShuffleKind == 2) {
1729 if (!IsLE)
1730 return false;
1731 for (unsigned i = 0; i != 16; i += 2)
1732 if (!isConstantOrUndef(N->getMaskElt(i ), i*2) ||
1733 !isConstantOrUndef(N->getMaskElt(i+1), i*2+1))
1734 return false;
1735 } else if (ShuffleKind == 1) {
1736 unsigned j = IsLE ? 0 : 2;
1737 for (unsigned i = 0; i != 8; i += 2)
1738 if (!isConstantOrUndef(N->getMaskElt(i ), i*2+j) ||
1739 !isConstantOrUndef(N->getMaskElt(i+1), i*2+j+1) ||
1740 !isConstantOrUndef(N->getMaskElt(i+8), i*2+j) ||
1741 !isConstantOrUndef(N->getMaskElt(i+9), i*2+j+1))
1742 return false;
1743 }
1744 return true;
1745}
1746
1747/// isVPKUDUMShuffleMask - Return true if this is the shuffle mask for a
1748/// VPKUDUM instruction, AND the VPKUDUM instruction exists for the
1749/// current subtarget.
1750///
1751/// The ShuffleKind distinguishes between big-endian operations with
1752/// two different inputs (0), either-endian operations with two identical
1753/// inputs (1), and little-endian operations with two different inputs (2).
1754/// For the latter, the input operands are swapped (see PPCInstrAltivec.td).
1755bool PPC::isVPKUDUMShuffleMask(ShuffleVectorSDNode *N, unsigned ShuffleKind,
1756 SelectionDAG &DAG) {
1757 const PPCSubtarget& Subtarget =
1758 static_cast<const PPCSubtarget&>(DAG.getSubtarget());
1759 if (!Subtarget.hasP8Vector())
1760 return false;
1761
1762 bool IsLE = DAG.getDataLayout().isLittleEndian();
1763 if (ShuffleKind == 0) {
1764 if (IsLE)
1765 return false;
1766 for (unsigned i = 0; i != 16; i += 4)
1767 if (!isConstantOrUndef(N->getMaskElt(i ), i*2+4) ||
1768 !isConstantOrUndef(N->getMaskElt(i+1), i*2+5) ||
1769 !isConstantOrUndef(N->getMaskElt(i+2), i*2+6) ||
1770 !isConstantOrUndef(N->getMaskElt(i+3), i*2+7))
1771 return false;
1772 } else if (ShuffleKind == 2) {
1773 if (!IsLE)
1774 return false;
1775 for (unsigned i = 0; i != 16; i += 4)
1776 if (!isConstantOrUndef(N->getMaskElt(i ), i*2) ||
1777 !isConstantOrUndef(N->getMaskElt(i+1), i*2+1) ||
1778 !isConstantOrUndef(N->getMaskElt(i+2), i*2+2) ||
1779 !isConstantOrUndef(N->getMaskElt(i+3), i*2+3))
1780 return false;
1781 } else if (ShuffleKind == 1) {
1782 unsigned j = IsLE ? 0 : 4;
1783 for (unsigned i = 0; i != 8; i += 4)
1784 if (!isConstantOrUndef(N->getMaskElt(i ), i*2+j) ||
1785 !isConstantOrUndef(N->getMaskElt(i+1), i*2+j+1) ||
1786 !isConstantOrUndef(N->getMaskElt(i+2), i*2+j+2) ||
1787 !isConstantOrUndef(N->getMaskElt(i+3), i*2+j+3) ||
1788 !isConstantOrUndef(N->getMaskElt(i+8), i*2+j) ||
1789 !isConstantOrUndef(N->getMaskElt(i+9), i*2+j+1) ||
1790 !isConstantOrUndef(N->getMaskElt(i+10), i*2+j+2) ||
1791 !isConstantOrUndef(N->getMaskElt(i+11), i*2+j+3))
1792 return false;
1793 }
1794 return true;
1795}
1796
1797/// isVMerge - Common function, used to match vmrg* shuffles.
1798///
1799static bool isVMerge(ShuffleVectorSDNode *N, unsigned UnitSize,
1800 unsigned LHSStart, unsigned RHSStart) {
1801 if (N->getValueType(0) != MVT::v16i8)
1802 return false;
1803  assert((UnitSize == 1 || UnitSize == 2 || UnitSize == 4) &&
1804         "Unsupported merge size!");
1805
1806 for (unsigned i = 0; i != 8/UnitSize; ++i) // Step over units
1807 for (unsigned j = 0; j != UnitSize; ++j) { // Step over bytes within unit
1808 if (!isConstantOrUndef(N->getMaskElt(i*UnitSize*2+j),
1809 LHSStart+j+i*UnitSize) ||
1810 !isConstantOrUndef(N->getMaskElt(i*UnitSize*2+UnitSize+j),
1811 RHSStart+j+i*UnitSize))
1812 return false;
1813 }
1814 return true;
1815}
1816
1817/// isVMRGLShuffleMask - Return true if this is a shuffle mask suitable for
1818/// a VMRGL* instruction with the specified unit size (1,2 or 4 bytes).
1819/// The ShuffleKind distinguishes between big-endian merges with two
1820/// different inputs (0), either-endian merges with two identical inputs (1),
1821/// and little-endian merges with two different inputs (2). For the latter,
1822/// the input operands are swapped (see PPCInstrAltivec.td).
1823bool PPC::isVMRGLShuffleMask(ShuffleVectorSDNode *N, unsigned UnitSize,
1824 unsigned ShuffleKind, SelectionDAG &DAG) {
1825 if (DAG.getDataLayout().isLittleEndian()) {
1826 if (ShuffleKind == 1) // unary
1827 return isVMerge(N, UnitSize, 0, 0);
1828 else if (ShuffleKind == 2) // swapped
1829 return isVMerge(N, UnitSize, 0, 16);
1830 else
1831 return false;
1832 } else {
1833 if (ShuffleKind == 1) // unary
1834 return isVMerge(N, UnitSize, 8, 8);
1835 else if (ShuffleKind == 0) // normal
1836 return isVMerge(N, UnitSize, 8, 24);
1837 else
1838 return false;
1839 }
1840}
1841
1842/// isVMRGHShuffleMask - Return true if this is a shuffle mask suitable for
1843/// a VMRGH* instruction with the specified unit size (1,2 or 4 bytes).
1844/// The ShuffleKind distinguishes between big-endian merges with two
1845/// different inputs (0), either-endian merges with two identical inputs (1),
1846/// and little-endian merges with two different inputs (2). For the latter,
1847/// the input operands are swapped (see PPCInstrAltivec.td).
1848bool PPC::isVMRGHShuffleMask(ShuffleVectorSDNode *N, unsigned UnitSize,
1849 unsigned ShuffleKind, SelectionDAG &DAG) {
1850 if (DAG.getDataLayout().isLittleEndian()) {
1851 if (ShuffleKind == 1) // unary
1852 return isVMerge(N, UnitSize, 8, 8);
1853 else if (ShuffleKind == 2) // swapped
1854 return isVMerge(N, UnitSize, 8, 24);
1855 else
1856 return false;
1857 } else {
1858 if (ShuffleKind == 1) // unary
1859 return isVMerge(N, UnitSize, 0, 0);
1860 else if (ShuffleKind == 0) // normal
1861 return isVMerge(N, UnitSize, 0, 16);
1862 else
1863 return false;
1864 }
1865}
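A concrete mask may make the merge pattern clearer; this is a sketch derived from isVMerge above and is not part of the original file:

// Big-endian, two-input VMRGHB, i.e. isVMRGHShuffleMask(N, 1, 0, DAG), accepts
//   {0, 16, 1, 17, 2, 18, 3, 19, 4, 20, 5, 21, 6, 22, 7, 23}.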
1866
1867/**
1868 * Common function used to match vmrgew and vmrgow shuffles
1869 *
1870 * The indexOffset determines whether to look for even or odd words in
1871 * the shuffle mask. This is based on the endianness of the target
1872 * machine.
1873 * - Little Endian:
1874 * - Use offset of 0 to check for odd elements
1875 * - Use offset of 4 to check for even elements
1876 * - Big Endian:
1877 * - Use offset of 0 to check for even elements
1878 * - Use offset of 4 to check for odd elements
1879 * A detailed description of the vector element ordering for little endian and
1880 * big endian can be found at
1881 * http://www.ibm.com/developerworks/library/l-ibm-xl-c-cpp-compiler/index.html
1882 * ("Targeting your applications - what little endian and big endian IBM XL C/C++
1883 * compiler differences mean to you").
1884 *
1885 * The mask to the shuffle vector instruction specifies the indices of the
1886 * elements from the two input vectors to place in the result. The elements are
1887 * numbered in array-access order, starting with the first vector. These vectors
1888 * are always of type v16i8, thus each vector will contain 16 elements, each
1889 * 8 bits wide. More info on the shufflevector instruction can be found in the
1890 * http://llvm.org/docs/LangRef.html#shufflevector-instruction
1891 * section of the Language Reference.
1892 *
1893 * The RHSStartValue indicates whether the same input vectors are used (unary)
1894 * or two different input vectors are used, based on the following:
1895 * - If the instruction uses the same vector for both inputs, the range of the
1896 * indices will be 0 to 15. In this case, the RHSStart value passed should
1897 * be 0.
1898 * - If the instruction has two different vectors then the range of the
1899 * indices will be 0 to 31. In this case, the RHSStart value passed should
1900 * be 16 (indices 0-15 specify elements in the first vector while indices 16
1901 * to 31 specify elements in the second vector).
1902 *
1903 * \param[in] N The shuffle vector SD Node to analyze
1904 * \param[in] IndexOffset Specifies whether to look for even or odd elements
1905 * \param[in] RHSStartValue Specifies the starting index for the righthand input
1906 * vector to the shuffle_vector instruction
1907 * \return true iff this shuffle vector represents an even or odd word merge
1908 */
1909static bool isVMerge(ShuffleVectorSDNode *N, unsigned IndexOffset,
1910 unsigned RHSStartValue) {
1911 if (N->getValueType(0) != MVT::v16i8)
1912 return false;
1913
1914 for (unsigned i = 0; i < 2; ++i)
1915 for (unsigned j = 0; j < 4; ++j)
1916 if (!isConstantOrUndef(N->getMaskElt(i*4+j),
1917 i*RHSStartValue+j+IndexOffset) ||
1918 !isConstantOrUndef(N->getMaskElt(i*4+j+8),
1919 i*RHSStartValue+j+IndexOffset+8))
1920 return false;
1921 return true;
1922}
1923
1924/**
1925 * Determine if the specified shuffle mask is suitable for the vmrgew or
1926 * vmrgow instructions.
1927 *
1928 * \param[in] N The shuffle vector SD Node to analyze
1929 * \param[in] CheckEven Check for an even merge (true) or an odd merge (false)
1930 * \param[in] ShuffleKind Identify the type of merge:
1931 * - 0 = big-endian merge with two different inputs;
1932 * - 1 = either-endian merge with two identical inputs;
1933 * - 2 = little-endian merge with two different inputs (inputs are swapped for
1934 * little-endian merges).
1935 * \param[in] DAG The current SelectionDAG
1936 * \return true iff this shuffle mask is suitable for a vmrgew or vmrgow instruction
1937 */
1938bool PPC::isVMRGEOShuffleMask(ShuffleVectorSDNode *N, bool CheckEven,
1939 unsigned ShuffleKind, SelectionDAG &DAG) {
1940 if (DAG.getDataLayout().isLittleEndian()) {
1941 unsigned indexOffset = CheckEven ? 4 : 0;
1942 if (ShuffleKind == 1) // Unary
1943 return isVMerge(N, indexOffset, 0);
1944 else if (ShuffleKind == 2) // swapped
1945 return isVMerge(N, indexOffset, 16);
1946 else
1947 return false;
1948 }
1949 else {
1950 unsigned indexOffset = CheckEven ? 0 : 4;
1951 if (ShuffleKind == 1) // Unary
1952 return isVMerge(N, indexOffset, 0);
1953 else if (ShuffleKind == 0) // Normal
1954 return isVMerge(N, indexOffset, 16);
1955 else
1956 return false;
1957 }
1958 return false;
1959}
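For illustration, a mask accepted by the big-endian even-word case (a sketch derived from isVMerge above, not a comment from the original file):

// Big-endian, two-input even-word merge, isVMRGEOShuffleMask(N, true, 0, DAG):
//   {0, 1, 2, 3, 16, 17, 18, 19, 8, 9, 10, 11, 24, 25, 26, 27}
// i.e. words 0 and 2 of the two inputs, interleaved.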
1960
1961/// isVSLDOIShuffleMask - If this is a vsldoi shuffle mask, return the shift
1962/// amount, otherwise return -1.
1963/// The ShuffleKind distinguishes between big-endian operations with two
1964/// different inputs (0), either-endian operations with two identical inputs
1965/// (1), and little-endian operations with two different inputs (2). For the
1966/// latter, the input operands are swapped (see PPCInstrAltivec.td).
1967int PPC::isVSLDOIShuffleMask(SDNode *N, unsigned ShuffleKind,
1968 SelectionDAG &DAG) {
1969 if (N->getValueType(0) != MVT::v16i8)
1970 return -1;
1971
1972 ShuffleVectorSDNode *SVOp = cast<ShuffleVectorSDNode>(N);
1973
1974 // Find the first non-undef value in the shuffle mask.
1975 unsigned i;
1976 for (i = 0; i != 16 && SVOp->getMaskElt(i) < 0; ++i)
1977 /*search*/;
1978
1979 if (i == 16) return -1; // all undef.
1980
1981 // Otherwise, check to see if the rest of the elements are consecutively
1982 // numbered from this value.
1983 unsigned ShiftAmt = SVOp->getMaskElt(i);
1984 if (ShiftAmt < i) return -1;
1985
1986 ShiftAmt -= i;
1987 bool isLE = DAG.getDataLayout().isLittleEndian();
1988
1989 if ((ShuffleKind == 0 && !isLE) || (ShuffleKind == 2 && isLE)) {
1990 // Check the rest of the elements to see if they are consecutive.
1991 for (++i; i != 16; ++i)
1992 if (!isConstantOrUndef(SVOp->getMaskElt(i), ShiftAmt+i))
1993 return -1;
1994 } else if (ShuffleKind == 1) {
1995 // Check the rest of the elements to see if they are consecutive.
1996 for (++i; i != 16; ++i)
1997 if (!isConstantOrUndef(SVOp->getMaskElt(i), (ShiftAmt+i) & 15))
1998 return -1;
1999 } else
2000 return -1;
2001
2002 if (isLE)
2003 ShiftAmt = 16 - ShiftAmt;
2004
2005 return ShiftAmt;
2006}
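A worked example of the returned shift amount (derived from the logic above; illustrative, not in the original file):

// Big-endian, two distinct inputs (ShuffleKind == 0): the byte mask
// {3, 4, 5, ..., 18} starts at 3 and is consecutive, so the routine returns 3.
// For the little-endian swapped case (ShuffleKind == 2), the same mask yields
// 16 - 3 = 13.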
2007
2008/// isSplatShuffleMask - Return true if the specified VECTOR_SHUFFLE operand
2009/// specifies a splat of a single element that is suitable for input to
2010/// one of the splat operations (VSPLTB/VSPLTH/VSPLTW/XXSPLTW/LXVDSX/etc.).
2011bool PPC::isSplatShuffleMask(ShuffleVectorSDNode *N, unsigned EltSize) {
2012  assert(N->getValueType(0) == MVT::v16i8 && isPowerOf2_32(EltSize) &&
2013         EltSize <= 8 && "Can only handle 1,2,4,8 byte element sizes");
2014
2015 // The consecutive indices need to specify an element, not part of two
2016 // different elements. So abandon ship early if this isn't the case.
2017 if (N->getMaskElt(0) % EltSize != 0)
2018 return false;
2019
2020 // This is a splat operation if each element of the permute is the same, and
2021 // if the value doesn't reference the second vector.
2022 unsigned ElementBase = N->getMaskElt(0);
2023
2024 // FIXME: Handle UNDEF elements too!
2025 if (ElementBase >= 16)
2026 return false;
2027
2028 // Check that the indices are consecutive, in the case of a multi-byte element
2029 // splatted with a v16i8 mask.
2030 for (unsigned i = 1; i != EltSize; ++i)
2031 if (N->getMaskElt(i) < 0 || N->getMaskElt(i) != (int)(i+ElementBase))
2032 return false;
2033
2034 for (unsigned i = EltSize, e = 16; i != e; i += EltSize) {
2035 if (N->getMaskElt(i) < 0) continue;
2036 for (unsigned j = 0; j != EltSize; ++j)
2037 if (N->getMaskElt(i+j) != N->getMaskElt(j))
2038 return false;
2039 }
2040 return true;
2041}
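An example mask this accepts (illustrative, not part of the original file):

// With EltSize == 4, the mask {4, 5, 6, 7, 4, 5, 6, 7, 4, 5, 6, 7, 4, 5, 6, 7}
// is a splat of word element 1 of the first input.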
2042
2043/// Check that the mask is shuffling N byte elements. Within each N byte
2044/// element of the mask, the indices could be either in increasing or
2045/// decreasing order as long as they are consecutive.
2046/// \param[in] N the shuffle vector SD Node to analyze
2047/// \param[in] Width the element width in bytes, could be 2/4/8/16 (HalfWord/
2048/// Word/DoubleWord/QuadWord).
2049 /// \param[in] StepLen the index delta between consecutive mask entries within
2050 /// each N byte element: 1 if the mask is increasing, -1 if decreasing.
2051/// \return true iff the mask is shuffling N byte elements.
2052static bool isNByteElemShuffleMask(ShuffleVectorSDNode *N, unsigned Width,
2053 int StepLen) {
2054  assert((Width == 2 || Width == 4 || Width == 8 || Width == 16) &&
2055         "Unexpected element width.");
2056  assert((StepLen == 1 || StepLen == -1) && "Unexpected element width.");
2057
2058 unsigned NumOfElem = 16 / Width;
2059 unsigned MaskVal[16]; // Width is never greater than 16
2060 for (unsigned i = 0; i < NumOfElem; ++i) {
2061 MaskVal[0] = N->getMaskElt(i * Width);
2062 if ((StepLen == 1) && (MaskVal[0] % Width)) {
2063 return false;
2064 } else if ((StepLen == -1) && ((MaskVal[0] + 1) % Width)) {
2065 return false;
2066 }
2067
2068 for (unsigned int j = 1; j < Width; ++j) {
2069 MaskVal[j] = N->getMaskElt(i * Width + j);
2070 if (MaskVal[j] != MaskVal[j-1] + StepLen) {
2071 return false;
2072 }
2073 }
2074 }
2075
2076 return true;
2077}
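Two masks that satisfy this helper (a sketch derived from the checks above, not in the original file):

// Width == 4, StepLen == 1:
//   {4, 5, 6, 7, 0, 1, 2, 3, 12, 13, 14, 15, 8, 9, 10, 11}
//   (every 4-byte group is increasing and starts on a word boundary).
// Width == 4, StepLen == -1:
//   {3, 2, 1, 0, 7, 6, 5, 4, 11, 10, 9, 8, 15, 14, 13, 12}
//   (every 4-byte group is decreasing and its first entry is the last byte of a word).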
2078
2079bool PPC::isXXINSERTWMask(ShuffleVectorSDNode *N, unsigned &ShiftElts,
2080 unsigned &InsertAtByte, bool &Swap, bool IsLE) {
2081 if (!isNByteElemShuffleMask(N, 4, 1))
2082 return false;
2083
2084 // Now we look at mask elements 0,4,8,12
2085 unsigned M0 = N->getMaskElt(0) / 4;
2086 unsigned M1 = N->getMaskElt(4) / 4;
2087 unsigned M2 = N->getMaskElt(8) / 4;
2088 unsigned M3 = N->getMaskElt(12) / 4;
2089 unsigned LittleEndianShifts[] = { 2, 1, 0, 3 };
2090 unsigned BigEndianShifts[] = { 3, 0, 1, 2 };
2091
2092 // Below, let H and L be arbitrary elements of the shuffle mask
2093 // where H is in the range [4,7] and L is in the range [0,3].
2094 // H, 1, 2, 3 or L, 5, 6, 7
2095 if ((M0 > 3 && M1 == 1 && M2 == 2 && M3 == 3) ||
2096 (M0 < 4 && M1 == 5 && M2 == 6 && M3 == 7)) {
2097 ShiftElts = IsLE ? LittleEndianShifts[M0 & 0x3] : BigEndianShifts[M0 & 0x3];
2098 InsertAtByte = IsLE ? 12 : 0;
2099 Swap = M0 < 4;
2100 return true;
2101 }
2102 // 0, H, 2, 3 or 4, L, 6, 7
2103 if ((M1 > 3 && M0 == 0 && M2 == 2 && M3 == 3) ||
2104 (M1 < 4 && M0 == 4 && M2 == 6 && M3 == 7)) {
2105 ShiftElts = IsLE ? LittleEndianShifts[M1 & 0x3] : BigEndianShifts[M1 & 0x3];
2106 InsertAtByte = IsLE ? 8 : 4;
2107 Swap = M1 < 4;
2108 return true;
2109 }
2110 // 0, 1, H, 3 or 4, 5, L, 7
2111 if ((M2 > 3 && M0 == 0 && M1 == 1 && M3 == 3) ||
2112 (M2 < 4 && M0 == 4 && M1 == 5 && M3 == 7)) {
2113 ShiftElts = IsLE ? LittleEndianShifts[M2 & 0x3] : BigEndianShifts[M2 & 0x3];
2114 InsertAtByte = IsLE ? 4 : 8;
2115 Swap = M2 < 4;
2116 return true;
2117 }
2118 // 0, 1, 2, H or 4, 5, 6, L
2119 if ((M3 > 3 && M0 == 0 && M1 == 1 && M2 == 2) ||
2120 (M3 < 4 && M0 == 4 && M1 == 5 && M2 == 6)) {
2121 ShiftElts = IsLE ? LittleEndianShifts[M3 & 0x3] : BigEndianShifts[M3 & 0x3];
2122 InsertAtByte = IsLE ? 0 : 12;
2123 Swap = M3 < 4;
2124 return true;
2125 }
2126
2127 // If both vector operands for the shuffle are the same vector, the mask will
2128 // contain only elements from the first one and the second one will be undef.
2129 if (N->getOperand(1).isUndef()) {
2130 ShiftElts = 0;
2131 Swap = true;
2132 unsigned XXINSERTWSrcElem = IsLE ? 2 : 1;
2133 if (M0 == XXINSERTWSrcElem && M1 == 1 && M2 == 2 && M3 == 3) {
2134 InsertAtByte = IsLE ? 12 : 0;
2135 return true;
2136 }
2137 if (M0 == 0 && M1 == XXINSERTWSrcElem && M2 == 2 && M3 == 3) {
2138 InsertAtByte = IsLE ? 8 : 4;
2139 return true;
2140 }
2141 if (M0 == 0 && M1 == 1 && M2 == XXINSERTWSrcElem && M3 == 3) {
2142 InsertAtByte = IsLE ? 4 : 8;
2143 return true;
2144 }
2145 if (M0 == 0 && M1 == 1 && M2 == 2 && M3 == XXINSERTWSrcElem) {
2146 InsertAtByte = IsLE ? 0 : 12;
2147 return true;
2148 }
2149 }
2150
2151 return false;
2152}
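A worked example of the outputs (derived from the code above; illustrative, not a comment in the original file):

// The byte mask {16, 17, 18, 19, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15}
// gives word indices M0 = 4, M1 = 1, M2 = 2, M3 = 3, so the routine returns
// true with:
//   BE: ShiftElts = 3, InsertAtByte = 0,  Swap = false
//   LE: ShiftElts = 2, InsertAtByte = 12, Swap = false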
2153
2154bool PPC::isXXSLDWIShuffleMask(ShuffleVectorSDNode *N, unsigned &ShiftElts,
2155 bool &Swap, bool IsLE) {
2156  assert(N->getValueType(0) == MVT::v16i8 && "Shuffle vector expects v16i8");
25
'?' condition is true
2157 // Ensure each byte index of the word is consecutive.
2158 if (!isNByteElemShuffleMask(N, 4, 1))
26
Assuming the condition is false
27
Taking false branch
2159 return false;
2160
2161 // Now we look at mask elements 0,4,8,12, which are the beginning of words.
2162 unsigned M0 = N->getMaskElt(0) / 4;
2163 unsigned M1 = N->getMaskElt(4) / 4;
2164 unsigned M2 = N->getMaskElt(8) / 4;
2165 unsigned M3 = N->getMaskElt(12) / 4;
2166
2167 // If both vector operands for the shuffle are the same vector, the mask will
2168 // contain only elements from the first one and the second one will be undef.
2169 if (N->getOperand(1).isUndef()) {
28
Calling 'SDValue::isUndef'
34
Returning from 'SDValue::isUndef'
35
Taking false branch
2170    assert(M0 < 4 && "Indexing into an undef vector?");
2171 if (M1 != (M0 + 1) % 4 || M2 != (M1 + 1) % 4 || M3 != (M2 + 1) % 4)
2172 return false;
2173
2174 ShiftElts = IsLE ? (4 - M0) % 4 : M0;
2175 Swap = false;
2176 return true;
2177 }
2178
2179 // Ensure each word index of the ShuffleVector Mask is consecutive.
2180 if (M1 != (M0 + 1) % 8 || M2 != (M1 + 1) % 8 || M3 != (M2 + 1) % 8)
36
Assuming the condition is false
37
Assuming the condition is false
38
Assuming the condition is false
39
Taking false branch
2181 return false;
2182
2183 if (IsLE) {
40
Assuming 'IsLE' is false
41
Taking false branch
2184 if (M0 == 0 || M0 == 7 || M0 == 6 || M0 == 5) {
2185 // Input vectors don't need to be swapped if the leading element
2186 // of the result is one of the 3 left elements of the second vector
2187 // (or if there is no shift to be done at all).
2188 Swap = false;
2189 ShiftElts = (8 - M0) % 8;
2190 } else if (M0 == 4 || M0 == 3 || M0 == 2 || M0 == 1) {
2191 // Input vectors need to be swapped if the leading element
2192 // of the result is one of the 3 left elements of the first vector
2193 // (or if we're shifting by 4 - thereby simply swapping the vectors).
2194 Swap = true;
2195 ShiftElts = (4 - M0) % 4;
2196 }
2197
2198 return true;
2199 } else { // BE
2200 if (M0 == 0 || M0 == 1 || M0 == 2 || M0 == 3) {
42
Assuming 'M0' is not equal to 0
43
Assuming 'M0' is not equal to 1
44
Assuming 'M0' is not equal to 2
45
Assuming 'M0' is not equal to 3
46
Taking false branch
2201 // Input vectors don't need to be swapped if the leading element
2202 // of the result is one of the 4 elements of the first vector.
2203 Swap = false;
2204 ShiftElts = M0;
2205 } else if (M0 == 4 || M0 == 5 || M0 == 6 || M0 == 7) {
47
Assuming 'M0' is not equal to 4
48
Assuming 'M0' is not equal to 5
49
Assuming 'M0' is not equal to 6
50
Assuming 'M0' is not equal to 7
51
Taking false branch
2206 // Input vectors need to be swapped if the leading element
2207 // of the result is one of the 4 elements of the right vector.
2208 Swap = true;
2209 ShiftElts = M0 - 4;
2210 }
2211
2212 return true;
52
Returning without writing to 'ShiftElts'
53
Returning the value 1, which participates in a condition later
2213 }
2214}
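A worked example, and how it relates to the path the checker walks above (derived from the code and the annotations; illustrative, not in the original file):

// Big-endian, two distinct inputs: the byte mask {4, 5, ..., 19} gives word
// indices M0 = 1, M1 = 2, M2 = 3, M3 = 4, so Swap = false and ShiftElts = 1.
// On the path assumed above, M0 matches none of 0-7, both branches are
// skipped, and the function returns true without ever writing ShiftElts.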
2215
2216bool static isXXBRShuffleMaskHelper(ShuffleVectorSDNode *N, int Width) {
2217  assert(N->getValueType(0) == MVT::v16i8 && "Shuffle vector expects v16i8");
2218
2219 if (!isNByteElemShuffleMask(N, Width, -1))
2220 return false;
2221
2222 for (int i = 0; i < 16; i += Width)
2223 if (N->getMaskElt(i) != i + Width - 1)
2224 return false;
2225
2226 return true;
2227}
2228
2229bool PPC::isXXBRHShuffleMask(ShuffleVectorSDNode *N) {
2230 return isXXBRShuffleMaskHelper(N, 2);
2231}
2232
2233bool PPC::isXXBRWShuffleMask(ShuffleVectorSDNode *N) {
2234 return isXXBRShuffleMaskHelper(N, 4);
2235}
2236
2237bool PPC::isXXBRDShuffleMask(ShuffleVectorSDNode *N) {
2238 return isXXBRShuffleMaskHelper(N, 8);
2239}
2240
2241bool PPC::isXXBRQShuffleMask(ShuffleVectorSDNode *N) {
2242 return isXXBRShuffleMaskHelper(N, 16);
2243}
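An example for the word-sized case (illustrative, derived from the helper above, not in the original file):

// isXXBRWShuffleMask accepts
//   {3, 2, 1, 0, 7, 6, 5, 4, 11, 10, 9, 8, 15, 14, 13, 12}:
// each 4-byte group is consecutive and decreasing, and its first element
// equals i + Width - 1.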
2244
2245/// Can node \p N be lowered to an XXPERMDI instruction? If so, set \p Swap
2246/// if the inputs to the instruction should be swapped and set \p DM to the
2247/// value for the immediate.
2248/// Specifically, set \p Swap to true only if \p N can be lowered to XXPERMDI
2249/// AND element 0 of the result comes from the first input (LE) or second input
2250/// (BE). Set \p DM to the calculated result (0-3) only if \p N can be lowered.
2251/// \return true iff the given mask of shuffle node \p N is a XXPERMDI shuffle
2252/// mask.
2253bool PPC::isXXPERMDIShuffleMask(ShuffleVectorSDNode *N, unsigned &DM,
2254 bool &Swap, bool IsLE) {
2255  assert(N->getValueType(0) == MVT::v16i8 && "Shuffle vector expects v16i8");
2256
2257 // Ensure each byte index of the double word is consecutive.
2258 if (!isNByteElemShuffleMask(N, 8, 1))
2259 return false;
2260
2261 unsigned M0 = N->getMaskElt(0) / 8;
2262 unsigned M1 = N->getMaskElt(8) / 8;
2263  assert(((M0 | M1) < 4) && "A mask element out of bounds?");
2264
2265 // If both vector operands for the shuffle are the same vector, the mask will
2266 // contain only elements from the first one and the second one will be undef.
2267 if (N->getOperand(1).isUndef()) {
2268 if ((M0 | M1) < 2) {
2269 DM = IsLE ? (((~M1) & 1) << 1) + ((~M0) & 1) : (M0 << 1) + (M1 & 1);
2270 Swap = false;
2271 return true;
2272 } else
2273 return false;
2274 }
2275
2276 if (IsLE) {
2277 if (M0 > 1 && M1 < 2) {
2278 Swap = false;
2279 } else if (M0 < 2 && M1 > 1) {
2280 M0 = (M0 + 2) % 4;
2281 M1 = (M1 + 2) % 4;
2282 Swap = true;
2283 } else
2284 return false;
2285
2286 // Note: if control flow comes here that means Swap is already set above
2287 DM = (((~M1) & 1) << 1) + ((~M0) & 1);
2288 return true;
2289 } else { // BE
2290 if (M0 < 2 && M1 > 1) {
2291 Swap = false;
2292 } else if (M0 > 1 && M1 < 2) {
2293 M0 = (M0 + 2) % 4;
2294 M1 = (M1 + 2) % 4;
2295 Swap = true;
2296 } else
2297 return false;
2298
2299 // Note: if control flow comes here that means Swap is already set above
2300 DM = (M0 << 1) + (M1 & 1);
2301 return true;
2302 }
2303}
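A worked example (derived from the code above; illustrative, not part of the original file):

// The doubleword-swap mask {8, 9, 10, 11, 12, 13, 14, 15, 0, 1, 2, 3, 4, 5, 6, 7}
// with an undef second operand gives M0 = 1, M1 = 0, so the routine sets
// Swap = false and DM = 2 on both endiannesses (the immediate used for a
// doubleword swap).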
2304
2305
2306/// getSplatIdxForPPCMnemonics - Return the splat index as a value that is
2307/// appropriate for PPC mnemonics (which have a big endian bias - namely
2308/// elements are counted from the left of the vector register).
2309unsigned PPC::getSplatIdxForPPCMnemonics(SDNode *N, unsigned EltSize,
2310 SelectionDAG &DAG) {
2311 ShuffleVectorSDNode *SVOp = cast<ShuffleVectorSDNode>(N);
2312  assert(isSplatShuffleMask(SVOp, EltSize));
2313 if (DAG.getDataLayout().isLittleEndian())
2314 return (16 / EltSize) - 1 - (SVOp->getMaskElt(0) / EltSize);
2315 else
2316 return SVOp->getMaskElt(0) / EltSize;
2317}
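For example (derived from the formula above; illustrative, not in the original file):

// With EltSize == 4 and the splat mask {4, 5, 6, 7, 4, 5, 6, 7, ...} this
// returns 1 on a big-endian target and (16 / 4) - 1 - 1 == 2 on a
// little-endian one.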
2318
2319/// get_VSPLTI_elt - If this is a build_vector of constants which can be formed
2320/// by using a vspltis[bhw] instruction of the specified element size, return
2321/// the constant being splatted. The ByteSize field indicates the number of
2322/// bytes of each element [124] -> [bhw].
2323SDValue PPC::get_VSPLTI_elt(SDNode *N, unsigned ByteSize, SelectionDAG &DAG) {
2324 SDValue OpVal(nullptr, 0);
2325
2326 // If ByteSize of the splat is bigger than the element size of the
2327 // build_vector, then we have a case where we are checking for a splat where
2328 // multiple elements of the buildvector are folded together into a single
2329  // logical element of the splat (e.g. "vspltish 1" to splat {0,1}*8).
2330 unsigned EltSize = 16/N->getNumOperands();
2331 if (EltSize < ByteSize) {
2332 unsigned Multiple = ByteSize/EltSize; // Number of BV entries per spltval.
2333 SDValue UniquedVals[4];
2334    assert(Multiple > 1 && Multiple <= 4 && "How can this happen?");
2335
2336 // See if all of the elements in the buildvector agree across.
2337 for (unsigned i = 0, e = N->getNumOperands(); i != e; ++i) {
2338 if (N->getOperand(i).isUndef()) continue;
2339 // If the element isn't a constant, bail fully out.
2340 if (!isa<ConstantSDNode>(N->getOperand(i))) return SDValue();
2341
2342 if (!UniquedVals[i&(Multiple-1)].getNode())
2343 UniquedVals[i&(Multiple-1)] = N->getOperand(i);
2344 else if (UniquedVals[i&(Multiple-1)] != N->getOperand(i))
2345 return SDValue(); // no match.
2346 }
2347
2348 // Okay, if we reached this point, UniquedVals[0..Multiple-1] contains
2349 // either constant or undef values that are identical for each chunk. See
2350 // if these chunks can form into a larger vspltis*.
2351
2352 // Check to see if all of the leading entries are either 0 or -1. If
2353 // neither, then this won't fit into the immediate field.
2354 bool LeadingZero = true;
2355 bool LeadingOnes = true;
2356 for (unsigned i = 0; i != Multiple-1; ++i) {
2357 if (!UniquedVals[i].getNode()) continue; // Must have been undefs.
2358
2359 LeadingZero &= isNullConstant(UniquedVals[i]);
2360 LeadingOnes &= isAllOnesConstant(UniquedVals[i]);
2361 }
2362 // Finally, check the least significant entry.
2363 if (LeadingZero) {
2364 if (!UniquedVals[Multiple-1].getNode())
2365 return DAG.getTargetConstant(0, SDLoc(N), MVT::i32); // 0,0,0,undef
2366 int Val = cast<ConstantSDNode>(UniquedVals[Multiple-1])->getZExtValue();
2367 if (Val < 16) // 0,0,0,4 -> vspltisw(4)
2368 return DAG.getTargetConstant(Val, SDLoc(N), MVT::i32);
2369 }
2370 if (LeadingOnes) {
2371 if (!UniquedVals[Multiple-1].getNode())
2372 return DAG.getTargetConstant(~0U, SDLoc(N), MVT::i32); // -1,-1,-1,undef
2373 int Val =cast<ConstantSDNode>(UniquedVals[Multiple-1])->getSExtValue();
2374 if (Val >= -16) // -1,-1,-1,-2 -> vspltisw(-2)
2375 return DAG.getTargetConstant(Val, SDLoc(N), MVT::i32);
2376 }
2377
2378 return SDValue();
2379 }
2380
2381 // Check to see if this buildvec has a single non-undef value in its elements.
2382 for (unsigned i = 0, e = N->getNumOperands(); i != e; ++i) {
2383 if (N->getOperand(i).isUndef()) continue;
2384 if (!OpVal.getNode())
2385 OpVal = N->getOperand(i);
2386 else if (OpVal != N->getOperand(i))
2387 return SDValue();
2388 }
2389
2390 if (!OpVal.getNode()) return SDValue(); // All UNDEF: use implicit def.
2391
2392 unsigned ValSizeInBytes = EltSize;
2393 uint64_t Value = 0;
2394 if (ConstantSDNode *CN = dyn_cast<ConstantSDNode>(OpVal)) {
2395 Value = CN->getZExtValue();
2396 } else if (ConstantFPSDNode *CN = dyn_cast<ConstantFPSDNode>(OpVal)) {
2397    assert(CN->getValueType(0) == MVT::f32 && "Only one legal FP vector type!");
2398 Value = FloatToBits(CN->getValueAPF().convertToFloat());
2399 }
2400
2401 // If the splat value is larger than the element value, then we can never do
2402  // this splat. The only case where the replicated bits would fit into our
2403  // immediate field is zero, and we prefer to use vxor for it.
2404 if (ValSizeInBytes < ByteSize) return SDValue();
2405
2406 // If the element value is larger than the splat value, check if it consists
2407 // of a repeated bit pattern of size ByteSize.
2408 if (!APInt(ValSizeInBytes * 8, Value).isSplat(ByteSize * 8))
2409 return SDValue();
2410
2411 // Properly sign extend the value.
2412 int MaskVal = SignExtend32(Value, ByteSize * 8);
2413
2414 // If this is zero, don't match, zero matches ISD::isBuildVectorAllZeros.
2415 if (MaskVal == 0) return SDValue();
2416
2417 // Finally, if this value fits in a 5 bit sext field, return it
2418 if (SignExtend32<5>(MaskVal) == MaskVal)
2419 return DAG.getTargetConstant(MaskVal, SDLoc(N), MVT::i32);
2420 return SDValue();
2421}
2422
2423//===----------------------------------------------------------------------===//
2424// Addressing Mode Selection
2425//===----------------------------------------------------------------------===//
2426
2427/// isIntS16Immediate - This method tests to see if the node is either a 32-bit
2428/// or 64-bit immediate, and if the value can be accurately represented as a
2429/// sign extension from a 16-bit value. If so, this returns true and the
2430/// immediate.
2431bool llvm::isIntS16Immediate(SDNode *N, int16_t &Imm) {
2432 if (!isa<ConstantSDNode>(N))
2433 return false;
2434
2435 Imm = (int16_t)cast<ConstantSDNode>(N)->getZExtValue();
2436 if (N->getValueType(0) == MVT::i32)
2437 return Imm == (int32_t)cast<ConstantSDNode>(N)->getZExtValue();
2438 else
2439 return Imm == (int64_t)cast<ConstantSDNode>(N)->getZExtValue();
2440}
2441bool llvm::isIntS16Immediate(SDValue Op, int16_t &Imm) {
2442 return isIntS16Immediate(Op.getNode(), Imm);
2443}
2444
2445
2446/// SelectAddressEVXRegReg - Given the specified address, check to see if it can
2447/// be represented as an indexed [r+r] operation.
2448bool PPCTargetLowering::SelectAddressEVXRegReg(SDValue N, SDValue &Base,
2449 SDValue &Index,
2450 SelectionDAG &DAG) const {
2451 for (SDNode::use_iterator UI = N->use_begin(), E = N->use_end();
2452 UI != E; ++UI) {
2453 if (MemSDNode *Memop = dyn_cast<MemSDNode>(*UI)) {
2454 if (Memop->getMemoryVT() == MVT::f64) {
2455 Base = N.getOperand(0);
2456 Index = N.getOperand(1);
2457 return true;
2458 }
2459 }
2460 }
2461 return false;
2462}
2463
2464 /// isIntS34Immediate - This method tests if the value of the given node can be
2465/// accurately represented as a sign extension from a 34-bit value. If so,
2466/// this returns true and the immediate.
2467bool llvm::isIntS34Immediate(SDNode *N, int64_t &Imm) {
2468 if (!isa<ConstantSDNode>(N))
2469 return false;
2470
2471 Imm = (int64_t)cast<ConstantSDNode>(N)->getZExtValue();
2472 return isInt<34>(Imm);
2473}
2474bool llvm::isIntS34Immediate(SDValue Op, int64_t &Imm) {
2475 return isIntS34Immediate(Op.getNode(), Imm);
2476}
2477
2478 /// SelectAddressRegReg - Given the specified address, check to see if it
2479/// can be represented as an indexed [r+r] operation. Returns false if it
2480/// can be more efficiently represented as [r+imm]. If \p EncodingAlignment is
2481/// non-zero and N can be represented by a base register plus a signed 16-bit
2482/// displacement, make a more precise judgement by checking (displacement % \p
2483/// EncodingAlignment).
2484bool PPCTargetLowering::SelectAddressRegReg(
2485 SDValue N, SDValue &Base, SDValue &Index, SelectionDAG &DAG,
2486 MaybeAlign EncodingAlignment) const {
2487 // If we have a PC Relative target flag don't select as [reg+reg]. It will be
2488 // a [pc+imm].
2489 if (SelectAddressPCRel(N, Base))
2490 return false;
2491
2492 int16_t Imm = 0;
2493 if (N.getOpcode() == ISD::ADD) {
2494    // Is there any SPE load/store (f64), which can't handle a 16-bit offset?
2495 // SPE load/store can only handle 8-bit offsets.
2496 if (hasSPE() && SelectAddressEVXRegReg(N, Base, Index, DAG))
2497 return true;
2498 if (isIntS16Immediate(N.getOperand(1), Imm) &&
2499 (!EncodingAlignment || isAligned(*EncodingAlignment, Imm)))
2500 return false; // r+i
2501 if (N.getOperand(1).getOpcode() == PPCISD::Lo)
2502 return false; // r+i
2503
2504 Base = N.getOperand(0);
2505 Index = N.getOperand(1);
2506 return true;
2507 } else if (N.getOpcode() == ISD::OR) {
2508 if (isIntS16Immediate(N.getOperand(1), Imm) &&
2509 (!EncodingAlignment || isAligned(*EncodingAlignment, Imm)))
2510 return false; // r+i can fold it if we can.
2511
2512 // If this is an or of disjoint bitfields, we can codegen this as an add
2513 // (for better address arithmetic) if the LHS and RHS of the OR are provably
2514 // disjoint.
2515 KnownBits LHSKnown = DAG.computeKnownBits(N.getOperand(0));
2516
2517 if (LHSKnown.Zero.getBoolValue()) {
2518 KnownBits RHSKnown = DAG.computeKnownBits(N.getOperand(1));
2519 // If all of the bits are known zero on the LHS or RHS, the add won't
2520 // carry.
2521 if (~(LHSKnown.Zero | RHSKnown.Zero) == 0) {
2522 Base = N.getOperand(0);
2523 Index = N.getOperand(1);
2524 return true;
2525 }
2526 }
2527 }
2528
2529 return false;
2530}
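// Illustrative note (added sketch, register names are made up): for
// (add %X, 16) with a 4-byte EncodingAlignment the 16-bit-immediate test
// succeeds and the routine returns false, leaving the [r+imm] D-form to be
// used; for (add %X, %Y) with a non-constant right operand it returns true
// with Base = %X and Index = %Y, selecting the [r+r] X-form.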
2531
2532// If we happen to be doing an i64 load or store into a stack slot that has
2533// less than a 4-byte alignment, then the frame-index elimination may need to
2534// use an indexed load or store instruction (because the offset may not be a
2535// multiple of 4). The extra register needed to hold the offset comes from the
2536// register scavenger, and it is possible that the scavenger will need to use
2537// an emergency spill slot. As a result, we need to make sure that a spill slot
2538// is allocated when doing an i64 load/store into a less-than-4-byte-aligned
2539// stack slot.
2540static void fixupFuncForFI(SelectionDAG &DAG, int FrameIdx, EVT VT) {
2541 // FIXME: This does not handle the LWA case.
2542 if (VT != MVT::i64)
2543 return;
2544
2545 // NOTE: We'll exclude negative FIs here, which come from argument
2546 // lowering, because there are no known test cases triggering this problem
2547 // using packed structures (or similar). We can remove this exclusion if
2548 // we find such a test case. The reason why this is so test-case driven is
2549 // because this entire 'fixup' is only to prevent crashes (from the
2550 // register scavenger) on not-really-valid inputs. For example, if we have:
2551 // %a = alloca i1
2552 // %b = bitcast i1* %a to i64*
2553  //   store i64* %a, i64 %b
2554 // then the store should really be marked as 'align 1', but is not. If it
2555 // were marked as 'align 1' then the indexed form would have been
2556 // instruction-selected initially, and the problem this 'fixup' is preventing
2557 // won't happen regardless.
2558 if (FrameIdx < 0)
2559 return;
2560
2561 MachineFunction &MF = DAG.getMachineFunction();
2562 MachineFrameInfo &MFI = MF.getFrameInfo();
2563
2564 if (MFI.getObjectAlign(FrameIdx) >= Align(4))
2565 return;
2566
2567 PPCFunctionInfo *FuncInfo = MF.getInfo<PPCFunctionInfo>();
2568 FuncInfo->setHasNonRISpills();
2569}
2570
2571/// Returns true if the address N can be represented by a base register plus
2572/// a signed 16-bit displacement [r+imm], and if it is not better
2573/// represented as reg+reg. If \p EncodingAlignment is non-zero, only accept
2574/// displacements that are multiples of that value.
2575bool PPCTargetLowering::SelectAddressRegImm(
2576 SDValue N, SDValue &Disp, SDValue &Base, SelectionDAG &DAG,
2577 MaybeAlign EncodingAlignment) const {
2578 // FIXME dl should come from parent load or store, not from address
2579 SDLoc dl(N);
2580
2581 // If we have a PC Relative target flag don't select as [reg+imm]. It will be
2582 // a [pc+imm].
2583 if (SelectAddressPCRel(N, Base))
2584 return false;
2585
2586 // If this can be more profitably realized as r+r, fail.
2587 if (SelectAddressRegReg(N, Disp, Base, DAG, EncodingAlignment))
2588 return false;
2589
2590 if (N.getOpcode() == ISD::ADD) {
2591 int16_t imm = 0;
2592 if (isIntS16Immediate(N.getOperand(1), imm) &&
2593 (!EncodingAlignment || isAligned(*EncodingAlignment, imm))) {
2594 Disp = DAG.getTargetConstant(imm, dl, N.getValueType());
2595 if (FrameIndexSDNode *FI = dyn_cast<FrameIndexSDNode>(N.getOperand(0))) {
2596 Base = DAG.getTargetFrameIndex(FI->getIndex(), N.getValueType());
2597 fixupFuncForFI(DAG, FI->getIndex(), N.getValueType());
2598 } else {
2599 Base = N.getOperand(0);
2600 }
2601 return true; // [r+i]
2602 } else if (N.getOperand(1).getOpcode() == PPCISD::Lo) {
2603 // Match LOAD (ADD (X, Lo(G))).
2604      assert(!cast<ConstantSDNode>(N.getOperand(1).getOperand(1))->getZExtValue()
2605             && "Cannot handle constant offsets yet!");
2606 Disp = N.getOperand(1).getOperand(0); // The global address.
2607      assert(Disp.getOpcode() == ISD::TargetGlobalAddress ||
2608             Disp.getOpcode() == ISD::TargetGlobalTLSAddress ||
2609             Disp.getOpcode() == ISD::TargetConstantPool ||
2610             Disp.getOpcode() == ISD::TargetJumpTable);
2611 Base = N.getOperand(0);
2612 return true; // [&g+r]
2613 }
2614 } else if (N.getOpcode() == ISD::OR) {
2615 int16_t imm = 0;
2616 if (isIntS16Immediate(N.getOperand(1), imm) &&
2617 (!EncodingAlignment || isAligned(*EncodingAlignment, imm))) {
2618 // If this is an or of disjoint bitfields, we can codegen this as an add
2619 // (for better address arithmetic) if the LHS and RHS of the OR are
2620 // provably disjoint.
2621 KnownBits LHSKnown = DAG.computeKnownBits(N.getOperand(0));
2622
2623 if ((LHSKnown.Zero.getZExtValue()|~(uint64_t)imm) == ~0ULL) {
2624 // If all of the bits are known zero on the LHS or RHS, the add won't
2625 // carry.
2626 if (FrameIndexSDNode *FI =
2627 dyn_cast<FrameIndexSDNode>(N.getOperand(0))) {
2628 Base = DAG.getTargetFrameIndex(FI->getIndex(), N.getValueType());
2629 fixupFuncForFI(DAG, FI->getIndex(), N.getValueType());
2630 } else {
2631 Base = N.getOperand(0);
2632 }
2633 Disp = DAG.getTargetConstant(imm, dl, N.getValueType());
2634 return true;
2635 }
2636 }
2637 } else if (ConstantSDNode *CN = dyn_cast<ConstantSDNode>(N)) {
2638 // Loading from a constant address.
2639
2640 // If this address fits entirely in a 16-bit sext immediate field, codegen
2641 // this as "d, 0"
2642 int16_t Imm;
2643 if (isIntS16Immediate(CN, Imm) &&
2644 (!EncodingAlignment || isAligned(*EncodingAlignment, Imm))) {
2645 Disp = DAG.getTargetConstant(Imm, dl, CN->getValueType(0));
2646 Base = DAG.getRegister(Subtarget.isPPC64() ? PPC::ZERO8 : PPC::ZERO,
2647 CN->getValueType(0));
2648 return true;
2649 }
2650
2651 // Handle 32-bit sext immediates with LIS + addr mode.
2652 if ((CN->getValueType(0) == MVT::i32 ||
2653 (int64_t)CN->getZExtValue() == (int)CN->getZExtValue()) &&
2654 (!EncodingAlignment ||
2655 isAligned(*EncodingAlignment, CN->getZExtValue()))) {
2656 int Addr = (int)CN->getZExtValue();
2657
2658 // Otherwise, break this down into an LIS + disp.
2659 Disp = DAG.getTargetConstant((short)Addr, dl, MVT::i32);
2660
2661 Base = DAG.getTargetConstant((Addr - (signed short)Addr) >> 16, dl,
2662 MVT::i32);
2663 unsigned Opc = CN->getValueType(0) == MVT::i32 ? PPC::LIS : PPC::LIS8;
2664 Base = SDValue(DAG.getMachineNode(Opc, dl, CN->getValueType(0), Base), 0);
2665 return true;
2666 }
2667 }
2668
2669 Disp = DAG.getTargetConstant(0, dl, getPointerTy(DAG.getDataLayout()));
2670 if (FrameIndexSDNode *FI = dyn_cast<FrameIndexSDNode>(N)) {
2671 Base = DAG.getTargetFrameIndex(FI->getIndex(), N.getValueType());
2672 fixupFuncForFI(DAG, FI->getIndex(), N.getValueType());
2673 } else
2674 Base = N;
2675 return true; // [r+0]
2676}
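// Worked example for the LIS + disp split above (added for clarity): for a
// constant address 0x12348000 the low part is (short)0x8000 = -32768 and
// the high part is (0x12348000 - (-32768)) >> 16 = 0x1235, so LIS
// materializes 0x12350000 and the memory access applies -32768, giving
// back 0x12348000.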
2677
2678/// Similar to the 16-bit case but for instructions that take a 34-bit
2679/// displacement field (prefixed loads/stores).
2680bool PPCTargetLowering::SelectAddressRegImm34(SDValue N, SDValue &Disp,
2681 SDValue &Base,
2682 SelectionDAG &DAG) const {
2683 // Only on 64-bit targets.
2684 if (N.getValueType() != MVT::i64)
2685 return false;
2686
2687 SDLoc dl(N);
2688 int64_t Imm = 0;
2689
2690 if (N.getOpcode() == ISD::ADD) {
2691 if (!isIntS34Immediate(N.getOperand(1), Imm))
2692 return false;
2693 Disp = DAG.getTargetConstant(Imm, dl, N.getValueType());
2694 if (FrameIndexSDNode *FI = dyn_cast<FrameIndexSDNode>(N.getOperand(0)))
2695 Base = DAG.getTargetFrameIndex(FI->getIndex(), N.getValueType());
2696 else
2697 Base = N.getOperand(0);
2698 return true;
2699 }
2700
2701 if (N.getOpcode() == ISD::OR) {
2702 if (!isIntS34Immediate(N.getOperand(1), Imm))
2703 return false;
2704 // If this is an or of disjoint bitfields, we can codegen this as an add
2705 // (for better address arithmetic) if the LHS and RHS of the OR are
2706 // provably disjoint.
2707 KnownBits LHSKnown = DAG.computeKnownBits(N.getOperand(0));
2708 if ((LHSKnown.Zero.getZExtValue() | ~(uint64_t)Imm) != ~0ULL)
2709 return false;
2710 if (FrameIndexSDNode *FI = dyn_cast<FrameIndexSDNode>(N.getOperand(0)))
2711 Base = DAG.getTargetFrameIndex(FI->getIndex(), N.getValueType());
2712 else
2713 Base = N.getOperand(0);
2714 Disp = DAG.getTargetConstant(Imm, dl, N.getValueType());
2715 return true;
2716 }
2717
2718 if (isIntS34Immediate(N, Imm)) { // If the address is a 34-bit const.
2719 Disp = DAG.getTargetConstant(Imm, dl, N.getValueType());
2720 Base = DAG.getRegister(PPC::ZERO8, N.getValueType());
2721 return true;
2722 }
2723
2724 return false;
2725}
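// Illustrative note (added sketch): isInt<34> accepts displacements in
// [-2^33, 2^33 - 1], so on a 64-bit target (add %X, 4294967296) yields
// Disp = 2^32 with Base = %X, a displacement only the prefixed
// (34-bit displacement) loads and stores can encode.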
2726
2727 /// SelectAddressRegRegOnly - Given the specified address, force it to be
2728/// represented as an indexed [r+r] operation.
2729bool PPCTargetLowering::SelectAddressRegRegOnly(SDValue N, SDValue &Base,
2730 SDValue &Index,
2731 SelectionDAG &DAG) const {
2732 // Check to see if we can easily represent this as an [r+r] address. This
2733 // will fail if it thinks that the address is more profitably represented as
2734 // reg+imm, e.g. where imm = 0.
2735 if (SelectAddressRegReg(N, Base, Index, DAG))
2736 return true;
2737
2738 // If the address is the result of an add, we will utilize the fact that the
2739 // address calculation includes an implicit add. However, we can reduce
2740 // register pressure if we do not materialize a constant just for use as the
2741 // index register. We only get rid of the add if it is not an add of a
2742 // value and a 16-bit signed constant and both have a single use.
2743 int16_t imm = 0;
2744 if (N.getOpcode() == ISD::ADD &&
2745 (!isIntS16Immediate(N.getOperand(1), imm) ||
2746 !N.getOperand(1).hasOneUse() || !N.getOperand(0).hasOneUse())) {
2747 Base = N.getOperand(0);
2748 Index = N.getOperand(1);
2749 return true;
2750 }
2751
2752 // Otherwise, do it the hard way, using R0 as the base register.
2753 Base = DAG.getRegister(Subtarget.isPPC64() ? PPC::ZERO8 : PPC::ZERO,
2754 N.getValueType());
2755 Index = N;
2756 return true;
2757}
2758
2759template <typename Ty> static bool isValidPCRelNode(SDValue N) {
2760 Ty *PCRelCand = dyn_cast<Ty>(N);
2761 return PCRelCand && (PCRelCand->getTargetFlags() & PPCII::MO_PCREL_FLAG);
2762}
2763
2764/// Returns true if this address is a PC Relative address.
2765/// PC Relative addresses are marked with the flag PPCII::MO_PCREL_FLAG
2766/// or if the node opcode is PPCISD::MAT_PCREL_ADDR.
2767bool PPCTargetLowering::SelectAddressPCRel(SDValue N, SDValue &Base) const {
2768 // This is a materialize PC Relative node. Always select this as PC Relative.
2769 Base = N;
2770 if (N.getOpcode() == PPCISD::MAT_PCREL_ADDR)
2771 return true;
2772 if (isValidPCRelNode<ConstantPoolSDNode>(N) ||
2773 isValidPCRelNode<GlobalAddressSDNode>(N) ||
2774 isValidPCRelNode<JumpTableSDNode>(N) ||
2775 isValidPCRelNode<BlockAddressSDNode>(N))
2776 return true;
2777 return false;
2778}
2779
2780/// Returns true if we should use a direct load into vector instruction
2781/// (such as lxsd or lfd), instead of a load into gpr + direct move sequence.
2782static bool usePartialVectorLoads(SDNode *N, const PPCSubtarget& ST) {
2783
2784   // If there are any uses other than scalar_to_vector, then we should
2785 // keep it as a scalar load -> direct move pattern to prevent multiple
2786 // loads.
2787 LoadSDNode *LD = dyn_cast<LoadSDNode>(N);
2788 if (!LD)
2789 return false;
2790
2791 EVT MemVT = LD->getMemoryVT();
2792 if (!MemVT.isSimple())
2793 return false;
2794 switch(MemVT.getSimpleVT().SimpleTy) {
2795 case MVT::i64:
2796 break;
2797 case MVT::i32:
2798 if (!ST.hasP8Vector())
2799 return false;
2800 break;
2801 case MVT::i16:
2802 case MVT::i8:
2803 if (!ST.hasP9Vector())
2804 return false;
2805 break;
2806 default:
2807 return false;
2808 }
2809
2810 SDValue LoadedVal(N, 0);
2811 if (!LoadedVal.hasOneUse())
2812 return false;
2813
2814 for (SDNode::use_iterator UI = LD->use_begin(), UE = LD->use_end();
2815 UI != UE; ++UI)
2816 if (UI.getUse().get().getResNo() == 0 &&
2817 UI->getOpcode() != ISD::SCALAR_TO_VECTOR &&
2818 UI->getOpcode() != PPCISD::SCALAR_TO_VECTOR_PERMUTED)
2819 return false;
2820
2821 return true;
2822}
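// Illustrative note (added sketch): an i32 load whose only user is a
// scalar_to_vector, on a subtarget with Power8 vector support, passes the
// checks above, so it can be loaded straight into a vector register rather
// than going through a GPR and a direct move.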
2823
2824 /// getPreIndexedAddressParts - Returns true, and sets the base pointer,
2825 /// offset pointer, and addressing mode by reference, if the node's address
2826 /// can be legally represented as a pre-indexed load / store address.
2827bool PPCTargetLowering::getPreIndexedAddressParts(SDNode *N, SDValue &Base,
2828 SDValue &Offset,
2829 ISD::MemIndexedMode &AM,
2830 SelectionDAG &DAG) const {
2831 if (DisablePPCPreinc) return false;
2832
2833 bool isLoad = true;
2834 SDValue Ptr;
2835 EVT VT;
2836 unsigned Alignment;
2837 if (LoadSDNode *LD = dyn_cast<LoadSDNode>(N)) {
2838 Ptr = LD->getBasePtr();
2839 VT = LD->getMemoryVT();
2840 Alignment = LD->getAlignment();
2841 } else if (StoreSDNode *ST = dyn_cast<StoreSDNode>(N)) {
2842 Ptr = ST->getBasePtr();
2843 VT = ST->getMemoryVT();
2844 Alignment = ST->getAlignment();
2845 isLoad = false;
2846 } else
2847 return false;
2848
2849 // Do not generate pre-inc forms for specific loads that feed scalar_to_vector
2850 // instructions because we can fold these into a more efficient instruction
2851  // instead (such as LXSD).
2852 if (isLoad && usePartialVectorLoads(N, Subtarget)) {
2853 return false;
2854 }
2855
2856 // PowerPC doesn't have preinc load/store instructions for vectors
2857 if (VT.isVector())
2858 return false;
2859
2860 if (SelectAddressRegReg(Ptr, Base, Offset, DAG)) {
2861 // Common code will reject creating a pre-inc form if the base pointer
2862 // is a frame index, or if N is a store and the base pointer is either
2863 // the same as or a predecessor of the value being stored. Check for
2864 // those situations here, and try with swapped Base/Offset instead.
2865 bool Swap = false;
2866
2867 if (isa<FrameIndexSDNode>(Base) || isa<RegisterSDNode>(Base))
2868 Swap = true;
2869 else if (!isLoad) {
2870 SDValue Val = cast<StoreSDNode>(N)->getValue();
2871 if (Val == Base || Base.getNode()->isPredecessorOf(Val.getNode()))
2872 Swap = true;
2873 }
2874
2875 if (Swap)
2876 std::swap(Base, Offset);
2877
2878 AM = ISD::PRE_INC;
2879 return true;
2880 }
2881
2882 // LDU/STU can only handle immediates that are a multiple of 4.
2883 if (VT != MVT::i64) {
2884 if (!SelectAddressRegImm(Ptr, Offset, Base, DAG, None))
2885 return false;
2886 } else {
2887 // LDU/STU need an address with at least 4-byte alignment.
2888 if (Alignment < 4)
2889 return false;
2890
2891 if (!SelectAddressRegImm(Ptr, Offset, Base, DAG, Align(4)))
2892 return false;
2893 }
2894
2895 if (LoadSDNode *LD = dyn_cast<LoadSDNode>(N)) {
2896 // PPC64 doesn't have lwau, but it does have lwaux. Reject preinc load of
2897 // sext i32 to i64 when addr mode is r+i.
2898 if (LD->getValueType(0) == MVT::i64 && LD->getMemoryVT() == MVT::i32 &&
2899 LD->getExtensionType() == ISD::SEXTLOAD &&
2900 isa<ConstantSDNode>(Offset))
2901 return false;
2902 }
2903
2904 AM = ISD::PRE_INC;
2905 return true;
2906}
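// Illustrative note (added sketch, assuming an update-form instruction such
// as stwu is available): an i32 store to (add %X, 16) fails the [r+r]
// check, satisfies SelectAddressRegImm, and is returned with AM set to
// ISD::PRE_INC, so it can later be selected as a store-with-update that
// also advances the base register by 16.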
2907
2908//===----------------------------------------------------------------------===//
2909// LowerOperation implementation
2910//===----------------------------------------------------------------------===//
2911
2912 /// Set HiOpFlags and LoOpFlags to the target MO flags, adding the PIC flag
2913 /// when labels should be referenced through a PIC base.
2914static void getLabelAccessInfo(bool IsPIC, const PPCSubtarget &Subtarget,
2915 unsigned &HiOpFlags, unsigned &LoOpFlags,
2916 const GlobalValue *GV = nullptr) {
2917 HiOpFlags = PPCII::MO_HA;
2918 LoOpFlags = PPCII::MO_LO;
2919
2920 // Don't use the pic base if not in PIC relocation model.
2921 if (IsPIC) {
2922 HiOpFlags |= PPCII::MO_PIC_FLAG;
2923 LoOpFlags |= PPCII::MO_PIC_FLAG;
2924 }
2925}
2926
2927static SDValue LowerLabelRef(SDValue HiPart, SDValue LoPart, bool isPIC,
2928 SelectionDAG &DAG) {
2929 SDLoc DL(HiPart);
2930 EVT PtrVT = HiPart.getValueType();
2931 SDValue Zero = DAG.getConstant(0, DL, PtrVT);
2932
2933 SDValue Hi = DAG.getNode(PPCISD::Hi, DL, PtrVT, HiPart, Zero);
2934 SDValue Lo = DAG.getNode(PPCISD::Lo, DL, PtrVT, LoPart, Zero);
2935
2936 // With PIC, the first instruction is actually "GR+hi(&G)".
2937 if (isPIC)
2938 Hi = DAG.getNode(ISD::ADD, DL, PtrVT,
2939 DAG.getNode(PPCISD::GlobalBaseReg, DL, PtrVT), Hi);
2940
2941 // Generate non-pic code that has direct accesses to the constant pool.
2942 // The address of the global is just (hi(&g)+lo(&g)).
2943 return DAG.getNode(ISD::ADD, DL, PtrVT, Hi, Lo);
2944}
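// Illustrative note (added, typical code shape rather than a guarantee):
// the Hi/Lo pair built above usually materializes as "lis rD, sym@ha"
// followed by "addi rD, rD, sym@l" (plus an add of the PIC base in the PIC
// case), where @ha pre-compensates for the sign of the low 16 bits.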
2945
2946static void setUsesTOCBasePtr(MachineFunction &MF) {
2947 PPCFunctionInfo *FuncInfo = MF.getInfo<PPCFunctionInfo>();
2948 FuncInfo->setUsesTOCBasePtr();
2949}
2950
2951static void setUsesTOCBasePtr(SelectionDAG &DAG) {
2952 setUsesTOCBasePtr(DAG.getMachineFunction());
2953}
2954
2955SDValue PPCTargetLowering::getTOCEntry(SelectionDAG &DAG, const SDLoc &dl,
2956 SDValue GA) const {
2957 const bool Is64Bit = Subtarget.isPPC64();
2958 EVT VT = Is64Bit ? MVT::i64 : MVT::i32;
2959 SDValue Reg = Is64Bit ? DAG.getRegister(PPC::X2, VT)
2960 : Subtarget.isAIXABI()
2961 ? DAG.getRegister(PPC::R2, VT)
2962 : DAG.getNode(PPCISD::GlobalBaseReg, dl, VT);
2963 SDValue Ops[] = { GA, Reg };
2964 return DAG.getMemIntrinsicNode(
2965 PPCISD::TOC_ENTRY, dl, DAG.getVTList(VT, MVT::Other), Ops, VT,
2966 MachinePointerInfo::getGOT(DAG.getMachineFunction()), None,
2967 MachineMemOperand::MOLoad);
2968}
2969
2970SDValue PPCTargetLowering::LowerConstantPool(SDValue Op,
2971 SelectionDAG &DAG) const {
2972 EVT PtrVT = Op.getValueType();
2973 ConstantPoolSDNode *CP = cast<ConstantPoolSDNode>(Op);
2974 const Constant *C = CP->getConstVal();
2975
2976 // 64-bit SVR4 ABI and AIX ABI code are always position-independent.
2977 // The actual address of the GlobalValue is stored in the TOC.
2978 if (Subtarget.is64BitELFABI() || Subtarget.isAIXABI()) {
2979 if (Subtarget.isUsingPCRelativeCalls()) {
2980 SDLoc DL(CP);
2981 EVT Ty = getPointerTy(DAG.getDataLayout());
2982 SDValue ConstPool = DAG.getTargetConstantPool(
2983 C, Ty, CP->getAlign(), CP->getOffset(), PPCII::MO_PCREL_FLAG);
2984 return DAG.getNode(PPCISD::MAT_PCREL_ADDR, DL, Ty, ConstPool);
2985 }
2986 setUsesTOCBasePtr(DAG);
2987 SDValue GA = DAG.getTargetConstantPool(C, PtrVT, CP->getAlign(), 0);
2988 return getTOCEntry(DAG, SDLoc(CP), GA);
2989 }
2990
2991 unsigned MOHiFlag, MOLoFlag;
2992 bool IsPIC = isPositionIndependent();
2993 getLabelAccessInfo(IsPIC, Subtarget, MOHiFlag, MOLoFlag);
2994
2995 if (IsPIC && Subtarget.isSVR4ABI()) {
2996 SDValue GA =
2997 DAG.getTargetConstantPool(C, PtrVT, CP->getAlign(), PPCII::MO_PIC_FLAG);
2998 return getTOCEntry(DAG, SDLoc(CP), GA);
2999 }
3000
3001 SDValue CPIHi =
3002 DAG.getTargetConstantPool(C, PtrVT, CP->getAlign(), 0, MOHiFlag);
3003 SDValue CPILo =
3004 DAG.getTargetConstantPool(C, PtrVT, CP->getAlign(), 0, MOLoFlag);
3005 return LowerLabelRef(CPIHi, CPILo, IsPIC, DAG);
3006}
3007
3008// For 64-bit PowerPC, prefer the more compact relative encodings.
3009// This trades 32 bits per jump table entry for one or two instructions
3010 // at the jump site.
3011unsigned PPCTargetLowering::getJumpTableEncoding() const {
3012 if (isJumpTableRelative())
3013 return MachineJumpTableInfo::EK_LabelDifference32;
3014
3015 return TargetLowering::getJumpTableEncoding();
3016}
3017
3018bool PPCTargetLowering::isJumpTableRelative() const {
3019 if (UseAbsoluteJumpTables)
3020 return false;
3021 if (Subtarget.isPPC64() || Subtarget.isAIXABI())
3022 return true;
3023 return TargetLowering::isJumpTableRelative();
3024}
3025
3026SDValue PPCTargetLowering::getPICJumpTableRelocBase(SDValue Table,
3027 SelectionDAG &DAG) const {
3028 if (!Subtarget.isPPC64() || Subtarget.isAIXABI())
3029 return TargetLowering::getPICJumpTableRelocBase(Table, DAG);
3030
3031 switch (getTargetMachine().getCodeModel()) {
3032 case CodeModel::Small:
3033 case CodeModel::Medium:
3034 return TargetLowering::getPICJumpTableRelocBase(Table, DAG);
3035 default:
3036 return DAG.getNode(PPCISD::GlobalBaseReg, SDLoc(),
3037 getPointerTy(DAG.getDataLayout()));
3038 }
3039}
3040
3041const MCExpr *
3042PPCTargetLowering::getPICJumpTableRelocBaseExpr(const MachineFunction *MF,
3043 unsigned JTI,
3044 MCContext &Ctx) const {
3045 if (!Subtarget.isPPC64() || Subtarget.isAIXABI())
3046 return TargetLowering::getPICJumpTableRelocBaseExpr(MF, JTI, Ctx);
3047
3048 switch (getTargetMachine().getCodeModel()) {
3049 case CodeModel::Small:
3050 case CodeModel::Medium:
3051 return TargetLowering::getPICJumpTableRelocBaseExpr(MF, JTI, Ctx);
3052 default:
3053 return MCSymbolRefExpr::create(MF->getPICBaseSymbol(), Ctx);
3054 }
3055}
3056
3057SDValue PPCTargetLowering::LowerJumpTable(SDValue Op, SelectionDAG &DAG) const {
3058 EVT PtrVT = Op.getValueType();
3059 JumpTableSDNode *JT = cast<JumpTableSDNode>(Op);
3060
3061 // isUsingPCRelativeCalls() returns true when PCRelative is enabled
3062 if (Subtarget.isUsingPCRelativeCalls()) {
3063 SDLoc DL(JT);
3064 EVT Ty = getPointerTy(DAG.getDataLayout());
3065 SDValue GA =
3066 DAG.getTargetJumpTable(JT->getIndex(), Ty, PPCII::MO_PCREL_FLAG);
3067 SDValue MatAddr = DAG.getNode(PPCISD::MAT_PCREL_ADDR, DL, Ty, GA);
3068 return MatAddr;
3069 }
3070
3071 // 64-bit SVR4 ABI and AIX ABI code are always position-independent.
3072 // The actual address of the GlobalValue is stored in the TOC.
3073 if (Subtarget.is64BitELFABI() || Subtarget.isAIXABI()) {
3074 setUsesTOCBasePtr(DAG);
3075 SDValue GA = DAG.getTargetJumpTable(JT->getIndex(), PtrVT);
3076 return getTOCEntry(DAG, SDLoc(JT), GA);
3077 }
3078
3079 unsigned MOHiFlag, MOLoFlag;
3080 bool IsPIC = isPositionIndependent();
3081 getLabelAccessInfo(IsPIC, Subtarget, MOHiFlag, MOLoFlag);
3082
3083 if (IsPIC && Subtarget.isSVR4ABI()) {
3084 SDValue GA = DAG.getTargetJumpTable(JT->getIndex(), PtrVT,
3085 PPCII::MO_PIC_FLAG);
3086 return getTOCEntry(DAG, SDLoc(GA), GA);
3087 }
3088
3089 SDValue JTIHi = DAG.getTargetJumpTable(JT->getIndex(), PtrVT, MOHiFlag);
3090 SDValue JTILo = DAG.getTargetJumpTable(JT->getIndex(), PtrVT, MOLoFlag);
3091 return LowerLabelRef(JTIHi, JTILo, IsPIC, DAG);
3092}
3093
3094SDValue PPCTargetLowering::LowerBlockAddress(SDValue Op,
3095 SelectionDAG &DAG) const {
3096 EVT PtrVT = Op.getValueType();
3097 BlockAddressSDNode *BASDN = cast<BlockAddressSDNode>(Op);
3098 const BlockAddress *BA = BASDN->getBlockAddress();
3099
3100 // isUsingPCRelativeCalls() returns true when PCRelative is enabled
3101 if (Subtarget.isUsingPCRelativeCalls()) {
3102 SDLoc DL(BASDN);
3103 EVT Ty = getPointerTy(DAG.getDataLayout());
3104 SDValue GA = DAG.getTargetBlockAddress(BA, Ty, BASDN->getOffset(),
3105 PPCII::MO_PCREL_FLAG);
3106 SDValue MatAddr = DAG.getNode(PPCISD::MAT_PCREL_ADDR, DL, Ty, GA);
3107 return MatAddr;
3108 }
3109
3110 // 64-bit SVR4 ABI and AIX ABI code are always position-independent.
3111 // The actual BlockAddress is stored in the TOC.
3112 if (Subtarget.is64BitELFABI() || Subtarget.isAIXABI()) {
3113 setUsesTOCBasePtr(DAG);
3114 SDValue GA = DAG.getTargetBlockAddress(BA, PtrVT, BASDN->getOffset());
3115 return getTOCEntry(DAG, SDLoc(BASDN), GA);
3116 }
3117
3118 // 32-bit position-independent ELF stores the BlockAddress in the .got.
3119 if (Subtarget.is32BitELFABI() && isPositionIndependent())
3120 return getTOCEntry(
3121 DAG, SDLoc(BASDN),
3122 DAG.getTargetBlockAddress(BA, PtrVT, BASDN->getOffset()));
3123
3124 unsigned MOHiFlag, MOLoFlag;
3125 bool IsPIC = isPositionIndependent();
3126 getLabelAccessInfo(IsPIC, Subtarget, MOHiFlag, MOLoFlag);
3127 SDValue TgtBAHi = DAG.getTargetBlockAddress(BA, PtrVT, 0, MOHiFlag);
3128 SDValue TgtBALo = DAG.getTargetBlockAddress(BA, PtrVT, 0, MOLoFlag);
3129 return LowerLabelRef(TgtBAHi, TgtBALo, IsPIC, DAG);
3130}
3131
3132SDValue PPCTargetLowering::LowerGlobalTLSAddress(SDValue Op,
3133 SelectionDAG &DAG) const {
3134 if (Subtarget.isAIXABI())
3135 return LowerGlobalTLSAddressAIX(Op, DAG);
3136
3137 return LowerGlobalTLSAddressLinux(Op, DAG);
3138}
3139
3140SDValue PPCTargetLowering::LowerGlobalTLSAddressAIX(SDValue Op,
3141 SelectionDAG &DAG) const {
3142 GlobalAddressSDNode *GA = cast<GlobalAddressSDNode>(Op);
3143
3144 if (DAG.getTarget().useEmulatedTLS())
3145 report_fatal_error("Emulated TLS is not yet supported on AIX");
3146
3147 SDLoc dl(GA);
3148 const GlobalValue *GV = GA->getGlobal();
3149 EVT PtrVT = getPointerTy(DAG.getDataLayout());
3150
3151 // The general-dynamic model is the only access model supported for now, so
3152 // all the GlobalTLSAddress nodes are lowered with this model.
3153 // We need to generate two TOC entries, one for the variable offset, one for
3154 // the region handle. The global address for the TOC entry of the region
3155 // handle is created with the MO_TLSGD_FLAG flag so we can easily identify
3156 // this entry and add the right relocation.
3157 SDValue VariableOffsetTGA = DAG.getTargetGlobalAddress(GV, dl, PtrVT, 0, 0);
3158 SDValue RegionHandleTGA =
3159 DAG.getTargetGlobalAddress(GV, dl, PtrVT, 0, PPCII::MO_TLSGD_FLAG);
3160 SDValue VariableOffset = getTOCEntry(DAG, dl, VariableOffsetTGA);
3161 SDValue RegionHandle = getTOCEntry(DAG, dl, RegionHandleTGA);
3162 return DAG.getNode(PPCISD::TLSGD_AIX, dl, PtrVT, VariableOffset,
3163 RegionHandle);
3164}
3165
3166SDValue PPCTargetLowering::LowerGlobalTLSAddressLinux(SDValue Op,
3167 SelectionDAG &DAG) const {
3168 // FIXME: TLS addresses currently use medium model code sequences,
3169 // which is the most useful form. Eventually support for small and
3170 // large models could be added if users need it, at the cost of
3171 // additional complexity.
3172 GlobalAddressSDNode *GA = cast<GlobalAddressSDNode>(Op);
3173 if (DAG.getTarget().useEmulatedTLS())
3174 return LowerToTLSEmulatedModel(GA, DAG);
3175
3176 SDLoc dl(GA);
3177 const GlobalValue *GV = GA->getGlobal();
3178 EVT PtrVT = getPointerTy(DAG.getDataLayout());
3179 bool is64bit = Subtarget.isPPC64();
3180 const Module *M = DAG.getMachineFunction().getFunction().getParent();
3181 PICLevel::Level picLevel = M->getPICLevel();
3182
3183 const TargetMachine &TM = getTargetMachine();
3184 TLSModel::Model Model = TM.getTLSModel(GV);
3185
3186 if (Model == TLSModel::LocalExec) {
3187 if (Subtarget.isUsingPCRelativeCalls()) {
3188 SDValue TLSReg = DAG.getRegister(PPC::X13, MVT::i64);
3189 SDValue TGA = DAG.getTargetGlobalAddress(
3190 GV, dl, PtrVT, 0, (PPCII::MO_PCREL_FLAG | PPCII::MO_TPREL_FLAG));
3191 SDValue MatAddr =
3192 DAG.getNode(PPCISD::TLS_LOCAL_EXEC_MAT_ADDR, dl, PtrVT, TGA);
3193 return DAG.getNode(PPCISD::ADD_TLS, dl, PtrVT, TLSReg, MatAddr);
3194 }
3195
3196 SDValue TGAHi = DAG.getTargetGlobalAddress(GV, dl, PtrVT, 0,
3197 PPCII::MO_TPREL_HA);
3198 SDValue TGALo = DAG.getTargetGlobalAddress(GV, dl, PtrVT, 0,
3199 PPCII::MO_TPREL_LO);
3200 SDValue TLSReg = is64bit ? DAG.getRegister(PPC::X13, MVT::i64)
3201 : DAG.getRegister(PPC::R2, MVT::i32);
3202
3203 SDValue Hi = DAG.getNode(PPCISD::Hi, dl, PtrVT, TGAHi, TLSReg);
3204 return DAG.getNode(PPCISD::Lo, dl, PtrVT, TGALo, Hi);
3205 }
3206
3207 if (Model == TLSModel::InitialExec) {
3208 bool IsPCRel = Subtarget.isUsingPCRelativeCalls();
3209 SDValue TGA = DAG.getTargetGlobalAddress(
3210 GV, dl, PtrVT, 0, IsPCRel ? PPCII::MO_GOT_TPREL_PCREL_FLAG : 0);
3211 SDValue TGATLS = DAG.getTargetGlobalAddress(
3212 GV, dl, PtrVT, 0,
3213 IsPCRel ? (PPCII::MO_TLS | PPCII::MO_PCREL_FLAG) : PPCII::MO_TLS);
3214 SDValue TPOffset;
3215 if (IsPCRel) {
3216 SDValue MatPCRel = DAG.getNode(PPCISD::MAT_PCREL_ADDR, dl, PtrVT, TGA);
3217 TPOffset = DAG.getLoad(MVT::i64, dl, DAG.getEntryNode(), MatPCRel,
3218 MachinePointerInfo());
3219 } else {
3220 SDValue GOTPtr;
3221 if (is64bit) {
3222 setUsesTOCBasePtr(DAG);
3223 SDValue GOTReg = DAG.getRegister(PPC::X2, MVT::i64);
3224 GOTPtr =
3225 DAG.getNode(PPCISD::ADDIS_GOT_TPREL_HA, dl, PtrVT, GOTReg, TGA);
3226 } else {
3227 if (!TM.isPositionIndependent())
3228 GOTPtr = DAG.getNode(PPCISD::PPC32_GOT, dl, PtrVT);
3229 else if (picLevel == PICLevel::SmallPIC)
3230 GOTPtr = DAG.getNode(PPCISD::GlobalBaseReg, dl, PtrVT);
3231 else
3232 GOTPtr = DAG.getNode(PPCISD::PPC32_PICGOT, dl, PtrVT);
3233 }
3234 TPOffset = DAG.getNode(PPCISD::LD_GOT_TPREL_L, dl, PtrVT, TGA, GOTPtr);
3235 }
3236 return DAG.getNode(PPCISD::ADD_TLS, dl, PtrVT, TPOffset, TGATLS);
3237 }
3238
3239 if (Model == TLSModel::GeneralDynamic) {
3240 if (Subtarget.isUsingPCRelativeCalls()) {
3241 SDValue TGA = DAG.getTargetGlobalAddress(GV, dl, PtrVT, 0,
3242 PPCII::MO_GOT_TLSGD_PCREL_FLAG);
3243 return DAG.getNode(PPCISD::TLS_DYNAMIC_MAT_PCREL_ADDR, dl, PtrVT, TGA);
3244 }
3245
3246 SDValue TGA = DAG.getTargetGlobalAddress(GV, dl, PtrVT, 0, 0);
3247 SDValue GOTPtr;
3248 if (is64bit) {
3249 setUsesTOCBasePtr(DAG);
3250 SDValue GOTReg = DAG.getRegister(PPC::X2, MVT::i64);
3251 GOTPtr = DAG.getNode(PPCISD::ADDIS_TLSGD_HA, dl, PtrVT,
3252 GOTReg, TGA);
3253 } else {
3254 if (picLevel == PICLevel::SmallPIC)
3255 GOTPtr = DAG.getNode(PPCISD::GlobalBaseReg, dl, PtrVT);
3256 else
3257 GOTPtr = DAG.getNode(PPCISD::PPC32_PICGOT, dl, PtrVT);
3258 }
3259 return DAG.getNode(PPCISD::ADDI_TLSGD_L_ADDR, dl, PtrVT,
3260 GOTPtr, TGA, TGA);
3261 }
3262
3263 if (Model == TLSModel::LocalDynamic) {
3264 if (Subtarget.isUsingPCRelativeCalls()) {
3265 SDValue TGA = DAG.getTargetGlobalAddress(GV, dl, PtrVT, 0,
3266 PPCII::MO_GOT_TLSLD_PCREL_FLAG);
3267 SDValue MatPCRel =
3268 DAG.getNode(PPCISD::TLS_DYNAMIC_MAT_PCREL_ADDR, dl, PtrVT, TGA);
3269 return DAG.getNode(PPCISD::PADDI_DTPREL, dl, PtrVT, MatPCRel, TGA);
3270 }
3271
3272 SDValue TGA = DAG.getTargetGlobalAddress(GV, dl, PtrVT, 0, 0);
3273 SDValue GOTPtr;
3274 if (is64bit) {
3275 setUsesTOCBasePtr(DAG);
3276 SDValue GOTReg = DAG.getRegister(PPC::X2, MVT::i64);
3277 GOTPtr = DAG.getNode(PPCISD::ADDIS_TLSLD_HA, dl, PtrVT,
3278 GOTReg, TGA);
3279 } else {
3280 if (picLevel == PICLevel::SmallPIC)
3281 GOTPtr = DAG.getNode(PPCISD::GlobalBaseReg, dl, PtrVT);
3282 else
3283 GOTPtr = DAG.getNode(PPCISD::PPC32_PICGOT, dl, PtrVT);
3284 }
3285 SDValue TLSAddr = DAG.getNode(PPCISD::ADDI_TLSLD_L_ADDR, dl,
3286 PtrVT, GOTPtr, TGA, TGA);
3287 SDValue DtvOffsetHi = DAG.getNode(PPCISD::ADDIS_DTPREL_HA, dl,
3288 PtrVT, TLSAddr, TGA);
3289 return DAG.getNode(PPCISD::ADDI_DTPREL_L, dl, PtrVT, DtvOffsetHi, TGA);
3290 }
3291
3292 llvm_unreachable("Unknown TLS model!")::llvm::llvm_unreachable_internal("Unknown TLS model!", "/build/llvm-toolchain-snapshot-13~++20210413100635+64c24f493e5f/llvm/lib/Target/PowerPC/PPCISelLowering.cpp"
, 3292)
;
3293}
3294
3295SDValue PPCTargetLowering::LowerGlobalAddress(SDValue Op,
3296 SelectionDAG &DAG) const {
3297 EVT PtrVT = Op.getValueType();
3298 GlobalAddressSDNode *GSDN = cast<GlobalAddressSDNode>(Op);
3299 SDLoc DL(GSDN);
3300 const GlobalValue *GV = GSDN->getGlobal();
3301
3302 // 64-bit SVR4 ABI & AIX ABI code is always position-independent.
3303 // The actual address of the GlobalValue is stored in the TOC.
3304 if (Subtarget.is64BitELFABI() || Subtarget.isAIXABI()) {
3305 if (Subtarget.isUsingPCRelativeCalls()) {
3306 EVT Ty = getPointerTy(DAG.getDataLayout());
3307 if (isAccessedAsGotIndirect(Op)) {
3308 SDValue GA = DAG.getTargetGlobalAddress(GV, DL, Ty, GSDN->getOffset(),
3309 PPCII::MO_PCREL_FLAG |
3310 PPCII::MO_GOT_FLAG);
3311 SDValue MatPCRel = DAG.getNode(PPCISD::MAT_PCREL_ADDR, DL, Ty, GA);
3312 SDValue Load = DAG.getLoad(MVT::i64, DL, DAG.getEntryNode(), MatPCRel,
3313 MachinePointerInfo());
3314 return Load;
3315 } else {
3316 SDValue GA = DAG.getTargetGlobalAddress(GV, DL, Ty, GSDN->getOffset(),
3317 PPCII::MO_PCREL_FLAG);
3318 return DAG.getNode(PPCISD::MAT_PCREL_ADDR, DL, Ty, GA);
3319 }
3320 }
3321 setUsesTOCBasePtr(DAG);
3322 SDValue GA = DAG.getTargetGlobalAddress(GV, DL, PtrVT, GSDN->getOffset());
3323 return getTOCEntry(DAG, DL, GA);
3324 }
3325
3326 unsigned MOHiFlag, MOLoFlag;
3327 bool IsPIC = isPositionIndependent();
3328 getLabelAccessInfo(IsPIC, Subtarget, MOHiFlag, MOLoFlag, GV);
3329
3330 if (IsPIC && Subtarget.isSVR4ABI()) {
3331 SDValue GA = DAG.getTargetGlobalAddress(GV, DL, PtrVT,
3332 GSDN->getOffset(),
3333 PPCII::MO_PIC_FLAG);
3334 return getTOCEntry(DAG, DL, GA);
3335 }
3336
3337 SDValue GAHi =
3338 DAG.getTargetGlobalAddress(GV, DL, PtrVT, GSDN->getOffset(), MOHiFlag);
3339 SDValue GALo =
3340 DAG.getTargetGlobalAddress(GV, DL, PtrVT, GSDN->getOffset(), MOLoFlag);
3341
3342 return LowerLabelRef(GAHi, GALo, IsPIC, DAG);
3343}
3344
3345SDValue PPCTargetLowering::LowerSETCC(SDValue Op, SelectionDAG &DAG) const {
3346 bool IsStrict = Op->isStrictFPOpcode();
3347 ISD::CondCode CC =
3348 cast<CondCodeSDNode>(Op.getOperand(IsStrict ? 3 : 2))->get();
3349 SDValue LHS = Op.getOperand(IsStrict ? 1 : 0);
3350 SDValue RHS = Op.getOperand(IsStrict ? 2 : 1);
3351 SDValue Chain = IsStrict ? Op.getOperand(0) : SDValue();
3352 EVT LHSVT = LHS.getValueType();
3353 SDLoc dl(Op);
3354
3355 // Soften the setcc with libcall if it is fp128.
3356 if (LHSVT == MVT::f128) {
3357    assert(!Subtarget.hasP9Vector() &&
3358           "SETCC for f128 is already legal under Power9!");
3359 softenSetCCOperands(DAG, LHSVT, LHS, RHS, CC, dl, LHS, RHS, Chain,
3360 Op->getOpcode() == ISD::STRICT_FSETCCS);
3361 if (RHS.getNode())
3362 LHS = DAG.getNode(ISD::SETCC, dl, Op.getValueType(), LHS, RHS,
3363 DAG.getCondCode(CC));
3364 if (IsStrict)
3365 return DAG.getMergeValues({LHS, Chain}, dl);
3366 return LHS;
3367 }
3368
3369  assert(!IsStrict && "Don't know how to handle STRICT_FSETCC!");
3370
3371 if (Op.getValueType() == MVT::v2i64) {
3372 // When the operands themselves are v2i64 values, we need to do something
3373 // special because VSX has no underlying comparison operations for these.
3374 if (LHS.getValueType() == MVT::v2i64) {
3375 // Equality can be handled by casting to the legal type for Altivec
3376 // comparisons, everything else needs to be expanded.
3377 if (CC == ISD::SETEQ || CC == ISD::SETNE) {
3378 return DAG.getNode(
3379 ISD::BITCAST, dl, MVT::v2i64,
3380 DAG.getSetCC(dl, MVT::v4i32,
3381 DAG.getNode(ISD::BITCAST, dl, MVT::v4i32, LHS),
3382 DAG.getNode(ISD::BITCAST, dl, MVT::v4i32, RHS), CC));
3383 }
3384
3385 return SDValue();
3386 }
3387
3388 // We handle most of these in the usual way.
3389 return Op;
3390 }
3391
3392 // If we're comparing for equality to zero, expose the fact that this is
3393 // implemented as a ctlz/srl pair on ppc, so that the dag combiner can
3394 // fold the new nodes.
3395 if (SDValue V = lowerCmpEqZeroToCtlzSrl(Op, DAG))
3396 return V;
3397
3398 if (ConstantSDNode *C = dyn_cast<ConstantSDNode>(RHS)) {
3399 // Leave comparisons against 0 and -1 alone for now, since they're usually
3400 // optimized. FIXME: revisit this when we can custom lower all setcc
3401 // optimizations.
3402 if (C->isAllOnesValue() || C->isNullValue())
3403 return SDValue();
3404 }
3405
3406 // If we have an integer seteq/setne, turn it into a compare against zero
3407 // by xor'ing the rhs with the lhs, which is faster than setting a
3408 // condition register, reading it back out, and masking the correct bit. The
3409 // normal approach here uses sub to do this instead of xor. Using xor exposes
3410 // the result to other bit-twiddling opportunities.
3411 if (LHSVT.isInteger() && (CC == ISD::SETEQ || CC == ISD::SETNE)) {
3412 EVT VT = Op.getValueType();
3413 SDValue Sub = DAG.getNode(ISD::XOR, dl, LHSVT, LHS, RHS);
3414 return DAG.getSetCC(dl, VT, Sub, DAG.getConstant(0, dl, LHSVT), CC);
3415 }
3416 return SDValue();
3417}
3418
3419SDValue PPCTargetLowering::LowerVAARG(SDValue Op, SelectionDAG &DAG) const {
3420 SDNode *Node = Op.getNode();
3421 EVT VT = Node->getValueType(0);
3422 EVT PtrVT = getPointerTy(DAG.getDataLayout());
3423 SDValue InChain = Node->getOperand(0);
3424 SDValue VAListPtr = Node->getOperand(1);
3425 const Value *SV = cast<SrcValueSDNode>(Node->getOperand(2))->getValue();
3426 SDLoc dl(Node);
3427
3428  assert(!Subtarget.isPPC64() && "LowerVAARG is PPC32 only");
3429
3430 // gpr_index
3431 SDValue GprIndex = DAG.getExtLoad(ISD::ZEXTLOAD, dl, MVT::i32, InChain,
3432 VAListPtr, MachinePointerInfo(SV), MVT::i8);
3433 InChain = GprIndex.getValue(1);
3434
3435 if (VT == MVT::i64) {
3436 // Check if GprIndex is even
3437 SDValue GprAnd = DAG.getNode(ISD::AND, dl, MVT::i32, GprIndex,
3438 DAG.getConstant(1, dl, MVT::i32));
3439 SDValue CC64 = DAG.getSetCC(dl, MVT::i32, GprAnd,
3440 DAG.getConstant(0, dl, MVT::i32), ISD::SETNE);
3441 SDValue GprIndexPlusOne = DAG.getNode(ISD::ADD, dl, MVT::i32, GprIndex,
3442 DAG.getConstant(1, dl, MVT::i32));
3443 // Align GprIndex to be even if it isn't
3444 GprIndex = DAG.getNode(ISD::SELECT, dl, MVT::i32, CC64, GprIndexPlusOne,
3445 GprIndex);
3446 }
3447
3448 // fpr index is 1 byte after gpr
3449 SDValue FprPtr = DAG.getNode(ISD::ADD, dl, PtrVT, VAListPtr,
3450 DAG.getConstant(1, dl, MVT::i32));
3451
3452 // fpr
3453 SDValue FprIndex = DAG.getExtLoad(ISD::ZEXTLOAD, dl, MVT::i32, InChain,
3454 FprPtr, MachinePointerInfo(SV), MVT::i8);
3455 InChain = FprIndex.getValue(1);
3456
3457 SDValue RegSaveAreaPtr = DAG.getNode(ISD::ADD, dl, PtrVT, VAListPtr,
3458 DAG.getConstant(8, dl, MVT::i32));
3459
3460 SDValue OverflowAreaPtr = DAG.getNode(ISD::ADD, dl, PtrVT, VAListPtr,
3461 DAG.getConstant(4, dl, MVT::i32));
3462
3463 // areas
3464 SDValue OverflowArea =
3465 DAG.getLoad(MVT::i32, dl, InChain, OverflowAreaPtr, MachinePointerInfo());
3466 InChain = OverflowArea.getValue(1);
3467
3468 SDValue RegSaveArea =
3469 DAG.getLoad(MVT::i32, dl, InChain, RegSaveAreaPtr, MachinePointerInfo());
3470 InChain = RegSaveArea.getValue(1);
3471
3472 // select overflow_area if index > 8
3473 SDValue CC = DAG.getSetCC(dl, MVT::i32, VT.isInteger() ? GprIndex : FprIndex,
3474 DAG.getConstant(8, dl, MVT::i32), ISD::SETLT);
3475
3476 // adjustment constant gpr_index * 4/8
3477 SDValue RegConstant = DAG.getNode(ISD::MUL, dl, MVT::i32,
3478 VT.isInteger() ? GprIndex : FprIndex,
3479 DAG.getConstant(VT.isInteger() ? 4 : 8, dl,
3480 MVT::i32));
3481
3482 // OurReg = RegSaveArea + RegConstant
3483 SDValue OurReg = DAG.getNode(ISD::ADD, dl, PtrVT, RegSaveArea,
3484 RegConstant);
3485
3486 // Floating types are 32 bytes into RegSaveArea
3487 if (VT.isFloatingPoint())
3488 OurReg = DAG.getNode(ISD::ADD, dl, PtrVT, OurReg,
3489 DAG.getConstant(32, dl, MVT::i32));
3490
3491 // increase {f,g}pr_index by 1 (or 2 if VT is i64)
3492 SDValue IndexPlus1 = DAG.getNode(ISD::ADD, dl, MVT::i32,
3493 VT.isInteger() ? GprIndex : FprIndex,
3494 DAG.getConstant(VT == MVT::i64 ? 2 : 1, dl,
3495 MVT::i32));
3496
3497 InChain = DAG.getTruncStore(InChain, dl, IndexPlus1,
3498 VT.isInteger() ? VAListPtr : FprPtr,
3499 MachinePointerInfo(SV), MVT::i8);
3500
3501 // determine if we should load from reg_save_area or overflow_area
3502 SDValue Result = DAG.getNode(ISD::SELECT, dl, PtrVT, CC, OurReg, OverflowArea);
3503
3504 // increase overflow_area by 4/8 if gpr/fpr > 8
3505 SDValue OverflowAreaPlusN = DAG.getNode(ISD::ADD, dl, PtrVT, OverflowArea,
3506 DAG.getConstant(VT.isInteger() ? 4 : 8,
3507 dl, MVT::i32));
3508
3509 OverflowArea = DAG.getNode(ISD::SELECT, dl, MVT::i32, CC, OverflowArea,
3510 OverflowAreaPlusN);
3511
3512 InChain = DAG.getTruncStore(InChain, dl, OverflowArea, OverflowAreaPtr,
3513 MachinePointerInfo(), MVT::i32);
3514
3515 return DAG.getLoad(VT, dl, InChain, Result, MachinePointerInfo());
3516}
3517
3518SDValue PPCTargetLowering::LowerVACOPY(SDValue Op, SelectionDAG &DAG) const {
3519  assert(!Subtarget.isPPC64() && "LowerVACOPY is PPC32 only");
3520
3521 // We have to copy the entire va_list struct:
3522  // 2*sizeof(char) + 2 bytes of padding + 2*sizeof(char*) = 12 bytes
3523 return DAG.getMemcpy(Op.getOperand(0), Op, Op.getOperand(1), Op.getOperand(2),
3524 DAG.getConstant(12, SDLoc(Op), MVT::i32), Align(8),
3525 false, true, false, MachinePointerInfo(),
3526 MachinePointerInfo());
3527}
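// Layout note (added, derived from the offsets used in LowerVAARG above):
// gpr sits at offset 0, fpr at offset 1, two bytes of padding follow,
// overflow_arg_area is at offset 4 and reg_save_area at offset 8, which is
// where the 12-byte copy size comes from.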
3528
3529SDValue PPCTargetLowering::LowerADJUST_TRAMPOLINE(SDValue Op,
3530 SelectionDAG &DAG) const {
3531 if (Subtarget.isAIXABI())
3532 report_fatal_error("ADJUST_TRAMPOLINE operation is not supported on AIX.");
3533
3534 return Op.getOperand(0);
3535}
3536
3537SDValue PPCTargetLowering::LowerINIT_TRAMPOLINE(SDValue Op,
3538 SelectionDAG &DAG) const {
3539 if (Subtarget.isAIXABI())
3540 report_fatal_error("INIT_TRAMPOLINE operation is not supported on AIX.");
3541
3542 SDValue Chain = Op.getOperand(0);
3543 SDValue Trmp = Op.getOperand(1); // trampoline
3544 SDValue FPtr = Op.getOperand(2); // nested function
3545 SDValue Nest = Op.getOperand(3); // 'nest' parameter value
3546 SDLoc dl(Op);
3547
3548 EVT PtrVT = getPointerTy(DAG.getDataLayout());
3549 bool isPPC64 = (PtrVT == MVT::i64);
3550 Type *IntPtrTy = DAG.getDataLayout().getIntPtrType(*DAG.getContext());
3551
3552 TargetLowering::ArgListTy Args;
3553 TargetLowering::ArgListEntry Entry;
3554
3555 Entry.Ty = IntPtrTy;
3556 Entry.Node = Trmp; Args.push_back(Entry);
3557
3558 // TrampSize == (isPPC64 ? 48 : 40);
3559 Entry.Node = DAG.getConstant(isPPC64 ? 48 : 40, dl,
3560 isPPC64 ? MVT::i64 : MVT::i32);
3561 Args.push_back(Entry);
3562
3563 Entry.Node = FPtr; Args.push_back(Entry);
3564 Entry.Node = Nest; Args.push_back(Entry);
3565
3566 // Lower to a call to __trampoline_setup(Trmp, TrampSize, FPtr, ctx_reg)
3567 TargetLowering::CallLoweringInfo CLI(DAG);
3568 CLI.setDebugLoc(dl).setChain(Chain).setLibCallee(
3569 CallingConv::C, Type::getVoidTy(*DAG.getContext()),
3570 DAG.getExternalSymbol("__trampoline_setup", PtrVT), std::move(Args));
3571
3572 std::pair<SDValue, SDValue> CallResult = LowerCallTo(CLI);
3573 return CallResult.second;
3574}
3575
3576SDValue PPCTargetLowering::LowerVASTART(SDValue Op, SelectionDAG &DAG) const {
3577 MachineFunction &MF = DAG.getMachineFunction();
3578 PPCFunctionInfo *FuncInfo = MF.getInfo<PPCFunctionInfo>();
3579 EVT PtrVT = getPointerTy(MF.getDataLayout());
3580
3581 SDLoc dl(Op);
3582
3583 if (Subtarget.isPPC64() || Subtarget.isAIXABI()) {
3584 // vastart just stores the address of the VarArgsFrameIndex slot into the
3585 // memory location argument.
3586 SDValue FR = DAG.getFrameIndex(FuncInfo->getVarArgsFrameIndex(), PtrVT);
3587 const Value *SV = cast<SrcValueSDNode>(Op.getOperand(2))->getValue();
3588 return DAG.getStore(Op.getOperand(0), dl, FR, Op.getOperand(1),
3589 MachinePointerInfo(SV));
3590 }
3591
3592 // For the 32-bit SVR4 ABI we follow the layout of the va_list struct.
3593 // We suppose the given va_list is already allocated.
3594 //
3595 // typedef struct {
3596 // char gpr; /* index into the array of 8 GPRs
3597 // * stored in the register save area
3598 // * gpr=0 corresponds to r3,
3599 // * gpr=1 to r4, etc.
3600 // */
3601 // char fpr; /* index into the array of 8 FPRs
3602 // * stored in the register save area
3603 // * fpr=0 corresponds to f1,
3604 // * fpr=1 to f2, etc.
3605 // */
3606 // char *overflow_arg_area;
3607 // /* location on stack that holds
3608 // * the next overflow argument
3609 // */
3610 // char *reg_save_area;
3611 // /* where r3:r10 and f1:f8 (if saved)
3612 // * are stored
3613 // */
3614 // } va_list[1];
3615
3616 SDValue ArgGPR = DAG.getConstant(FuncInfo->getVarArgsNumGPR(), dl, MVT::i32);
3617 SDValue ArgFPR = DAG.getConstant(FuncInfo->getVarArgsNumFPR(), dl, MVT::i32);
3618 SDValue StackOffsetFI = DAG.getFrameIndex(FuncInfo->getVarArgsStackOffset(),
3619 PtrVT);
3620 SDValue FR = DAG.getFrameIndex(FuncInfo->getVarArgsFrameIndex(),
3621 PtrVT);
3622
3623 uint64_t FrameOffset = PtrVT.getSizeInBits()/8;
3624 SDValue ConstFrameOffset = DAG.getConstant(FrameOffset, dl, PtrVT);
3625
3626 uint64_t StackOffset = PtrVT.getSizeInBits()/8 - 1;
3627 SDValue ConstStackOffset = DAG.getConstant(StackOffset, dl, PtrVT);
3628
3629 uint64_t FPROffset = 1;
3630 SDValue ConstFPROffset = DAG.getConstant(FPROffset, dl, PtrVT);
3631
3632 const Value *SV = cast<SrcValueSDNode>(Op.getOperand(2))->getValue();
3633
3634 // Store first byte : number of int regs
3635 SDValue firstStore =
3636 DAG.getTruncStore(Op.getOperand(0), dl, ArgGPR, Op.getOperand(1),
3637 MachinePointerInfo(SV), MVT::i8);
3638 uint64_t nextOffset = FPROffset;
3639 SDValue nextPtr = DAG.getNode(ISD::ADD, dl, PtrVT, Op.getOperand(1),
3640 ConstFPROffset);
3641
3642 // Store second byte : number of float regs
3643 SDValue secondStore =
3644 DAG.getTruncStore(firstStore, dl, ArgFPR, nextPtr,
3645 MachinePointerInfo(SV, nextOffset), MVT::i8);
3646 nextOffset += StackOffset;
3647 nextPtr = DAG.getNode(ISD::ADD, dl, PtrVT, nextPtr, ConstStackOffset);
3648
3649 // Store second word : arguments given on stack
3650 SDValue thirdStore = DAG.getStore(secondStore, dl, StackOffsetFI, nextPtr,
3651 MachinePointerInfo(SV, nextOffset));
3652 nextOffset += FrameOffset;
3653 nextPtr = DAG.getNode(ISD::ADD, dl, PtrVT, nextPtr, ConstFrameOffset);
3654
3655 // Store third word : arguments given in registers
3656 return DAG.getStore(thirdStore, dl, FR, nextPtr,
3657 MachinePointerInfo(SV, nextOffset));
3658}
3659
3660/// FPR - The set of FP registers that should be allocated for arguments
3661/// on Darwin and AIX.
3662static const MCPhysReg FPR[] = {PPC::F1, PPC::F2, PPC::F3, PPC::F4, PPC::F5,
3663 PPC::F6, PPC::F7, PPC::F8, PPC::F9, PPC::F10,
3664 PPC::F11, PPC::F12, PPC::F13};
3665
3666/// CalculateStackSlotSize - Calculates the size reserved for this argument on
3667/// the stack.
3668static unsigned CalculateStackSlotSize(EVT ArgVT, ISD::ArgFlagsTy Flags,
3669 unsigned PtrByteSize) {
3670 unsigned ArgSize = ArgVT.getStoreSize();
3671 if (Flags.isByVal())
3672 ArgSize = Flags.getByValSize();
3673
3674 // Round up to multiples of the pointer size, except for array members,
3675 // which are always packed.
3676 if (!Flags.isInConsecutiveRegs())
3677 ArgSize = ((ArgSize + PtrByteSize - 1)/PtrByteSize) * PtrByteSize;
3678
3679 return ArgSize;
3680}
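// Worked example (added for clarity): with 8-byte pointers, a 4-byte i32
// argument is rounded up to an 8-byte slot and a 12-byte by-value struct
// to 16 bytes; only consecutive-register array members keep their exact
// size.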
3681
3682/// CalculateStackSlotAlignment - Calculates the alignment of this argument
3683/// on the stack.
3684static Align CalculateStackSlotAlignment(EVT ArgVT, EVT OrigVT,
3685 ISD::ArgFlagsTy Flags,
3686 unsigned PtrByteSize) {
3687 Align Alignment(PtrByteSize);
3688
3689 // Altivec parameters are padded to a 16 byte boundary.
3690 if (ArgVT == MVT::v4f32 || ArgVT == MVT::v4i32 ||
3691 ArgVT == MVT::v8i16 || ArgVT == MVT::v16i8 ||
3692 ArgVT == MVT::v2f64 || ArgVT == MVT::v2i64 ||
3693 ArgVT == MVT::v1i128 || ArgVT == MVT::f128)
3694 Alignment = Align(16);
3695
3696 // ByVal parameters are aligned as requested.
3697 if (Flags.isByVal()) {
3698 auto BVAlign = Flags.getNonZeroByValAlign();
3699 if (BVAlign > PtrByteSize) {
3700 if (BVAlign.value() % PtrByteSize != 0)
3701        llvm_unreachable(
3702            "ByVal alignment is not a multiple of the pointer size");
3703
3704 Alignment = BVAlign;
3705 }
3706 }
3707
3708 // Array members are always packed to their original alignment.
3709 if (Flags.isInConsecutiveRegs()) {
3710 // If the array member was split into multiple registers, the first
3711 // needs to be aligned to the size of the full type. (Except for
3712 // ppcf128, which is only aligned as its f64 components.)
3713 if (Flags.isSplit() && OrigVT != MVT::ppcf128)
3714 Alignment = Align(OrigVT.getStoreSize());
3715 else
3716 Alignment = Align(ArgVT.getStoreSize());
3717 }
3718
3719 return Alignment;
3720}
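// Illustrative note (added sketch): a v4i32 argument is padded to a 16-byte
// boundary, a by-value argument requesting 32-byte alignment keeps that
// alignment, and a split non-ppcf128 array member is aligned to the store
// size of its full original type.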
3721
3722/// CalculateStackSlotUsed - Return whether this argument will use its
3723/// stack slot (instead of being passed in registers). ArgOffset,
3724/// AvailableFPRs, and AvailableVRs must hold the current argument
3725/// position, and will be updated to account for this argument.
3726static bool CalculateStackSlotUsed(EVT ArgVT, EVT OrigVT, ISD::ArgFlagsTy Flags,
3727 unsigned PtrByteSize, unsigned LinkageSize,
3728 unsigned ParamAreaSize, unsigned &ArgOffset,
3729 unsigned &AvailableFPRs,
3730 unsigned &AvailableVRs) {
3731 bool UseMemory = false;
3732
3733 // Respect alignment of argument on the stack.
3734 Align Alignment =
3735 CalculateStackSlotAlignment(ArgVT, OrigVT, Flags, PtrByteSize);
3736 ArgOffset = alignTo(ArgOffset, Alignment);
3737 // If there's no space left in the argument save area, we must
3738 // use memory (this check also catches zero-sized arguments).
3739 if (ArgOffset >= LinkageSize + ParamAreaSize)
3740 UseMemory = true;
3741
3742 // Allocate argument on the stack.
3743 ArgOffset += CalculateStackSlotSize(ArgVT, Flags, PtrByteSize);
3744 if (Flags.isInConsecutiveRegsLast())
3745 ArgOffset = ((ArgOffset + PtrByteSize - 1)/PtrByteSize) * PtrByteSize;
3746 // If we overran the argument save area, we must use memory
3747 // (this check catches arguments passed partially in memory)
3748 if (ArgOffset > LinkageSize + ParamAreaSize)
3749 UseMemory = true;
3750
3751 // However, if the argument is actually passed in an FPR or a VR,
3752 // we don't use memory after all.
3753 if (!Flags.isByVal()) {
3754 if (ArgVT == MVT::f32 || ArgVT == MVT::f64)
3755 if (AvailableFPRs > 0) {
3756 --AvailableFPRs;
3757 return false;
3758 }
3759 if (ArgVT == MVT::v4f32 || ArgVT == MVT::v4i32 ||
3760 ArgVT == MVT::v8i16 || ArgVT == MVT::v16i8 ||
3761 ArgVT == MVT::v2f64 || ArgVT == MVT::v2i64 ||
3762 ArgVT == MVT::v1i128 || ArgVT == MVT::f128)
3763 if (AvailableVRs > 0) {
3764 --AvailableVRs;
3765 return false;
3766 }
3767 }
3768
3769 return UseMemory;
3770}
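
To see how the two "use memory" checks above interact, the following self-contained sketch mirrors only the offset bookkeeping for doubleword-sized integer arguments (editorial illustration; the 32-byte linkage size and 64-byte parameter save area are assumed ELFv2-style values, and the FPR/VR carve-outs for float and vector arguments are deliberately omitted):

#include <cassert>

// Apply the two checks from CalculateStackSlotUsed to one 8-byte slot at a time.
static bool usesMemory(unsigned &ArgOffset, unsigned LinkageSize,
                       unsigned ParamAreaSize, unsigned PtrByteSize) {
  bool UseMemory = ArgOffset >= LinkageSize + ParamAreaSize; // no space left
  ArgOffset += PtrByteSize;                                  // allocate the slot
  if (ArgOffset > LinkageSize + ParamAreaSize)               // overran the area
    UseMemory = true;
  return UseMemory;
}

int main() {
  unsigned Offset = 32;                    // assumed linkage size
  for (int i = 0; i < 8; ++i)              // the first eight i64s fit in the save area
    assert(!usesMemory(Offset, 32, 64, 8));
  assert(usesMemory(Offset, 32, 64, 8));   // the ninth argument spills to memory
  return 0;
}
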
3771
3772/// EnsureStackAlignment - Round stack frame size up from NumBytes to
3773/// ensure minimum alignment required for target.
3774static unsigned EnsureStackAlignment(const PPCFrameLowering *Lowering,
3775 unsigned NumBytes) {
3776 return alignTo(NumBytes, Lowering->getStackAlign());
3777}
3778
3779SDValue PPCTargetLowering::LowerFormalArguments(
3780 SDValue Chain, CallingConv::ID CallConv, bool isVarArg,
3781 const SmallVectorImpl<ISD::InputArg> &Ins, const SDLoc &dl,
3782 SelectionDAG &DAG, SmallVectorImpl<SDValue> &InVals) const {
3783 if (Subtarget.isAIXABI())
3784 return LowerFormalArguments_AIX(Chain, CallConv, isVarArg, Ins, dl, DAG,
3785 InVals);
3786 if (Subtarget.is64BitELFABI())
3787 return LowerFormalArguments_64SVR4(Chain, CallConv, isVarArg, Ins, dl, DAG,
3788 InVals);
3789  assert(Subtarget.is32BitELFABI());
3790 return LowerFormalArguments_32SVR4(Chain, CallConv, isVarArg, Ins, dl, DAG,
3791 InVals);
3792}
3793
3794SDValue PPCTargetLowering::LowerFormalArguments_32SVR4(
3795 SDValue Chain, CallingConv::ID CallConv, bool isVarArg,
3796 const SmallVectorImpl<ISD::InputArg> &Ins, const SDLoc &dl,
3797 SelectionDAG &DAG, SmallVectorImpl<SDValue> &InVals) const {
3798
3799 // 32-bit SVR4 ABI Stack Frame Layout:
3800 // +-----------------------------------+
3801 // +--> | Back chain |
3802 // | +-----------------------------------+
3803 // | | Floating-point register save area |
3804 // | +-----------------------------------+
3805 // | | General register save area |
3806 // | +-----------------------------------+
3807 // | | CR save word |
3808 // | +-----------------------------------+
3809 // | | VRSAVE save word |
3810 // | +-----------------------------------+
3811 // | | Alignment padding |
3812 // | +-----------------------------------+
3813 // | | Vector register save area |
3814 // | +-----------------------------------+
3815 // | | Local variable space |
3816 // | +-----------------------------------+
3817 // | | Parameter list area |
3818 // | +-----------------------------------+
3819 // | | LR save word |
3820 // | +-----------------------------------+
3821 // SP--> +--- | Back chain |
3822 // +-----------------------------------+
3823 //
3824 // Specifications:
3825 // System V Application Binary Interface PowerPC Processor Supplement
3826 // AltiVec Technology Programming Interface Manual
3827
3828 MachineFunction &MF = DAG.getMachineFunction();
3829 MachineFrameInfo &MFI = MF.getFrameInfo();
3830 PPCFunctionInfo *FuncInfo = MF.getInfo<PPCFunctionInfo>();
3831
3832 EVT PtrVT = getPointerTy(MF.getDataLayout());
3833 // Potential tail calls could cause overwriting of argument stack slots.
3834 bool isImmutable = !(getTargetMachine().Options.GuaranteedTailCallOpt &&
3835 (CallConv == CallingConv::Fast));
3836 const Align PtrAlign(4);
3837
3838 // Assign locations to all of the incoming arguments.
3839 SmallVector<CCValAssign, 16> ArgLocs;
3840 PPCCCState CCInfo(CallConv, isVarArg, DAG.getMachineFunction(), ArgLocs,
3841 *DAG.getContext());
3842
3843 // Reserve space for the linkage area on the stack.
3844 unsigned LinkageSize = Subtarget.getFrameLowering()->getLinkageSize();
3845 CCInfo.AllocateStack(LinkageSize, PtrAlign);
3846 if (useSoftFloat())
3847 CCInfo.PreAnalyzeFormalArguments(Ins);
3848
3849 CCInfo.AnalyzeFormalArguments(Ins, CC_PPC32_SVR4);
3850 CCInfo.clearWasPPCF128();
3851
3852 for (unsigned i = 0, e = ArgLocs.size(); i != e; ++i) {
3853 CCValAssign &VA = ArgLocs[i];
3854
3855 // Arguments stored in registers.
3856 if (VA.isRegLoc()) {
3857 const TargetRegisterClass *RC;
3858 EVT ValVT = VA.getValVT();
3859
3860 switch (ValVT.getSimpleVT().SimpleTy) {
3861 default:
3862          llvm_unreachable("ValVT not supported by formal arguments Lowering");
3863 case MVT::i1:
3864 case MVT::i32:
3865 RC = &PPC::GPRCRegClass;
3866 break;
3867 case MVT::f32:
3868 if (Subtarget.hasP8Vector())
3869 RC = &PPC::VSSRCRegClass;
3870 else if (Subtarget.hasSPE())
3871 RC = &PPC::GPRCRegClass;
3872 else
3873 RC = &PPC::F4RCRegClass;
3874 break;
3875 case MVT::f64:
3876 if (Subtarget.hasVSX())
3877 RC = &PPC::VSFRCRegClass;
3878 else if (Subtarget.hasSPE())
3879 // SPE passes doubles in GPR pairs.
3880 RC = &PPC::GPRCRegClass;
3881 else
3882 RC = &PPC::F8RCRegClass;
3883 break;
3884 case MVT::v16i8:
3885 case MVT::v8i16:
3886 case MVT::v4i32:
3887 RC = &PPC::VRRCRegClass;
3888 break;
3889 case MVT::v4f32:
3890 RC = &PPC::VRRCRegClass;
3891 break;
3892 case MVT::v2f64:
3893 case MVT::v2i64:
3894 RC = &PPC::VRRCRegClass;
3895 break;
3896 }
3897
3898 SDValue ArgValue;
3899 // Transform the arguments stored in physical registers into
3900 // virtual ones.
3901 if (VA.getLocVT() == MVT::f64 && Subtarget.hasSPE()) {
3902        assert(i + 1 < e && "No second half of double precision argument");
3903 unsigned RegLo = MF.addLiveIn(VA.getLocReg(), RC);
3904 unsigned RegHi = MF.addLiveIn(ArgLocs[++i].getLocReg(), RC);
3905 SDValue ArgValueLo = DAG.getCopyFromReg(Chain, dl, RegLo, MVT::i32);
3906 SDValue ArgValueHi = DAG.getCopyFromReg(Chain, dl, RegHi, MVT::i32);
3907 if (!Subtarget.isLittleEndian())
3908 std::swap (ArgValueLo, ArgValueHi);
3909 ArgValue = DAG.getNode(PPCISD::BUILD_SPE64, dl, MVT::f64, ArgValueLo,
3910 ArgValueHi);
3911 } else {
3912 unsigned Reg = MF.addLiveIn(VA.getLocReg(), RC);
3913 ArgValue = DAG.getCopyFromReg(Chain, dl, Reg,
3914 ValVT == MVT::i1 ? MVT::i32 : ValVT);
3915 if (ValVT == MVT::i1)
3916 ArgValue = DAG.getNode(ISD::TRUNCATE, dl, MVT::i1, ArgValue);
3917 }
3918
3919 InVals.push_back(ArgValue);
3920 } else {
3921 // Argument stored in memory.
3922      assert(VA.isMemLoc());
3923
3924      // Get the extended size of the argument type on the stack
3925 unsigned ArgSize = VA.getLocVT().getStoreSize();
3926 // Get the actual size of the argument type
3927 unsigned ObjSize = VA.getValVT().getStoreSize();
3928 unsigned ArgOffset = VA.getLocMemOffset();
3929 // Stack objects in PPC32 are right justified.
3930 ArgOffset += ArgSize - ObjSize;
3931 int FI = MFI.CreateFixedObject(ArgSize, ArgOffset, isImmutable);
3932
3933 // Create load nodes to retrieve arguments from the stack.
3934 SDValue FIN = DAG.getFrameIndex(FI, PtrVT);
3935 InVals.push_back(
3936 DAG.getLoad(VA.getValVT(), dl, Chain, FIN, MachinePointerInfo()));
3937 }
3938 }
3939
3940 // Assign locations to all of the incoming aggregate by value arguments.
3941 // Aggregates passed by value are stored in the local variable space of the
3942 // caller's stack frame, right above the parameter list area.
3943 SmallVector<CCValAssign, 16> ByValArgLocs;
3944 CCState CCByValInfo(CallConv, isVarArg, DAG.getMachineFunction(),
3945 ByValArgLocs, *DAG.getContext());
3946
3947 // Reserve stack space for the allocations in CCInfo.
3948 CCByValInfo.AllocateStack(CCInfo.getNextStackOffset(), PtrAlign);
3949
3950 CCByValInfo.AnalyzeFormalArguments(Ins, CC_PPC32_SVR4_ByVal);
3951
3952 // Area that is at least reserved in the caller of this function.
3953 unsigned MinReservedArea = CCByValInfo.getNextStackOffset();
3954 MinReservedArea = std::max(MinReservedArea, LinkageSize);
3955
3956 // Set the size that is at least reserved in caller of this function. Tail
3957 // call optimized function's reserved stack space needs to be aligned so that
3958 // taking the difference between two stack areas will result in an aligned
3959 // stack.
3960 MinReservedArea =
3961 EnsureStackAlignment(Subtarget.getFrameLowering(), MinReservedArea);
3962 FuncInfo->setMinReservedArea(MinReservedArea);
3963
3964 SmallVector<SDValue, 8> MemOps;
3965
3966 // If the function takes variable number of arguments, make a frame index for
3967 // the start of the first vararg value... for expansion of llvm.va_start.
3968 if (isVarArg) {
3969 static const MCPhysReg GPArgRegs[] = {
3970 PPC::R3, PPC::R4, PPC::R5, PPC::R6,
3971 PPC::R7, PPC::R8, PPC::R9, PPC::R10,
3972 };
3973 const unsigned NumGPArgRegs = array_lengthof(GPArgRegs);
3974
3975 static const MCPhysReg FPArgRegs[] = {
3976 PPC::F1, PPC::F2, PPC::F3, PPC::F4, PPC::F5, PPC::F6, PPC::F7,
3977 PPC::F8
3978 };
3979 unsigned NumFPArgRegs = array_lengthof(FPArgRegs);
3980
3981 if (useSoftFloat() || hasSPE())
3982 NumFPArgRegs = 0;
3983
3984 FuncInfo->setVarArgsNumGPR(CCInfo.getFirstUnallocated(GPArgRegs));
3985 FuncInfo->setVarArgsNumFPR(CCInfo.getFirstUnallocated(FPArgRegs));
3986
3987 // Make room for NumGPArgRegs and NumFPArgRegs.
3988 int Depth = NumGPArgRegs * PtrVT.getSizeInBits()/8 +
3989 NumFPArgRegs * MVT(MVT::f64).getSizeInBits()/8;
3990
3991 FuncInfo->setVarArgsStackOffset(
3992 MFI.CreateFixedObject(PtrVT.getSizeInBits()/8,
3993 CCInfo.getNextStackOffset(), true));
3994
3995 FuncInfo->setVarArgsFrameIndex(
3996 MFI.CreateStackObject(Depth, Align(8), false));
3997 SDValue FIN = DAG.getFrameIndex(FuncInfo->getVarArgsFrameIndex(), PtrVT);
3998
3999 // The fixed integer arguments of a variadic function are stored to the
4000 // VarArgsFrameIndex on the stack so that they may be loaded by
4001 // dereferencing the result of va_next.
4002 for (unsigned GPRIndex = 0; GPRIndex != NumGPArgRegs; ++GPRIndex) {
4003 // Get an existing live-in vreg, or add a new one.
4004 unsigned VReg = MF.getRegInfo().getLiveInVirtReg(GPArgRegs[GPRIndex]);
4005 if (!VReg)
4006 VReg = MF.addLiveIn(GPArgRegs[GPRIndex], &PPC::GPRCRegClass);
4007
4008 SDValue Val = DAG.getCopyFromReg(Chain, dl, VReg, PtrVT);
4009 SDValue Store =
4010 DAG.getStore(Val.getValue(1), dl, Val, FIN, MachinePointerInfo());
4011 MemOps.push_back(Store);
4012 // Increment the address by four for the next argument to store
4013 SDValue PtrOff = DAG.getConstant(PtrVT.getSizeInBits()/8, dl, PtrVT);
4014 FIN = DAG.getNode(ISD::ADD, dl, PtrOff.getValueType(), FIN, PtrOff);
4015 }
4016
4017 // FIXME 32-bit SVR4: We only need to save FP argument registers if CR bit 6
4018 // is set.
4019 // The double arguments are stored to the VarArgsFrameIndex
4020 // on the stack.
4021 for (unsigned FPRIndex = 0; FPRIndex != NumFPArgRegs; ++FPRIndex) {
4022 // Get an existing live-in vreg, or add a new one.
4023 unsigned VReg = MF.getRegInfo().getLiveInVirtReg(FPArgRegs[FPRIndex]);
4024 if (!VReg)
4025 VReg = MF.addLiveIn(FPArgRegs[FPRIndex], &PPC::F8RCRegClass);
4026
4027 SDValue Val = DAG.getCopyFromReg(Chain, dl, VReg, MVT::f64);
4028 SDValue Store =
4029 DAG.getStore(Val.getValue(1), dl, Val, FIN, MachinePointerInfo());
4030 MemOps.push_back(Store);
4031 // Increment the address by eight for the next argument to store
4032 SDValue PtrOff = DAG.getConstant(MVT(MVT::f64).getSizeInBits()/8, dl,
4033 PtrVT);
4034 FIN = DAG.getNode(ISD::ADD, dl, PtrOff.getValueType(), FIN, PtrOff);
4035 }
4036 }
4037
4038 if (!MemOps.empty())
4039 Chain = DAG.getNode(ISD::TokenFactor, dl, MVT::Other, MemOps);
4040
4041 return Chain;
4042}
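
For the varargs block above, the size (Depth) of the register save object follows directly from the register lists in the function: eight 4-byte GPR slots plus, when hard-float FPRs are used, eight 8-byte FPR slots. A small editorial check of that arithmetic (the 4-byte pointer size corresponds to the 32-bit PtrVT used here):

#include <cassert>

int main() {
  const unsigned NumGPArgRegs = 8, NumFPArgRegs = 8;
  const unsigned PtrBytes = 4, F64Bytes = 8;
  // Room for all integer and floating-point argument registers...
  assert(NumGPArgRegs * PtrBytes + NumFPArgRegs * F64Bytes == 96);
  // ...or only the integer ones when soft-float/SPE sets NumFPArgRegs to zero.
  assert(NumGPArgRegs * PtrBytes == 32);
  return 0;
}
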
4043
4044// PPC64 passes i8, i16, and i32 values in i64 registers. Promote
4045// value to MVT::i64 and then truncate to the correct register size.
4046SDValue PPCTargetLowering::extendArgForPPC64(ISD::ArgFlagsTy Flags,
4047 EVT ObjectVT, SelectionDAG &DAG,
4048 SDValue ArgVal,
4049 const SDLoc &dl) const {
4050 if (Flags.isSExt())
4051 ArgVal = DAG.getNode(ISD::AssertSext, dl, MVT::i64, ArgVal,
4052 DAG.getValueType(ObjectVT));
4053 else if (Flags.isZExt())
4054 ArgVal = DAG.getNode(ISD::AssertZext, dl, MVT::i64, ArgVal,
4055 DAG.getValueType(ObjectVT));
4056
4057 return DAG.getNode(ISD::TRUNCATE, dl, ObjectVT, ArgVal);
4058}
4059
4060SDValue PPCTargetLowering::LowerFormalArguments_64SVR4(
4061 SDValue Chain, CallingConv::ID CallConv, bool isVarArg,
4062 const SmallVectorImpl<ISD::InputArg> &Ins, const SDLoc &dl,
4063 SelectionDAG &DAG, SmallVectorImpl<SDValue> &InVals) const {
4064 // TODO: add description of PPC stack frame format, or at least some docs.
4065 //
4066 bool isELFv2ABI = Subtarget.isELFv2ABI();
4067 bool isLittleEndian = Subtarget.isLittleEndian();
4068 MachineFunction &MF = DAG.getMachineFunction();
4069 MachineFrameInfo &MFI = MF.getFrameInfo();
4070 PPCFunctionInfo *FuncInfo = MF.getInfo<PPCFunctionInfo>();
4071
4072  assert(!(CallConv == CallingConv::Fast && isVarArg) &&
4073         "fastcc not supported on varargs functions");
4074
4075 EVT PtrVT = getPointerTy(MF.getDataLayout());
4076 // Potential tail calls could cause overwriting of argument stack slots.
4077 bool isImmutable = !(getTargetMachine().Options.GuaranteedTailCallOpt &&
4078 (CallConv == CallingConv::Fast));
4079 unsigned PtrByteSize = 8;
4080 unsigned LinkageSize = Subtarget.getFrameLowering()->getLinkageSize();
4081
4082 static const MCPhysReg GPR[] = {
4083 PPC::X3, PPC::X4, PPC::X5, PPC::X6,
4084 PPC::X7, PPC::X8, PPC::X9, PPC::X10,
4085 };
4086 static const MCPhysReg VR[] = {
4087 PPC::V2, PPC::V3, PPC::V4, PPC::V5, PPC::V6, PPC::V7, PPC::V8,
4088 PPC::V9, PPC::V10, PPC::V11, PPC::V12, PPC::V13
4089 };
4090
4091 const unsigned Num_GPR_Regs = array_lengthof(GPR);
4092 const unsigned Num_FPR_Regs = useSoftFloat() ? 0 : 13;
4093 const unsigned Num_VR_Regs = array_lengthof(VR);
4094
4095 // Do a first pass over the arguments to determine whether the ABI
4096 // guarantees that our caller has allocated the parameter save area
4097 // on its stack frame. In the ELFv1 ABI, this is always the case;
4098 // in the ELFv2 ABI, it is true if this is a vararg function or if
4099 // any parameter is located in a stack slot.
4100
4101 bool HasParameterArea = !isELFv2ABI || isVarArg;
4102 unsigned ParamAreaSize = Num_GPR_Regs * PtrByteSize;
4103 unsigned NumBytes = LinkageSize;
4104 unsigned AvailableFPRs = Num_FPR_Regs;
4105 unsigned AvailableVRs = Num_VR_Regs;
4106 for (unsigned i = 0, e = Ins.size(); i != e; ++i) {
4107 if (Ins[i].Flags.isNest())
4108 continue;
4109
4110 if (CalculateStackSlotUsed(Ins[i].VT, Ins[i].ArgVT, Ins[i].Flags,
4111 PtrByteSize, LinkageSize, ParamAreaSize,
4112 NumBytes, AvailableFPRs, AvailableVRs))
4113 HasParameterArea = true;
4114 }
4115
4116 // Add DAG nodes to load the arguments or copy them out of registers. On
4117 // entry to a function on PPC, the arguments start after the linkage area,
4118 // although the first ones are often in registers.
4119
4120 unsigned ArgOffset = LinkageSize;
4121 unsigned GPR_idx = 0, FPR_idx = 0, VR_idx = 0;
4122 SmallVector<SDValue, 8> MemOps;
4123 Function::const_arg_iterator FuncArg = MF.getFunction().arg_begin();
4124 unsigned CurArgIdx = 0;
4125 for (unsigned ArgNo = 0, e = Ins.size(); ArgNo != e; ++ArgNo) {
4126 SDValue ArgVal;
4127 bool needsLoad = false;
4128 EVT ObjectVT = Ins[ArgNo].VT;
4129 EVT OrigVT = Ins[ArgNo].ArgVT;
4130 unsigned ObjSize = ObjectVT.getStoreSize();
4131 unsigned ArgSize = ObjSize;
4132 ISD::ArgFlagsTy Flags = Ins[ArgNo].Flags;
4133 if (Ins[ArgNo].isOrigArg()) {
4134 std::advance(FuncArg, Ins[ArgNo].getOrigArgIndex() - CurArgIdx);
4135 CurArgIdx = Ins[ArgNo].getOrigArgIndex();
4136 }
4137 // We re-align the argument offset for each argument, except when using the
4138 // fast calling convention, when we need to make sure we do that only when
4139 // we'll actually use a stack slot.
4140 unsigned CurArgOffset;
4141 Align Alignment;
4142 auto ComputeArgOffset = [&]() {
4143 /* Respect alignment of argument on the stack. */
4144 Alignment =
4145 CalculateStackSlotAlignment(ObjectVT, OrigVT, Flags, PtrByteSize);
4146 ArgOffset = alignTo(ArgOffset, Alignment);
4147 CurArgOffset = ArgOffset;
4148 };
4149
4150 if (CallConv != CallingConv::Fast) {
4151 ComputeArgOffset();
4152
4153 /* Compute GPR index associated with argument offset. */
4154 GPR_idx = (ArgOffset - LinkageSize) / PtrByteSize;
4155 GPR_idx = std::min(GPR_idx, Num_GPR_Regs);
4156 }
4157
4158 // FIXME the codegen can be much improved in some cases.
4159 // We do not have to keep everything in memory.
4160 if (Flags.isByVal()) {
4161      assert(Ins[ArgNo].isOrigArg() && "Byval arguments cannot be implicit");
4162
4163 if (CallConv == CallingConv::Fast)
4164 ComputeArgOffset();
4165
4166      // ObjSize is the true size; ArgSize is rounded up to a multiple of registers.
4167 ObjSize = Flags.getByValSize();
4168 ArgSize = ((ObjSize + PtrByteSize - 1)/PtrByteSize) * PtrByteSize;
4169 // Empty aggregate parameters do not take up registers. Examples:
4170 // struct { } a;
4171 // union { } b;
4172 // int c[0];
4173 // etc. However, we have to provide a place-holder in InVals, so
4174 // pretend we have an 8-byte item at the current address for that
4175 // purpose.
4176 if (!ObjSize) {
4177 int FI = MFI.CreateFixedObject(PtrByteSize, ArgOffset, true);
4178 SDValue FIN = DAG.getFrameIndex(FI, PtrVT);
4179 InVals.push_back(FIN);
4180 continue;
4181 }
4182
4183 // Create a stack object covering all stack doublewords occupied
4184 // by the argument. If the argument is (fully or partially) on
4185 // the stack, or if the argument is fully in registers but the
4186      // caller has allocated the parameter save area anyway, we can refer
4187 // directly to the caller's stack frame. Otherwise, create a
4188 // local copy in our own frame.
4189 int FI;
4190 if (HasParameterArea ||
4191 ArgSize + ArgOffset > LinkageSize + Num_GPR_Regs * PtrByteSize)
4192 FI = MFI.CreateFixedObject(ArgSize, ArgOffset, false, true);
4193 else
4194 FI = MFI.CreateStackObject(ArgSize, Alignment, false);
4195 SDValue FIN = DAG.getFrameIndex(FI, PtrVT);
4196
4197 // Handle aggregates smaller than 8 bytes.
4198 if (ObjSize < PtrByteSize) {
4199 // The value of the object is its address, which differs from the
4200 // address of the enclosing doubleword on big-endian systems.
4201 SDValue Arg = FIN;
4202 if (!isLittleEndian) {
4203 SDValue ArgOff = DAG.getConstant(PtrByteSize - ObjSize, dl, PtrVT);
4204 Arg = DAG.getNode(ISD::ADD, dl, ArgOff.getValueType(), Arg, ArgOff);
4205 }
4206 InVals.push_back(Arg);
4207
4208 if (GPR_idx != Num_GPR_Regs) {
4209 unsigned VReg = MF.addLiveIn(GPR[GPR_idx++], &PPC::G8RCRegClass);
4210 FuncInfo->addLiveInAttr(VReg, Flags);
4211 SDValue Val = DAG.getCopyFromReg(Chain, dl, VReg, PtrVT);
4212 SDValue Store;
4213
4214 if (ObjSize==1 || ObjSize==2 || ObjSize==4) {
4215 EVT ObjType = (ObjSize == 1 ? MVT::i8 :
4216 (ObjSize == 2 ? MVT::i16 : MVT::i32));
4217 Store = DAG.getTruncStore(Val.getValue(1), dl, Val, Arg,
4218 MachinePointerInfo(&*FuncArg), ObjType);
4219 } else {
4220 // For sizes that don't fit a truncating store (3, 5, 6, 7),
4221 // store the whole register as-is to the parameter save area
4222 // slot.
4223 Store = DAG.getStore(Val.getValue(1), dl, Val, FIN,
4224 MachinePointerInfo(&*FuncArg));
4225 }
4226
4227 MemOps.push_back(Store);
4228 }
4229 // Whether we copied from a register or not, advance the offset
4230 // into the parameter save area by a full doubleword.
4231 ArgOffset += PtrByteSize;
4232 continue;
4233 }
4234
4235 // The value of the object is its address, which is the address of
4236 // its first stack doubleword.
4237 InVals.push_back(FIN);
4238
4239 // Store whatever pieces of the object are in registers to memory.
4240 for (unsigned j = 0; j < ArgSize; j += PtrByteSize) {
4241 if (GPR_idx == Num_GPR_Regs)
4242 break;
4243
4244 unsigned VReg = MF.addLiveIn(GPR[GPR_idx], &PPC::G8RCRegClass);
4245 FuncInfo->addLiveInAttr(VReg, Flags);
4246 SDValue Val = DAG.getCopyFromReg(Chain, dl, VReg, PtrVT);
4247 SDValue Addr = FIN;
4248 if (j) {
4249 SDValue Off = DAG.getConstant(j, dl, PtrVT);
4250 Addr = DAG.getNode(ISD::ADD, dl, Off.getValueType(), Addr, Off);
4251 }
4252 SDValue Store = DAG.getStore(Val.getValue(1), dl, Val, Addr,
4253 MachinePointerInfo(&*FuncArg, j));
4254 MemOps.push_back(Store);
4255 ++GPR_idx;
4256 }
4257 ArgOffset += ArgSize;
4258 continue;
4259 }
4260
4261 switch (ObjectVT.getSimpleVT().SimpleTy) {
4262    default: llvm_unreachable("Unhandled argument type!");
4263 case MVT::i1:
4264 case MVT::i32:
4265 case MVT::i64:
4266 if (Flags.isNest()) {
4267 // The 'nest' parameter, if any, is passed in R11.
4268 unsigned VReg = MF.addLiveIn(PPC::X11, &PPC::G8RCRegClass);
4269 ArgVal = DAG.getCopyFromReg(Chain, dl, VReg, MVT::i64);
4270
4271 if (ObjectVT == MVT::i32 || ObjectVT == MVT::i1)
4272 ArgVal = extendArgForPPC64(Flags, ObjectVT, DAG, ArgVal, dl);
4273
4274 break;
4275 }
4276
4277 // These can be scalar arguments or elements of an integer array type
4278 // passed directly. Clang may use those instead of "byval" aggregate
4279 // types to avoid forcing arguments to memory unnecessarily.
4280 if (GPR_idx != Num_GPR_Regs) {
4281 unsigned VReg = MF.addLiveIn(GPR[GPR_idx++], &PPC::G8RCRegClass);
4282 FuncInfo->addLiveInAttr(VReg, Flags);
4283 ArgVal = DAG.getCopyFromReg(Chain, dl, VReg, MVT::i64);
4284
4285 if (ObjectVT == MVT::i32 || ObjectVT == MVT::i1)
4286 // PPC64 passes i8, i16, and i32 values in i64 registers. Promote
4287 // value to MVT::i64 and then truncate to the correct register size.
4288 ArgVal = extendArgForPPC64(Flags, ObjectVT, DAG, ArgVal, dl);
4289 } else {
4290 if (CallConv == CallingConv::Fast)
4291 ComputeArgOffset();
4292
4293 needsLoad = true;
4294 ArgSize = PtrByteSize;
4295 }
4296 if (CallConv != CallingConv::Fast || needsLoad)
4297 ArgOffset += 8;
4298 break;
4299
4300 case MVT::f32:
4301 case MVT::f64:
4302 // These can be scalar arguments or elements of a float array type
4303      // passed directly. The latter are used to implement ELFv2 homogeneous
4304 // float aggregates.
4305 if (FPR_idx != Num_FPR_Regs) {
4306 unsigned VReg;
4307
4308 if (ObjectVT == MVT::f32)
4309 VReg = MF.addLiveIn(FPR[FPR_idx],
4310 Subtarget.hasP8Vector()
4311 ? &PPC::VSSRCRegClass
4312 : &PPC::F4RCRegClass);
4313 else
4314 VReg = MF.addLiveIn(FPR[FPR_idx], Subtarget.hasVSX()
4315 ? &PPC::VSFRCRegClass
4316 : &PPC::F8RCRegClass);
4317
4318 ArgVal = DAG.getCopyFromReg(Chain, dl, VReg, ObjectVT);
4319 ++FPR_idx;
4320 } else if (GPR_idx != Num_GPR_Regs && CallConv != CallingConv::Fast) {
4321 // FIXME: We may want to re-enable this for CallingConv::Fast on the P8
4322 // once we support fp <-> gpr moves.
4323
4324 // This can only ever happen in the presence of f32 array types,
4325 // since otherwise we never run out of FPRs before running out
4326 // of GPRs.
4327 unsigned VReg = MF.addLiveIn(GPR[GPR_idx++], &PPC::G8RCRegClass);
4328 FuncInfo->addLiveInAttr(VReg, Flags);
4329 ArgVal = DAG.getCopyFromReg(Chain, dl, VReg, MVT::i64);
4330
4331 if (ObjectVT == MVT::f32) {
4332 if ((ArgOffset % PtrByteSize) == (isLittleEndian ? 4 : 0))
4333 ArgVal = DAG.getNode(ISD::SRL, dl, MVT::i64, ArgVal,
4334 DAG.getConstant(32, dl, MVT::i32));
4335 ArgVal = DAG.getNode(ISD::TRUNCATE, dl, MVT::i32, ArgVal);
4336 }
4337
4338 ArgVal = DAG.getNode(ISD::BITCAST, dl, ObjectVT, ArgVal);
4339 } else {
4340 if (CallConv == CallingConv::Fast)
4341 ComputeArgOffset();
4342
4343 needsLoad = true;
4344 }
4345
4346 // When passing an array of floats, the array occupies consecutive
4347 // space in the argument area; only round up to the next doubleword
4348 // at the end of the array. Otherwise, each float takes 8 bytes.
4349 if (CallConv != CallingConv::Fast || needsLoad) {
4350 ArgSize = Flags.isInConsecutiveRegs() ? ObjSize : PtrByteSize;
4351 ArgOffset += ArgSize;
4352 if (Flags.isInConsecutiveRegsLast())
4353 ArgOffset = ((ArgOffset + PtrByteSize - 1)/PtrByteSize) * PtrByteSize;
4354 }
4355 break;
4356 case MVT::v4f32:
4357 case MVT::v4i32:
4358 case MVT::v8i16:
4359 case MVT::v16i8:
4360 case MVT::v2f64:
4361 case MVT::v2i64:
4362 case MVT::v1i128:
4363 case MVT::f128:
4364 // These can be scalar arguments or elements of a vector array type
4365      // passed directly. The latter are used to implement ELFv2 homogeneous
4366 // vector aggregates.
4367 if (VR_idx != Num_VR_Regs) {
4368 unsigned VReg = MF.addLiveIn(VR[VR_idx], &PPC::VRRCRegClass);
4369 ArgVal = DAG.getCopyFromReg(Chain, dl, VReg, ObjectVT);
4370 ++VR_idx;
4371 } else {
4372 if (CallConv == CallingConv::Fast)
4373 ComputeArgOffset();
4374 needsLoad = true;
4375 }
4376 if (CallConv != CallingConv::Fast || needsLoad)
4377 ArgOffset += 16;
4378 break;
4379 }
4380
4381 // We need to load the argument to a virtual register if we determined
4382 // above that we ran out of physical registers of the appropriate type.
4383 if (needsLoad) {
4384 if (ObjSize < ArgSize && !isLittleEndian)
4385 CurArgOffset += ArgSize - ObjSize;
4386 int FI = MFI.CreateFixedObject(ObjSize, CurArgOffset, isImmutable);
4387 SDValue FIN = DAG.getFrameIndex(FI, PtrVT);
4388 ArgVal = DAG.getLoad(ObjectVT, dl, Chain, FIN, MachinePointerInfo());
4389 }
4390
4391 InVals.push_back(ArgVal);
4392 }
4393
4394 // Area that is at least reserved in the caller of this function.
4395 unsigned MinReservedArea;
4396 if (HasParameterArea)
4397 MinReservedArea = std::max(ArgOffset, LinkageSize + 8 * PtrByteSize);
4398 else
4399 MinReservedArea = LinkageSize;
4400
4401 // Set the size that is at least reserved in caller of this function. Tail
4402 // call optimized functions' reserved stack space needs to be aligned so that
4403 // taking the difference between two stack areas will result in an aligned
4404 // stack.
4405 MinReservedArea =
4406 EnsureStackAlignment(Subtarget.getFrameLowering(), MinReservedArea);
4407 FuncInfo->setMinReservedArea(MinReservedArea);
4408
4409 // If the function takes variable number of arguments, make a frame index for
4410 // the start of the first vararg value... for expansion of llvm.va_start.
4411  // As the ELFv2 ABI spec puts it:
4412 // C programs that are intended to be *portable* across different compilers
4413 // and architectures must use the header file <stdarg.h> to deal with variable
4414 // argument lists.
4415 if (isVarArg && MFI.hasVAStart()) {
4416 int Depth = ArgOffset;
4417
4418 FuncInfo->setVarArgsFrameIndex(
4419 MFI.CreateFixedObject(PtrByteSize, Depth, true));
4420 SDValue FIN = DAG.getFrameIndex(FuncInfo->getVarArgsFrameIndex(), PtrVT);
4421
4422 // If this function is vararg, store any remaining integer argument regs
4423 // to their spots on the stack so that they may be loaded by dereferencing
4424 // the result of va_next.
4425 for (GPR_idx = (ArgOffset - LinkageSize) / PtrByteSize;
4426 GPR_idx < Num_GPR_Regs; ++GPR_idx) {
4427 unsigned VReg = MF.addLiveIn(GPR[GPR_idx], &PPC::G8RCRegClass);
4428 SDValue Val = DAG.getCopyFromReg(Chain, dl, VReg, PtrVT);
4429 SDValue Store =
4430 DAG.getStore(Val.getValue(1), dl, Val, FIN, MachinePointerInfo());
4431 MemOps.push_back(Store);
4432 // Increment the address by four for the next argument to store
4433 SDValue PtrOff = DAG.getConstant(PtrByteSize, dl, PtrVT);
4434 FIN = DAG.getNode(ISD::ADD, dl, PtrOff.getValueType(), FIN, PtrOff);
4435 }
4436 }
4437
4438 if (!MemOps.empty())
4439 Chain = DAG.getNode(ISD::TokenFactor, dl, MVT::Other, MemOps);
4440
4441 return Chain;
4442}
4443
4444/// CalculateTailCallSPDiff - Get the amount the stack pointer has to be
4445/// adjusted to accommodate the arguments for the tailcall.
4446static int CalculateTailCallSPDiff(SelectionDAG& DAG, bool isTailCall,
4447 unsigned ParamSize) {
4448
4449 if (!isTailCall) return 0;
4450
4451 PPCFunctionInfo *FI = DAG.getMachineFunction().getInfo<PPCFunctionInfo>();
4452 unsigned CallerMinReservedArea = FI->getMinReservedArea();
4453 int SPDiff = (int)CallerMinReservedArea - (int)ParamSize;
4454 // Remember only if the new adjustment is bigger.
4455 if (SPDiff < FI->getTailCallSPDelta())
4456 FI->setTailCallSPDelta(SPDiff);
4457
4458 return SPDiff;
4459}
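
SPDiff above is negative whenever the callee needs more parameter area than the caller reserved, and only the most negative adjustment is remembered across calls. A minimal sketch with assumed sizes (editorial, not taken from this file):

#include <algorithm>
#include <cassert>

int main() {
  const int CallerMinReservedArea = 64;            // assumed caller reservation
  const int SPDiff1 = CallerMinReservedArea - 96;  // callee needs 96 bytes -> -32
  const int SPDiff2 = CallerMinReservedArea - 112; // another callee needs 112 -> -48
  int TailCallSPDelta = 0;
  TailCallSPDelta = std::min(TailCallSPDelta, SPDiff1);
  TailCallSPDelta = std::min(TailCallSPDelta, SPDiff2);
  assert(TailCallSPDelta == -48);                  // only the larger adjustment is kept
  return 0;
}
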
4460
4461static bool isFunctionGlobalAddress(SDValue Callee);
4462
4463static bool callsShareTOCBase(const Function *Caller, SDValue Callee,
4464 const TargetMachine &TM) {
4465 // It does not make sense to call callsShareTOCBase() with a caller that
4466 // is PC Relative since PC Relative callers do not have a TOC.
4467#ifndef NDEBUG
4468 const PPCSubtarget *STICaller = &TM.getSubtarget<PPCSubtarget>(*Caller);
4469  assert(!STICaller->isUsingPCRelativeCalls() &&
4470         "PC Relative callers do not have a TOC and cannot share a TOC Base");
4471#endif
4472
4473 // Callee is either a GlobalAddress or an ExternalSymbol. ExternalSymbols
4474 // don't have enough information to determine if the caller and callee share
4475 // the same TOC base, so we have to pessimistically assume they don't for
4476 // correctness.
4477 GlobalAddressSDNode *G = dyn_cast<GlobalAddressSDNode>(Callee);
4478 if (!G)
4479 return false;
4480
4481 const GlobalValue *GV = G->getGlobal();
4482
4483 // If the callee is preemptable, then the static linker will use a plt-stub
4484 // which saves the toc to the stack, and needs a nop after the call
4485 // instruction to convert to a toc-restore.
4486 if (!TM.shouldAssumeDSOLocal(*Caller->getParent(), GV))
4487 return false;
4488
4489 // Functions with PC Relative enabled may clobber the TOC in the same DSO.
4490 // We may need a TOC restore in the situation where the caller requires a
4491 // valid TOC but the callee is PC Relative and does not.
4492 const Function *F = dyn_cast<Function>(GV);
4493 const GlobalAlias *Alias = dyn_cast<GlobalAlias>(GV);
4494
4495 // If we have an Alias we can try to get the function from there.
4496 if (Alias) {
4497 const GlobalObject *GlobalObj = Alias->getBaseObject();
4498 F = dyn_cast<Function>(GlobalObj);
4499 }
4500
4501 // If we still have no valid function pointer we do not have enough
4502 // information to determine if the callee uses PC Relative calls so we must
4503 // assume that it does.
4504 if (!F)
4505 return false;
4506
4507 // If the callee uses PC Relative we cannot guarantee that the callee won't
4508 // clobber the TOC of the caller and so we must assume that the two
4509 // functions do not share a TOC base.
4510 const PPCSubtarget *STICallee = &TM.getSubtarget<PPCSubtarget>(*F);
4511 if (STICallee->isUsingPCRelativeCalls())
4512 return false;
4513
4514 // If the GV is not a strong definition then we need to assume it can be
4515 // replaced by another function at link time. The function that replaces
4516 // it may not share the same TOC as the caller since the callee may be
4517 // replaced by a PC Relative version of the same function.
4518 if (!GV->isStrongDefinitionForLinker())
4519 return false;
4520
4521 // The medium and large code models are expected to provide a sufficiently
4522 // large TOC to provide all data addressing needs of a module with a
4523 // single TOC.
4524 if (CodeModel::Medium == TM.getCodeModel() ||
4525 CodeModel::Large == TM.getCodeModel())
4526 return true;
4527
4528 // Any explicitly-specified sections and section prefixes must also match.
4529 // Also, if we're using -ffunction-sections, then each function is always in
4530 // a different section (the same is true for COMDAT functions).
4531 if (TM.getFunctionSections() || GV->hasComdat() || Caller->hasComdat() ||
4532 GV->getSection() != Caller->getSection())
4533 return false;
4534 if (const auto *F = dyn_cast<Function>(GV)) {
4535 if (F->getSectionPrefix() != Caller->getSectionPrefix())
4536 return false;
4537 }
4538
4539 return true;
4540}
4541
4542static bool
4543needStackSlotPassParameters(const PPCSubtarget &Subtarget,
4544 const SmallVectorImpl<ISD::OutputArg> &Outs) {
4545  assert(Subtarget.is64BitELFABI());
4546
4547 const unsigned PtrByteSize = 8;
4548 const unsigned LinkageSize = Subtarget.getFrameLowering()->getLinkageSize();
4549
4550 static const MCPhysReg GPR[] = {
4551 PPC::X3, PPC::X4, PPC::X5, PPC::X6,
4552 PPC::X7, PPC::X8, PPC::X9, PPC::X10,
4553 };
4554 static const MCPhysReg VR[] = {
4555 PPC::V2, PPC::V3, PPC::V4, PPC::V5, PPC::V6, PPC::V7, PPC::V8,
4556 PPC::V9, PPC::V10, PPC::V11, PPC::V12, PPC::V13
4557 };
4558
4559 const unsigned NumGPRs = array_lengthof(GPR);
4560 const unsigned NumFPRs = 13;
4561 const unsigned NumVRs = array_lengthof(VR);
4562 const unsigned ParamAreaSize = NumGPRs * PtrByteSize;
4563
4564 unsigned NumBytes = LinkageSize;
4565 unsigned AvailableFPRs = NumFPRs;
4566 unsigned AvailableVRs = NumVRs;
4567
4568 for (const ISD::OutputArg& Param : Outs) {
4569 if (Param.Flags.isNest()) continue;
4570
4571 if (CalculateStackSlotUsed(Param.VT, Param.ArgVT, Param.Flags, PtrByteSize,
4572 LinkageSize, ParamAreaSize, NumBytes,
4573 AvailableFPRs, AvailableVRs))
4574 return true;
4575 }
4576 return false;
4577}
4578
4579static bool hasSameArgumentList(const Function *CallerFn, const CallBase &CB) {
4580 if (CB.arg_size() != CallerFn->arg_size())
4581 return false;
4582
4583 auto CalleeArgIter = CB.arg_begin();
4584 auto CalleeArgEnd = CB.arg_end();
4585 Function::const_arg_iterator CallerArgIter = CallerFn->arg_begin();
4586
4587 for (; CalleeArgIter != CalleeArgEnd; ++CalleeArgIter, ++CallerArgIter) {
4588 const Value* CalleeArg = *CalleeArgIter;
4589 const Value* CallerArg = &(*CallerArgIter);
4590 if (CalleeArg == CallerArg)
4591 continue;
4592
4593 // e.g. @caller([4 x i64] %a, [4 x i64] %b) {
4594 // tail call @callee([4 x i64] undef, [4 x i64] %b)
4595 // }
4596 // 1st argument of callee is undef and has the same type as caller.
4597 if (CalleeArg->getType() == CallerArg->getType() &&
4598 isa<UndefValue>(CalleeArg))
4599 continue;
4600
4601 return false;
4602 }
4603
4604 return true;
4605}
4606
4607// Returns true if TCO is possible between the caller's and callee's
4608// calling conventions.
4609static bool
4610areCallingConvEligibleForTCO_64SVR4(CallingConv::ID CallerCC,
4611 CallingConv::ID CalleeCC) {
4612 // Tail calls are possible with fastcc and ccc.
4613 auto isTailCallableCC = [] (CallingConv::ID CC){
4614 return CC == CallingConv::C || CC == CallingConv::Fast;
4615 };
4616 if (!isTailCallableCC(CallerCC) || !isTailCallableCC(CalleeCC))
4617 return false;
4618
4619 // We can safely tail call both fastcc and ccc callees from a c calling
4620 // convention caller. If the caller is fastcc, we may have less stack space
4621 // than a non-fastcc caller with the same signature so disable tail-calls in
4622 // that case.
4623 return CallerCC == CallingConv::C || CallerCC == CalleeCC;
4624}
4625
4626bool PPCTargetLowering::IsEligibleForTailCallOptimization_64SVR4(
4627 SDValue Callee, CallingConv::ID CalleeCC, const CallBase *CB, bool isVarArg,
4628 const SmallVectorImpl<ISD::OutputArg> &Outs,
4629 const SmallVectorImpl<ISD::InputArg> &Ins, SelectionDAG &DAG) const {
4630 bool TailCallOpt = getTargetMachine().Options.GuaranteedTailCallOpt;
4631
4632 if (DisableSCO && !TailCallOpt) return false;
4633
4634 // Variadic argument functions are not supported.
4635 if (isVarArg) return false;
4636
4637 auto &Caller = DAG.getMachineFunction().getFunction();
4638 // Check that the calling conventions are compatible for tco.
4639 if (!areCallingConvEligibleForTCO_64SVR4(Caller.getCallingConv(), CalleeCC))
4640 return false;
4641
4642  // A caller that contains any byval parameter is not supported.
4643 if (any_of(Ins, [](const ISD::InputArg &IA) { return IA.Flags.isByVal(); }))
4644 return false;
4645
4646  // A callee that contains any byval parameter is not supported either.
4647 // Note: This is a quick work around, because in some cases, e.g.
4648 // caller's stack size > callee's stack size, we are still able to apply
4649 // sibling call optimization. For example, gcc is able to do SCO for caller1
4650 // in the following example, but not for caller2.
4651 // struct test {
4652 // long int a;
4653 // char ary[56];
4654 // } gTest;
4655 // __attribute__((noinline)) int callee(struct test v, struct test *b) {
4656 // b->a = v.a;
4657 // return 0;
4658 // }
4659 // void caller1(struct test a, struct test c, struct test *b) {
4660 // callee(gTest, b); }
4661 // void caller2(struct test *b) { callee(gTest, b); }
4662 if (any_of(Outs, [](const ISD::OutputArg& OA) { return OA.Flags.isByVal(); }))
4663 return false;
4664
4665 // If callee and caller use different calling conventions, we cannot pass
4666 // parameters on stack since offsets for the parameter area may be different.
4667 if (Caller.getCallingConv() != CalleeCC &&
4668 needStackSlotPassParameters(Subtarget, Outs))
4669 return false;
4670
4671 // All variants of 64-bit ELF ABIs without PC-Relative addressing require that
4672 // the caller and callee share the same TOC for TCO/SCO. If the caller and
4673 // callee potentially have different TOC bases then we cannot tail call since
4674 // we need to restore the TOC pointer after the call.
4675 // ref: https://bugzilla.mozilla.org/show_bug.cgi?id=973977
4676 // We cannot guarantee this for indirect calls or calls to external functions.
4677 // When PC-Relative addressing is used, the concept of the TOC is no longer
4678 // applicable so this check is not required.
4679 // Check first for indirect calls.
4680 if (!Subtarget.isUsingPCRelativeCalls() &&
4681 !isFunctionGlobalAddress(Callee) && !isa<ExternalSymbolSDNode>(Callee))
4682 return false;
4683
4684 // Check if we share the TOC base.
4685 if (!Subtarget.isUsingPCRelativeCalls() &&
4686 !callsShareTOCBase(&Caller, Callee, getTargetMachine()))
4687 return false;
4688
4689 // TCO allows altering callee ABI, so we don't have to check further.
4690 if (CalleeCC == CallingConv::Fast && TailCallOpt)
4691 return true;
4692
4693 if (DisableSCO) return false;
4694
4695  // If the callee uses the same argument list as the caller, then we can
4696  // apply SCO in this case. If not, then we need to check whether the callee
4697  // needs stack for passing arguments.
4698 // PC Relative tail calls may not have a CallBase.
4699 // If there is no CallBase we cannot verify if we have the same argument
4700 // list so assume that we don't have the same argument list.
4701 if (CB && !hasSameArgumentList(&Caller, *CB) &&
4702 needStackSlotPassParameters(Subtarget, Outs))
4703 return false;
4704 else if (!CB && needStackSlotPassParameters(Subtarget, Outs))
4705 return false;
4706
4707 return true;
4708}
4709
4710/// IsEligibleForTailCallOptimization - Check whether the call is eligible
4711/// for tail call optimization. Targets which want to do tail call
4712/// optimization should implement this function.
4713bool
4714PPCTargetLowering::IsEligibleForTailCallOptimization(SDValue Callee,
4715 CallingConv::ID CalleeCC,
4716 bool isVarArg,
4717 const SmallVectorImpl<ISD::InputArg> &Ins,
4718 SelectionDAG& DAG) const {
4719 if (!getTargetMachine().Options.GuaranteedTailCallOpt)
4720 return false;
4721
4722 // Variable argument functions are not supported.
4723 if (isVarArg)
4724 return false;
4725
4726 MachineFunction &MF = DAG.getMachineFunction();
4727 CallingConv::ID CallerCC = MF.getFunction().getCallingConv();
4728 if (CalleeCC == CallingConv::Fast && CallerCC == CalleeCC) {
4729 // Functions containing by val parameters are not supported.
4730 for (unsigned i = 0; i != Ins.size(); i++) {
4731 ISD::ArgFlagsTy Flags = Ins[i].Flags;
4732 if (Flags.isByVal()) return false;
4733 }
4734
4735 // Non-PIC/GOT tail calls are supported.
4736 if (getTargetMachine().getRelocationModel() != Reloc::PIC_)
4737 return true;
4738
4739 // At the moment we can only do local tail calls (in same module, hidden
4740 // or protected) if we are generating PIC.
4741 if (GlobalAddressSDNode *G = dyn_cast<GlobalAddressSDNode>(Callee))
4742 return G->getGlobal()->hasHiddenVisibility()
4743 || G->getGlobal()->hasProtectedVisibility();
4744 }
4745
4746 return false;
4747}
4748
4749/// isBLACompatibleAddress - Return the immediate to use if the specified
4750/// 32-bit value is representable in the immediate field of a BxA instruction.
4751static SDNode *isBLACompatibleAddress(SDValue Op, SelectionDAG &DAG) {
4752 ConstantSDNode *C = dyn_cast<ConstantSDNode>(Op);
4753 if (!C) return nullptr;
4754
4755 int Addr = C->getZExtValue();
4756 if ((Addr & 3) != 0 || // Low 2 bits are implicitly zero.
4757 SignExtend32<26>(Addr) != Addr)
4758 return nullptr; // Top 6 bits have to be sext of immediate.
4759
4760 return DAG
4761 .getConstant(
4762 (int)C->getZExtValue() >> 2, SDLoc(Op),
4763 DAG.getTargetLoweringInfo().getPointerTy(DAG.getDataLayout()))
4764 .getNode();
4765}
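
The test above accepts only word-aligned addresses that survive sign-extension from the 26-bit branch field. A hedged, self-contained restatement of that condition (fitsBLA is a hypothetical helper used purely for illustration):

#include <cassert>
#include <cstdint>

// True when the low two bits are zero and the value fits the signed 26-bit field.
static bool fitsBLA(int64_t Addr) {
  int64_t SExt26 = Addr & 0x3FFFFFF;
  if (SExt26 & 0x2000000)   // bit 25 is the sign bit of the 26-bit field
    SExt26 -= 0x4000000;    // sign-extend
  return (Addr & 3) == 0 && SExt26 == Addr;
}

int main() {
  assert(fitsBLA(0x1000000));   // word-aligned and within the signed 26-bit range
  assert(!fitsBLA(0x2000001));  // misaligned and outside the range
  return 0;
}
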
4766
4767namespace {
4768
4769struct TailCallArgumentInfo {
4770 SDValue Arg;
4771 SDValue FrameIdxOp;
4772 int FrameIdx = 0;
4773
4774 TailCallArgumentInfo() = default;
4775};
4776
4777} // end anonymous namespace
4778
4779/// StoreTailCallArgumentsToStackSlot - Stores arguments to their stack slot.
4780static void StoreTailCallArgumentsToStackSlot(
4781 SelectionDAG &DAG, SDValue Chain,
4782 const SmallVectorImpl<TailCallArgumentInfo> &TailCallArgs,
4783 SmallVectorImpl<SDValue> &MemOpChains, const SDLoc &dl) {
4784 for (unsigned i = 0, e = TailCallArgs.size(); i != e; ++i) {
4785 SDValue Arg = TailCallArgs[i].Arg;
4786 SDValue FIN = TailCallArgs[i].FrameIdxOp;
4787 int FI = TailCallArgs[i].FrameIdx;
4788 // Store relative to framepointer.
4789 MemOpChains.push_back(DAG.getStore(
4790 Chain, dl, Arg, FIN,
4791 MachinePointerInfo::getFixedStack(DAG.getMachineFunction(), FI)));
4792 }
4793}
4794
4795/// EmitTailCallStoreFPAndRetAddr - Move the frame pointer and return address to
4796/// the appropriate stack slot for the tail call optimized function call.
4797static SDValue EmitTailCallStoreFPAndRetAddr(SelectionDAG &DAG, SDValue Chain,
4798 SDValue OldRetAddr, SDValue OldFP,
4799 int SPDiff, const SDLoc &dl) {
4800 if (SPDiff) {
4801 // Calculate the new stack slot for the return address.
4802 MachineFunction &MF = DAG.getMachineFunction();
4803 const PPCSubtarget &Subtarget = MF.getSubtarget<PPCSubtarget>();
4804 const PPCFrameLowering *FL = Subtarget.getFrameLowering();
4805 bool isPPC64 = Subtarget.isPPC64();
4806 int SlotSize = isPPC64 ? 8 : 4;
4807 int NewRetAddrLoc = SPDiff + FL->getReturnSaveOffset();
4808 int NewRetAddr = MF.getFrameInfo().CreateFixedObject(SlotSize,
4809 NewRetAddrLoc, true);
4810 EVT VT = isPPC64 ? MVT::i64 : MVT::i32;
4811 SDValue NewRetAddrFrIdx = DAG.getFrameIndex(NewRetAddr, VT);
4812 Chain = DAG.getStore(Chain, dl, OldRetAddr, NewRetAddrFrIdx,
4813 MachinePointerInfo::getFixedStack(MF, NewRetAddr));
4814 }
4815 return Chain;
4816}
4817
4818/// CalculateTailCallArgDest - Remember Argument for later processing. Calculate
4819/// the position of the argument.
4820static void
4821CalculateTailCallArgDest(SelectionDAG &DAG, MachineFunction &MF, bool isPPC64,
4822 SDValue Arg, int SPDiff, unsigned ArgOffset,
4823 SmallVectorImpl<TailCallArgumentInfo>& TailCallArguments) {
4824 int Offset = ArgOffset + SPDiff;
4825 uint32_t OpSize = (Arg.getValueSizeInBits() + 7) / 8;
4826 int FI = MF.getFrameInfo().CreateFixedObject(OpSize, Offset, true);
4827 EVT VT = isPPC64 ? MVT::i64 : MVT::i32;
4828 SDValue FIN = DAG.getFrameIndex(FI, VT);
4829 TailCallArgumentInfo Info;
4830 Info.Arg = Arg;
4831 Info.FrameIdxOp = FIN;
4832 Info.FrameIdx = FI;
4833 TailCallArguments.push_back(Info);
4834}
4835
4836/// EmitTCFPAndRetAddrLoad - Emit load from frame pointer and return address
4837/// stack slot. Returns the chain as result and the loaded frame pointers in
4838/// LROpOut/FPOpout. Used when tail calling.
4839SDValue PPCTargetLowering::EmitTailCallLoadFPAndRetAddr(
4840 SelectionDAG &DAG, int SPDiff, SDValue Chain, SDValue &LROpOut,
4841 SDValue &FPOpOut, const SDLoc &dl) const {
4842 if (SPDiff) {
4843 // Load the LR and FP stack slot for later adjusting.
4844 EVT VT = Subtarget.isPPC64() ? MVT::i64 : MVT::i32;
4845 LROpOut = getReturnAddrFrameIndex(DAG);
4846 LROpOut = DAG.getLoad(VT, dl, Chain, LROpOut, MachinePointerInfo());
4847 Chain = SDValue(LROpOut.getNode(), 1);
4848 }
4849 return Chain;
4850}
4851
4852/// CreateCopyOfByValArgument - Make a copy of an aggregate at address specified
4853/// by "Src" to address "Dst" of size "Size". Alignment information is
4854/// specified by the specific parameter attribute. The copy will be passed as
4855/// a byval function parameter.
4856/// Sometimes what we are copying is the end of a larger object, the part that
4857/// does not fit in registers.
4858static SDValue CreateCopyOfByValArgument(SDValue Src, SDValue Dst,
4859 SDValue Chain, ISD::ArgFlagsTy Flags,
4860 SelectionDAG &DAG, const SDLoc &dl) {
4861 SDValue SizeNode = DAG.getConstant(Flags.getByValSize(), dl, MVT::i32);
4862 return DAG.getMemcpy(Chain, dl, Dst, Src, SizeNode,
4863 Flags.getNonZeroByValAlign(), false, false, false,
4864 MachinePointerInfo(), MachinePointerInfo());
4865}
4866
4867/// LowerMemOpCallTo - Store the argument to the stack or remember it in case of
4868/// tail calls.
4869static void LowerMemOpCallTo(
4870 SelectionDAG &DAG, MachineFunction &MF, SDValue Chain, SDValue Arg,
4871 SDValue PtrOff, int SPDiff, unsigned ArgOffset, bool isPPC64,
4872 bool isTailCall, bool isVector, SmallVectorImpl<SDValue> &MemOpChains,
4873 SmallVectorImpl<TailCallArgumentInfo> &TailCallArguments, const SDLoc &dl) {
4874 EVT PtrVT = DAG.getTargetLoweringInfo().getPointerTy(DAG.getDataLayout());
4875 if (!isTailCall) {
4876 if (isVector) {
4877 SDValue StackPtr;
4878 if (isPPC64)
4879 StackPtr = DAG.getRegister(PPC::X1, MVT::i64);
4880 else
4881 StackPtr = DAG.getRegister(PPC::R1, MVT::i32);
4882 PtrOff = DAG.getNode(ISD::ADD, dl, PtrVT, StackPtr,
4883 DAG.getConstant(ArgOffset, dl, PtrVT));
4884 }
4885 MemOpChains.push_back(
4886 DAG.getStore(Chain, dl, Arg, PtrOff, MachinePointerInfo()));
4887 // Calculate and remember argument location.
4888 } else CalculateTailCallArgDest(DAG, MF, isPPC64, Arg, SPDiff, ArgOffset,
4889 TailCallArguments);
4890}
4891
4892static void
4893PrepareTailCall(SelectionDAG &DAG, SDValue &InFlag, SDValue &Chain,
4894 const SDLoc &dl, int SPDiff, unsigned NumBytes, SDValue LROp,
4895 SDValue FPOp,
4896 SmallVectorImpl<TailCallArgumentInfo> &TailCallArguments) {
4897 // Emit a sequence of copyto/copyfrom virtual registers for arguments that
4898 // might overwrite each other in case of tail call optimization.
4899 SmallVector<SDValue, 8> MemOpChains2;
4900 // Do not flag preceding copytoreg stuff together with the following stuff.
4901 InFlag = SDValue();
4902 StoreTailCallArgumentsToStackSlot(DAG, Chain, TailCallArguments,
4903 MemOpChains2, dl);
4904 if (!MemOpChains2.empty())
4905 Chain = DAG.getNode(ISD::TokenFactor, dl, MVT::Other, MemOpChains2);
4906
4907 // Store the return address to the appropriate stack slot.
4908 Chain = EmitTailCallStoreFPAndRetAddr(DAG, Chain, LROp, FPOp, SPDiff, dl);
4909
4910 // Emit callseq_end just before tailcall node.
4911 Chain = DAG.getCALLSEQ_END(Chain, DAG.getIntPtrConstant(NumBytes, dl, true),
4912 DAG.getIntPtrConstant(0, dl, true), InFlag, dl);
4913 InFlag = Chain.getValue(1);
4914}
4915
4916// Is this global address that of a function that can be called by name? (as
4917// opposed to something that must hold a descriptor for an indirect call).
4918static bool isFunctionGlobalAddress(SDValue Callee) {
4919 if (GlobalAddressSDNode *G = dyn_cast<GlobalAddressSDNode>(Callee)) {
4920 if (Callee.getOpcode() == ISD::GlobalTLSAddress ||
4921 Callee.getOpcode() == ISD::TargetGlobalTLSAddress)
4922 return false;
4923
4924 return G->getGlobal()->getValueType()->isFunctionTy();
4925 }
4926
4927 return false;
4928}
4929
4930SDValue PPCTargetLowering::LowerCallResult(
4931 SDValue Chain, SDValue InFlag, CallingConv::ID CallConv, bool isVarArg,
4932 const SmallVectorImpl<ISD::InputArg> &Ins, const SDLoc &dl,
4933 SelectionDAG &DAG, SmallVectorImpl<SDValue> &InVals) const {
4934 SmallVector<CCValAssign, 16> RVLocs;
4935 CCState CCRetInfo(CallConv, isVarArg, DAG.getMachineFunction(), RVLocs,
4936 *DAG.getContext());
4937
4938 CCRetInfo.AnalyzeCallResult(
4939 Ins, (Subtarget.isSVR4ABI() && CallConv == CallingConv::Cold)
4940 ? RetCC_PPC_Cold
4941 : RetCC_PPC);
4942
4943 // Copy all of the result registers out of their specified physreg.
4944 for (unsigned i = 0, e = RVLocs.size(); i != e; ++i) {
4945 CCValAssign &VA = RVLocs[i];
4946    assert(VA.isRegLoc() && "Can only return in registers!");
4947
4948 SDValue Val;
4949
4950 if (Subtarget.hasSPE() && VA.getLocVT() == MVT::f64) {
4951 SDValue Lo = DAG.getCopyFromReg(Chain, dl, VA.getLocReg(), MVT::i32,
4952 InFlag);
4953 Chain = Lo.getValue(1);
4954 InFlag = Lo.getValue(2);
4955 VA = RVLocs[++i]; // skip ahead to next loc
4956 SDValue Hi = DAG.getCopyFromReg(Chain, dl, VA.getLocReg(), MVT::i32,
4957 InFlag);
4958 Chain = Hi.getValue(1);
4959 InFlag = Hi.getValue(2);
4960 if (!Subtarget.isLittleEndian())
4961 std::swap (Lo, Hi);
4962 Val = DAG.getNode(PPCISD::BUILD_SPE64, dl, MVT::f64, Lo, Hi);
4963 } else {
4964 Val = DAG.getCopyFromReg(Chain, dl,
4965 VA.getLocReg(), VA.getLocVT(), InFlag);
4966 Chain = Val.getValue(1);
4967 InFlag = Val.getValue(2);
4968 }
4969
4970 switch (VA.getLocInfo()) {
4971    default: llvm_unreachable("Unknown loc info!");
4972 case CCValAssign::Full: break;
4973 case CCValAssign::AExt:
4974 Val = DAG.getNode(ISD::TRUNCATE, dl, VA.getValVT(), Val);
4975 break;
4976 case CCValAssign::ZExt:
4977 Val = DAG.getNode(ISD::AssertZext, dl, VA.getLocVT(), Val,
4978 DAG.getValueType(VA.getValVT()));
4979 Val = DAG.getNode(ISD::TRUNCATE, dl, VA.getValVT(), Val);
4980 break;
4981 case CCValAssign::SExt:
4982 Val = DAG.getNode(ISD::AssertSext, dl, VA.getLocVT(), Val,
4983 DAG.getValueType(VA.getValVT()));
4984 Val = DAG.getNode(ISD::TRUNCATE, dl, VA.getValVT(), Val);
4985 break;
4986 }
4987
4988 InVals.push_back(Val);
4989 }
4990
4991 return Chain;
4992}
4993
4994static bool isIndirectCall(const SDValue &Callee, SelectionDAG &DAG,
4995 const PPCSubtarget &Subtarget, bool isPatchPoint) {
4996 // PatchPoint calls are not indirect.
4997 if (isPatchPoint)
4998 return false;
4999
5000 if (isFunctionGlobalAddress(Callee) || isa<ExternalSymbolSDNode>(Callee))
5001 return false;
5002
5003  // Darwin and 32-bit ELF can use a BLA. The descriptor-based ABIs cannot
5004  // because the immediate function pointer points to a descriptor instead of
5005 // a function entry point. The ELFv2 ABI cannot use a BLA because the function
5006 // pointer immediate points to the global entry point, while the BLA would
5007 // need to jump to the local entry point (see rL211174).
5008 if (!Subtarget.usesFunctionDescriptors() && !Subtarget.isELFv2ABI() &&
5009 isBLACompatibleAddress(Callee, DAG))
5010 return false;
5011
5012 return true;
5013}
5014
5015// AIX and 64-bit ELF ABIs w/o PCRel require a TOC save/restore around calls.
5016static inline bool isTOCSaveRestoreRequired(const PPCSubtarget &Subtarget) {
5017 return Subtarget.isAIXABI() ||
5018 (Subtarget.is64BitELFABI() && !Subtarget.isUsingPCRelativeCalls());
5019}
5020
5021static unsigned getCallOpcode(PPCTargetLowering::CallFlags CFlags,
5022 const Function &Caller,
5023 const SDValue &Callee,
5024 const PPCSubtarget &Subtarget,
5025 const TargetMachine &TM) {
5026 if (CFlags.IsTailCall)
5027 return PPCISD::TC_RETURN;
5028
5029 // This is a call through a function pointer.
5030 if (CFlags.IsIndirect) {
5031    // AIX and the 64-bit ELF ABIs need to maintain the TOC pointer across
5032 // indirect calls. The save of the caller's TOC pointer to the stack will be
5033 // inserted into the DAG as part of call lowering. The restore of the TOC
5034 // pointer is modeled by using a pseudo instruction for the call opcode that
5035 // represents the 2 instruction sequence of an indirect branch and link,
5036    // immediately followed by a load of the TOC pointer from the stack save
5037 // slot into gpr2. For 64-bit ELFv2 ABI with PCRel, do not restore the TOC
5038 // as it is not saved or used.
5039 return isTOCSaveRestoreRequired(Subtarget) ? PPCISD::BCTRL_LOAD_TOC
5040 : PPCISD::BCTRL;
5041 }
5042
5043 if (Subtarget.isUsingPCRelativeCalls()) {
5044    assert(Subtarget.is64BitELFABI() && "PC Relative is only on ELF ABI.");
5045 return PPCISD::CALL_NOTOC;
5046 }
5047
5048  // The ABIs that maintain a TOC pointer across calls need to have a nop
5049 // immediately following the call instruction if the caller and callee may
5050 // have different TOC bases. At link time if the linker determines the calls
5051 // may not share a TOC base, the call is redirected to a trampoline inserted
5052 // by the linker. The trampoline will (among other things) save the caller's
5053 // TOC pointer at an ABI designated offset in the linkage area and the linker
5054 // will rewrite the nop to be a load of the TOC pointer from the linkage area
5055 // into gpr2.
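// An illustrative before/after of the call site described above (a sketch,
// not from the source; the exact linkage-area offset is ABI-dependent):
//   before linking:    bl callee
//                      nop
//   after redirect:    bl <linker-inserted trampoline>
//                      ld 2, <TOC offset in linkage area>(1)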
5056 if (Subtarget.isAIXABI() || Subtarget.is64BitELFABI())
5057 return callsShareTOCBase(&Caller, Callee, TM) ? PPCISD::CALL
5058 : PPCISD::CALL_NOP;
5059
5060 return PPCISD::CALL;
5061}
5062
5063static SDValue transformCallee(const SDValue &Callee, SelectionDAG &DAG,
5064 const SDLoc &dl, const PPCSubtarget &Subtarget) {
5065 if (!Subtarget.usesFunctionDescriptors() && !Subtarget.isELFv2ABI())
5066 if (SDNode *Dest = isBLACompatibleAddress(Callee, DAG))
5067 return SDValue(Dest, 0);
5068
5069 // Returns true if the callee is local, and false otherwise.
5070 auto isLocalCallee = [&]() {
5071 const GlobalAddressSDNode *G = dyn_cast<GlobalAddressSDNode>(Callee);
5072 const Module *Mod = DAG.getMachineFunction().getFunction().getParent();
5073 const GlobalValue *GV = G ? G->getGlobal() : nullptr;
5074
5075 return DAG.getTarget().shouldAssumeDSOLocal(*Mod, GV) &&
5076 !dyn_cast_or_null<GlobalIFunc>(GV);
5077 };
5078
5079 // The PLT is only used in 32-bit ELF PIC mode. Attempting to use the PLT in
5080 // a static relocation model causes some versions of GNU LD (2.17.50, at
5081 // least) to force BSS-PLT, instead of secure-PLT, even if all objects are
5082 // built with secure-PLT.
5083 bool UsePlt =
5084 Subtarget.is32BitELFABI() && !isLocalCallee() &&
5085 Subtarget.getTargetMachine().getRelocationModel() == Reloc::PIC_;
5086
5087 const auto getAIXFuncEntryPointSymbolSDNode = [&](const GlobalValue *GV) {
5088 const TargetMachine &TM = Subtarget.getTargetMachine();
5089 const TargetLoweringObjectFile *TLOF = TM.getObjFileLowering();
5090 MCSymbolXCOFF *S =
5091 cast<MCSymbolXCOFF>(TLOF->getFunctionEntryPointSymbol(GV, TM));
5092
5093 MVT PtrVT = DAG.getTargetLoweringInfo().getPointerTy(DAG.getDataLayout());
5094 return DAG.getMCSymbol(S, PtrVT);
5095 };
5096
5097 if (isFunctionGlobalAddress(Callee)) {
5098 const GlobalValue *GV = cast<GlobalAddressSDNode>(Callee)->getGlobal();
5099
5100 if (Subtarget.isAIXABI()) {
5101 assert(!isa<GlobalIFunc>(GV) && "IFunc is not supported on AIX.");
5102 return getAIXFuncEntryPointSymbolSDNode(GV);
5103 }
5104 return DAG.getTargetGlobalAddress(GV, dl, Callee.getValueType(), 0,
5105 UsePlt ? PPCII::MO_PLT : 0);
5106 }
5107
5108 if (ExternalSymbolSDNode *S = dyn_cast<ExternalSymbolSDNode>(Callee)) {
5109 const char *SymName = S->getSymbol();
5110 if (Subtarget.isAIXABI()) {
5111 // If there exists a user-declared function whose name is the same as the
5112 // ExternalSymbol's, then we pick up the user-declared version.
5113 const Module *Mod = DAG.getMachineFunction().getFunction().getParent();
5114 if (const Function *F =
5115 dyn_cast_or_null<Function>(Mod->getNamedValue(SymName)))
5116 return getAIXFuncEntryPointSymbolSDNode(F);
5117
5118 // On AIX, direct function calls reference the symbol for the function's
5119 // entry point, which is named by prepending a "." before the function's
5120 // C-linkage name. A Qualname is returned here because an external
5121 // function entry point is a csect with XTY_ER property.
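// For example (an illustration of the naming convention above, not an
// addition to the lowering itself), an external call to "memcpy" would
// reference the entry-point csect symbol ".memcpy".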
5122 const auto getExternalFunctionEntryPointSymbol = [&](StringRef SymName) {
5123 auto &Context = DAG.getMachineFunction().getMMI().getContext();
5124 MCSectionXCOFF *Sec = Context.getXCOFFSection(
5125 (Twine(".") + Twine(SymName)).str(), SectionKind::getMetadata(),
5126 XCOFF::CsectProperties(XCOFF::XMC_PR, XCOFF::XTY_ER));
5127 return Sec->getQualNameSymbol();
5128 };
5129
5130 SymName = getExternalFunctionEntryPointSymbol(SymName)->getName().data();
5131 }
5132 return DAG.getTargetExternalSymbol(SymName, Callee.getValueType(),
5133 UsePlt ? PPCII::MO_PLT : 0);
5134 }
5135
5136 // No transformation needed.
5137 assert(Callee.getNode() && "What no callee?");
5138 return Callee;
5139}
5140
5141static SDValue getOutputChainFromCallSeq(SDValue CallSeqStart) {
5142 assert(CallSeqStart.getOpcode() == ISD::CALLSEQ_START &&
5143 "Expected a CALLSEQ_STARTSDNode.");
5144
5145 // The last operand is the chain, except when the node has glue. If the node
5146 // has glue, then the last operand is the glue, and the chain is the
5147 // second-to-last operand.
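// For instance (a sketch of the two cases above): when CALLSEQ_START produces
// the value list (Other), the chain is the last value; when it produces
// (Other, Glue), the chain is the value just before the trailing glue.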
5148 SDValue LastValue = CallSeqStart.getValue(CallSeqStart->getNumValues() - 1);
5149 if (LastValue.getValueType() != MVT::Glue)
5150 return LastValue;
5151
5152 return CallSeqStart.getValue(CallSeqStart->getNumValues() - 2);
5153}
5154
5155 // Creates the node that moves a function's address into the count register
5156// to prepare for an indirect call instruction.
5157static void prepareIndirectCall(SelectionDAG &DAG, SDValue &Callee,
5158 SDValue &Glue, SDValue &Chain,
5159 const SDLoc &dl) {
5160 SDValue MTCTROps[] = {Chain, Callee, Glue};
5161 EVT ReturnTypes[] = {MVT::Other, MVT::Glue};
5162 Chain = DAG.getNode(PPCISD::MTCTR, dl, makeArrayRef(ReturnTypes, 2),
5163 makeArrayRef(MTCTROps, Glue.getNode() ? 3 : 2));
5164 // The glue is the second value produced.
5165 Glue = Chain.getValue(1);
5166}
5167
5168static void prepareDescriptorIndirectCall(SelectionDAG &DAG, SDValue &Callee,
5169 SDValue &Glue, SDValue &Chain,
5170 SDValue CallSeqStart,
5171 const CallBase *CB, const SDLoc &dl,
5172 bool hasNest,
5173 const PPCSubtarget &Subtarget) {
5174 // Function pointers in the 64-bit SVR4 ABI do not point to the function
5175 // entry point, but to the function descriptor (the function entry point
5176 // address is part of the function descriptor though).
5177 // The function descriptor is a three doubleword structure with the
5178 // following fields: function entry point, TOC base address and
5179 // environment pointer.
5180 // Thus for a call through a function pointer, the following actions need
5181 // to be performed:
5182 // 1. Save the TOC of the caller in the TOC save area of its stack
5183 // frame (this is done in LowerCall_Darwin() or LowerCall_64SVR4()).
5184 // 2. Load the address of the function entry point from the function
5185 // descriptor.
5186 // 3. Load the TOC of the callee from the function descriptor into r2.
5187 // 4. Load the environment pointer from the function descriptor into
5188 // r11.
5189 // 5. Branch to the function entry point address.
5190 // 6. On return of the callee, the TOC of the caller needs to be
5191 // restored (this is done in FinishCall()).
5192 //
5193 // The loads are scheduled at the beginning of the call sequence, and the
5194 // register copies are flagged together to ensure that no other
5195 // operations can be scheduled in between. E.g. without flagging the
5196 // copies together, a TOC access in the caller could be scheduled between
5197 // the assignment of the callee TOC and the branch to the callee, which leads
5198 // to incorrect code.
5199
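// A minimal layout sketch of the descriptor walked below, for the 64-bit
// case (an assumption drawn from the comment above; the real offsets come
// from the descriptorTOCAnchorOffset() and
// descriptorEnvironmentPointerOffset() calls that follow):
//   struct FunctionDescriptor {
//     uint64_t EntryPoint; // moved into the CTR and branched to
//     uint64_t TOCBase;    // copied into the TOC pointer register (r2)
//     uint64_t EnvPointer; // copied into the environment register (r11)
//   };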
5200 // Start by loading the function address from the descriptor.
5201 SDValue LDChain = getOutputChainFromCallSeq(CallSeqStart);
5202 auto MMOFlags = Subtarget.hasInvariantFunctionDescriptors()
5203 ? (MachineMemOperand::MODereferenceable |
5204 MachineMemOperand::MOInvariant)
5205 : MachineMemOperand::MONone;
5206
5207 MachinePointerInfo MPI(CB ? CB->getCalledOperand() : nullptr);
5208
5209 // Registers used in building the DAG.
5210 const MCRegister EnvPtrReg = Subtarget.getEnvironmentPointerRegister();
5211 const MCRegister TOCReg = Subtarget.getTOCPointerRegister();
5212
5213 // Offsets of descriptor members.
5214 const unsigned TOCAnchorOffset = Subtarget.descriptorTOCAnchorOffset();
5215 const unsigned EnvPtrOffset = Subtarget.descriptorEnvironmentPointerOffset();
5216
5217 const MVT RegVT = Subtarget.isPPC64() ? MVT::i64 : MVT::i32;
5218 const unsigned Alignment = Subtarget.isPPC64() ? 8 : 4;
5219
5220 // One load for the function's entry point address.
5221 SDValue LoadFuncPtr = DAG.getLoad(RegVT, dl, LDChain, Callee, MPI,
5222 Alignment, MMOFlags);
5223
5224 // One for loading the TOC anchor for the module that contains the called
5225 // function.
5226 SDValue TOCOff = DAG.getIntPtrConstant(TOCAnchorOffset, dl);
5227 SDValue AddTOC = DAG.getNode(ISD::ADD, dl, RegVT, Callee, TOCOff);
5228 SDValue TOCPtr =
5229 DAG.getLoad(RegVT, dl, LDChain, AddTOC,
5230 MPI.getWithOffset(TOCAnchorOffset), Alignment, MMOFlags);
5231
5232 // One for loading the environment pointer.
5233 SDValue PtrOff = DAG.getIntPtrConstant(EnvPtrOffset, dl);
5234 SDValue AddPtr = DAG.getNode(ISD::ADD, dl, RegVT, Callee, PtrOff);
5235 SDValue LoadEnvPtr =
5236 DAG.getLoad(RegVT, dl, LDChain, AddPtr,
5237 MPI.getWithOffset(EnvPtrOffset), Alignment, MMOFlags);
5238
5239
5240 // Then copy the newly loaded TOC anchor to the TOC pointer.
5241 SDValue TOCVal = DAG.getCopyToReg(Chain, dl, TOCReg, TOCPtr, Glue);
5242 Chain = TOCVal.getValue(0);
5243 Glue = TOCVal.getValue(1);
5244
5245 // If the function call has an explicit 'nest' parameter, it takes the
5246 // place of the environment pointer.
5247 assert((!hasNest || !Subtarget.isAIXABI()) &&
5248 "Nest parameter is not supported on AIX.");
5249 if (!hasNest) {
5250 SDValue EnvVal = DAG.getCopyToReg(Chain, dl, EnvPtrReg, LoadEnvPtr, Glue);
5251 Chain = EnvVal.getValue(0);
5252 Glue = EnvVal.getValue(1);
5253 }
5254
5255 // The rest of the indirect call sequence is the same as the non-descriptor
5256 // DAG.
5257 prepareIndirectCall(DAG, LoadFuncPtr, Glue, Chain, dl);
5258}
5259
5260static void
5261buildCallOperands(SmallVectorImpl<SDValue> &Ops,
5262 PPCTargetLowering::CallFlags CFlags, const SDLoc &dl,
5263 SelectionDAG &DAG,
5264 SmallVector<std::pair<unsigned, SDValue>, 8> &RegsToPass,
5265 SDValue Glue, SDValue Chain, SDValue &Callee, int SPDiff,
5266 const PPCSubtarget &Subtarget) {
5267 const bool IsPPC64 = Subtarget.isPPC64();
5268 // MVT for a general purpose register.
5269 const MVT RegVT = IsPPC64 ? MVT::i64 : MVT::i32;
5270
5271 // First operand is always the chain.
5272 Ops.push_back(Chain);
5273
5274 // If it's a direct call pass the callee as the second operand.
5275 if (!CFlags.IsIndirect)
5276 Ops.push_back(Callee);
5277 else {
5278 assert(!CFlags.IsPatchPoint && "Patch point calls are not indirect.");
5279
5280 // For the TOC based ABIs, we have saved the TOC pointer to the linkage area
5281 // on the stack (this would have been done in `LowerCall_64SVR4` or
5282 // `LowerCall_AIX`). The call instruction is a pseudo instruction that
5283 // represents both the indirect branch and a load that restores the TOC
5284 // pointer from the linkage area. The operand for the TOC restore is an add
5285 // of the TOC save offset to the stack pointer. This must be the second
5286 // operand: after the chain input but before any other variadic arguments.
5287 // For 64-bit ELFv2 ABI with PCRel, do not restore the TOC as it is not
5288 // saved or used.
5289 if (isTOCSaveRestoreRequired(Subtarget)) {
5290 const MCRegister StackPtrReg = Subtarget.getStackPointerRegister();
5291
5292 SDValue StackPtr = DAG.getRegister(StackPtrReg, RegVT);
5293 unsigned TOCSaveOffset = Subtarget.getFrameLowering()->getTOCSaveOffset();
5294 SDValue TOCOff = DAG.getIntPtrConstant(TOCSaveOffset, dl);
5295 SDValue AddTOC = DAG.getNode(ISD::ADD, dl, RegVT, StackPtr, TOCOff);
5296 Ops.push_back(AddTOC);
5297 }
5298
5299 // Add the register used for the environment pointer.
5300 if (Subtarget.usesFunctionDescriptors() && !CFlags.HasNest)
5301 Ops.push_back(DAG.getRegister(Subtarget.getEnvironmentPointerRegister(),
5302 RegVT));
5303
5304
5305 // Add CTR register as callee so a bctr can be emitted later.
5306 if (CFlags.IsTailCall)
5307 Ops.push_back(DAG.getRegister(IsPPC64 ? PPC::CTR8 : PPC::CTR, RegVT));
5308 }
5309
5310 // If this is a tail call add stack pointer delta.
5311 if (CFlags.IsTailCall)
5312 Ops.push_back(DAG.getConstant(SPDiff, dl, MVT::i32));
5313
5314 // Add argument registers to the end of the list so that they are known live
5315 // into the call.
5316 for (unsigned i = 0, e = RegsToPass.size(); i != e; ++i)
5317 Ops.push_back(DAG.getRegister(RegsToPass[i].first,
5318 RegsToPass[i].second.getValueType()));
5319
5320 // We cannot add R2/X2 as an operand here for PATCHPOINT, because there is
5321 // no way to mark dependencies as implicit here.
5322 // We will add the R2/X2 dependency in EmitInstrWithCustomInserter.
5323 if ((Subtarget.is64BitELFABI() || Subtarget.isAIXABI()) &&
5324 !CFlags.IsPatchPoint && !Subtarget.isUsingPCRelativeCalls())
5325 Ops.push_back(DAG.getRegister(Subtarget.getTOCPointerRegister(), RegVT));
5326
5327 // Add implicit use of CR bit 6 for 32-bit SVR4 vararg calls
5328 if (CFlags.IsVarArg && Subtarget.is32BitELFABI())
5329 Ops.push_back(DAG.getRegister(PPC::CR1EQ, MVT::i32));
5330
5331 // Add a register mask operand representing the call-preserved registers.
5332 const TargetRegisterInfo *TRI = Subtarget.getRegisterInfo();
5333 const uint32_t *Mask =
5334 TRI->getCallPreservedMask(DAG.getMachineFunction(), CFlags.CallConv);
5335 assert(Mask && "Missing call preserved mask for calling convention");
5336 Ops.push_back(DAG.getRegisterMask(Mask));
5337
5338 // If the glue is valid, it is the last operand.
5339 if (Glue.getNode())
5340 Ops.push_back(Glue);
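  // Summary of the operand order assembled above (descriptive only; the
  // pushes above are authoritative): chain; callee (direct calls) or, for
  // indirect calls, the TOC-restore address, environment-pointer register and
  // CTR register as applicable; the stack pointer delta for tail calls; the
  // argument registers; the TOC pointer register where required; CR1EQ for
  // 32-bit SVR4 vararg calls; the register mask; and the glue, if any.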
5341}
5342
5343SDValue PPCTargetLowering::FinishCall(
5344 CallFlags CFlags, const SDLoc &dl, SelectionDAG &DAG,
5345 SmallVector<std::pair<unsigned, SDValue>, 8> &RegsToPass, SDValue Glue,
5346 SDValue Chain, SDValue CallSeqStart, SDValue &Callee, int SPDiff,
5347 unsigned NumBytes, const SmallVectorImpl<ISD::InputArg> &Ins,
5348 SmallVectorImpl<SDValue> &InVals, const CallBase *CB) const {
5349
5350 if ((Subtarget.is64BitELFABI() && !Subtarget.isUsingPCRelativeCalls()) ||
5351 Subtarget.isAIXABI())
5352 setUsesTOCBasePtr(DAG);
5353
5354 unsigned CallOpc =
5355 getCallOpcode(CFlags, DAG.getMachineFunction().getFunction(), Callee,
5356 Subtarget, DAG.getTarget());
5357
5358 if (!CFlags.IsIndirect)
5359 Callee = transformCallee(Callee, DAG, dl, Subtarget);
5360 else if (Subtarget.usesFunctionDescriptors())
5361 prepareDescriptorIndirectCall(DAG, Callee, Glue, Chain, CallSeqStart, CB,
5362 dl, CFlags.HasNest, Subtarget);
5363 else
5364 prepareIndirectCall(DAG, Callee, Glue, Chain, dl);
5365
5366 // Build the operand list for the call instruction.
5367 SmallVector<SDValue, 8> Ops;
5368 buildCallOperands(Ops, CFlags, dl, DAG, RegsToPass, Glue, Chain, Callee,
5369 SPDiff, Subtarget);
5370
5371 // Emit tail call.
5372 if (CFlags.IsTailCall) {
5373 // Indirect tail calls when using PC Relative calls do not have the same
5374 // constraints.
5375 assert(((Callee.getOpcode() == ISD::Register &&
5376 cast<RegisterSDNode>(Callee)->getReg() == PPC::CTR) ||
5377 Callee.getOpcode() == ISD::TargetExternalSymbol ||
5378 Callee.getOpcode() == ISD::TargetGlobalAddress ||
5379 isa<ConstantSDNode>(Callee) ||
5380 (CFlags.IsIndirect && Subtarget.isUsingPCRelativeCalls())) &&
5381 "Expecting a global address, external symbol, absolute value, "
5382 "register or an indirect tail call when PC Relative calls are "
5383 "used.");
5384 // PC Relative calls also use TC_RETURN as the way to mark tail calls.
5385 assert(CallOpc == PPCISD::TC_RETURN &&
5386 "Unexpected call opcode for a tail call.");
5387 DAG.getMachineFunction().getFrameInfo().setHasTailCall();
5388 return DAG.getNode(CallOpc, dl, MVT::Other, Ops);
5389 }
5390
5391 std::array<EVT, 2> ReturnTypes = {{MVT::Other, MVT::Glue}};
5392 Chain = DAG.getNode(CallOpc, dl, ReturnTypes, Ops);
5393 DAG.addNoMergeSiteInfo(Chain.getNode(), CFlags.NoMerge);
5394 Glue = Chain.getValue(1);
5395
5396 // When performing tail call optimization the callee pops its arguments off
5397 // the stack. Account for this here so these bytes can be pushed back on in
5398 // PPCFrameLowering::eliminateCallFramePseudoInstr.
5399 int BytesCalleePops = (CFlags.CallConv == CallingConv::Fast &&
5400 getTargetMachine().Options.GuaranteedTailCallOpt)
5401 ? NumBytes
5402 : 0;
5403
5404 Chain = DAG.getCALLSEQ_END(Chain, DAG.getIntPtrConstant(NumBytes, dl, true),
5405 DAG.getIntPtrConstant(BytesCalleePops, dl, true),
5406 Glue, dl);
5407 Glue = Chain.getValue(1);
5408
5409 return LowerCallResult(Chain, Glue, CFlags.CallConv, CFlags.IsVarArg, Ins, dl,
5410 DAG, InVals);
5411}
5412
5413SDValue
5414PPCTargetLowering::LowerCall(TargetLowering::CallLoweringInfo &CLI,
5415 SmallVectorImpl<SDValue> &InVals) const {
5416 SelectionDAG &DAG = CLI.DAG;
5417 SDLoc &dl = CLI.DL;
5418 SmallVectorImpl<ISD::OutputArg> &Outs = CLI.Outs;
5419 SmallVectorImpl<SDValue> &OutVals = CLI.OutVals;
5420 SmallVectorImpl<ISD::InputArg> &Ins = CLI.Ins;
5421 SDValue Chain = CLI.Chain;
5422 SDValue Callee = CLI.Callee;
5423 bool &isTailCall = CLI.IsTailCall;
5424 CallingConv::ID CallConv = CLI.CallConv;
5425 bool isVarArg = CLI.IsVarArg;
5426 bool isPatchPoint = CLI.IsPatchPoint;
5427 const CallBase *CB = CLI.CB;
5428
5429 if (isTailCall) {
5430 if (Subtarget.useLongCalls() && !(CB && CB->isMustTailCall()))
5431 isTailCall = false;
5432 else if (Subtarget.isSVR4ABI() && Subtarget.isPPC64())
5433 isTailCall = IsEligibleForTailCallOptimization_64SVR4(
5434 Callee, CallConv, CB, isVarArg, Outs, Ins, DAG);
5435 else
5436 isTailCall = IsEligibleForTailCallOptimization(Callee, CallConv, isVarArg,
5437 Ins, DAG);
5438 if (isTailCall) {
5439 ++NumTailCalls;
5440 if (!getTargetMachine().Options.GuaranteedTailCallOpt)
5441 ++NumSiblingCalls;
5442
5443 // PC Relative calls no longer guarantee that the callee is a Global
5444 // Address Node. The callee could be an indirect tail call in which
5445 // case the SDValue for the callee could be a load (to load the address
5446 // of a function pointer) or it may be a register copy (to move the
5447 // address of the callee from a function parameter into a virtual
5448 // register). It may also be an ExternalSymbolSDNode (e.g. memcpy).
5449 assert((Subtarget.isUsingPCRelativeCalls() ||
5450 isa<GlobalAddressSDNode>(Callee)) &&
5451 "Callee should be an llvm::Function object.");
5452
5453 LLVM_DEBUG(dbgs() << "TCO caller: " << DAG.getMachineFunction().getName()
5454 << "\nTCO callee: ");
5455 LLVM_DEBUG(Callee.dump());
5456 }
5457 }
5458
5459 if (!isTailCall && CB && CB->isMustTailCall())
5460 report_fatal_error("failed to perform tail call elimination on a call "
5461 "site marked musttail");
5462
5463 // When long calls (i.e. indirect calls) are always used, calls are always
5464 // made via function pointer. If we have a function name, first translate it
5465 // into a pointer.
5466 if (Subtarget.useLongCalls() && isa<GlobalAddressSDNode>(Callee) &&
5467 !isTailCall)
5468 Callee = LowerGlobalAddress(Callee, DAG);
5469
5470 CallFlags CFlags(
5471 CallConv, isTailCall, isVarArg, isPatchPoint,
5472 isIndirectCall(Callee, DAG, Subtarget, isPatchPoint),
5473 // hasNest
5474 Subtarget.is64BitELFABI() &&
5475 any_of(Outs, [](ISD::OutputArg Arg) { return Arg.Flags.isNest(); }),
5476 CLI.NoMerge);
5477
5478 if (Subtarget.isAIXABI())
5479 return LowerCall_AIX(Chain, Callee, CFlags, Outs, OutVals, Ins, dl, DAG,
5480 InVals, CB);
5481
5482 assert(Subtarget.isSVR4ABI());
5483 if (Subtarget.isPPC64())
5484 return LowerCall_64SVR4(Chain, Callee, CFlags, Outs, OutVals, Ins, dl, DAG,
5485 InVals, CB);
5486 return LowerCall_32SVR4(Chain, Callee, CFlags, Outs, OutVals, Ins, dl, DAG,
5487 InVals, CB);
5488}
5489
5490SDValue PPCTargetLowering::LowerCall_32SVR4(
5491 SDValue Chain, SDValue Callee, CallFlags CFlags,
5492 const SmallVectorImpl<ISD::OutputArg> &Outs,
5493 const SmallVectorImpl<SDValue> &OutVals,
5494 const SmallVectorImpl<ISD::InputArg> &Ins, const SDLoc &dl,
5495 SelectionDAG &DAG, SmallVectorImpl<SDValue> &InVals,
5496 const CallBase *CB) const {
5497 // See PPCTargetLowering::LowerFormalArguments_32SVR4() for a description
5498 // of the 32-bit SVR4 ABI stack frame layout.
5499
5500 const CallingConv::ID CallConv = CFlags.CallConv;
5501 const bool IsVarArg = CFlags.IsVarArg;
5502 const bool IsTailCall = CFlags.IsTailCall;
5503
5504 assert((CallConv == CallingConv::C ||
5505 CallConv == CallingConv::Cold ||
5506 CallConv == CallingConv::Fast) && "Unknown calling convention!");
5507
5508 const Align PtrAlign(4);
5509
5510 MachineFunction &MF = DAG.getMachineFunction();
5511
5512 // Mark this function as potentially containing a function that contains a
5513 // tail call. As a consequence the frame pointer will be used for dynamic
5514 // alloca and for restoring the caller's stack pointer in this function's
5515 // epilog. This is done because the tail-called function might overwrite the
5516 // value in this function's (MF) stack pointer stack slot 0(SP).
5517 if (getTargetMachine().Options.GuaranteedTailCallOpt &&
5518 CallConv == CallingConv::Fast)
5519 MF.getInfo<PPCFunctionInfo>()->setHasFastCall();
5520
5521 // Count how many bytes are to be pushed on the stack, including the linkage
5522 // area, parameter list area and the part of the local variable space which
5523 // contains copies of aggregates which are passed by value.
5524
5525 // Assign locations to all of the outgoing arguments.
5526 SmallVector<CCValAssign, 16> ArgLocs;
5527 PPCCCState CCInfo(CallConv, IsVarArg, MF, ArgLocs, *DAG.getContext());
5528
5529 // Reserve space for the linkage area on the stack.
5530 CCInfo.AllocateStack(Subtarget.getFrameLowering()->getLinkageSize(),
5531 PtrAlign);
5532 if (useSoftFloat())
5533 CCInfo.PreAnalyzeCallOperands(Outs);
5534
5535 if (IsVarArg) {
5536 // Handle fixed and variable vector arguments differently.
5537 // Fixed vector arguments go into registers as long as registers are
5538 // available. Variable vector arguments always go into memory.
5539 unsigned NumArgs = Outs.size();
5540
5541 for (unsigned i = 0; i != NumArgs; ++i) {
5542 MVT ArgVT = Outs[i].VT;
5543 ISD::ArgFlagsTy ArgFlags = Outs[i].Flags;
5544 bool Result;
5545
5546 if (Outs[i].IsFixed) {
5547 Result = CC_PPC32_SVR4(i, ArgVT, ArgVT, CCValAssign::Full, ArgFlags,
5548 CCInfo);
5549 } else {
5550 Result = CC_PPC32_SVR4_VarArg(i, ArgVT, ArgVT, CCValAssign::Full,
5551 ArgFlags, CCInfo);
5552 }
5553
5554 if (Result) {
5555#ifndef NDEBUG
5556 errs() << "Call operand #" << i << " has unhandled type "
5557 << EVT(ArgVT).getEVTString() << "\n";
5558#endif
5559 llvm_unreachable(nullptr);
5560 }
5561 }
5562 } else {
5563 // All arguments are treated the same.
5564 CCInfo.AnalyzeCallOperands(Outs, CC_PPC32_SVR4);
5565 }
5566 CCInfo.clearWasPPCF128();
5567
5568 // Assign locations to all of the outgoing aggregate by value arguments.
5569 SmallVector<CCValAssign, 16> ByValArgLocs;
5570 CCState CCByValInfo(CallConv, IsVarArg, MF, ByValArgLocs, *DAG.getContext());
5571
5572 // Reserve stack space for the allocations in CCInfo.
5573 CCByValInfo.AllocateStack(CCInfo.getNextStackOffset(), PtrAlign);
5574
5575 CCByValInfo.AnalyzeCallOperands(Outs, CC_PPC32_SVR4_ByVal);
5576
5577 // Size of the linkage area, parameter list area and the part of the local
5578 // variable space where copies of aggregates which are passed by value are
5579 // stored.
5580 unsigned NumBytes = CCByValInfo.getNextStackOffset();
5581
5582 // Calculate by how many bytes the stack has to be adjusted in case of tail
5583 // call optimization.
5584 int SPDiff = CalculateTailCallSPDiff(DAG, IsTailCall, NumBytes);
5585
5586 // Adjust the stack pointer for the new arguments...
5587 // These operations are automatically eliminated by the prolog/epilog pass
5588 Chain = DAG.getCALLSEQ_START(Chain, NumBytes, 0, dl);
5589 SDValue CallSeqStart = Chain;
5590
5591 // Load the return address and frame pointer so it can be moved somewhere else
5592 // later.
5593 SDValue LROp, FPOp;
5594 Chain = EmitTailCallLoadFPAndRetAddr(DAG, SPDiff, Chain, LROp, FPOp, dl);
5595
5596 // Set up a copy of the stack pointer for use loading and storing any
5597 // arguments that may not fit in the registers available for argument
5598 // passing.
5599 SDValue StackPtr = DAG.getRegister(PPC::R1, MVT::i32);
5600
5601 SmallVector<std::pair<unsigned, SDValue>, 8> RegsToPass;
5602 SmallVector<TailCallArgumentInfo, 8> TailCallArguments;
5603 SmallVector<SDValue, 8> MemOpChains;
5604
5605 bool seenFloatArg = false;
5606 // Walk the register/memloc assignments, inserting copies/loads.
5607 // i - Tracks the index into the list of registers allocated for the call
5608 // RealArgIdx - Tracks the index into the list of actual function arguments
5609 // j - Tracks the index into the list of byval arguments
5610 for (unsigned i = 0, RealArgIdx = 0, j = 0, e = ArgLocs.size();
5611 i != e;
5612 ++i, ++RealArgIdx) {
5613 CCValAssign &VA = ArgLocs[i];
5614 SDValue Arg = OutVals[RealArgIdx];
5615 ISD::ArgFlagsTy Flags = Outs[RealArgIdx].Flags;
5616
5617 if (Flags.isByVal()) {
5618 // Argument is an aggregate which is passed by value, thus we need to
5619 // create a copy of it in the local variable space of the current stack
5620 // frame (which is the stack frame of the caller) and pass the address of
5621 // this copy to the callee.
5622 assert((j < ByValArgLocs.size()) && "Index out of bounds!");
5623 CCValAssign &ByValVA = ByValArgLocs[j++];
5624 assert((VA.getValNo() == ByValVA.getValNo()) && "ValNo mismatch!");
5625
5626 // Memory reserved in the local variable space of the caller's stack frame.
5627 unsigned LocMemOffset = ByValVA.getLocMemOffset();
5628
5629 SDValue PtrOff = DAG.getIntPtrConstant(LocMemOffset, dl);
5630 PtrOff = DAG.getNode(ISD::ADD, dl, getPointerTy(MF.getDataLayout()),
5631 StackPtr, PtrOff);
5632
5633 // Create a copy of the argument in the local area of the current
5634 // stack frame.
5635 SDValue MemcpyCall =
5636 CreateCopyOfByValArgument(Arg, PtrOff,
5637 CallSeqStart.getNode()->getOperand(0),
5638 Flags, DAG, dl);
5639
5640 // This must go outside the CALLSEQ_START..END.
5641 SDValue NewCallSeqStart = DAG.getCALLSEQ_START(MemcpyCall, NumBytes, 0,
5642 SDLoc(MemcpyCall));
5643 DAG.ReplaceAllUsesWith(CallSeqStart.getNode(),
5644 NewCallSeqStart.getNode());
5645 Chain = CallSeqStart = NewCallSeqStart;
5646
5647 // Pass the address of the aggregate copy on the stack either in a
5648 // physical register or in the parameter list area of the current stack
5649 // frame to the callee.
5650 Arg = PtrOff;
5651 }
5652
5653 // When useCRBits() is true, there can be i1 arguments.
5654 // It is because getRegisterType(MVT::i1) => MVT::i1,
5655 // and for other integer types getRegisterType() => MVT::i32.
5656 // Extend i1 and ensure callee will get i32.
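// For example (illustrative only): an i1 "true" sign-extends to 0xFFFFFFFF in
// the i32 argument register, while zero-extending it yields 1.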
5657 if (Arg.getValueType() == MVT::i1)
5658 Arg = DAG.getNode(Flags.isSExt() ? ISD::SIGN_EXTEND : ISD::ZERO_EXTEND,
5659 dl, MVT::i32, Arg);
5660
5661 if (VA.isRegLoc()) {
5662 seenFloatArg |= VA.getLocVT().isFloatingPoint();
5663 // Put argument in a physical register.
5664 if (Subtarget.hasSPE() && Arg.getValueType() == MVT::f64) {
5665 bool IsLE = Subtarget.isLittleEndian();
5666 SDValue SVal = DAG.getNode(PPCISD::EXTRACT_SPE, dl, MVT::i32, Arg,
5667 DAG.getIntPtrConstant(IsLE ? 0 : 1, dl));
5668 RegsToPass.push_back(std::make_pair(VA.getLocReg(), SVal.getValue(0)));
5669 SVal = DAG.getNode(PPCISD::EXTRACT_SPE, dl, MVT::i32, Arg,
5670 DAG.getIntPtrConstant(IsLE ? 1 : 0, dl));
5671 RegsToPass.push_back(std::make_pair(ArgLocs[++i].getLocReg(),
5672 SVal.getValue(0)));
5673 } else
5674 RegsToPass.push_back(std::make_pair(VA.getLocReg(), Arg));
5675 } else {
5676 // Put argument in the parameter list area of the current stack frame.
5677 assert(VA.isMemLoc());
5678 unsigned LocMemOffset = VA.getLocMemOffset();
5679
5680 if (!IsTailCall) {
5681 SDValue PtrOff = DAG.getIntPtrConstant(LocMemOffset, dl);
5682 PtrOff = DAG.getNode(ISD::ADD, dl, getPointerTy(MF.getDataLayout()),
5683 StackPtr, PtrOff);
5684
5685 MemOpChains.push_back(
5686 DAG.getStore(Chain, dl, Arg, PtrOff, MachinePointerInfo()));
5687 } else {
5688 // Calculate and remember argument location.
5689 CalculateTailCallArgDest(DAG, MF, false, Arg, SPDiff, LocMemOffset,
5690 TailCallArguments);
5691 }
5692 }
5693 }
5694
5695 if (!MemOpChains.empty())
5696 Chain = DAG.getNode(ISD::TokenFactor, dl, MVT::Other, MemOpChains);
5697
5698 // Build a sequence of copy-to-reg nodes chained together with token chain
5699 // and flag operands which copy the outgoing args into the appropriate regs.
5700 SDValue InFlag;
5701 for (unsigned i = 0, e = RegsToPass.size(); i != e; ++i) {
5702 Chain = DAG.getCopyToReg(Chain, dl, RegsToPass[i].first,
5703 RegsToPass[i].second, InFlag);
5704 InFlag = Chain.getValue(1);
5705 }
5706
5707 // Set CR bit 6 to true if this is a vararg call with floating args passed in
5708 // registers.
5709 if (IsVarArg) {
5710 SDVTList VTs = DAG.getVTList(MVT::Other, MVT::Glue);
5711 SDValue Ops[] = { Chain, InFlag };
5712
5713 Chain = DAG.getNode(seenFloatArg ? PPCISD::CR6SET : PPCISD::CR6UNSET,
5714 dl, VTs, makeArrayRef(Ops, InFlag.getNode() ? 2 : 1));
5715
5716 InFlag = Chain.getValue(1);
5717 }
5718
5719 if (IsTailCall)
5720 PrepareTailCall(DAG, InFlag, Chain, dl, SPDiff, NumBytes, LROp, FPOp,
5721 TailCallArguments);
5722
5723 return FinishCall(CFlags, dl, DAG, RegsToPass, InFlag, Chain, CallSeqStart,
5724 Callee, SPDiff, NumBytes, Ins, InVals, CB);
5725}
5726
5727// Copy an argument into memory, being careful to do this outside the
5728// call sequence for the call to which the argument belongs.
5729SDValue PPCTargetLowering::createMemcpyOutsideCallSeq(
5730 SDValue Arg, SDValue PtrOff, SDValue CallSeqStart, ISD::ArgFlagsTy Flags,
5731 SelectionDAG &DAG, const SDLoc &dl) const {
5732 SDValue MemcpyCall = CreateCopyOfByValArgument(Arg, PtrOff,
5733 CallSeqStart.getNode()->getOperand(0),
5734 Flags, DAG, dl);
5735 // The MEMCPY must go outside the CALLSEQ_START..END.
5736 int64_t FrameSize = CallSeqStart.getConstantOperandVal(1);
5737 SDValue NewCallSeqStart = DAG.getCALLSEQ_START(MemcpyCall, FrameSize, 0,
5738 SDLoc(MemcpyCall));
5739 DAG.ReplaceAllUsesWith(CallSeqStart.getNode(),
5740 NewCallSeqStart.getNode());
5741 return NewCallSeqStart;
5742}
5743
5744SDValue PPCTargetLowering::LowerCall_64SVR4(
5745 SDValue Chain, SDValue Callee, CallFlags CFlags,
5746 const SmallVectorImpl<ISD::OutputArg> &Outs,
5747 const SmallVectorImpl<SDValue> &OutVals,
5748 const SmallVectorImpl<ISD::InputArg> &Ins, const SDLoc &dl,
5749 SelectionDAG &DAG, SmallVectorImpl<SDValue> &InVals,
5750 const CallBase *CB) const {
5751 bool isELFv2ABI = Subtarget.isELFv2ABI();
5752 bool isLittleEndian = Subtarget.isLittleEndian();
5753 unsigned NumOps = Outs.size();
5754 bool IsSibCall = false;
5755 bool IsFastCall = CFlags.CallConv == CallingConv::Fast;
5756
5757 EVT PtrVT = getPointerTy(DAG.getDataLayout());
5758 unsigned PtrByteSize = 8;
5759
5760 MachineFunction &MF = DAG.getMachineFunction();
5761
5762 if (CFlags.IsTailCall && !getTargetMachine().Options.GuaranteedTailCallOpt)
5763 IsSibCall = true;
5764
5765 // Mark this function as potentially containing a function that contains a
5766 // tail call. As a consequence the frame pointer will be used for dynamic
5767 // alloca and for restoring the caller's stack pointer in this function's
5768 // epilog. This is done because the tail-called function might overwrite the
5769 // value in this function's (MF) stack pointer stack slot 0(SP).
5770 if (getTargetMachine().Options.GuaranteedTailCallOpt && IsFastCall)
5771 MF.getInfo<PPCFunctionInfo>()->setHasFastCall();
5772
5773 assert(!(IsFastCall && CFlags.IsVarArg) &&
5774 "fastcc not supported on varargs functions");
5775
5776 // Count how many bytes are to be pushed on the stack, including the linkage
5777 // area, and parameter passing area. On ELFv1, the linkage area is 48 bytes
5778 // reserved space for [SP][CR][LR][2 x unused][TOC]; on ELFv2, the linkage
5779 // area is 32 bytes reserved space for [SP][CR][LR][TOC].
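// Working out the implied offsets (illustration only; the authoritative value
// comes from getLinkageSize() below and from PPCFrameLowering): with 8-byte
// slots, the ELFv1 layout above places the TOC save slot at offset 40 of the
// 48-byte area, and the ELFv2 layout places it at offset 24 of the 32-byte
// area.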
5780 unsigned LinkageSize = Subtarget.getFrameLowering()->getLinkageSize();
5781 unsigned NumBytes = LinkageSize;
5782 unsigned GPR_idx = 0, FPR_idx = 0, VR_idx = 0;
5783
5784 static const MCPhysReg GPR[] = {
5785 PPC::X3, PPC::X4, PPC::X5, PPC::X6,
5786 PPC::X7, PPC::X8, PPC::X9, PPC::X10,
5787 };
5788 static const MCPhysReg VR[] = {
5789 PPC::V2, PPC::V3, PPC::V4, PPC::V5, PPC::V6, PPC::V7, PPC::V8,
5790 PPC::V9, PPC::V10, PPC::V11, PPC::V12, PPC::V13
5791 };
5792
5793 const unsigned NumGPRs = array_lengthof(GPR);
5794 const unsigned NumFPRs = useSoftFloat() ? 0 : 13;
5795 const unsigned NumVRs = array_lengthof(VR);
5796
5797 // On ELFv2, we can avoid allocating the parameter area if all the arguments
5798 // can be passed to the callee in registers.
5799 // For the fast calling convention, there is another check below.
5800 // Note: We should keep consistent with LowerFormalArguments_64SVR4()
5801 bool HasParameterArea = !isELFv2ABI || CFlags.IsVarArg || IsFastCall;
5802 if (!HasParameterArea) {
5803 unsigned ParamAreaSize = NumGPRs * PtrByteSize;
5804 unsigned AvailableFPRs = NumFPRs;
5805 unsigned AvailableVRs = NumVRs;
5806 unsigned NumBytesTmp = NumBytes;
5807 for (unsigned i = 0; i != NumOps; ++i) {
5808 if (Outs[i].Flags.isNest()) continue;
5809 if (CalculateStackSlotUsed(Outs[i].VT, Outs[i].ArgVT, Outs[i].Flags,
5810 PtrByteSize, LinkageSize, ParamAreaSize,
5811 NumBytesTmp, AvailableFPRs, AvailableVRs))
5812 HasParameterArea = true;
5813 }
5814 }
5815
5816 // When using the fast calling convention, we don't provide backing for
5817 // arguments that will be in registers.
5818 unsigned NumGPRsUsed = 0, NumFPRsUsed = 0, NumVRsUsed = 0;
5819
5820 // Avoid allocating parameter area for fastcc functions if all the arguments
5821 // can be passed in the registers.
5822 if (IsFastCall)
5823 HasParameterArea = false;
5824
5825 // Add up all the space actually used.
5826 for (unsigned i = 0; i != NumOps; ++i) {
5827 ISD::ArgFlagsTy Flags = Outs[i].Flags;
5828 EVT ArgVT = Outs[i].VT;
5829 EVT OrigVT = Outs[i].ArgVT;
5830
5831 if (Flags.isNest())
5832 continue;
5833
5834 if (IsFastCall) {
5835 if (Flags.isByVal()) {
5836 NumGPRsUsed += (Flags.getByValSize()+7)/8;
5837 if (NumGPRsUsed > NumGPRs)
5838 HasParameterArea = true;
5839 } else {
5840 switch (ArgVT.getSimpleVT().SimpleTy) {
5841 default: llvm_unreachable("Unexpected ValueType for argument!");
5842 case MVT::i1:
5843 case MVT::i32:
5844 case MVT::i64:
5845 if (++NumGPRsUsed <= NumGPRs)
5846 continue;
5847 break;
5848 case MVT::v4i32:
5849 case MVT::v8i16:
5850 case MVT::v16i8:
5851 case MVT::v2f64:
5852 case MVT::v2i64:
5853 case MVT::v1i128:
5854 case MVT::f128:
5855 if (++NumVRsUsed <= NumVRs)
5856 continue;
5857 break;
5858 case MVT::v4f32:
5859 if (++NumVRsUsed <= NumVRs)
5860 continue;
5861 break;
5862 case MVT::f32:
5863 case MVT::f64:
5864 if (++NumFPRsUsed <= NumFPRs)
5865 continue;
5866 break;
5867 }
5868 HasParameterArea = true;
5869 }
5870 }
5871
5872 /* Respect alignment of argument on the stack. */
5873 auto Alignment =
5874 CalculateStackSlotAlignment(ArgVT, OrigVT, Flags, PtrByteSize);
5875 NumBytes = alignTo(NumBytes, Alignment);
5876
5877 NumBytes += CalculateStackSlotSize(ArgVT, Flags, PtrByteSize);
5878 if (Flags.isInConsecutiveRegsLast())
5879 NumBytes = ((NumBytes + PtrByteSize - 1)/PtrByteSize) * PtrByteSize;
5880 }
5881
5882 unsigned NumBytesActuallyUsed = NumBytes;
5883
5884 // In the old ELFv1 ABI,
5885 // the prolog code of the callee may store up to 8 GPR argument registers to
5886 // the stack, allowing va_start to index over them in memory if it is varargs.
5887 // Because we cannot tell if this is needed on the caller side, we have to
5888 // conservatively assume that it is needed. As such, make sure we have at
5889 // least enough stack space for the caller to store the 8 GPRs.
5890 // In the ELFv2 ABI, we allocate the parameter area iff a callee
5891 // really requires memory operands, e.g. a vararg function.
5892 if (HasParameterArea)
5893 NumBytes = std::max(NumBytes, LinkageSize + 8 * PtrByteSize);
5894 else
5895 NumBytes = LinkageSize;
5896
5897 // Tail call needs the stack to be aligned.
5898 if (getTargetMachine().Options.GuaranteedTailCallOpt && IsFastCall)
5899 NumBytes = EnsureStackAlignment(Subtarget.getFrameLowering(), NumBytes);
5900
5901 int SPDiff = 0;
5902
5903 // Calculate by how many bytes the stack has to be adjusted in case of tail
5904 // call optimization.
5905 if (!IsSibCall)
5906 SPDiff = CalculateTailCallSPDiff(DAG, CFlags.IsTailCall, NumBytes);
5907
5908 // To protect arguments on the stack from being clobbered in a tail call,
5909 // force all the loads to happen before doing any other lowering.
5910 if (CFlags.IsTailCall)
5911 Chain = DAG.getStackArgumentTokenFactor(Chain);
5912
5913 // Adjust the stack pointer for the new arguments...
5914 // These operations are automatically eliminated by the prolog/epilog pass
5915 if (!IsSibCall)
5916 Chain = DAG.getCALLSEQ_START(Chain, NumBytes, 0, dl);
5917 SDValue CallSeqStart = Chain;
5918
5919 // Load the return address and frame pointer so it can be moved somewhere else
5920 // later.
5921 SDValue LROp, FPOp;
5922 Chain = EmitTailCallLoadFPAndRetAddr(DAG, SPDiff, Chain, LROp, FPOp, dl);
5923
5924 // Set up a copy of the stack pointer for use loading and storing any
5925 // arguments that may not fit in the registers available for argument
5926 // passing.
5927 SDValue StackPtr = DAG.getRegister(PPC::X1, MVT::i64);
5928
5929 // Figure out which arguments are going to go in registers, and which in
5930 // memory. Also, if this is a vararg function, floating point operations
5931 // must be stored to our stack, and loaded into integer regs as well, if
5932 // any integer regs are available for argument passing.
5933 unsigned ArgOffset = LinkageSize;
5934
5935 SmallVector<std::pair<unsigned, SDValue>, 8> RegsToPass;
5936 SmallVector<TailCallArgumentInfo, 8> TailCallArguments;
5937
5938 SmallVector<SDValue, 8> MemOpChains;
5939 for (unsigned i = 0; i != NumOps; ++i) {
5940 SDValue Arg = OutVals[i];
5941 ISD::ArgFlagsTy Flags = Outs[i].Flags;
5942 EVT ArgVT = Outs[i].VT;
5943 EVT OrigVT = Outs[i].ArgVT;
5944
5945 // PtrOff will be used to store the current argument to the stack if a
5946 // register cannot be found for it.
5947 SDValue PtrOff;
5948
5949 // We re-align the argument offset for each argument, except when using the
5950 // fast calling convention, when we need to make sure we do that only when
5951 // we'll actually use a stack slot.
5952 auto ComputePtrOff = [&]() {
5953 /* Respect alignment of argument on the stack. */
5954 auto Alignment =
5955 CalculateStackSlotAlignment(ArgVT, OrigVT, Flags, PtrByteSize);
5956 ArgOffset = alignTo(ArgOffset, Alignment);
5957
5958 PtrOff = DAG.getConstant(ArgOffset, dl, StackPtr.getValueType());
5959
5960 PtrOff = DAG.getNode(ISD::ADD, dl, PtrVT, StackPtr, PtrOff);
5961 };
5962
5963 if (!IsFastCall) {
5964 ComputePtrOff();
5965
5966 /* Compute GPR index associated with argument offset. */
5967 GPR_idx = (ArgOffset - LinkageSize) / PtrByteSize;
5968 GPR_idx = std::min(GPR_idx, NumGPRs);
5969 }
5970
5971 // Promote integers to 64-bit values.
5972 if (Arg.getValueType() == MVT::i32 || Arg.getValueType() == MVT::i1) {
5973 // FIXME: Should this use ANY_EXTEND if neither sext nor zext?
5974 unsigned ExtOp = Flags.isSExt() ? ISD::SIGN_EXTEND : ISD::ZERO_EXTEND;
5975 Arg = DAG.getNode(ExtOp, dl, MVT::i64, Arg);
5976 }
5977
5978 // FIXME memcpy is used way more than necessary. Correctness first.
5979 // Note: "by value" is code for passing a structure by value, not
5980 // basic types.
5981 if (Flags.isByVal()) {
5982 // Note: Size includes alignment padding, so
5983 // struct x { short a; char b; }
5984 // will have Size = 4. With #pragma pack(1), it will have Size = 3.
5985 // These are the proper values we need for right-justifying the
5986 // aggregate in a parameter register.
5987 unsigned Size = Flags.getByValSize();
5988
5989 // An empty aggregate parameter takes up no storage and no
5990 // registers.
5991 if (Size == 0)
5992 continue;
5993
5994 if (IsFastCall)
5995 ComputePtrOff();
5996
5997 // All aggregates smaller than 8 bytes must be passed right-justified.
5998 if (Size==1 || Size==2 || Size==4) {
5999 EVT VT = (Size==1) ? MVT::i8 : ((Size==2) ? MVT::i16 : MVT::i32);
6000 if (GPR_idx != NumGPRs) {
6001 SDValue Load = DAG.getExtLoad(ISD::EXTLOAD, dl, PtrVT, Chain, Arg,
6002 MachinePointerInfo(), VT);
6003 MemOpChains.push_back(Load.getValue(1));
6004 RegsToPass.push_back(std::make_pair(GPR[GPR_idx++], Load));
6005
6006 ArgOffset += PtrByteSize;
6007 continue;
6008 }
6009 }
6010
6011 if (GPR_idx == NumGPRs && Size < 8) {
6012 SDValue AddPtr = PtrOff;
6013 if (!isLittleEndian) {
6014 SDValue Const = DAG.getConstant(PtrByteSize - Size, dl,
6015 PtrOff.getValueType());
6016 AddPtr = DAG.getNode(ISD::ADD, dl, PtrVT, PtrOff, Const);
6017 }
6018 Chain = CallSeqStart = createMemcpyOutsideCallSeq(Arg, AddPtr,
6019 CallSeqStart,
6020 Flags, DAG, dl);
6021 ArgOffset += PtrByteSize;
6022 continue;
6023 }
6024 // Copy entire object into memory. There are cases where gcc-generated
6025 // code assumes it is there, even if it could be put entirely into
6026 // registers. (This is not what the doc says.)
6027
6028 // FIXME: The above statement is likely due to a misunderstanding of the
6029 // documents. All arguments must be copied into the parameter area BY
6030 // THE CALLEE in the event that the callee takes the address of any
6031 // formal argument. That has not yet been implemented. However, it is
6032 // reasonable to use the stack area as a staging area for the register
6033 // load.
6034
6035 // Skip this for small aggregates, as we will use the same slot for a
6036 // right-justified copy, below.
6037 if (Size >= 8)
6038 Chain = CallSeqStart = createMemcpyOutsideCallSeq(Arg, PtrOff,
6039 CallSeqStart,
6040 Flags, DAG, dl);
6041
6042 // When a register is available, pass a small aggregate right-justified.
6043 if (Size < 8 && GPR_idx != NumGPRs) {
6044 // The easiest way to get this right-justified in a register
6045 // is to copy the structure into the rightmost portion of a
6046 // local variable slot, then load the whole slot into the
6047 // register.
6048 // FIXME: The memcpy seems to produce pretty awful code for
6049 // small aggregates, particularly for packed ones.
6050 // FIXME: It would be preferable to use the slot in the
6051 // parameter save area instead of a new local variable.
6052 SDValue AddPtr = PtrOff;
6053 if (!isLittleEndian) {
6054 SDValue Const = DAG.getConstant(8 - Size, dl, PtrOff.getValueType());
6055 AddPtr = DAG.getNode(ISD::ADD, dl, PtrVT, PtrOff, Const);
6056 }
6057 Chain = CallSeqStart = createMemcpyOutsideCallSeq(Arg, AddPtr,
6058 CallSeqStart,
6059 Flags, DAG, dl);
6060
6061 // Load the slot into the register.
6062 SDValue Load =
6063 DAG.getLoad(PtrVT, dl, Chain, PtrOff, MachinePointerInfo());
6064 MemOpChains.push_back(Load.getValue(1));
6065 RegsToPass.push_back(std::make_pair(GPR[GPR_idx++], Load));
6066
6067 // Done with this argument.
6068 ArgOffset += PtrByteSize;
6069 continue;
6070 }
6071
6072 // For aggregates larger than PtrByteSize, copy the pieces of the
6073 // object that fit into registers from the parameter save area.
6074 for (unsigned j=0; j<Size; j+=PtrByteSize) {
6075 SDValue Const = DAG.getConstant(j, dl, PtrOff.getValueType());
6076 SDValue AddArg = DAG.getNode(ISD::ADD, dl, PtrVT, Arg, Const);
6077 if (GPR_idx != NumGPRs) {
6078 SDValue Load =
6079 DAG.getLoad(PtrVT, dl, Chain, AddArg, MachinePointerInfo());
6080 MemOpChains.push_back(Load.getValue(1));
6081 RegsToPass.push_back(std::make_pair(GPR[GPR_idx++], Load));
6082 ArgOffset += PtrByteSize;
6083 } else {
6084 ArgOffset += ((Size - j + PtrByteSize-1)/PtrByteSize)*PtrByteSize;
6085 break;
6086 }
6087 }
6088 continue;
6089 }
6090
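// Editorial sketch, not part of PPCISelLowering.cpp: the byval path above
// right-justifies aggregates smaller than a register on big-endian targets
// by biasing the destination pointer before the memcpy. A minimal
// standalone illustration of that offset computation (the helper name and
// parameters are hypothetical):
#include <cstdint>
// Byte offset within a register-width parameter slot at which a small
// aggregate of `Size` bytes is copied so that it ends up right-justified
// on big-endian targets; little-endian targets copy at offset 0.
static inline uint64_t byValSlotOffset(uint64_t Size, uint64_t PtrByteSize,
                                       bool IsLittleEndian) {
  return (IsLittleEndian || Size >= PtrByteSize) ? 0 : PtrByteSize - Size;
}
// Example: a 3-byte struct in an 8-byte slot is copied at offset 5, so the
// full-slot load that follows leaves it in the low-order bytes of the GPR.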
6091 switch (Arg.getSimpleValueType().SimpleTy) {
6092     default: llvm_unreachable("Unexpected ValueType for argument!");
6093 case MVT::i1:
6094 case MVT::i32:
6095 case MVT::i64:
6096 if (Flags.isNest()) {
6097 // The 'nest' parameter, if any, is passed in R11.
6098 RegsToPass.push_back(std::make_pair(PPC::X11, Arg));
6099 break;
6100 }
6101
6102 // These can be scalar arguments or elements of an integer array type
6103 // passed directly. Clang may use those instead of "byval" aggregate
6104 // types to avoid forcing arguments to memory unnecessarily.
6105 if (GPR_idx != NumGPRs) {
6106 RegsToPass.push_back(std::make_pair(GPR[GPR_idx++], Arg));
6107 } else {
6108 if (IsFastCall)
6109 ComputePtrOff();
6110
6111         assert(HasParameterArea &&
6112                "Parameter area must exist to pass an argument in memory.");
6113 LowerMemOpCallTo(DAG, MF, Chain, Arg, PtrOff, SPDiff, ArgOffset,
6114 true, CFlags.IsTailCall, false, MemOpChains,
6115 TailCallArguments, dl);
6116 if (IsFastCall)
6117 ArgOffset += PtrByteSize;
6118 }
6119 if (!IsFastCall)
6120 ArgOffset += PtrByteSize;
6121 break;
6122 case MVT::f32:
6123 case MVT::f64: {
6124 // These can be scalar arguments or elements of a float array type
6125 // passed directly. The latter are used to implement ELFv2 homogenous
6126 // float aggregates.
6127
6128 // Named arguments go into FPRs first, and once they overflow, the
6129 // remaining arguments go into GPRs and then the parameter save area.
6130 // Unnamed arguments for vararg functions always go to GPRs and
6131 // then the parameter save area. For now, put all arguments to vararg
6132 // routines always in both locations (FPR *and* GPR or stack slot).
6133 bool NeedGPROrStack = CFlags.IsVarArg || FPR_idx == NumFPRs;
6134 bool NeededLoad = false;
6135
6136 // First load the argument into the next available FPR.
6137 if (FPR_idx != NumFPRs)
6138 RegsToPass.push_back(std::make_pair(FPR[FPR_idx++], Arg));
6139
6140 // Next, load the argument into GPR or stack slot if needed.
6141 if (!NeedGPROrStack)
6142 ;
6143 else if (GPR_idx != NumGPRs && !IsFastCall) {
6144 // FIXME: We may want to re-enable this for CallingConv::Fast on the P8
6145 // once we support fp <-> gpr moves.
6146
6147 // In the non-vararg case, this can only ever happen in the
6148 // presence of f32 array types, since otherwise we never run
6149 // out of FPRs before running out of GPRs.
6150 SDValue ArgVal;
6151
6152 // Double values are always passed in a single GPR.
6153 if (Arg.getValueType() != MVT::f32) {
6154 ArgVal = DAG.getNode(ISD::BITCAST, dl, MVT::i64, Arg);
6155
6156 // Non-array float values are extended and passed in a GPR.
6157 } else if (!Flags.isInConsecutiveRegs()) {
6158 ArgVal = DAG.getNode(ISD::BITCAST, dl, MVT::i32, Arg);
6159 ArgVal = DAG.getNode(ISD::ANY_EXTEND, dl, MVT::i64, ArgVal);
6160
6161 // If we have an array of floats, we collect every odd element
6162 // together with its predecessor into one GPR.
6163 } else if (ArgOffset % PtrByteSize != 0) {
6164 SDValue Lo, Hi;
6165 Lo = DAG.getNode(ISD::BITCAST, dl, MVT::i32, OutVals[i - 1]);
6166 Hi = DAG.getNode(ISD::BITCAST, dl, MVT::i32, Arg);
6167 if (!isLittleEndian)
6168 std::swap(Lo, Hi);
6169 ArgVal = DAG.getNode(ISD::BUILD_PAIR, dl, MVT::i64, Lo, Hi);
6170
6171 // The final element, if even, goes into the first half of a GPR.
6172 } else if (Flags.isInConsecutiveRegsLast()) {
6173 ArgVal = DAG.getNode(ISD::BITCAST, dl, MVT::i32, Arg);
6174 ArgVal = DAG.getNode(ISD::ANY_EXTEND, dl, MVT::i64, ArgVal);
6175 if (!isLittleEndian)
6176 ArgVal = DAG.getNode(ISD::SHL, dl, MVT::i64, ArgVal,
6177 DAG.getConstant(32, dl, MVT::i32));
6178
6179 // Non-final even elements are skipped; they will be handled
6180         // together with the subsequent argument on the next go-around.
6181 } else
6182 ArgVal = SDValue();
6183
6184 if (ArgVal.getNode())
6185 RegsToPass.push_back(std::make_pair(GPR[GPR_idx++], ArgVal));
6186 } else {
6187 if (IsFastCall)
6188 ComputePtrOff();
6189
6190 // Single-precision floating-point values are mapped to the
6191 // second (rightmost) word of the stack doubleword.
6192 if (Arg.getValueType() == MVT::f32 &&
6193 !isLittleEndian && !Flags.isInConsecutiveRegs()) {
6194 SDValue ConstFour = DAG.getConstant(4, dl, PtrOff.getValueType());
6195 PtrOff = DAG.getNode(ISD::ADD, dl, PtrVT, PtrOff, ConstFour);
6196 }
6197
6198         assert(HasParameterArea &&
6199                "Parameter area must exist to pass an argument in memory.");
6200 LowerMemOpCallTo(DAG, MF, Chain, Arg, PtrOff, SPDiff, ArgOffset,
6201 true, CFlags.IsTailCall, false, MemOpChains,
6202 TailCallArguments, dl);
6203
6204 NeededLoad = true;
6205 }
6206 // When passing an array of floats, the array occupies consecutive
6207 // space in the argument area; only round up to the next doubleword
6208 // at the end of the array. Otherwise, each float takes 8 bytes.
6209 if (!IsFastCall || NeededLoad) {
6210 ArgOffset += (Arg.getValueType() == MVT::f32 &&
6211 Flags.isInConsecutiveRegs()) ? 4 : 8;
6212 if (Flags.isInConsecutiveRegsLast())
6213 ArgOffset = ((ArgOffset + PtrByteSize - 1)/PtrByteSize) * PtrByteSize;
6214 }
6215 break;
6216 }
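// Editorial sketch, not part of PPCISelLowering.cpp: the f32/f64 case above
// advances ArgOffset by 4 bytes per element of an f32 array held in
// consecutive registers and by 8 bytes otherwise, rounding up to a
// doubleword after the array's last element. A standalone version of that
// bookkeeping (hypothetical names, assuming 8-byte pointers):
static unsigned advanceFloatArgOffset(unsigned ArgOffset, bool IsF32Array,
                                      bool IsLastInArray,
                                      unsigned PtrByteSize = 8) {
  ArgOffset += IsF32Array ? 4 : 8;
  if (IsLastInArray)
    ArgOffset = (ArgOffset + PtrByteSize - 1) / PtrByteSize * PtrByteSize;
  return ArgOffset;
}
// Example: a three-element f32 array starting at offset 48 advances
// 48 -> 52 -> 56 -> 60, and the final element rounds the offset up to 64.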
6217 case MVT::v4f32:
6218 case MVT::v4i32:
6219 case MVT::v8i16:
6220 case MVT::v16i8:
6221 case MVT::v2f64:
6222 case MVT::v2i64:
6223 case MVT::v1i128:
6224 case MVT::f128:
6225 // These can be scalar arguments or elements of a vector array type
6226 // passed directly. The latter are used to implement ELFv2 homogenous
6227 // vector aggregates.
6228
6229 // For a varargs call, named arguments go into VRs or on the stack as
6230 // usual; unnamed arguments always go to the stack or the corresponding
6231 // GPRs when within range. For now, we always put the value in both
6232 // locations (or even all three).
6233 if (CFlags.IsVarArg) {
6234       assert(HasParameterArea &&
6235              "Parameter area must exist if we have a varargs call.");
6236 // We could elide this store in the case where the object fits
6237 // entirely in R registers. Maybe later.
6238 SDValue Store =
6239 DAG.getStore(Chain, dl, Arg, PtrOff, MachinePointerInfo());
6240 MemOpChains.push_back(Store);
6241 if (VR_idx != NumVRs) {
6242 SDValue Load =
6243 DAG.getLoad(MVT::v4f32, dl, Store, PtrOff, MachinePointerInfo());
6244 MemOpChains.push_back(Load.getValue(1));
6245 RegsToPass.push_back(std::make_pair(VR[VR_idx++], Load));
6246 }
6247 ArgOffset += 16;
6248 for (unsigned i=0; i<16; i+=PtrByteSize) {
6249 if (GPR_idx == NumGPRs)
6250 break;
6251 SDValue Ix = DAG.getNode(ISD::ADD, dl, PtrVT, PtrOff,
6252 DAG.getConstant(i, dl, PtrVT));
6253 SDValue Load =
6254 DAG.getLoad(PtrVT, dl, Store, Ix, MachinePointerInfo());
6255 MemOpChains.push_back(Load.getValue(1));
6256 RegsToPass.push_back(std::make_pair(GPR[GPR_idx++], Load));
6257 }
6258 break;
6259 }
6260
6261 // Non-varargs Altivec params go into VRs or on the stack.
6262 if (VR_idx != NumVRs) {
6263 RegsToPass.push_back(std::make_pair(VR[VR_idx++], Arg));
6264 } else {
6265 if (IsFastCall)
6266 ComputePtrOff();
6267
6268 assert(HasParameterArea &&((HasParameterArea && "Parameter area must exist to pass an argument in memory."
) ? static_cast<void> (0) : __assert_fail ("HasParameterArea && \"Parameter area must exist to pass an argument in memory.\""
, "/build/llvm-toolchain-snapshot-13~++20210413100635+64c24f493e5f/llvm/lib/Target/PowerPC/PPCISelLowering.cpp"
, 6269, __PRETTY_FUNCTION__))
6269 "Parameter area must exist to pass an argument in memory.")((HasParameterArea && "Parameter area must exist to pass an argument in memory."
) ? static_cast<void> (0) : __assert_fail ("HasParameterArea && \"Parameter area must exist to pass an argument in memory.\""
, "/build/llvm-toolchain-snapshot-13~++20210413100635+64c24f493e5f/llvm/lib/Target/PowerPC/PPCISelLowering.cpp"
, 6269, __PRETTY_FUNCTION__))
;
6270 LowerMemOpCallTo(DAG, MF, Chain, Arg, PtrOff, SPDiff, ArgOffset,
6271 true, CFlags.IsTailCall, true, MemOpChains,
6272 TailCallArguments, dl);
6273 if (IsFastCall)
6274 ArgOffset += 16;
6275 }
6276
6277 if (!IsFastCall)
6278 ArgOffset += 16;
6279 break;
6280 }
6281 }
6282
6283   assert((!HasParameterArea || NumBytesActuallyUsed == ArgOffset) &&
6284          "mismatch in size of parameter area");
6285 (void)NumBytesActuallyUsed;
6286
6287 if (!MemOpChains.empty())
6288 Chain = DAG.getNode(ISD::TokenFactor, dl, MVT::Other, MemOpChains);
6289
6290 // Check if this is an indirect call (MTCTR/BCTRL).
6291 // See prepareDescriptorIndirectCall and buildCallOperands for more
6292 // information about calls through function pointers in the 64-bit SVR4 ABI.
6293 if (CFlags.IsIndirect) {
6294 // For 64-bit ELFv2 ABI with PCRel, do not save the TOC of the
6295 // caller in the TOC save area.
6296 if (isTOCSaveRestoreRequired(Subtarget)) {
6297       assert(!CFlags.IsTailCall && "Indirect tails calls not supported");
6298 // Load r2 into a virtual register and store it to the TOC save area.
6299 setUsesTOCBasePtr(DAG);
6300 SDValue Val = DAG.getCopyFromReg(Chain, dl, PPC::X2, MVT::i64);
6301 // TOC save area offset.
6302 unsigned TOCSaveOffset = Subtarget.getFrameLowering()->getTOCSaveOffset();
6303 SDValue PtrOff = DAG.getIntPtrConstant(TOCSaveOffset, dl);
6304 SDValue AddPtr = DAG.getNode(ISD::ADD, dl, PtrVT, StackPtr, PtrOff);
6305 Chain = DAG.getStore(Val.getValue(1), dl, Val, AddPtr,
6306 MachinePointerInfo::getStack(
6307 DAG.getMachineFunction(), TOCSaveOffset));
6308 }
6309 // In the ELFv2 ABI, R12 must contain the address of an indirect callee.
6310 // This does not mean the MTCTR instruction must use R12; it's easier
6311 // to model this as an extra parameter, so do that.
6312 if (isELFv2ABI && !CFlags.IsPatchPoint)
6313 RegsToPass.push_back(std::make_pair((unsigned)PPC::X12, Callee));
6314 }
6315
6316 // Build a sequence of copy-to-reg nodes chained together with token chain
6317 // and flag operands which copy the outgoing args into the appropriate regs.
6318 SDValue InFlag;
6319 for (unsigned i = 0, e = RegsToPass.size(); i != e; ++i) {
6320 Chain = DAG.getCopyToReg(Chain, dl, RegsToPass[i].first,
6321 RegsToPass[i].second, InFlag);
6322 InFlag = Chain.getValue(1);
6323 }
6324
6325 if (CFlags.IsTailCall && !IsSibCall)
6326 PrepareTailCall(DAG, InFlag, Chain, dl, SPDiff, NumBytes, LROp, FPOp,
6327 TailCallArguments);
6328
6329 return FinishCall(CFlags, dl, DAG, RegsToPass, InFlag, Chain, CallSeqStart,
6330 Callee, SPDiff, NumBytes, Ins, InVals, CB);
6331}
6332
6333// Returns true when the shadow of a general purpose argument register
6334// in the parameter save area is aligned to at least 'RequiredAlign'.
6335static bool isGPRShadowAligned(MCPhysReg Reg, Align RequiredAlign) {
6336   assert(RequiredAlign.value() <= 16 &&
6337          "Required alignment greater than stack alignment.");
6338 switch (Reg) {
6339 default:
6340 report_fatal_error("called on invalid register.");
6341 case PPC::R5:
6342 case PPC::R9:
6343 case PPC::X3:
6344 case PPC::X5:
6345 case PPC::X7:
6346 case PPC::X9:
6347     // These registers are 16 byte aligned, which is the strictest alignment
6348 // we can support.
6349 return true;
6350 case PPC::R3:
6351 case PPC::R7:
6352 case PPC::X4:
6353 case PPC::X6:
6354 case PPC::X8:
6355 case PPC::X10:
6356 // The shadow of these registers in the PSA is 8 byte aligned.
6357 return RequiredAlign <= 8;
6358 case PPC::R4:
6359 case PPC::R6:
6360 case PPC::R8:
6361 case PPC::R10:
6362 return RequiredAlign <= 4;
6363 }
6364}
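// Editorial sketch, not part of PPCISelLowering.cpp: the register table
// above encodes the alignment of each GPR's shadow slot in the parameter
// save area, assuming the AIX linkage area is 24 bytes on PPC32 and 48
// bytes on PPC64. The same answer can be derived from the shadow offset
// (hypothetical helper; GPRIndex is 0 for R3/X3):
static bool gprShadowOffsetAligned(unsigned GPRIndex, bool IsPPC64,
                                   unsigned RequiredAlign) {
  const unsigned LinkageSize = IsPPC64 ? 48 : 24;
  const unsigned SlotSize = IsPPC64 ? 8 : 4;
  return (LinkageSize + GPRIndex * SlotSize) % RequiredAlign == 0;
}
// Example: X5 (GPRIndex 2) shadows offset 48 + 2 * 8 = 64, which satisfies
// a 16-byte requirement, while X4 shadows offset 56 and does not.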
6365
6366static bool CC_AIX(unsigned ValNo, MVT ValVT, MVT LocVT,
6367 CCValAssign::LocInfo LocInfo, ISD::ArgFlagsTy ArgFlags,
6368 CCState &S) {
6369 AIXCCState &State = static_cast<AIXCCState &>(S);
6370 const PPCSubtarget &Subtarget = static_cast<const PPCSubtarget &>(
6371 State.getMachineFunction().getSubtarget());
6372 const bool IsPPC64 = Subtarget.isPPC64();
6373 const Align PtrAlign = IsPPC64 ? Align(8) : Align(4);
6374 const MVT RegVT = IsPPC64 ? MVT::i64 : MVT::i32;
6375
6376 if (ValVT == MVT::f128)
6377 report_fatal_error("f128 is unimplemented on AIX.");
6378
6379 if (ArgFlags.isNest())
6380 report_fatal_error("Nest arguments are unimplemented.");
6381
6382 static const MCPhysReg GPR_32[] = {// 32-bit registers.
6383 PPC::R3, PPC::R4, PPC::R5, PPC::R6,
6384 PPC::R7, PPC::R8, PPC::R9, PPC::R10};
6385 static const MCPhysReg GPR_64[] = {// 64-bit registers.
6386 PPC::X3, PPC::X4, PPC::X5, PPC::X6,
6387 PPC::X7, PPC::X8, PPC::X9, PPC::X10};
6388
6389 static const MCPhysReg VR[] = {// Vector registers.
6390 PPC::V2, PPC::V3, PPC::V4, PPC::V5,
6391 PPC::V6, PPC::V7, PPC::V8, PPC::V9,
6392 PPC::V10, PPC::V11, PPC::V12, PPC::V13};
6393
6394 if (ArgFlags.isByVal()) {
6395 if (ArgFlags.getNonZeroByValAlign() > PtrAlign)
6396 report_fatal_error("Pass-by-value arguments with alignment greater than "
6397 "register width are not supported.");
6398
6399 const unsigned ByValSize = ArgFlags.getByValSize();
6400
6401 // An empty aggregate parameter takes up no storage and no registers,
6402 // but needs a MemLoc for a stack slot for the formal arguments side.
6403 if (ByValSize == 0) {
6404 State.addLoc(CCValAssign::getMem(ValNo, MVT::INVALID_SIMPLE_VALUE_TYPE,
6405 State.getNextStackOffset(), RegVT,
6406 LocInfo));
6407 return false;
6408 }
6409
6410 const unsigned StackSize = alignTo(ByValSize, PtrAlign);
6411 unsigned Offset = State.AllocateStack(StackSize, PtrAlign);
6412 for (const unsigned E = Offset + StackSize; Offset < E;
6413 Offset += PtrAlign.value()) {
6414 if (unsigned Reg = State.AllocateReg(IsPPC64 ? GPR_64 : GPR_32))
6415 State.addLoc(CCValAssign::getReg(ValNo, ValVT, Reg, RegVT, LocInfo));
6416 else {
6417 State.addLoc(CCValAssign::getMem(ValNo, MVT::INVALID_SIMPLE_VALUE_TYPE,
6418 Offset, MVT::INVALID_SIMPLE_VALUE_TYPE,
6419 LocInfo));
6420 break;
6421 }
6422 }
6423 return false;
6424 }
6425
6426 // Arguments always reserve parameter save area.
6427 switch (ValVT.SimpleTy) {
6428 default:
6429 report_fatal_error("Unhandled value type for argument.");
6430 case MVT::i64:
6431 // i64 arguments should have been split to i32 for PPC32.
6432     assert(IsPPC64 && "PPC32 should have split i64 values.");
6433     LLVM_FALLTHROUGH;
6434 case MVT::i1:
6435 case MVT::i32: {
6436 const unsigned Offset = State.AllocateStack(PtrAlign.value(), PtrAlign);
6437 // AIX integer arguments are always passed in register width.
6438 if (ValVT.getFixedSizeInBits() < RegVT.getFixedSizeInBits())
6439 LocInfo = ArgFlags.isSExt() ? CCValAssign::LocInfo::SExt
6440 : CCValAssign::LocInfo::ZExt;
6441 if (unsigned Reg = State.AllocateReg(IsPPC64 ? GPR_64 : GPR_32))
6442 State.addLoc(CCValAssign::getReg(ValNo, ValVT, Reg, RegVT, LocInfo));
6443 else
6444 State.addLoc(CCValAssign::getMem(ValNo, ValVT, Offset, RegVT, LocInfo));
6445
6446 return false;
6447 }
6448 case MVT::f32:
6449 case MVT::f64: {
6450 // Parameter save area (PSA) is reserved even if the float passes in fpr.
6451 const unsigned StoreSize = LocVT.getStoreSize();
6452 // Floats are always 4-byte aligned in the PSA on AIX.
6453 // This includes f64 in 64-bit mode for ABI compatibility.
6454 const unsigned Offset =
6455 State.AllocateStack(IsPPC64 ? 8 : StoreSize, Align(4));
6456 unsigned FReg = State.AllocateReg(FPR);
6457 if (FReg)
6458 State.addLoc(CCValAssign::getReg(ValNo, ValVT, FReg, LocVT, LocInfo));
6459
6460 // Reserve and initialize GPRs or initialize the PSA as required.
6461 for (unsigned I = 0; I < StoreSize; I += PtrAlign.value()) {
6462 if (unsigned Reg = State.AllocateReg(IsPPC64 ? GPR_64 : GPR_32)) {
6463         assert(FReg && "An FPR should be available when a GPR is reserved.");
6464 if (State.isVarArg()) {
6465 // Successfully reserved GPRs are only initialized for vararg calls.
6466 // Custom handling is required for:
6467 // f64 in PPC32 needs to be split into 2 GPRs.
6468 // f32 in PPC64 needs to occupy only lower 32 bits of 64-bit GPR.
6469 State.addLoc(
6470 CCValAssign::getCustomReg(ValNo, ValVT, Reg, RegVT, LocInfo));
6471 }
6472 } else {
6473 // If there are insufficient GPRs, the PSA needs to be initialized.
6474 // Initialization occurs even if an FPR was initialized for
6475 // compatibility with the AIX XL compiler. The full memory for the
6476 // argument will be initialized even if a prior word is saved in GPR.
6477 // A custom memLoc is used when the argument also passes in FPR so
6478 // that the callee handling can skip over it easily.
6479 State.addLoc(
6480 FReg ? CCValAssign::getCustomMem(ValNo, ValVT, Offset, LocVT,
6481 LocInfo)
6482 : CCValAssign::getMem(ValNo, ValVT, Offset, LocVT, LocInfo));
6483 break;
6484 }
6485 }
6486
6487 return false;
6488 }
6489 case MVT::v4f32:
6490 case MVT::v4i32:
6491 case MVT::v8i16:
6492 case MVT::v16i8:
6493 case MVT::v2i64:
6494 case MVT::v2f64:
6495 case MVT::v1i128: {
6496 const unsigned VecSize = 16;
6497 const Align VecAlign(VecSize);
6498
6499 if (!State.isVarArg()) {
6500 // If there are vector registers remaining we don't consume any stack
6501 // space.
6502 if (unsigned VReg = State.AllocateReg(VR)) {
6503 State.addLoc(CCValAssign::getReg(ValNo, ValVT, VReg, LocVT, LocInfo));
6504 return false;
6505 }
6506 // Vectors passed on the stack do not shadow GPRs or FPRs even though they
6507 // might be allocated in the portion of the PSA that is shadowed by the
6508 // GPRs.
6509 const unsigned Offset = State.AllocateStack(VecSize, VecAlign);
6510 State.addLoc(CCValAssign::getMem(ValNo, ValVT, Offset, LocVT, LocInfo));
6511 return false;
6512 }
6513
6514 const unsigned PtrSize = IsPPC64 ? 8 : 4;
6515 ArrayRef<MCPhysReg> GPRs = IsPPC64 ? GPR_64 : GPR_32;
6516
6517 unsigned NextRegIndex = State.getFirstUnallocated(GPRs);
6518 // Burn any underaligned registers and their shadowed stack space until
6519 // we reach the required alignment.
6520 while (NextRegIndex != GPRs.size() &&
6521 !isGPRShadowAligned(GPRs[NextRegIndex], VecAlign)) {
6522 // Shadow allocate register and its stack shadow.
6523 unsigned Reg = State.AllocateReg(GPRs);
6524 State.AllocateStack(PtrSize, PtrAlign);
6525       assert(Reg && "Allocating register unexpectedly failed.");
6526 (void)Reg;
6527 NextRegIndex = State.getFirstUnallocated(GPRs);
6528 }
6529
6530 // Vectors that are passed as fixed arguments are handled differently.
6531 // They are passed in VRs if any are available (unlike arguments passed
6532     // through ellipses) and shadow GPRs (unlike arguments to non-vaarg
6533     // functions).
6534 if (State.isFixed(ValNo)) {
6535 if (unsigned VReg = State.AllocateReg(VR)) {
6536 State.addLoc(CCValAssign::getReg(ValNo, ValVT, VReg, LocVT, LocInfo));
6537 // Shadow allocate GPRs and stack space even though we pass in a VR.
6538 for (unsigned I = 0; I != VecSize; I += PtrSize)
6539 State.AllocateReg(GPRs);
6540 State.AllocateStack(VecSize, VecAlign);
6541 return false;
6542 }
6543 // No vector registers remain so pass on the stack.
6544 const unsigned Offset = State.AllocateStack(VecSize, VecAlign);
6545 State.addLoc(CCValAssign::getMem(ValNo, ValVT, Offset, LocVT, LocInfo));
6546 return false;
6547 }
6548
6549     // If all GPRs are consumed then we pass the argument fully on the stack.
6550 if (NextRegIndex == GPRs.size()) {
6551 const unsigned Offset = State.AllocateStack(VecSize, VecAlign);
6552 State.addLoc(CCValAssign::getMem(ValNo, ValVT, Offset, LocVT, LocInfo));
6553 return false;
6554 }
6555
6556 // Corner case for 32-bit codegen. We have 2 registers to pass the first
6557 // half of the argument, and then need to pass the remaining half on the
6558 // stack.
6559 if (GPRs[NextRegIndex] == PPC::R9) {
6560 const unsigned Offset = State.AllocateStack(VecSize, VecAlign);
6561 State.addLoc(
6562 CCValAssign::getCustomMem(ValNo, ValVT, Offset, LocVT, LocInfo));
6563
6564 const unsigned FirstReg = State.AllocateReg(PPC::R9);
6565 const unsigned SecondReg = State.AllocateReg(PPC::R10);
6566       assert(FirstReg && SecondReg &&
6567              "Allocating R9 or R10 unexpectedly failed.");
6568 State.addLoc(
6569 CCValAssign::getCustomReg(ValNo, ValVT, FirstReg, RegVT, LocInfo));
6570 State.addLoc(
6571 CCValAssign::getCustomReg(ValNo, ValVT, SecondReg, RegVT, LocInfo));
6572 return false;
6573 }
6574
6575 // We have enough GPRs to fully pass the vector argument, and we have
6576 // already consumed any underaligned registers. Start with the custom
6577 // MemLoc and then the custom RegLocs.
6578 const unsigned Offset = State.AllocateStack(VecSize, VecAlign);
6579 State.addLoc(
6580 CCValAssign::getCustomMem(ValNo, ValVT, Offset, LocVT, LocInfo));
6581 for (unsigned I = 0; I != VecSize; I += PtrSize) {
6582 const unsigned Reg = State.AllocateReg(GPRs);
6583       assert(Reg && "Failed to allocated register for vararg vector argument");
6584 State.addLoc(
6585 CCValAssign::getCustomReg(ValNo, ValVT, Reg, RegVT, LocInfo));
6586 }
6587 return false;
6588 }
6589 }
6590 return true;
6591}
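// Editorial sketch, not part of PPCISelLowering.cpp: in the byval path of
// CC_AIX above, the aggregate is rounded up to whole register-width slots
// and each slot consumes the next free GPR until the GPRs run out, with the
// remainder passed through the parameter save area. A minimal standalone
// sketch of that split (hypothetical names):
#include <algorithm>
struct ByValSplit {
  unsigned SlotsInGPRs;  // register-width slots passed in GPRs
  unsigned BytesOnStack; // remaining bytes passed via the PSA
};
static ByValSplit splitByVal(unsigned ByValSize, unsigned PtrSize,
                             unsigned FreeGPRs) {
  const unsigned Slots = (ByValSize + PtrSize - 1) / PtrSize;
  const unsigned InRegs = std::min(Slots, FreeGPRs);
  return {InRegs, (Slots - InRegs) * PtrSize};
}
// Example: a 20-byte struct on PPC64 with two free GPRs occupies three
// slots; two go in GPRs and the last 8 bytes land in the parameter save
// area.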
6592
6593static const TargetRegisterClass *getRegClassForSVT(MVT::SimpleValueType SVT,
6594 bool IsPPC64) {
6595   assert((IsPPC64 || SVT != MVT::i64) &&
6596          "i64 should have been split for 32-bit codegen.");
6597
6598 switch (SVT) {
6599 default:
6600 report_fatal_error("Unexpected value type for formal argument");
6601 case MVT::i1:
6602 case MVT::i32:
6603 case MVT::i64:
6604 return IsPPC64 ? &PPC::G8RCRegClass : &PPC::GPRCRegClass;
6605 case MVT::f32:
6606 return &PPC::F4RCRegClass;
6607 case MVT::f64:
6608 return &PPC::F8RCRegClass;
6609 case MVT::v4f32:
6610 case MVT::v4i32:
6611 case MVT::v8i16:
6612 case MVT::v16i8:
6613 case MVT::v2i64:
6614 case MVT::v2f64:
6615 case MVT::v1i128:
6616 return &PPC::VRRCRegClass;
6617 }
6618}
6619
6620static SDValue truncateScalarIntegerArg(ISD::ArgFlagsTy Flags, EVT ValVT,
6621 SelectionDAG &DAG, SDValue ArgValue,
6622 MVT LocVT, const SDLoc &dl) {
6623   assert(ValVT.isScalarInteger() && LocVT.isScalarInteger());
6624   assert(ValVT.getFixedSizeInBits() < LocVT.getFixedSizeInBits());
6625
6626 if (Flags.isSExt())
6627 ArgValue = DAG.getNode(ISD::AssertSext, dl, LocVT, ArgValue,
6628 DAG.getValueType(ValVT));
6629 else if (Flags.isZExt())
6630 ArgValue = DAG.getNode(ISD::AssertZext, dl, LocVT, ArgValue,
6631 DAG.getValueType(ValVT));
6632
6633 return DAG.getNode(ISD::TRUNCATE, dl, ValVT, ArgValue);
6634}
6635
6636static unsigned mapArgRegToOffsetAIX(unsigned Reg, const PPCFrameLowering *FL) {
6637 const unsigned LASize = FL->getLinkageSize();
6638
6639 if (PPC::GPRCRegClass.contains(Reg)) {
6640     assert(Reg >= PPC::R3 && Reg <= PPC::R10 &&
6641            "Reg must be a valid argument register!");
6642 return LASize + 4 * (Reg - PPC::R3);
6643 }
6644
6645 if (PPC::G8RCRegClass.contains(Reg)) {
6646     assert(Reg >= PPC::X3 && Reg <= PPC::X10 &&
6647            "Reg must be a valid argument register!");
6648 return LASize + 8 * (Reg - PPC::X3);
6649 }
6650
6651   llvm_unreachable("Only general purpose registers expected.");
6652}
6653
6654// AIX ABI Stack Frame Layout:
6655//
6656// Low Memory +--------------------------------------------+
6657// SP +---> | Back chain | ---+
6658// | +--------------------------------------------+ |
6659// | | Saved Condition Register | |
6660// | +--------------------------------------------+ |
6661// | | Saved Linkage Register | |
6662// | +--------------------------------------------+ | Linkage Area
6663// | | Reserved for compilers | |
6664// | +--------------------------------------------+ |
6665// | | Reserved for binders | |
6666// | +--------------------------------------------+ |
6667// | | Saved TOC pointer | ---+
6668// | +--------------------------------------------+
6669// | | Parameter save area |
6670// | +--------------------------------------------+
6671// | | Alloca space |
6672// | +--------------------------------------------+
6673// | | Local variable space |
6674// | +--------------------------------------------+
6675// | | Float/int conversion temporary |
6676// | +--------------------------------------------+
6677// | | Save area for AltiVec registers |
6678// | +--------------------------------------------+
6679// | | AltiVec alignment padding |
6680// | +--------------------------------------------+
6681// | | Save area for VRSAVE register |
6682// | +--------------------------------------------+
6683// | | Save area for General Purpose registers |
6684// | +--------------------------------------------+
6685// | | Save area for Floating Point registers |
6686// | +--------------------------------------------+
6687// +---- | Back chain |
6688// High Memory +--------------------------------------------+
6689//
6690// Specifications:
6691// AIX 7.2 Assembler Language Reference
6692// Subroutine linkage convention
6693
6694SDValue PPCTargetLowering::LowerFormalArguments_AIX(
6695 SDValue Chain, CallingConv::ID CallConv, bool isVarArg,
6696 const SmallVectorImpl<ISD::InputArg> &Ins, const SDLoc &dl,
6697 SelectionDAG &DAG, SmallVectorImpl<SDValue> &InVals) const {
6698
6699   assert((CallConv == CallingConv::C || CallConv == CallingConv::Cold ||
6700           CallConv == CallingConv::Fast) &&
6701          "Unexpected calling convention!");
6702
6703 if (getTargetMachine().Options.GuaranteedTailCallOpt)
6704 report_fatal_error("Tail call support is unimplemented on AIX.");
6705
6706 if (useSoftFloat())
6707 report_fatal_error("Soft float support is unimplemented on AIX.");
6708
6709 const PPCSubtarget &Subtarget =
6710 static_cast<const PPCSubtarget &>(DAG.getSubtarget());
6711
6712 const bool IsPPC64 = Subtarget.isPPC64();
6713 const unsigned PtrByteSize = IsPPC64 ? 8 : 4;
6714
6715 // Assign locations to all of the incoming arguments.
6716 SmallVector<CCValAssign, 16> ArgLocs;
6717 MachineFunction &MF = DAG.getMachineFunction();
6718 MachineFrameInfo &MFI = MF.getFrameInfo();
6719 PPCFunctionInfo *FuncInfo = MF.getInfo<PPCFunctionInfo>();
6720 AIXCCState CCInfo(CallConv, isVarArg, MF, ArgLocs, *DAG.getContext());
6721
6722 const EVT PtrVT = getPointerTy(MF.getDataLayout());
6723 // Reserve space for the linkage area on the stack.
6724 const unsigned LinkageSize = Subtarget.getFrameLowering()->getLinkageSize();
6725 CCInfo.AllocateStack(LinkageSize, Align(PtrByteSize));
6726 CCInfo.AnalyzeFormalArguments(Ins, CC_AIX);
6727
6728 SmallVector<SDValue, 8> MemOps;
6729
6730 for (size_t I = 0, End = ArgLocs.size(); I != End; /* No increment here */) {
6731 CCValAssign &VA = ArgLocs[I++];
6732 MVT LocVT = VA.getLocVT();
6733 MVT ValVT = VA.getValVT();
6734 ISD::ArgFlagsTy Flags = Ins[VA.getValNo()].Flags;
6735 // For compatibility with the AIX XL compiler, the float args in the
6736 // parameter save area are initialized even if the argument is available
6737     // in a register. The caller is required to initialize both the register
6738     // and memory; however, the callee can choose to expect it in either.
6739 // The memloc is dismissed here because the argument is retrieved from
6740 // the register.
6741 if (VA.isMemLoc() && VA.needsCustom() && ValVT.isFloatingPoint())
6742 continue;
6743
6744 auto HandleMemLoc = [&]() {
6745 const unsigned LocSize = LocVT.getStoreSize();
6746 const unsigned ValSize = ValVT.getStoreSize();
6747       assert((ValSize <= LocSize) &&
6748              "Object size is larger than size of MemLoc");
6749 int CurArgOffset = VA.getLocMemOffset();
6750 // Objects are right-justified because AIX is big-endian.
6751 if (LocSize > ValSize)
6752 CurArgOffset += LocSize - ValSize;
6753 // Potential tail calls could cause overwriting of argument stack slots.
6754 const bool IsImmutable =
6755 !(getTargetMachine().Options.GuaranteedTailCallOpt &&
6756 (CallConv == CallingConv::Fast));
6757 int FI = MFI.CreateFixedObject(ValSize, CurArgOffset, IsImmutable);
6758 SDValue FIN = DAG.getFrameIndex(FI, PtrVT);
6759 SDValue ArgValue =
6760 DAG.getLoad(ValVT, dl, Chain, FIN, MachinePointerInfo());
6761 InVals.push_back(ArgValue);
6762 };
6763
6764 // Vector arguments to VaArg functions are passed both on the stack, and
6765 // in any available GPRs. Load the value from the stack and add the GPRs
6766 // as live ins.
6767 if (VA.isMemLoc() && VA.needsCustom()) {
6768       assert(ValVT.isVector() && "Unexpected Custom MemLoc type.");
6769       assert(isVarArg && "Only use custom memloc for vararg.");
6770 // ValNo of the custom MemLoc, so we can compare it to the ValNo of the
6771 // matching custom RegLocs.
6772 const unsigned OriginalValNo = VA.getValNo();
6773 (void)OriginalValNo;
6774
6775 auto HandleCustomVecRegLoc = [&]() {
6776         assert(I != End && ArgLocs[I].isRegLoc() && ArgLocs[I].needsCustom() &&
6777                "Missing custom RegLoc.");
6778 VA = ArgLocs[I++];
6779         assert(VA.getValVT().isVector() &&
6780                "Unexpected Val type for custom RegLoc.");
6781         assert(VA.getValNo() == OriginalValNo &&
6782                "ValNo mismatch between custom MemLoc and RegLoc.");
6783 MVT::SimpleValueType SVT = VA.getLocVT().SimpleTy;
6784 MF.addLiveIn(VA.getLocReg(), getRegClassForSVT(SVT, IsPPC64));
6785 };
6786
6787 HandleMemLoc();
6788       // In 64-bit there will be exactly 2 custom RegLocs that follow, and
6789       // in 32-bit there will be 2 custom RegLocs if we are passing in R9
6790       // and R10.
6791 HandleCustomVecRegLoc();
6792 HandleCustomVecRegLoc();
6793
6794 // If we are targeting 32-bit, there might be 2 extra custom RegLocs if
6795 // we passed the vector in R5, R6, R7 and R8.
6796 if (I != End && ArgLocs[I].isRegLoc() && ArgLocs[I].needsCustom()) {
6797         assert(!IsPPC64 &&
6798                "Only 2 custom RegLocs expected for 64-bit codegen.");
6799 HandleCustomVecRegLoc();
6800 HandleCustomVecRegLoc();
6801 }
6802
6803 continue;
6804 }
6805
6806 if (VA.isRegLoc()) {
6807 if (VA.getValVT().isScalarInteger())
6808 FuncInfo->appendParameterType(PPCFunctionInfo::FixedType);
6809 else if (VA.getValVT().isFloatingPoint() && !VA.getValVT().isVector())
6810 FuncInfo->appendParameterType(VA.getValVT().SimpleTy == MVT::f32
6811 ? PPCFunctionInfo::ShortFloatPoint
6812 : PPCFunctionInfo::LongFloatPoint);
6813 }
6814
6815 if (Flags.isByVal() && VA.isMemLoc()) {
6816 const unsigned Size =
6817 alignTo(Flags.getByValSize() ? Flags.getByValSize() : PtrByteSize,
6818 PtrByteSize);
6819 const int FI = MF.getFrameInfo().CreateFixedObject(
6820 Size, VA.getLocMemOffset(), /* IsImmutable */ false,
6821 /* IsAliased */ true);
6822 SDValue FIN = DAG.getFrameIndex(FI, PtrVT);
6823 InVals.push_back(FIN);
6824
6825 continue;
6826 }
6827
6828 if (Flags.isByVal()) {
6829       assert(VA.isRegLoc() && "MemLocs should already be handled.");
6830
6831 const MCPhysReg ArgReg = VA.getLocReg();
6832 const PPCFrameLowering *FL = Subtarget.getFrameLowering();
6833
6834 if (Flags.getNonZeroByValAlign() > PtrByteSize)
6835 report_fatal_error("Over aligned byvals not supported yet.");
6836
6837 const unsigned StackSize = alignTo(Flags.getByValSize(), PtrByteSize);
6838 const int FI = MF.getFrameInfo().CreateFixedObject(
6839 StackSize, mapArgRegToOffsetAIX(ArgReg, FL), /* IsImmutable */ false,
6840 /* IsAliased */ true);
6841 SDValue FIN = DAG.getFrameIndex(FI, PtrVT);
6842 InVals.push_back(FIN);
6843
6844 // Add live ins for all the RegLocs for the same ByVal.
6845 const TargetRegisterClass *RegClass =
6846 IsPPC64 ? &PPC::G8RCRegClass : &PPC::GPRCRegClass;
6847
6848 auto HandleRegLoc = [&, RegClass, LocVT](const MCPhysReg PhysReg,
6849 unsigned Offset) {
6850 const unsigned VReg = MF.addLiveIn(PhysReg, RegClass);
6851         // Since the caller's side has left-justified the aggregate in the
6852 // register, we can simply store the entire register into the stack
6853 // slot.
6854 SDValue CopyFrom = DAG.getCopyFromReg(Chain, dl, VReg, LocVT);
6855         // The store to the fixedstack object is needed because accessing a
6856 // field of the ByVal will use a gep and load. Ideally we will optimize
6857 // to extracting the value from the register directly, and elide the
6858         // stores when the argument's address is not taken, but that will need to
6859 // be future work.
6860 SDValue Store = DAG.getStore(
6861 CopyFrom.getValue(1), dl, CopyFrom,
6862 DAG.getObjectPtrOffset(dl, FIN, TypeSize::Fixed(Offset)),
6863 MachinePointerInfo::getFixedStack(MF, FI, Offset));
6864
6865 MemOps.push_back(Store);
6866 };
6867
6868 unsigned Offset = 0;
6869 HandleRegLoc(VA.getLocReg(), Offset);
6870 Offset += PtrByteSize;
6871 for (; Offset != StackSize && ArgLocs[I].isRegLoc();
6872 Offset += PtrByteSize) {
6873         assert(ArgLocs[I].getValNo() == VA.getValNo() &&
6874                "RegLocs should be for ByVal argument.");
6875
6876 const CCValAssign RL = ArgLocs[I++];
6877 HandleRegLoc(RL.getLocReg(), Offset);
6878 FuncInfo->appendParameterType(PPCFunctionInfo::FixedType);
6879 }
6880
6881 if (Offset != StackSize) {
6882         assert(ArgLocs[I].getValNo() == VA.getValNo() &&
6883                "Expected MemLoc for remaining bytes.");
6884         assert(ArgLocs[I].isMemLoc() && "Expected MemLoc for remaining bytes.");
6885         // Consume the MemLoc. The InVal has already been emitted, so nothing
6886 // more needs to be done.
6887 ++I;
6888 }
6889
6890 continue;
6891 }
6892
6893 if (VA.isRegLoc() && !VA.needsCustom()) {
6894 MVT::SimpleValueType SVT = ValVT.SimpleTy;
6895 unsigned VReg =
6896 MF.addLiveIn(VA.getLocReg(), getRegClassForSVT(SVT, IsPPC64));
6897 SDValue ArgValue = DAG.getCopyFromReg(Chain, dl, VReg, LocVT);
6898 if (ValVT.isScalarInteger() &&
6899 (ValVT.getFixedSizeInBits() < LocVT.getFixedSizeInBits())) {
6900 ArgValue =
6901 truncateScalarIntegerArg(Flags, ValVT, DAG, ArgValue, LocVT, dl);
6902 }
6903 InVals.push_back(ArgValue);
6904 continue;
6905 }
6906 if (VA.isMemLoc()) {
6907 HandleMemLoc();
6908 continue;
6909 }
6910 }
6911
6912 // On AIX a minimum of 8 words is saved to the parameter save area.
6913 const unsigned MinParameterSaveArea = 8 * PtrByteSize;
6914 // Area that is at least reserved in the caller of this function.
6915 unsigned CallerReservedArea =
6916 std::max(CCInfo.getNextStackOffset(), LinkageSize + MinParameterSaveArea);
6917
6918 // Set the size that is at least reserved in caller of this function. Tail
6919 // call optimized function's reserved stack space needs to be aligned so
6920 // that taking the difference between two stack areas will result in an
6921 // aligned stack.
6922 CallerReservedArea =
6923 EnsureStackAlignment(Subtarget.getFrameLowering(), CallerReservedArea);
6924 FuncInfo->setMinReservedArea(CallerReservedArea);
6925
6926 if (isVarArg) {
6927 FuncInfo->setVarArgsFrameIndex(
6928 MFI.CreateFixedObject(PtrByteSize, CCInfo.getNextStackOffset(), true));
6929 SDValue FIN = DAG.getFrameIndex(FuncInfo->getVarArgsFrameIndex(), PtrVT);
6930
6931 static const MCPhysReg GPR_32[] = {PPC::R3, PPC::R4, PPC::R5, PPC::R6,
6932 PPC::R7, PPC::R8, PPC::R9, PPC::R10};
6933
6934 static const MCPhysReg GPR_64[] = {PPC::X3, PPC::X4, PPC::X5, PPC::X6,
6935 PPC::X7, PPC::X8, PPC::X9, PPC::X10};
6936 const unsigned NumGPArgRegs = array_lengthof(IsPPC64 ? GPR_64 : GPR_32);
6937
6938 // The fixed integer arguments of a variadic function are stored to the
6939 // VarArgsFrameIndex on the stack so that they may be loaded by
6940 // dereferencing the result of va_next.
6941 for (unsigned GPRIndex =
6942 (CCInfo.getNextStackOffset() - LinkageSize) / PtrByteSize;
6943 GPRIndex < NumGPArgRegs; ++GPRIndex) {
6944
6945 const unsigned VReg =
6946 IsPPC64 ? MF.addLiveIn(GPR_64[GPRIndex], &PPC::G8RCRegClass)
6947 : MF.addLiveIn(GPR_32[GPRIndex], &PPC::GPRCRegClass);
6948
6949 SDValue Val = DAG.getCopyFromReg(Chain, dl, VReg, PtrVT);
6950 SDValue Store =
6951 DAG.getStore(Val.getValue(1), dl, Val, FIN, MachinePointerInfo());
6952 MemOps.push_back(Store);
6953 // Increment the address for the next argument to store.
6954 SDValue PtrOff = DAG.getConstant(PtrByteSize, dl, PtrVT);
6955 FIN = DAG.getNode(ISD::ADD, dl, PtrOff.getValueType(), FIN, PtrOff);
6956 }
6957 }
6958
6959 if (!MemOps.empty())
6960 Chain = DAG.getNode(ISD::TokenFactor, dl, MVT::Other, MemOps);
6961
6962 return Chain;
6963}
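// Editorial sketch, not part of PPCISelLowering.cpp: the reserved-area
// computation above amounts to "at least the linkage area plus eight
// register-width slots, rounded up to the stack alignment". A hypothetical
// standalone form, assuming a 16-byte stack alignment:
#include <algorithm>
static unsigned minReservedArea(unsigned NextStackOffset, unsigned LinkageSize,
                                unsigned PtrByteSize,
                                unsigned StackAlign = 16) {
  const unsigned Area =
      std::max(NextStackOffset, LinkageSize + 8 * PtrByteSize);
  return (Area + StackAlign - 1) / StackAlign * StackAlign;
}
// Example: with a 48-byte linkage area and 8-byte pointers, a callee whose
// arguments only need 80 bytes still reserves 48 + 64 = 112 bytes.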
6964
6965SDValue PPCTargetLowering::LowerCall_AIX(
6966 SDValue Chain, SDValue Callee, CallFlags CFlags,
6967 const SmallVectorImpl<ISD::OutputArg> &Outs,
6968 const SmallVectorImpl<SDValue> &OutVals,
6969 const SmallVectorImpl<ISD::InputArg> &Ins, const SDLoc &dl,
6970 SelectionDAG &DAG, SmallVectorImpl<SDValue> &InVals,
6971 const CallBase *CB) const {
6972 // See PPCTargetLowering::LowerFormalArguments_AIX() for a description of the
6973 // AIX ABI stack frame layout.
6974
6975   assert((CFlags.CallConv == CallingConv::C ||
6976           CFlags.CallConv == CallingConv::Cold ||
6977           CFlags.CallConv == CallingConv::Fast) &&
6978          "Unexpected calling convention!");
6979
6980 if (CFlags.IsPatchPoint)
6981 report_fatal_error("This call type is unimplemented on AIX.");
6982
6983 const PPCSubtarget& Subtarget =
6984 static_cast<const PPCSubtarget&>(DAG.getSubtarget());
6985
6986 MachineFunction &MF = DAG.getMachineFunction();
6987 SmallVector<CCValAssign, 16> ArgLocs;
6988 AIXCCState CCInfo(CFlags.CallConv, CFlags.IsVarArg, MF, ArgLocs,
6989 *DAG.getContext());
6990
6991 // Reserve space for the linkage save area (LSA) on the stack.
6992 // In both PPC32 and PPC64 there are 6 reserved slots in the LSA:
6993 // [SP][CR][LR][2 x reserved][TOC].
6994 // The LSA is 24 bytes (6x4) in PPC32 and 48 bytes (6x8) in PPC64.
6995 const unsigned LinkageSize = Subtarget.getFrameLowering()->getLinkageSize();
6996 const bool IsPPC64 = Subtarget.isPPC64();
6997 const EVT PtrVT = getPointerTy(DAG.getDataLayout());
6998 const unsigned PtrByteSize = IsPPC64 ? 8 : 4;
6999 CCInfo.AllocateStack(LinkageSize, Align(PtrByteSize));
7000 CCInfo.AnalyzeCallOperands(Outs, CC_AIX);
7001
7002 // The prolog code of the callee may store up to 8 GPR argument registers to
7003 // the stack, allowing va_start to index over them in memory if the callee
7004 // is variadic.
7005 // Because we cannot tell if this is needed on the caller side, we have to
7006 // conservatively assume that it is needed. As such, make sure we have at
7007 // least enough stack space for the caller to store the 8 GPRs.
7008 const unsigned MinParameterSaveAreaSize = 8 * PtrByteSize;
7009 const unsigned NumBytes = std::max(LinkageSize + MinParameterSaveAreaSize,
7010 CCInfo.getNextStackOffset());
7011
7012 // Adjust the stack pointer for the new arguments...
7013 // These operations are automatically eliminated by the prolog/epilog pass.
7014 Chain = DAG.getCALLSEQ_START(Chain, NumBytes, 0, dl);
7015 SDValue CallSeqStart = Chain;
7016
7017 SmallVector<std::pair<unsigned, SDValue>, 8> RegsToPass;
7018 SmallVector<SDValue, 8> MemOpChains;
7019
7020 // Set up a copy of the stack pointer for loading and storing any
7021 // arguments that may not fit in the registers available for argument
7022 // passing.
7023 const SDValue StackPtr = IsPPC64 ? DAG.getRegister(PPC::X1, MVT::i64)
7024 : DAG.getRegister(PPC::R1, MVT::i32);
7025
7026 for (unsigned I = 0, E = ArgLocs.size(); I != E;) {
7027 const unsigned ValNo = ArgLocs[I].getValNo();
7028 SDValue Arg = OutVals[ValNo];
7029 ISD::ArgFlagsTy Flags = Outs[ValNo].Flags;
7030
7031 if (Flags.isByVal()) {
7032 const unsigned ByValSize = Flags.getByValSize();
7033
7034 // Nothing to do for zero-sized ByVals on the caller side.
7035 if (!ByValSize) {
7036 ++I;
7037 continue;
7038 }
7039
7040 auto GetLoad = [&](EVT VT, unsigned LoadOffset) {
7041 return DAG.getExtLoad(
7042 ISD::ZEXTLOAD, dl, PtrVT, Chain,
7043 (LoadOffset != 0)
7044 ? DAG.getObjectPtrOffset(dl, Arg, TypeSize::Fixed(LoadOffset))
7045 : Arg,
7046 MachinePointerInfo(), VT);
7047 };
7048
7049 unsigned LoadOffset = 0;
7050
7051      // Initialize the registers that are fully occupied by the by-val argument.
7052 while (LoadOffset + PtrByteSize <= ByValSize && ArgLocs[I].isRegLoc()) {
7053 SDValue Load = GetLoad(PtrVT, LoadOffset);
7054 MemOpChains.push_back(Load.getValue(1));
7055 LoadOffset += PtrByteSize;
7056 const CCValAssign &ByValVA = ArgLocs[I++];
7057        assert(ByValVA.getValNo() == ValNo &&
7058               "Unexpected location for pass-by-value argument.");
7059 RegsToPass.push_back(std::make_pair(ByValVA.getLocReg(), Load));
7060 }
7061
7062 if (LoadOffset == ByValSize)
7063 continue;
7064
7065 // There must be one more loc to handle the remainder.
7066      assert(ArgLocs[I].getValNo() == ValNo &&
7067             "Expected additional location for by-value argument.");
7068
7069 if (ArgLocs[I].isMemLoc()) {
7070        assert(LoadOffset < ByValSize && "Unexpected memloc for by-val arg.");
7071 const CCValAssign &ByValVA = ArgLocs[I++];
7072 ISD::ArgFlagsTy MemcpyFlags = Flags;
7073        // Only memcpy the bytes that aren't passed in registers.
7074 MemcpyFlags.setByValSize(ByValSize - LoadOffset);
7075 Chain = CallSeqStart = createMemcpyOutsideCallSeq(
7076 (LoadOffset != 0)
7077 ? DAG.getObjectPtrOffset(dl, Arg, TypeSize::Fixed(LoadOffset))
7078 : Arg,
7079 DAG.getObjectPtrOffset(dl, StackPtr,
7080 TypeSize::Fixed(ByValVA.getLocMemOffset())),
7081 CallSeqStart, MemcpyFlags, DAG, dl);
7082 continue;
7083 }
7084
7085 // Initialize the final register residue.
7086 // Any residue that occupies the final by-val arg register must be
7087 // left-justified on AIX. Loads must be a power-of-2 size and cannot be
7088 // larger than the ByValSize. For example: a 7 byte by-val arg requires 4,
7089 // 2 and 1 byte loads.
7090 const unsigned ResidueBytes = ByValSize % PtrByteSize;
7091      assert(ResidueBytes != 0 && LoadOffset + PtrByteSize > ByValSize &&
7092             "Unexpected register residue for by-value argument.");
7093 SDValue ResidueVal;
7094 for (unsigned Bytes = 0; Bytes != ResidueBytes;) {
7095 const unsigned N = PowerOf2Floor(ResidueBytes - Bytes);
7096 const MVT VT =
7097 N == 1 ? MVT::i8
7098 : ((N == 2) ? MVT::i16 : (N == 4 ? MVT::i32 : MVT::i64));
7099 SDValue Load = GetLoad(VT, LoadOffset);
7100 MemOpChains.push_back(Load.getValue(1));
7101 LoadOffset += N;
7102 Bytes += N;
7103
7104        // By-val arguments are passed left-justified in registers.
7105 // Every load here needs to be shifted, otherwise a full register load
7106 // should have been used.
7107        assert(PtrVT.getSimpleVT().getSizeInBits() > (Bytes * 8) &&
7108               "Unexpected load emitted during handling of pass-by-value "
7109               "argument.");
7110 unsigned NumSHLBits = PtrVT.getSimpleVT().getSizeInBits() - (Bytes * 8);
7111 EVT ShiftAmountTy =
7112 getShiftAmountTy(Load->getValueType(0), DAG.getDataLayout());
7113 SDValue SHLAmt = DAG.getConstant(NumSHLBits, dl, ShiftAmountTy);
7114 SDValue ShiftedLoad =
7115 DAG.getNode(ISD::SHL, dl, Load.getValueType(), Load, SHLAmt);
7116 ResidueVal = ResidueVal ? DAG.getNode(ISD::OR, dl, PtrVT, ResidueVal,
7117 ShiftedLoad)
7118 : ShiftedLoad;
7119 }
7120
7121 const CCValAssign &ByValVA = ArgLocs[I++];
7122 RegsToPass.push_back(std::make_pair(ByValVA.getLocReg(), ResidueVal));
7123 continue;
7124 }
7125
7126 CCValAssign &VA = ArgLocs[I++];
7127 const MVT LocVT = VA.getLocVT();
7128 const MVT ValVT = VA.getValVT();
7129
7130 switch (VA.getLocInfo()) {
7131 default:
7132 report_fatal_error("Unexpected argument extension type.");
7133 case CCValAssign::Full:
7134 break;
7135 case CCValAssign::ZExt:
7136 Arg = DAG.getNode(ISD::ZERO_EXTEND, dl, VA.getLocVT(), Arg);
7137 break;
7138 case CCValAssign::SExt:
7139 Arg = DAG.getNode(ISD::SIGN_EXTEND, dl, VA.getLocVT(), Arg);
7140 break;
7141 }
7142
7143 if (VA.isRegLoc() && !VA.needsCustom()) {
7144 RegsToPass.push_back(std::make_pair(VA.getLocReg(), Arg));
7145 continue;
7146 }
7147
7148 // Vector arguments passed to VarArg functions need custom handling when
7149 // they are passed (at least partially) in GPRs.
7150 if (VA.isMemLoc() && VA.needsCustom() && ValVT.isVector()) {
7151      assert(CFlags.IsVarArg && "Custom MemLocs only used for Vector args.");
7152 // Store value to its stack slot.
7153 SDValue PtrOff =
7154 DAG.getConstant(VA.getLocMemOffset(), dl, StackPtr.getValueType());
7155 PtrOff = DAG.getNode(ISD::ADD, dl, PtrVT, StackPtr, PtrOff);
7156 SDValue Store =
7157 DAG.getStore(Chain, dl, Arg, PtrOff, MachinePointerInfo());
7158 MemOpChains.push_back(Store);
7159 const unsigned OriginalValNo = VA.getValNo();
7160 // Then load the GPRs from the stack
7161 unsigned LoadOffset = 0;
7162 auto HandleCustomVecRegLoc = [&]() {
7163        assert(I != E && "Unexpected end of CCvalAssigns.");
7164        assert(ArgLocs[I].isRegLoc() && ArgLocs[I].needsCustom() &&
7165               "Expected custom RegLoc.");
7166 CCValAssign RegVA = ArgLocs[I++];
7167        assert(RegVA.getValNo() == OriginalValNo &&
7168               "Custom MemLoc ValNo and custom RegLoc ValNo must match.");
7169 SDValue Add = DAG.getNode(ISD::ADD, dl, PtrVT, PtrOff,
7170 DAG.getConstant(LoadOffset, dl, PtrVT));
7171 SDValue Load = DAG.getLoad(PtrVT, dl, Store, Add, MachinePointerInfo());
7172 MemOpChains.push_back(Load.getValue(1));
7173 RegsToPass.push_back(std::make_pair(RegVA.getLocReg(), Load));
7174 LoadOffset += PtrByteSize;
7175 };
7176
7177      // In 64-bit there will be exactly 2 custom RegLocs that follow, and
7178      // in 32-bit there will be 2 custom RegLocs if we are passing in R9 and
7179 // R10.
7180 HandleCustomVecRegLoc();
7181 HandleCustomVecRegLoc();
7182
7183 if (I != E && ArgLocs[I].isRegLoc() && ArgLocs[I].needsCustom() &&
7184 ArgLocs[I].getValNo() == OriginalValNo) {
7185        assert(!IsPPC64 &&
7186               "Only 2 custom RegLocs expected for 64-bit codegen.");
7187 HandleCustomVecRegLoc();
7188 HandleCustomVecRegLoc();
7189 }
7190
7191 continue;
7192 }
7193
7194 if (VA.isMemLoc()) {
7195 SDValue PtrOff =
7196 DAG.getConstant(VA.getLocMemOffset(), dl, StackPtr.getValueType());
7197 PtrOff = DAG.getNode(ISD::ADD, dl, PtrVT, StackPtr, PtrOff);
7198 MemOpChains.push_back(
7199 DAG.getStore(Chain, dl, Arg, PtrOff, MachinePointerInfo()));
7200
7201 continue;
7202 }
7203
7204 if (!ValVT.isFloatingPoint())
7205 report_fatal_error(
7206 "Unexpected register handling for calling convention.");
7207
7208 // Custom handling is used for GPR initializations for vararg float
7209 // arguments.
7210    assert(VA.isRegLoc() && VA.needsCustom() && CFlags.IsVarArg &&
7211           LocVT.isInteger() &&
7212           "Custom register handling only expected for VarArg.");
7213
7214 SDValue ArgAsInt =
7215 DAG.getBitcast(MVT::getIntegerVT(ValVT.getSizeInBits()), Arg);
7216
7217 if (Arg.getValueType().getStoreSize() == LocVT.getStoreSize())
7218 // f32 in 32-bit GPR
7219 // f64 in 64-bit GPR
7220 RegsToPass.push_back(std::make_pair(VA.getLocReg(), ArgAsInt));
7221 else if (Arg.getValueType().getFixedSizeInBits() <
7222 LocVT.getFixedSizeInBits())
7223 // f32 in 64-bit GPR.
7224 RegsToPass.push_back(std::make_pair(
7225 VA.getLocReg(), DAG.getZExtOrTrunc(ArgAsInt, dl, LocVT)));
7226 else {
7227 // f64 in two 32-bit GPRs
7228 // The 2 GPRs are marked custom and expected to be adjacent in ArgLocs.
7229      assert(Arg.getValueType() == MVT::f64 && CFlags.IsVarArg && !IsPPC64 &&
7230             "Unexpected custom register for argument!");
7231 CCValAssign &GPR1 = VA;
7232 SDValue MSWAsI64 = DAG.getNode(ISD::SRL, dl, MVT::i64, ArgAsInt,
7233 DAG.getConstant(32, dl, MVT::i8));
7234 RegsToPass.push_back(std::make_pair(
7235 GPR1.getLocReg(), DAG.getZExtOrTrunc(MSWAsI64, dl, MVT::i32)));
7236
7237 if (I != E) {
7238 // If only 1 GPR was available, there will only be one custom GPR and
7239 // the argument will also pass in memory.
7240 CCValAssign &PeekArg = ArgLocs[I];
7241        if (PeekArg.isRegLoc() && PeekArg.getValNo() == ValNo) {
7242          assert(PeekArg.needsCustom() && "A second custom GPR is expected.");
7243 CCValAssign &GPR2 = ArgLocs[I++];
7244 RegsToPass.push_back(std::make_pair(
7245 GPR2.getLocReg(), DAG.getZExtOrTrunc(ArgAsInt, dl, MVT::i32)));
7246 }
7247 }
7248 }
7249 }
7250
7251 if (!MemOpChains.empty())
7252 Chain = DAG.getNode(ISD::TokenFactor, dl, MVT::Other, MemOpChains);
7253
7254 // For indirect calls, we need to save the TOC base to the stack for
7255 // restoration after the call.
7256 if (CFlags.IsIndirect) {
7257    assert(!CFlags.IsTailCall && "Indirect tail-calls not supported.");
7258 const MCRegister TOCBaseReg = Subtarget.getTOCPointerRegister();
7259 const MCRegister StackPtrReg = Subtarget.getStackPointerRegister();
7260 const MVT PtrVT = Subtarget.isPPC64() ? MVT::i64 : MVT::i32;
7261 const unsigned TOCSaveOffset =
7262 Subtarget.getFrameLowering()->getTOCSaveOffset();
7263
7264 setUsesTOCBasePtr(DAG);
7265 SDValue Val = DAG.getCopyFromReg(Chain, dl, TOCBaseReg, PtrVT);
7266 SDValue PtrOff = DAG.getIntPtrConstant(TOCSaveOffset, dl);
7267 SDValue StackPtr = DAG.getRegister(StackPtrReg, PtrVT);
7268 SDValue AddPtr = DAG.getNode(ISD::ADD, dl, PtrVT, StackPtr, PtrOff);
7269 Chain = DAG.getStore(
7270 Val.getValue(1), dl, Val, AddPtr,
7271 MachinePointerInfo::getStack(DAG.getMachineFunction(), TOCSaveOffset));
7272 }
7273
7274 // Build a sequence of copy-to-reg nodes chained together with token chain
7275 // and flag operands which copy the outgoing args into the appropriate regs.
7276 SDValue InFlag;
7277 for (auto Reg : RegsToPass) {
7278 Chain = DAG.getCopyToReg(Chain, dl, Reg.first, Reg.second, InFlag);
7279 InFlag = Chain.getValue(1);
7280 }
7281
7282 const int SPDiff = 0;
7283 return FinishCall(CFlags, dl, DAG, RegsToPass, InFlag, Chain, CallSeqStart,
7284 Callee, SPDiff, NumBytes, Ins, InVals, CB);
7285}
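// Editor's sketch (standalone, not LLVM code) of the residue handling in the
// by-val loop above: the tail of a by-val argument is loaded with power-of-2
// sized loads and each piece is shifted left so the bytes end up
// left-justified in the final GPR, matching the AIX convention. Uses the
// GCC/Clang __builtin_clz intrinsic; all names and values are hypothetical.
#include <cstdint>
#include <cstdio>

static uint64_t packResidue(const uint8_t *Bytes, unsigned ResidueBytes,
                            unsigned GPRBits /* 32 or 64 */) {
  uint64_t Result = 0;
  unsigned Offset = 0;
  while (Offset != ResidueBytes) {
    // Largest power-of-2 load that still fits in the remaining residue.
    unsigned N = 1u << (31 - __builtin_clz(ResidueBytes - Offset));
    uint64_t Piece = 0;
    for (unsigned i = 0; i < N; ++i) // big-endian load of N bytes
      Piece = (Piece << 8) | Bytes[Offset + i];
    Offset += N;
    // Left-justify: shift so the loaded bytes occupy the high end of the GPR.
    Result |= Piece << (GPRBits - Offset * 8);
  }
  return Result;
}

int main() {
  // A hypothetical 7-byte residue is packed with 4-, 2- and 1-byte loads.
  uint8_t Data[7] = {1, 2, 3, 4, 5, 6, 7};
  std::printf("%016llx\n",
              (unsigned long long)packResidue(Data, 7, 64)); // 0102030405060700
  return 0;
}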
7286
7287bool
7288PPCTargetLowering::CanLowerReturn(CallingConv::ID CallConv,
7289 MachineFunction &MF, bool isVarArg,
7290 const SmallVectorImpl<ISD::OutputArg> &Outs,
7291 LLVMContext &Context) const {
7292 SmallVector<CCValAssign, 16> RVLocs;
7293 CCState CCInfo(CallConv, isVarArg, MF, RVLocs, Context);
7294 return CCInfo.CheckReturn(
7295 Outs, (Subtarget.isSVR4ABI() && CallConv == CallingConv::Cold)
7296 ? RetCC_PPC_Cold
7297 : RetCC_PPC);
7298}
7299
7300SDValue
7301PPCTargetLowering::LowerReturn(SDValue Chain, CallingConv::ID CallConv,
7302 bool isVarArg,
7303 const SmallVectorImpl<ISD::OutputArg> &Outs,
7304 const SmallVectorImpl<SDValue> &OutVals,
7305 const SDLoc &dl, SelectionDAG &DAG) const {
7306 SmallVector<CCValAssign, 16> RVLocs;
7307 CCState CCInfo(CallConv, isVarArg, DAG.getMachineFunction(), RVLocs,
7308 *DAG.getContext());
7309 CCInfo.AnalyzeReturn(Outs,
7310 (Subtarget.isSVR4ABI() && CallConv == CallingConv::Cold)
7311 ? RetCC_PPC_Cold
7312 : RetCC_PPC);
7313
7314 SDValue Flag;
7315 SmallVector<SDValue, 4> RetOps(1, Chain);
7316
7317 // Copy the result values into the output registers.
7318 for (unsigned i = 0, RealResIdx = 0; i != RVLocs.size(); ++i, ++RealResIdx) {
7319 CCValAssign &VA = RVLocs[i];
7320    assert(VA.isRegLoc() && "Can only return in registers!");
7321
7322 SDValue Arg = OutVals[RealResIdx];
7323
7324 switch (VA.getLocInfo()) {
7325    default: llvm_unreachable("Unknown loc info!");
7326 case CCValAssign::Full: break;
7327 case CCValAssign::AExt:
7328 Arg = DAG.getNode(ISD::ANY_EXTEND, dl, VA.getLocVT(), Arg);
7329 break;
7330 case CCValAssign::ZExt:
7331 Arg = DAG.getNode(ISD::ZERO_EXTEND, dl, VA.getLocVT(), Arg);
7332 break;
7333 case CCValAssign::SExt:
7334 Arg = DAG.getNode(ISD::SIGN_EXTEND, dl, VA.getLocVT(), Arg);
7335 break;
7336 }
7337 if (Subtarget.hasSPE() && VA.getLocVT() == MVT::f64) {
7338 bool isLittleEndian = Subtarget.isLittleEndian();
7339 // Legalize ret f64 -> ret 2 x i32.
7340 SDValue SVal =
7341 DAG.getNode(PPCISD::EXTRACT_SPE, dl, MVT::i32, Arg,
7342 DAG.getIntPtrConstant(isLittleEndian ? 0 : 1, dl));
7343 Chain = DAG.getCopyToReg(Chain, dl, VA.getLocReg(), SVal, Flag);
7344 RetOps.push_back(DAG.getRegister(VA.getLocReg(), VA.getLocVT()));
7345 SVal = DAG.getNode(PPCISD::EXTRACT_SPE, dl, MVT::i32, Arg,
7346 DAG.getIntPtrConstant(isLittleEndian ? 1 : 0, dl));
7347 Flag = Chain.getValue(1);
7348 VA = RVLocs[++i]; // skip ahead to next loc
7349 Chain = DAG.getCopyToReg(Chain, dl, VA.getLocReg(), SVal, Flag);
7350 } else
7351 Chain = DAG.getCopyToReg(Chain, dl, VA.getLocReg(), Arg, Flag);
7352 Flag = Chain.getValue(1);
7353 RetOps.push_back(DAG.getRegister(VA.getLocReg(), VA.getLocVT()));
7354 }
7355
7356 RetOps[0] = Chain; // Update chain.
7357
7358 // Add the flag if we have it.
7359 if (Flag.getNode())
7360 RetOps.push_back(Flag);
7361
7362 return DAG.getNode(PPCISD::RET_FLAG, dl, MVT::Other, RetOps);
7363}
7364
7365SDValue
7366PPCTargetLowering::LowerGET_DYNAMIC_AREA_OFFSET(SDValue Op,
7367 SelectionDAG &DAG) const {
7368 SDLoc dl(Op);
7369
7370 // Get the correct type for integers.
7371 EVT IntVT = Op.getValueType();
7372
7373 // Get the inputs.
7374 SDValue Chain = Op.getOperand(0);
7375 SDValue FPSIdx = getFramePointerFrameIndex(DAG);
7376 // Build a DYNAREAOFFSET node.
7377 SDValue Ops[2] = {Chain, FPSIdx};
7378 SDVTList VTs = DAG.getVTList(IntVT);
7379 return DAG.getNode(PPCISD::DYNAREAOFFSET, dl, VTs, Ops);
7380}
7381
7382SDValue PPCTargetLowering::LowerSTACKRESTORE(SDValue Op,
7383 SelectionDAG &DAG) const {
7384 // When we pop the dynamic allocation we need to restore the SP link.
7385 SDLoc dl(Op);
7386
7387 // Get the correct type for pointers.
7388 EVT PtrVT = getPointerTy(DAG.getDataLayout());
7389
7390 // Construct the stack pointer operand.
7391 bool isPPC64 = Subtarget.isPPC64();
7392 unsigned SP = isPPC64 ? PPC::X1 : PPC::R1;
7393 SDValue StackPtr = DAG.getRegister(SP, PtrVT);
7394
7395 // Get the operands for the STACKRESTORE.
7396 SDValue Chain = Op.getOperand(0);
7397 SDValue SaveSP = Op.getOperand(1);
7398
7399 // Load the old link SP.
7400 SDValue LoadLinkSP =
7401 DAG.getLoad(PtrVT, dl, Chain, StackPtr, MachinePointerInfo());
7402
7403 // Restore the stack pointer.
7404 Chain = DAG.getCopyToReg(LoadLinkSP.getValue(1), dl, SP, SaveSP);
7405
7406 // Store the old link SP.
7407 return DAG.getStore(Chain, dl, LoadLinkSP, StackPtr, MachinePointerInfo());
7408}
7409
7410SDValue PPCTargetLowering::getReturnAddrFrameIndex(SelectionDAG &DAG) const {
7411 MachineFunction &MF = DAG.getMachineFunction();
7412 bool isPPC64 = Subtarget.isPPC64();
7413 EVT PtrVT = getPointerTy(MF.getDataLayout());
7414
7415  // Get the current return address save index, allocating a fixed object
7416  // for the LR save slot if it has not been created yet.
7417 PPCFunctionInfo *FI = MF.getInfo<PPCFunctionInfo>();
7418 int RASI = FI->getReturnAddrSaveIndex();
7419
7420  // If the return address save index hasn't been defined yet.
7421  if (!RASI) {
7422    // Find out the fixed offset of the return address save area.
7423    int LROffset = Subtarget.getFrameLowering()->getReturnSaveOffset();
7424    // Allocate the frame index for the return address save area.
7425 RASI = MF.getFrameInfo().CreateFixedObject(isPPC64? 8 : 4, LROffset, false);
7426 // Save the result.
7427 FI->setReturnAddrSaveIndex(RASI);
7428 }
7429 return DAG.getFrameIndex(RASI, PtrVT);
7430}
7431
7432SDValue
7433PPCTargetLowering::getFramePointerFrameIndex(SelectionDAG & DAG) const {
7434 MachineFunction &MF = DAG.getMachineFunction();
7435 bool isPPC64 = Subtarget.isPPC64();
7436 EVT PtrVT = getPointerTy(MF.getDataLayout());
7437
7438 // Get current frame pointer save index. The users of this index will be
7439 // primarily DYNALLOC instructions.
7440 PPCFunctionInfo *FI = MF.getInfo<PPCFunctionInfo>();
7441 int FPSI = FI->getFramePointerSaveIndex();
7442
7443 // If the frame pointer save index hasn't been defined yet.
7444 if (!FPSI) {
7445 // Find out what the fix offset of the frame pointer save area.
7446 int FPOffset = Subtarget.getFrameLowering()->getFramePointerSaveOffset();
7447 // Allocate the frame index for frame pointer save area.
7448 FPSI = MF.getFrameInfo().CreateFixedObject(isPPC64? 8 : 4, FPOffset, true);
7449 // Save the result.
7450 FI->setFramePointerSaveIndex(FPSI);
7451 }
7452 return DAG.getFrameIndex(FPSI, PtrVT);
7453}
7454
7455SDValue PPCTargetLowering::LowerDYNAMIC_STACKALLOC(SDValue Op,
7456 SelectionDAG &DAG) const {
7457 MachineFunction &MF = DAG.getMachineFunction();
7458 // Get the inputs.
7459 SDValue Chain = Op.getOperand(0);
7460 SDValue Size = Op.getOperand(1);
7461 SDLoc dl(Op);
7462
7463 // Get the correct type for pointers.
7464 EVT PtrVT = getPointerTy(DAG.getDataLayout());
7465 // Negate the size.
7466 SDValue NegSize = DAG.getNode(ISD::SUB, dl, PtrVT,
7467 DAG.getConstant(0, dl, PtrVT), Size);
7468 // Construct a node for the frame pointer save index.
7469 SDValue FPSIdx = getFramePointerFrameIndex(DAG);
7470 SDValue Ops[3] = { Chain, NegSize, FPSIdx };
7471 SDVTList VTs = DAG.getVTList(PtrVT, MVT::Other);
7472 if (hasInlineStackProbe(MF))
7473 return DAG.getNode(PPCISD::PROBED_ALLOCA, dl, VTs, Ops);
7474 return DAG.getNode(PPCISD::DYNALLOC, dl, VTs, Ops);
7475}
7476
7477SDValue PPCTargetLowering::LowerEH_DWARF_CFA(SDValue Op,
7478 SelectionDAG &DAG) const {
7479 MachineFunction &MF = DAG.getMachineFunction();
7480
7481 bool isPPC64 = Subtarget.isPPC64();
7482 EVT PtrVT = getPointerTy(DAG.getDataLayout());
7483
7484 int FI = MF.getFrameInfo().CreateFixedObject(isPPC64 ? 8 : 4, 0, false);
7485 return DAG.getFrameIndex(FI, PtrVT);
7486}
7487
7488SDValue PPCTargetLowering::lowerEH_SJLJ_SETJMP(SDValue Op,
7489 SelectionDAG &DAG) const {
7490 SDLoc DL(Op);
7491 return DAG.getNode(PPCISD::EH_SJLJ_SETJMP, DL,
7492 DAG.getVTList(MVT::i32, MVT::Other),
7493 Op.getOperand(0), Op.getOperand(1));
7494}
7495
7496SDValue PPCTargetLowering::lowerEH_SJLJ_LONGJMP(SDValue Op,
7497 SelectionDAG &DAG) const {
7498 SDLoc DL(Op);
7499 return DAG.getNode(PPCISD::EH_SJLJ_LONGJMP, DL, MVT::Other,
7500 Op.getOperand(0), Op.getOperand(1));
7501}
7502
7503SDValue PPCTargetLowering::LowerLOAD(SDValue Op, SelectionDAG &DAG) const {
7504 if (Op.getValueType().isVector())
7505 return LowerVectorLoad(Op, DAG);
7506
7507  assert(Op.getValueType() == MVT::i1 &&
7508         "Custom lowering only for i1 loads");
7509
7510 // First, load 8 bits into 32 bits, then truncate to 1 bit.
7511
7512 SDLoc dl(Op);
7513 LoadSDNode *LD = cast<LoadSDNode>(Op);
7514
7515 SDValue Chain = LD->getChain();
7516 SDValue BasePtr = LD->getBasePtr();
7517 MachineMemOperand *MMO = LD->getMemOperand();
7518
7519 SDValue NewLD =
7520 DAG.getExtLoad(ISD::EXTLOAD, dl, getPointerTy(DAG.getDataLayout()), Chain,
7521 BasePtr, MVT::i8, MMO);
7522 SDValue Result = DAG.getNode(ISD::TRUNCATE, dl, MVT::i1, NewLD);
7523
7524 SDValue Ops[] = { Result, SDValue(NewLD.getNode(), 1) };
7525 return DAG.getMergeValues(Ops, dl);
7526}
7527
7528SDValue PPCTargetLowering::LowerSTORE(SDValue Op, SelectionDAG &DAG) const {
7529 if (Op.getOperand(1).getValueType().isVector())
7530 return LowerVectorStore(Op, DAG);
7531
7532  assert(Op.getOperand(1).getValueType() == MVT::i1 &&
7533         "Custom lowering only for i1 stores");
7534
7535 // First, zero extend to 32 bits, then use a truncating store to 8 bits.
7536
7537 SDLoc dl(Op);
7538 StoreSDNode *ST = cast<StoreSDNode>(Op);
7539
7540 SDValue Chain = ST->getChain();
7541 SDValue BasePtr = ST->getBasePtr();
7542 SDValue Value = ST->getValue();
7543 MachineMemOperand *MMO = ST->getMemOperand();
7544
7545 Value = DAG.getNode(ISD::ZERO_EXTEND, dl, getPointerTy(DAG.getDataLayout()),
7546 Value);
7547 return DAG.getTruncStore(Chain, dl, Value, BasePtr, MVT::i8, MMO);
7548}
7549
7550// FIXME: Remove this once the ANDI glue bug is fixed:
7551SDValue PPCTargetLowering::LowerTRUNCATE(SDValue Op, SelectionDAG &DAG) const {
7552  assert(Op.getValueType() == MVT::i1 &&
7553         "Custom lowering only for i1 results");
7554
7555 SDLoc DL(Op);
7556 return DAG.getNode(PPCISD::ANDI_rec_1_GT_BIT, DL, MVT::i1, Op.getOperand(0));
7557}
7558
7559SDValue PPCTargetLowering::LowerTRUNCATEVector(SDValue Op,
7560 SelectionDAG &DAG) const {
7561
7562 // Implements a vector truncate that fits in a vector register as a shuffle.
7563 // We want to legalize vector truncates down to where the source fits in
7564 // a vector register (and target is therefore smaller than vector register
7565 // size). At that point legalization will try to custom lower the sub-legal
7566 // result and get here - where we can contain the truncate as a single target
7567 // operation.
7568
7569 // For example a trunc <2 x i16> to <2 x i8> could be visualized as follows:
7570 // <MSB1|LSB1, MSB2|LSB2> to <LSB1, LSB2>
7571 //
7572 // We will implement it for big-endian ordering as this (where x denotes
7573 // undefined):
7574 // < MSB1|LSB1, MSB2|LSB2, uu, uu, uu, uu, uu, uu> to
7575 // < LSB1, LSB2, u, u, u, u, u, u, u, u, u, u, u, u, u, u>
7576 //
7577 // The same operation in little-endian ordering will be:
7578 // <uu, uu, uu, uu, uu, uu, LSB2|MSB2, LSB1|MSB1> to
7579 // <u, u, u, u, u, u, u, u, u, u, u, u, u, u, LSB2, LSB1>
7580
7581 EVT TrgVT = Op.getValueType();
7582  assert(TrgVT.isVector() && "Vector type expected.");
7583 unsigned TrgNumElts = TrgVT.getVectorNumElements();
7584 EVT EltVT = TrgVT.getVectorElementType();
7585 if (!isOperationCustom(Op.getOpcode(), TrgVT) ||
7586 TrgVT.getSizeInBits() > 128 || !isPowerOf2_32(TrgNumElts) ||
7587 !isPowerOf2_32(EltVT.getSizeInBits()))
7588 return SDValue();
7589
7590 SDValue N1 = Op.getOperand(0);
7591 EVT SrcVT = N1.getValueType();
7592 unsigned SrcSize = SrcVT.getSizeInBits();
7593 if (SrcSize > 256 ||
7594 !isPowerOf2_32(SrcVT.getVectorNumElements()) ||
7595 !isPowerOf2_32(SrcVT.getVectorElementType().getSizeInBits()))
7596 return SDValue();
7597 if (SrcSize == 256 && SrcVT.getVectorNumElements() < 2)
7598 return SDValue();
7599
7600 unsigned WideNumElts = 128 / EltVT.getSizeInBits();
7601 EVT WideVT = EVT::getVectorVT(*DAG.getContext(), EltVT, WideNumElts);
7602
7603 SDLoc DL(Op);
7604 SDValue Op1, Op2;
7605 if (SrcSize == 256) {
7606 EVT VecIdxTy = getVectorIdxTy(DAG.getDataLayout());
7607 EVT SplitVT =
7608 N1.getValueType().getHalfNumVectorElementsVT(*DAG.getContext());
7609 unsigned SplitNumElts = SplitVT.getVectorNumElements();
7610 Op1 = DAG.getNode(ISD::EXTRACT_SUBVECTOR, DL, SplitVT, N1,
7611 DAG.getConstant(0, DL, VecIdxTy));
7612 Op2 = DAG.getNode(ISD::EXTRACT_SUBVECTOR, DL, SplitVT, N1,
7613 DAG.getConstant(SplitNumElts, DL, VecIdxTy));
7614 }
7615 else {
7616 Op1 = SrcSize == 128 ? N1 : widenVec(DAG, N1, DL);
7617 Op2 = DAG.getUNDEF(WideVT);
7618 }
7619
7620 // First list the elements we want to keep.
7621 unsigned SizeMult = SrcSize / TrgVT.getSizeInBits();
7622 SmallVector<int, 16> ShuffV;
7623 if (Subtarget.isLittleEndian())
7624 for (unsigned i = 0; i < TrgNumElts; ++i)
7625 ShuffV.push_back(i * SizeMult);
7626 else
7627 for (unsigned i = 1; i <= TrgNumElts; ++i)
7628 ShuffV.push_back(i * SizeMult - 1);
7629
7630 // Populate the remaining elements with undefs.
7631 for (unsigned i = TrgNumElts; i < WideNumElts; ++i)
7632 // ShuffV.push_back(i + WideNumElts);
7633 ShuffV.push_back(WideNumElts + 1);
7634
7635 Op1 = DAG.getNode(ISD::BITCAST, DL, WideVT, Op1);
7636 Op2 = DAG.getNode(ISD::BITCAST, DL, WideVT, Op2);
7637 return DAG.getVectorShuffle(WideVT, DL, Op1, Op2, ShuffV);
7638}
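// Editor's sketch (not LLVM code) of the shuffle mask built above for a
// sub-register vector truncate: on little-endian the LSBs of each source
// element sit at index i * SizeMult, on big-endian at i * SizeMult - 1, and
// the remaining lanes are don't-care. Names and sizes are hypothetical.
#include <cstdio>
#include <vector>

static std::vector<int> truncShuffleMask(unsigned TrgNumElts,
                                         unsigned WideNumElts,
                                         unsigned SizeMult,
                                         bool LittleEndian) {
  std::vector<int> ShuffV;
  if (LittleEndian)
    for (unsigned i = 0; i < TrgNumElts; ++i)
      ShuffV.push_back(i * SizeMult);       // LSBs are the low sub-element
  else
    for (unsigned i = 1; i <= TrgNumElts; ++i)
      ShuffV.push_back(i * SizeMult - 1);   // LSBs are the high sub-element
  for (unsigned i = TrgNumElts; i < WideNumElts; ++i)
    ShuffV.push_back(-1);                   // remaining lanes are undef
  return ShuffV;
}

int main() {
  // trunc <2 x i16> -> <2 x i8>: SizeMult = 2, widened to 16 x i8, big-endian.
  for (int Idx : truncShuffleMask(2, 16, 2, /*LittleEndian=*/false))
    std::printf("%d ", Idx); // 1 3 -1 -1 ...
  std::printf("\n");
  return 0;
}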
7639
7640/// LowerSELECT_CC - Lower floating point select_cc's into fsel instruction when
7641/// possible.
7642SDValue PPCTargetLowering::LowerSELECT_CC(SDValue Op, SelectionDAG &DAG) const {
7643 ISD::CondCode CC = cast<CondCodeSDNode>(Op.getOperand(4))->get();
7644 EVT ResVT = Op.getValueType();
7645 EVT CmpVT = Op.getOperand(0).getValueType();
7646 SDValue LHS = Op.getOperand(0), RHS = Op.getOperand(1);
7647 SDValue TV = Op.getOperand(2), FV = Op.getOperand(3);
7648 SDLoc dl(Op);
7649
7650  // Without power9-vector, we don't have a native instruction for f128 comparison.
7651  // The following transformation to a libcall is needed for setcc:
7652 // select_cc lhs, rhs, tv, fv, cc -> select_cc (setcc cc, x, y), 0, tv, fv, NE
7653 if (!Subtarget.hasP9Vector() && CmpVT == MVT::f128) {
7654 SDValue Z = DAG.getSetCC(
7655 dl, getSetCCResultType(DAG.getDataLayout(), *DAG.getContext(), CmpVT),
7656 LHS, RHS, CC);
7657 SDValue Zero = DAG.getConstant(0, dl, Z.getValueType());
7658 return DAG.getSelectCC(dl, Z, Zero, TV, FV, ISD::SETNE);
7659 }
7660
7661 // Not FP, or using SPE? Not a fsel.
7662 if (!CmpVT.isFloatingPoint() || !TV.getValueType().isFloatingPoint() ||
7663 Subtarget.hasSPE())
7664 return Op;
7665
7666 SDNodeFlags Flags = Op.getNode()->getFlags();
7667
7668 // We have xsmaxcdp/xsmincdp which are OK to emit even in the
7669 // presence of infinities.
7670 if (Subtarget.hasP9Vector() && LHS == TV && RHS == FV) {
7671 switch (CC) {
7672 default:
7673 break;
7674 case ISD::SETOGT:
7675 case ISD::SETGT:
7676 return DAG.getNode(PPCISD::XSMAXCDP, dl, Op.getValueType(), LHS, RHS);
7677 case ISD::SETOLT:
7678 case ISD::SETLT:
7679 return DAG.getNode(PPCISD::XSMINCDP, dl, Op.getValueType(), LHS, RHS);
7680 }
7681 }
7682
7683 // We might be able to do better than this under some circumstances, but in
7684 // general, fsel-based lowering of select is a finite-math-only optimization.
7685 // For more information, see section F.3 of the 2.06 ISA specification.
7686 // With ISA 3.0
7687 if ((!DAG.getTarget().Options.NoInfsFPMath && !Flags.hasNoInfs()) ||
7688 (!DAG.getTarget().Options.NoNaNsFPMath && !Flags.hasNoNaNs()))
7689 return Op;
7690
7691 // If the RHS of the comparison is a 0.0, we don't need to do the
7692 // subtraction at all.
7693 SDValue Sel1;
7694 if (isFloatingPointZero(RHS))
7695 switch (CC) {
7696 default: break; // SETUO etc aren't handled by fsel.
7697 case ISD::SETNE:
7698 std::swap(TV, FV);
7699      LLVM_FALLTHROUGH;
7700 case ISD::SETEQ:
7701 if (LHS.getValueType() == MVT::f32) // Comparison is always 64-bits
7702 LHS = DAG.getNode(ISD::FP_EXTEND, dl, MVT::f64, LHS);
7703 Sel1 = DAG.getNode(PPCISD::FSEL, dl, ResVT, LHS, TV, FV);
7704 if (Sel1.getValueType() == MVT::f32) // Comparison is always 64-bits
7705 Sel1 = DAG.getNode(ISD::FP_EXTEND, dl, MVT::f64, Sel1);
7706 return DAG.getNode(PPCISD::FSEL, dl, ResVT,
7707 DAG.getNode(ISD::FNEG, dl, MVT::f64, LHS), Sel1, FV);
7708 case ISD::SETULT:
7709 case ISD::SETLT:
7710 std::swap(TV, FV); // fsel is natively setge, swap operands for setlt
7711      LLVM_FALLTHROUGH;
7712 case ISD::SETOGE:
7713 case ISD::SETGE:
7714 if (LHS.getValueType() == MVT::f32) // Comparison is always 64-bits
7715 LHS = DAG.getNode(ISD::FP_EXTEND, dl, MVT::f64, LHS);
7716 return DAG.getNode(PPCISD::FSEL, dl, ResVT, LHS, TV, FV);
7717 case ISD::SETUGT:
7718 case ISD::SETGT:
7719 std::swap(TV, FV); // fsel is natively setge, swap operands for setlt
7720      LLVM_FALLTHROUGH;
7721 case ISD::SETOLE:
7722 case ISD::SETLE:
7723 if (LHS.getValueType() == MVT::f32) // Comparison is always 64-bits
7724 LHS = DAG.getNode(ISD::FP_EXTEND, dl, MVT::f64, LHS);
7725 return DAG.getNode(PPCISD::FSEL, dl, ResVT,
7726 DAG.getNode(ISD::FNEG, dl, MVT::f64, LHS), TV, FV);
7727 }
7728
7729 SDValue Cmp;
7730 switch (CC) {
7731 default: break; // SETUO etc aren't handled by fsel.
7732 case ISD::SETNE:
7733 std::swap(TV, FV);
7734    LLVM_FALLTHROUGH;
7735 case ISD::SETEQ:
7736 Cmp = DAG.getNode(ISD::FSUB, dl, CmpVT, LHS, RHS, Flags);
7737 if (Cmp.getValueType() == MVT::f32) // Comparison is always 64-bits
7738 Cmp = DAG.getNode(ISD::FP_EXTEND, dl, MVT::f64, Cmp);
7739 Sel1 = DAG.getNode(PPCISD::FSEL, dl, ResVT, Cmp, TV, FV);
7740 if (Sel1.getValueType() == MVT::f32) // Comparison is always 64-bits
7741 Sel1 = DAG.getNode(ISD::FP_EXTEND, dl, MVT::f64, Sel1);
7742 return DAG.getNode(PPCISD::FSEL, dl, ResVT,
7743 DAG.getNode(ISD::FNEG, dl, MVT::f64, Cmp), Sel1, FV);
7744 case ISD::SETULT:
7745 case ISD::SETLT:
7746 Cmp = DAG.getNode(ISD::FSUB, dl, CmpVT, LHS, RHS, Flags);
7747 if (Cmp.getValueType() == MVT::f32) // Comparison is always 64-bits
7748 Cmp = DAG.getNode(ISD::FP_EXTEND, dl, MVT::f64, Cmp);
7749 return DAG.getNode(PPCISD::FSEL, dl, ResVT, Cmp, FV, TV);
7750 case ISD::SETOGE:
7751 case ISD::SETGE:
7752 Cmp = DAG.getNode(ISD::FSUB, dl, CmpVT, LHS, RHS, Flags);
7753 if (Cmp.getValueType() == MVT::f32) // Comparison is always 64-bits
7754 Cmp = DAG.getNode(ISD::FP_EXTEND, dl, MVT::f64, Cmp);
7755 return DAG.getNode(PPCISD::FSEL, dl, ResVT, Cmp, TV, FV);
7756 case ISD::SETUGT:
7757 case ISD::SETGT:
7758 Cmp = DAG.getNode(ISD::FSUB, dl, CmpVT, RHS, LHS, Flags);
7759 if (Cmp.getValueType() == MVT::f32) // Comparison is always 64-bits
7760 Cmp = DAG.getNode(ISD::FP_EXTEND, dl, MVT::f64, Cmp);
7761 return DAG.getNode(PPCISD::FSEL, dl, ResVT, Cmp, FV, TV);
7762 case ISD::SETOLE:
7763 case ISD::SETLE:
7764 Cmp = DAG.getNode(ISD::FSUB, dl, CmpVT, RHS, LHS, Flags);
7765 if (Cmp.getValueType() == MVT::f32) // Comparison is always 64-bits
7766 Cmp = DAG.getNode(ISD::FP_EXTEND, dl, MVT::f64, Cmp);
7767 return DAG.getNode(PPCISD::FSEL, dl, ResVT, Cmp, TV, FV);
7768 }
7769 return Op;
7770}
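// Editor's sketch (not LLVM code) of the fsel identity the lowering above
// relies on: fsel(a, b, c) yields b when a >= 0.0 and c otherwise, so with
// finite math "lhs >= rhs ? tv : fv" becomes fsel(lhs - rhs, tv, fv), and the
// setlt/setgt cases are handled by swapping or negating operands.
#include <cstdio>

static double fsel(double A, double B, double C) { return A >= 0.0 ? B : C; }

int main() {
  double LHS = 2.0, RHS = 3.0, TV = 1.0, FV = -1.0;
  // setge: LHS >= RHS ? TV : FV
  std::printf("%f\n", fsel(LHS - RHS, TV, FV)); // -1.0, since 2.0 < 3.0
  // setlt: LHS < RHS ? TV : FV, expressed by swapping TV and FV.
  std::printf("%f\n", fsel(LHS - RHS, FV, TV)); // 1.0
  return 0;
}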
7771
7772static unsigned getPPCStrictOpcode(unsigned Opc) {
7773 switch (Opc) {
7774 default:
7775    llvm_unreachable("No strict version of this opcode!");
7776 case PPCISD::FCTIDZ:
7777 return PPCISD::STRICT_FCTIDZ;
7778 case PPCISD::FCTIWZ:
7779 return PPCISD::STRICT_FCTIWZ;
7780 case PPCISD::FCTIDUZ:
7781 return PPCISD::STRICT_FCTIDUZ;
7782 case PPCISD::FCTIWUZ:
7783 return PPCISD::STRICT_FCTIWUZ;
7784 case PPCISD::FCFID:
7785 return PPCISD::STRICT_FCFID;
7786 case PPCISD::FCFIDU:
7787 return PPCISD::STRICT_FCFIDU;
7788 case PPCISD::FCFIDS:
7789 return PPCISD::STRICT_FCFIDS;
7790 case PPCISD::FCFIDUS:
7791 return PPCISD::STRICT_FCFIDUS;
7792 }
7793}
7794
7795static SDValue convertFPToInt(SDValue Op, SelectionDAG &DAG,
7796 const PPCSubtarget &Subtarget) {
7797 SDLoc dl(Op);
7798 bool IsStrict = Op->isStrictFPOpcode();
7799 bool IsSigned = Op.getOpcode() == ISD::FP_TO_SINT ||
7800 Op.getOpcode() == ISD::STRICT_FP_TO_SINT;
7801
7802 // TODO: Any other flags to propagate?
7803 SDNodeFlags Flags;
7804 Flags.setNoFPExcept(Op->getFlags().hasNoFPExcept());
7805
7806 // For strict nodes, source is the second operand.
7807 SDValue Src = Op.getOperand(IsStrict ? 1 : 0);
7808 SDValue Chain = IsStrict ? Op.getOperand(0) : SDValue();
7809  assert(Src.getValueType().isFloatingPoint());
7810 if (Src.getValueType() == MVT::f32) {
7811 if (IsStrict) {
7812 Src =
7813 DAG.getNode(ISD::STRICT_FP_EXTEND, dl,
7814 DAG.getVTList(MVT::f64, MVT::Other), {Chain, Src}, Flags);
7815 Chain = Src.getValue(1);
7816 } else
7817 Src = DAG.getNode(ISD::FP_EXTEND, dl, MVT::f64, Src);
7818 }
7819 SDValue Conv;
7820 unsigned Opc = ISD::DELETED_NODE;
7821 switch (Op.getSimpleValueType().SimpleTy) {
7822  default: llvm_unreachable("Unhandled FP_TO_INT type in custom expander!");
7823 case MVT::i32:
7824 Opc = IsSigned ? PPCISD::FCTIWZ
7825 : (Subtarget.hasFPCVT() ? PPCISD::FCTIWUZ : PPCISD::FCTIDZ);
7826 break;
7827 case MVT::i64:
7828    assert((IsSigned || Subtarget.hasFPCVT()) &&
7829           "i64 FP_TO_UINT is supported only with FPCVT");
7830 Opc = IsSigned ? PPCISD::FCTIDZ : PPCISD::FCTIDUZ;
7831 }
7832 if (IsStrict) {
7833 Opc = getPPCStrictOpcode(Opc);
7834 Conv = DAG.getNode(Opc, dl, DAG.getVTList(MVT::f64, MVT::Other),
7835 {Chain, Src}, Flags);
7836 } else {
7837 Conv = DAG.getNode(Opc, dl, MVT::f64, Src);
7838 }
7839 return Conv;
7840}
7841
7842void PPCTargetLowering::LowerFP_TO_INTForReuse(SDValue Op, ReuseLoadInfo &RLI,
7843 SelectionDAG &DAG,
7844 const SDLoc &dl) const {
7845 SDValue Tmp = convertFPToInt(Op, DAG, Subtarget);
7846 bool IsSigned = Op.getOpcode() == ISD::FP_TO_SINT ||
7847 Op.getOpcode() == ISD::STRICT_FP_TO_SINT;
7848 bool IsStrict = Op->isStrictFPOpcode();
7849
7850 // Convert the FP value to an int value through memory.
7851 bool i32Stack = Op.getValueType() == MVT::i32 && Subtarget.hasSTFIWX() &&
7852 (IsSigned || Subtarget.hasFPCVT());
7853 SDValue FIPtr = DAG.CreateStackTemporary(i32Stack ? MVT::i32 : MVT::f64);
7854 int FI = cast<FrameIndexSDNode>(FIPtr)->getIndex();
7855 MachinePointerInfo MPI =
7856 MachinePointerInfo::getFixedStack(DAG.getMachineFunction(), FI);
7857
7858 // Emit a store to the stack slot.
7859 SDValue Chain = IsStrict ? Tmp.getValue(1) : DAG.getEntryNode();
7860 Align Alignment(DAG.getEVTAlign(Tmp.getValueType()));
7861 if (i32Stack) {
7862 MachineFunction &MF = DAG.getMachineFunction();
7863 Alignment = Align(4);
7864 MachineMemOperand *MMO =
7865 MF.getMachineMemOperand(MPI, MachineMemOperand::MOStore, 4, Alignment);
7866 SDValue Ops[] = { Chain, Tmp, FIPtr };
7867 Chain = DAG.getMemIntrinsicNode(PPCISD::STFIWX, dl,
7868 DAG.getVTList(MVT::Other), Ops, MVT::i32, MMO);
7869 } else
7870 Chain = DAG.getStore(Chain, dl, Tmp, FIPtr, MPI, Alignment);
7871
7872 // Result is a load from the stack slot. If loading 4 bytes, make sure to
7873 // add in a bias on big endian.
7874 if (Op.getValueType() == MVT::i32 && !i32Stack) {
7875 FIPtr = DAG.getNode(ISD::ADD, dl, FIPtr.getValueType(), FIPtr,
7876 DAG.getConstant(4, dl, FIPtr.getValueType()));
7877 MPI = MPI.getWithOffset(Subtarget.isLittleEndian() ? 0 : 4);
7878 }
7879
7880 RLI.Chain = Chain;
7881 RLI.Ptr = FIPtr;
7882 RLI.MPI = MPI;
7883 RLI.Alignment = Alignment;
7884}
7885
7886/// Custom lowers floating point to integer conversions to use
7887/// the direct move instructions available in ISA 2.07 to avoid the
7888/// need for load/store combinations.
7889SDValue PPCTargetLowering::LowerFP_TO_INTDirectMove(SDValue Op,
7890 SelectionDAG &DAG,
7891 const SDLoc &dl) const {
7892 SDValue Conv = convertFPToInt(Op, DAG, Subtarget);
7893 SDValue Mov = DAG.getNode(PPCISD::MFVSR, dl, Op.getValueType(), Conv);
7894 if (Op->isStrictFPOpcode())
7895 return DAG.getMergeValues({Mov, Conv.getValue(1)}, dl);
7896 else
7897 return Mov;
7898}
7899
7900SDValue PPCTargetLowering::LowerFP_TO_INT(SDValue Op, SelectionDAG &DAG,
7901 const SDLoc &dl) const {
7902 bool IsStrict = Op->isStrictFPOpcode();
7903 bool IsSigned = Op.getOpcode() == ISD::FP_TO_SINT ||
7904 Op.getOpcode() == ISD::STRICT_FP_TO_SINT;
7905 SDValue Src = Op.getOperand(IsStrict ? 1 : 0);
7906 EVT SrcVT = Src.getValueType();
7907 EVT DstVT = Op.getValueType();
7908
7909 // FP to INT conversions are legal for f128.
7910 if (SrcVT == MVT::f128)
7911 return Subtarget.hasP9Vector() ? Op : SDValue();
7912
7913 // Expand ppcf128 to i32 by hand for the benefit of llvm-gcc bootstrap on
7914 // PPC (the libcall is not available).
7915 if (SrcVT == MVT::ppcf128) {
7916 if (DstVT == MVT::i32) {
7917 // TODO: Conservatively pass only nofpexcept flag here. Need to check and
7918 // set other fast-math flags to FP operations in both strict and
7919 // non-strict cases. (FP_TO_SINT, FSUB)
7920 SDNodeFlags Flags;
7921 Flags.setNoFPExcept(Op->getFlags().hasNoFPExcept());
7922
7923 if (IsSigned) {
7924 SDValue Lo = DAG.getNode(ISD::EXTRACT_ELEMENT, dl, MVT::f64, Src,
7925 DAG.getIntPtrConstant(0, dl));
7926 SDValue Hi = DAG.getNode(ISD::EXTRACT_ELEMENT, dl, MVT::f64, Src,
7927 DAG.getIntPtrConstant(1, dl));
7928
7929 // Add the two halves of the long double in round-to-zero mode, and use
7930 // a smaller FP_TO_SINT.
7931 if (IsStrict) {
7932 SDValue Res = DAG.getNode(PPCISD::STRICT_FADDRTZ, dl,
7933 DAG.getVTList(MVT::f64, MVT::Other),
7934 {Op.getOperand(0), Lo, Hi}, Flags);
7935 return DAG.getNode(ISD::STRICT_FP_TO_SINT, dl,
7936 DAG.getVTList(MVT::i32, MVT::Other),
7937 {Res.getValue(1), Res}, Flags);
7938 } else {
7939 SDValue Res = DAG.getNode(PPCISD::FADDRTZ, dl, MVT::f64, Lo, Hi);
7940 return DAG.getNode(ISD::FP_TO_SINT, dl, MVT::i32, Res);
7941 }
7942 } else {
7943 const uint64_t TwoE31[] = {0x41e0000000000000LL, 0};
7944 APFloat APF = APFloat(APFloat::PPCDoubleDouble(), APInt(128, TwoE31));
7945 SDValue Cst = DAG.getConstantFP(APF, dl, SrcVT);
7946 SDValue SignMask = DAG.getConstant(0x80000000, dl, DstVT);
7947 if (IsStrict) {
7948 // Sel = Src < 0x80000000
7949 // FltOfs = select Sel, 0.0, 0x80000000
7950 // IntOfs = select Sel, 0, 0x80000000
7951 // Result = fp_to_sint(Src - FltOfs) ^ IntOfs
7952 SDValue Chain = Op.getOperand(0);
7953 EVT SetCCVT =
7954 getSetCCResultType(DAG.getDataLayout(), *DAG.getContext(), SrcVT);
7955 EVT DstSetCCVT =
7956 getSetCCResultType(DAG.getDataLayout(), *DAG.getContext(), DstVT);
7957 SDValue Sel = DAG.getSetCC(dl, SetCCVT, Src, Cst, ISD::SETLT,
7958 Chain, true);
7959 Chain = Sel.getValue(1);
7960
7961 SDValue FltOfs = DAG.getSelect(
7962 dl, SrcVT, Sel, DAG.getConstantFP(0.0, dl, SrcVT), Cst);
7963 Sel = DAG.getBoolExtOrTrunc(Sel, dl, DstSetCCVT, DstVT);
7964
7965 SDValue Val = DAG.getNode(ISD::STRICT_FSUB, dl,
7966 DAG.getVTList(SrcVT, MVT::Other),
7967 {Chain, Src, FltOfs}, Flags);
7968 Chain = Val.getValue(1);
7969 SDValue SInt = DAG.getNode(ISD::STRICT_FP_TO_SINT, dl,
7970 DAG.getVTList(DstVT, MVT::Other),
7971 {Chain, Val}, Flags);
7972 Chain = SInt.getValue(1);
7973 SDValue IntOfs = DAG.getSelect(
7974 dl, DstVT, Sel, DAG.getConstant(0, dl, DstVT), SignMask);
7975 SDValue Result = DAG.getNode(ISD::XOR, dl, DstVT, SInt, IntOfs);
7976 return DAG.getMergeValues({Result, Chain}, dl);
7977 } else {
7978 // X>=2^31 ? (int)(X-2^31)+0x80000000 : (int)X
7979 // FIXME: generated code sucks.
7980 SDValue True = DAG.getNode(ISD::FSUB, dl, MVT::ppcf128, Src, Cst);
7981 True = DAG.getNode(ISD::FP_TO_SINT, dl, MVT::i32, True);
7982 True = DAG.getNode(ISD::ADD, dl, MVT::i32, True, SignMask);
7983 SDValue False = DAG.getNode(ISD::FP_TO_SINT, dl, MVT::i32, Src);
7984 return DAG.getSelectCC(dl, Src, Cst, True, False, ISD::SETGE);
7985 }
7986 }
7987 }
7988
7989 return SDValue();
7990 }
7991
7992 if (Subtarget.hasDirectMove() && Subtarget.isPPC64())
7993 return LowerFP_TO_INTDirectMove(Op, DAG, dl);
7994
7995 ReuseLoadInfo RLI;
7996 LowerFP_TO_INTForReuse(Op, RLI, DAG, dl);
7997
7998 return DAG.getLoad(Op.getValueType(), dl, RLI.Chain, RLI.Ptr, RLI.MPI,
7999 RLI.Alignment, RLI.MMOFlags(), RLI.AAInfo, RLI.Ranges);
8000}
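// Editor's sketch (not LLVM code) of the unsigned-overflow trick used above
// for the ppcf128 -> i32 unsigned case when only a signed conversion is
// available: X >= 2^31 ? (int)(X - 2^31) + 0x80000000 : (int)X. A scalar
// double stands in for ppcf128 here; all names are hypothetical.
#include <cstdint>
#include <cstdio>

static uint32_t fpToU32ViaSigned(double X) {
  const double TwoE31 = 2147483648.0; // 2^31
  if (X >= TwoE31)
    return (uint32_t)(int32_t)(X - TwoE31) + 0x80000000u;
  return (uint32_t)(int32_t)X;
}

int main() {
  std::printf("%u\n", fpToU32ViaSigned(3000000000.0)); // 3000000000
  std::printf("%u\n", fpToU32ViaSigned(12345.0));      // 12345
  return 0;
}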
8001
8002// We're trying to insert a regular store, S, and then a load, L. If the
8003// incoming value, O, is a load, we might just be able to have our load use the
8004// address used by O. However, we don't know if anything else will store to
8005// that address before we can load from it. To prevent this situation, we need
8006// to insert our load, L, into the chain as a peer of O. To do this, we give L
8007// the same chain operand as O, we create a token factor from the chain results
8008// of O and L, and we replace all uses of O's chain result with that token
8009// factor (see spliceIntoChain below for this last part).
8010bool PPCTargetLowering::canReuseLoadAddress(SDValue Op, EVT MemVT,
8011 ReuseLoadInfo &RLI,
8012 SelectionDAG &DAG,
8013 ISD::LoadExtType ET) const {
8014 // Conservatively skip reusing for constrained FP nodes.
8015 if (Op->isStrictFPOpcode())
8016 return false;
8017
8018 SDLoc dl(Op);
8019 bool ValidFPToUint = Op.getOpcode() == ISD::FP_TO_UINT &&
8020 (Subtarget.hasFPCVT() || Op.getValueType() == MVT::i32);
8021 if (ET == ISD::NON_EXTLOAD &&
8022 (ValidFPToUint || Op.getOpcode() == ISD::FP_TO_SINT) &&
8023 isOperationLegalOrCustom(Op.getOpcode(),
8024 Op.getOperand(0).getValueType())) {
8025
8026 LowerFP_TO_INTForReuse(Op, RLI, DAG, dl);
8027 return true;
8028 }
8029
8030 LoadSDNode *LD = dyn_cast<LoadSDNode>(Op);
8031 if (!LD || LD->getExtensionType() != ET || LD->isVolatile() ||
8032 LD->isNonTemporal())
8033 return false;
8034 if (LD->getMemoryVT() != MemVT)
8035 return false;
8036
8037 // If the result of the load is an illegal type, then we can't build a
8038 // valid chain for reuse since the legalised loads and token factor node that
8039 // ties the legalised loads together use a different output chain than the
8040 // illegal load.
8041 if (!isTypeLegal(LD->getValueType(0)))
8042 return false;
8043
8044 RLI.Ptr = LD->getBasePtr();
8045 if (LD->isIndexed() && !LD->getOffset().isUndef()) {
8046 assert(LD->getAddressingMode() == ISD::PRE_INC &&
8047 "Non-pre-inc AM on PPC?");
8048 RLI.Ptr = DAG.getNode(ISD::ADD, dl, RLI.Ptr.getValueType(), RLI.Ptr,
8049 LD->getOffset());
8050 }
8051
8052 RLI.Chain = LD->getChain();
8053 RLI.MPI = LD->getPointerInfo();
8054 RLI.IsDereferenceable = LD->isDereferenceable();
8055 RLI.IsInvariant = LD->isInvariant();
8056 RLI.Alignment = LD->getAlign();
8057 RLI.AAInfo = LD->getAAInfo();
8058 RLI.Ranges = LD->getRanges();
8059
8060 RLI.ResChain = SDValue(LD, LD->isIndexed() ? 2 : 1);
8061 return true;
8062}
8063
8064// Given the head of the old chain, ResChain, insert a token factor containing
8065// it and NewResChain, and make users of ResChain now be users of that token
8066// factor.
8067// TODO: Remove and use DAG::makeEquivalentMemoryOrdering() instead.
8068void PPCTargetLowering::spliceIntoChain(SDValue ResChain,
8069 SDValue NewResChain,
8070 SelectionDAG &DAG) const {
8071 if (!ResChain)
8072 return;
8073
8074 SDLoc dl(NewResChain);
8075
8076 SDValue TF = DAG.getNode(ISD::TokenFactor, dl, MVT::Other,
8077 NewResChain, DAG.getUNDEF(MVT::Other));
8078 assert(TF.getNode() != NewResChain.getNode() &&
8079 "A new TF really is required here");
8080
8081 DAG.ReplaceAllUsesOfValueWith(ResChain, TF);
8082 DAG.UpdateNodeOperands(TF.getNode(), ResChain, NewResChain);
8083}
8084
8085 /// Analyze the profitability of a direct move:
8086 /// prefer a float load to an int load plus a direct move
8087 /// when there is no integer use of the int load.
8088bool PPCTargetLowering::directMoveIsProfitable(const SDValue &Op) const {
8089 SDNode *Origin = Op.getOperand(0).getNode();
8090 if (Origin->getOpcode() != ISD::LOAD)
8091 return true;
8092
8093 // If there is no LXSIBZX/LXSIHZX, like Power8,
8094 // prefer direct move if the memory size is 1 or 2 bytes.
8095 MachineMemOperand *MMO = cast<LoadSDNode>(Origin)->getMemOperand();
8096 if (!Subtarget.hasP9Vector() && MMO->getSize() <= 2)
8097 return true;
8098
8099 for (SDNode::use_iterator UI = Origin->use_begin(),
8100 UE = Origin->use_end();
8101 UI != UE; ++UI) {
8102
8103 // Only look at the users of the loaded value.
8104 if (UI.getUse().get().getResNo() != 0)
8105 continue;
8106
8107 if (UI->getOpcode() != ISD::SINT_TO_FP &&
8108 UI->getOpcode() != ISD::UINT_TO_FP &&
8109 UI->getOpcode() != ISD::STRICT_SINT_TO_FP &&
8110 UI->getOpcode() != ISD::STRICT_UINT_TO_FP)
8111 return true;
8112 }
8113
8114 return false;
8115}
8116
8117static SDValue convertIntToFP(SDValue Op, SDValue Src, SelectionDAG &DAG,
8118 const PPCSubtarget &Subtarget,
8119 SDValue Chain = SDValue()) {
8120 bool IsSigned = Op.getOpcode() == ISD::SINT_TO_FP ||
8121 Op.getOpcode() == ISD::STRICT_SINT_TO_FP;
8122 SDLoc dl(Op);
8123
8124 // TODO: Any other flags to propagate?
8125 SDNodeFlags Flags;
8126 Flags.setNoFPExcept(Op->getFlags().hasNoFPExcept());
8127
8128 // If we have FCFIDS, then use it when converting to single-precision.
8129 // Otherwise, convert to double-precision and then round.
8130 bool IsSingle = Op.getValueType() == MVT::f32 && Subtarget.hasFPCVT();
8131 unsigned ConvOpc = IsSingle ? (IsSigned ? PPCISD::FCFIDS : PPCISD::FCFIDUS)
8132 : (IsSigned ? PPCISD::FCFID : PPCISD::FCFIDU);
8133 EVT ConvTy = IsSingle ? MVT::f32 : MVT::f64;
8134 if (Op->isStrictFPOpcode()) {
8135 if (!Chain)
8136 Chain = Op.getOperand(0);
8137 return DAG.getNode(getPPCStrictOpcode(ConvOpc), dl,
8138 DAG.getVTList(ConvTy, MVT::Other), {Chain, Src}, Flags);
8139 } else
8140 return DAG.getNode(ConvOpc, dl, ConvTy, Src);
8141}
8142
8143/// Custom lowers integer to floating point conversions to use
8144/// the direct move instructions available in ISA 2.07 to avoid the
8145/// need for load/store combinations.
8146SDValue PPCTargetLowering::LowerINT_TO_FPDirectMove(SDValue Op,
8147 SelectionDAG &DAG,
8148 const SDLoc &dl) const {
8149 assert((Op.getValueType() == MVT::f32 ||
8150 Op.getValueType() == MVT::f64) &&
8151 "Invalid floating point type as target of conversion");
8152 assert(Subtarget.hasFPCVT() &&
8153 "Int to FP conversions with direct moves require FPCVT");
8154 SDValue Src = Op.getOperand(Op->isStrictFPOpcode() ? 1 : 0);
8155 bool WordInt = Src.getSimpleValueType().SimpleTy == MVT::i32;
8156 bool Signed = Op.getOpcode() == ISD::SINT_TO_FP ||
8157 Op.getOpcode() == ISD::STRICT_SINT_TO_FP;
8158 unsigned MovOpc = (WordInt && !Signed) ? PPCISD::MTVSRZ : PPCISD::MTVSRA;
8159 SDValue Mov = DAG.getNode(MovOpc, dl, MVT::f64, Src);
8160 return convertIntToFP(Op, Mov, DAG, Subtarget);
8161}
8162
8163static SDValue widenVec(SelectionDAG &DAG, SDValue Vec, const SDLoc &dl) {
8164
8165 EVT VecVT = Vec.getValueType();
8166 assert(VecVT.isVector() && "Expected a vector type.");
8167 assert(VecVT.getSizeInBits() < 128 && "Vector is already full width.");
8168
8169 EVT EltVT = VecVT.getVectorElementType();
8170 unsigned WideNumElts = 128 / EltVT.getSizeInBits();
8171 EVT WideVT = EVT::getVectorVT(*DAG.getContext(), EltVT, WideNumElts);
8172
8173 unsigned NumConcat = WideNumElts / VecVT.getVectorNumElements();
8174 SmallVector<SDValue, 16> Ops(NumConcat);
8175 Ops[0] = Vec;
8176 SDValue UndefVec = DAG.getUNDEF(VecVT);
8177 for (unsigned i = 1; i < NumConcat; ++i)
8178 Ops[i] = UndefVec;
8179
8180 return DAG.getNode(ISD::CONCAT_VECTORS, dl, WideVT, Ops);
8181}
8182
8183SDValue PPCTargetLowering::LowerINT_TO_FPVector(SDValue Op, SelectionDAG &DAG,
8184 const SDLoc &dl) const {
8185 bool IsStrict = Op->isStrictFPOpcode();
8186 unsigned Opc = Op.getOpcode();
8187 SDValue Src = Op.getOperand(IsStrict ? 1 : 0);
8188 assert((Opc == ISD::UINT_TO_FP || Opc == ISD::SINT_TO_FP ||
8189 Opc == ISD::STRICT_UINT_TO_FP || Opc == ISD::STRICT_SINT_TO_FP) &&
8190 "Unexpected conversion type");
8191 assert((Op.getValueType() == MVT::v2f64 || Op.getValueType() == MVT::v4f32) &&
8192 "Supports conversions to v2f64/v4f32 only.");
8193
8194 // TODO: Any other flags to propagate?
8195 SDNodeFlags Flags;
8196 Flags.setNoFPExcept(Op->getFlags().hasNoFPExcept());
8197
8198 bool SignedConv = Opc == ISD::SINT_TO_FP || Opc == ISD::STRICT_SINT_TO_FP;
8199 bool FourEltRes = Op.getValueType() == MVT::v4f32;
8200
8201 SDValue Wide = widenVec(DAG, Src, dl);
8202 EVT WideVT = Wide.getValueType();
8203 unsigned WideNumElts = WideVT.getVectorNumElements();
8204 MVT IntermediateVT = FourEltRes ? MVT::v4i32 : MVT::v2i64;
8205
8206 SmallVector<int, 16> ShuffV;
8207 for (unsigned i = 0; i < WideNumElts; ++i)
8208 ShuffV.push_back(i + WideNumElts);
8209
8210 int Stride = FourEltRes ? WideNumElts / 4 : WideNumElts / 2;
8211 int SaveElts = FourEltRes ? 4 : 2;
8212 if (Subtarget.isLittleEndian())
8213 for (int i = 0; i < SaveElts; i++)
8214 ShuffV[i * Stride] = i;
8215 else
8216 for (int i = 1; i <= SaveElts; i++)
8217 ShuffV[i * Stride - 1] = i - 1;
8218
8219 SDValue ShuffleSrc2 =
8220 SignedConv ? DAG.getUNDEF(WideVT) : DAG.getConstant(0, dl, WideVT);
8221 SDValue Arrange = DAG.getVectorShuffle(WideVT, dl, Wide, ShuffleSrc2, ShuffV);
8222
8223 SDValue Extend;
8224 if (SignedConv) {
8225 Arrange = DAG.getBitcast(IntermediateVT, Arrange);
8226 EVT ExtVT = Src.getValueType();
8227 if (Subtarget.hasP9Altivec())
8228 ExtVT = EVT::getVectorVT(*DAG.getContext(), WideVT.getVectorElementType(),
8229 IntermediateVT.getVectorNumElements());
8230
8231 Extend = DAG.getNode(ISD::SIGN_EXTEND_INREG, dl, IntermediateVT, Arrange,
8232 DAG.getValueType(ExtVT));
8233 } else
8234 Extend = DAG.getNode(ISD::BITCAST, dl, IntermediateVT, Arrange);
8235
8236 if (IsStrict)
8237 return DAG.getNode(Opc, dl, DAG.getVTList(Op.getValueType(), MVT::Other),
8238 {Op.getOperand(0), Extend}, Flags);
8239
8240 return DAG.getNode(Opc, dl, Op.getValueType(), Extend);
8241}
8242
8243SDValue PPCTargetLowering::LowerINT_TO_FP(SDValue Op,
8244 SelectionDAG &DAG) const {
8245 SDLoc dl(Op);
8246 bool IsSigned = Op.getOpcode() == ISD::SINT_TO_FP ||
8247 Op.getOpcode() == ISD::STRICT_SINT_TO_FP;
8248 bool IsStrict = Op->isStrictFPOpcode();
8249 SDValue Src = Op.getOperand(IsStrict ? 1 : 0);
8250 SDValue Chain = IsStrict ? Op.getOperand(0) : DAG.getEntryNode();
8251
8252 // TODO: Any other flags to propagate?
8253 SDNodeFlags Flags;
8254 Flags.setNoFPExcept(Op->getFlags().hasNoFPExcept());
8255
8256 EVT InVT = Src.getValueType();
8257 EVT OutVT = Op.getValueType();
8258 if (OutVT.isVector() && OutVT.isFloatingPoint() &&
8259 isOperationCustom(Op.getOpcode(), InVT))
8260 return LowerINT_TO_FPVector(Op, DAG, dl);
8261
8262 // Conversions to f128 are legal.
8263 if (Op.getValueType() == MVT::f128)
8264 return Subtarget.hasP9Vector() ? Op : SDValue();
8265
8266 // Don't handle ppc_fp128 here; let it be lowered to a libcall.
8267 if (Op.getValueType() != MVT::f32 && Op.getValueType() != MVT::f64)
8268 return SDValue();
8269
8270 if (Src.getValueType() == MVT::i1) {
8271 SDValue Sel = DAG.getNode(ISD::SELECT, dl, Op.getValueType(), Src,
8272 DAG.getConstantFP(1.0, dl, Op.getValueType()),
8273 DAG.getConstantFP(0.0, dl, Op.getValueType()));
8274 if (IsStrict)
8275 return DAG.getMergeValues({Sel, Chain}, dl);
8276 else
8277 return Sel;
8278 }
8279
8280 // If we have direct moves, we can do all the conversion and skip the store/load;
8281 // however, without FPCVT we can't do most conversions.
8282 if (Subtarget.hasDirectMove() && directMoveIsProfitable(Op) &&
8283 Subtarget.isPPC64() && Subtarget.hasFPCVT())
8284 return LowerINT_TO_FPDirectMove(Op, DAG, dl);
8285
8286 assert((IsSigned || Subtarget.hasFPCVT()) &&
8287 "UINT_TO_FP is supported only with FPCVT");
8288
8289 if (Src.getValueType() == MVT::i64) {
8290 SDValue SINT = Src;
8291 // When converting to single-precision, we actually need to convert
8292 // to double-precision first and then round to single-precision.
8293 // To avoid double-rounding effects during that operation, we have
8294 // to prepare the input operand. Bits that might be truncated when
8295 // converting to double-precision are replaced by a bit that won't
8296 // be lost at this stage, but is below the single-precision rounding
8297 // position.
8298 //
8299 // However, if -enable-unsafe-fp-math is in effect, accept double
8300 // rounding to avoid the extra overhead.
8301 if (Op.getValueType() == MVT::f32 &&
8302 !Subtarget.hasFPCVT() &&
8303 !DAG.getTarget().Options.UnsafeFPMath) {
8304
8305 // Twiddle input to make sure the low 11 bits are zero. (If this
8306 // is the case, we are guaranteed the value will fit into the 53 bit
8307 // mantissa of an IEEE double-precision value without rounding.)
8308 // If any of those low 11 bits were not zero originally, make sure
8309 // bit 12 (value 2048) is set instead, so that the final rounding
8310 // to single-precision gets the correct result.
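// Editorial worked example (not in the original source): for
// SINT = 0x0123456789ABC005 the low 11 bits are 0x005, so
// (SINT & 2047) + 2047 = 0x804, which has the 0x800 bit set; OR-ing that into
// SINT and masking with ~2047 gives 0x0123456789ABC800. The nonzero low bits
// collapse into the single sticky bit 0x800, which is preserved by the
// i64 -> f64 conversion and steers the later f64 -> f32 rounding correctly.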
8311 SDValue Round = DAG.getNode(ISD::AND, dl, MVT::i64,
8312 SINT, DAG.getConstant(2047, dl, MVT::i64));
8313 Round = DAG.getNode(ISD::ADD, dl, MVT::i64,
8314 Round, DAG.getConstant(2047, dl, MVT::i64));
8315 Round = DAG.getNode(ISD::OR, dl, MVT::i64, Round, SINT);
8316 Round = DAG.getNode(ISD::AND, dl, MVT::i64,
8317 Round, DAG.getConstant(-2048, dl, MVT::i64));
8318
8319 // However, we cannot use that value unconditionally: if the magnitude
8320 // of the input value is small, the bit-twiddling we did above might
8321 // end up visibly changing the output. Fortunately, in that case, we
8322 // don't need to twiddle bits since the original input will convert
8323 // exactly to double-precision floating-point already. Therefore,
8324 // construct a conditional to use the original value if the top 11
8325 // bits are all sign-bit copies, and use the rounded value computed
8326 // above otherwise.
8327 SDValue Cond = DAG.getNode(ISD::SRA, dl, MVT::i64,
8328 SINT, DAG.getConstant(53, dl, MVT::i32));
8329 Cond = DAG.getNode(ISD::ADD, dl, MVT::i64,
8330 Cond, DAG.getConstant(1, dl, MVT::i64));
8331 Cond = DAG.getSetCC(
8332 dl,
8333 getSetCCResultType(DAG.getDataLayout(), *DAG.getContext(), MVT::i64),
8334 Cond, DAG.getConstant(1, dl, MVT::i64), ISD::SETUGT);
8335
8336 SINT = DAG.getNode(ISD::SELECT, dl, MVT::i64, Cond, Round, SINT);
8337 }
8338
8339 ReuseLoadInfo RLI;
8340 SDValue Bits;
8341
8342 MachineFunction &MF = DAG.getMachineFunction();
8343 if (canReuseLoadAddress(SINT, MVT::i64, RLI, DAG)) {
8344 Bits = DAG.getLoad(MVT::f64, dl, RLI.Chain, RLI.Ptr, RLI.MPI,
8345 RLI.Alignment, RLI.MMOFlags(), RLI.AAInfo, RLI.Ranges);
8346 spliceIntoChain(RLI.ResChain, Bits.getValue(1), DAG);
8347 } else if (Subtarget.hasLFIWAX() &&
8348 canReuseLoadAddress(SINT, MVT::i32, RLI, DAG, ISD::SEXTLOAD)) {
8349 MachineMemOperand *MMO =
8350 MF.getMachineMemOperand(RLI.MPI, MachineMemOperand::MOLoad, 4,
8351 RLI.Alignment, RLI.AAInfo, RLI.Ranges);
8352 SDValue Ops[] = { RLI.Chain, RLI.Ptr };
8353 Bits = DAG.getMemIntrinsicNode(PPCISD::LFIWAX, dl,
8354 DAG.getVTList(MVT::f64, MVT::Other),
8355 Ops, MVT::i32, MMO);
8356 spliceIntoChain(RLI.ResChain, Bits.getValue(1), DAG);
8357 } else if (Subtarget.hasFPCVT() &&
8358 canReuseLoadAddress(SINT, MVT::i32, RLI, DAG, ISD::ZEXTLOAD)) {
8359 MachineMemOperand *MMO =
8360 MF.getMachineMemOperand(RLI.MPI, MachineMemOperand::MOLoad, 4,
8361 RLI.Alignment, RLI.AAInfo, RLI.Ranges);
8362 SDValue Ops[] = { RLI.Chain, RLI.Ptr };
8363 Bits = DAG.getMemIntrinsicNode(PPCISD::LFIWZX, dl,
8364 DAG.getVTList(MVT::f64, MVT::Other),
8365 Ops, MVT::i32, MMO);
8366 spliceIntoChain(RLI.ResChain, Bits.getValue(1), DAG);
8367 } else if (((Subtarget.hasLFIWAX() &&
8368 SINT.getOpcode() == ISD::SIGN_EXTEND) ||
8369 (Subtarget.hasFPCVT() &&
8370 SINT.getOpcode() == ISD::ZERO_EXTEND)) &&
8371 SINT.getOperand(0).getValueType() == MVT::i32) {
8372 MachineFrameInfo &MFI = MF.getFrameInfo();
8373 EVT PtrVT = getPointerTy(DAG.getDataLayout());
8374
8375 int FrameIdx = MFI.CreateStackObject(4, Align(4), false);
8376 SDValue FIdx = DAG.getFrameIndex(FrameIdx, PtrVT);
8377
8378 SDValue Store = DAG.getStore(Chain, dl, SINT.getOperand(0), FIdx,
8379 MachinePointerInfo::getFixedStack(
8380 DAG.getMachineFunction(), FrameIdx));
8381 Chain = Store;
8382
8383 assert(cast<StoreSDNode>(Store)->getMemoryVT() == MVT::i32 &&
8384 "Expected an i32 store");
8385
8386 RLI.Ptr = FIdx;
8387 RLI.Chain = Chain;
8388 RLI.MPI =
8389 MachinePointerInfo::getFixedStack(DAG.getMachineFunction(), FrameIdx);
8390 RLI.Alignment = Align(4);
8391
8392 MachineMemOperand *MMO =
8393 MF.getMachineMemOperand(RLI.MPI, MachineMemOperand::MOLoad, 4,
8394 RLI.Alignment, RLI.AAInfo, RLI.Ranges);
8395 SDValue Ops[] = { RLI.Chain, RLI.Ptr };
8396 Bits = DAG.getMemIntrinsicNode(SINT.getOpcode() == ISD::ZERO_EXTEND ?
8397 PPCISD::LFIWZX : PPCISD::LFIWAX,
8398 dl, DAG.getVTList(MVT::f64, MVT::Other),
8399 Ops, MVT::i32, MMO);
8400 Chain = Bits.getValue(1);
8401 } else
8402 Bits = DAG.getNode(ISD::BITCAST, dl, MVT::f64, SINT);
8403
8404 SDValue FP = convertIntToFP(Op, Bits, DAG, Subtarget, Chain);
8405 if (IsStrict)
8406 Chain = FP.getValue(1);
8407
8408 if (Op.getValueType() == MVT::f32 && !Subtarget.hasFPCVT()) {
8409 if (IsStrict)
8410 FP = DAG.getNode(ISD::STRICT_FP_ROUND, dl,
8411 DAG.getVTList(MVT::f32, MVT::Other),
8412 {Chain, FP, DAG.getIntPtrConstant(0, dl)}, Flags);
8413 else
8414 FP = DAG.getNode(ISD::FP_ROUND, dl, MVT::f32, FP,
8415 DAG.getIntPtrConstant(0, dl));
8416 }
8417 return FP;
8418 }
8419
8420 assert(Src.getValueType() == MVT::i32 &&
8421 "Unhandled INT_TO_FP type in custom expander!");
8422 // Since we only generate this in 64-bit mode, we can take advantage of
8423 // 64-bit registers. In particular, sign extend the input value into the
8424 // 64-bit register with extsw, store the WHOLE 64-bit value into the stack
8425 // then lfd it and fcfid it.
8426 MachineFunction &MF = DAG.getMachineFunction();
8427 MachineFrameInfo &MFI = MF.getFrameInfo();
8428 EVT PtrVT = getPointerTy(MF.getDataLayout());
8429
8430 SDValue Ld;
8431 if (Subtarget.hasLFIWAX() || Subtarget.hasFPCVT()) {
8432 ReuseLoadInfo RLI;
8433 bool ReusingLoad;
8434 if (!(ReusingLoad = canReuseLoadAddress(Src, MVT::i32, RLI, DAG))) {
8435 int FrameIdx = MFI.CreateStackObject(4, Align(4), false);
8436 SDValue FIdx = DAG.getFrameIndex(FrameIdx, PtrVT);
8437
8438 SDValue Store = DAG.getStore(Chain, dl, Src, FIdx,
8439 MachinePointerInfo::getFixedStack(
8440 DAG.getMachineFunction(), FrameIdx));
8441 Chain = Store;
8442
8443 assert(cast<StoreSDNode>(Store)->getMemoryVT() == MVT::i32 &&
8444 "Expected an i32 store");
8445
8446 RLI.Ptr = FIdx;
8447 RLI.Chain = Chain;
8448 RLI.MPI =
8449 MachinePointerInfo::getFixedStack(DAG.getMachineFunction(), FrameIdx);
8450 RLI.Alignment = Align(4);
8451 }
8452
8453 MachineMemOperand *MMO =
8454 MF.getMachineMemOperand(RLI.MPI, MachineMemOperand::MOLoad, 4,
8455 RLI.Alignment, RLI.AAInfo, RLI.Ranges);
8456 SDValue Ops[] = { RLI.Chain, RLI.Ptr };
8457 Ld = DAG.getMemIntrinsicNode(IsSigned ? PPCISD::LFIWAX : PPCISD::LFIWZX, dl,
8458 DAG.getVTList(MVT::f64, MVT::Other), Ops,
8459 MVT::i32, MMO);
8460 Chain = Ld.getValue(1);
8461 if (ReusingLoad)
8462 spliceIntoChain(RLI.ResChain, Ld.getValue(1), DAG);
8463 } else {
8464 assert(Subtarget.isPPC64() &&
8465 "i32->FP without LFIWAX supported only on PPC64");
8466
8467 int FrameIdx = MFI.CreateStackObject(8, Align(8), false);
8468 SDValue FIdx = DAG.getFrameIndex(FrameIdx, PtrVT);
8469
8470 SDValue Ext64 = DAG.getNode(ISD::SIGN_EXTEND, dl, MVT::i64, Src);
8471
8472 // STD the extended value into the stack slot.
8473 SDValue Store = DAG.getStore(
8474 Chain, dl, Ext64, FIdx,
8475 MachinePointerInfo::getFixedStack(DAG.getMachineFunction(), FrameIdx));
8476 Chain = Store;
8477
8478 // Load the value as a double.
8479 Ld = DAG.getLoad(
8480 MVT::f64, dl, Chain, FIdx,
8481 MachinePointerInfo::getFixedStack(DAG.getMachineFunction(), FrameIdx));
8482 Chain = Ld.getValue(1);
8483 }
8484
8485 // FCFID it and return it.
8486 SDValue FP = convertIntToFP(Op, Ld, DAG, Subtarget, Chain);
8487 if (IsStrict)
8488 Chain = FP.getValue(1);
8489 if (Op.getValueType() == MVT::f32 && !Subtarget.hasFPCVT()) {
8490 if (IsStrict)
8491 FP = DAG.getNode(ISD::STRICT_FP_ROUND, dl,
8492 DAG.getVTList(MVT::f32, MVT::Other),
8493 {Chain, FP, DAG.getIntPtrConstant(0, dl)}, Flags);
8494 else
8495 FP = DAG.getNode(ISD::FP_ROUND, dl, MVT::f32, FP,
8496 DAG.getIntPtrConstant(0, dl));
8497 }
8498 return FP;
8499}
8500
8501SDValue PPCTargetLowering::LowerFLT_ROUNDS_(SDValue Op,
8502 SelectionDAG &DAG) const {
8503 SDLoc dl(Op);
8504 /*
8505 The rounding mode is in bits 30:31 of FPSCR, and has the following
8506 settings:
8507 00 Round to nearest
8508 01 Round to 0
8509 10 Round to +inf
8510 11 Round to -inf
8511
8512 FLT_ROUNDS, on the other hand, expects the following:
8513 -1 Undefined
8514 0 Round to 0
8515 1 Round to nearest
8516 2 Round to +inf
8517 3 Round to -inf
8518
8519 To perform the conversion, we do:
8520 ((FPSCR & 0x3) ^ ((~FPSCR & 0x3) >> 1))
8521 */
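// Editorial check of the formula above (not in the original source), with
// x = FPSCR & 0x3:
//   x = 0b00: 0 ^ ((~0 & 3) >> 1) = 0 ^ 1 = 1  (round to nearest)
//   x = 0b01: 1 ^ ((~1 & 3) >> 1) = 1 ^ 1 = 0  (round to 0)
//   x = 0b10: 2 ^ ((~2 & 3) >> 1) = 2 ^ 0 = 2  (round to +inf)
//   x = 0b11: 3 ^ ((~3 & 3) >> 1) = 3 ^ 0 = 3  (round to -inf)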
8522
8523 MachineFunction &MF = DAG.getMachineFunction();
8524 EVT VT = Op.getValueType();
8525 EVT PtrVT = getPointerTy(MF.getDataLayout());
8526
8527 // Save FP Control Word to register
8528 SDValue Chain = Op.getOperand(0);
8529 SDValue MFFS = DAG.getNode(PPCISD::MFFS, dl, {MVT::f64, MVT::Other}, Chain);
8530 Chain = MFFS.getValue(1);
8531
8532 SDValue CWD;
8533 if (isTypeLegal(MVT::i64)) {
8534 CWD = DAG.getNode(ISD::TRUNCATE, dl, MVT::i32,
8535 DAG.getNode(ISD::BITCAST, dl, MVT::i64, MFFS));
8536 } else {
8537 // Save FP register to stack slot
8538 int SSFI = MF.getFrameInfo().CreateStackObject(8, Align(8), false);
8539 SDValue StackSlot = DAG.getFrameIndex(SSFI, PtrVT);
8540 Chain = DAG.getStore(Chain, dl, MFFS, StackSlot, MachinePointerInfo());
8541
8542 // Load FP Control Word from low 32 bits of stack slot.
8543 assert(hasBigEndianPartOrdering(MVT::i64, MF.getDataLayout()) &&
8544 "Stack slot adjustment is valid only on big endian subtargets!");
8545 SDValue Four = DAG.getConstant(4, dl, PtrVT);
8546 SDValue Addr = DAG.getNode(ISD::ADD, dl, PtrVT, StackSlot, Four);
8547 CWD = DAG.getLoad(MVT::i32, dl, Chain, Addr, MachinePointerInfo());
8548 Chain = CWD.getValue(1);
8549 }
8550
8551 // Transform as necessary
8552 SDValue CWD1 =
8553 DAG.getNode(ISD::AND, dl, MVT::i32,
8554 CWD, DAG.getConstant(3, dl, MVT::i32));
8555 SDValue CWD2 =
8556 DAG.getNode(ISD::SRL, dl, MVT::i32,
8557 DAG.getNode(ISD::AND, dl, MVT::i32,
8558 DAG.getNode(ISD::XOR, dl, MVT::i32,
8559 CWD, DAG.getConstant(3, dl, MVT::i32)),
8560 DAG.getConstant(3, dl, MVT::i32)),
8561 DAG.getConstant(1, dl, MVT::i32));
8562
8563 SDValue RetVal =
8564 DAG.getNode(ISD::XOR, dl, MVT::i32, CWD1, CWD2);
8565
8566 RetVal =
8567 DAG.getNode((VT.getSizeInBits() < 16 ? ISD::TRUNCATE : ISD::ZERO_EXTEND),
8568 dl, VT, RetVal);
8569
8570 return DAG.getMergeValues({RetVal, Chain}, dl);
8571}
8572
8573SDValue PPCTargetLowering::LowerSHL_PARTS(SDValue Op, SelectionDAG &DAG) const {
8574 EVT VT = Op.getValueType();
8575 unsigned BitWidth = VT.getSizeInBits();
8576 SDLoc dl(Op);
8577 assert(Op.getNumOperands() == 3 &&
8578 VT == Op.getOperand(1).getValueType() &&
8579 "Unexpected SHL!");
8580
8581 // Expand into a bunch of logical ops. Note that these ops
8582 // depend on the PPC behavior for oversized shift amounts.
8583 SDValue Lo = Op.getOperand(0);
8584 SDValue Hi = Op.getOperand(1);
8585 SDValue Amt = Op.getOperand(2);
8586 EVT AmtVT = Amt.getValueType();
8587
8588 SDValue Tmp1 = DAG.getNode(ISD::SUB, dl, AmtVT,
8589 DAG.getConstant(BitWidth, dl, AmtVT), Amt);
8590 SDValue Tmp2 = DAG.getNode(PPCISD::SHL, dl, VT, Hi, Amt);
8591 SDValue Tmp3 = DAG.getNode(PPCISD::SRL, dl, VT, Lo, Tmp1);
8592 SDValue Tmp4 = DAG.getNode(ISD::OR , dl, VT, Tmp2, Tmp3);
8593 SDValue Tmp5 = DAG.getNode(ISD::ADD, dl, AmtVT, Amt,
8594 DAG.getConstant(-BitWidth, dl, AmtVT));
8595 SDValue Tmp6 = DAG.getNode(PPCISD::SHL, dl, VT, Lo, Tmp5);
8596 SDValue OutHi = DAG.getNode(ISD::OR, dl, VT, Tmp4, Tmp6);
8597 SDValue OutLo = DAG.getNode(PPCISD::SHL, dl, VT, Lo, Amt);
8598 SDValue OutOps[] = { OutLo, OutHi };
8599 return DAG.getMergeValues(OutOps, dl);
8600}
8601
8602SDValue PPCTargetLowering::LowerSRL_PARTS(SDValue Op, SelectionDAG &DAG) const {
8603 EVT VT = Op.getValueType();
8604 SDLoc dl(Op);
8605 unsigned BitWidth = VT.getSizeInBits();
8606 assert(Op.getNumOperands() == 3 &&
8607 VT == Op.getOperand(1).getValueType() &&
8608 "Unexpected SRL!");
8609
8610 // Expand into a bunch of logical ops. Note that these ops
8611 // depend on the PPC behavior for oversized shift amounts.
8612 SDValue Lo = Op.getOperand(0);
8613 SDValue Hi = Op.getOperand(1);
8614 SDValue Amt = Op.getOperand(2);
8615 EVT AmtVT = Amt.getValueType();
8616
8617 SDValue Tmp1 = DAG.getNode(ISD::SUB, dl, AmtVT,
8618 DAG.getConstant(BitWidth, dl, AmtVT), Amt);
8619 SDValue Tmp2 = DAG.getNode(PPCISD::SRL, dl, VT, Lo, Amt);
8620 SDValue Tmp3 = DAG.getNode(PPCISD::SHL, dl, VT, Hi, Tmp1);
8621 SDValue Tmp4 = DAG.getNode(ISD::OR, dl, VT, Tmp2, Tmp3);
8622 SDValue Tmp5 = DAG.getNode(ISD::ADD, dl, AmtVT, Amt,
8623 DAG.getConstant(-BitWidth, dl, AmtVT));
8624 SDValue Tmp6 = DAG.getNode(PPCISD::SRL, dl, VT, Hi, Tmp5);
8625 SDValue OutLo = DAG.getNode(ISD::OR, dl, VT, Tmp4, Tmp6);
8626 SDValue OutHi = DAG.getNode(PPCISD::SRL, dl, VT, Hi, Amt);
8627 SDValue OutOps[] = { OutLo, OutHi };
8628 return DAG.getMergeValues(OutOps, dl);
8629}
8630
8631SDValue PPCTargetLowering::LowerSRA_PARTS(SDValue Op, SelectionDAG &DAG) const {
8632 SDLoc dl(Op);
8633 EVT VT = Op.getValueType();
8634 unsigned BitWidth = VT.getSizeInBits();
8635 assert(Op.getNumOperands() == 3 &&
8636 VT == Op.getOperand(1).getValueType() &&
8637 "Unexpected SRA!");
8638
8639 // Expand into a bunch of logical ops, followed by a select_cc.
8640 SDValue Lo = Op.getOperand(0);
8641 SDValue Hi = Op.getOperand(1);
8642 SDValue Amt = Op.getOperand(2);
8643 EVT AmtVT = Amt.getValueType();
8644
8645 SDValue Tmp1 = DAG.getNode(ISD::SUB, dl, AmtVT,
8646 DAG.getConstant(BitWidth, dl, AmtVT), Amt);
8647 SDValue Tmp2 = DAG.getNode(PPCISD::SRL, dl, VT, Lo, Amt);
8648 SDValue Tmp3 = DAG.getNode(PPCISD::SHL, dl, VT, Hi, Tmp1);
8649 SDValue Tmp4 = DAG.getNode(ISD::OR, dl, VT, Tmp2, Tmp3);
8650 SDValue Tmp5 = DAG.getNode(ISD::ADD, dl, AmtVT, Amt,
8651 DAG.getConstant(-BitWidth, dl, AmtVT));
8652 SDValue Tmp6 = DAG.getNode(PPCISD::SRA, dl, VT, Hi, Tmp5);
8653 SDValue OutHi = DAG.getNode(PPCISD::SRA, dl, VT, Hi, Amt);
8654 SDValue OutLo = DAG.getSelectCC(dl, Tmp5, DAG.getConstant(0, dl, AmtVT),
8655 Tmp4, Tmp6, ISD::SETLE);
8656 SDValue OutOps[] = { OutLo, OutHi };
8657 return DAG.getMergeValues(OutOps, dl);
8658}
8659
8660SDValue PPCTargetLowering::LowerFunnelShift(SDValue Op,
8661 SelectionDAG &DAG) const {
8662 SDLoc dl(Op);
8663 EVT VT = Op.getValueType();
8664 unsigned BitWidth = VT.getSizeInBits();
8665
8666 bool IsFSHL = Op.getOpcode() == ISD::FSHL;
8667 SDValue X = Op.getOperand(0);
8668 SDValue Y = Op.getOperand(1);
8669 SDValue Z = Op.getOperand(2);
8670 EVT AmtVT = Z.getValueType();
8671
8672 // fshl: (X << (Z % BW)) | (Y >> (BW - (Z % BW)))
8673 // fshr: (X << (BW - (Z % BW))) | (Y >> (Z % BW))
8674 // This is simpler than TargetLowering::expandFunnelShift because we can rely
8675 // on PowerPC shift by BW being well defined.
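// Editorial worked example (not in the original source): for a 32-bit fshl
// with Z = 40, the AND below reduces Z to 8 and SubZ becomes 24, so the
// result is (X << 8) | (Y >> 24): the low 24 bits of X followed by the high
// 8 bits of Y.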
8676 Z = DAG.getNode(ISD::AND, dl, AmtVT, Z,
8677 DAG.getConstant(BitWidth - 1, dl, AmtVT));
8678 SDValue SubZ =
8679 DAG.getNode(ISD::SUB, dl, AmtVT, DAG.getConstant(BitWidth, dl, AmtVT), Z);
8680 X = DAG.getNode(PPCISD::SHL, dl, VT, X, IsFSHL ? Z : SubZ);
8681 Y = DAG.getNode(PPCISD::SRL, dl, VT, Y, IsFSHL ? SubZ : Z);
8682 return DAG.getNode(ISD::OR, dl, VT, X, Y);
8683}
8684
8685//===----------------------------------------------------------------------===//
8686// Vector related lowering.
8687//
8688
8689/// getCanonicalConstSplat - Build a canonical splat immediate of Val with an
8690/// element size of SplatSize. Cast the result to VT.
8691static SDValue getCanonicalConstSplat(uint64_t Val, unsigned SplatSize, EVT VT,
8692 SelectionDAG &DAG, const SDLoc &dl) {
8693 static const MVT VTys[] = { // canonical VT to use for each size.
8694 MVT::v16i8, MVT::v8i16, MVT::Other, MVT::v4i32
8695 };
8696
8697 EVT ReqVT = VT != MVT::Other ? VT : VTys[SplatSize-1];
8698
8699 // For a splat with all ones, turn it to vspltisb 0xFF to canonicalize.
8700 if (Val == ((1LLU << (SplatSize * 8)) - 1)) {
8701 SplatSize = 1;
8702 Val = 0xFF;
8703 }
8704
8705 EVT CanonicalVT = VTys[SplatSize-1];
8706
8707 // Build a canonical splat for this value.
8708 return DAG.getBitcast(ReqVT, DAG.getConstant(Val, dl, CanonicalVT));
8709}
8710
8711/// BuildIntrinsicOp - Return a unary operator intrinsic node with the
8712/// specified intrinsic ID.
8713static SDValue BuildIntrinsicOp(unsigned IID, SDValue Op, SelectionDAG &DAG,
8714 const SDLoc &dl, EVT DestVT = MVT::Other) {
8715 if (DestVT == MVT::Other) DestVT = Op.getValueType();
8716 return DAG.getNode(ISD::INTRINSIC_WO_CHAIN, dl, DestVT,
8717 DAG.getConstant(IID, dl, MVT::i32), Op);
8718}
8719
8720/// BuildIntrinsicOp - Return a binary operator intrinsic node with the
8721/// specified intrinsic ID.
8722static SDValue BuildIntrinsicOp(unsigned IID, SDValue LHS, SDValue RHS,
8723 SelectionDAG &DAG, const SDLoc &dl,
8724 EVT DestVT = MVT::Other) {
8725 if (DestVT == MVT::Other) DestVT = LHS.getValueType();
8726 return DAG.getNode(ISD::INTRINSIC_WO_CHAIN, dl, DestVT,
8727 DAG.getConstant(IID, dl, MVT::i32), LHS, RHS);
8728}
8729
8730/// BuildIntrinsicOp - Return a ternary operator intrinsic node with the
8731/// specified intrinsic ID.
8732static SDValue BuildIntrinsicOp(unsigned IID, SDValue Op0, SDValue Op1,
8733 SDValue Op2, SelectionDAG &DAG, const SDLoc &dl,
8734 EVT DestVT = MVT::Other) {
8735 if (DestVT == MVT::Other) DestVT = Op0.getValueType();
8736 return DAG.getNode(ISD::INTRINSIC_WO_CHAIN, dl, DestVT,
8737 DAG.getConstant(IID, dl, MVT::i32), Op0, Op1, Op2);
8738}
8739
8740/// BuildVSLDOI - Return a VECTOR_SHUFFLE that is a vsldoi of the specified
8741/// amount. The result has the specified value type.
8742static SDValue BuildVSLDOI(SDValue LHS, SDValue RHS, unsigned Amt, EVT VT,
8743 SelectionDAG &DAG, const SDLoc &dl) {
8744 // Force LHS/RHS to be the right type.
8745 LHS = DAG.getNode(ISD::BITCAST, dl, MVT::v16i8, LHS);
8746 RHS = DAG.getNode(ISD::BITCAST, dl, MVT::v16i8, RHS);
8747
8748 int Ops[16];
8749 for (unsigned i = 0; i != 16; ++i)
8750 Ops[i] = i + Amt;
8751 SDValue T = DAG.getVectorShuffle(MVT::v16i8, dl, LHS, RHS, Ops);
8752 return DAG.getNode(ISD::BITCAST, dl, VT, T);
8753}
8754
8755/// Do we have an efficient pattern in a .td file for this node?
8756///
8757/// \param V - pointer to the BuildVectorSDNode being matched
8758/// \param HasDirectMove - does this subtarget have VSR <-> GPR direct moves?
8759///
8760/// There are some patterns where it is beneficial to keep a BUILD_VECTOR
8761/// node as a BUILD_VECTOR node rather than expanding it. The patterns where
8762/// the opposite is true (expansion is beneficial) are:
8763/// - The node builds a vector out of integers that are not 32 or 64-bits
8764/// - The node builds a vector out of constants
8765/// - The node is a "load-and-splat"
8766/// In all other cases, we will choose to keep the BUILD_VECTOR.
8767static bool haveEfficientBuildVectorPattern(BuildVectorSDNode *V,
8768 bool HasDirectMove,
8769 bool HasP8Vector) {
8770 EVT VecVT = V->getValueType(0);
8771 bool RightType = VecVT == MVT::v2f64 ||
8772 (HasP8Vector && VecVT == MVT::v4f32) ||
8773 (HasDirectMove && (VecVT == MVT::v2i64 || VecVT == MVT::v4i32));
8774 if (!RightType)
8775 return false;
8776
8777 bool IsSplat = true;
8778 bool IsLoad = false;
8779 SDValue Op0 = V->getOperand(0);
8780
8781 // This function is called in a block that confirms the node is not a constant
8782 // splat. So a constant BUILD_VECTOR here means the vector is built out of
8783 // different constants.
8784 if (V->isConstant())
8785 return false;
8786 for (int i = 0, e = V->getNumOperands(); i < e; ++i) {
8787 if (V->getOperand(i).isUndef())
8788 return false;
8789 // We want to expand nodes that represent load-and-splat even if the
8790 // loaded value is a floating point truncation or conversion to int.
8791 if (V->getOperand(i).getOpcode() == ISD::LOAD ||
8792 (V->getOperand(i).getOpcode() == ISD::FP_ROUND &&
8793 V->getOperand(i).getOperand(0).getOpcode() == ISD::LOAD) ||
8794 (V->getOperand(i).getOpcode() == ISD::FP_TO_SINT &&
8795 V->getOperand(i).getOperand(0).getOpcode() == ISD::LOAD) ||
8796 (V->getOperand(i).getOpcode() == ISD::FP_TO_UINT &&
8797 V->getOperand(i).getOperand(0).getOpcode() == ISD::LOAD))
8798 IsLoad = true;
8799 // If the operands are different or the input is not a load and has more
8800 // uses than just this BV node, then it isn't a splat.
8801 if (V->getOperand(i) != Op0 ||
8802 (!IsLoad && !V->isOnlyUserOf(V->getOperand(i).getNode())))
8803 IsSplat = false;
8804 }
8805 return !(IsSplat && IsLoad);
8806}
8807
8808// Lower BITCAST(f128, (build_pair i64, i64)) to BUILD_FP128.
8809SDValue PPCTargetLowering::LowerBITCAST(SDValue Op, SelectionDAG &DAG) const {
8810
8811 SDLoc dl(Op);
8812 SDValue Op0 = Op->getOperand(0);
8813
8814 if ((Op.getValueType() != MVT::f128) ||
8815 (Op0.getOpcode() != ISD::BUILD_PAIR) ||
8816 (Op0.getOperand(0).getValueType() != MVT::i64) ||
8817 (Op0.getOperand(1).getValueType() != MVT::i64))
8818 return SDValue();
8819
8820 return DAG.getNode(PPCISD::BUILD_FP128, dl, MVT::f128, Op0.getOperand(0),
8821 Op0.getOperand(1));
8822}
8823
8824static const SDValue *getNormalLoadInput(const SDValue &Op, bool &IsPermuted) {
8825 const SDValue *InputLoad = &Op;
8826 if (InputLoad->getOpcode() == ISD::BITCAST)
8: Assuming the condition is false
9: Taking false branch
8827 InputLoad = &InputLoad->getOperand(0);
8828 if (InputLoad->getOpcode() == ISD::SCALAR_TO_VECTOR ||
10: Assuming the condition is false
12: Taking false branch
8829 InputLoad->getOpcode() == PPCISD::SCALAR_TO_VECTOR_PERMUTED) {
11: Assuming the condition is false
8830 IsPermuted = InputLoad->getOpcode() == PPCISD::SCALAR_TO_VECTOR_PERMUTED;
8831 InputLoad = &InputLoad->getOperand(0);
8832 }
8833 if (InputLoad->getOpcode() != ISD::LOAD)
13: Assuming the condition is false
14: Taking false branch
8834 return nullptr;
8835 LoadSDNode *LD = cast<LoadSDNode>(*InputLoad);
8836 return ISD::isNormalLoad(LD) ? InputLoad : nullptr;
15: '?' condition is false
16: Returning null pointer, which participates in a condition later
8837}
8838
8839// Convert the argument APFloat to a single precision APFloat if there is no
8840// loss in information during the conversion to single precision APFloat and the
8841// resulting number is not a denormal number. Return true if successful.
8842bool llvm::convertToNonDenormSingle(APFloat &ArgAPFloat) {
8843 APFloat APFloatToConvert = ArgAPFloat;
8844 bool LosesInfo = true;
8845 APFloatToConvert.convert(APFloat::IEEEsingle(), APFloat::rmNearestTiesToEven,
8846 &LosesInfo);
8847 bool Success = (!LosesInfo && !APFloatToConvert.isDenormal());
8848 if (Success)
8849 ArgAPFloat = APFloatToConvert;
8850 return Success;
8851}
8852
8853// Bitcast the argument APInt to a double and convert it to a single precision
8854// APFloat, bitcast the APFloat to an APInt and assign it to the original
8855// argument if there is no loss in information during the conversion from
8856// double to single precision APFloat and the resulting number is not a denormal
8857// number. Return true if successful.
8858bool llvm::convertToNonDenormSingle(APInt &ArgAPInt) {
8859 double DpValue = ArgAPInt.bitsToDouble();
8860 APFloat APFloatDp(DpValue);
8861 bool Success = convertToNonDenormSingle(APFloatDp);
8862 if (Success)
8863 ArgAPInt = APFloatDp.bitcastToAPInt();
8864 return Success;
8865}
8866
8867 // Nondestructive check for convertToNonDenormSingle.
8868bool llvm::checkConvertToNonDenormSingle(APFloat &ArgAPFloat) {
8869 // Only convert if it loses info, since XXSPLTIDP should
8870 // handle the other case.
8871 APFloat APFloatToConvert = ArgAPFloat;
8872 bool LosesInfo = true;
8873 APFloatToConvert.convert(APFloat::IEEEsingle(), APFloat::rmNearestTiesToEven,
8874 &LosesInfo);
8875
8876 return (!LosesInfo && !APFloatToConvert.isDenormal());
8877}
8878
8879// If this is a case we can't handle, return null and let the default
8880// expansion code take care of it. If we CAN select this case, and if it
8881// selects to a single instruction, return Op. Otherwise, if we can codegen
8882// this case more efficiently than a constant pool load, lower it to the
8883// sequence of ops that should be used.
8884SDValue PPCTargetLowering::LowerBUILD_VECTOR(SDValue Op,
8885 SelectionDAG &DAG) const {
8886 SDLoc dl(Op);
8887 BuildVectorSDNode *BVN = dyn_cast<BuildVectorSDNode>(Op.getNode());
8888 assert(BVN && "Expected a BuildVectorSDNode in LowerBUILD_VECTOR");
8889
8890 // Check if this is a splat of a constant value.
8891 APInt APSplatBits, APSplatUndef;
8892 unsigned SplatBitSize;
8893 bool HasAnyUndefs;
8894 bool BVNIsConstantSplat =
8895 BVN->isConstantSplat(APSplatBits, APSplatUndef, SplatBitSize,
8896 HasAnyUndefs, 0, !Subtarget.isLittleEndian());
8897
8898 // If it is a splat of a double, check if we can shrink it to a 32 bit
8899 // non-denormal float which when converted back to double gives us the same
8900 // double. This is to exploit the XXSPLTIDP instruction.
8901 // If we lose precision, we use XXSPLTI32DX.
8902 if (BVNIsConstantSplat && (SplatBitSize == 64) &&
8903 Subtarget.hasPrefixInstrs()) {
8904 // Check the type first to short-circuit so we don't modify APSplatBits if
8905 // this block isn't executed.
8906 if ((Op->getValueType(0) == MVT::v2f64) &&
8907 convertToNonDenormSingle(APSplatBits)) {
8908 SDValue SplatNode = DAG.getNode(
8909 PPCISD::XXSPLTI_SP_TO_DP, dl, MVT::v2f64,
8910 DAG.getTargetConstant(APSplatBits.getZExtValue(), dl, MVT::i32));
8911 return DAG.getBitcast(Op.getValueType(), SplatNode);
8912 } else {
8913 // We may lose precision, so we have to use XXSPLTI32DX.
8914
8915 uint32_t Hi =
8916 (uint32_t)((APSplatBits.getZExtValue() & 0xFFFFFFFF00000000LL) >> 32);
8917 uint32_t Lo =
8918 (uint32_t)(APSplatBits.getZExtValue() & 0xFFFFFFFF);
8919 SDValue SplatNode = DAG.getUNDEF(MVT::v2i64);
8920
8921 if (!Hi || !Lo)
8922 // If either load is 0, then we should generate XXLXOR to set to 0.
8923 SplatNode = DAG.getTargetConstant(0, dl, MVT::v2i64);
8924
8925 if (Hi)
8926 SplatNode = DAG.getNode(
8927 PPCISD::XXSPLTI32DX, dl, MVT::v2i64, SplatNode,
8928 DAG.getTargetConstant(0, dl, MVT::i32),
8929 DAG.getTargetConstant(Hi, dl, MVT::i32));
8930
8931 if (Lo)
8932 SplatNode =
8933 DAG.getNode(PPCISD::XXSPLTI32DX, dl, MVT::v2i64, SplatNode,
8934 DAG.getTargetConstant(1, dl, MVT::i32),
8935 DAG.getTargetConstant(Lo, dl, MVT::i32));
8936
8937 return DAG.getBitcast(Op.getValueType(), SplatNode);
8938 }
8939 }
8940
8941 if (!BVNIsConstantSplat || SplatBitSize > 32) {
8942
8943 bool IsPermutedLoad = false;
8944 const SDValue *InputLoad =
8945 getNormalLoadInput(Op.getOperand(0), IsPermutedLoad);
8946 // Handle load-and-splat patterns as we have instructions that will do this
8947 // in one go.
8948 if (InputLoad && DAG.isSplatValue(Op, true)) {
8949 LoadSDNode *LD = cast<LoadSDNode>(*InputLoad);
8950
8951 // We have handling for 4 and 8 byte elements.
8952 unsigned ElementSize = LD->getMemoryVT().getScalarSizeInBits();
8953
8954 // Checking for a single use of this load, we have to check for vector
8955 // width (128 bits) / ElementSize uses (since each operand of the
8956 // BUILD_VECTOR is a separate use of the value).
8957 unsigned NumUsesOfInputLD = 128 / ElementSize;
8958 for (SDValue BVInOp : Op->ops())
8959 if (BVInOp.isUndef())
8960 NumUsesOfInputLD--;
8961 assert(NumUsesOfInputLD > 0 && "No uses of input LD of a build_vector?");
8962 if (InputLoad->getNode()->hasNUsesOfValue(NumUsesOfInputLD, 0) &&
8963 ((Subtarget.hasVSX() && ElementSize == 64) ||
8964 (Subtarget.hasP9Vector() && ElementSize == 32))) {
8965 SDValue Ops[] = {
8966 LD->getChain(), // Chain
8967 LD->getBasePtr(), // Ptr
8968 DAG.getValueType(Op.getValueType()) // VT
8969 };
8970 SDValue LdSplt = DAG.getMemIntrinsicNode(
8971 PPCISD::LD_SPLAT, dl, DAG.getVTList(Op.getValueType(), MVT::Other),
8972 Ops, LD->getMemoryVT(), LD->getMemOperand());
8973 // Replace all uses of the output chain of the original load with the
8974 // output chain of the new load.
8975 DAG.ReplaceAllUsesOfValueWith(InputLoad->getValue(1),
8976 LdSplt.getValue(1));
8977 return LdSplt;
8978 }
8979 }
8980
8981 // In 64BIT mode BUILD_VECTOR nodes that are not constant splats of up to
8982 // 32-bits can be lowered to VSX instructions under certain conditions.
8983 // Without VSX, there is no pattern more efficient than expanding the node.
8984 if (Subtarget.hasVSX() && Subtarget.isPPC64() &&
8985 haveEfficientBuildVectorPattern(BVN, Subtarget.hasDirectMove(),
8986 Subtarget.hasP8Vector()))
8987 return Op;
8988 return SDValue();
8989 }
8990
8991 uint64_t SplatBits = APSplatBits.getZExtValue();
8992 uint64_t SplatUndef = APSplatUndef.getZExtValue();
8993 unsigned SplatSize = SplatBitSize / 8;
8994
8995 // First, handle single instruction cases.
8996
8997 // All zeros?
8998 if (SplatBits == 0) {
8999 // Canonicalize all zero vectors to be v4i32.
9000 if (Op.getValueType() != MVT::v4i32 || HasAnyUndefs) {
9001 SDValue Z = DAG.getConstant(0, dl, MVT::v4i32);
9002 Op = DAG.getNode(ISD::BITCAST, dl, Op.getValueType(), Z);
9003 }
9004 return Op;
9005 }
9006
9007 // We have XXSPLTIW for constant splats four bytes wide.
9008 // Given vector length is a multiple of 4, 2-byte splats can be replaced
9009 // with 4-byte splats. We replicate the SplatBits in case of 2-byte splat to
9010 // make a 4-byte splat element. For example: 2-byte splat of 0xABAB can be
9011 // turned into a 4-byte splat of 0xABABABAB.
9012 if (Subtarget.hasPrefixInstrs() && SplatSize == 2)
9013 return getCanonicalConstSplat(SplatBits | (SplatBits << 16), SplatSize * 2,
9014 Op.getValueType(), DAG, dl);
9015
9016 if (Subtarget.hasPrefixInstrs() && SplatSize == 4)
9017 return getCanonicalConstSplat(SplatBits, SplatSize, Op.getValueType(), DAG,
9018 dl);
9019
9020 // We have XXSPLTIB for constant splats one byte wide.
9021 if (Subtarget.hasP9Vector() && SplatSize == 1)
9022 return getCanonicalConstSplat(SplatBits, SplatSize, Op.getValueType(), DAG,
9023 dl);
9024
9025 // If the sign extended value is in the range [-16,15], use VSPLTI[bhw].
9026 int32_t SextVal= (int32_t(SplatBits << (32-SplatBitSize)) >>
9027 (32-SplatBitSize));
9028 if (SextVal >= -16 && SextVal <= 15)
9029 return getCanonicalConstSplat(SextVal, SplatSize, Op.getValueType(), DAG,
9030 dl);
9031
9032 // Two instruction sequences.
9033
9034 // If this value is in the range [-32,30] and is even, use:
9035 // VSPLTI[bhw](val/2) + VSPLTI[bhw](val/2)
9036 // If this value is in the range [17,31] and is odd, use:
9037 // VSPLTI[bhw](val-16) - VSPLTI[bhw](-16)
9038 // If this value is in the range [-31,-17] and is odd, use:
9039 // VSPLTI[bhw](val+16) + VSPLTI[bhw](-16)
9040 // Note the last two are three-instruction sequences.
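// Editorial examples of the sequences above (not in the original source),
// using the byte form: 22 is even, so it becomes vspltisb(11) + vspltisb(11);
// 27 is odd and in [17,31], so it becomes vspltisb(11) - vspltisb(-16);
// -27 is odd and in [-31,-17], so it becomes vspltisb(-11) + vspltisb(-16).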
9041 if (SextVal >= -32 && SextVal <= 31) {
9042 // To avoid having these optimizations undone by constant folding,
9043 // we convert to a pseudo that will be expanded later into one of
9044 // the above forms.
9045 SDValue Elt = DAG.getConstant(SextVal, dl, MVT::i32);
9046 EVT VT = (SplatSize == 1 ? MVT::v16i8 :
9047 (SplatSize == 2 ? MVT::v8i16 : MVT::v4i32));
9048 SDValue EltSize = DAG.getConstant(SplatSize, dl, MVT::i32);
9049 SDValue RetVal = DAG.getNode(PPCISD::VADD_SPLAT, dl, VT, Elt, EltSize);
9050 if (VT == Op.getValueType())
9051 return RetVal;
9052 else
9053 return DAG.getNode(ISD::BITCAST, dl, Op.getValueType(), RetVal);
9054 }
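The three decompositions listed in the comment above are plain integer identities; a small sketch checking them (illustrative only, the lowering itself emits PPCISD::VADD_SPLAT and expands it later):

    #include <cassert>
    static void checkVAddSplatForms() {
      int Even = 30;    // [-32,30], even:  vsplti(15) + vsplti(15)
      assert(Even / 2 + Even / 2 == Even);
      int PosOdd = 27;  // [17,31], odd:    vsplti(11) - vsplti(-16)
      assert((PosOdd - 16) - (-16) == PosOdd);
      int NegOdd = -27; // [-31,-17], odd:  vsplti(-11) + vsplti(-16)
      assert((NegOdd + 16) + (-16) == NegOdd);
    }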
9055
9056 // If this is 0x8000_0000 x 4, turn into vspltisw + vslw. If it is
9057 // 0x7FFF_FFFF x 4, turn it into not(0x8000_0000). This is important
9058 // for fneg/fabs.
9059 if (SplatSize == 4 && SplatBits == (0x7FFFFFFF&~SplatUndef)) {
9060 // Make -1 and vspltisw -1:
9061 SDValue OnesV = getCanonicalConstSplat(-1, 4, MVT::v4i32, DAG, dl);
9062
9063 // Make the VSLW intrinsic, computing 0x8000_0000.
9064 SDValue Res = BuildIntrinsicOp(Intrinsic::ppc_altivec_vslw, OnesV,
9065 OnesV, DAG, dl);
9066
9067 // xor by OnesV to invert it.
9068 Res = DAG.getNode(ISD::XOR, dl, MVT::v4i32, Res, OnesV);
9069 return DAG.getNode(ISD::BITCAST, dl, Op.getValueType(), Res);
9070 }
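The per-lane arithmetic behind the sequence above, assuming vslw shifts each word by the low 5 bits of the corresponding shift element (sketch only):

    #include <cstdint>
    static uint32_t lane7FFFFFFF() {
      uint32_t Ones = 0xFFFFFFFFu;          // vspltisw -1
      uint32_t High = Ones << (Ones & 31);  // vslw: shift by 31 -> 0x80000000
      return High ^ Ones;                   // xor with ones    -> 0x7FFFFFFF
    }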
9071
9072   // Check to see if this matches one of the many 'vsplti* then binop with self' cases.
9073 static const signed char SplatCsts[] = {
9074 -1, 1, -2, 2, -3, 3, -4, 4, -5, 5, -6, 6, -7, 7,
9075 -8, 8, -9, 9, -10, 10, -11, 11, -12, 12, -13, 13, 14, -14, 15, -15, -16
9076 };
9077
9078 for (unsigned idx = 0; idx < array_lengthof(SplatCsts); ++idx) {
9079     // Indirect through the SplatCsts array so that we favor 'vsplti -1' for
9080     // ambiguous cases (e.g. formation of 0x8000_0000); 'vsplti -1' comes first.
9081 int i = SplatCsts[idx];
9082
9083 // Figure out what shift amount will be used by altivec if shifted by i in
9084 // this splat size.
9085 unsigned TypeShiftAmt = i & (SplatBitSize-1);
9086
9087 // vsplti + shl self.
9088 if (SextVal == (int)((unsigned)i << TypeShiftAmt)) {
9089 SDValue Res = getCanonicalConstSplat(i, SplatSize, MVT::Other, DAG, dl);
9090 static const unsigned IIDs[] = { // Intrinsic to use for each size.
9091 Intrinsic::ppc_altivec_vslb, Intrinsic::ppc_altivec_vslh, 0,
9092 Intrinsic::ppc_altivec_vslw
9093 };
9094 Res = BuildIntrinsicOp(IIDs[SplatSize-1], Res, Res, DAG, dl);
9095 return DAG.getNode(ISD::BITCAST, dl, Op.getValueType(), Res);
9096 }
9097
9098 // vsplti + srl self.
9099 if (SextVal == (int)((unsigned)i >> TypeShiftAmt)) {
9100 SDValue Res = getCanonicalConstSplat(i, SplatSize, MVT::Other, DAG, dl);
9101 static const unsigned IIDs[] = { // Intrinsic to use for each size.
9102 Intrinsic::ppc_altivec_vsrb, Intrinsic::ppc_altivec_vsrh, 0,
9103 Intrinsic::ppc_altivec_vsrw
9104 };
9105 Res = BuildIntrinsicOp(IIDs[SplatSize-1], Res, Res, DAG, dl);
9106 return DAG.getNode(ISD::BITCAST, dl, Op.getValueType(), Res);
9107 }
9108
9109 // vsplti + rol self.
9110 if (SextVal == (int)(((unsigned)i << TypeShiftAmt) |
9111 ((unsigned)i >> (SplatBitSize-TypeShiftAmt)))) {
9112 SDValue Res = getCanonicalConstSplat(i, SplatSize, MVT::Other, DAG, dl);
9113 static const unsigned IIDs[] = { // Intrinsic to use for each size.
9114 Intrinsic::ppc_altivec_vrlb, Intrinsic::ppc_altivec_vrlh, 0,
9115 Intrinsic::ppc_altivec_vrlw
9116 };
9117 Res = BuildIntrinsicOp(IIDs[SplatSize-1], Res, Res, DAG, dl);
9118 return DAG.getNode(ISD::BITCAST, dl, Op.getValueType(), Res);
9119 }
9120
9121 // t = vsplti c, result = vsldoi t, t, 1
9122 if (SextVal == (int)(((unsigned)i << 8) | (i < 0 ? 0xFF : 0))) {
9123 SDValue T = getCanonicalConstSplat(i, SplatSize, MVT::v16i8, DAG, dl);
9124 unsigned Amt = Subtarget.isLittleEndian() ? 15 : 1;
9125 return BuildVSLDOI(T, T, Amt, Op.getValueType(), DAG, dl);
9126 }
9127 // t = vsplti c, result = vsldoi t, t, 2
9128 if (SextVal == (int)(((unsigned)i << 16) | (i < 0 ? 0xFFFF : 0))) {
9129 SDValue T = getCanonicalConstSplat(i, SplatSize, MVT::v16i8, DAG, dl);
9130 unsigned Amt = Subtarget.isLittleEndian() ? 14 : 2;
9131 return BuildVSLDOI(T, T, Amt, Op.getValueType(), DAG, dl);
9132 }
9133 // t = vsplti c, result = vsldoi t, t, 3
9134 if (SextVal == (int)(((unsigned)i << 24) | (i < 0 ? 0xFFFFFF : 0))) {
9135 SDValue T = getCanonicalConstSplat(i, SplatSize, MVT::v16i8, DAG, dl);
9136 unsigned Amt = Subtarget.isLittleEndian() ? 13 : 3;
9137 return BuildVSLDOI(T, T, Amt, Op.getValueType(), DAG, dl);
9138 }
9139 }
9140
9141 return SDValue();
9142}
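For reference, the "vsplti + shl self" case in the loop above matches values that equal a small splat constant shifted by its own low bits; a standalone sketch of that test (illustrative names, not part of the source):

    // For 32-bit elements, i = 2 gives 2 << (2 & 31) == 8, so a splat of 8
    // can be built as vspltisw(2) followed by vslw of the splat by itself.
    static bool matchesSpltiShlSelf(int Desired, signed char i,
                                    unsigned SplatBitSize) {
      unsigned TypeShiftAmt = i & (SplatBitSize - 1);
      return Desired == int((unsigned)i << TypeShiftAmt);
    }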
9143
9144/// GeneratePerfectShuffle - Given an entry in the perfect-shuffle table, emit
9145/// the specified operations to build the shuffle.
9146static SDValue GeneratePerfectShuffle(unsigned PFEntry, SDValue LHS,
9147 SDValue RHS, SelectionDAG &DAG,
9148 const SDLoc &dl) {
9149 unsigned OpNum = (PFEntry >> 26) & 0x0F;
9150 unsigned LHSID = (PFEntry >> 13) & ((1 << 13)-1);
9151 unsigned RHSID = (PFEntry >> 0) & ((1 << 13)-1);
9152
9153 enum {
9154 OP_COPY = 0, // Copy, used for things like <u,u,u,3> to say it is <0,1,2,3>
9155 OP_VMRGHW,
9156 OP_VMRGLW,
9157 OP_VSPLTISW0,
9158 OP_VSPLTISW1,
9159 OP_VSPLTISW2,
9160 OP_VSPLTISW3,
9161 OP_VSLDOI4,
9162 OP_VSLDOI8,
9163 OP_VSLDOI12
9164 };
9165
9166 if (OpNum == OP_COPY) {
9167 if (LHSID == (1*9+2)*9+3) return LHS;
9168     assert(LHSID == ((4*9+5)*9+6)*9+7 && "Illegal OP_COPY!");
9169 return RHS;
9170 }
9171
9172 SDValue OpLHS, OpRHS;
9173 OpLHS = GeneratePerfectShuffle(PerfectShuffleTable[LHSID], LHS, RHS, DAG, dl);
9174 OpRHS = GeneratePerfectShuffle(PerfectShuffleTable[RHSID], LHS, RHS, DAG, dl);
9175
9176 int ShufIdxs[16];
9177 switch (OpNum) {
9178   default: llvm_unreachable("Unknown i32 permute!");
9179 case OP_VMRGHW:
9180 ShufIdxs[ 0] = 0; ShufIdxs[ 1] = 1; ShufIdxs[ 2] = 2; ShufIdxs[ 3] = 3;
9181 ShufIdxs[ 4] = 16; ShufIdxs[ 5] = 17; ShufIdxs[ 6] = 18; ShufIdxs[ 7] = 19;
9182 ShufIdxs[ 8] = 4; ShufIdxs[ 9] = 5; ShufIdxs[10] = 6; ShufIdxs[11] = 7;
9183 ShufIdxs[12] = 20; ShufIdxs[13] = 21; ShufIdxs[14] = 22; ShufIdxs[15] = 23;
9184 break;
9185 case OP_VMRGLW:
9186 ShufIdxs[ 0] = 8; ShufIdxs[ 1] = 9; ShufIdxs[ 2] = 10; ShufIdxs[ 3] = 11;
9187 ShufIdxs[ 4] = 24; ShufIdxs[ 5] = 25; ShufIdxs[ 6] = 26; ShufIdxs[ 7] = 27;
9188 ShufIdxs[ 8] = 12; ShufIdxs[ 9] = 13; ShufIdxs[10] = 14; ShufIdxs[11] = 15;
9189 ShufIdxs[12] = 28; ShufIdxs[13] = 29; ShufIdxs[14] = 30; ShufIdxs[15] = 31;
9190 break;
9191 case OP_VSPLTISW0:
9192 for (unsigned i = 0; i != 16; ++i)
9193 ShufIdxs[i] = (i&3)+0;
9194 break;
9195 case OP_VSPLTISW1:
9196 for (unsigned i = 0; i != 16; ++i)
9197 ShufIdxs[i] = (i&3)+4;
9198 break;
9199 case OP_VSPLTISW2:
9200 for (unsigned i = 0; i != 16; ++i)
9201 ShufIdxs[i] = (i&3)+8;
9202 break;
9203 case OP_VSPLTISW3:
9204 for (unsigned i = 0; i != 16; ++i)
9205 ShufIdxs[i] = (i&3)+12;
9206 break;
9207 case OP_VSLDOI4:
9208 return BuildVSLDOI(OpLHS, OpRHS, 4, OpLHS.getValueType(), DAG, dl);
9209 case OP_VSLDOI8:
9210 return BuildVSLDOI(OpLHS, OpRHS, 8, OpLHS.getValueType(), DAG, dl);
9211 case OP_VSLDOI12:
9212 return BuildVSLDOI(OpLHS, OpRHS, 12, OpLHS.getValueType(), DAG, dl);
9213 }
9214 EVT VT = OpLHS.getValueType();
9215 OpLHS = DAG.getNode(ISD::BITCAST, dl, MVT::v16i8, OpLHS);
9216 OpRHS = DAG.getNode(ISD::BITCAST, dl, MVT::v16i8, OpRHS);
9217 SDValue T = DAG.getVectorShuffle(MVT::v16i8, dl, OpLHS, OpRHS, ShufIdxs);
9218 return DAG.getNode(ISD::BITCAST, dl, VT, T);
9219}
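A perfect-shuffle table entry packs four fields into 32 bits; the decoding used above (and the cost field read later in LowerVECTOR_SHUFFLE) amounts to the following sketch:

    #include <cstdint>
    struct PFEntryFields { unsigned Cost, OpNum, LHSID, RHSID; };
    // bits 30-31: cost, bits 26-29: operation, bits 13-25: LHS id, bits 0-12: RHS id.
    static PFEntryFields decodePFEntry(uint32_t PFEntry) {
      return {PFEntry >> 30, (PFEntry >> 26) & 0x0F,
              (PFEntry >> 13) & ((1u << 13) - 1), PFEntry & ((1u << 13) - 1)};
    }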
9220
9221/// lowerToVINSERTB - Return the SDValue if this VECTOR_SHUFFLE can be handled
9222/// by the VINSERTB instruction introduced in ISA 3.0, else just return default
9223/// SDValue.
9224SDValue PPCTargetLowering::lowerToVINSERTB(ShuffleVectorSDNode *N,
9225 SelectionDAG &DAG) const {
9226 const unsigned BytesInVector = 16;
9227 bool IsLE = Subtarget.isLittleEndian();
9228 SDLoc dl(N);
9229 SDValue V1 = N->getOperand(0);
9230 SDValue V2 = N->getOperand(1);
9231 unsigned ShiftElts = 0, InsertAtByte = 0;
9232 bool Swap = false;
9233
9234 // Shifts required to get the byte we want at element 7.
9235 unsigned LittleEndianShifts[] = {8, 7, 6, 5, 4, 3, 2, 1,
9236 0, 15, 14, 13, 12, 11, 10, 9};
9237 unsigned BigEndianShifts[] = {9, 10, 11, 12, 13, 14, 15, 0,
9238 1, 2, 3, 4, 5, 6, 7, 8};
9239
9240 ArrayRef<int> Mask = N->getMask();
9241 int OriginalOrder[] = {0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15};
9242
9243 // For each mask element, find out if we're just inserting something
9244 // from V2 into V1 or vice versa.
9245 // Possible permutations inserting an element from V2 into V1:
9246 // X, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15
9247 // 0, X, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15
9248 // ...
9249 // 0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, X
9250 // Inserting from V1 into V2 will be similar, except mask range will be
9251 // [16,31].
9252
9253 bool FoundCandidate = false;
9254 // If both vector operands for the shuffle are the same vector, the mask
9255 // will contain only elements from the first one and the second one will be
9256 // undef.
9257 unsigned VINSERTBSrcElem = IsLE ? 8 : 7;
9258   // Go through the mask of bytes to find an element that's being moved
9259 // from one vector to the other.
9260 for (unsigned i = 0; i < BytesInVector; ++i) {
9261 unsigned CurrentElement = Mask[i];
9262 // If 2nd operand is undefined, we should only look for element 7 in the
9263 // Mask.
9264 if (V2.isUndef() && CurrentElement != VINSERTBSrcElem)
9265 continue;
9266
9267 bool OtherElementsInOrder = true;
9268 // Examine the other elements in the Mask to see if they're in original
9269 // order.
9270 for (unsigned j = 0; j < BytesInVector; ++j) {
9271 if (j == i)
9272 continue;
9273       // If CurrentElement is from V1 [0,15], then we expect the rest of the
9274       // Mask to be from V2 [16,31] and vice versa. Unless the 2nd operand is
9275       // undefined, in which case we assume we're picking from the 1st operand.
9276 int MaskOffset =
9277 (!V2.isUndef() && CurrentElement < BytesInVector) ? BytesInVector : 0;
9278 if (Mask[j] != OriginalOrder[j] + MaskOffset) {
9279 OtherElementsInOrder = false;
9280 break;
9281 }
9282 }
9283 // If other elements are in original order, we record the number of shifts
9284 // we need to get the element we want into element 7. Also record which byte
9285 // in the vector we should insert into.
9286 if (OtherElementsInOrder) {
9287 // If 2nd operand is undefined, we assume no shifts and no swapping.
9288 if (V2.isUndef()) {
9289 ShiftElts = 0;
9290 Swap = false;
9291 } else {
9292         // Only need the last 4 bits for the shift because operands will be swapped if CurrentElement is >= 2^4.
9293 ShiftElts = IsLE ? LittleEndianShifts[CurrentElement & 0xF]
9294 : BigEndianShifts[CurrentElement & 0xF];
9295 Swap = CurrentElement < BytesInVector;
9296 }
9297 InsertAtByte = IsLE ? BytesInVector - (i + 1) : i;
9298 FoundCandidate = true;
9299 break;
9300 }
9301 }
9302
9303 if (!FoundCandidate)
9304 return SDValue();
9305
9306 // Candidate found, construct the proper SDAG sequence with VINSERTB,
9307 // optionally with VECSHL if shift is required.
9308 if (Swap)
9309 std::swap(V1, V2);
9310 if (V2.isUndef())
9311 V2 = V1;
9312 if (ShiftElts) {
9313 SDValue Shl = DAG.getNode(PPCISD::VECSHL, dl, MVT::v16i8, V2, V2,
9314 DAG.getConstant(ShiftElts, dl, MVT::i32));
9315 return DAG.getNode(PPCISD::VECINSERT, dl, MVT::v16i8, V1, Shl,
9316 DAG.getConstant(InsertAtByte, dl, MVT::i32));
9317 }
9318 return DAG.getNode(PPCISD::VECINSERT, dl, MVT::v16i8, V1, V2,
9319 DAG.getConstant(InsertAtByte, dl, MVT::i32));
9320}
9321
9322/// lowerToVINSERTH - Return the SDValue if this VECTOR_SHUFFLE can be handled
9323/// by the VINSERTH instruction introduced in ISA 3.0, else just return default
9324/// SDValue.
9325SDValue PPCTargetLowering::lowerToVINSERTH(ShuffleVectorSDNode *N,
9326 SelectionDAG &DAG) const {
9327 const unsigned NumHalfWords = 8;
9328 const unsigned BytesInVector = NumHalfWords * 2;
9329 // Check that the shuffle is on half-words.
9330 if (!isNByteElemShuffleMask(N, 2, 1))
9331 return SDValue();
9332
9333 bool IsLE = Subtarget.isLittleEndian();
9334 SDLoc dl(N);
9335 SDValue V1 = N->getOperand(0);
9336 SDValue V2 = N->getOperand(1);
9337 unsigned ShiftElts = 0, InsertAtByte = 0;
9338 bool Swap = false;
9339
9340 // Shifts required to get the half-word we want at element 3.
9341 unsigned LittleEndianShifts[] = {4, 3, 2, 1, 0, 7, 6, 5};
9342 unsigned BigEndianShifts[] = {5, 6, 7, 0, 1, 2, 3, 4};
9343
9344 uint32_t Mask = 0;
9345 uint32_t OriginalOrderLow = 0x1234567;
9346 uint32_t OriginalOrderHigh = 0x89ABCDEF;
9347 // Now we look at mask elements 0,2,4,6,8,10,12,14. Pack the mask into a
9348 // 32-bit space, only need 4-bit nibbles per element.
9349 for (unsigned i = 0; i < NumHalfWords; ++i) {
9350 unsigned MaskShift = (NumHalfWords - 1 - i) * 4;
9351 Mask |= ((uint32_t)(N->getMaskElt(i * 2) / 2) << MaskShift);
9352 }
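The loop above packs the eight half-word source indices (byte mask element divided by 2) into one 32-bit word, four bits each, with element 0 in the top nibble; the identity order packs to 0x01234567 (OriginalOrderLow) and an all-from-V2 order packs to 0x89ABCDEF (OriginalOrderHigh). Standalone sketch:

    #include <array>
    #include <cstdint>
    static uint32_t packHalfWordMask(const std::array<uint32_t, 8> &Elts) {
      uint32_t Mask = 0;
      for (unsigned i = 0; i < 8; ++i)
        Mask |= Elts[i] << ((8 - 1 - i) * 4);
      return Mask;  // {0..7} -> 0x01234567, {8..15} -> 0x89ABCDEF
    }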
9353
9354 // For each mask element, find out if we're just inserting something
9355 // from V2 into V1 or vice versa. Possible permutations inserting an element
9356 // from V2 into V1:
9357 // X, 1, 2, 3, 4, 5, 6, 7
9358 // 0, X, 2, 3, 4, 5, 6, 7
9359 // 0, 1, X, 3, 4, 5, 6, 7
9360 // 0, 1, 2, X, 4, 5, 6, 7
9361 // 0, 1, 2, 3, X, 5, 6, 7
9362 // 0, 1, 2, 3, 4, X, 6, 7
9363 // 0, 1, 2, 3, 4, 5, X, 7
9364 // 0, 1, 2, 3, 4, 5, 6, X
9365 // Inserting from V1 into V2 will be similar, except mask range will be [8,15].
9366
9367 bool FoundCandidate = false;
9368 // Go through the mask of half-words to find an element that's being moved
9369 // from one vector to the other.
9370 for (unsigned i = 0; i < NumHalfWords; ++i) {
9371 unsigned MaskShift = (NumHalfWords - 1 - i) * 4;
9372 uint32_t MaskOneElt = (Mask >> MaskShift) & 0xF;
9373 uint32_t MaskOtherElts = ~(0xF << MaskShift);
9374 uint32_t TargetOrder = 0x0;
9375
9376 // If both vector operands for the shuffle are the same vector, the mask
9377 // will contain only elements from the first one and the second one will be
9378 // undef.
9379 if (V2.isUndef()) {
9380 ShiftElts = 0;
9381 unsigned VINSERTHSrcElem = IsLE ? 4 : 3;
9382 TargetOrder = OriginalOrderLow;
9383 Swap = false;
9384       // Skip if this is not the correct element or the mask of the other
9385       // elements doesn't match our expected order.
9386 if (MaskOneElt == VINSERTHSrcElem &&
9387 (Mask & MaskOtherElts) == (TargetOrder & MaskOtherElts)) {
9388 InsertAtByte = IsLE ? BytesInVector - (i + 1) * 2 : i * 2;
9389 FoundCandidate = true;
9390 break;
9391 }
9392 } else { // If both operands are defined.
9393 // Target order is [8,15] if the current mask is between [0,7].
9394 TargetOrder =
9395 (MaskOneElt < NumHalfWords) ? OriginalOrderHigh : OriginalOrderLow;
9396       // Skip if the mask of the other elements doesn't match our expected order.
9397 if ((Mask & MaskOtherElts) == (TargetOrder & MaskOtherElts)) {
9398 // We only need the last 3 bits for the number of shifts.
9399 ShiftElts = IsLE ? LittleEndianShifts[MaskOneElt & 0x7]
9400 : BigEndianShifts[MaskOneElt & 0x7];
9401 InsertAtByte = IsLE ? BytesInVector - (i + 1) * 2 : i * 2;
9402 Swap = MaskOneElt < NumHalfWords;
9403 FoundCandidate = true;
9404 break;
9405 }
9406 }
9407 }
9408
9409 if (!FoundCandidate)
9410 return SDValue();
9411
9412 // Candidate found, construct the proper SDAG sequence with VINSERTH,
9413 // optionally with VECSHL if shift is required.
9414 if (Swap)
9415 std::swap(V1, V2);
9416 if (V2.isUndef())
9417 V2 = V1;
9418 SDValue Conv1 = DAG.getNode(ISD::BITCAST, dl, MVT::v8i16, V1);
9419 if (ShiftElts) {
9420 // Double ShiftElts because we're left shifting on v16i8 type.
9421 SDValue Shl = DAG.getNode(PPCISD::VECSHL, dl, MVT::v16i8, V2, V2,
9422 DAG.getConstant(2 * ShiftElts, dl, MVT::i32));
9423 SDValue Conv2 = DAG.getNode(ISD::BITCAST, dl, MVT::v8i16, Shl);
9424 SDValue Ins = DAG.getNode(PPCISD::VECINSERT, dl, MVT::v8i16, Conv1, Conv2,
9425 DAG.getConstant(InsertAtByte, dl, MVT::i32));
9426 return DAG.getNode(ISD::BITCAST, dl, MVT::v16i8, Ins);
9427 }
9428 SDValue Conv2 = DAG.getNode(ISD::BITCAST, dl, MVT::v8i16, V2);
9429 SDValue Ins = DAG.getNode(PPCISD::VECINSERT, dl, MVT::v8i16, Conv1, Conv2,
9430 DAG.getConstant(InsertAtByte, dl, MVT::i32));
9431 return DAG.getNode(ISD::BITCAST, dl, MVT::v16i8, Ins);
9432}
9433
9434/// lowerToXXSPLTI32DX - Return the SDValue if this VECTOR_SHUFFLE can be
9435/// handled by the XXSPLTI32DX instruction introduced in ISA 3.1, otherwise
9436/// return the default SDValue.
9437SDValue PPCTargetLowering::lowerToXXSPLTI32DX(ShuffleVectorSDNode *SVN,
9438 SelectionDAG &DAG) const {
9439 // The LHS and RHS may be bitcasts to v16i8 as we canonicalize shuffles
9440 // to v16i8. Peek through the bitcasts to get the actual operands.
9441 SDValue LHS = peekThroughBitcasts(SVN->getOperand(0));
9442 SDValue RHS = peekThroughBitcasts(SVN->getOperand(1));
9443
9444 auto ShuffleMask = SVN->getMask();
9445 SDValue VecShuffle(SVN, 0);
9446 SDLoc DL(SVN);
9447
9448 // Check that we have a four byte shuffle.
9449 if (!isNByteElemShuffleMask(SVN, 4, 1))
9450 return SDValue();
9451
9452   // Canonicalize so that the RHS is a BUILD_VECTOR when lowering to xxsplti32dx.
9453 if (RHS->getOpcode() != ISD::BUILD_VECTOR) {
9454 std::swap(LHS, RHS);
9455 VecShuffle = DAG.getCommutedVectorShuffle(*SVN);
9456 ShuffleMask = cast<ShuffleVectorSDNode>(VecShuffle)->getMask();
9457 }
9458
9459 // Ensure that the RHS is a vector of constants.
9460 BuildVectorSDNode *BVN = dyn_cast<BuildVectorSDNode>(RHS.getNode());
9461 if (!BVN)
9462 return SDValue();
9463
9464 // Check if RHS is a splat of 4-bytes (or smaller).
9465 APInt APSplatValue, APSplatUndef;
9466 unsigned SplatBitSize;
9467 bool HasAnyUndefs;
9468 if (!BVN->isConstantSplat(APSplatValue, APSplatUndef, SplatBitSize,
9469 HasAnyUndefs, 0, !Subtarget.isLittleEndian()) ||
9470 SplatBitSize > 32)
9471 return SDValue();
9472
9473 // Check that the shuffle mask matches the semantics of XXSPLTI32DX.
9474 // The instruction splats a constant C into two words of the source vector
9475 // producing { C, Unchanged, C, Unchanged } or { Unchanged, C, Unchanged, C }.
9476 // Thus we check that the shuffle mask is the equivalent of
9477 // <0, [4-7], 2, [4-7]> or <[4-7], 1, [4-7], 3> respectively.
9478 // Note: the check above of isNByteElemShuffleMask() ensures that the bytes
9479 // within each word are consecutive, so we only need to check the first byte.
9480 SDValue Index;
9481 bool IsLE = Subtarget.isLittleEndian();
9482 if ((ShuffleMask[0] == 0 && ShuffleMask[8] == 8) &&
9483 (ShuffleMask[4] % 4 == 0 && ShuffleMask[12] % 4 == 0 &&
9484 ShuffleMask[4] > 15 && ShuffleMask[12] > 15))
9485 Index = DAG.getTargetConstant(IsLE ? 0 : 1, DL, MVT::i32);
9486 else if ((ShuffleMask[4] == 4 && ShuffleMask[12] == 12) &&
9487 (ShuffleMask[0] % 4 == 0 && ShuffleMask[8] % 4 == 0 &&
9488 ShuffleMask[0] > 15 && ShuffleMask[8] > 15))
9489 Index = DAG.getTargetConstant(IsLE ? 1 : 0, DL, MVT::i32);
9490 else
9491 return SDValue();
9492
9493 // If the splat is narrower than 32-bits, we need to get the 32-bit value
9494 // for XXSPLTI32DX.
9495 unsigned SplatVal = APSplatValue.getZExtValue();
9496 for (; SplatBitSize < 32; SplatBitSize <<= 1)
9497 SplatVal |= (SplatVal << SplatBitSize);
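The widening loop above doubles a sub-word splat until it fills 32 bits; as a standalone sketch: widenSplat(0xAB, 8) == 0xABABABAB and widenSplat(0xABCD, 16) == 0xABCDABCD.

    #include <cstdint>
    static uint32_t widenSplat(uint32_t SplatVal, unsigned SplatBitSize) {
      for (; SplatBitSize < 32; SplatBitSize <<= 1)
        SplatVal |= (SplatVal << SplatBitSize);
      return SplatVal;
    }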
9498
9499 SDValue SplatNode = DAG.getNode(
9500 PPCISD::XXSPLTI32DX, DL, MVT::v2i64, DAG.getBitcast(MVT::v2i64, LHS),
9501 Index, DAG.getTargetConstant(SplatVal, DL, MVT::i32));
9502 return DAG.getNode(ISD::BITCAST, DL, MVT::v16i8, SplatNode);
9503}
9504
9505/// LowerROTL - Custom lowering for ROTL(v1i128) to vector_shuffle(v16i8).
9506/// We lower ROTL(v1i128) to vector_shuffle(v16i8) only if shift amount is
9507/// a multiple of 8. Otherwise convert it to a scalar rotation(i128)
9508 /// i.e. (or (shl x, C1), (srl x, 128-C1)).
9509SDValue PPCTargetLowering::LowerROTL(SDValue Op, SelectionDAG &DAG) const {
9510   assert(Op.getOpcode() == ISD::ROTL && "Should only be called for ISD::ROTL");
9511   assert(Op.getValueType() == MVT::v1i128 &&
9512          "Only set v1i128 as custom, other type shouldn't reach here!");
9513 SDLoc dl(Op);
9514 SDValue N0 = peekThroughBitcasts(Op.getOperand(0));
9515 SDValue N1 = peekThroughBitcasts(Op.getOperand(1));
9516 unsigned SHLAmt = N1.getConstantOperandVal(0);
9517 if (SHLAmt % 8 == 0) {
9518 SmallVector<int, 16> Mask(16, 0);
9519 std::iota(Mask.begin(), Mask.end(), 0);
9520 std::rotate(Mask.begin(), Mask.begin() + SHLAmt / 8, Mask.end());
9521 if (SDValue Shuffle =
9522 DAG.getVectorShuffle(MVT::v16i8, dl, DAG.getBitcast(MVT::v16i8, N0),
9523 DAG.getUNDEF(MVT::v16i8), Mask))
9524 return DAG.getNode(ISD::BITCAST, dl, MVT::v1i128, Shuffle);
9525 }
9526 SDValue ArgVal = DAG.getBitcast(MVT::i128, N0);
9527 SDValue SHLOp = DAG.getNode(ISD::SHL, dl, MVT::i128, ArgVal,
9528 DAG.getConstant(SHLAmt, dl, MVT::i32));
9529 SDValue SRLOp = DAG.getNode(ISD::SRL, dl, MVT::i128, ArgVal,
9530 DAG.getConstant(128 - SHLAmt, dl, MVT::i32));
9531 SDValue OROp = DAG.getNode(ISD::OR, dl, MVT::i128, SHLOp, SRLOp);
9532 return DAG.getNode(ISD::BITCAST, dl, MVT::v1i128, OROp);
9533}
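When the rotate amount is a byte multiple, the mask built above is just the identity byte order rotated by SHLAmt/8 positions; e.g. a 16-bit rotate produces {2,3,4,...,15,0,1}. Standalone sketch of the mask construction:

    #include <algorithm>
    #include <numeric>
    #include <vector>
    static std::vector<int> rotlByteMask(unsigned SHLAmt) {
      std::vector<int> Mask(16, 0);
      std::iota(Mask.begin(), Mask.end(), 0);
      std::rotate(Mask.begin(), Mask.begin() + SHLAmt / 8, Mask.end());
      return Mask;
    }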
9534
9535/// LowerVECTOR_SHUFFLE - Return the code we lower for VECTOR_SHUFFLE. If this
9536/// is a shuffle we can handle in a single instruction, return it. Otherwise,
9537/// return the code it can be lowered into. Worst case, it can always be
9538/// lowered into a vperm.
9539SDValue PPCTargetLowering::LowerVECTOR_SHUFFLE(SDValue Op,
9540 SelectionDAG &DAG) const {
9541 SDLoc dl(Op);
9542 SDValue V1 = Op.getOperand(0);
9543 SDValue V2 = Op.getOperand(1);
9544 ShuffleVectorSDNode *SVOp = cast<ShuffleVectorSDNode>(Op);
9545
9546 // Any nodes that were combined in the target-independent combiner prior
9547 // to vector legalization will not be sent to the target combine. Try to
9548 // combine it here.
9549 if (SDValue NewShuffle = combineVectorShuffle(SVOp, DAG)) {
  1: Calling 'SDValue::operator bool'
  4: Returning from 'SDValue::operator bool'
  5: Taking false branch
9550 if (!isa<ShuffleVectorSDNode>(NewShuffle))
9551 return NewShuffle;
9552 Op = NewShuffle;
9553 SVOp = cast<ShuffleVectorSDNode>(Op);
9554 V1 = Op.getOperand(0);
9555 V2 = Op.getOperand(1);
9556 }
9557 EVT VT = Op.getValueType();
9558 bool isLittleEndian = Subtarget.isLittleEndian();
9559
9560 unsigned ShiftElts, InsertAtByte;
  6: 'ShiftElts' declared without an initial value
9561 bool Swap = false;
9562
9563 // If this is a load-and-splat, we can do that with a single instruction
9564 // in some cases. However if the load has multiple uses, we don't want to
9565 // combine it because that will just produce multiple loads.
9566 bool IsPermutedLoad = false;
9567 const SDValue *InputLoad = getNormalLoadInput(V1, IsPermutedLoad);
  7: Calling 'getNormalLoadInput'
  17: Returning from 'getNormalLoadInput'
9568   if (InputLoad && Subtarget.hasVSX() && V2.isUndef() &&
  17.1: 'InputLoad' is null
9569 (PPC::isSplatShuffleMask(SVOp, 4) || PPC::isSplatShuffleMask(SVOp, 8)) &&
9570 InputLoad->hasOneUse()) {
9571 bool IsFourByte = PPC::isSplatShuffleMask(SVOp, 4);
9572 int SplatIdx =
9573 PPC::getSplatIdxForPPCMnemonics(SVOp, IsFourByte ? 4 : 8, DAG);
9574
9575 // The splat index for permuted loads will be in the left half of the vector
9576 // which is strictly wider than the loaded value by 8 bytes. So we need to
9577 // adjust the splat index to point to the correct address in memory.
9578 if (IsPermutedLoad) {
9579       assert(isLittleEndian && "Unexpected permuted load on big endian target");
9580 SplatIdx += IsFourByte ? 2 : 1;
9581       assert((SplatIdx < (IsFourByte ? 4 : 2)) &&
9582              "Splat of a value outside of the loaded memory");
9583 }
9584
9585 LoadSDNode *LD = cast<LoadSDNode>(*InputLoad);
9586 // For 4-byte load-and-splat, we need Power9.
9587 if ((IsFourByte && Subtarget.hasP9Vector()) || !IsFourByte) {
9588 uint64_t Offset = 0;
9589 if (IsFourByte)
9590 Offset = isLittleEndian ? (3 - SplatIdx) * 4 : SplatIdx * 4;
9591 else
9592 Offset = isLittleEndian ? (1 - SplatIdx) * 8 : SplatIdx * 8;
9593
9594 SDValue BasePtr = LD->getBasePtr();
9595 if (Offset != 0)
9596 BasePtr = DAG.getNode(ISD::ADD, dl, getPointerTy(DAG.getDataLayout()),
9597 BasePtr, DAG.getIntPtrConstant(Offset, dl));
9598 SDValue Ops[] = {
9599 LD->getChain(), // Chain
9600 BasePtr, // BasePtr
9601 DAG.getValueType(Op.getValueType()) // VT
9602 };
9603 SDVTList VTL =
9604 DAG.getVTList(IsFourByte ? MVT::v4i32 : MVT::v2i64, MVT::Other);
9605 SDValue LdSplt =
9606 DAG.getMemIntrinsicNode(PPCISD::LD_SPLAT, dl, VTL,
9607 Ops, LD->getMemoryVT(), LD->getMemOperand());
9608 DAG.ReplaceAllUsesOfValueWith(InputLoad->getValue(1), LdSplt.getValue(1));
9609 if (LdSplt.getValueType() != SVOp->getValueType(0))
9610 LdSplt = DAG.getBitcast(SVOp->getValueType(0), LdSplt);
9611 return LdSplt;
9612 }
9613 }
9614 if (Subtarget.hasP9Vector() &&
  18: Assuming the condition is false
9615 PPC::isXXINSERTWMask(SVOp, ShiftElts, InsertAtByte, Swap,
9616 isLittleEndian)) {
9617 if (Swap)
9618 std::swap(V1, V2);
9619 SDValue Conv1 = DAG.getNode(ISD::BITCAST, dl, MVT::v4i32, V1);
9620 SDValue Conv2 = DAG.getNode(ISD::BITCAST, dl, MVT::v4i32, V2);
9621 if (ShiftElts) {
9622 SDValue Shl = DAG.getNode(PPCISD::VECSHL, dl, MVT::v4i32, Conv2, Conv2,
9623 DAG.getConstant(ShiftElts, dl, MVT::i32));
9624 SDValue Ins = DAG.getNode(PPCISD::VECINSERT, dl, MVT::v4i32, Conv1, Shl,
9625 DAG.getConstant(InsertAtByte, dl, MVT::i32));
9626 return DAG.getNode(ISD::BITCAST, dl, MVT::v16i8, Ins);
9627 }
9628 SDValue Ins = DAG.getNode(PPCISD::VECINSERT, dl, MVT::v4i32, Conv1, Conv2,
9629 DAG.getConstant(InsertAtByte, dl, MVT::i32));
9630 return DAG.getNode(ISD::BITCAST, dl, MVT::v16i8, Ins);
9631 }
9632
9633 if (Subtarget.hasPrefixInstrs()) {
  19: Assuming the condition is false
  20: Taking false branch
9634 SDValue SplatInsertNode;
9635 if ((SplatInsertNode = lowerToXXSPLTI32DX(SVOp, DAG)))
9636 return SplatInsertNode;
9637 }
9638
9639 if (Subtarget.hasP9Altivec()) {
  21: Assuming the condition is false
  22: Taking false branch
9640 SDValue NewISDNode;
9641 if ((NewISDNode = lowerToVINSERTH(SVOp, DAG)))
9642 return NewISDNode;
9643
9644 if ((NewISDNode = lowerToVINSERTB(SVOp, DAG)))
9645 return NewISDNode;
9646 }
9647
9648 if (Subtarget.hasVSX() &&
  23: Assuming the condition is true
  55: Taking true branch
9649 PPC::isXXSLDWIShuffleMask(SVOp, ShiftElts, Swap, isLittleEndian)) {
  24: Calling 'isXXSLDWIShuffleMask'
  54: Returning from 'isXXSLDWIShuffleMask'
9650     if (Swap)
  55.1: 'Swap' is false
  56: Taking false branch
9651 std::swap(V1, V2);
9652 SDValue Conv1 = DAG.getNode(ISD::BITCAST, dl, MVT::v4i32, V1);
9653 SDValue Conv2 =
9654 DAG.getNode(ISD::BITCAST, dl, MVT::v4i32, V2.isUndef() ? V1 : V2);
  57: '?' condition is false
9655
9656 SDValue Shl = DAG.getNode(PPCISD::VECSHL, dl, MVT::v4i32, Conv1, Conv2,
9657 DAG.getConstant(ShiftElts, dl, MVT::i32));
  58: 1st function call argument is an uninitialized value
9658 return DAG.getNode(ISD::BITCAST, dl, MVT::v16i8, Shl);
9659 }
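This block is the sink of the warning reported above: 'ShiftElts' is declared at line 9560 without an initial value (step 6), PPC::isXXINSERTWMask is assumed to fail without writing it (step 18), and on the path the analyzer follows through PPC::isXXSLDWIShuffleMask (steps 24-54) the function returns true while no store to 'ShiftElts' is observed, so the value passed to DAG.getConstant at line 9657 is treated as uninitialized. Whether that path is reachable in practice depends on the internals of isXXSLDWIShuffleMask; a minimal defensive change, shown only as a sketch and not as the committed fix, is to give the out-parameters a defined value at their declaration:

    unsigned ShiftElts = 0, InsertAtByte = 0;  // instead of leaving them indeterminate at line 9560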
9660
9661 if (Subtarget.hasVSX() &&
9662 PPC::isXXPERMDIShuffleMask(SVOp, ShiftElts, Swap, isLittleEndian)) {
9663 if (Swap)
9664 std::swap(V1, V2);
9665 SDValue Conv1 = DAG.getNode(ISD::BITCAST, dl, MVT::v2i64, V1);
9666 SDValue Conv2 =
9667 DAG.getNode(ISD::BITCAST, dl, MVT::v2i64, V2.isUndef() ? V1 : V2);
9668
9669 SDValue PermDI = DAG.getNode(PPCISD::XXPERMDI, dl, MVT::v2i64, Conv1, Conv2,
9670 DAG.getConstant(ShiftElts, dl, MVT::i32));
9671 return DAG.getNode(ISD::BITCAST, dl, MVT::v16i8, PermDI);
9672 }
9673
9674 if (Subtarget.hasP9Vector()) {
9675 if (PPC::isXXBRHShuffleMask(SVOp)) {
9676 SDValue Conv = DAG.getNode(ISD::BITCAST, dl, MVT::v8i16, V1);
9677 SDValue ReveHWord = DAG.getNode(ISD::BSWAP, dl, MVT::v8i16, Conv);
9678 return DAG.getNode(ISD::BITCAST, dl, MVT::v16i8, ReveHWord);
9679 } else if (PPC::isXXBRWShuffleMask(SVOp)) {
9680 SDValue Conv = DAG.getNode(ISD::BITCAST, dl, MVT::v4i32, V1);
9681 SDValue ReveWord = DAG.getNode(ISD::BSWAP, dl, MVT::v4i32, Conv);
9682 return DAG.getNode(ISD::BITCAST, dl, MVT::v16i8, ReveWord);
9683 } else if (PPC::isXXBRDShuffleMask(SVOp)) {
9684 SDValue Conv = DAG.getNode(ISD::BITCAST, dl, MVT::v2i64, V1);
9685 SDValue ReveDWord = DAG.getNode(ISD::BSWAP, dl, MVT::v2i64, Conv);
9686 return DAG.getNode(ISD::BITCAST, dl, MVT::v16i8, ReveDWord);
9687 } else if (PPC::isXXBRQShuffleMask(SVOp)) {
9688 SDValue Conv = DAG.getNode(ISD::BITCAST, dl, MVT::v1i128, V1);
9689 SDValue ReveQWord = DAG.getNode(ISD::BSWAP, dl, MVT::v1i128, Conv);
9690 return DAG.getNode(ISD::BITCAST, dl, MVT::v16i8, ReveQWord);
9691 }
9692 }
9693
9694 if (Subtarget.hasVSX()) {
9695 if (V2.isUndef() && PPC::isSplatShuffleMask(SVOp, 4)) {
9696 int SplatIdx = PPC::getSplatIdxForPPCMnemonics(SVOp, 4, DAG);
9697
9698 SDValue Conv = DAG.getNode(ISD::BITCAST, dl, MVT::v4i32, V1);
9699 SDValue Splat = DAG.getNode(PPCISD::XXSPLT, dl, MVT::v4i32, Conv,
9700 DAG.getConstant(SplatIdx, dl, MVT::i32));
9701 return DAG.getNode(ISD::BITCAST, dl, MVT::v16i8, Splat);
9702 }
9703
9704 // Left shifts of 8 bytes are actually swaps. Convert accordingly.
9705 if (V2.isUndef() && PPC::isVSLDOIShuffleMask(SVOp, 1, DAG) == 8) {
9706 SDValue Conv = DAG.getNode(ISD::BITCAST, dl, MVT::v2f64, V1);
9707 SDValue Swap = DAG.getNode(PPCISD::SWAP_NO_CHAIN, dl, MVT::v2f64, Conv);
9708 return DAG.getNode(ISD::BITCAST, dl, MVT::v16i8, Swap);
9709 }
9710 }
9711
9712 // Cases that are handled by instructions that take permute immediates
9713 // (such as vsplt*) should be left as VECTOR_SHUFFLE nodes so they can be
9714 // selected by the instruction selector.
9715 if (V2.isUndef()) {
9716 if (PPC::isSplatShuffleMask(SVOp, 1) ||
9717 PPC::isSplatShuffleMask(SVOp, 2) ||
9718 PPC::isSplatShuffleMask(SVOp, 4) ||
9719 PPC::isVPKUWUMShuffleMask(SVOp, 1, DAG) ||
9720 PPC::isVPKUHUMShuffleMask(SVOp, 1, DAG) ||
9721 PPC::isVSLDOIShuffleMask(SVOp, 1, DAG) != -1 ||
9722 PPC::isVMRGLShuffleMask(SVOp, 1, 1, DAG) ||
9723 PPC::isVMRGLShuffleMask(SVOp, 2, 1, DAG) ||
9724 PPC::isVMRGLShuffleMask(SVOp, 4, 1, DAG) ||
9725 PPC::isVMRGHShuffleMask(SVOp, 1, 1, DAG) ||
9726 PPC::isVMRGHShuffleMask(SVOp, 2, 1, DAG) ||
9727 PPC::isVMRGHShuffleMask(SVOp, 4, 1, DAG) ||
9728 (Subtarget.hasP8Altivec() && (
9729 PPC::isVPKUDUMShuffleMask(SVOp, 1, DAG) ||
9730 PPC::isVMRGEOShuffleMask(SVOp, true, 1, DAG) ||
9731 PPC::isVMRGEOShuffleMask(SVOp, false, 1, DAG)))) {
9732 return Op;
9733 }
9734 }
9735
9736 // Altivec has a variety of "shuffle immediates" that take two vector inputs
9737 // and produce a fixed permutation. If any of these match, do not lower to
9738 // VPERM.
9739 unsigned int ShuffleKind = isLittleEndian ? 2 : 0;
9740 if (PPC::isVPKUWUMShuffleMask(SVOp, ShuffleKind, DAG) ||
9741 PPC::isVPKUHUMShuffleMask(SVOp, ShuffleKind, DAG) ||
9742 PPC::isVSLDOIShuffleMask(SVOp, ShuffleKind, DAG) != -1 ||
9743 PPC::isVMRGLShuffleMask(SVOp, 1, ShuffleKind, DAG) ||
9744 PPC::isVMRGLShuffleMask(SVOp, 2, ShuffleKind, DAG) ||
9745 PPC::isVMRGLShuffleMask(SVOp, 4, ShuffleKind, DAG) ||
9746 PPC::isVMRGHShuffleMask(SVOp, 1, ShuffleKind, DAG) ||
9747 PPC::isVMRGHShuffleMask(SVOp, 2, ShuffleKind, DAG) ||
9748 PPC::isVMRGHShuffleMask(SVOp, 4, ShuffleKind, DAG) ||
9749 (Subtarget.hasP8Altivec() && (
9750 PPC::isVPKUDUMShuffleMask(SVOp, ShuffleKind, DAG) ||
9751 PPC::isVMRGEOShuffleMask(SVOp, true, ShuffleKind, DAG) ||
9752 PPC::isVMRGEOShuffleMask(SVOp, false, ShuffleKind, DAG))))
9753 return Op;
9754
9755 // Check to see if this is a shuffle of 4-byte values. If so, we can use our
9756 // perfect shuffle table to emit an optimal matching sequence.
9757 ArrayRef<int> PermMask = SVOp->getMask();
9758
9759 unsigned PFIndexes[4];
9760 bool isFourElementShuffle = true;
9761 for (unsigned i = 0; i != 4 && isFourElementShuffle; ++i) { // Element number
9762 unsigned EltNo = 8; // Start out undef.
9763 for (unsigned j = 0; j != 4; ++j) { // Intra-element byte.
9764 if (PermMask[i*4+j] < 0)
9765 continue; // Undef, ignore it.
9766
9767 unsigned ByteSource = PermMask[i*4+j];
9768 if ((ByteSource & 3) != j) {
9769 isFourElementShuffle = false;
9770 break;
9771 }
9772
9773 if (EltNo == 8) {
9774 EltNo = ByteSource/4;
9775 } else if (EltNo != ByteSource/4) {
9776 isFourElementShuffle = false;
9777 break;
9778 }
9779 }
9780 PFIndexes[i] = EltNo;
9781 }
9782
9783 // If this shuffle can be expressed as a shuffle of 4-byte elements, use the
9784 // perfect shuffle vector to determine if it is cost effective to do this as
9785 // discrete instructions, or whether we should use a vperm.
9786 // For now, we skip this for little endian until such time as we have a
9787 // little-endian perfect shuffle table.
9788 if (isFourElementShuffle && !isLittleEndian) {
9789 // Compute the index in the perfect shuffle table.
9790 unsigned PFTableIndex =
9791 PFIndexes[0]*9*9*9+PFIndexes[1]*9*9+PFIndexes[2]*9+PFIndexes[3];
9792
9793 unsigned PFEntry = PerfectShuffleTable[PFTableIndex];
9794 unsigned Cost = (PFEntry >> 30);
9795
9796 // Determining when to avoid vperm is tricky. Many things affect the cost
9797 // of vperm, particularly how many times the perm mask needs to be computed.
9798 // For example, if the perm mask can be hoisted out of a loop or is already
9799 // used (perhaps because there are multiple permutes with the same shuffle
9800 // mask?) the vperm has a cost of 1. OTOH, hoisting the permute mask out of
9801 // the loop requires an extra register.
9802 //
9803 // As a compromise, we only emit discrete instructions if the shuffle can be
9804 // generated in 3 or fewer operations. When we have loop information
9805 // available, if this block is within a loop, we should avoid using vperm
9806 // for 3-operation perms and use a constant pool load instead.
9807 if (Cost < 3)
9808 return GeneratePerfectShuffle(PFEntry, V1, V2, DAG, dl);
9809 }
9810
9811 // Lower this to a VPERM(V1, V2, V3) expression, where V3 is a constant
9812 // vector that will get spilled to the constant pool.
9813 if (V2.isUndef()) V2 = V1;
9814
9815 // The SHUFFLE_VECTOR mask is almost exactly what we want for vperm, except
9816 // that it is in input element units, not in bytes. Convert now.
9817
9818 // For little endian, the order of the input vectors is reversed, and
9819 // the permutation mask is complemented with respect to 31. This is
9820 // necessary to produce proper semantics with the big-endian-biased vperm
9821 // instruction.
9822 EVT EltVT = V1.getValueType().getVectorElementType();
9823 unsigned BytesPerElement = EltVT.getSizeInBits()/8;
9824
9825 SmallVector<SDValue, 16> ResultMask;
9826 for (unsigned i = 0, e = VT.getVectorNumElements(); i != e; ++i) {
9827 unsigned SrcElt = PermMask[i] < 0 ? 0 : PermMask[i];
9828
9829 for (unsigned j = 0; j != BytesPerElement; ++j)
9830 if (isLittleEndian)
9831 ResultMask.push_back(DAG.getConstant(31 - (SrcElt*BytesPerElement + j),
9832 dl, MVT::i32));
9833 else
9834 ResultMask.push_back(DAG.getConstant(SrcElt*BytesPerElement + j, dl,
9835 MVT::i32));
9836 }
9837
9838 ShufflesHandledWithVPERM++;
9839 SDValue VPermMask = DAG.getBuildVector(MVT::v16i8, dl, ResultMask);
9840   LLVM_DEBUG(dbgs() << "Emitting a VPERM for the following shuffle:\n");
9841   LLVM_DEBUG(SVOp->dump());
9842   LLVM_DEBUG(dbgs() << "With the following permute control vector:\n");
9843   LLVM_DEBUG(VPermMask.dump());
9844
9845 if (isLittleEndian)
9846 return DAG.getNode(PPCISD::VPERM, dl, V1.getValueType(),
9847 V2, V1, VPermMask);
9848 else
9849 return DAG.getNode(PPCISD::VPERM, dl, V1.getValueType(),
9850 V1, V2, VPermMask);
9851}
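The permute control vector built above maps each result byte to a source byte; on big-endian that is SrcElt*BytesPerElement + j, on little-endian it is complemented with respect to 31 because the two inputs are also swapped. Standalone sketch of the byte computation (illustrative names only):

    #include <cstdint>
    #include <vector>
    static std::vector<uint8_t> buildVPermBytes(const std::vector<int> &PermMask,
                                                unsigned BytesPerElement,
                                                bool IsLittleEndian) {
      std::vector<uint8_t> Bytes;
      for (int Elt : PermMask) {
        unsigned SrcElt = Elt < 0 ? 0 : unsigned(Elt);
        for (unsigned j = 0; j != BytesPerElement; ++j)
          Bytes.push_back(IsLittleEndian ? 31 - (SrcElt * BytesPerElement + j)
                                         : SrcElt * BytesPerElement + j);
      }
      return Bytes;
    }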
9852
9853/// getVectorCompareInfo - Given an intrinsic, return false if it is not a
9854/// vector comparison. If it is, return true and fill in Opc/isDot with
9855/// information about the intrinsic.
9856static bool getVectorCompareInfo(SDValue Intrin, int &CompareOpc,
9857 bool &isDot, const PPCSubtarget &Subtarget) {
9858 unsigned IntrinsicID =
9859 cast<ConstantSDNode>(Intrin.getOperand(0))->getZExtValue();
9860 CompareOpc = -1;
9861 isDot = false;
9862 switch (IntrinsicID) {
9863 default:
9864 return false;
9865 // Comparison predicates.
9866 case Intrinsic::ppc_altivec_vcmpbfp_p:
9867 CompareOpc = 966;
9868 isDot = true;
9869 break;
9870 case Intrinsic::ppc_altivec_vcmpeqfp_p:
9871 CompareOpc = 198;
9872 isDot = true;
9873 break;
9874 case Intrinsic::ppc_altivec_vcmpequb_p:
9875 CompareOpc = 6;
9876 isDot = true;
9877 break;
9878 case Intrinsic::ppc_altivec_vcmpequh_p:
9879 CompareOpc = 70;
9880 isDot = true;
9881 break;
9882 case Intrinsic::ppc_altivec_vcmpequw_p:
9883 CompareOpc = 134;
9884 isDot = true;
9885 break;
9886 case Intrinsic::ppc_altivec_vcmpequd_p:
9887 if (Subtarget.hasP8Altivec()) {
9888 CompareOpc = 199;
9889 isDot = true;
9890 } else
9891 return false;
9892 break;
9893 case Intrinsic::ppc_altivec_vcmpneb_p:
9894 case Intrinsic::ppc_altivec_vcmpneh_p:
9895 case Intrinsic::ppc_altivec_vcmpnew_p:
9896 case Intrinsic::ppc_altivec_vcmpnezb_p:
9897 case Intrinsic::ppc_altivec_vcmpnezh_p:
9898 case Intrinsic::ppc_altivec_vcmpnezw_p:
9899 if (Subtarget.hasP9Altivec()) {
9900 switch (IntrinsicID) {
9901 default:
9902       llvm_unreachable("Unknown comparison intrinsic.");
9903 case Intrinsic::ppc_altivec_vcmpneb_p:
9904 CompareOpc = 7;
9905 break;
9906 case Intrinsic::ppc_altivec_vcmpneh_p:
9907 CompareOpc = 71;
9908 break;
9909 case Intrinsic::ppc_altivec_vcmpnew_p:
9910 CompareOpc = 135;
9911 break;
9912 case Intrinsic::ppc_altivec_vcmpnezb_p:
9913 CompareOpc = 263;
9914 break;
9915 case Intrinsic::ppc_altivec_vcmpnezh_p:
9916 CompareOpc = 327;
9917 break;
9918 case Intrinsic::ppc_altivec_vcmpnezw_p:
9919 CompareOpc = 391;
9920 break;
9921 }
9922 isDot = true;
9923 } else
9924 return false;
9925 break;
9926 case Intrinsic::ppc_altivec_vcmpgefp_p:
9927 CompareOpc = 454;
9928 isDot = true;
9929 break;
9930 case Intrinsic::ppc_altivec_vcmpgtfp_p:
9931 CompareOpc = 710;
9932 isDot = true;
9933 break;
9934 case Intrinsic::ppc_altivec_vcmpgtsb_p:
9935 CompareOpc = 774;
9936 isDot = true;
9937 break;
9938 case Intrinsic::ppc_altivec_vcmpgtsh_p:
9939 CompareOpc = 838;
9940 isDot = true;
9941 break;
9942 case Intrinsic::ppc_altivec_vcmpgtsw_p:
9943 CompareOpc = 902;
9944 isDot = true;
9945 break;
9946 case Intrinsic::ppc_altivec_vcmpgtsd_p:
9947 if (Subtarget.hasP8Altivec()) {
9948 CompareOpc = 967;
9949 isDot = true;
9950 } else
9951 return false;
9952 break;
9953 case Intrinsic::ppc_altivec_vcmpgtub_p:
9954 CompareOpc = 518;
9955 isDot = true;
9956 break;
9957 case Intrinsic::ppc_altivec_vcmpgtuh_p:
9958 CompareOpc = 582;
9959 isDot = true;
9960 break;
9961 case Intrinsic::ppc_altivec_vcmpgtuw_p:
9962 CompareOpc = 646;
9963 isDot = true;
9964 break;
9965 case Intrinsic::ppc_altivec_vcmpgtud_p:
9966 if (Subtarget.hasP8Altivec()) {
9967 CompareOpc = 711;
9968 isDot = true;
9969 } else
9970 return false;
9971 break;
9972
9973 case Intrinsic::ppc_altivec_vcmpequq:
9974 case Intrinsic::ppc_altivec_vcmpgtsq:
9975 case Intrinsic::ppc_altivec_vcmpgtuq:
9976 if (!Subtarget.isISA3_1())
9977 return false;
9978 switch (IntrinsicID) {
9979 default:
9980     llvm_unreachable("Unknown comparison intrinsic.");
9981 case Intrinsic::ppc_altivec_vcmpequq:
9982 CompareOpc = 455;
9983 break;
9984 case Intrinsic::ppc_altivec_vcmpgtsq:
9985 CompareOpc = 903;
9986 break;
9987 case Intrinsic::ppc_altivec_vcmpgtuq:
9988 CompareOpc = 647;
9989 break;
9990 }
9991 break;
9992
9993 // VSX predicate comparisons use the same infrastructure
9994 case Intrinsic::ppc_vsx_xvcmpeqdp_p:
9995 case Intrinsic::ppc_vsx_xvcmpgedp_p:
9996 case Intrinsic::ppc_vsx_xvcmpgtdp_p:
9997 case Intrinsic::ppc_vsx_xvcmpeqsp_p:
9998 case Intrinsic::ppc_vsx_xvcmpgesp_p:
9999 case Intrinsic::ppc_vsx_xvcmpgtsp_p:
10000 if (Subtarget.hasVSX()) {
10001 switch (IntrinsicID) {
10002 case Intrinsic::ppc_vsx_xvcmpeqdp_p:
10003 CompareOpc = 99;
10004 break;
10005 case Intrinsic::ppc_vsx_xvcmpgedp_p:
10006 CompareOpc = 115;
10007 break;
10008 case Intrinsic::ppc_vsx_xvcmpgtdp_p:
10009 CompareOpc = 107;
10010 break;
10011 case Intrinsic::ppc_vsx_xvcmpeqsp_p:
10012 CompareOpc = 67;
10013 break;
10014 case Intrinsic::ppc_vsx_xvcmpgesp_p:
10015 CompareOpc = 83;
10016 break;
10017 case Intrinsic::ppc_vsx_xvcmpgtsp_p:
10018 CompareOpc = 75;
10019 break;
10020 }
10021 isDot = true;
10022 } else
10023 return false;
10024 break;
10025
10026 // Normal Comparisons.
10027 case Intrinsic::ppc_altivec_vcmpbfp:
10028 CompareOpc = 966;
10029 break;
10030 case Intrinsic::ppc_altivec_vcmpeqfp:
10031 CompareOpc = 198;
10032 break;
10033 case Intrinsic::ppc_altivec_vcmpequb:
10034 CompareOpc = 6;
10035 break;
10036 case Intrinsic::ppc_altivec_vcmpequh:
10037 CompareOpc = 70;
10038 break;
10039 case Intrinsic::ppc_altivec_vcmpequw:
10040 CompareOpc = 134;
10041 break;
10042 case Intrinsic::ppc_altivec_vcmpequd:
10043 if (Subtarget.hasP8Altivec())
10044 CompareOpc = 199;
10045 else
10046 return false;
10047 break;
10048 case Intrinsic::ppc_altivec_vcmpneb:
10049 case Intrinsic::ppc_altivec_vcmpneh:
10050 case Intrinsic::ppc_altivec_vcmpnew:
10051 case Intrinsic::ppc_altivec_vcmpnezb:
10052 case Intrinsic::ppc_altivec_vcmpnezh:
10053 case Intrinsic::ppc_altivec_vcmpnezw:
10054 if (Subtarget.hasP9Altivec())
10055 switch (IntrinsicID) {
10056 default:
10057       llvm_unreachable("Unknown comparison intrinsic.");
10058 case Intrinsic::ppc_altivec_vcmpneb:
10059 CompareOpc = 7;
10060 break;
10061 case Intrinsic::ppc_altivec_vcmpneh:
10062 CompareOpc = 71;
10063 break;
10064 case Intrinsic::ppc_altivec_vcmpnew:
10065 CompareOpc = 135;
10066 break;
10067 case Intrinsic::ppc_altivec_vcmpnezb:
10068 CompareOpc = 263;
10069 break;
10070 case Intrinsic::ppc_altivec_vcmpnezh:
10071 CompareOpc = 327;
10072 break;
10073 case Intrinsic::ppc_altivec_vcmpnezw:
10074 CompareOpc = 391;
10075 break;
10076 }
10077 else
10078 return false;
10079 break;
10080 case Intrinsic::ppc_altivec_vcmpgefp:
10081 CompareOpc = 454;
10082 break;
10083 case Intrinsic::ppc_altivec_vcmpgtfp:
10084 CompareOpc = 710;
10085 break;
10086 case Intrinsic::ppc_altivec_vcmpgtsb:
10087 CompareOpc = 774;
10088 break;
10089 case Intrinsic::ppc_altivec_vcmpgtsh:
10090 CompareOpc = 838;
10091 break;
10092 case Intrinsic::ppc_altivec_vcmpgtsw:
10093 CompareOpc = 902;
10094 break;
10095 case Intrinsic::ppc_altivec_vcmpgtsd:
10096 if (Subtarget.hasP8Altivec())
10097 CompareOpc = 967;
10098 else
10099 return false;
10100 break;
10101 case Intrinsic::ppc_altivec_vcmpgtub:
10102 CompareOpc = 518;
10103 break;
10104 case Intrinsic::ppc_altivec_vcmpgtuh:
10105 CompareOpc = 582;
10106 break;
10107 case Intrinsic::ppc_altivec_vcmpgtuw:
10108 CompareOpc = 646;
10109 break;
10110 case Intrinsic::ppc_altivec_vcmpgtud:
10111 if (Subtarget.hasP8Altivec())
10112 CompareOpc = 711;
10113 else
10114 return false;
10115 break;
10116 case Intrinsic::ppc_altivec_vcmpequq_p:
10117 case Intrinsic::ppc_altivec_vcmpgtsq_p:
10118 case Intrinsic::ppc_altivec_vcmpgtuq_p:
10119 if (!Subtarget.isISA3_1())
10120 return false;
10121 switch (IntrinsicID) {
10122 default:
10123     llvm_unreachable("Unknown comparison intrinsic.");
10124 case Intrinsic::ppc_altivec_vcmpequq_p:
10125 CompareOpc = 455;
10126 break;
10127 case Intrinsic::ppc_altivec_vcmpgtsq_p:
10128 CompareOpc = 903;
10129 break;
10130 case Intrinsic::ppc_altivec_vcmpgtuq_p:
10131 CompareOpc = 647;
10132 break;
10133 }
10134 isDot = true;
10135 break;
10136 }
10137 return true;
10138}
10139
10140/// LowerINTRINSIC_WO_CHAIN - If this is an intrinsic that we want to custom
10141/// lower, do it, otherwise return null.
10142SDValue PPCTargetLowering::LowerINTRINSIC_WO_CHAIN(SDValue Op,
10143 SelectionDAG &DAG) const {
10144 unsigned IntrinsicID =
10145 cast<ConstantSDNode>(Op.getOperand(0))->getZExtValue();
10146
10147 SDLoc dl(Op);
10148
10149 switch (IntrinsicID) {
10150 case Intrinsic::thread_pointer:
10151 // Reads the thread pointer register, used for __builtin_thread_pointer.
10152 if (Subtarget.isPPC64())
10153 return DAG.getRegister(PPC::X13, MVT::i64);
10154 return DAG.getRegister(PPC::R2, MVT::i32);
10155
10156 case Intrinsic::ppc_mma_disassemble_acc:
10157 case Intrinsic::ppc_vsx_disassemble_pair: {
10158 int NumVecs = 2;
10159 SDValue WideVec = Op.getOperand(1);
10160 if (IntrinsicID == Intrinsic::ppc_mma_disassemble_acc) {
10161 NumVecs = 4;
10162 WideVec = DAG.getNode(PPCISD::XXMFACC, dl, MVT::v512i1, WideVec);
10163 }
10164 SmallVector<SDValue, 4> RetOps;
10165 for (int VecNo = 0; VecNo < NumVecs; VecNo++) {
10166 SDValue Extract = DAG.getNode(
10167 PPCISD::EXTRACT_VSX_REG, dl, MVT::v16i8, WideVec,
10168 DAG.getConstant(Subtarget.isLittleEndian() ? NumVecs - 1 - VecNo
10169 : VecNo,
10170 dl, getPointerTy(DAG.getDataLayout())));
10171 RetOps.push_back(Extract);
10172 }
10173 return DAG.getMergeValues(RetOps, dl);
10174 }
10175 }
10176
10177 // If this is a lowered altivec predicate compare, CompareOpc is set to the
10178 // opcode number of the comparison.
10179 int CompareOpc;
10180 bool isDot;
10181 if (!getVectorCompareInfo(Op, CompareOpc, isDot, Subtarget))
10182 return SDValue(); // Don't custom lower most intrinsics.
10183
10184 // If this is a non-dot comparison, make the VCMP node and we are done.
10185 if (!isDot) {
10186 SDValue Tmp = DAG.getNode(PPCISD::VCMP, dl, Op.getOperand(2).getValueType(),
10187 Op.getOperand(1), Op.getOperand(2),
10188 DAG.getConstant(CompareOpc, dl, MVT::i32));
10189 return DAG.getNode(ISD::BITCAST, dl, Op.getValueType(), Tmp);
10190 }
10191
10192 // Create the PPCISD altivec 'dot' comparison node.
10193 SDValue Ops[] = {
10194 Op.getOperand(2), // LHS
10195 Op.getOperand(3), // RHS
10196 DAG.getConstant(CompareOpc, dl, MVT::i32)
10197 };
10198 EVT VTs[] = { Op.getOperand(2).getValueType(), MVT::Glue };
10199 SDValue CompNode = DAG.getNode(PPCISD::VCMP_rec, dl, VTs, Ops);
10200
10201 // Now that we have the comparison, emit a copy from the CR to a GPR.
10202 // This is flagged to the above dot comparison.
10203 SDValue Flags = DAG.getNode(PPCISD::MFOCRF, dl, MVT::i32,
10204 DAG.getRegister(PPC::CR6, MVT::i32),
10205 CompNode.getValue(1));
10206
10207 // Unpack the result based on how the target uses it.
10208 unsigned BitNo; // Bit # of CR6.
10209 bool InvertBit; // Invert result?
10210 switch (cast<ConstantSDNode>(Op.getOperand(1))->getZExtValue()) {
10211 default: // Can't happen, don't crash on invalid number though.
10212 case 0: // Return the value of the EQ bit of CR6.
10213 BitNo = 0; InvertBit = false;
10214 break;
10215 case 1: // Return the inverted value of the EQ bit of CR6.
10216 BitNo = 0; InvertBit = true;
10217 break;
10218 case 2: // Return the value of the LT bit of CR6.
10219 BitNo = 2; InvertBit = false;
10220 break;
10221 case 3: // Return the inverted value of the LT bit of CR6.
10222 BitNo = 2; InvertBit = true;
10223 break;
10224 }
10225
10226 // Shift the bit into the low position.
10227 Flags = DAG.getNode(ISD::SRL, dl, MVT::i32, Flags,
10228 DAG.getConstant(8 - (3 - BitNo), dl, MVT::i32));
10229 // Isolate the bit.
10230 Flags = DAG.getNode(ISD::AND, dl, MVT::i32, Flags,
10231 DAG.getConstant(1, dl, MVT::i32));
10232
10233 // If we are supposed to, toggle the bit.
10234 if (InvertBit)
10235 Flags = DAG.getNode(ISD::XOR, dl, MVT::i32, Flags,
10236 DAG.getConstant(1, dl, MVT::i32));
10237 return Flags;
10238}
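The tail of the dot-comparison path above reduces to a shift, a mask, and an optional invert on the MFOCRF result; a scalar sketch of that sequence (the CR-field layout is taken from the shift expression in the code, not re-derived here):

    #include <cstdint>
    static uint32_t unpackCR6Bit(uint32_t Flags, unsigned BitNo, bool InvertBit) {
      Flags = (Flags >> (8 - (3 - BitNo))) & 1;  // isolate the requested bit
      return InvertBit ? Flags ^ 1 : Flags;      // optionally toggle it
    }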
10239
10240SDValue PPCTargetLowering::LowerINTRINSIC_VOID(SDValue Op,
10241 SelectionDAG &DAG) const {
10242 // SelectionDAGBuilder::visitTargetIntrinsic may insert one extra chain to
10243 // the beginning of the argument list.
10244 int ArgStart = isa<ConstantSDNode>(Op.getOperand(0)) ? 0 : 1;
10245 SDLoc DL(Op);
10246 switch (cast<ConstantSDNode>(Op.getOperand(ArgStart))->getZExtValue()) {
10247 case Intrinsic::ppc_cfence: {
10248     assert(ArgStart == 1 && "llvm.ppc.cfence must carry a chain argument.");
10249     assert(Subtarget.isPPC64() && "Only 64-bit is supported for now.");
10250 return SDValue(DAG.getMachineNode(PPC::CFENCE8, DL, MVT::Other,
10251 DAG.getNode(ISD::ANY_EXTEND, DL, MVT::i64,
10252 Op.getOperand(ArgStart + 1)),
10253 Op.getOperand(0)),
10254 0);
10255 }
10256 default:
10257 break;
10258 }
10259 return SDValue();
10260}
10261
10262// Lower scalar BSWAP64 to xxbrd.
10263SDValue PPCTargetLowering::LowerBSWAP(SDValue Op, SelectionDAG &DAG) const {
10264 SDLoc dl(Op);
10265 // MTVSRDD
10266 Op = DAG.getNode(ISD::BUILD_VECTOR, dl, MVT::v2i64, Op.getOperand(0),
10267 Op.getOperand(0));
10268 // XXBRD
10269 Op = DAG.getNode(ISD::BSWAP, dl, MVT::v2i64, Op);
10270 // MFVSRD
10271 int VectorIndex = 0;
10272 if (Subtarget.isLittleEndian())
10273 VectorIndex = 1;
10274 Op = DAG.getNode(ISD::EXTRACT_VECTOR_ELT, dl, MVT::i64, Op,
10275 DAG.getTargetConstant(VectorIndex, dl, MVT::i32));
10276 return Op;
10277}
10278
10279// ATOMIC_CMP_SWAP for i8/i16 needs to zero-extend its input since it will be
10280// compared to a value that is atomically loaded (atomic loads zero-extend).
10281SDValue PPCTargetLowering::LowerATOMIC_CMP_SWAP(SDValue Op,
10282 SelectionDAG &DAG) const {
10283   assert(Op.getOpcode() == ISD::ATOMIC_CMP_SWAP &&
10284          "Expecting an atomic compare-and-swap here.");
10285 SDLoc dl(Op);
10286 auto *AtomicNode = cast<AtomicSDNode>(Op.getNode());
10287 EVT MemVT = AtomicNode->getMemoryVT();
10288 if (MemVT.getSizeInBits() >= 32)
10289 return Op;
10290
10291 SDValue CmpOp = Op.getOperand(2);
10292 // If this is already correctly zero-extended, leave it alone.
10293 auto HighBits = APInt::getHighBitsSet(32, 32 - MemVT.getSizeInBits());
10294 if (DAG.MaskedValueIsZero(CmpOp, HighBits))
10295 return Op;
10296
10297 // Clear the high bits of the compare operand.
10298 unsigned MaskVal = (1 << MemVT.getSizeInBits()) - 1;
10299 SDValue NewCmpOp =
10300 DAG.getNode(ISD::AND, dl, MVT::i32, CmpOp,
10301 DAG.getConstant(MaskVal, dl, MVT::i32));
10302
10303 // Replace the existing compare operand with the properly zero-extended one.
10304 SmallVector<SDValue, 4> Ops;
10305 for (int i = 0, e = AtomicNode->getNumOperands(); i < e; i++)
10306 Ops.push_back(AtomicNode->getOperand(i));
10307 Ops[2] = NewCmpOp;
10308 MachineMemOperand *MMO = AtomicNode->getMemOperand();
10309 SDVTList Tys = DAG.getVTList(MVT::i32, MVT::Other);
10310 auto NodeTy =
10311 (MemVT == MVT::i8) ? PPCISD::ATOMIC_CMP_SWAP_8 : PPCISD::ATOMIC_CMP_SWAP_16;
10312 return DAG.getMemIntrinsicNode(NodeTy, dl, Tys, Ops, MemVT, MMO);
10313}
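For the narrow cmpxchg case above, the mask is simply the all-ones value of the memory width; a standalone sketch (0xFF for i8, 0xFFFF for i16):

    #include <cstdint>
    static uint32_t zeroExtendCmpOp(uint32_t CmpOp, unsigned MemBits) {
      uint32_t MaskVal = (1u << MemBits) - 1;
      return CmpOp & MaskVal;
    }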
10314
10315SDValue PPCTargetLowering::LowerSCALAR_TO_VECTOR(SDValue Op,
10316 SelectionDAG &DAG) const {
10317 SDLoc dl(Op);
10318 // Create a stack slot that is 16-byte aligned.
10319 MachineFrameInfo &MFI = DAG.getMachineFunction().getFrameInfo();
10320 int FrameIdx = MFI.CreateStackObject(16, Align(16), false);
10321 EVT PtrVT = getPointerTy(DAG.getDataLayout());
10322 SDValue FIdx = DAG.getFrameIndex(FrameIdx, PtrVT);
10323
10324 // Store the input value into Value#0 of the stack slot.
10325 SDValue Store = DAG.getStore(DAG.getEntryNode(), dl, Op.getOperand(0), FIdx,
10326 MachinePointerInfo());
10327 // Load it out.
10328 return DAG.getLoad(Op.getValueType(), dl, Store, FIdx, MachinePointerInfo());
10329}
10330
10331SDValue PPCTargetLowering::LowerINSERT_VECTOR_ELT(SDValue Op,
10332 SelectionDAG &DAG) const {
10333   assert(Op.getOpcode() == ISD::INSERT_VECTOR_ELT &&
10334          "Should only be called for ISD::INSERT_VECTOR_ELT");
10335
10336 ConstantSDNode *C = dyn_cast<ConstantSDNode>(Op.getOperand(2));
10337
10338 EVT VT = Op.getValueType();
10339 SDLoc dl(Op);
10340 SDValue V1 = Op.getOperand(0);
10341 SDValue V2 = Op.getOperand(1);
10342 SDValue V3 = Op.getOperand(2);
10343
10344 if (Subtarget.isISA3_1()) {
10345 // On P10, we have legal lowering for constant and variable indices for
10346 // integer vectors.
10347 if (VT == MVT::v16i8 || VT == MVT::v8i16 || VT == MVT::v4i32 ||
10348 VT == MVT::v2i64)
10349 return DAG.getNode(PPCISD::VECINSERT, dl, VT, V1, V2, V3);
10350 // For f32 and f64 vectors, we have legal lowering for variable indices.
10351 // For f32 we also have legal lowering when the element is loaded from
10352 // memory.
10353 if (VT == MVT::v4f32 || VT == MVT::v2f64) {
10354 if (!C || (VT == MVT::v4f32 && dyn_cast<LoadSDNode>(V2)))
10355 return DAG.getNode(PPCISD::VECINSERT, dl, VT, V1, V2, V3);
10356 return SDValue();
10357 }
10358 }
10359
10360 // Before P10, we have legal lowering for constant indices but not for
10361 // variable ones.
10362 if (!C)
10363 return SDValue();
10364
10365 // We can use MTVSRZ + VECINSERT for v8i16 and v16i8 types.
10366 if (VT == MVT::v8i16 || VT == MVT::v16i8) {
10367 SDValue Mtvsrz = DAG.getNode(PPCISD::MTVSRZ, dl, VT, V2);
10368 unsigned BytesInEachElement = VT.getVectorElementType().getSizeInBits() / 8;
10369 unsigned InsertAtElement = C->getZExtValue();
10370 unsigned InsertAtByte = InsertAtElement * BytesInEachElement;
10371 if (Subtarget.isLittleEndian()) {
10372 InsertAtByte = (16 - BytesInEachElement) - InsertAtByte;
10373 }
10374 return DAG.getNode(PPCISD::VECINSERT, dl, VT, V1, Mtvsrz,
10375 DAG.getConstant(InsertAtByte, dl, MVT::i32));
10376 }
10377 return Op;
10378}
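// Worked example (hypothetical operands, pre-P10 path): inserting element 3 of a
// v8i16 gives BytesInEachElement = 2 and InsertAtByte = 3 * 2 = 6 on big-endian;
// on little-endian it becomes (16 - 2) - 6 = 8, i.e. the same lane counted from
// the other end of the vector register.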
10379
10380SDValue PPCTargetLowering::LowerVectorLoad(SDValue Op,
10381 SelectionDAG &DAG) const {
10382 SDLoc dl(Op);
10383 LoadSDNode *LN = cast<LoadSDNode>(Op.getNode());
10384 SDValue LoadChain = LN->getChain();
10385 SDValue BasePtr = LN->getBasePtr();
10386 EVT VT = Op.getValueType();
10387
10388 if (VT != MVT::v256i1 && VT != MVT::v512i1)
10389 return Op;
10390
10391 // Type v256i1 is used for pairs and v512i1 is used for accumulators.
10392 // Here we create 2 or 4 v16i8 loads to load the pair or accumulator value in
10393 // 2 or 4 vsx registers.
10394  assert((VT != MVT::v512i1 || Subtarget.hasMMA()) &&
10395         "Type unsupported without MMA");
10396  assert((VT != MVT::v256i1 || Subtarget.pairedVectorMemops()) &&
10397         "Type unsupported without paired vector support");
10398 Align Alignment = LN->getAlign();
10399 SmallVector<SDValue, 4> Loads;
10400 SmallVector<SDValue, 4> LoadChains;
10401 unsigned NumVecs = VT.getSizeInBits() / 128;
10402 for (unsigned Idx = 0; Idx < NumVecs; ++Idx) {
10403 SDValue Load =
10404 DAG.getLoad(MVT::v16i8, dl, LoadChain, BasePtr,
10405 LN->getPointerInfo().getWithOffset(Idx * 16),
10406 commonAlignment(Alignment, Idx * 16),
10407 LN->getMemOperand()->getFlags(), LN->getAAInfo());
10408 BasePtr = DAG.getNode(ISD::ADD, dl, BasePtr.getValueType(), BasePtr,
10409 DAG.getConstant(16, dl, BasePtr.getValueType()));
10410 Loads.push_back(Load);
10411 LoadChains.push_back(Load.getValue(1));
10412 }
10413 if (Subtarget.isLittleEndian()) {
10414 std::reverse(Loads.begin(), Loads.end());
10415 std::reverse(LoadChains.begin(), LoadChains.end());
10416 }
10417 SDValue TF = DAG.getNode(ISD::TokenFactor, dl, MVT::Other, LoadChains);
10418 SDValue Value =
10419 DAG.getNode(VT == MVT::v512i1 ? PPCISD::ACC_BUILD : PPCISD::PAIR_BUILD,
10420 dl, VT, Loads);
10421 SDValue RetOps[] = {Value, TF};
10422 return DAG.getMergeValues(RetOps, dl);
10423}
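// Sketch of the expansion (assumed offsets): a v512i1 accumulator load becomes
// four v16i8 loads at byte offsets 0, 16, 32 and 48 from BasePtr; on little-endian
// targets the loaded values (and their chains) are reversed before being combined
// with PPCISD::ACC_BUILD, while a v256i1 pair uses two loads and PPCISD::PAIR_BUILD.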
10424
10425SDValue PPCTargetLowering::LowerVectorStore(SDValue Op,
10426 SelectionDAG &DAG) const {
10427 SDLoc dl(Op);
10428 StoreSDNode *SN = cast<StoreSDNode>(Op.getNode());
10429 SDValue StoreChain = SN->getChain();
10430 SDValue BasePtr = SN->getBasePtr();
10431 SDValue Value = SN->getValue();
10432 EVT StoreVT = Value.getValueType();
10433
10434 if (StoreVT != MVT::v256i1 && StoreVT != MVT::v512i1)
10435 return Op;
10436
10437 // Type v256i1 is used for pairs and v512i1 is used for accumulators.
10438 // Here we create 2 or 4 v16i8 stores to store the pair or accumulator
10439 // underlying registers individually.
10440  assert((StoreVT != MVT::v512i1 || Subtarget.hasMMA()) &&
10441         "Type unsupported without MMA");
10442  assert((StoreVT != MVT::v256i1 || Subtarget.pairedVectorMemops()) &&
10443         "Type unsupported without paired vector support");
10444 Align Alignment = SN->getAlign();
10445 SmallVector<SDValue, 4> Stores;
10446 unsigned NumVecs = 2;
10447 if (StoreVT == MVT::v512i1) {
10448 Value = DAG.getNode(PPCISD::XXMFACC, dl, MVT::v512i1, Value);
10449 NumVecs = 4;
10450 }
10451 for (unsigned Idx = 0; Idx < NumVecs; ++Idx) {
10452 unsigned VecNum = Subtarget.isLittleEndian() ? NumVecs - 1 - Idx : Idx;
10453 SDValue Elt = DAG.getNode(PPCISD::EXTRACT_VSX_REG, dl, MVT::v16i8, Value,
10454 DAG.getConstant(VecNum, dl, getPointerTy(DAG.getDataLayout())));
10455 SDValue Store =
10456 DAG.getStore(StoreChain, dl, Elt, BasePtr,
10457 SN->getPointerInfo().getWithOffset(Idx * 16),
10458 commonAlignment(Alignment, Idx * 16),
10459 SN->getMemOperand()->getFlags(), SN->getAAInfo());
10460 BasePtr = DAG.getNode(ISD::ADD, dl, BasePtr.getValueType(), BasePtr,
10461 DAG.getConstant(16, dl, BasePtr.getValueType()));
10462 Stores.push_back(Store);
10463 }
10464 SDValue TF = DAG.getTokenFactor(dl, Stores);
10465 return TF;
10466}
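// Mirror of the load expansion (assumed offsets): a v512i1 store first copies the
// accumulator out with XXMFACC, then stores its four underlying VSX registers at
// byte offsets 0, 16, 32 and 48; on little-endian targets the register extraction
// order is reversed while the memory offsets stay ascending.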
10467
10468SDValue PPCTargetLowering::LowerMUL(SDValue Op, SelectionDAG &DAG) const {
10469 SDLoc dl(Op);
10470 if (Op.getValueType() == MVT::v4i32) {
10471 SDValue LHS = Op.getOperand(0), RHS = Op.getOperand(1);
10472
10473 SDValue Zero = getCanonicalConstSplat(0, 1, MVT::v4i32, DAG, dl);
10474 // +16 as shift amt.
10475 SDValue Neg16 = getCanonicalConstSplat(-16, 4, MVT::v4i32, DAG, dl);
10476 SDValue RHSSwap = // = vrlw RHS, 16
10477 BuildIntrinsicOp(Intrinsic::ppc_altivec_vrlw, RHS, Neg16, DAG, dl);
10478
10479 // Shrinkify inputs to v8i16.
10480 LHS = DAG.getNode(ISD::BITCAST, dl, MVT::v8i16, LHS);
10481 RHS = DAG.getNode(ISD::BITCAST, dl, MVT::v8i16, RHS);
10482 RHSSwap = DAG.getNode(ISD::BITCAST, dl, MVT::v8i16, RHSSwap);
10483
10484 // Low parts multiplied together, generating 32-bit results (we ignore the
10485 // top parts).
10486 SDValue LoProd = BuildIntrinsicOp(Intrinsic::ppc_altivec_vmulouh,
10487 LHS, RHS, DAG, dl, MVT::v4i32);
10488
10489 SDValue HiProd = BuildIntrinsicOp(Intrinsic::ppc_altivec_vmsumuhm,
10490 LHS, RHSSwap, Zero, DAG, dl, MVT::v4i32);
10491 // Shift the high parts up 16 bits.
10492 HiProd = BuildIntrinsicOp(Intrinsic::ppc_altivec_vslw, HiProd,
10493 Neg16, DAG, dl);
10494 return DAG.getNode(ISD::ADD, dl, MVT::v4i32, LoProd, HiProd);
10495 } else if (Op.getValueType() == MVT::v16i8) {
10496 SDValue LHS = Op.getOperand(0), RHS = Op.getOperand(1);
10497 bool isLittleEndian = Subtarget.isLittleEndian();
10498
10499 // Multiply the even 8-bit parts, producing 16-bit sums.
10500 SDValue EvenParts = BuildIntrinsicOp(Intrinsic::ppc_altivec_vmuleub,
10501 LHS, RHS, DAG, dl, MVT::v8i16);
10502 EvenParts = DAG.getNode(ISD::BITCAST, dl, MVT::v16i8, EvenParts);
10503
10504 // Multiply the odd 8-bit parts, producing 16-bit sums.
10505 SDValue OddParts = BuildIntrinsicOp(Intrinsic::ppc_altivec_vmuloub,
10506 LHS, RHS, DAG, dl, MVT::v8i16);
10507 OddParts = DAG.getNode(ISD::BITCAST, dl, MVT::v16i8, OddParts);
10508
10509 // Merge the results together. Because vmuleub and vmuloub are
10510 // instructions with a big-endian bias, we must reverse the
10511 // element numbering and reverse the meaning of "odd" and "even"
10512 // when generating little endian code.
10513 int Ops[16];
10514 for (unsigned i = 0; i != 8; ++i) {
10515 if (isLittleEndian) {
10516 Ops[i*2 ] = 2*i;
10517 Ops[i*2+1] = 2*i+16;
10518 } else {
10519 Ops[i*2 ] = 2*i+1;
10520 Ops[i*2+1] = 2*i+1+16;
10521 }
10522 }
10523 if (isLittleEndian)
10524 return DAG.getVectorShuffle(MVT::v16i8, dl, OddParts, EvenParts, Ops);
10525 else
10526 return DAG.getVectorShuffle(MVT::v16i8, dl, EvenParts, OddParts, Ops);
10527 } else {
10528 llvm_unreachable("Unknown mul to lower!")::llvm::llvm_unreachable_internal("Unknown mul to lower!", "/build/llvm-toolchain-snapshot-13~++20210413100635+64c24f493e5f/llvm/lib/Target/PowerPC/PPCISelLowering.cpp"
, 10528)
;
10529 }
10530}
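// The v4i32 path above relies on the identity (per 32-bit lane, modulo 2^32):
//   a * b = (a_lo * b_lo) + ((a_lo * b_hi + a_hi * b_lo) << 16)
// where x_lo/x_hi are the low/high 16-bit halves of x. vmulouh produces the
// a_lo * b_lo terms, vmsumuhm on the rotated RHS accumulates the two cross
// products, and vslw shifts that sum into the high half before the final add.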
10531
10532SDValue PPCTargetLowering::LowerFP_ROUND(SDValue Op, SelectionDAG &DAG) const {
10533 bool IsStrict = Op->isStrictFPOpcode();
10534 if (Op.getOperand(IsStrict ? 1 : 0).getValueType() == MVT::f128 &&
10535 !Subtarget.hasP9Vector())
10536 return SDValue();
10537
10538 return Op;
10539}
10540
10541// Custom lowering for fpext v2f32 to v2f64
10542SDValue PPCTargetLowering::LowerFP_EXTEND(SDValue Op, SelectionDAG &DAG) const {
10543
10544  assert(Op.getOpcode() == ISD::FP_EXTEND &&
10545         "Should only be called for ISD::FP_EXTEND");
10546
10547 // FIXME: handle extends from half precision float vectors on P9.
10548 // We only want to custom lower an extend from v2f32 to v2f64.
10549 if (Op.getValueType() != MVT::v2f64 ||
10550 Op.getOperand(0).getValueType() != MVT::v2f32)
10551 return SDValue();
10552
10553 SDLoc dl(Op);
10554 SDValue Op0 = Op.getOperand(0);
10555
10556 switch (Op0.getOpcode()) {
10557 default:
10558 return SDValue();
10559 case ISD::EXTRACT_SUBVECTOR: {
10560    assert(Op0.getNumOperands() == 2 &&
10561           isa<ConstantSDNode>(Op0->getOperand(1)) &&
10562           "Node should have 2 operands with second one being a constant!");
10563
10564 if (Op0.getOperand(0).getValueType() != MVT::v4f32)
10565 return SDValue();
10566
10567 // Custom lower is only done for high or low doubleword.
10568 int Idx = cast<ConstantSDNode>(Op0.getOperand(1))->getZExtValue();
10569 if (Idx % 2 != 0)
10570 return SDValue();
10571
10572 // Since input is v4f32, at this point Idx is either 0 or 2.
10573 // Shift to get the doubleword position we want.
10574 int DWord = Idx >> 1;
10575
10576 // High and low word positions are different on little endian.
10577 if (Subtarget.isLittleEndian())
10578 DWord ^= 0x1;
10579
10580 return DAG.getNode(PPCISD::FP_EXTEND_HALF, dl, MVT::v2f64,
10581 Op0.getOperand(0), DAG.getConstant(DWord, dl, MVT::i32));
10582 }
10583 case ISD::FADD:
10584 case ISD::FMUL:
10585 case ISD::FSUB: {
10586 SDValue NewLoad[2];
10587 for (unsigned i = 0, ie = Op0.getNumOperands(); i != ie; ++i) {
10588      // Ensure both inputs are loads.
10589 SDValue LdOp = Op0.getOperand(i);
10590 if (LdOp.getOpcode() != ISD::LOAD)
10591 return SDValue();
10592 // Generate new load node.
10593 LoadSDNode *LD = cast<LoadSDNode>(LdOp);
10594 SDValue LoadOps[] = {LD->getChain(), LD->getBasePtr()};
10595 NewLoad[i] = DAG.getMemIntrinsicNode(
10596 PPCISD::LD_VSX_LH, dl, DAG.getVTList(MVT::v4f32, MVT::Other), LoadOps,
10597 LD->getMemoryVT(), LD->getMemOperand());
10598 }
10599 SDValue NewOp =
10600 DAG.getNode(Op0.getOpcode(), SDLoc(Op0), MVT::v4f32, NewLoad[0],
10601 NewLoad[1], Op0.getNode()->getFlags());
10602 return DAG.getNode(PPCISD::FP_EXTEND_HALF, dl, MVT::v2f64, NewOp,
10603 DAG.getConstant(0, dl, MVT::i32));
10604 }
10605 case ISD::LOAD: {
10606 LoadSDNode *LD = cast<LoadSDNode>(Op0);
10607 SDValue LoadOps[] = {LD->getChain(), LD->getBasePtr()};
10608 SDValue NewLd = DAG.getMemIntrinsicNode(
10609 PPCISD::LD_VSX_LH, dl, DAG.getVTList(MVT::v4f32, MVT::Other), LoadOps,
10610 LD->getMemoryVT(), LD->getMemOperand());
10611 return DAG.getNode(PPCISD::FP_EXTEND_HALF, dl, MVT::v2f64, NewLd,
10612 DAG.getConstant(0, dl, MVT::i32));
10613 }
10614 }
10615 llvm_unreachable("ERROR:Should return for all cases within swtich.")::llvm::llvm_unreachable_internal("ERROR:Should return for all cases within swtich."
, "/build/llvm-toolchain-snapshot-13~++20210413100635+64c24f493e5f/llvm/lib/Target/PowerPC/PPCISelLowering.cpp"
, 10615)
;
10616}
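// Example of the EXTRACT_SUBVECTOR case (hypothetical input): extracting the
// subvector at index 2 of a v4f32 gives Idx = 2, so DWord = 1 selects the second
// doubleword of the source; on little-endian targets DWord is flipped to 0 because
// the doubleword ordering within the VSX register is reversed.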
10617
10618/// LowerOperation - Provide custom lowering hooks for some operations.
10619///
10620SDValue PPCTargetLowering::LowerOperation(SDValue Op, SelectionDAG &DAG) const {
10621 switch (Op.getOpcode()) {
10622  default: llvm_unreachable("Wasn't expecting to be able to lower this!");
10623 case ISD::ConstantPool: return LowerConstantPool(Op, DAG);
10624 case ISD::BlockAddress: return LowerBlockAddress(Op, DAG);
10625 case ISD::GlobalAddress: return LowerGlobalAddress(Op, DAG);
10626 case ISD::GlobalTLSAddress: return LowerGlobalTLSAddress(Op, DAG);
10627 case ISD::JumpTable: return LowerJumpTable(Op, DAG);
10628 case ISD::STRICT_FSETCC:
10629 case ISD::STRICT_FSETCCS:
10630 case ISD::SETCC: return LowerSETCC(Op, DAG);
10631 case ISD::INIT_TRAMPOLINE: return LowerINIT_TRAMPOLINE(Op, DAG);
10632 case ISD::ADJUST_TRAMPOLINE: return LowerADJUST_TRAMPOLINE(Op, DAG);
10633
10634 // Variable argument lowering.
10635 case ISD::VASTART: return LowerVASTART(Op, DAG);
10636 case ISD::VAARG: return LowerVAARG(Op, DAG);
10637 case ISD::VACOPY: return LowerVACOPY(Op, DAG);
10638
10639 case ISD::STACKRESTORE: return LowerSTACKRESTORE(Op, DAG);
10640 case ISD::DYNAMIC_STACKALLOC: return LowerDYNAMIC_STACKALLOC(Op, DAG);
10641 case ISD::GET_DYNAMIC_AREA_OFFSET:
10642 return LowerGET_DYNAMIC_AREA_OFFSET(Op, DAG);
10643
10644 // Exception handling lowering.
10645 case ISD::EH_DWARF_CFA: return LowerEH_DWARF_CFA(Op, DAG);
10646 case ISD::EH_SJLJ_SETJMP: return lowerEH_SJLJ_SETJMP(Op, DAG);
10647 case ISD::EH_SJLJ_LONGJMP: return lowerEH_SJLJ_LONGJMP(Op, DAG);
10648
10649 case ISD::LOAD: return LowerLOAD(Op, DAG);
10650 case ISD::STORE: return LowerSTORE(Op, DAG);
10651 case ISD::TRUNCATE: return LowerTRUNCATE(Op, DAG);
10652 case ISD::SELECT_CC: return LowerSELECT_CC(Op, DAG);
10653 case ISD::STRICT_FP_TO_UINT:
10654 case ISD::STRICT_FP_TO_SINT:
10655 case ISD::FP_TO_UINT:
10656 case ISD::FP_TO_SINT: return LowerFP_TO_INT(Op, DAG, SDLoc(Op));
10657 case ISD::STRICT_UINT_TO_FP:
10658 case ISD::STRICT_SINT_TO_FP:
10659 case ISD::UINT_TO_FP:
10660 case ISD::SINT_TO_FP: return LowerINT_TO_FP(Op, DAG);
10661 case ISD::FLT_ROUNDS_: return LowerFLT_ROUNDS_(Op, DAG);
10662
10663 // Lower 64-bit shifts.
10664 case ISD::SHL_PARTS: return LowerSHL_PARTS(Op, DAG);
10665 case ISD::SRL_PARTS: return LowerSRL_PARTS(Op, DAG);
10666 case ISD::SRA_PARTS: return LowerSRA_PARTS(Op, DAG);
10667
10668 case ISD::FSHL: return LowerFunnelShift(Op, DAG);
10669 case ISD::FSHR: return LowerFunnelShift(Op, DAG);
10670
10671 // Vector-related lowering.
10672 case ISD::BUILD_VECTOR: return LowerBUILD_VECTOR(Op, DAG);
10673 case ISD::VECTOR_SHUFFLE: return LowerVECTOR_SHUFFLE(Op, DAG);
10674 case ISD::INTRINSIC_WO_CHAIN: return LowerINTRINSIC_WO_CHAIN(Op, DAG);
10675 case ISD::SCALAR_TO_VECTOR: return LowerSCALAR_TO_VECTOR(Op, DAG);
10676 case ISD::INSERT_VECTOR_ELT: return LowerINSERT_VECTOR_ELT(Op, DAG);
10677 case ISD::MUL: return LowerMUL(Op, DAG);
10678 case ISD::FP_EXTEND: return LowerFP_EXTEND(Op, DAG);
10679 case ISD::STRICT_FP_ROUND:
10680 case ISD::FP_ROUND:
10681 return LowerFP_ROUND(Op, DAG);
10682 case ISD::ROTL: return LowerROTL(Op, DAG);
10683
10684 // For counter-based loop handling.
10685 case ISD::INTRINSIC_W_CHAIN: return SDValue();
10686
10687 case ISD::BITCAST: return LowerBITCAST(Op, DAG);
10688
10689 // Frame & Return address.
10690 case ISD::RETURNADDR: return LowerRETURNADDR(Op, DAG);
10691 case ISD::FRAMEADDR: return LowerFRAMEADDR(Op, DAG);
10692
10693 case ISD::INTRINSIC_VOID:
10694 return LowerINTRINSIC_VOID(Op, DAG);
10695 case ISD::BSWAP:
10696 return LowerBSWAP(Op, DAG);
10697 case ISD::ATOMIC_CMP_SWAP:
10698 return LowerATOMIC_CMP_SWAP(Op, DAG);
10699 }
10700}
10701
10702void PPCTargetLowering::ReplaceNodeResults(SDNode *N,
10703 SmallVectorImpl<SDValue>&Results,
10704 SelectionDAG &DAG) const {
10705 SDLoc dl(N);
10706 switch (N->getOpcode()) {
10707 default:
10708 llvm_unreachable("Do not know how to custom type legalize this operation!")::llvm::llvm_unreachable_internal("Do not know how to custom type legalize this operation!"
, "/build/llvm-toolchain-snapshot-13~++20210413100635+64c24f493e5f/llvm/lib/Target/PowerPC/PPCISelLowering.cpp"
, 10708)
;
10709 case ISD::READCYCLECOUNTER: {
10710 SDVTList VTs = DAG.getVTList(MVT::i32, MVT::i32, MVT::Other);
10711 SDValue RTB = DAG.getNode(PPCISD::READ_TIME_BASE, dl, VTs, N->getOperand(0));
10712
10713 Results.push_back(
10714 DAG.getNode(ISD::BUILD_PAIR, dl, MVT::i64, RTB, RTB.getValue(1)));
10715 Results.push_back(RTB.getValue(2));
10716 break;
10717 }
10718 case ISD::INTRINSIC_W_CHAIN: {
10719 if (cast<ConstantSDNode>(N->getOperand(1))->getZExtValue() !=
10720 Intrinsic::loop_decrement)
10721 break;
10722
10723    assert(N->getValueType(0) == MVT::i1 &&
10724           "Unexpected result type for CTR decrement intrinsic");
10725 EVT SVT = getSetCCResultType(DAG.getDataLayout(), *DAG.getContext(),
10726 N->getValueType(0));
10727 SDVTList VTs = DAG.getVTList(SVT, MVT::Other);
10728 SDValue NewInt = DAG.getNode(N->getOpcode(), dl, VTs, N->getOperand(0),
10729 N->getOperand(1));
10730
10731 Results.push_back(DAG.getNode(ISD::TRUNCATE, dl, MVT::i1, NewInt));
10732 Results.push_back(NewInt.getValue(1));
10733 break;
10734 }
10735 case ISD::VAARG: {
10736 if (!Subtarget.isSVR4ABI() || Subtarget.isPPC64())
10737 return;
10738
10739 EVT VT = N->getValueType(0);
10740
10741 if (VT == MVT::i64) {
10742 SDValue NewNode = LowerVAARG(SDValue(N, 1), DAG);
10743
10744 Results.push_back(NewNode);
10745 Results.push_back(NewNode.getValue(1));
10746 }
10747 return;
10748 }
10749 case ISD::STRICT_FP_TO_SINT:
10750 case ISD::STRICT_FP_TO_UINT:
10751 case ISD::FP_TO_SINT:
10752 case ISD::FP_TO_UINT:
10753 // LowerFP_TO_INT() can only handle f32 and f64.
10754 if (N->getOperand(N->isStrictFPOpcode() ? 1 : 0).getValueType() ==
10755 MVT::ppcf128)
10756 return;
10757 Results.push_back(LowerFP_TO_INT(SDValue(N, 0), DAG, dl));
10758 return;
10759 case ISD::TRUNCATE: {
10760 if (!N->getValueType(0).isVector())
10761 return;
10762 SDValue Lowered = LowerTRUNCATEVector(SDValue(N, 0), DAG);
10763 if (Lowered)
10764 Results.push_back(Lowered);
10765 return;
10766 }
10767 case ISD::FSHL:
10768 case ISD::FSHR:
10769 // Don't handle funnel shifts here.
10770 return;
10771 case ISD::BITCAST:
10772 // Don't handle bitcast here.
10773 return;
10774 case ISD::FP_EXTEND:
10775 SDValue Lowered = LowerFP_EXTEND(SDValue(N, 0), DAG);
10776 if (Lowered)
10777 Results.push_back(Lowered);
10778 return;
10779 }
10780}
10781
10782//===----------------------------------------------------------------------===//
10783// Other Lowering Code
10784//===----------------------------------------------------------------------===//
10785
10786static Instruction* callIntrinsic(IRBuilder<> &Builder, Intrinsic::ID Id) {
10787 Module *M = Builder.GetInsertBlock()->getParent()->getParent();
10788 Function *Func = Intrinsic::getDeclaration(M, Id);
10789 return Builder.CreateCall(Func, {});
10790}
10791
10792// The mappings for emitLeading/TrailingFence are taken from
10793// http://www.cl.cam.ac.uk/~pes20/cpp/cpp0xmappings.html
10794Instruction *PPCTargetLowering::emitLeadingFence(IRBuilder<> &Builder,
10795 Instruction *Inst,
10796 AtomicOrdering Ord) const {
10797 if (Ord == AtomicOrdering::SequentiallyConsistent)
10798 return callIntrinsic(Builder, Intrinsic::ppc_sync);
10799 if (isReleaseOrStronger(Ord))
10800 return callIntrinsic(Builder, Intrinsic::ppc_lwsync);
10801 return nullptr;
10802}
10803
10804Instruction *PPCTargetLowering::emitTrailingFence(IRBuilder<> &Builder,
10805 Instruction *Inst,
10806 AtomicOrdering Ord) const {
10807 if (Inst->hasAtomicLoad() && isAcquireOrStronger(Ord)) {
10808 // See http://www.cl.cam.ac.uk/~pes20/cpp/cpp0xmappings.html and
10809 // http://www.rdrop.com/users/paulmck/scalability/paper/N2745r.2011.03.04a.html
10810 // and http://www.cl.cam.ac.uk/~pes20/cppppc/ for justification.
10811 if (isa<LoadInst>(Inst) && Subtarget.isPPC64())
10812 return Builder.CreateCall(
10813 Intrinsic::getDeclaration(
10814 Builder.GetInsertBlock()->getParent()->getParent(),
10815 Intrinsic::ppc_cfence, {Inst->getType()}),
10816 {Inst});
10817 // FIXME: Can use isync for rmw operation.
10818 return callIntrinsic(Builder, Intrinsic::ppc_lwsync);
10819 }
10820 return nullptr;
10821}
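// In short: seq_cst operations get a leading sync; with acquire or stronger
// ordering, a 64-bit atomic load is followed by a ppc_cfence keyed on the loaded
// value, and other atomic operations that include a load (e.g. RMWs) are followed
// by a trailing lwsync.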
10822
10823MachineBasicBlock *
10824PPCTargetLowering::EmitAtomicBinary(MachineInstr &MI, MachineBasicBlock *BB,
10825 unsigned AtomicSize,
10826 unsigned BinOpcode,
10827 unsigned CmpOpcode,
10828 unsigned CmpPred) const {
10829 // This also handles ATOMIC_SWAP, indicated by BinOpcode==0.
10830 const TargetInstrInfo *TII = Subtarget.getInstrInfo();
10831
10832 auto LoadMnemonic = PPC::LDARX;
10833 auto StoreMnemonic = PPC::STDCX;
10834 switch (AtomicSize) {
10835 default:
10836 llvm_unreachable("Unexpected size of atomic entity")::llvm::llvm_unreachable_internal("Unexpected size of atomic entity"
, "/build/llvm-toolchain-snapshot-13~++20210413100635+64c24f493e5f/llvm/lib/Target/PowerPC/PPCISelLowering.cpp"
, 10836)
;
10837 case 1:
10838 LoadMnemonic = PPC::LBARX;
10839 StoreMnemonic = PPC::STBCX;
10840    assert(Subtarget.hasPartwordAtomics() && "Call this only with size >=4");
10841 break;
10842 case 2:
10843 LoadMnemonic = PPC::LHARX;
10844 StoreMnemonic = PPC::STHCX;
10845    assert(Subtarget.hasPartwordAtomics() && "Call this only with size >=4");
10846 break;
10847 case 4:
10848 LoadMnemonic = PPC::LWARX;
10849 StoreMnemonic = PPC::STWCX;
10850 break;
10851 case 8:
10852 LoadMnemonic = PPC::LDARX;
10853 StoreMnemonic = PPC::STDCX;
10854 break;
10855 }
10856
10857 const BasicBlock *LLVM_BB = BB->getBasicBlock();
10858 MachineFunction *F = BB->getParent();
10859 MachineFunction::iterator It = ++BB->getIterator();
10860
10861 Register dest = MI.getOperand(0).getReg();
10862 Register ptrA = MI.getOperand(1).getReg();
10863 Register ptrB = MI.getOperand(2).getReg();
10864 Register incr = MI.getOperand(3).getReg();
10865 DebugLoc dl = MI.getDebugLoc();
10866
10867 MachineBasicBlock *loopMBB = F->CreateMachineBasicBlock(LLVM_BB);
10868 MachineBasicBlock *loop2MBB =
10869 CmpOpcode ? F->CreateMachineBasicBlock(LLVM_BB) : nullptr;
10870 MachineBasicBlock *exitMBB = F->CreateMachineBasicBlock(LLVM_BB);
10871 F->insert(It, loopMBB);
10872 if (CmpOpcode)
10873 F->insert(It, loop2MBB);
10874 F->insert(It, exitMBB);
10875 exitMBB->splice(exitMBB->begin(), BB,
10876 std::next(MachineBasicBlock::iterator(MI)), BB->end());
10877 exitMBB->transferSuccessorsAndUpdatePHIs(BB);
10878
10879 MachineRegisterInfo &RegInfo = F->getRegInfo();
10880 Register TmpReg = (!BinOpcode) ? incr :
10881 RegInfo.createVirtualRegister( AtomicSize == 8 ? &PPC::G8RCRegClass
10882 : &PPC::GPRCRegClass);
10883
10884 // thisMBB:
10885 // ...
10886 // fallthrough --> loopMBB
10887 BB->addSuccessor(loopMBB);
10888
10889 // loopMBB:
10890 // l[wd]arx dest, ptr
10891 // add r0, dest, incr
10892 // st[wd]cx. r0, ptr
10893 // bne- loopMBB
10894 // fallthrough --> exitMBB
10895
10896 // For max/min...
10897 // loopMBB:
10898 // l[wd]arx dest, ptr
10899 // cmpl?[wd] incr, dest
10900 // bgt exitMBB
10901 // loop2MBB:
10902 // st[wd]cx. dest, ptr
10903 // bne- loopMBB
10904 // fallthrough --> exitMBB
10905
10906 BB = loopMBB;
10907 BuildMI(BB, dl, TII->get(LoadMnemonic), dest)
10908 .addReg(ptrA).addReg(ptrB);
10909 if (BinOpcode)
10910 BuildMI(BB, dl, TII->get(BinOpcode), TmpReg).addReg(incr).addReg(dest);
10911 if (CmpOpcode) {
10912 // Signed comparisons of byte or halfword values must be sign-extended.
10913 if (CmpOpcode == PPC::CMPW && AtomicSize < 4) {
10914 Register ExtReg = RegInfo.createVirtualRegister(&PPC::GPRCRegClass);
10915 BuildMI(BB, dl, TII->get(AtomicSize == 1 ? PPC::EXTSB : PPC::EXTSH),
10916 ExtReg).addReg(dest);
10917 BuildMI(BB, dl, TII->get(CmpOpcode), PPC::CR0)
10918 .addReg(incr).addReg(ExtReg);
10919 } else
10920 BuildMI(BB, dl, TII->get(CmpOpcode), PPC::CR0)
10921 .addReg(incr).addReg(dest);
10922
10923 BuildMI(BB, dl, TII->get(PPC::BCC))
10924 .addImm(CmpPred).addReg(PPC::CR0).addMBB(exitMBB);
10925 BB->addSuccessor(loop2MBB);
10926 BB->addSuccessor(exitMBB);
10927 BB = loop2MBB;
10928 }
10929 BuildMI(BB, dl, TII->get(StoreMnemonic))
10930 .addReg(TmpReg).addReg(ptrA).addReg(ptrB);
10931 BuildMI(BB, dl, TII->get(PPC::BCC))
10932 .addImm(PPC::PRED_NE).addReg(PPC::CR0).addMBB(loopMBB);
10933 BB->addSuccessor(loopMBB);
10934 BB->addSuccessor(exitMBB);
10935
10936 // exitMBB:
10937 // ...
10938 BB = exitMBB;
10939 return BB;
10940}
10941
10942static bool isSignExtended(MachineInstr &MI, const PPCInstrInfo *TII) {
10943 switch(MI.getOpcode()) {
10944 default:
10945 return false;
10946 case PPC::COPY:
10947 return TII->isSignExtended(MI);
10948 case PPC::LHA:
10949 case PPC::LHA8:
10950 case PPC::LHAU:
10951 case PPC::LHAU8:
10952 case PPC::LHAUX:
10953 case PPC::LHAUX8:
10954 case PPC::LHAX:
10955 case PPC::LHAX8:
10956 case PPC::LWA:
10957 case PPC::LWAUX:
10958 case PPC::LWAX:
10959 case PPC::LWAX_32:
10960 case PPC::LWA_32:
10961 case PPC::PLHA:
10962 case PPC::PLHA8:
10963 case PPC::PLHA8pc:
10964 case PPC::PLHApc:
10965 case PPC::PLWA:
10966 case PPC::PLWA8:
10967 case PPC::PLWA8pc:
10968 case PPC::PLWApc:
10969 case PPC::EXTSB:
10970 case PPC::EXTSB8:
10971 case PPC::EXTSB8_32_64:
10972 case PPC::EXTSB8_rec:
10973 case PPC::EXTSB_rec:
10974 case PPC::EXTSH:
10975 case PPC::EXTSH8:
10976 case PPC::EXTSH8_32_64:
10977 case PPC::EXTSH8_rec:
10978 case PPC::EXTSH_rec:
10979 case PPC::EXTSW:
10980 case PPC::EXTSWSLI:
10981 case PPC::EXTSWSLI_32_64:
10982 case PPC::EXTSWSLI_32_64_rec:
10983 case PPC::EXTSWSLI_rec:
10984 case PPC::EXTSW_32:
10985 case PPC::EXTSW_32_64:
10986 case PPC::EXTSW_32_64_rec:
10987 case PPC::EXTSW_rec:
10988 case PPC::SRAW:
10989 case PPC::SRAWI:
10990 case PPC::SRAWI_rec:
10991 case PPC::SRAW_rec:
10992 return true;
10993 }
10994 return false;
10995}
10996
10997MachineBasicBlock *PPCTargetLowering::EmitPartwordAtomicBinary(
10998 MachineInstr &MI, MachineBasicBlock *BB,
10999 bool is8bit, // operation
11000 unsigned BinOpcode, unsigned CmpOpcode, unsigned CmpPred) const {
11001 // This also handles ATOMIC_SWAP, indicated by BinOpcode==0.
11002 const PPCInstrInfo *TII = Subtarget.getInstrInfo();
11003
11004 // If this is a signed comparison and the value being compared is not known
11005 // to be sign extended, sign extend it here.
11006 DebugLoc dl = MI.getDebugLoc();
11007 MachineFunction *F = BB->getParent();
11008 MachineRegisterInfo &RegInfo = F->getRegInfo();
11009 Register incr = MI.getOperand(3).getReg();
11010 bool IsSignExtended = Register::isVirtualRegister(incr) &&
11011 isSignExtended(*RegInfo.getVRegDef(incr), TII);
11012
11013 if (CmpOpcode == PPC::CMPW && !IsSignExtended) {
11014 Register ValueReg = RegInfo.createVirtualRegister(&PPC::GPRCRegClass);
11015 BuildMI(*BB, MI, dl, TII->get(is8bit ? PPC::EXTSB : PPC::EXTSH), ValueReg)
11016 .addReg(MI.getOperand(3).getReg());
11017 MI.getOperand(3).setReg(ValueReg);
11018 }
11019  // If we support part-word atomic mnemonics, just use them.
11020 if (Subtarget.hasPartwordAtomics())
11021 return EmitAtomicBinary(MI, BB, is8bit ? 1 : 2, BinOpcode, CmpOpcode,
11022 CmpPred);
11023
11024 // In 64 bit mode we have to use 64 bits for addresses, even though the
11025 // lwarx/stwcx are 32 bits. With the 32-bit atomics we can use address
11026 // registers without caring whether they're 32 or 64, but here we're
11027 // doing actual arithmetic on the addresses.
11028 bool is64bit = Subtarget.isPPC64();
11029 bool isLittleEndian = Subtarget.isLittleEndian();
11030 unsigned ZeroReg = is64bit ? PPC::ZERO8 : PPC::ZERO;
11031
11032 const BasicBlock *LLVM_BB = BB->getBasicBlock();
11033 MachineFunction::iterator It = ++BB->getIterator();
11034
11035 Register dest = MI.getOperand(0).getReg();
11036 Register ptrA = MI.getOperand(1).getReg();
11037 Register ptrB = MI.getOperand(2).getReg();
11038
11039 MachineBasicBlock *loopMBB = F->CreateMachineBasicBlock(LLVM_BB);
11040 MachineBasicBlock *loop2MBB =
11041 CmpOpcode ? F->CreateMachineBasicBlock(LLVM_BB) : nullptr;
11042 MachineBasicBlock *exitMBB = F->CreateMachineBasicBlock(LLVM_BB);
11043 F->insert(It, loopMBB);
11044 if (CmpOpcode)
11045 F->insert(It, loop2MBB);
11046 F->insert(It, exitMBB);
11047 exitMBB->splice(exitMBB->begin(), BB,
11048 std::next(MachineBasicBlock::iterator(MI)), BB->end());
11049 exitMBB->transferSuccessorsAndUpdatePHIs(BB);
11050
11051 const TargetRegisterClass *RC =
11052 is64bit ? &PPC::G8RCRegClass : &PPC::GPRCRegClass;
11053 const TargetRegisterClass *GPRC = &PPC::GPRCRegClass;
11054
11055 Register PtrReg = RegInfo.createVirtualRegister(RC);
11056 Register Shift1Reg = RegInfo.createVirtualRegister(GPRC);
11057 Register ShiftReg =
11058 isLittleEndian ? Shift1Reg : RegInfo.createVirtualRegister(GPRC);
11059 Register Incr2Reg = RegInfo.createVirtualRegister(GPRC);
11060 Register MaskReg = RegInfo.createVirtualRegister(GPRC);
11061 Register Mask2Reg = RegInfo.createVirtualRegister(GPRC);
11062 Register Mask3Reg = RegInfo.createVirtualRegister(GPRC);
11063 Register Tmp2Reg = RegInfo.createVirtualRegister(GPRC);
11064 Register Tmp3Reg = RegInfo.createVirtualRegister(GPRC);
11065 Register Tmp4Reg = RegInfo.createVirtualRegister(GPRC);
11066 Register TmpDestReg = RegInfo.createVirtualRegister(GPRC);
11067 Register Ptr1Reg;
11068 Register TmpReg =
11069 (!BinOpcode) ? Incr2Reg : RegInfo.createVirtualRegister(GPRC);
11070
11071 // thisMBB:
11072 // ...
11073 // fallthrough --> loopMBB
11074 BB->addSuccessor(loopMBB);
11075
11076 // The 4-byte load must be aligned, while a char or short may be
11077 // anywhere in the word. Hence all this nasty bookkeeping code.
11078 // add ptr1, ptrA, ptrB [copy if ptrA==0]
11079 // rlwinm shift1, ptr1, 3, 27, 28 [3, 27, 27]
11080 // xori shift, shift1, 24 [16]
11081 // rlwinm ptr, ptr1, 0, 0, 29
11082 // slw incr2, incr, shift
11083 // li mask2, 255 [li mask3, 0; ori mask2, mask3, 65535]
11084 // slw mask, mask2, shift
11085 // loopMBB:
11086 // lwarx tmpDest, ptr
11087 // add tmp, tmpDest, incr2
11088 // andc tmp2, tmpDest, mask
11089 // and tmp3, tmp, mask
11090 // or tmp4, tmp3, tmp2
11091 // stwcx. tmp4, ptr
11092 // bne- loopMBB
11093 // fallthrough --> exitMBB
11094 // srw dest, tmpDest, shift
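  // Worked example (hypothetical operands, big-endian byte case): for a byte at
  // word offset 2, rlwinm computes shift1 = 16, xori gives shift = 8, the aligned
  // word address is ptr1 with the low two bits cleared, and mask = 0xFF << 8
  // selects exactly the addressed byte within the 32-bit word; on little-endian
  // the xori is skipped and shift stays 16.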
11095 if (ptrA != ZeroReg) {
11096 Ptr1Reg = RegInfo.createVirtualRegister(RC);
11097 BuildMI(BB, dl, TII->get(is64bit ? PPC::ADD8 : PPC::ADD4), Ptr1Reg)
11098 .addReg(ptrA)
11099 .addReg(ptrB);
11100 } else {
11101 Ptr1Reg = ptrB;
11102 }
11103  // We need to use a 32-bit subregister to avoid a register class mismatch in
11104  // 64-bit mode.
11105 BuildMI(BB, dl, TII->get(PPC::RLWINM), Shift1Reg)
11106 .addReg(Ptr1Reg, 0, is64bit ? PPC::sub_32 : 0)
11107 .addImm(3)
11108 .addImm(27)
11109 .addImm(is8bit ? 28 : 27);
11110 if (!isLittleEndian)
11111 BuildMI(BB, dl, TII->get(PPC::XORI), ShiftReg)
11112 .addReg(Shift1Reg)
11113 .addImm(is8bit ? 24 : 16);
11114 if (is64bit)
11115 BuildMI(BB, dl, TII->get(PPC::RLDICR), PtrReg)
11116 .addReg(Ptr1Reg)
11117 .addImm(0)
11118 .addImm(61);
11119 else
11120 BuildMI(BB, dl, TII->get(PPC::RLWINM), PtrReg)
11121 .addReg(Ptr1Reg)
11122 .addImm(0)
11123 .addImm(0)
11124 .addImm(29);
11125 BuildMI(BB, dl, TII->get(PPC::SLW), Incr2Reg).addReg(incr).addReg(ShiftReg);
11126 if (is8bit)
11127 BuildMI(BB, dl, TII->get(PPC::LI), Mask2Reg).addImm(255);
11128 else {
11129 BuildMI(BB, dl, TII->get(PPC::LI), Mask3Reg).addImm(0);
11130 BuildMI(BB, dl, TII->get(PPC::ORI), Mask2Reg)
11131 .addReg(Mask3Reg)
11132 .addImm(65535);
11133 }
11134 BuildMI(BB, dl, TII->get(PPC::SLW), MaskReg)
11135 .addReg(Mask2Reg)
11136 .addReg(ShiftReg);
11137
11138 BB = loopMBB;
11139 BuildMI(BB, dl, TII->get(PPC::LWARX), TmpDestReg)
11140 .addReg(ZeroReg)
11141 .addReg(PtrReg);
11142 if (BinOpcode)
11143 BuildMI(BB, dl, TII->get(BinOpcode), TmpReg)
11144 .addReg(Incr2Reg)
11145 .addReg(TmpDestReg);
11146 BuildMI(BB, dl, TII->get(PPC::ANDC), Tmp2Reg)
11147 .addReg(TmpDestReg)
11148 .addReg(MaskReg);
11149 BuildMI(BB, dl, TII->get(PPC::AND), Tmp3Reg).addReg(TmpReg).addReg(MaskReg);
11150 if (CmpOpcode) {
11151 // For unsigned comparisons, we can directly compare the shifted values.
11152 // For signed comparisons we shift and sign extend.
11153 Register SReg = RegInfo.createVirtualRegister(GPRC);
11154 BuildMI(BB, dl, TII->get(PPC::AND), SReg)
11155 .addReg(TmpDestReg)
11156 .addReg(MaskReg);
11157 unsigned ValueReg = SReg;
11158 unsigned CmpReg = Incr2Reg;
11159 if (CmpOpcode == PPC::CMPW) {
11160 ValueReg = RegInfo.createVirtualRegister(GPRC);
11161 BuildMI(BB, dl, TII->get(PPC::SRW), ValueReg)
11162 .addReg(SReg)
11163 .addReg(ShiftReg);
11164 Register ValueSReg = RegInfo.createVirtualRegister(GPRC);
11165 BuildMI(BB, dl, TII->get(is8bit ? PPC::EXTSB : PPC::EXTSH), ValueSReg)
11166 .addReg(ValueReg);
11167 ValueReg = ValueSReg;
11168 CmpReg = incr;
11169 }
11170 BuildMI(BB, dl, TII->get(CmpOpcode), PPC::CR0)
11171 .addReg(CmpReg)
11172 .addReg(ValueReg);
11173 BuildMI(BB, dl, TII->get(PPC::BCC))
11174 .addImm(CmpPred)
11175 .addReg(PPC::CR0)
11176 .addMBB(exitMBB);
11177 BB->addSuccessor(loop2MBB);
11178 BB->addSuccessor(exitMBB);
11179 BB = loop2MBB;
11180 }
11181 BuildMI(BB, dl, TII->get(PPC::OR), Tmp4Reg).addReg(Tmp3Reg).addReg(Tmp2Reg);
11182 BuildMI(BB, dl, TII->get(PPC::STWCX))
11183 .addReg(Tmp4Reg)
11184 .addReg(ZeroReg)
11185 .addReg(PtrReg);
11186 BuildMI(BB, dl, TII->get(PPC::BCC))
11187 .addImm(PPC::PRED_NE)
11188 .addReg(PPC::CR0)
11189 .addMBB(loopMBB);
11190 BB->addSuccessor(loopMBB);
11191 BB->addSuccessor(exitMBB);
11192
11193 // exitMBB:
11194 // ...
11195 BB = exitMBB;
11196 BuildMI(*BB, BB->begin(), dl, TII->get(PPC::SRW), dest)
11197 .addReg(TmpDestReg)
11198 .addReg(ShiftReg);
11199 return BB;
11200}
11201
11202llvm::MachineBasicBlock *
11203PPCTargetLowering::emitEHSjLjSetJmp(MachineInstr &MI,
11204 MachineBasicBlock *MBB) const {
11205 DebugLoc DL = MI.getDebugLoc();
11206 const TargetInstrInfo *TII = Subtarget.getInstrInfo();
11207 const PPCRegisterInfo *TRI = Subtarget.getRegisterInfo();
11208
11209 MachineFunction *MF = MBB->getParent();
11210 MachineRegisterInfo &MRI = MF->getRegInfo();
11211
11212 const BasicBlock *BB = MBB->getBasicBlock();
11213 MachineFunction::iterator I = ++MBB->getIterator();
11214
11215 Register DstReg = MI.getOperand(0).getReg();
11216 const TargetRegisterClass *RC = MRI.getRegClass(DstReg);
11217  assert(TRI->isTypeLegalForClass(*RC, MVT::i32) && "Invalid destination!");
11218 Register mainDstReg = MRI.createVirtualRegister(RC);
11219 Register restoreDstReg = MRI.createVirtualRegister(RC);
11220
11221 MVT PVT = getPointerTy(MF->getDataLayout());
11222  assert((PVT == MVT::i64 || PVT == MVT::i32) &&
11223         "Invalid Pointer Size!");
11224 // For v = setjmp(buf), we generate
11225 //
11226 // thisMBB:
11227 // SjLjSetup mainMBB
11228 // bl mainMBB
11229 // v_restore = 1
11230 // b sinkMBB
11231 //
11232 // mainMBB:
11233 // buf[LabelOffset] = LR
11234 // v_main = 0
11235 //
11236 // sinkMBB:
11237 // v = phi(main, restore)
11238 //
11239
11240 MachineBasicBlock *thisMBB = MBB;
11241 MachineBasicBlock *mainMBB = MF->CreateMachineBasicBlock(BB);
11242 MachineBasicBlock *sinkMBB = MF->CreateMachineBasicBlock(BB);
11243 MF->insert(I, mainMBB);
11244 MF->insert(I, sinkMBB);
11245
11246 MachineInstrBuilder MIB;
11247
11248 // Transfer the remainder of BB and its successor edges to sinkMBB.
11249 sinkMBB->splice(sinkMBB->begin(), MBB,
11250 std::next(MachineBasicBlock::iterator(MI)), MBB->end());
11251 sinkMBB->transferSuccessorsAndUpdatePHIs(MBB);
11252
11253 // Note that the structure of the jmp_buf used here is not compatible
11254 // with that used by libc, and is not designed to be. Specifically, it
11255 // stores only those 'reserved' registers that LLVM does not otherwise
11256 // understand how to spill. Also, by convention, by the time this
11257 // intrinsic is called, Clang has already stored the frame address in the
11258 // first slot of the buffer and stack address in the third. Following the
11259 // X86 target code, we'll store the jump address in the second slot. We also
11260 // need to save the TOC pointer (R2) to handle jumps between shared
11261 // libraries, and that will be stored in the fourth slot. The thread
11262 // identifier (R13) is not affected.
11263
11264 // thisMBB:
11265 const int64_t LabelOffset = 1 * PVT.getStoreSize();
11266 const int64_t TOCOffset = 3 * PVT.getStoreSize();
11267 const int64_t BPOffset = 4 * PVT.getStoreSize();
11268
11269  // Prepare the IP (jump target address) in a register.
11270 const TargetRegisterClass *PtrRC = getRegClassFor(PVT);
11271 Register LabelReg = MRI.createVirtualRegister(PtrRC);
11272 Register BufReg = MI.getOperand(1).getReg();
11273
11274 if (Subtarget.is64BitELFABI()) {
11275 setUsesTOCBasePtr(*MBB->getParent());
11276 MIB = BuildMI(*thisMBB, MI, DL, TII->get(PPC::STD))
11277 .addReg(PPC::X2)
11278 .addImm(TOCOffset)
11279 .addReg(BufReg)
11280 .cloneMemRefs(MI);
11281 }
11282
11283  // Naked functions never have a base pointer, and so we use r1. For all
11284  // other functions, this decision must be delayed until PEI.
11285 unsigned BaseReg;
11286 if (MF->getFunction().hasFnAttribute(Attribute::Naked))
11287 BaseReg = Subtarget.isPPC64() ? PPC::X1 : PPC::R1;
11288 else
11289 BaseReg = Subtarget.isPPC64() ? PPC::BP8 : PPC::BP;
11290
11291 MIB = BuildMI(*thisMBB, MI, DL,
11292 TII->get(Subtarget.isPPC64() ? PPC::STD : PPC::STW))
11293 .addReg(BaseReg)
11294 .addImm(BPOffset)
11295 .addReg(BufReg)
11296 .cloneMemRefs(MI);
11297
11298 // Setup
11299 MIB = BuildMI(*thisMBB, MI, DL, TII->get(PPC::BCLalways)).addMBB(mainMBB);
11300 MIB.addRegMask(TRI->getNoPreservedMask());
11301
11302 BuildMI(*thisMBB, MI, DL, TII->get(PPC::LI), restoreDstReg).addImm(1);
11303
11304 MIB = BuildMI(*thisMBB, MI, DL, TII->get(PPC::EH_SjLj_Setup))
11305 .addMBB(mainMBB);
11306 MIB = BuildMI(*thisMBB, MI, DL, TII->get(PPC::B)).addMBB(sinkMBB);
11307
11308 thisMBB->addSuccessor(mainMBB, BranchProbability::getZero());
11309 thisMBB->addSuccessor(sinkMBB, BranchProbability::getOne());
11310
11311 // mainMBB:
11312 // mainDstReg = 0
11313 MIB =
11314 BuildMI(mainMBB, DL,
11315 TII->get(Subtarget.isPPC64() ? PPC::MFLR8 : PPC::MFLR), LabelReg);
11316
11317 // Store IP
11318 if (Subtarget.isPPC64()) {
11319 MIB = BuildMI(mainMBB, DL, TII->get(PPC::STD))
11320 .addReg(LabelReg)
11321 .addImm(LabelOffset)
11322 .addReg(BufReg);
11323 } else {
11324 MIB = BuildMI(mainMBB, DL, TII->get(PPC::STW))
11325 .addReg(LabelReg)
11326 .addImm(LabelOffset)
11327 .addReg(BufReg);
11328 }
11329 MIB.cloneMemRefs(MI);
11330
11331 BuildMI(mainMBB, DL, TII->get(PPC::LI), mainDstReg).addImm(0);
11332 mainMBB->addSuccessor(sinkMBB);
11333
11334 // sinkMBB:
11335 BuildMI(*sinkMBB, sinkMBB->begin(), DL,
11336 TII->get(PPC::PHI), DstReg)
11337 .addReg(mainDstReg).addMBB(mainMBB)
11338 .addReg(restoreDstReg).addMBB(thisMBB);
11339
11340 MI.eraseFromParent();
11341 return sinkMBB;
11342}
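// Resulting buffer layout (in pointer-sized slots, as described above): slot 0
// holds the frame address and slot 2 the stack address (both stored by Clang
// before this point), slot 1 receives LR as the jump address, slot 3 the TOC
// pointer (R2) on 64-bit ELF, and slot 4 the base pointer.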
11343
11344MachineBasicBlock *
11345PPCTargetLowering::emitEHSjLjLongJmp(MachineInstr &MI,
11346 MachineBasicBlock *MBB) const {
11347 DebugLoc DL = MI.getDebugLoc();
11348 const TargetInstrInfo *TII = Subtarget.getInstrInfo();
11349
11350 MachineFunction *MF = MBB->getParent();
11351 MachineRegisterInfo &MRI = MF->getRegInfo();
11352
11353 MVT PVT = getPointerTy(MF->getDataLayout());
11354  assert((PVT == MVT::i64 || PVT == MVT::i32) &&
11355         "Invalid Pointer Size!");
11356
11357 const TargetRegisterClass *RC =
11358 (PVT == MVT::i64) ? &PPC::G8RCRegClass : &PPC::GPRCRegClass;
11359 Register Tmp = MRI.createVirtualRegister(RC);
11360 // Since FP is only updated here but NOT referenced, it's treated as GPR.
11361 unsigned FP = (PVT == MVT::i64) ? PPC::X31 : PPC::R31;
11362 unsigned SP = (PVT == MVT::i64) ? PPC::X1 : PPC::R1;
11363 unsigned BP =
11364 (PVT == MVT::i64)
11365 ? PPC::X30
11366 : (Subtarget.isSVR4ABI() && isPositionIndependent() ? PPC::R29
11367 : PPC::R30);
11368
11369 MachineInstrBuilder MIB;
11370
11371 const int64_t LabelOffset = 1 * PVT.getStoreSize();
11372 const int64_t SPOffset = 2 * PVT.getStoreSize();
11373 const int64_t TOCOffset = 3 * PVT.getStoreSize();
11374 const int64_t BPOffset = 4 * PVT.getStoreSize();
11375
11376 Register BufReg = MI.getOperand(0).getReg();
11377
11378 // Reload FP (the jumped-to function may not have had a
11379 // frame pointer, and if so, then its r31 will be restored
11380 // as necessary).
11381 if (PVT == MVT::i64) {
11382 MIB = BuildMI(*MBB, MI, DL, TII->get(PPC::LD), FP)
11383 .addImm(0)
11384 .addReg(BufReg);
11385 } else {
11386 MIB = BuildMI(*MBB, MI, DL, TII->get(PPC::LWZ), FP)
11387 .addImm(0)
11388 .addReg(BufReg);
11389 }
11390 MIB.cloneMemRefs(MI);
11391
11392 // Reload IP
11393 if (PVT == MVT::i64) {
11394 MIB = BuildMI(*MBB, MI, DL, TII->get(PPC::LD), Tmp)
11395 .addImm(LabelOffset)
11396 .addReg(BufReg);
11397 } else {
11398 MIB = BuildMI(*MBB, MI, DL, TII->get(PPC::LWZ), Tmp)
11399 .addImm(LabelOffset)
11400 .addReg(BufReg);
11401 }
11402 MIB.cloneMemRefs(MI);
11403
11404 // Reload SP
11405 if (PVT == MVT::i64) {
11406 MIB = BuildMI(*MBB, MI, DL, TII->get(PPC::LD), SP)
11407 .addImm(SPOffset)
11408 .addReg(BufReg);
11409 } else {
11410 MIB = BuildMI(*MBB, MI, DL, TII->get(PPC::LWZ), SP)
11411 .addImm(SPOffset)
11412 .addReg(BufReg);
11413 }
11414 MIB.cloneMemRefs(MI);
11415
11416 // Reload BP
11417 if (PVT == MVT::i64) {
11418 MIB = BuildMI(*MBB, MI, DL, TII->get(PPC::LD), BP)
11419 .addImm(BPOffset)
11420 .addReg(BufReg);
11421 } else {
11422 MIB = BuildMI(*MBB, MI, DL, TII->get(PPC::LWZ), BP)
11423 .addImm(BPOffset)
11424 .addReg(BufReg);
11425 }
11426 MIB.cloneMemRefs(MI);
11427
11428 // Reload TOC
11429 if (PVT == MVT::i64 && Subtarget.isSVR4ABI()) {
11430 setUsesTOCBasePtr(*MBB->getParent());
11431 MIB = BuildMI(*MBB, MI, DL, TII->get(PPC::LD), PPC::X2)
11432 .addImm(TOCOffset)
11433 .addReg(BufReg)
11434 .cloneMemRefs(MI);
11435 }
11436
11437 // Jump
11438 BuildMI(*MBB, MI, DL,
11439 TII->get(PVT == MVT::i64 ? PPC::MTCTR8 : PPC::MTCTR)).addReg(Tmp);
11440 BuildMI(*MBB, MI, DL, TII->get(PVT == MVT::i64 ? PPC::BCTR8 : PPC::BCTR));
11441
11442 MI.eraseFromParent();
11443 return MBB;
11444}
11445
11446bool PPCTargetLowering::hasInlineStackProbe(MachineFunction &MF) const {
11447 // If the function specifically requests inline stack probes, emit them.
11448 if (MF.getFunction().hasFnAttribute("probe-stack"))
11449 return MF.getFunction().getFnAttribute("probe-stack").getValueAsString() ==
11450 "inline-asm";
11451 return false;
11452}
11453
11454unsigned PPCTargetLowering::getStackProbeSize(MachineFunction &MF) const {
11455 const TargetFrameLowering *TFI = Subtarget.getFrameLowering();
11456 unsigned StackAlign = TFI->getStackAlignment();
11457  assert(StackAlign >= 1 && isPowerOf2_32(StackAlign) &&
11458         "Unexpected stack alignment");
11459 // The default stack probe size is 4096 if the function has no
11460 // stack-probe-size attribute.
11461 unsigned StackProbeSize = 4096;
11462 const Function &Fn = MF.getFunction();
11463 if (Fn.hasFnAttribute("stack-probe-size"))
11464 Fn.getFnAttribute("stack-probe-size")
11465 .getValueAsString()
11466 .getAsInteger(0, StackProbeSize);
11467 // Round down to the stack alignment.
11468 StackProbeSize &= ~(StackAlign - 1);
11469 return StackProbeSize ? StackProbeSize : StackAlign;
11470}
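// Example (hypothetical attribute values): with a 16-byte stack alignment and
// "stack-probe-size"="1000", StackProbeSize is rounded down to 992; if the
// rounded value were 0 (a requested size below the alignment), the stack
// alignment itself would be used as the probe size.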
11471
11472// Lower dynamic stack allocation with probing. `emitProbedAlloca` is split
11473// into three phases. In the first phase, it uses the pseudo instruction
11474// PREPARE_PROBED_ALLOCA to get the future result of the actual FramePointer and
11475// FinalStackPtr. In the second phase, it generates a loop that probes blocks.
11476// Finally, it uses the pseudo instruction DYNAREAOFFSET to get the future result
11477// of MaxCallFrameSize so that it can calculate the correct data area pointer.
11478MachineBasicBlock *
11479PPCTargetLowering::emitProbedAlloca(MachineInstr &MI,
11480 MachineBasicBlock *MBB) const {
11481 const bool isPPC64 = Subtarget.isPPC64();
11482 MachineFunction *MF = MBB->getParent();
11483 const TargetInstrInfo *TII = Subtarget.getInstrInfo();
11484 DebugLoc DL = MI.getDebugLoc();
11485 const unsigned ProbeSize = getStackProbeSize(*MF);
11486 const BasicBlock *ProbedBB = MBB->getBasicBlock();
11487 MachineRegisterInfo &MRI = MF->getRegInfo();
11488 // The CFG of probing stack looks as
11489 // +-----+
11490 // | MBB |
11491 // +--+--+
11492 // |
11493 // +----v----+
11494 // +--->+ TestMBB +---+
11495 // | +----+----+ |
11496 // | | |
11497 // | +-----v----+ |
11498 // +---+ BlockMBB | |
11499 // +----------+ |
11500 // |
11501 // +---------+ |
11502 // | TailMBB +<--+
11503 // +---------+
11504 // In MBB, calculate previous frame pointer and final stack pointer.
11505 // In TestMBB, test if sp is equal to final stack pointer, if so, jump to
11506 // TailMBB. In BlockMBB, update the sp atomically and jump back to TestMBB.
11507 // TailMBB is spliced via \p MI.
11508 MachineBasicBlock *TestMBB = MF->CreateMachineBasicBlock(ProbedBB);
11509 MachineBasicBlock *TailMBB = MF->CreateMachineBasicBlock(ProbedBB);
11510 MachineBasicBlock *BlockMBB = MF->CreateMachineBasicBlock(ProbedBB);
11511
11512 MachineFunction::iterator MBBIter = ++MBB->getIterator();
11513 MF->insert(MBBIter, TestMBB);
11514 MF->insert(MBBIter, BlockMBB);
11515 MF->insert(MBBIter, TailMBB);
11516
11517 const TargetRegisterClass *G8RC = &PPC::G8RCRegClass;
11518 const TargetRegisterClass *GPRC = &PPC::GPRCRegClass;
11519
11520 Register DstReg = MI.getOperand(0).getReg();
11521 Register NegSizeReg = MI.getOperand(1).getReg();
11522 Register SPReg = isPPC64 ? PPC::X1 : PPC::R1;
11523 Register FinalStackPtr = MRI.createVirtualRegister(isPPC64 ? G8RC : GPRC);
11524 Register FramePointer = MRI.createVirtualRegister(isPPC64 ? G8RC : GPRC);
11525 Register ActualNegSizeReg = MRI.createVirtualRegister(isPPC64 ? G8RC : GPRC);
11526
11527  // Since the value of NegSizeReg might be realigned during prologue/epilogue
11528  // insertion, insert a PREPARE_PROBED_ALLOCA pseudo instruction to get the
11529  // actual FramePointer and NegSize.
11530 unsigned ProbeOpc;
11531 if (!MRI.hasOneNonDBGUse(NegSizeReg))
11532 ProbeOpc =
11533 isPPC64 ? PPC::PREPARE_PROBED_ALLOCA_64 : PPC::PREPARE_PROBED_ALLOCA_32;
11534 else
11535    // By using the NEGSIZE_SAME_REG form, ActualNegSizeReg and NegSizeReg are
11536    // allocated to the same physical register, avoiding a redundant copy when
11537    // NegSizeReg has only one use (the current MI, which is about to be
11538    // replaced by PREPARE_PROBED_ALLOCA).
11539 ProbeOpc = isPPC64 ? PPC::PREPARE_PROBED_ALLOCA_NEGSIZE_SAME_REG_64
11540 : PPC::PREPARE_PROBED_ALLOCA_NEGSIZE_SAME_REG_32;
11541 BuildMI(*MBB, {MI}, DL, TII->get(ProbeOpc), FramePointer)
11542 .addDef(ActualNegSizeReg)
11543 .addReg(NegSizeReg)
11544 .add(MI.getOperand(2))
11545 .add(MI.getOperand(3));
11546
11547  // Calculate the final stack pointer, which equals SP + ActualNegSize.
11548 BuildMI(*MBB, {MI}, DL, TII->get(isPPC64 ? PPC::ADD8 : PPC::ADD4),
11549 FinalStackPtr)
11550 .addReg(SPReg)
11551 .addReg(ActualNegSizeReg);
11552
11553 // Materialize a scratch register for update.
11554 int64_t NegProbeSize = -(int64_t)ProbeSize;
11555  assert(isInt<32>(NegProbeSize) && "Unhandled probe size!");
11556 Register ScratchReg = MRI.createVirtualRegister(isPPC64 ? G8RC : GPRC);
11557 if (!isInt<16>(NegProbeSize)) {
11558 Register TempReg = MRI.createVirtualRegister(isPPC64 ? G8RC : GPRC);
11559 BuildMI(*MBB, {MI}, DL, TII->get(isPPC64 ? PPC::LIS8 : PPC::LIS), TempReg)
11560 .addImm(NegProbeSize >> 16);
11561 BuildMI(*MBB, {MI}, DL, TII->get(isPPC64 ? PPC::ORI8 : PPC::ORI),
11562 ScratchReg)
11563 .addReg(TempReg)
11564 .addImm(NegProbeSize & 0xFFFF);
11565 } else
11566 BuildMI(*MBB, {MI}, DL, TII->get(isPPC64 ? PPC::LI8 : PPC::LI), ScratchReg)
11567 .addImm(NegProbeSize);
11568
11569 {
11570 // Probing leading residual part.
11571 Register Div = MRI.createVirtualRegister(isPPC64 ? G8RC : GPRC);
11572 BuildMI(*MBB, {MI}, DL, TII->get(isPPC64 ? PPC::DIVD : PPC::DIVW), Div)
11573 .addReg(ActualNegSizeReg)
11574 .addReg(ScratchReg);
11575 Register Mul = MRI.createVirtualRegister(isPPC64 ? G8RC : GPRC);
11576 BuildMI(*MBB, {MI}, DL, TII->get(isPPC64 ? PPC::MULLD : PPC::MULLW), Mul)
11577 .addReg(Div)
11578 .addReg(ScratchReg);
11579 Register NegMod = MRI.createVirtualRegister(isPPC64 ? G8RC : GPRC);
11580 BuildMI(*MBB, {MI}, DL, TII->get(isPPC64 ? PPC::SUBF8 : PPC::SUBF), NegMod)
11581 .addReg(Mul)
11582 .addReg(ActualNegSizeReg);
11583 BuildMI(*MBB, {MI}, DL, TII->get(isPPC64 ? PPC::STDUX : PPC::STWUX), SPReg)
11584 .addReg(FramePointer)
11585 .addReg(SPReg)
11586 .addReg(NegMod);
11587 }
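The DIVD/MULLD/SUBF sequence above computes the leading residual of the allocation so that what remains is an exact multiple of ProbeSize. A minimal standalone sketch of that arithmetic in plain C++, with illustrative names only:

#include <cstdint>

// Mirrors the Div/Mul/NegMod registers above: the remainder of the signed
// division NegSize / NegProbeSize is what gets probed first.
int64_t leadingResidual(int64_t NegSize, int64_t NegProbeSize) {
  int64_t Div = NegSize / NegProbeSize;  // DIVD/DIVW
  int64_t Mul = Div * NegProbeSize;      // MULLD/MULLW
  int64_t NegMod = NegSize - Mul;        // SUBF computes b - a
  return NegMod;                         // consumed by the STDUX/STWUX probe
}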
11588
11589 {
11590 // Remaining part should be multiple of ProbeSize.
11591 Register CmpResult = MRI.createVirtualRegister(&PPC::CRRCRegClass);
11592 BuildMI(TestMBB, DL, TII->get(isPPC64 ? PPC::CMPD : PPC::CMPW), CmpResult)
11593 .addReg(SPReg)
11594 .addReg(FinalStackPtr);
11595 BuildMI(TestMBB, DL, TII->get(PPC::BCC))
11596 .addImm(PPC::PRED_EQ)
11597 .addReg(CmpResult)
11598 .addMBB(TailMBB);
11599 TestMBB->addSuccessor(BlockMBB);
11600 TestMBB->addSuccessor(TailMBB);
11601 }
11602
11603 {
11604 // Touch the block.
11605 // |P...|P...|P...
11606 BuildMI(BlockMBB, DL, TII->get(isPPC64 ? PPC::STDUX : PPC::STWUX), SPReg)
11607 .addReg(FramePointer)
11608 .addReg(SPReg)
11609 .addReg(ScratchReg);
11610 BuildMI(BlockMBB, DL, TII->get(PPC::B)).addMBB(TestMBB);
11611 BlockMBB->addSuccessor(TestMBB);
11612 }
11613
11614 // Calculation of MaxCallFrameSize is deferred to prolog/epilog; use the
11615 // DYNAREAOFFSET pseudo instruction to get the future result.
11616 Register MaxCallFrameSizeReg =
11617 MRI.createVirtualRegister(isPPC64 ? G8RC : GPRC);
11618 BuildMI(TailMBB, DL,
11619 TII->get(isPPC64 ? PPC::DYNAREAOFFSET8 : PPC::DYNAREAOFFSET),
11620 MaxCallFrameSizeReg)
11621 .add(MI.getOperand(2))
11622 .add(MI.getOperand(3));
11623 BuildMI(TailMBB, DL, TII->get(isPPC64 ? PPC::ADD8 : PPC::ADD4), DstReg)
11624 .addReg(SPReg)
11625 .addReg(MaxCallFrameSizeReg);
11626
11627 // Splice instructions after MI to TailMBB.
11628 TailMBB->splice(TailMBB->end(), MBB,
11629 std::next(MachineBasicBlock::iterator(MI)), MBB->end());
11630 TailMBB->transferSuccessorsAndUpdatePHIs(MBB);
11631 MBB->addSuccessor(TestMBB);
11632
11633 // Delete the pseudo instruction.
11634 MI.eraseFromParent();
11635
11636 ++NumDynamicAllocaProbed;
11637 return TailMBB;
11638}
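Taken together, TestMBB, BlockMBB and TailMBB behave like the loop sketched below. This is only a behavioural sketch in plain C++ with illustrative names; the real code works with negative sizes and probes by storing the frame pointer with store-with-update instructions.

#include <cstdint>

// Touch the stack one ProbeSize block at a time until the final stack
// pointer is reached, so no page in the newly allocated area is skipped.
char *probedAlloca(char *SP, int64_t Size, int64_t ProbeSize) {
  char *FinalSP = SP - Size;             // FinalStackPtr = SP + ActualNegSize
  SP -= Size % ProbeSize;                // leading residual part
  *SP = 0;                               // probe (STDUX/STWUX above)
  while (SP != FinalSP) {                // TestMBB: cmp SP, FinalStackPtr
    SP -= ProbeSize;                     // BlockMBB: advance by ProbeSize
    *SP = 0;                             //           and touch the block
  }
  return SP;                             // TailMBB resumes from here
}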
11639
11640MachineBasicBlock *
11641PPCTargetLowering::EmitInstrWithCustomInserter(MachineInstr &MI,
11642 MachineBasicBlock *BB) const {
11643 if (MI.getOpcode() == TargetOpcode::STACKMAP ||
11644 MI.getOpcode() == TargetOpcode::PATCHPOINT) {
11645 if (Subtarget.is64BitELFABI() &&
11646 MI.getOpcode() == TargetOpcode::PATCHPOINT &&
11647 !Subtarget.isUsingPCRelativeCalls()) {
11648 // Call lowering should have added an r2 operand to indicate a dependence
11649 // on the TOC base pointer value. It can't however, because there is no
11650 // way to mark the dependence as implicit there, and so the stackmap code
11651 // will confuse it with a regular operand. Instead, add the dependence
11652 // here.
11653 MI.addOperand(MachineOperand::CreateReg(PPC::X2, false, true));
11654 }
11655
11656 return emitPatchPoint(MI, BB);
11657 }
11658
11659 if (MI.getOpcode() == PPC::EH_SjLj_SetJmp32 ||
11660 MI.getOpcode() == PPC::EH_SjLj_SetJmp64) {
11661 return emitEHSjLjSetJmp(MI, BB);
11662 } else if (MI.getOpcode() == PPC::EH_SjLj_LongJmp32 ||
11663 MI.getOpcode() == PPC::EH_SjLj_LongJmp64) {
11664 return emitEHSjLjLongJmp(MI, BB);
11665 }
11666
11667 const TargetInstrInfo *TII = Subtarget.getInstrInfo();
11668
11669 // To "insert" these instructions we actually have to insert their
11670 // control-flow patterns.
11671 const BasicBlock *LLVM_BB = BB->getBasicBlock();
11672 MachineFunction::iterator It = ++BB->getIterator();
11673
11674 MachineFunction *F = BB->getParent();
11675
11676 if (MI.getOpcode() == PPC::SELECT_CC_I4 ||
11677 MI.getOpcode() == PPC::SELECT_CC_I8 || MI.getOpcode() == PPC::SELECT_I4 ||
11678 MI.getOpcode() == PPC::SELECT_I8) {
11679 SmallVector<MachineOperand, 2> Cond;
11680 if (MI.getOpcode() == PPC::SELECT_CC_I4 ||
11681 MI.getOpcode() == PPC::SELECT_CC_I8)
11682 Cond.push_back(MI.getOperand(4));
11683 else
11684 Cond.push_back(MachineOperand::CreateImm(PPC::PRED_BIT_SET));
11685 Cond.push_back(MI.getOperand(1));
11686
11687 DebugLoc dl = MI.getDebugLoc();
11688 TII->insertSelect(*BB, MI, dl, MI.getOperand(0).getReg(), Cond,
11689 MI.getOperand(2).getReg(), MI.getOperand(3).getReg());
11690 } else if (MI.getOpcode() == PPC::SELECT_CC_F4 ||
11691 MI.getOpcode() == PPC::SELECT_CC_F8 ||
11692 MI.getOpcode() == PPC::SELECT_CC_F16 ||
11693 MI.getOpcode() == PPC::SELECT_CC_VRRC ||
11694 MI.getOpcode() == PPC::SELECT_CC_VSFRC ||
11695 MI.getOpcode() == PPC::SELECT_CC_VSSRC ||
11696 MI.getOpcode() == PPC::SELECT_CC_VSRC ||
11697 MI.getOpcode() == PPC::SELECT_CC_SPE4 ||
11698 MI.getOpcode() == PPC::SELECT_CC_SPE ||
11699 MI.getOpcode() == PPC::SELECT_F4 ||
11700 MI.getOpcode() == PPC::SELECT_F8 ||
11701 MI.getOpcode() == PPC::SELECT_F16 ||
11702 MI.getOpcode() == PPC::SELECT_SPE ||
11703 MI.getOpcode() == PPC::SELECT_SPE4 ||
11704 MI.getOpcode() == PPC::SELECT_VRRC ||
11705 MI.getOpcode() == PPC::SELECT_VSFRC ||
11706 MI.getOpcode() == PPC::SELECT_VSSRC ||
11707 MI.getOpcode() == PPC::SELECT_VSRC) {
11708 // The incoming instruction knows the destination vreg to set, the
11709 // condition code register to branch on, the true/false values to
11710 // select between, and a branch opcode to use.
11711
11712 // thisMBB:
11713 // ...
11714 // TrueVal = ...
11715 // cmpTY ccX, r1, r2
11716 // bCC copy1MBB
11717 // fallthrough --> copy0MBB
11718 MachineBasicBlock *thisMBB = BB;
11719 MachineBasicBlock *copy0MBB = F->CreateMachineBasicBlock(LLVM_BB);
11720 MachineBasicBlock *sinkMBB = F->CreateMachineBasicBlock(LLVM_BB);
11721 DebugLoc dl = MI.getDebugLoc();
11722 F->insert(It, copy0MBB);
11723 F->insert(It, sinkMBB);
11724
11725 // Transfer the remainder of BB and its successor edges to sinkMBB.
11726 sinkMBB->splice(sinkMBB->begin(), BB,
11727 std::next(MachineBasicBlock::iterator(MI)), BB->end());
11728 sinkMBB->transferSuccessorsAndUpdatePHIs(BB);
11729
11730 // Next, add the true and fallthrough blocks as its successors.
11731 BB->addSuccessor(copy0MBB);
11732 BB->addSuccessor(sinkMBB);
11733
11734 if (MI.getOpcode() == PPC::SELECT_I4 || MI.getOpcode() == PPC::SELECT_I8 ||
11735 MI.getOpcode() == PPC::SELECT_F4 || MI.getOpcode() == PPC::SELECT_F8 ||
11736 MI.getOpcode() == PPC::SELECT_F16 ||
11737 MI.getOpcode() == PPC::SELECT_SPE4 ||
11738 MI.getOpcode() == PPC::SELECT_SPE ||
11739 MI.getOpcode() == PPC::SELECT_VRRC ||
11740 MI.getOpcode() == PPC::SELECT_VSFRC ||
11741 MI.getOpcode() == PPC::SELECT_VSSRC ||
11742 MI.getOpcode() == PPC::SELECT_VSRC) {
11743 BuildMI(BB, dl, TII->get(PPC::BC))
11744 .addReg(MI.getOperand(1).getReg())
11745 .addMBB(sinkMBB);
11746 } else {
11747 unsigned SelectPred = MI.getOperand(4).getImm();
11748 BuildMI(BB, dl, TII->get(PPC::BCC))
11749 .addImm(SelectPred)
11750 .addReg(MI.getOperand(1).getReg())
11751 .addMBB(sinkMBB);
11752 }
11753
11754 // copy0MBB:
11755 // %FalseValue = ...
11756 // # fallthrough to sinkMBB
11757 BB = copy0MBB;
11758
11759 // Update machine-CFG edges
11760 BB->addSuccessor(sinkMBB);
11761
11762 // sinkMBB:
11763 // %Result = phi [ %FalseValue, copy0MBB ], [ %TrueValue, thisMBB ]
11764 // ...
11765 BB = sinkMBB;
11766 BuildMI(*BB, BB->begin(), dl, TII->get(PPC::PHI), MI.getOperand(0).getReg())
11767 .addReg(MI.getOperand(3).getReg())
11768 .addMBB(copy0MBB)
11769 .addReg(MI.getOperand(2).getReg())
11770 .addMBB(thisMBB);
11771 } else if (MI.getOpcode() == PPC::ReadTB) {
11772 // To read the 64-bit time-base register on a 32-bit target, we read the
11773 // two halves. Should the counter have wrapped while it was being read, we
11774 // need to try again.
11775 // ...
11776 // readLoop:
11777 // mfspr Rx,TBU # load from TBU
11778 // mfspr Ry,TB # load from TB
11779 // mfspr Rz,TBU # load from TBU
11780 // cmpw crX,Rx,Rz # check if 'old'='new'
11781 // bne readLoop # branch if they're not equal
11782 // ...
11783
11784 MachineBasicBlock *readMBB = F->CreateMachineBasicBlock(LLVM_BB);
11785 MachineBasicBlock *sinkMBB = F->CreateMachineBasicBlock(LLVM_BB);
11786 DebugLoc dl = MI.getDebugLoc();
11787 F->insert(It, readMBB);
11788 F->insert(It, sinkMBB);
11789
11790 // Transfer the remainder of BB and its successor edges to sinkMBB.
11791 sinkMBB->splice(sinkMBB->begin(), BB,
11792 std::next(MachineBasicBlock::iterator(MI)), BB->end());
11793 sinkMBB->transferSuccessorsAndUpdatePHIs(BB);
11794
11795 BB->addSuccessor(readMBB);
11796 BB = readMBB;
11797
11798 MachineRegisterInfo &RegInfo = F->getRegInfo();
11799 Register ReadAgainReg = RegInfo.createVirtualRegister(&PPC::GPRCRegClass);
11800 Register LoReg = MI.getOperand(0).getReg();
11801 Register HiReg = MI.getOperand(1).getReg();
11802
11803 BuildMI(BB, dl, TII->get(PPC::MFSPR), HiReg).addImm(269);
11804 BuildMI(BB, dl, TII->get(PPC::MFSPR), LoReg).addImm(268);
11805 BuildMI(BB, dl, TII->get(PPC::MFSPR), ReadAgainReg).addImm(269);
11806
11807 Register CmpReg = RegInfo.createVirtualRegister(&PPC::CRRCRegClass);
11808
11809 BuildMI(BB, dl, TII->get(PPC::CMPW), CmpReg)
11810 .addReg(HiReg)
11811 .addReg(ReadAgainReg);
11812 BuildMI(BB, dl, TII->get(PPC::BCC))
11813 .addImm(PPC::PRED_NE)
11814 .addReg(CmpReg)
11815 .addMBB(readMBB);
11816
11817 BB->addSuccessor(readMBB);
11818 BB->addSuccessor(sinkMBB);
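The readLoop above is the usual high/low/high retry idiom for reading a 64-bit counter with 32-bit loads. A standalone sketch of the same idea; readTBU and readTBL are assumed stand-ins for the mfspr reads:

#include <cstdint>

uint32_t readTBU(); // stand-in for mfspr Rx, TBU
uint32_t readTBL(); // stand-in for mfspr Ry, TB

// Retry until the upper half is unchanged across the read of the lower half.
uint64_t readTimeBase() {
  uint32_t Hi, Lo;
  do {
    Hi = readTBU();
    Lo = readTBL();
  } while (readTBU() != Hi);
  return (uint64_t(Hi) << 32) | Lo;
}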
11819 } else if (MI.getOpcode() == PPC::ATOMIC_LOAD_ADD_I8)
11820 BB = EmitPartwordAtomicBinary(MI, BB, true, PPC::ADD4);
11821 else if (MI.getOpcode() == PPC::ATOMIC_LOAD_ADD_I16)
11822 BB = EmitPartwordAtomicBinary(MI, BB, false, PPC::ADD4);
11823 else if (MI.getOpcode() == PPC::ATOMIC_LOAD_ADD_I32)
11824 BB = EmitAtomicBinary(MI, BB, 4, PPC::ADD4);
11825 else if (MI.getOpcode() == PPC::ATOMIC_LOAD_ADD_I64)
11826 BB = EmitAtomicBinary(MI, BB, 8, PPC::ADD8);
11827
11828 else if (MI.getOpcode() == PPC::ATOMIC_LOAD_AND_I8)
11829 BB = EmitPartwordAtomicBinary(MI, BB, true, PPC::AND);
11830 else if (MI.getOpcode() == PPC::ATOMIC_LOAD_AND_I16)
11831 BB = EmitPartwordAtomicBinary(MI, BB, false, PPC::AND);
11832 else if (MI.getOpcode() == PPC::ATOMIC_LOAD_AND_I32)
11833 BB = EmitAtomicBinary(MI, BB, 4, PPC::AND);
11834 else if (MI.getOpcode() == PPC::ATOMIC_LOAD_AND_I64)
11835 BB = EmitAtomicBinary(MI, BB, 8, PPC::AND8);
11836
11837 else if (MI.getOpcode() == PPC::ATOMIC_LOAD_OR_I8)
11838 BB = EmitPartwordAtomicBinary(MI, BB, true, PPC::OR);
11839 else if (MI.getOpcode() == PPC::ATOMIC_LOAD_OR_I16)
11840 BB = EmitPartwordAtomicBinary(MI, BB, false, PPC::OR);
11841 else if (MI.getOpcode() == PPC::ATOMIC_LOAD_OR_I32)
11842 BB = EmitAtomicBinary(MI, BB, 4, PPC::OR);
11843 else if (MI.getOpcode() == PPC::ATOMIC_LOAD_OR_I64)
11844 BB = EmitAtomicBinary(MI, BB, 8, PPC::OR8);
11845
11846 else if (MI.getOpcode() == PPC::ATOMIC_LOAD_XOR_I8)
11847 BB = EmitPartwordAtomicBinary(MI, BB, true, PPC::XOR);
11848 else if (MI.getOpcode() == PPC::ATOMIC_LOAD_XOR_I16)
11849 BB = EmitPartwordAtomicBinary(MI, BB, false, PPC::XOR);
11850 else if (MI.getOpcode() == PPC::ATOMIC_LOAD_XOR_I32)
11851 BB = EmitAtomicBinary(MI, BB, 4, PPC::XOR);
11852 else if (MI.getOpcode() == PPC::ATOMIC_LOAD_XOR_I64)
11853 BB = EmitAtomicBinary(MI, BB, 8, PPC::XOR8);
11854
11855 else if (MI.getOpcode() == PPC::ATOMIC_LOAD_NAND_I8)
11856 BB = EmitPartwordAtomicBinary(MI, BB, true, PPC::NAND);
11857 else if (MI.getOpcode() == PPC::ATOMIC_LOAD_NAND_I16)
11858 BB = EmitPartwordAtomicBinary(MI, BB, false, PPC::NAND);
11859 else if (MI.getOpcode() == PPC::ATOMIC_LOAD_NAND_I32)
11860 BB = EmitAtomicBinary(MI, BB, 4, PPC::NAND);
11861 else if (MI.getOpcode() == PPC::ATOMIC_LOAD_NAND_I64)
11862 BB = EmitAtomicBinary(MI, BB, 8, PPC::NAND8);
11863
11864 else if (MI.getOpcode() == PPC::ATOMIC_LOAD_SUB_I8)
11865 BB = EmitPartwordAtomicBinary(MI, BB, true, PPC::SUBF);
11866 else if (MI.getOpcode() == PPC::ATOMIC_LOAD_SUB_I16)
11867 BB = EmitPartwordAtomicBinary(MI, BB, false, PPC::SUBF);
11868 else if (MI.getOpcode() == PPC::ATOMIC_LOAD_SUB_I32)
11869 BB = EmitAtomicBinary(MI, BB, 4, PPC::SUBF);
11870 else if (MI.getOpcode() == PPC::ATOMIC_LOAD_SUB_I64)
11871 BB = EmitAtomicBinary(MI, BB, 8, PPC::SUBF8);
11872
11873 else if (MI.getOpcode() == PPC::ATOMIC_LOAD_MIN_I8)
11874 BB = EmitPartwordAtomicBinary(MI, BB, true, 0, PPC::CMPW, PPC::PRED_GE);
11875 else if (MI.getOpcode() == PPC::ATOMIC_LOAD_MIN_I16)
11876 BB = EmitPartwordAtomicBinary(MI, BB, false, 0, PPC::CMPW, PPC::PRED_GE);
11877 else if (MI.getOpcode() == PPC::ATOMIC_LOAD_MIN_I32)
11878 BB = EmitAtomicBinary(MI, BB, 4, 0, PPC::CMPW, PPC::PRED_GE);
11879 else if (MI.getOpcode() == PPC::ATOMIC_LOAD_MIN_I64)
11880 BB = EmitAtomicBinary(MI, BB, 8, 0, PPC::CMPD, PPC::PRED_GE);
11881
11882 else if (MI.getOpcode() == PPC::ATOMIC_LOAD_MAX_I8)
11883 BB = EmitPartwordAtomicBinary(MI, BB, true, 0, PPC::CMPW, PPC::PRED_LE);
11884 else if (MI.getOpcode() == PPC::ATOMIC_LOAD_MAX_I16)
11885 BB = EmitPartwordAtomicBinary(MI, BB, false, 0, PPC::CMPW, PPC::PRED_LE);
11886 else if (MI.getOpcode() == PPC::ATOMIC_LOAD_MAX_I32)
11887 BB = EmitAtomicBinary(MI, BB, 4, 0, PPC::CMPW, PPC::PRED_LE);
11888 else if (MI.getOpcode() == PPC::ATOMIC_LOAD_MAX_I64)
11889 BB = EmitAtomicBinary(MI, BB, 8, 0, PPC::CMPD, PPC::PRED_LE);
11890
11891 else if (MI.getOpcode() == PPC::ATOMIC_LOAD_UMIN_I8)
11892 BB = EmitPartwordAtomicBinary(MI, BB, true, 0, PPC::CMPLW, PPC::PRED_GE);
11893 else if (MI.getOpcode() == PPC::ATOMIC_LOAD_UMIN_I16)
11894 BB = EmitPartwordAtomicBinary(MI, BB, false, 0, PPC::CMPLW, PPC::PRED_GE);
11895 else if (MI.getOpcode() == PPC::ATOMIC_LOAD_UMIN_I32)
11896 BB = EmitAtomicBinary(MI, BB, 4, 0, PPC::CMPLW, PPC::PRED_GE);
11897 else if (MI.getOpcode() == PPC::ATOMIC_LOAD_UMIN_I64)
11898 BB = EmitAtomicBinary(MI, BB, 8, 0, PPC::CMPLD, PPC::PRED_GE);
11899
11900 else if (MI.getOpcode() == PPC::ATOMIC_LOAD_UMAX_I8)
11901 BB = EmitPartwordAtomicBinary(MI, BB, true, 0, PPC::CMPLW, PPC::PRED_LE);
11902 else if (MI.getOpcode() == PPC::ATOMIC_LOAD_UMAX_I16)
11903 BB = EmitPartwordAtomicBinary(MI, BB, false, 0, PPC::CMPLW, PPC::PRED_LE);
11904 else if (MI.getOpcode() == PPC::ATOMIC_LOAD_UMAX_I32)
11905 BB = EmitAtomicBinary(MI, BB, 4, 0, PPC::CMPLW, PPC::PRED_LE);
11906 else if (MI.getOpcode() == PPC::ATOMIC_LOAD_UMAX_I64)
11907 BB = EmitAtomicBinary(MI, BB, 8, 0, PPC::CMPLD, PPC::PRED_LE);
11908
11909 else if (MI.getOpcode() == PPC::ATOMIC_SWAP_I8)
11910 BB = EmitPartwordAtomicBinary(MI, BB, true, 0);
11911 else if (MI.getOpcode() == PPC::ATOMIC_SWAP_I16)
11912 BB = EmitPartwordAtomicBinary(MI, BB, false, 0);
11913 else if (MI.getOpcode() == PPC::ATOMIC_SWAP_I32)
11914 BB = EmitAtomicBinary(MI, BB, 4, 0);
11915 else if (MI.getOpcode() == PPC::ATOMIC_SWAP_I64)
11916 BB = EmitAtomicBinary(MI, BB, 8, 0);
11917 else if (MI.getOpcode() == PPC::ATOMIC_CMP_SWAP_I32 ||
11918 MI.getOpcode() == PPC::ATOMIC_CMP_SWAP_I64 ||
11919 (Subtarget.hasPartwordAtomics() &&
11920 MI.getOpcode() == PPC::ATOMIC_CMP_SWAP_I8) ||
11921 (Subtarget.hasPartwordAtomics() &&
11922 MI.getOpcode() == PPC::ATOMIC_CMP_SWAP_I16)) {
11923 bool is64bit = MI.getOpcode() == PPC::ATOMIC_CMP_SWAP_I64;
11924
11925 auto LoadMnemonic = PPC::LDARX;
11926 auto StoreMnemonic = PPC::STDCX;
11927 switch (MI.getOpcode()) {
11928 default:
11929 llvm_unreachable("Compare and swap of unknown size");
11930 case PPC::ATOMIC_CMP_SWAP_I8:
11931 LoadMnemonic = PPC::LBARX;
11932 StoreMnemonic = PPC::STBCX;
11933 assert(Subtarget.hasPartwordAtomics() && "No support partword atomics.");
11934 break;
11935 case PPC::ATOMIC_CMP_SWAP_I16:
11936 LoadMnemonic = PPC::LHARX;
11937 StoreMnemonic = PPC::STHCX;
11938 assert(Subtarget.hasPartwordAtomics() && "No support partword atomics.");
11939 break;
11940 case PPC::ATOMIC_CMP_SWAP_I32:
11941 LoadMnemonic = PPC::LWARX;
11942 StoreMnemonic = PPC::STWCX;
11943 break;
11944 case PPC::ATOMIC_CMP_SWAP_I64:
11945 LoadMnemonic = PPC::LDARX;
11946 StoreMnemonic = PPC::STDCX;
11947 break;
11948 }
11949 Register dest = MI.getOperand(0).getReg();
11950 Register ptrA = MI.getOperand(1).getReg();
11951 Register ptrB = MI.getOperand(2).getReg();
11952 Register oldval = MI.getOperand(3).getReg();
11953 Register newval = MI.getOperand(4).getReg();
11954 DebugLoc dl = MI.getDebugLoc();
11955
11956 MachineBasicBlock *loop1MBB = F->CreateMachineBasicBlock(LLVM_BB);
11957 MachineBasicBlock *loop2MBB = F->CreateMachineBasicBlock(LLVM_BB);
11958 MachineBasicBlock *midMBB = F->CreateMachineBasicBlock(LLVM_BB);
11959 MachineBasicBlock *exitMBB = F->CreateMachineBasicBlock(LLVM_BB);
11960 F->insert(It, loop1MBB);
11961 F->insert(It, loop2MBB);
11962 F->insert(It, midMBB);
11963 F->insert(It, exitMBB);
11964 exitMBB->splice(exitMBB->begin(), BB,
11965 std::next(MachineBasicBlock::iterator(MI)), BB->end());
11966 exitMBB->transferSuccessorsAndUpdatePHIs(BB);
11967
11968 // thisMBB:
11969 // ...
11970 // fallthrough --> loopMBB
11971 BB->addSuccessor(loop1MBB);
11972
11973 // loop1MBB:
11974 // l[bhwd]arx dest, ptr
11975 // cmp[wd] dest, oldval
11976 // bne- midMBB
11977 // loop2MBB:
11978 // st[bhwd]cx. newval, ptr
11979 // bne- loopMBB
11980 // b exitBB
11981 // midMBB:
11982 // st[bhwd]cx. dest, ptr
11983 // exitBB:
11984 BB = loop1MBB;
11985 BuildMI(BB, dl, TII->get(LoadMnemonic), dest).addReg(ptrA).addReg(ptrB);
11986 BuildMI(BB, dl, TII->get(is64bit ? PPC::CMPD : PPC::CMPW), PPC::CR0)
11987 .addReg(oldval)
11988 .addReg(dest);
11989 BuildMI(BB, dl, TII->get(PPC::BCC))
11990 .addImm(PPC::PRED_NE)
11991 .addReg(PPC::CR0)
11992 .addMBB(midMBB);
11993 BB->addSuccessor(loop2MBB);
11994 BB->addSuccessor(midMBB);
11995
11996 BB = loop2MBB;
11997 BuildMI(BB, dl, TII->get(StoreMnemonic))
11998 .addReg(newval)
11999 .addReg(ptrA)
12000 .addReg(ptrB);
12001 BuildMI(BB, dl, TII->get(PPC::BCC))
12002 .addImm(PPC::PRED_NE)
12003 .addReg(PPC::CR0)
12004 .addMBB(loop1MBB);
12005 BuildMI(BB, dl, TII->get(PPC::B)).addMBB(exitMBB);
12006 BB->addSuccessor(loop1MBB);
12007 BB->addSuccessor(exitMBB);
12008
12009 BB = midMBB;
12010 BuildMI(BB, dl, TII->get(StoreMnemonic))
12011 .addReg(dest)
12012 .addReg(ptrA)
12013 .addReg(ptrB);
12014 BB->addSuccessor(exitMBB);
12015
12016 // exitMBB:
12017 // ...
12018 BB = exitMBB;
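The larx/stcx. loop emitted above has the observable behaviour of a strong compare-and-swap on the addressed word, with the loaded value returned in dest whether or not the exchange succeeded. A behavioural sketch using standard C++ atomics (illustrative only, not what the backend emits):

#include <atomic>
#include <cstdint>

uint32_t cmpSwap32(std::atomic<uint32_t> &Mem, uint32_t OldVal,
                   uint32_t NewVal) {
  uint32_t Loaded = OldVal;
  // On success Mem now holds NewVal; either way Loaded ends up holding the
  // value that was read, just like the dest register in the emitted loop.
  Mem.compare_exchange_strong(Loaded, NewVal);
  return Loaded;
}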
12019 } else if (MI.getOpcode() == PPC::ATOMIC_CMP_SWAP_I8 ||
12020 MI.getOpcode() == PPC::ATOMIC_CMP_SWAP_I16) {
12021 // We must use 64-bit registers for addresses when targeting 64-bit,
12022 // since we're actually doing arithmetic on them. Other registers
12023 // can be 32-bit.
12024 bool is64bit = Subtarget.isPPC64();
12025 bool isLittleEndian = Subtarget.isLittleEndian();
12026 bool is8bit = MI.getOpcode() == PPC::ATOMIC_CMP_SWAP_I8;
12027
12028 Register dest = MI.getOperand(0).getReg();
12029 Register ptrA = MI.getOperand(1).getReg();
12030 Register ptrB = MI.getOperand(2).getReg();
12031 Register oldval = MI.getOperand(3).getReg();
12032 Register newval = MI.getOperand(4).getReg();
12033 DebugLoc dl = MI.getDebugLoc();
12034
12035 MachineBasicBlock *loop1MBB = F->CreateMachineBasicBlock(LLVM_BB);
12036 MachineBasicBlock *loop2MBB = F->CreateMachineBasicBlock(LLVM_BB);
12037 MachineBasicBlock *midMBB = F->CreateMachineBasicBlock(LLVM_BB);
12038 MachineBasicBlock *exitMBB = F->CreateMachineBasicBlock(LLVM_BB);
12039 F->insert(It, loop1MBB);
12040 F->insert(It, loop2MBB);
12041 F->insert(It, midMBB);
12042 F->insert(It, exitMBB);
12043 exitMBB->splice(exitMBB->begin(), BB,
12044 std::next(MachineBasicBlock::iterator(MI)), BB->end());
12045 exitMBB->transferSuccessorsAndUpdatePHIs(BB);
12046
12047 MachineRegisterInfo &RegInfo = F->getRegInfo();
12048 const TargetRegisterClass *RC =
12049 is64bit ? &PPC::G8RCRegClass : &PPC::GPRCRegClass;
12050 const TargetRegisterClass *GPRC = &PPC::GPRCRegClass;
12051
12052 Register PtrReg = RegInfo.createVirtualRegister(RC);
12053 Register Shift1Reg = RegInfo.createVirtualRegister(GPRC);
12054 Register ShiftReg =
12055 isLittleEndian ? Shift1Reg : RegInfo.createVirtualRegister(GPRC);
12056 Register NewVal2Reg = RegInfo.createVirtualRegister(GPRC);
12057 Register NewVal3Reg = RegInfo.createVirtualRegister(GPRC);
12058 Register OldVal2Reg = RegInfo.createVirtualRegister(GPRC);
12059 Register OldVal3Reg = RegInfo.createVirtualRegister(GPRC);
12060 Register MaskReg = RegInfo.createVirtualRegister(GPRC);
12061 Register Mask2Reg = RegInfo.createVirtualRegister(GPRC);
12062 Register Mask3Reg = RegInfo.createVirtualRegister(GPRC);
12063 Register Tmp2Reg = RegInfo.createVirtualRegister(GPRC);
12064 Register Tmp4Reg = RegInfo.createVirtualRegister(GPRC);
12065 Register TmpDestReg = RegInfo.createVirtualRegister(GPRC);
12066 Register Ptr1Reg;
12067 Register TmpReg = RegInfo.createVirtualRegister(GPRC);
12068 Register ZeroReg = is64bit ? PPC::ZERO8 : PPC::ZERO;
12069 // thisMBB:
12070 // ...
12071 // fallthrough --> loopMBB
12072 BB->addSuccessor(loop1MBB);
12073
12074 // The 4-byte load must be aligned, while a char or short may be
12075 // anywhere in the word. Hence all this nasty bookkeeping code.
12076 // add ptr1, ptrA, ptrB [copy if ptrA==0]
12077 // rlwinm shift1, ptr1, 3, 27, 28 [3, 27, 27]
12078 // xori shift, shift1, 24 [16]
12079 // rlwinm ptr, ptr1, 0, 0, 29
12080 // slw newval2, newval, shift
12081 // slw oldval2, oldval,shift
12082 // li mask2, 255 [li mask3, 0; ori mask2, mask3, 65535]
12083 // slw mask, mask2, shift
12084 // and newval3, newval2, mask
12085 // and oldval3, oldval2, mask
12086 // loop1MBB:
12087 // lwarx tmpDest, ptr
12088 // and tmp, tmpDest, mask
12089 // cmpw tmp, oldval3
12090 // bne- midMBB
12091 // loop2MBB:
12092 // andc tmp2, tmpDest, mask
12093 // or tmp4, tmp2, newval3
12094 // stwcx. tmp4, ptr
12095 // bne- loop1MBB
12096 // b exitBB
12097 // midMBB:
12098 // stwcx. tmpDest, ptr
12099 // exitBB:
12100 // srw dest, tmpDest, shift
12101 if (ptrA != ZeroReg) {
12102 Ptr1Reg = RegInfo.createVirtualRegister(RC);
12103 BuildMI(BB, dl, TII->get(is64bit ? PPC::ADD8 : PPC::ADD4), Ptr1Reg)
12104 .addReg(ptrA)
12105 .addReg(ptrB);
12106 } else {
12107 Ptr1Reg = ptrB;
12108 }
12109
12110 // We need to use a 32-bit subregister to avoid a register class mismatch in
12111 // 64-bit mode.
12112 BuildMI(BB, dl, TII->get(PPC::RLWINM), Shift1Reg)
12113 .addReg(Ptr1Reg, 0, is64bit ? PPC::sub_32 : 0)
12114 .addImm(3)
12115 .addImm(27)
12116 .addImm(is8bit ? 28 : 27);
12117 if (!isLittleEndian)
12118 BuildMI(BB, dl, TII->get(PPC::XORI), ShiftReg)
12119 .addReg(Shift1Reg)
12120 .addImm(is8bit ? 24 : 16);
12121 if (is64bit)
12122 BuildMI(BB, dl, TII->get(PPC::RLDICR), PtrReg)
12123 .addReg(Ptr1Reg)
12124 .addImm(0)
12125 .addImm(61);
12126 else
12127 BuildMI(BB, dl, TII->get(PPC::RLWINM), PtrReg)
12128 .addReg(Ptr1Reg)
12129 .addImm(0)
12130 .addImm(0)
12131 .addImm(29);
12132 BuildMI(BB, dl, TII->get(PPC::SLW), NewVal2Reg)
12133 .addReg(newval)
12134 .addReg(ShiftReg);
12135 BuildMI(BB, dl, TII->get(PPC::SLW), OldVal2Reg)
12136 .addReg(oldval)
12137 .addReg(ShiftReg);
12138 if (is8bit)
12139 BuildMI(BB, dl, TII->get(PPC::LI), Mask2Reg).addImm(255);
12140 else {
12141 BuildMI(BB, dl, TII->get(PPC::LI), Mask3Reg).addImm(0);
12142 BuildMI(BB, dl, TII->get(PPC::ORI), Mask2Reg)
12143 .addReg(Mask3Reg)
12144 .addImm(65535);
12145 }
12146 BuildMI(BB, dl, TII->get(PPC::SLW), MaskReg)
12147 .addReg(Mask2Reg)
12148 .addReg(ShiftReg);
12149 BuildMI(BB, dl, TII->get(PPC::AND), NewVal3Reg)
12150 .addReg(NewVal2Reg)
12151 .addReg(MaskReg);
12152 BuildMI(BB, dl, TII->get(PPC::AND), OldVal3Reg)
12153 .addReg(OldVal2Reg)
12154 .addReg(MaskReg);
12155
12156 BB = loop1MBB;
12157 BuildMI(BB, dl, TII->get(PPC::LWARX), TmpDestReg)
12158 .addReg(ZeroReg)
12159 .addReg(PtrReg);
12160 BuildMI(BB, dl, TII->get(PPC::AND), TmpReg)
12161 .addReg(TmpDestReg)
12162 .addReg(MaskReg);
12163 BuildMI(BB, dl, TII->get(PPC::CMPW), PPC::CR0)
12164 .addReg(TmpReg)
12165 .addReg(OldVal3Reg);
12166 BuildMI(BB, dl, TII->get(PPC::BCC))
12167 .addImm(PPC::PRED_NE)
12168 .addReg(PPC::CR0)
12169 .addMBB(midMBB);
12170 BB->addSuccessor(loop2MBB);
12171 BB->addSuccessor(midMBB);
12172
12173 BB = loop2MBB;
12174 BuildMI(BB, dl, TII->get(PPC::ANDC), Tmp2Reg)
12175 .addReg(TmpDestReg)
12176 .addReg(MaskReg);
12177 BuildMI(BB, dl, TII->get(PPC::OR), Tmp4Reg)
12178 .addReg(Tmp2Reg)
12179 .addReg(NewVal3Reg);
12180 BuildMI(BB, dl, TII->get(PPC::STWCX))
12181 .addReg(Tmp4Reg)
12182 .addReg(ZeroReg)
12183 .addReg(PtrReg);
12184 BuildMI(BB, dl, TII->get(PPC::BCC))
12185 .addImm(PPC::PRED_NE)
12186 .addReg(PPC::CR0)
12187 .addMBB(loop1MBB);
12188 BuildMI(BB, dl, TII->get(PPC::B)).addMBB(exitMBB);
12189 BB->addSuccessor(loop1MBB);
12190 BB->addSuccessor(exitMBB);
12191
12192 BB = midMBB;
12193 BuildMI(BB, dl, TII->get(PPC::STWCX))
12194 .addReg(TmpDestReg)
12195 .addReg(ZeroReg)
12196 .addReg(PtrReg);
12197 BB->addSuccessor(exitMBB);
12198
12199 // exitMBB:
12200 // ...
12201 BB = exitMBB;
12202 BuildMI(*BB, BB->begin(), dl, TII->get(PPC::SRW), dest)
12203 .addReg(TmpReg)
12204 .addReg(ShiftReg);
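A standalone sketch of the shift/mask bookkeeping described in the comment above, for a byte or halfword at address Addr inside its naturally aligned 32-bit word; the helper and its names are illustrative:

#include <cstdint>

struct PartwordPos {
  uint32_t Shift; // bit offset of the element inside the aligned word
  uint32_t Mask;  // element mask, already shifted into position
};

PartwordPos partwordPos(uint64_t Addr, bool Is8Bit, bool IsLittleEndian) {
  uint32_t Shift = uint32_t(Addr & (Is8Bit ? 3 : 2)) * 8; // rlwinm shift1, ...
  if (!IsLittleEndian)
    Shift ^= Is8Bit ? 24 : 16;                            // xori shift, 24/16
  uint32_t Mask = (Is8Bit ? 0xFFu : 0xFFFFu) << Shift;    // li + slw
  return {Shift, Mask};
}

// newval3 = (newval << Shift) & Mask; the loop splices newval3 into the
// loaded word with ANDC/OR before the stwcx., and the final result is the
// loaded word shifted right by Shift.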
12205 } else if (MI.getOpcode() == PPC::FADDrtz) {
12206 // This pseudo performs an FADD with rounding mode temporarily forced
12207 // to round-to-zero. We emit this via custom inserter since the FPSCR
12208 // is not modeled at the SelectionDAG level.
12209 Register Dest = MI.getOperand(0).getReg();
12210 Register Src1 = MI.getOperand(1).getReg();
12211 Register Src2 = MI.getOperand(2).getReg();
12212 DebugLoc dl = MI.getDebugLoc();
12213
12214 MachineRegisterInfo &RegInfo = F->getRegInfo();
12215 Register MFFSReg = RegInfo.createVirtualRegister(&PPC::F8RCRegClass);
12216
12217 // Save FPSCR value.
12218 BuildMI(*BB, MI, dl, TII->get(PPC::MFFS), MFFSReg);
12219
12220 // Set rounding mode to round-to-zero.
12221 BuildMI(*BB, MI, dl, TII->get(PPC::MTFSB1))
12222 .addImm(31)
12223 .addReg(PPC::RM, RegState::ImplicitDefine);
12224
12225 BuildMI(*BB, MI, dl, TII->get(PPC::MTFSB0))
12226 .addImm(30)
12227 .addReg(PPC::RM, RegState::ImplicitDefine);
12228
12229 // Perform addition.
12230 auto MIB = BuildMI(*BB, MI, dl, TII->get(PPC::FADD), Dest)
12231 .addReg(Src1)
12232 .addReg(Src2);
12233 if (MI.getFlag(MachineInstr::NoFPExcept))
12234 MIB.setMIFlag(MachineInstr::NoFPExcept);
12235
12236 // Restore FPSCR value.
12237 BuildMI(*BB, MI, dl, TII->get(PPC::MTFSFb)).addImm(1).addReg(MFFSReg);
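A behavioural sketch of FADDrtz using the standard <cfenv> interface instead of direct FPSCR access; the pragma and function are illustrative, not the backend's mechanism:

#include <cfenv>

#pragma STDC FENV_ACCESS ON
double faddRoundToZero(double A, double B) {
  const int OldMode = std::fegetround(); // MFFS: save the rounding state
  std::fesetround(FE_TOWARDZERO);        // MTFSB1 31 / MTFSB0 30
  double Sum = A + B;                    // FADD
  std::fesetround(OldMode);              // MTFSFb: restore the old state
  return Sum;
}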
12238 } else if (MI.getOpcode() == PPC::ANDI_rec_1_EQ_BIT ||
12239 MI.getOpcode() == PPC::ANDI_rec_1_GT_BIT ||
12240 MI.getOpcode() == PPC::ANDI_rec_1_EQ_BIT8 ||
12241 MI.getOpcode() == PPC::ANDI_rec_1_GT_BIT8) {
12242 unsigned Opcode = (MI.getOpcode() == PPC::ANDI_rec_1_EQ_BIT8 ||
12243 MI.getOpcode() == PPC::ANDI_rec_1_GT_BIT8)
12244 ? PPC::ANDI8_rec
12245 : PPC::ANDI_rec;
12246 bool IsEQ = (MI.getOpcode() == PPC::ANDI_rec_1_EQ_BIT ||
12247 MI.getOpcode() == PPC::ANDI_rec_1_EQ_BIT8);
12248
12249 MachineRegisterInfo &RegInfo = F->getRegInfo();
12250 Register Dest = RegInfo.createVirtualRegister(
12251 Opcode == PPC::ANDI_rec ? &PPC::GPRCRegClass : &PPC::G8RCRegClass);
12252
12253 DebugLoc Dl = MI.getDebugLoc();
12254 BuildMI(*BB, MI, Dl, TII->get(Opcode), Dest)
12255 .addReg(MI.getOperand(1).getReg())
12256 .addImm(1);
12257 BuildMI(*BB, MI, Dl, TII->get(TargetOpcode::COPY),
12258 MI.getOperand(0).getReg())
12259 .addReg(IsEQ ? PPC::CR0EQ : PPC::CR0GT);
12260 } else if (MI.getOpcode() == PPC::TCHECK_RET) {
12261 DebugLoc Dl = MI.getDebugLoc();
12262 MachineRegisterInfo &RegInfo = F->getRegInfo();
12263 Register CRReg = RegInfo.createVirtualRegister(&PPC::CRRCRegClass);
12264 BuildMI(*BB, MI, Dl, TII->get(PPC::TCHECK), CRReg);
12265 BuildMI(*BB, MI, Dl, TII->get(TargetOpcode::COPY),
12266 MI.getOperand(0).getReg())
12267 .addReg(CRReg);
12268 } else if (MI.getOpcode() == PPC::TBEGIN_RET) {
12269 DebugLoc Dl = MI.getDebugLoc();
12270 unsigned Imm = MI.getOperand(1).getImm();
12271 BuildMI(*BB, MI, Dl, TII->get(PPC::TBEGIN)).addImm(Imm);
12272 BuildMI(*BB, MI, Dl, TII->get(TargetOpcode::COPY),
12273 MI.getOperand(0).getReg())
12274 .addReg(PPC::CR0EQ);
12275 } else if (MI.getOpcode() == PPC::SETRNDi) {
12276 DebugLoc dl = MI.getDebugLoc();
12277 Register OldFPSCRReg = MI.getOperand(0).getReg();
12278
12279 // Save FPSCR value.
12280 BuildMI(*BB, MI, dl, TII->get(PPC::MFFS), OldFPSCRReg);
12281
12282 // The floating point rounding mode is in bits 62:63 of FPSCR, and has
12283 // the following settings:
12284 // 00 Round to nearest
12285 // 01 Round to 0
12286 // 10 Round to +inf
12287 // 11 Round to -inf
12288
12289 // When the operand is an immediate, use its two least significant bits to
12290 // set bits 62:63 of FPSCR.
12291 unsigned Mode = MI.getOperand(1).getImm();
12292 BuildMI(*BB, MI, dl, TII->get((Mode & 1) ? PPC::MTFSB1 : PPC::MTFSB0))
12293 .addImm(31)
12294 .addReg(PPC::RM, RegState::ImplicitDefine);
12295
12296 BuildMI(*BB, MI, dl, TII->get((Mode & 2) ? PPC::MTFSB1 : PPC::MTFSB0))
12297 .addImm(30)
12298 .addReg(PPC::RM, RegState::ImplicitDefine);
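A small sketch of the encoding this relies on, assuming the two least significant FPSCR bits select the rounding mode as listed in the comment above:

#include <cstdint>

// 00 round to nearest, 01 round to 0, 10 round to +inf, 11 round to -inf.
uint64_t setRoundingBits(uint64_t FPSCR, unsigned Mode) {
  // Bit 63 is set/cleared by the first MTFSB1/MTFSB0 (Mode & 1), bit 62 by
  // the second (Mode & 2).
  return (FPSCR & ~uint64_t(3)) | (Mode & 3);
}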
12299 } else if (MI.getOpcode() == PPC::SETRND) {
12300 DebugLoc dl = MI.getDebugLoc();
12301
12302     // Copy register from F8RCRegClass::SrcReg to G8RCRegClass::DestReg
12303     // or copy register from G8RCRegClass::SrcReg to F8RCRegClass::DestReg.
12304     // If the target doesn't have DirectMove, we should use the stack to do the
12305     // conversion, because the target doesn't have instructions like mtvsrd
12306     // or mfvsrd to do this conversion directly.
12307 auto copyRegFromG8RCOrF8RC = [&] (unsigned DestReg, unsigned SrcReg) {
12308 if (Subtarget.hasDirectMove()) {
12309 BuildMI(*BB, MI, dl, TII->get(TargetOpcode::COPY), DestReg)
12310 .addReg(SrcReg);
12311 } else {
12312 // Use stack to do the register copy.
12313 unsigned StoreOp = PPC::STD, LoadOp = PPC::LFD;
12314 MachineRegisterInfo &RegInfo = F->getRegInfo();
12315 const TargetRegisterClass *RC = RegInfo.getRegClass(SrcReg);
12316 if (RC == &PPC::F8RCRegClass) {
12317 // Copy register from F8RCRegClass to G8RCRegclass.
12318 assert((RegInfo.getRegClass(DestReg) == &PPC::G8RCRegClass) &&
12319        "Unsupported RegClass.");
12320
12321 StoreOp = PPC::STFD;
12322 LoadOp = PPC::LD;
12323 } else {
12324 // Copy register from G8RCRegClass to F8RCRegclass.
12325 assert((RegInfo.getRegClass(SrcReg) == &PPC::G8RCRegClass) &&
12326        (RegInfo.getRegClass(DestReg) == &PPC::F8RCRegClass) &&
12327        "Unsupported RegClass.");
12328 }
12329
12330 MachineFrameInfo &MFI = F->getFrameInfo();
12331 int FrameIdx = MFI.CreateStackObject(8, Align(8), false);
12332
12333 MachineMemOperand *MMOStore = F->getMachineMemOperand(
12334 MachinePointerInfo::getFixedStack(*F, FrameIdx, 0),
12335 MachineMemOperand::MOStore, MFI.getObjectSize(FrameIdx),
12336 MFI.getObjectAlign(FrameIdx));
12337
12338 // Store the SrcReg into the stack.
12339 BuildMI(*BB, MI, dl, TII->get(StoreOp))
12340 .addReg(SrcReg)
12341 .addImm(0)
12342 .addFrameIndex(FrameIdx)
12343 .addMemOperand(MMOStore);
12344
12345 MachineMemOperand *MMOLoad = F->getMachineMemOperand(
12346 MachinePointerInfo::getFixedStack(*F, FrameIdx, 0),
12347 MachineMemOperand::MOLoad, MFI.getObjectSize(FrameIdx),
12348 MFI.getObjectAlign(FrameIdx));
12349
12350 // Load from the stack where SrcReg is stored, and save to DestReg,
12351 // so we have done the RegClass conversion from RegClass::SrcReg to
12352 // RegClass::DestReg.
12353 BuildMI(*BB, MI, dl, TII->get(LoadOp), DestReg)
12354 .addImm(0)
12355 .addFrameIndex(FrameIdx)
12356 .addMemOperand(MMOLoad);
12357 }
12358 };
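When DirectMove is unavailable, the lambda above bounces the value through a stack slot. The portable equivalent of that bit-for-bit move, shown only for illustration:

#include <cstdint>
#include <cstring>

uint64_t fprToGpr(double Src) {
  uint64_t Dest;
  std::memcpy(&Dest, &Src, sizeof(Dest)); // STFD to the slot, LD from it
  return Dest;
}

double gprToFpr(uint64_t Src) {
  double Dest;
  std::memcpy(&Dest, &Src, sizeof(Dest)); // STD to the slot, LFD from it
  return Dest;
}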
12359
12360 Register OldFPSCRReg = MI.getOperand(0).getReg();
12361
12362 // Save FPSCR value.
12363 BuildMI(*BB, MI, dl, TII->get(PPC::MFFS), OldFPSCRReg);
12364
12365   // When the operand is a gprc register, use its two least significant bits
12366   // and the mtfsf instruction to set bits 62:63 of FPSCR.
12367 //
12368 // copy OldFPSCRTmpReg, OldFPSCRReg
12369 // (INSERT_SUBREG ExtSrcReg, (IMPLICIT_DEF ImDefReg), SrcOp, 1)
12370 // rldimi NewFPSCRTmpReg, ExtSrcReg, OldFPSCRReg, 0, 62
12371 // copy NewFPSCRReg, NewFPSCRTmpReg
12372 // mtfsf 255, NewFPSCRReg
12373 MachineOperand SrcOp = MI.getOperand(1);
12374 MachineRegisterInfo &RegInfo = F->getRegInfo();
12375 Register OldFPSCRTmpReg = RegInfo.createVirtualRegister(&PPC::G8RCRegClass);
12376
12377 copyRegFromG8RCOrF8RC(OldFPSCRTmpReg, OldFPSCRReg);
12378
12379 Register ImDefReg = RegInfo.createVirtualRegister(&PPC::G8RCRegClass);
12380 Register ExtSrcReg = RegInfo.createVirtualRegister(&PPC::G8RCRegClass);
12381
12382   // The first operand of INSERT_SUBREG should be a register which has
12383   // subregisters; we only care about its RegClass, so we should use an
12384   // IMPLICIT_DEF register.
12385 BuildMI(*BB, MI, dl, TII->get(TargetOpcode::IMPLICIT_DEF), ImDefReg);
12386 BuildMI(*BB, MI, dl, TII->get(PPC::INSERT_SUBREG), ExtSrcReg)
12387 .addReg(ImDefReg)
12388 .add(SrcOp)
12389 .addImm(1);
12390
12391 Register NewFPSCRTmpReg = RegInfo.createVirtualRegister(&PPC::G8RCRegClass);
12392 BuildMI(*BB, MI, dl, TII->get(PPC::RLDIMI), NewFPSCRTmpReg)
12393 .addReg(OldFPSCRTmpReg)
12394 .addReg(ExtSrcReg)
12395 .addImm(0)
12396 .addImm(62);
12397
12398 Register NewFPSCRReg = RegInfo.createVirtualRegister(&PPC::F8RCRegClass);
12399 copyRegFromG8RCOrF8RC(NewFPSCRReg, NewFPSCRTmpReg);
12400
12401   // The mask 255 means that bits 32:63 of NewFPSCRReg are put into bits 32:63
12402   // of FPSCR.
12403 BuildMI(*BB, MI, dl, TII->get(PPC::MTFSF))
12404 .addImm(255)
12405 .addReg(NewFPSCRReg)
12406 .addImm(0)
12407 .addImm(0);
12408 } else if (MI.getOpcode() == PPC::SETFLM) {
12409 DebugLoc Dl = MI.getDebugLoc();
12410
12411 // Result of setflm is previous FPSCR content, so we need to save it first.
12412 Register OldFPSCRReg = MI.getOperand(0).getReg();
12413 BuildMI(*BB, MI, Dl, TII->get(PPC::MFFS), OldFPSCRReg);
12414
12415 // Put bits in 32:63 to FPSCR.
12416 Register NewFPSCRReg = MI.getOperand(1).getReg();
12417 BuildMI(*BB, MI, Dl, TII->get(PPC::MTFSF))
12418 .addImm(255)
12419 .addReg(NewFPSCRReg)
12420 .addImm(0)
12421 .addImm(0);
12422 } else if (MI.getOpcode() == PPC::PROBED_ALLOCA_32 ||
12423 MI.getOpcode() == PPC::PROBED_ALLOCA_64) {
12424 return emitProbedAlloca(MI, BB);
12425 } else {
12426 llvm_unreachable("Unexpected instr type to insert");
12427 }
12428
12429 MI.eraseFromParent(); // The pseudo instruction is gone now.
12430 return BB;
12431}
12432
12433//===----------------------------------------------------------------------===//
12434// Target Optimization Hooks
12435//===----------------------------------------------------------------------===//
12436
12437static int getEstimateRefinementSteps(EVT VT, const PPCSubtarget &Subtarget) {
12438 // For the estimates, convergence is quadratic, so we essentially double the
12439 // number of digits correct after every iteration. For both FRE and FRSQRTE,
12440 // the minimum architected relative accuracy is 2^-5. When hasRecipPrec(),
12441 // this is 2^-14. IEEE float has 23 digits and double has 52 digits.
12442 int RefinementSteps = Subtarget.hasRecipPrec() ? 1 : 3;
12443 if (VT.getScalarType() == MVT::f64)
12444 RefinementSteps++;
12445 return RefinementSteps;
12446}
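A worked recomputation of the step counts encoded above, assuming quadratic convergence doubles the number of correct bits each iteration (23 fraction bits for float, 52 for double, per the comment):

// Count iterations needed to grow from the architected estimate accuracy to
// full precision by doubling the number of correct bits each time.
int stepsNeeded(int InitialBits, int TargetBits) {
  int Steps = 0;
  for (int Bits = InitialBits; Bits < TargetBits; Bits *= 2)
    ++Steps;
  return Steps;
}
// stepsNeeded(5, 23)  == 3   // no hasRecipPrec(), f32
// stepsNeeded(14, 23) == 1   // hasRecipPrec(), f32
// stepsNeeded(14, 52) == 2   // hasRecipPrec(), f64: one extra step
// stepsNeeded(5, 52)  == 4   // no hasRecipPrec(), f64: one extra step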
12447
12448SDValue PPCTargetLowering::getSqrtInputTest(SDValue Op, SelectionDAG &DAG,
12449 const DenormalMode &Mode) const {
12450 // We only have VSX Vector Test for software Square Root.
12451 EVT VT = Op.getValueType();
12452 if (!isTypeLegal(MVT::i1) ||
12453 (VT != MVT::f64 &&
12454 ((VT != MVT::v2f64 && VT != MVT::v4f32) || !Subtarget.hasVSX())))
12455 return TargetLowering::getSqrtInputTest(Op, DAG, Mode);
12456
12457 SDLoc DL(Op);
12458 // The output register of FTSQRT is CR field.
12459 SDValue FTSQRT = DAG.getNode(PPCISD::FTSQRT, DL, MVT::i32, Op);
12460 // ftsqrt BF,FRB
12461 // Let e_b be the unbiased exponent of the double-precision
12462 // floating-point operand in register FRB.
12463 // fe_flag is set to 1 if either of the following conditions occurs.
12464 // - The double-precision floating-point operand in register FRB is a zero,
12465   //   a NaN, an infinity, or a negative value.
12466 // - e_b is less than or equal to -970.
12467 // Otherwise fe_flag is set to 0.
12468 // Both VSX and non-VSX versions would set EQ bit in the CR if the number is
12469 // not eligible for iteration. (zero/negative/infinity/nan or unbiased
12470 // exponent is less than -970)
12471 SDValue SRIdxVal = DAG.getTargetConstant(PPC::sub_eq, DL, MVT::i32);
12472 return SDValue(DAG.getMachineNode(TargetOpcode::EXTRACT_SUBREG, DL, MVT::i1,
12473 FTSQRT, SRIdxVal),
12474 0);
12475}
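A standalone sketch of the fe_flag condition described in the comment, using <cmath> classification; the -970 exponent cutoff is taken from the comment above, and the frexp-based exponent extraction is an illustrative approximation:

#include <cmath>

// True when the input must fall back to a full-precision sqrt rather than
// the estimate-plus-refinement sequence.
bool sqrtInputNeedsFullPrecision(double X) {
  if (X == 0.0 || X < 0.0 || std::isnan(X) || std::isinf(X))
    return true;
  int Exp = 0;
  std::frexp(X, &Exp);      // X = m * 2^Exp with m in [0.5, 1)
  return (Exp - 1) <= -970; // unbiased exponent of X
}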
12476
12477SDValue
12478PPCTargetLowering::getSqrtResultForDenormInput(SDValue Op,
12479 SelectionDAG &DAG) const {
12480 // We only have VSX Vector Square Root.
12481 EVT VT = Op.getValueType();
12482 if (VT != MVT::f64 &&
12483 ((VT != MVT::v2f64 && VT != MVT::v4f32) || !Subtarget.hasVSX()))
12484 return TargetLowering::getSqrtResultForDenormInput(Op, DAG);
12485
12486 return DAG.getNode(PPCISD::FSQRT, SDLoc(Op), VT, Op);
12487}
12488
12489SDValue PPCTargetLowering::getSqrtEstimate(SDValue Operand, SelectionDAG &DAG,
12490 int Enabled, int &RefinementSteps,
12491 bool &UseOneConstNR,
12492 bool Reciprocal) const {
12493 EVT VT = Operand.getValueType();
12494 if ((VT == MVT::f32 && Subtarget.hasFRSQRTES()) ||
12495 (VT == MVT::f64 && Subtarget.hasFRSQRTE()) ||
12496 (VT == MVT::v4f32 && Subtarget.hasAltivec()) ||
12497 (VT == MVT::v2f64 && Subtarget.hasVSX())) {
12498 if (RefinementSteps == ReciprocalEstimate::Unspecified)
12499 RefinementSteps = getEstimateRefinementSteps(VT, Subtarget);
12500
12501 // The Newton-Raphson computation with a single constant does not provide
12502 // enough accuracy on some CPUs.
12503 UseOneConstNR = !Subtarget.needsTwoConstNR();
12504 return DAG.getNode(PPCISD::FRSQRTE, SDLoc(Operand), VT, Operand);
12505 }
12506 return SDValue();
12507}
12508
12509SDValue PPCTargetLowering::getRecipEstimate(SDValue Operand, SelectionDAG &DAG,
12510 int Enabled,
12511 int &RefinementSteps) const {
12512 EVT VT = Operand.getValueType();
12513 if ((VT == MVT::f32 && Subtarget.hasFRES()) ||
12514 (VT == MVT::f64 && Subtarget.hasFRE()) ||
12515 (VT == MVT::v4f32 && Subtarget.hasAltivec()) ||
12516 (VT == MVT::v2f64 && Subtarget.hasVSX())) {
12517 if (RefinementSteps == ReciprocalEstimate::Unspecified)
12518 RefinementSteps = getEstimateRefinementSteps(VT, Subtarget);
12519 return DAG.getNode(PPCISD::FRE, SDLoc(Operand), VT, Operand);
12520 }
12521 return SDValue();
12522}
12523
12524unsigned PPCTargetLowering::combineRepeatedFPDivisors() const {
12525 // Note: This functionality is used only when unsafe-fp-math is enabled, and
12526 // on cores with reciprocal estimates (which are used when unsafe-fp-math is
12527 // enabled for division), this functionality is redundant with the default
12528 // combiner logic (once the division -> reciprocal/multiply transformation
12529 // has taken place). As a result, this matters more for older cores than for
12530 // newer ones.
12531
12532 // Combine multiple FDIVs with the same divisor into multiple FMULs by the
12533   // reciprocal if there are two or more FDIVs (for embedded cores with only
12534   // one FP pipeline) or three or more FDIVs (for generic OOO cores).
12535 switch (Subtarget.getCPUDirective()) {
12536 default:
12537 return 3;
12538 case PPC::DIR_440:
12539 case PPC::DIR_A2:
12540 case PPC::DIR_E500:
12541 case PPC::DIR_E500mc:
12542 case PPC::DIR_E5500:
12543 return 2;
12544 }
12545}
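A sketch of the rewrite this threshold gates: once enough divisions share a divisor, the combiner (under unsafe-fp-math) forms one reciprocal and turns the remaining divisions into multiplications. Illustrative plain C++:

// N divisions by the same divisor become one division plus N multiplications.
void divideAll(double *Out, const double *In, int N, double Divisor) {
  double Recip = 1.0 / Divisor; // the single remaining FDIV
  for (int I = 0; I < N; ++I)
    Out[I] = In[I] * Recip;     // FMULs replace the other FDIVs
}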
12546
12547// isConsecutiveLSLoc needs to work even if all adds have not yet been
12548// collapsed, and so we need to look through chains of them.
12549static void getBaseWithConstantOffset(SDValue Loc, SDValue &Base,
12550 int64_t& Offset, SelectionDAG &DAG) {
12551 if (DAG.isBaseWithConstantOffset(Loc)) {
12552 Base = Loc.getOperand(0);
12553 Offset += cast<ConstantSDNode>(Loc.getOperand(1))->getSExtValue();
12554
12555 // The base might itself be a base plus an offset, and if so, accumulate
12556 // that as well.
12557 getBaseWithConstantOffset(Loc.getOperand(0), Base, Offset, DAG);
12558 }
12559}
12560
12561static bool isConsecutiveLSLoc(SDValue Loc, EVT VT, LSBaseSDNode *Base,
12562 unsigned Bytes, int Dist,
12563 SelectionDAG &DAG) {
12564 if (VT.getSizeInBits() / 8 != Bytes)
12565 return false;
12566
12567 SDValue BaseLoc = Base->getBasePtr();
12568 if (Loc.getOpcode() == ISD::FrameIndex) {
12569 if (BaseLoc.getOpcode() != ISD::FrameIndex)
12570 return false;
12571 const MachineFrameInfo &MFI = DAG.getMachineFunction().getFrameInfo();
12572 int FI = cast<FrameIndexSDNode>(Loc)->getIndex();
12573 int BFI = cast<FrameIndexSDNode>(BaseLoc)->getIndex();
12574 int FS = MFI.getObjectSize(FI);
12575 int BFS = MFI.getObjectSize(BFI);
12576 if (FS != BFS || FS != (int)Bytes) return false;
12577 return MFI.getObjectOffset(FI) == (MFI.getObjectOffset(BFI) + Dist*Bytes);
12578 }
12579
12580 SDValue Base1 = Loc, Base2 = BaseLoc;
12581 int64_t Offset1 = 0, Offset2 = 0;
12582 getBaseWithConstantOffset(Loc, Base1, Offset1, DAG);
12583 getBaseWithConstantOffset(BaseLoc, Base2, Offset2, DAG);
12584 if (Base1 == Base2 && Offset1 == (Offset2 + Dist * Bytes))
12585 return true;
12586
12587 const TargetLowering &TLI = DAG.getTargetLoweringInfo();
12588 const GlobalValue *GV1 = nullptr;
12589 const GlobalValue *GV2 = nullptr;
12590 Offset1 = 0;
12591 Offset2 = 0;
12592 bool isGA1 = TLI.isGAPlusOffset(Loc.getNode(), GV1, Offset1);
12593 bool isGA2 = TLI.isGAPlusOffset(BaseLoc.getNode(), GV2, Offset2);
12594 if (isGA1 && isGA2 && GV1 == GV2)
12595 return Offset1 == (Offset2 + Dist*Bytes);
12596 return false;
12597}
12598
12599// Like SelectionDAG::isConsecutiveLoad, but also works for stores, and does
12600// not enforce equality of the chain operands.
12601static bool isConsecutiveLS(SDNode *N, LSBaseSDNode *Base,
12602 unsigned Bytes, int Dist,
12603 SelectionDAG &DAG) {
12604 if (LSBaseSDNode *LS = dyn_cast<LSBaseSDNode>(N)) {
12605 EVT VT = LS->getMemoryVT();
12606 SDValue Loc = LS->getBasePtr();
12607 return isConsecutiveLSLoc(Loc, VT, Base, Bytes, Dist, DAG);
12608 }
12609
12610 if (N->getOpcode() == ISD::INTRINSIC_W_CHAIN) {
12611 EVT VT;
12612 switch (cast<ConstantSDNode>(N->getOperand(1))->getZExtValue()) {
12613 default: return false;
12614 case Intrinsic::ppc_altivec_lvx:
12615 case Intrinsic::ppc_altivec_lvxl:
12616 case Intrinsic::ppc_vsx_lxvw4x:
12617 case Intrinsic::ppc_vsx_lxvw4x_be:
12618 VT = MVT::v4i32;
12619 break;
12620 case Intrinsic::ppc_vsx_lxvd2x:
12621 case Intrinsic::ppc_vsx_lxvd2x_be:
12622 VT = MVT::v2f64;
12623 break;
12624 case Intrinsic::ppc_altivec_lvebx:
12625 VT = MVT::i8;
12626 break;
12627 case Intrinsic::ppc_altivec_lvehx:
12628 VT = MVT::i16;
12629 break;
12630 case Intrinsic::ppc_altivec_lvewx:
12631 VT = MVT::i32;
12632 break;
12633 }
12634
12635 return isConsecutiveLSLoc(N->getOperand(2), VT, Base, Bytes, Dist, DAG);
12636 }
12637
12638 if (N->getOpcode() == ISD::INTRINSIC_VOID) {
12639 EVT VT;
12640 switch (cast<ConstantSDNode>(N->getOperand(1))->getZExtValue()) {
12641 default: return false;
12642 case Intrinsic::ppc_altivec_stvx:
12643 case Intrinsic::ppc_altivec_stvxl:
12644 case Intrinsic::ppc_vsx_stxvw4x:
12645 VT = MVT::v4i32;
12646 break;
12647 case Intrinsic::ppc_vsx_stxvd2x:
12648 VT = MVT::v2f64;
12649 break;
12650 case Intrinsic::ppc_vsx_stxvw4x_be:
12651 VT = MVT::v4i32;
12652 break;
12653 case Intrinsic::ppc_vsx_stxvd2x_be:
12654 VT = MVT::v2f64;
12655 break;
12656 case Intrinsic::ppc_altivec_stvebx:
12657 VT = MVT::i8;
12658 break;
12659 case Intrinsic::ppc_altivec_stvehx:
12660 VT = MVT::i16;
12661 break;
12662 case Intrinsic::ppc_altivec_stvewx:
12663 VT = MVT::i32;
12664 break;
12665 }
12666
12667 return isConsecutiveLSLoc(N->getOperand(3), VT, Base, Bytes, Dist, DAG);
12668 }
12669
12670 return false;
12671}
12672
12673 // Return true if there is a nearby consecutive load to the one provided
12674 // (regardless of alignment). We search up and down the chain, looking through
12675 // token factors and other loads (but nothing else). As a result, a true result
12676 // indicates that it is safe to create a new consecutive load adjacent to the
12677 // load provided.
12678static bool findConsecutiveLoad(LoadSDNode *LD, SelectionDAG &DAG) {
12679 SDValue Chain = LD->getChain();
12680 EVT VT = LD->getMemoryVT();
12681
12682 SmallSet<SDNode *, 16> LoadRoots;
12683 SmallVector<SDNode *, 8> Queue(1, Chain.getNode());
12684 SmallSet<SDNode *, 16> Visited;
12685
12686 // First, search up the chain, branching to follow all token-factor operands.
12687 // If we find a consecutive load, then we're done, otherwise, record all
12688 // nodes just above the top-level loads and token factors.
12689 while (!Queue.empty()) {
12690 SDNode *ChainNext = Queue.pop_back_val();
12691 if (!Visited.insert(ChainNext).second)
12692 continue;
12693
12694 if (MemSDNode *ChainLD = dyn_cast<MemSDNode>(ChainNext)) {
12695 if (isConsecutiveLS(ChainLD, LD, VT.getStoreSize(), 1, DAG))
12696 return true;
12697
12698 if (!Visited.count(ChainLD->getChain().getNode()))
12699 Queue.push_back(ChainLD->getChain().getNode());
12700 } else if (ChainNext->getOpcode() == ISD::TokenFactor) {
12701 for (const SDUse &O : ChainNext->ops())
12702 if (!Visited.count(O.getNode()))
12703 Queue.push_back(O.getNode());
12704 } else
12705 LoadRoots.insert(ChainNext);
12706 }
12707
12708 // Second, search down the chain, starting from the top-level nodes recorded
12709 // in the first phase. These top-level nodes are the nodes just above all
12710   // loads and token factors. Starting with their uses, recursively look through
12711 // all loads (just the chain uses) and token factors to find a consecutive
12712 // load.
12713 Visited.clear();
12714 Queue.clear();
12715
12716 for (SmallSet<SDNode *, 16>::iterator I = LoadRoots.begin(),
12717 IE = LoadRoots.end(); I != IE; ++I) {
12718 Queue.push_back(*I);
12719
12720 while (!Queue.empty()) {
12721 SDNode *LoadRoot = Queue.pop_back_val();
12722 if (!Visited.insert(LoadRoot).second)
12723 continue;
12724
12725 if (MemSDNode *ChainLD = dyn_cast<MemSDNode>(LoadRoot))
12726 if (isConsecutiveLS(ChainLD, LD, VT.getStoreSize(), 1, DAG))
12727 return true;
12728
12729 for (SDNode::use_iterator UI = LoadRoot->use_begin(),
12730 UE = LoadRoot->use_end(); UI != UE; ++UI)
12731 if (((isa<MemSDNode>(*UI) &&
12732 cast<MemSDNode>(*UI)->getChain().getNode() == LoadRoot) ||
12733 UI->getOpcode() == ISD::TokenFactor) && !Visited.count(*UI))
12734 Queue.push_back(*UI);
12735 }
12736 }
12737
12738 return false;
12739}
12740
12741/// This function is called when we have proved that a SETCC node can be replaced
12742/// by subtraction (and other supporting instructions) so that the result of
12743/// comparison is kept in a GPR instead of CR. This function is purely for
12744/// codegen purposes and has some flags to guide the codegen process.
12745static SDValue generateEquivalentSub(SDNode *N, int Size, bool Complement,
12746 bool Swap, SDLoc &DL, SelectionDAG &DAG) {
12747   assert(N->getOpcode() == ISD::SETCC && "ISD::SETCC Expected.");
12748
12749 // Zero extend the operands to the largest legal integer. Originally, they
12750 // must be of a strictly smaller size.
12751 auto Op0 = DAG.getNode(ISD::ZERO_EXTEND, DL, MVT::i64, N->getOperand(0),
12752 DAG.getConstant(Size, DL, MVT::i32));
12753 auto Op1 = DAG.getNode(ISD::ZERO_EXTEND, DL, MVT::i64, N->getOperand(1),
12754 DAG.getConstant(Size, DL, MVT::i32));
12755
12756 // Swap if needed. Depends on the condition code.
12757 if (Swap)
12758 std::swap(Op0, Op1);
12759
12760 // Subtract extended integers.
12761 auto SubNode = DAG.getNode(ISD::SUB, DL, MVT::i64, Op0, Op1);
12762
12763 // Move the sign bit to the least significant position and zero out the rest.
12764 // Now the least significant bit carries the result of original comparison.
12765 auto Shifted = DAG.getNode(ISD::SRL, DL, MVT::i64, SubNode,
12766 DAG.getConstant(Size - 1, DL, MVT::i32));
12767 auto Final = Shifted;
12768
12769 // Complement the result if needed. Based on the condition code.
12770 if (Complement)
12771 Final = DAG.getNode(ISD::XOR, DL, MVT::i64, Shifted,
12772 DAG.getConstant(1, DL, MVT::i64));
12773
12774 return DAG.getNode(ISD::TRUNCATE, DL, MVT::i1, Final);
12775}
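A standalone illustration of the SETULT case on 32-bit operands widened to 64 bits, mirroring the zero-extend/subtract/shift sequence built above; the other predicates swap the operands and/or complement the resulting bit:

#include <cstdint>

// After zero-extension, A - B computed in 64 bits is negative (as a signed
// value) exactly when A < B, so its sign bit is the comparison result.
uint64_t setULT32(uint32_t A, uint32_t B) {
  uint64_t Sub = uint64_t(A) - uint64_t(B); // ISD::SUB on the extended values
  return Sub >> 63;                         // ISD::SRL by Size - 1
}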
12776
12777SDValue PPCTargetLowering::ConvertSETCCToSubtract(SDNode *N,
12778 DAGCombinerInfo &DCI) const {
12779   assert(N->getOpcode() == ISD::SETCC && "ISD::SETCC Expected.");
12780
12781 SelectionDAG &DAG = DCI.DAG;
12782 SDLoc DL(N);
12783
12784 // Size of integers being compared has a critical role in the following
12785 // analysis, so we prefer to do this when all types are legal.
12786 if (!DCI.isAfterLegalizeDAG())
12787 return SDValue();
12788
12789   // If all users of SETCC extend its value to a legal integer type,
12790   // then we replace SETCC with a subtraction.
12791 for (SDNode::use_iterator UI = N->use_begin(),
12792 UE = N->use_end(); UI != UE; ++UI) {
12793 if (UI->getOpcode() != ISD::ZERO_EXTEND)
12794 return SDValue();
12795 }
12796
12797 ISD::CondCode CC = cast<CondCodeSDNode>(N->getOperand(2))->get();
12798 auto OpSize = N->getOperand(0).getValueSizeInBits();
12799
12800 unsigned Size = DAG.getDataLayout().getLargestLegalIntTypeSizeInBits();
12801
12802 if (OpSize < Size) {
12803 switch (CC) {
12804 default: break;
12805 case ISD::SETULT:
12806 return generateEquivalentSub(N, Size, false, false, DL, DAG);
12807 case ISD::SETULE:
12808 return generateEquivalentSub(N, Size, true, true, DL, DAG);
12809 case ISD::SETUGT:
12810 return generateEquivalentSub(N, Size, false, true, DL, DAG);
12811 case ISD::SETUGE:
12812 return generateEquivalentSub(N, Size, true, false, DL, DAG);
12813 }
12814 }
12815
12816 return SDValue();
12817}
12818
12819SDValue PPCTargetLowering::DAGCombineTruncBoolExt(SDNode *N,
12820 DAGCombinerInfo &DCI) const {
12821 SelectionDAG &DAG = DCI.DAG;
12822 SDLoc dl(N);
12823
12824   assert(Subtarget.useCRBits() && "Expecting to be tracking CR bits");
;
12825 // If we're tracking CR bits, we need to be careful that we don't have:
12826 // trunc(binary-ops(zext(x), zext(y)))
12827 // or
12828 // trunc(binary-ops(binary-ops(zext(x), zext(y)), ...)
12829 // such that we're unnecessarily moving things into GPRs when it would be
12830 // better to keep them in CR bits.
12831
12832 // Note that trunc here can be an actual i1 trunc, or can be the effective
12833 // truncation that comes from a setcc or select_cc.
12834 if (N->getOpcode() == ISD::TRUNCATE &&
12835 N->getValueType(0) != MVT::i1)
12836 return SDValue();
12837
12838 if (N->getOperand(0).getValueType() != MVT::i32 &&
12839 N->getOperand(0).getValueType() != MVT::i64)
12840 return SDValue();
12841
12842 if (N->getOpcode() == ISD::SETCC ||
12843 N->getOpcode() == ISD::SELECT_CC) {
12844 // If we're looking at a comparison, then we need to make sure that the
12845     // high bits (all except for the first) don't affect the result.
12846 ISD::CondCode CC =
12847 cast<CondCodeSDNode>(N->getOperand(
12848 N->getOpcode() == ISD::SETCC ? 2 : 4))->get();
12849 unsigned OpBits = N->getOperand(0).getValueSizeInBits();
12850
12851 if (ISD::isSignedIntSetCC(CC)) {
12852 if (DAG.ComputeNumSignBits(N->getOperand(0)) != OpBits ||
12853 DAG.ComputeNumSignBits(N->getOperand(1)) != OpBits)
12854 return SDValue();
12855 } else if (ISD::isUnsignedIntSetCC(CC)) {
12856 if (!DAG.MaskedValueIsZero(N->getOperand(0),
12857 APInt::getHighBitsSet(OpBits, OpBits-1)) ||
12858 !DAG.MaskedValueIsZero(N->getOperand(1),
12859 APInt::getHighBitsSet(OpBits, OpBits-1)))
12860 return (N->getOpcode() == ISD::SETCC ? ConvertSETCCToSubtract(N, DCI)
12861 : SDValue());
12862 } else {
12863 // This is neither a signed nor an unsigned comparison, just make sure
12864 // that the high bits are equal.
12865 KnownBits Op1Known = DAG.computeKnownBits(N->getOperand(0));
12866 KnownBits Op2Known = DAG.computeKnownBits(N->getOperand(1));
12867
12868 // We don't really care about what is known about the first bit (if
12869 // anything), so pretend that it is known zero for both to ensure they can
12870 // be compared as constants.
12871 Op1Known.Zero.setBit(0); Op1Known.One.clearBit(0);
12872 Op2Known.Zero.setBit(0); Op2Known.One.clearBit(0);
12873
12874 if (!Op1Known.isConstant() || !Op2Known.isConstant() ||
12875 Op1Known.getConstant() != Op2Known.getConstant())
12876 return SDValue();
12877 }
12878 }
12879
12880   // We now know that the higher-order bits are irrelevant; we just need to
12881 // make sure that all of the intermediate operations are bit operations, and
12882 // all inputs are extensions.
12883 if (N->getOperand(0).getOpcode() != ISD::AND &&
12884 N->getOperand(0).getOpcode() != ISD::OR &&
12885 N->getOperand(0).getOpcode() != ISD::XOR &&
12886 N->getOperand(0).getOpcode() != ISD::SELECT &&
12887 N->getOperand(0).getOpcode() != ISD::SELECT_CC &&
12888 N->getOperand(0).getOpcode() != ISD::TRUNCATE &&
12889 N->getOperand(0).getOpcode() != ISD::SIGN_EXTEND &&
12890 N->getOperand(0).getOpcode() != ISD::ZERO_EXTEND &&
12891 N->getOperand(0).getOpcode() != ISD::ANY_EXTEND)
12892 return SDValue();
12893
12894 if ((N->getOpcode() == ISD::SETCC || N->getOpcode() == ISD::SELECT_CC) &&
12895 N->getOperand(1).getOpcode() != ISD::AND &&
12896 N->getOperand(1).getOpcode() != ISD::OR &&
12897 N->getOperand(1).getOpcode() != ISD::XOR &&
12898 N->getOperand(1).getOpcode() != ISD::SELECT &&
12899 N->getOperand(1).getOpcode() != ISD::SELECT_CC &&
12900 N->getOperand(1).getOpcode() != ISD::TRUNCATE &&
12901 N->getOperand(1).getOpcode() != ISD::SIGN_EXTEND &&
12902 N->getOperand(1).getOpcode() != ISD::ZERO_EXTEND &&
12903 N->getOperand(1).getOpcode() != ISD::ANY_EXTEND)
12904 return SDValue();
12905
12906 SmallVector<SDValue, 4> Inputs;
12907 SmallVector<SDValue, 8> BinOps, PromOps;
12908 SmallPtrSet<SDNode *, 16> Visited;
12909
12910 for (unsigned i = 0; i < 2; ++i) {
12911 if (((N->getOperand(i).getOpcode() == ISD::SIGN_EXTEND ||
12912 N->getOperand(i).getOpcode() == ISD::ZERO_EXTEND ||
12913 N->getOperand(i).getOpcode() == ISD::ANY_EXTEND) &&
12914 N->getOperand(i).getOperand(0).getValueType() == MVT::i1) ||
12915 isa<ConstantSDNode>(N->getOperand(i)))
12916 Inputs.push_back(N->getOperand(i));
12917 else
12918 BinOps.push_back(N->getOperand(i));
12919
12920 if (N->getOpcode() == ISD::TRUNCATE)
12921 break;
12922 }
12923
12924 // Visit all inputs, collect all binary operations (and, or, xor and
12925 // select) that are all fed by extensions.
12926 while (!BinOps.empty()) {
12927 SDValue BinOp = BinOps.pop_back_val();
12928
12929 if (!Visited.insert(BinOp.getNode()).second)
12930 continue;
12931
12932 PromOps.push_back(BinOp);
12933
12934 for (unsigned i = 0, ie = BinOp.getNumOperands(); i != ie; ++i) {
12935 // The condition of the select is not promoted.
12936 if (BinOp.getOpcode() == ISD::SELECT && i == 0)
12937 continue;
12938 if (BinOp.getOpcode() == ISD::SELECT_CC && i != 2 && i != 3)
12939 continue;
12940
12941 if (((BinOp.getOperand(i).getOpcode() == ISD::SIGN_EXTEND ||
12942 BinOp.getOperand(i).getOpcode() == ISD::ZERO_EXTEND ||
12943 BinOp.getOperand(i).getOpcode() == ISD::ANY_EXTEND) &&
12944 BinOp.getOperand(i).getOperand(0).getValueType() == MVT::i1) ||
12945 isa<ConstantSDNode>(BinOp.getOperand(i))) {
12946 Inputs.push_back(BinOp.getOperand(i));
12947 } else if (BinOp.getOperand(i).getOpcode() == ISD::AND ||
12948 BinOp.getOperand(i).getOpcode() == ISD::OR ||
12949 BinOp.getOperand(i).getOpcode() == ISD::XOR ||
12950 BinOp.getOperand(i).getOpcode() == ISD::SELECT ||
12951 BinOp.getOperand(i).getOpcode() == ISD::SELECT_CC ||
12952 BinOp.getOperand(i).getOpcode() == ISD::TRUNCATE ||
12953 BinOp.getOperand(i).getOpcode() == ISD::SIGN_EXTEND ||
12954 BinOp.getOperand(i).getOpcode() == ISD::ZERO_EXTEND ||
12955 BinOp.getOperand(i).getOpcode() == ISD::ANY_EXTEND) {
12956 BinOps.push_back(BinOp.getOperand(i));
12957 } else {
12958 // We have an input that is not an extension or another binary
12959 // operation; we'll abort this transformation.
12960 return SDValue();
12961 }
12962 }
12963 }
12964
12965 // Make sure that this is a self-contained cluster of operations (which
12966 // is not quite the same thing as saying that everything has only one
12967 // use).
12968 for (unsigned i = 0, ie = Inputs.size(); i != ie; ++i) {
12969 if (isa<ConstantSDNode>(Inputs[i]))
12970 continue;
12971
12972 for (SDNode::use_iterator UI = Inputs[i].getNode()->use_begin(),
12973 UE = Inputs[i].getNode()->use_end();
12974 UI != UE; ++UI) {
12975 SDNode *User = *UI;
12976 if (User != N && !Visited.count(User))
12977 return SDValue();
12978
12979 // Make sure that we're not going to promote the non-output-value
12980 // operand(s) of SELECT or SELECT_CC.
12981 // FIXME: Although we could sometimes handle this, and it does occur in
12982 // practice that one of the condition inputs to the select is also one of
12983 // the outputs, we currently can't deal with this.
12984 if (User->getOpcode() == ISD::SELECT) {
12985 if (User->getOperand(0) == Inputs[i])
12986 return SDValue();
12987 } else if (User->getOpcode() == ISD::SELECT_CC) {
12988 if (User->getOperand(0) == Inputs[i] ||
12989 User->getOperand(1) == Inputs[i])
12990 return SDValue();
12991 }
12992 }
12993 }
12994
12995 for (unsigned i = 0, ie = PromOps.size(); i != ie; ++i) {
12996 for (SDNode::use_iterator UI = PromOps[i].getNode()->use_begin(),
12997 UE = PromOps[i].getNode()->use_end();
12998 UI != UE; ++UI) {
12999 SDNode *User = *UI;
13000 if (User != N && !Visited.count(User))
13001 return SDValue();
13002
13003 // Make sure that we're not going to promote the non-output-value
13004 // operand(s) of SELECT or SELECT_CC.
13005 // FIXME: Although we could sometimes handle this, and it does occur in
13006 // practice that one of the condition inputs to the select is also one of
13007 // the outputs, we currently can't deal with this.
13008 if (User->getOpcode() == ISD::SELECT) {
13009 if (User->getOperand(0) == PromOps[i])
13010 return SDValue();
13011 } else if (User->getOpcode() == ISD::SELECT_CC) {
13012 if (User->getOperand(0) == PromOps[i] ||
13013 User->getOperand(1) == PromOps[i])
13014 return SDValue();
13015 }
13016 }
13017 }
13018
13019 // Replace all inputs with the extension operand.
13020 for (unsigned i = 0, ie = Inputs.size(); i != ie; ++i) {
13021 // Constants may have users outside the cluster of to-be-promoted nodes,
13022 // and so we need to replace those as we do the promotions.
13023 if (isa<ConstantSDNode>(Inputs[i]))
13024 continue;
13025 else
13026 DAG.ReplaceAllUsesOfValueWith(Inputs[i], Inputs[i].getOperand(0));
13027 }
13028
13029 std::list<HandleSDNode> PromOpHandles;
13030 for (auto &PromOp : PromOps)
13031 PromOpHandles.emplace_back(PromOp);
13032
13033 // Replace all operations (these are all the same, but have a different
13034 // (i1) return type). DAG.getNode will validate that the types of
13035 // a binary operator match, so go through the list in reverse so that
13036 // we've likely promoted both operands first. Any intermediate truncations or
13037 // extensions disappear.
13038 while (!PromOpHandles.empty()) {
13039 SDValue PromOp = PromOpHandles.back().getValue();
13040 PromOpHandles.pop_back();
13041
13042 if (PromOp.getOpcode() == ISD::TRUNCATE ||
13043 PromOp.getOpcode() == ISD::SIGN_EXTEND ||
13044 PromOp.getOpcode() == ISD::ZERO_EXTEND ||
13045 PromOp.getOpcode() == ISD::ANY_EXTEND) {
13046 if (!isa<ConstantSDNode>(PromOp.getOperand(0)) &&
13047 PromOp.getOperand(0).getValueType() != MVT::i1) {
13048 // The operand is not yet ready (see comment below).
13049 PromOpHandles.emplace_front(PromOp);
13050 continue;
13051 }
13052
13053 SDValue RepValue = PromOp.getOperand(0);
13054 if (isa<ConstantSDNode>(RepValue))
13055 RepValue = DAG.getNode(ISD::TRUNCATE, dl, MVT::i1, RepValue);
13056
13057 DAG.ReplaceAllUsesOfValueWith(PromOp, RepValue);
13058 continue;
13059 }
13060
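    // C is the index of the first operand that actually gets promoted below:
    // SELECT's condition (operand 0) and SELECT_CC's comparison operands
    // (0 and 1) keep their original type and are skipped.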
13061 unsigned C;
13062 switch (PromOp.getOpcode()) {
13063 default: C = 0; break;
13064 case ISD::SELECT: C = 1; break;
13065 case ISD::SELECT_CC: C = 2; break;
13066 }
13067
13068 if ((!isa<ConstantSDNode>(PromOp.getOperand(C)) &&
13069 PromOp.getOperand(C).getValueType() != MVT::i1) ||
13070 (!isa<ConstantSDNode>(PromOp.getOperand(C+1)) &&
13071 PromOp.getOperand(C+1).getValueType() != MVT::i1)) {
13072 // The to-be-promoted operands of this node have not yet been
13073 // promoted (this should be rare because we're going through the
13074 // list backward, but if one of the operands has several users in
13075 // this cluster of to-be-promoted nodes, it is possible).
13076 PromOpHandles.emplace_front(PromOp);
13077 continue;
13078 }
13079
13080 SmallVector<SDValue, 3> Ops(PromOp.getNode()->op_begin(),
13081 PromOp.getNode()->op_end());
13082
13083 // If there are any constant inputs, make sure they're replaced now.
13084 for (unsigned i = 0; i < 2; ++i)
13085 if (isa<ConstantSDNode>(Ops[C+i]))
13086 Ops[C+i] = DAG.getNode(ISD::TRUNCATE, dl, MVT::i1, Ops[C+i]);
13087
13088 DAG.ReplaceAllUsesOfValueWith(PromOp,
13089 DAG.getNode(PromOp.getOpcode(), dl, MVT::i1, Ops));
13090 }
13091
13092 // Now we're left with the initial truncation itself.
13093 if (N->getOpcode() == ISD::TRUNCATE)
13094 return N->getOperand(0);
13095
13096 // Otherwise, this is a comparison. The operands to be compared have just
13097 // changed type (to i1), but everything else is the same.
13098 return SDValue(N, 0);
13099}
13100
13101SDValue PPCTargetLowering::DAGCombineExtBoolTrunc(SDNode *N,
13102 DAGCombinerInfo &DCI) const {
13103 SelectionDAG &DAG = DCI.DAG;
13104 SDLoc dl(N);
13105
13106 // If we're tracking CR bits, we need to be careful that we don't have:
13107 // zext(binary-ops(trunc(x), trunc(y)))
13108 // or
13109 // zext(binary-ops(binary-ops(trunc(x), trunc(y)), ...))
13110 // such that we're unnecessarily moving things into CR bits that can more
13111 // efficiently stay in GPRs. Note that if we're not certain that the high
13112 // bits are set as required by the final extension, we still may need to do
13113 // some masking to get the proper behavior.
13114
13115 // This same functionality is important on PPC64 when dealing with
13116 // 32-to-64-bit extensions; these occur often when 32-bit values are used as
13117 // the return values of functions. Because it is so similar, it is handled
13118 // here as well.
13119
13120 if (N->getValueType(0) != MVT::i32 &&
13121 N->getValueType(0) != MVT::i64)
13122 return SDValue();
13123
13124 if (!((N->getOperand(0).getValueType() == MVT::i1 && Subtarget.useCRBits()) ||
13125 (N->getOperand(0).getValueType() == MVT::i32 && Subtarget.isPPC64())))
13126 return SDValue();
13127
13128 if (N->getOperand(0).getOpcode() != ISD::AND &&
13129 N->getOperand(0).getOpcode() != ISD::OR &&
13130 N->getOperand(0).getOpcode() != ISD::XOR &&
13131 N->getOperand(0).getOpcode() != ISD::SELECT &&
13132 N->getOperand(0).getOpcode() != ISD::SELECT_CC)
13133 return SDValue();
13134
13135 SmallVector<SDValue, 4> Inputs;
13136 SmallVector<SDValue, 8> BinOps(1, N->getOperand(0)), PromOps;
13137 SmallPtrSet<SDNode *, 16> Visited;
13138
13139 // Visit all inputs, collect all binary operations (and, or, xor and
13140 // select) that are all fed by truncations.
13141 while (!BinOps.empty()) {
13142 SDValue BinOp = BinOps.pop_back_val();
13143
13144 if (!Visited.insert(BinOp.getNode()).second)
13145 continue;
13146
13147 PromOps.push_back(BinOp);
13148
13149 for (unsigned i = 0, ie = BinOp.getNumOperands(); i != ie; ++i) {
13150 // The condition of the select is not promoted.
13151 if (BinOp.getOpcode() == ISD::SELECT && i == 0)
13152 continue;
13153 if (BinOp.getOpcode() == ISD::SELECT_CC && i != 2 && i != 3)
13154 continue;
13155
13156 if (BinOp.getOperand(i).getOpcode() == ISD::TRUNCATE ||
13157 isa<ConstantSDNode>(BinOp.getOperand(i))) {
13158 Inputs.push_back(BinOp.getOperand(i));
13159 } else if (BinOp.getOperand(i).getOpcode() == ISD::AND ||
13160 BinOp.getOperand(i).getOpcode() == ISD::OR ||
13161 BinOp.getOperand(i).getOpcode() == ISD::XOR ||
13162 BinOp.getOperand(i).getOpcode() == ISD::SELECT ||
13163 BinOp.getOperand(i).getOpcode() == ISD::SELECT_CC) {
13164 BinOps.push_back(BinOp.getOperand(i));
13165 } else {
13166 // We have an input that is not a truncation or another binary
13167 // operation; we'll abort this transformation.
13168 return SDValue();
13169 }
13170 }
13171 }
13172
13173 // The operands of a select that must be truncated when the select is
13174 // promoted because the operand is actually part of the to-be-promoted set.
13175 DenseMap<SDNode *, EVT> SelectTruncOp[2];
13176
13177 // Make sure that this is a self-contained cluster of operations (which
13178 // is not quite the same thing as saying that everything has only one
13179 // use).
13180 for (unsigned i = 0, ie = Inputs.size(); i != ie; ++i) {
13181 if (isa<ConstantSDNode>(Inputs[i]))
13182 continue;
13183
13184 for (SDNode::use_iterator UI = Inputs[i].getNode()->use_begin(),
13185 UE = Inputs[i].getNode()->use_end();
13186 UI != UE; ++UI) {
13187 SDNode *User = *UI;
13188 if (User != N && !Visited.count(User))
13189 return SDValue();
13190
13191 // If we're going to promote the non-output-value operand(s) of SELECT or
13192 // SELECT_CC, record them for truncation.
13193 if (User->getOpcode() == ISD::SELECT) {
13194 if (User->getOperand(0) == Inputs[i])
13195 SelectTruncOp[0].insert(std::make_pair(User,
13196 User->getOperand(0).getValueType()));
13197 } else if (User->getOpcode() == ISD::SELECT_CC) {
13198 if (User->getOperand(0) == Inputs[i])
13199 SelectTruncOp[0].insert(std::make_pair(User,
13200 User->getOperand(0).getValueType()));
13201 if (User->getOperand(1) == Inputs[i])
13202 SelectTruncOp[1].insert(std::make_pair(User,
13203 User->getOperand(1).getValueType()));
13204 }
13205 }
13206 }
13207
13208 for (unsigned i = 0, ie = PromOps.size(); i != ie; ++i) {
13209 for (SDNode::use_iterator UI = PromOps[i].getNode()->use_begin(),
13210 UE = PromOps[i].getNode()->use_end();
13211 UI != UE; ++UI) {
13212 SDNode *User = *UI;
13213 if (User != N && !Visited.count(User))
13214 return SDValue();
13215
13216 // If we're going to promote the non-output-value operand(s) of SELECT or
13217 // SELECT_CC, record them for truncation.
13218 if (User->getOpcode() == ISD::SELECT) {
13219 if (User->getOperand(0) == PromOps[i])
13220 SelectTruncOp[0].insert(std::make_pair(User,
13221 User->getOperand(0).getValueType()));
13222 } else if (User->getOpcode() == ISD::SELECT_CC) {
13223 if (User->getOperand(0) == PromOps[i])
13224 SelectTruncOp[0].insert(std::make_pair(User,
13225 User->getOperand(0).getValueType()));
13226 if (User->getOperand(1) == PromOps[i])
13227 SelectTruncOp[1].insert(std::make_pair(User,
13228 User->getOperand(1).getValueType()));
13229 }
13230 }
13231 }
13232
13233 unsigned PromBits = N->getOperand(0).getValueSizeInBits();
13234 bool ReallyNeedsExt = false;
13235 if (N->getOpcode() != ISD::ANY_EXTEND) {
13236 // If not all of the inputs are already sign/zero extended, then
13237 // we'll still need to do that at the end.
13238 for (unsigned i = 0, ie = Inputs.size(); i != ie; ++i) {
13239 if (isa<ConstantSDNode>(Inputs[i]))
13240 continue;
13241
13242 unsigned OpBits =
13243 Inputs[i].getOperand(0).getValueSizeInBits();
13244      assert(PromBits < OpBits && "Truncation not to a smaller bit count?");
13245
13246 if ((N->getOpcode() == ISD::ZERO_EXTEND &&
13247 !DAG.MaskedValueIsZero(Inputs[i].getOperand(0),
13248 APInt::getHighBitsSet(OpBits,
13249 OpBits-PromBits))) ||
13250 (N->getOpcode() == ISD::SIGN_EXTEND &&
13251 DAG.ComputeNumSignBits(Inputs[i].getOperand(0)) <
13252 (OpBits-(PromBits-1)))) {
13253 ReallyNeedsExt = true;
13254 break;
13255 }
13256 }
13257 }
13258
13259 // Replace all inputs, either with the truncation operand, or a
13260 // truncation or extension to the final output type.
13261 for (unsigned i = 0, ie = Inputs.size(); i != ie; ++i) {
13262 // Constant inputs need to be replaced with the to-be-promoted nodes that
13263 // use them because they might have users outside of the cluster of
13264 // promoted nodes.
13265 if (isa<ConstantSDNode>(Inputs[i]))
13266 continue;
13267
13268 SDValue InSrc = Inputs[i].getOperand(0);
13269 if (Inputs[i].getValueType() == N->getValueType(0))
13270 DAG.ReplaceAllUsesOfValueWith(Inputs[i], InSrc);
13271 else if (N->getOpcode() == ISD::SIGN_EXTEND)
13272 DAG.ReplaceAllUsesOfValueWith(Inputs[i],
13273 DAG.getSExtOrTrunc(InSrc, dl, N->getValueType(0)));
13274 else if (N->getOpcode() == ISD::ZERO_EXTEND)
13275 DAG.ReplaceAllUsesOfValueWith(Inputs[i],
13276 DAG.getZExtOrTrunc(InSrc, dl, N->getValueType(0)));
13277 else
13278 DAG.ReplaceAllUsesOfValueWith(Inputs[i],
13279 DAG.getAnyExtOrTrunc(InSrc, dl, N->getValueType(0)));
13280 }
13281
13282 std::list<HandleSDNode> PromOpHandles;
13283 for (auto &PromOp : PromOps)
13284 PromOpHandles.emplace_back(PromOp);
13285
13286 // Replace all operations (these are all the same, but have a different
13287 // (promoted) return type). DAG.getNode will validate that the types of
13288 // a binary operator match, so go through the list in reverse so that
13289 // we've likely promoted both operands first.
13290 while (!PromOpHandles.empty()) {
13291 SDValue PromOp = PromOpHandles.back().getValue();
13292 PromOpHandles.pop_back();
13293
13294 unsigned C;
13295 switch (PromOp.getOpcode()) {
13296 default: C = 0; break;
13297 case ISD::SELECT: C = 1; break;
13298 case ISD::SELECT_CC: C = 2; break;
13299 }
13300
13301 if ((!isa<ConstantSDNode>(PromOp.getOperand(C)) &&
13302 PromOp.getOperand(C).getValueType() != N->getValueType(0)) ||
13303 (!isa<ConstantSDNode>(PromOp.getOperand(C+1)) &&
13304 PromOp.getOperand(C+1).getValueType() != N->getValueType(0))) {
13305 // The to-be-promoted operands of this node have not yet been
13306 // promoted (this should be rare because we're going through the
13307 // list backward, but if one of the operands has several users in
13308 // this cluster of to-be-promoted nodes, it is possible).
13309 PromOpHandles.emplace_front(PromOp);
13310 continue;
13311 }
13312
13313 // For SELECT and SELECT_CC nodes, we do a similar check for any
13314 // to-be-promoted comparison inputs.
13315 if (PromOp.getOpcode() == ISD::SELECT ||
13316 PromOp.getOpcode() == ISD::SELECT_CC) {
13317 if ((SelectTruncOp[0].count(PromOp.getNode()) &&
13318 PromOp.getOperand(0).getValueType() != N->getValueType(0)) ||
13319 (SelectTruncOp[1].count(PromOp.getNode()) &&
13320 PromOp.getOperand(1).getValueType() != N->getValueType(0))) {
13321 PromOpHandles.emplace_front(PromOp);
13322 continue;
13323 }
13324 }
13325
13326 SmallVector<SDValue, 3> Ops(PromOp.getNode()->op_begin(),
13327 PromOp.getNode()->op_end());
13328
13329 // If this node has constant inputs, then they'll need to be promoted here.
13330 for (unsigned i = 0; i < 2; ++i) {
13331 if (!isa<ConstantSDNode>(Ops[C+i]))
13332 continue;
13333 if (Ops[C+i].getValueType() == N->getValueType(0))
13334 continue;
13335
13336 if (N->getOpcode() == ISD::SIGN_EXTEND)
13337 Ops[C+i] = DAG.getSExtOrTrunc(Ops[C+i], dl, N->getValueType(0));
13338 else if (N->getOpcode() == ISD::ZERO_EXTEND)
13339 Ops[C+i] = DAG.getZExtOrTrunc(Ops[C+i], dl, N->getValueType(0));
13340 else
13341 Ops[C+i] = DAG.getAnyExtOrTrunc(Ops[C+i], dl, N->getValueType(0));
13342 }
13343
13344 // If we've promoted the comparison inputs of a SELECT or SELECT_CC,
13345 // truncate them again to the original value type.
13346 if (PromOp.getOpcode() == ISD::SELECT ||
13347 PromOp.getOpcode() == ISD::SELECT_CC) {
13348 auto SI0 = SelectTruncOp[0].find(PromOp.getNode());
13349 if (SI0 != SelectTruncOp[0].end())
13350 Ops[0] = DAG.getNode(ISD::TRUNCATE, dl, SI0->second, Ops[0]);
13351 auto SI1 = SelectTruncOp[1].find(PromOp.getNode());
13352 if (SI1 != SelectTruncOp[1].end())
13353 Ops[1] = DAG.getNode(ISD::TRUNCATE, dl, SI1->second, Ops[1]);
13354 }
13355
13356 DAG.ReplaceAllUsesOfValueWith(PromOp,
13357 DAG.getNode(PromOp.getOpcode(), dl, N->getValueType(0), Ops));
13358 }
13359
13360 // Now we're left with the initial extension itself.
13361 if (!ReallyNeedsExt)
13362 return N->getOperand(0);
13363
13364 // To zero extend, just mask off everything except for the first bit (in the
13365 // i1 case).
13366 if (N->getOpcode() == ISD::ZERO_EXTEND)
13367 return DAG.getNode(ISD::AND, dl, N->getValueType(0), N->getOperand(0),
13368 DAG.getConstant(APInt::getLowBitsSet(
13369 N->getValueSizeInBits(0), PromBits),
13370 dl, N->getValueType(0)));
13371
13372  assert(N->getOpcode() == ISD::SIGN_EXTEND &&
13373         "Invalid extension type");
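  // The sign extension from PromBits is materialized by shifting the value left
  // until its sign bit becomes the most significant bit, then arithmetic
  // shifting it back down by the same amount.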
13374 EVT ShiftAmountTy = getShiftAmountTy(N->getValueType(0), DAG.getDataLayout());
13375 SDValue ShiftCst =
13376 DAG.getConstant(N->getValueSizeInBits(0) - PromBits, dl, ShiftAmountTy);
13377 return DAG.getNode(
13378 ISD::SRA, dl, N->getValueType(0),
13379 DAG.getNode(ISD::SHL, dl, N->getValueType(0), N->getOperand(0), ShiftCst),
13380 ShiftCst);
13381}
13382
13383SDValue PPCTargetLowering::combineSetCC(SDNode *N,
13384 DAGCombinerInfo &DCI) const {
13385  assert(N->getOpcode() == ISD::SETCC &&
13386         "Should be called with a SETCC node");
13387
13388 ISD::CondCode CC = cast<CondCodeSDNode>(N->getOperand(2))->get();
13389 if (CC == ISD::SETNE || CC == ISD::SETEQ) {
13390 SDValue LHS = N->getOperand(0);
13391 SDValue RHS = N->getOperand(1);
13392
13393 // If there is a '0 - y' pattern, canonicalize the pattern to the RHS.
13394 if (LHS.getOpcode() == ISD::SUB && isNullConstant(LHS.getOperand(0)) &&
13395 LHS.hasOneUse())
13396 std::swap(LHS, RHS);
13397
13398 // x == 0-y --> x+y == 0
13399 // x != 0-y --> x+y != 0
13400 if (RHS.getOpcode() == ISD::SUB && isNullConstant(RHS.getOperand(0)) &&
13401 RHS.hasOneUse()) {
13402 SDLoc DL(N);
13403 SelectionDAG &DAG = DCI.DAG;
13404 EVT VT = N->getValueType(0);
13405 EVT OpVT = LHS.getValueType();
13406 SDValue Add = DAG.getNode(ISD::ADD, DL, OpVT, LHS, RHS.getOperand(1));
13407 return DAG.getSetCC(DL, VT, Add, DAG.getConstant(0, DL, OpVT), CC);
13408 }
13409 }
13410
13411 return DAGCombineTruncBoolExt(N, DCI);
13412}
13413
13414// Is this an extending load from an f32 to an f64?
13415static bool isFPExtLoad(SDValue Op) {
13416 if (LoadSDNode *LD = dyn_cast<LoadSDNode>(Op.getNode()))
13417 return LD->getExtensionType() == ISD::EXTLOAD &&
13418 Op.getValueType() == MVT::f64;
13419 return false;
13420}
13421
13422/// Reduces the number of fp-to-int conversions when building a vector.
13423///
13424/// If this vector is built out of floating to integer conversions,
13425/// transform it to a vector built out of floating point values followed by a
13426/// single floating to integer conversion of the vector.
13427/// Namely (build_vector (fptosi $A), (fptosi $B), ...)
13428/// becomes (fptosi (build_vector ($A, $B, ...)))
13429SDValue PPCTargetLowering::
13430combineElementTruncationToVectorTruncation(SDNode *N,
13431 DAGCombinerInfo &DCI) const {
13432  assert(N->getOpcode() == ISD::BUILD_VECTOR &&
13433         "Should be called with a BUILD_VECTOR node");
13434
13435 SelectionDAG &DAG = DCI.DAG;
13436 SDLoc dl(N);
13437
13438 SDValue FirstInput = N->getOperand(0);
13439  assert(FirstInput.getOpcode() == PPCISD::MFVSR &&
13440         "The input operand must be an fp-to-int conversion.");
13441
13442 // This combine happens after legalization so the fp_to_[su]i nodes are
13443 // already converted to PPCISD nodes.
13444 unsigned FirstConversion = FirstInput.getOperand(0).getOpcode();
13445 if (FirstConversion == PPCISD::FCTIDZ ||
13446 FirstConversion == PPCISD::FCTIDUZ ||
13447 FirstConversion == PPCISD::FCTIWZ ||
13448 FirstConversion == PPCISD::FCTIWUZ) {
13449 bool IsSplat = true;
13450 bool Is32Bit = FirstConversion == PPCISD::FCTIWZ ||
13451 FirstConversion == PPCISD::FCTIWUZ;
13452 EVT SrcVT = FirstInput.getOperand(0).getValueType();
13453 SmallVector<SDValue, 4> Ops;
13454 EVT TargetVT = N->getValueType(0);
13455 for (int i = 0, e = N->getNumOperands(); i < e; ++i) {
13456 SDValue NextOp = N->getOperand(i);
13457 if (NextOp.getOpcode() != PPCISD::MFVSR)
13458 return SDValue();
13459 unsigned NextConversion = NextOp.getOperand(0).getOpcode();
13460 if (NextConversion != FirstConversion)
13461 return SDValue();
13462 // If we are converting to 32-bit integers, we need to add an FP_ROUND.
13463 // This is not valid if the input was originally double precision. It is
13464 // also not profitable to do unless this is an extending load in which
13465 // case doing this combine will allow us to combine consecutive loads.
13466 if (Is32Bit && !isFPExtLoad(NextOp.getOperand(0).getOperand(0)))
13467 return SDValue();
13468 if (N->getOperand(i) != FirstInput)
13469 IsSplat = false;
13470 }
13471
13472 // If this is a splat, we leave it as-is since there will be only a single
13473 // fp-to-int conversion followed by a splat of the integer. This is better
13474 // for 32-bit and smaller ints and neutral for 64-bit ints.
13475 if (IsSplat)
13476 return SDValue();
13477
13478 // Now that we know we have the right type of node, get its operands
13479 for (int i = 0, e = N->getNumOperands(); i < e; ++i) {
13480 SDValue In = N->getOperand(i).getOperand(0);
13481 if (Is32Bit) {
13482 // For 32-bit values, we need to add an FP_ROUND node (if we made it
13483 // here, we know that all inputs are extending loads so this is safe).
13484 if (In.isUndef())
13485 Ops.push_back(DAG.getUNDEF(SrcVT));
13486 else {
13487 SDValue Trunc = DAG.getNode(ISD::FP_ROUND, dl,
13488 MVT::f32, In.getOperand(0),
13489 DAG.getIntPtrConstant(1, dl));
13490 Ops.push_back(Trunc);
13491 }
13492 } else
13493 Ops.push_back(In.isUndef() ? DAG.getUNDEF(SrcVT) : In.getOperand(0));
13494 }
13495
13496 unsigned Opcode;
13497 if (FirstConversion == PPCISD::FCTIDZ ||
13498 FirstConversion == PPCISD::FCTIWZ)
13499 Opcode = ISD::FP_TO_SINT;
13500 else
13501 Opcode = ISD::FP_TO_UINT;
13502
13503 EVT NewVT = TargetVT == MVT::v2i64 ? MVT::v2f64 : MVT::v4f32;
13504 SDValue BV = DAG.getBuildVector(NewVT, dl, Ops);
13505 return DAG.getNode(Opcode, dl, TargetVT, BV);
13506 }
13507 return SDValue();
13508}
13509
13510/// Reduce the number of loads when building a vector.
13511///
13512/// Building a vector out of multiple loads can be converted to a load
13513/// of the vector type if the loads are consecutive. If the loads are
13514/// consecutive but in descending order, a shuffle is added at the end
13515/// to reorder the vector.
13516static SDValue combineBVOfConsecutiveLoads(SDNode *N, SelectionDAG &DAG) {
13517  assert(N->getOpcode() == ISD::BUILD_VECTOR &&
13518         "Should be called with a BUILD_VECTOR node");
13519
13520 SDLoc dl(N);
13521
13522 // Return early for non-byte-sized types, as they can't be consecutive.
13523 if (!N->getValueType(0).getVectorElementType().isByteSized())
13524 return SDValue();
13525
13526 bool InputsAreConsecutiveLoads = true;
13527 bool InputsAreReverseConsecutive = true;
13528 unsigned ElemSize = N->getValueType(0).getScalarType().getStoreSize();
13529 SDValue FirstInput = N->getOperand(0);
13530 bool IsRoundOfExtLoad = false;
13531
13532 if (FirstInput.getOpcode() == ISD::FP_ROUND &&
13533 FirstInput.getOperand(0).getOpcode() == ISD::LOAD) {
13534 LoadSDNode *LD = dyn_cast<LoadSDNode>(FirstInput.getOperand(0));
13535 IsRoundOfExtLoad = LD->getExtensionType() == ISD::EXTLOAD;
13536 }
13537 // Not a build vector of (possibly fp_rounded) loads.
13538 if ((!IsRoundOfExtLoad && FirstInput.getOpcode() != ISD::LOAD) ||
13539 N->getNumOperands() == 1)
13540 return SDValue();
13541
13542 for (int i = 1, e = N->getNumOperands(); i < e; ++i) {
13543 // If any inputs are fp_round(extload), they all must be.
13544 if (IsRoundOfExtLoad && N->getOperand(i).getOpcode() != ISD::FP_ROUND)
13545 return SDValue();
13546
13547 SDValue NextInput = IsRoundOfExtLoad ? N->getOperand(i).getOperand(0) :
13548 N->getOperand(i);
13549 if (NextInput.getOpcode() != ISD::LOAD)
13550 return SDValue();
13551
13552 SDValue PreviousInput =
13553 IsRoundOfExtLoad ? N->getOperand(i-1).getOperand(0) : N->getOperand(i-1);
13554 LoadSDNode *LD1 = dyn_cast<LoadSDNode>(PreviousInput);
13555 LoadSDNode *LD2 = dyn_cast<LoadSDNode>(NextInput);
13556
13557 // If any inputs are fp_round(extload), they all must be.
13558 if (IsRoundOfExtLoad && LD2->getExtensionType() != ISD::EXTLOAD)
13559 return SDValue();
13560
13561 if (!isConsecutiveLS(LD2, LD1, ElemSize, 1, DAG))
13562 InputsAreConsecutiveLoads = false;
13563 if (!isConsecutiveLS(LD1, LD2, ElemSize, 1, DAG))
13564 InputsAreReverseConsecutive = false;
13565
13566 // Exit early if the loads are neither consecutive nor reverse consecutive.
13567 if (!InputsAreConsecutiveLoads && !InputsAreReverseConsecutive)
13568 return SDValue();
13569 }
13570
13571  assert(!(InputsAreConsecutiveLoads && InputsAreReverseConsecutive) &&
13572         "The loads cannot be both consecutive and reverse consecutive.");
13573
13574 SDValue FirstLoadOp =
13575 IsRoundOfExtLoad ? FirstInput.getOperand(0) : FirstInput;
13576 SDValue LastLoadOp =
13577 IsRoundOfExtLoad ? N->getOperand(N->getNumOperands()-1).getOperand(0) :
13578 N->getOperand(N->getNumOperands()-1);
13579
13580 LoadSDNode *LD1 = dyn_cast<LoadSDNode>(FirstLoadOp);
13581 LoadSDNode *LDL = dyn_cast<LoadSDNode>(LastLoadOp);
13582 if (InputsAreConsecutiveLoads) {
13583    assert(LD1 && "Input needs to be a LoadSDNode.");
13584 return DAG.getLoad(N->getValueType(0), dl, LD1->getChain(),
13585 LD1->getBasePtr(), LD1->getPointerInfo(),
13586 LD1->getAlignment());
13587 }
13588 if (InputsAreReverseConsecutive) {
13589    assert(LDL && "Input needs to be a LoadSDNode.");
13590 SDValue Load = DAG.getLoad(N->getValueType(0), dl, LDL->getChain(),
13591 LDL->getBasePtr(), LDL->getPointerInfo(),
13592 LDL->getAlignment());
13593 SmallVector<int, 16> Ops;
13594 for (int i = N->getNumOperands() - 1; i >= 0; i--)
13595 Ops.push_back(i);
13596
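    // The loads are in descending address order, so load the whole vector from
    // the lowest address (the last operand's load) and reverse the elements
    // with a shuffle.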
13597 return DAG.getVectorShuffle(N->getValueType(0), dl, Load,
13598 DAG.getUNDEF(N->getValueType(0)), Ops);
13599 }
13600 return SDValue();
13601}
13602
13603// This function adds the required vector_shuffle needed to get
13604// the elements of the vector extract in the correct position
13605// as specified by the CorrectElems encoding.
13606static SDValue addShuffleForVecExtend(SDNode *N, SelectionDAG &DAG,
13607 SDValue Input, uint64_t Elems,
13608 uint64_t CorrectElems) {
13609 SDLoc dl(N);
13610
13611 unsigned NumElems = Input.getValueType().getVectorNumElements();
13612 SmallVector<int, 16> ShuffleMask(NumElems, -1);
13613
13614 // Knowing the element indices being extracted from the original
13615 // vector and the order in which they're being inserted, just put
13616 // them at element indices required for the instruction.
13617 for (unsigned i = 0; i < N->getNumOperands(); i++) {
13618 if (DAG.getDataLayout().isLittleEndian())
13619 ShuffleMask[CorrectElems & 0xF] = Elems & 0xF;
13620 else
13621 ShuffleMask[(CorrectElems & 0xF0) >> 4] = (Elems & 0xF0) >> 4;
13622 CorrectElems = CorrectElems >> 8;
13623 Elems = Elems >> 8;
13624 }
13625
13626 SDValue Shuffle =
13627 DAG.getVectorShuffle(Input.getValueType(), dl, Input,
13628 DAG.getUNDEF(Input.getValueType()), ShuffleMask);
13629
13630 EVT VT = N->getValueType(0);
13631 SDValue Conv = DAG.getBitcast(VT, Shuffle);
13632
13633 EVT ExtVT = EVT::getVectorVT(*DAG.getContext(),
13634 Input.getValueType().getVectorElementType(),
13635 VT.getVectorNumElements());
13636 return DAG.getNode(ISD::SIGN_EXTEND_INREG, dl, VT, Conv,
13637 DAG.getValueType(ExtVT));
13638}
13639
13640// Look for build vector patterns where input operands come from sign
13641// extended vector_extract elements of specific indices. If the correct indices
13642// aren't used, add a vector shuffle to fix up the indices and create
13643// SIGN_EXTEND_INREG node which selects the vector sign extend instructions
13644// during instruction selection.
13645static SDValue combineBVOfVecSExt(SDNode *N, SelectionDAG &DAG) {
13646 // This array encodes the indices that the vector sign extend instructions
13647 // extract from when extending from one type to another for both BE and LE.
13648 // The right nibble of each byte corresponds to the LE indices,
13649 // and the left nibble of each byte corresponds to the BE indices.
13650 // For example: 0x3074B8FC byte->word
13651 // For LE: the allowed indices are: 0x0,0x4,0x8,0xC
13652 // For BE: the allowed indices are: 0x3,0x7,0xB,0xF
13653 // For example: 0x000070F8 byte->double word
13654 // For LE: the allowed indices are: 0x0,0x8
13655 // For BE: the allowed indices are: 0x7,0xF
13656 uint64_t TargetElems[] = {
13657 0x3074B8FC, // b->w
13658 0x000070F8, // b->d
13659 0x10325476, // h->w
13660 0x00003074, // h->d
13661 0x00001032, // w->d
13662 };
13663
13664 uint64_t Elems = 0;
13665 int Index;
13666 SDValue Input;
13667
13668 auto isSExtOfVecExtract = [&](SDValue Op) -> bool {
13669 if (!Op)
13670 return false;
13671 if (Op.getOpcode() != ISD::SIGN_EXTEND &&
13672 Op.getOpcode() != ISD::SIGN_EXTEND_INREG)
13673 return false;
13674
13675 // A SIGN_EXTEND_INREG might be fed by an ANY_EXTEND to produce a value
13676 // of the right width.
13677 SDValue Extract = Op.getOperand(0);
13678 if (Extract.getOpcode() == ISD::ANY_EXTEND)
13679 Extract = Extract.getOperand(0);
13680 if (Extract.getOpcode() != ISD::EXTRACT_VECTOR_ELT)
13681 return false;
13682
13683 ConstantSDNode *ExtOp = dyn_cast<ConstantSDNode>(Extract.getOperand(1));
13684 if (!ExtOp)
13685 return false;
13686
13687 Index = ExtOp->getZExtValue();
13688 if (Input && Input != Extract.getOperand(0))
13689 return false;
13690
13691 if (!Input)
13692 Input = Extract.getOperand(0);
13693
13694 Elems = Elems << 8;
13695 Index = DAG.getDataLayout().isLittleEndian() ? Index : Index << 4;
13696 Elems |= Index;
13697
13698 return true;
13699 };
13700
13701 // If the build vector operands aren't sign extended vector extracts
13702 // of the same input vector, then return.
13703 for (unsigned i = 0; i < N->getNumOperands(); i++) {
13704 if (!isSExtOfVecExtract(N->getOperand(i))) {
13705 return SDValue();
13706 }
13707 }
13708
13709 // If the vector extract indices are not correct, add the appropriate
13710 // vector_shuffle.
13711 int TgtElemArrayIdx;
13712 int InputSize = Input.getValueType().getScalarSizeInBits();
13713 int OutputSize = N->getValueType(0).getScalarSizeInBits();
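  // The sum of the input and output element widths uniquely identifies the
  // extension kind: 8+32=40 (b->w), 8+64=72 (b->d), 16+32=48 (h->w),
  // 16+64=80 (h->d), 32+64=96 (w->d).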
13714 if (InputSize + OutputSize == 40)
13715 TgtElemArrayIdx = 0;
13716 else if (InputSize + OutputSize == 72)
13717 TgtElemArrayIdx = 1;
13718 else if (InputSize + OutputSize == 48)
13719 TgtElemArrayIdx = 2;
13720 else if (InputSize + OutputSize == 80)
13721 TgtElemArrayIdx = 3;
13722 else if (InputSize + OutputSize == 96)
13723 TgtElemArrayIdx = 4;
13724 else
13725 return SDValue();
13726
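  // Keep only the nibbles that apply to this endianness (right nibbles for LE,
  // left nibbles for BE) before comparing against the indices that were
  // actually extracted.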
13727 uint64_t CorrectElems = TargetElems[TgtElemArrayIdx];
13728 CorrectElems = DAG.getDataLayout().isLittleEndian()
13729 ? CorrectElems & 0x0F0F0F0F0F0F0F0F
13730 : CorrectElems & 0xF0F0F0F0F0F0F0F0;
13731 if (Elems != CorrectElems) {
13732 return addShuffleForVecExtend(N, DAG, Input, Elems, CorrectElems);
13733 }
13734
13735 // Regular lowering will catch cases where a shuffle is not needed.
13736 return SDValue();
13737}
13738
13739// Look for the pattern of a load from a narrow width to i128, feeding
13740// into a BUILD_VECTOR of v1i128. Replace this sequence with a PPCISD node
13741// (LXVRZX). This node represents a zero extending load that will be matched
13742// to the Load VSX Vector Rightmost instructions.
13743static SDValue combineBVZEXTLOAD(SDNode *N, SelectionDAG &DAG) {
13744 SDLoc DL(N);
13745
13746 // This combine is only eligible for a BUILD_VECTOR of v1i128.
13747 if (N->getValueType(0) != MVT::v1i128)
13748 return SDValue();
13749
13750 SDValue Operand = N->getOperand(0);
13751 // Proceed with the transformation if the operand to the BUILD_VECTOR
13752 // is a load instruction.
13753 if (Operand.getOpcode() != ISD::LOAD)
13754 return SDValue();
13755
13756 LoadSDNode *LD = dyn_cast<LoadSDNode>(Operand);
13757 EVT MemoryType = LD->getMemoryVT();
13758
13759 // This transformation is only valid if we are loading either a byte,
13760 // halfword, word, or doubleword.
13761 bool ValidLDType = MemoryType == MVT::i8 || MemoryType == MVT::i16 ||
13762 MemoryType == MVT::i32 || MemoryType == MVT::i64;
13763
13764 // Ensure that the load from the narrow width is being zero extended to i128.
13765 if (!ValidLDType ||
13766 (LD->getExtensionType() != ISD::ZEXTLOAD &&
13767 LD->getExtensionType() != ISD::EXTLOAD))
13768 return SDValue();
13769
13770 SDValue LoadOps[] = {
13771 LD->getChain(), LD->getBasePtr(),
13772 DAG.getIntPtrConstant(MemoryType.getScalarSizeInBits(), DL)};
13773
13774 return DAG.getMemIntrinsicNode(PPCISD::LXVRZX, DL,
13775 DAG.getVTList(MVT::v1i128, MVT::Other),
13776 LoadOps, MemoryType, LD->getMemOperand());
13777}
13778
13779SDValue PPCTargetLowering::DAGCombineBuildVector(SDNode *N,
13780 DAGCombinerInfo &DCI) const {
13781  assert(N->getOpcode() == ISD::BUILD_VECTOR &&
13782         "Should be called with a BUILD_VECTOR node");
13783
13784 SelectionDAG &DAG = DCI.DAG;
13785 SDLoc dl(N);
13786
13787 if (!Subtarget.hasVSX())
13788 return SDValue();
13789
13790 // The target independent DAG combiner will leave a build_vector of
13791 // float-to-int conversions intact. We can generate MUCH better code for
13792 // a float-to-int conversion of a vector of floats.
13793 SDValue FirstInput = N->getOperand(0);
13794 if (FirstInput.getOpcode() == PPCISD::MFVSR) {
13795 SDValue Reduced = combineElementTruncationToVectorTruncation(N, DCI);
13796 if (Reduced)
13797 return Reduced;
13798 }
13799
13800 // If we're building a vector out of consecutive loads, just load that
13801 // vector type.
13802 SDValue Reduced = combineBVOfConsecutiveLoads(N, DAG);
13803 if (Reduced)
13804 return Reduced;
13805
13806 // If we're building a vector out of extended elements from another vector
13807 // we have P9 vector integer extend instructions. The code assumes legal
13808 // input types (i.e. it can't handle things like v4i16) so do not run before
13809 // legalization.
13810 if (Subtarget.hasP9Altivec() && !DCI.isBeforeLegalize()) {
13811 Reduced = combineBVOfVecSExt(N, DAG);
13812 if (Reduced)
13813 return Reduced;
13814 }
13815
13816 // On Power10, the Load VSX Vector Rightmost instructions can be utilized
13817 // if this is a BUILD_VECTOR of v1i128, and if the operand to the BUILD_VECTOR
13818 // is a load from <valid narrow width> to i128.
13819 if (Subtarget.isISA3_1()) {
13820 SDValue BVOfZLoad = combineBVZEXTLOAD(N, DAG);
13821 if (BVOfZLoad)
13822 return BVOfZLoad;
13823 }
13824
13825 if (N->getValueType(0) != MVT::v2f64)
13826 return SDValue();
13827
13828 // Looking for:
13829 // (build_vector ([su]int_to_fp (extractelt 0)), [su]int_to_fp (extractelt 1))
13830 if (FirstInput.getOpcode() != ISD::SINT_TO_FP &&
13831 FirstInput.getOpcode() != ISD::UINT_TO_FP)
13832 return SDValue();
13833 if (N->getOperand(1).getOpcode() != ISD::SINT_TO_FP &&
13834 N->getOperand(1).getOpcode() != ISD::UINT_TO_FP)
13835 return SDValue();
13836 if (FirstInput.getOpcode() != N->getOperand(1).getOpcode())
13837 return SDValue();
13838
13839 SDValue Ext1 = FirstInput.getOperand(0);
13840 SDValue Ext2 = N->getOperand(1).getOperand(0);
13841 if(Ext1.getOpcode() != ISD::EXTRACT_VECTOR_ELT ||
13842 Ext2.getOpcode() != ISD::EXTRACT_VECTOR_ELT)
13843 return SDValue();
13844
13845 ConstantSDNode *Ext1Op = dyn_cast<ConstantSDNode>(Ext1.getOperand(1));
13846 ConstantSDNode *Ext2Op = dyn_cast<ConstantSDNode>(Ext2.getOperand(1));
13847 if (!Ext1Op || !Ext2Op)
13848 return SDValue();
13849 if (Ext1.getOperand(0).getValueType() != MVT::v4i32 ||
13850 Ext1.getOperand(0) != Ext2.getOperand(0))
13851 return SDValue();
13852
13853 int FirstElem = Ext1Op->getZExtValue();
13854 int SecondElem = Ext2Op->getZExtValue();
13855 int SubvecIdx;
13856 if (FirstElem == 0 && SecondElem == 1)
13857 SubvecIdx = Subtarget.isLittleEndian() ? 1 : 0;
13858 else if (FirstElem == 2 && SecondElem == 3)
13859 SubvecIdx = Subtarget.isLittleEndian() ? 0 : 1;
13860 else
13861 return SDValue();
13862
13863 SDValue SrcVec = Ext1.getOperand(0);
13864 auto NodeType = (N->getOperand(1).getOpcode() == ISD::SINT_TO_FP) ?
13865 PPCISD::SINT_VEC_TO_FP : PPCISD::UINT_VEC_TO_FP;
13866 return DAG.getNode(NodeType, dl, MVT::v2f64,
13867 SrcVec, DAG.getIntPtrConstant(SubvecIdx, dl));
13868}
13869
13870SDValue PPCTargetLowering::combineFPToIntToFP(SDNode *N,
13871 DAGCombinerInfo &DCI) const {
13872  assert((N->getOpcode() == ISD::SINT_TO_FP ||
13873          N->getOpcode() == ISD::UINT_TO_FP) &&
13874         "Need an int -> FP conversion node here");
13875
13876 if (useSoftFloat() || !Subtarget.has64BitSupport())
13877 return SDValue();
13878
13879 SelectionDAG &DAG = DCI.DAG;
13880 SDLoc dl(N);
13881 SDValue Op(N, 0);
13882
13883 // Don't handle ppc_fp128 here or conversions that are out-of-range capable
13884 // from the hardware.
13885 if (Op.getValueType() != MVT::f32 && Op.getValueType() != MVT::f64)
13886 return SDValue();
13887 if (!Op.getOperand(0).getValueType().isSimple())
13888 return SDValue();
13889 if (Op.getOperand(0).getValueType().getSimpleVT() <= MVT(MVT::i1) ||
13890 Op.getOperand(0).getValueType().getSimpleVT() > MVT(MVT::i64))
13891 return SDValue();
13892
13893 SDValue FirstOperand(Op.getOperand(0));
13894 bool SubWordLoad = FirstOperand.getOpcode() == ISD::LOAD &&
13895 (FirstOperand.getValueType() == MVT::i8 ||
13896 FirstOperand.getValueType() == MVT::i16);
13897 if (Subtarget.hasP9Vector() && Subtarget.hasP9Altivec() && SubWordLoad) {
13898 bool Signed = N->getOpcode() == ISD::SINT_TO_FP;
13899 bool DstDouble = Op.getValueType() == MVT::f64;
13900 unsigned ConvOp = Signed ?
13901 (DstDouble ? PPCISD::FCFID : PPCISD::FCFIDS) :
13902 (DstDouble ? PPCISD::FCFIDU : PPCISD::FCFIDUS);
13903 SDValue WidthConst =
13904 DAG.getIntPtrConstant(FirstOperand.getValueType() == MVT::i8 ? 1 : 2,
13905 dl, false);
13906 LoadSDNode *LDN = cast<LoadSDNode>(FirstOperand.getNode());
13907 SDValue Ops[] = { LDN->getChain(), LDN->getBasePtr(), WidthConst };
13908 SDValue Ld = DAG.getMemIntrinsicNode(PPCISD::LXSIZX, dl,
13909 DAG.getVTList(MVT::f64, MVT::Other),
13910 Ops, MVT::i8, LDN->getMemOperand());
13911
13912 // For signed conversion, we need to sign-extend the value in the VSR
13913 if (Signed) {
13914 SDValue ExtOps[] = { Ld, WidthConst };
13915 SDValue Ext = DAG.getNode(PPCISD::VEXTS, dl, MVT::f64, ExtOps);
13916 return DAG.getNode(ConvOp, dl, DstDouble ? MVT::f64 : MVT::f32, Ext);
13917 } else
13918 return DAG.getNode(ConvOp, dl, DstDouble ? MVT::f64 : MVT::f32, Ld);
13919 }
13920
13921
13922 // For i32 intermediate values, unfortunately, the conversion functions
13923 // leave the upper 32 bits of the value undefined. Within the set of
13924 // scalar instructions, we have no method for zero- or sign-extending the
13925 // value. Thus, we cannot handle i32 intermediate values here.
13926 if (Op.getOperand(0).getValueType() == MVT::i32)
13927 return SDValue();
13928
13929  assert((Op.getOpcode() == ISD::SINT_TO_FP || Subtarget.hasFPCVT()) &&
13930         "UINT_TO_FP is supported only with FPCVT");
13931
13932 // If we have FCFIDS, then use it when converting to single-precision.
13933 // Otherwise, convert to double-precision and then round.
13934 unsigned FCFOp = (Subtarget.hasFPCVT() && Op.getValueType() == MVT::f32)
13935 ? (Op.getOpcode() == ISD::UINT_TO_FP ? PPCISD::FCFIDUS
13936 : PPCISD::FCFIDS)
13937 : (Op.getOpcode() == ISD::UINT_TO_FP ? PPCISD::FCFIDU
13938 : PPCISD::FCFID);
13939 MVT FCFTy = (Subtarget.hasFPCVT() && Op.getValueType() == MVT::f32)
13940 ? MVT::f32
13941 : MVT::f64;
13942
13943 // If we're converting from a float, to an int, and back to a float again,
13944 // then we don't need the store/load pair at all.
13945 if ((Op.getOperand(0).getOpcode() == ISD::FP_TO_UINT &&
13946 Subtarget.hasFPCVT()) ||
13947 (Op.getOperand(0).getOpcode() == ISD::FP_TO_SINT)) {
13948 SDValue Src = Op.getOperand(0).getOperand(0);
13949 if (Src.getValueType() == MVT::f32) {
13950 Src = DAG.getNode(ISD::FP_EXTEND, dl, MVT::f64, Src);
13951 DCI.AddToWorklist(Src.getNode());
13952 } else if (Src.getValueType() != MVT::f64) {
13953 // Make sure that we don't pick up a ppc_fp128 source value.
13954 return SDValue();
13955 }
13956
13957 unsigned FCTOp =
13958 Op.getOperand(0).getOpcode() == ISD::FP_TO_SINT ? PPCISD::FCTIDZ :
13959 PPCISD::FCTIDUZ;
13960
13961 SDValue Tmp = DAG.getNode(FCTOp, dl, MVT::f64, Src);
13962 SDValue FP = DAG.getNode(FCFOp, dl, FCFTy, Tmp);
13963
13964 if (Op.getValueType() == MVT::f32 && !Subtarget.hasFPCVT()) {
13965 FP = DAG.getNode(ISD::FP_ROUND, dl,
13966 MVT::f32, FP, DAG.getIntPtrConstant(0, dl));
13967 DCI.AddToWorklist(FP.getNode());
13968 }
13969
13970 return FP;
13971 }
13972
13973 return SDValue();
13974}
13975
13976// expandVSXLoadForLE - Convert VSX loads (which may be intrinsics for
13977// builtins) into loads with swaps.
13978SDValue PPCTargetLowering::expandVSXLoadForLE(SDNode *N,
13979 DAGCombinerInfo &DCI) const {
13980 SelectionDAG &DAG = DCI.DAG;
13981 SDLoc dl(N);
13982 SDValue Chain;
13983 SDValue Base;
13984 MachineMemOperand *MMO;
13985
13986 switch (N->getOpcode()) {
13987 default:
13988    llvm_unreachable("Unexpected opcode for little endian VSX load");
13989 case ISD::LOAD: {
13990 LoadSDNode *LD = cast<LoadSDNode>(N);
13991 Chain = LD->getChain();
13992 Base = LD->getBasePtr();
13993 MMO = LD->getMemOperand();
13994 // If the MMO suggests this isn't a load of a full vector, leave
13995 // things alone. For a built-in, we have to make the change for
13996 // correctness, so if there is a size problem that will be a bug.
13997 if (MMO->getSize() < 16)
13998 return SDValue();
13999 break;
14000 }
14001 case ISD::INTRINSIC_W_CHAIN: {
14002 MemIntrinsicSDNode *Intrin = cast<MemIntrinsicSDNode>(N);
14003 Chain = Intrin->getChain();
14004 // Similarly to the store case below, Intrin->getBasePtr() doesn't get
14005 // us what we want. Get operand 2 instead.
14006 Base = Intrin->getOperand(2);
14007 MMO = Intrin->getMemOperand();
14008 break;
14009 }
14010 }
14011
14012 MVT VecTy = N->getValueType(0).getSimpleVT();
14013
14014 // Do not expand to PPCISD::LXVD2X + PPCISD::XXSWAPD when the load is
14015 // aligned and the type is a vector with elements up to 4 bytes
14016 if (Subtarget.needsSwapsForVSXMemOps() && MMO->getAlign() >= Align(16) &&
14017 VecTy.getScalarSizeInBits() <= 32) {
14018 return SDValue();
14019 }
14020
14021 SDValue LoadOps[] = { Chain, Base };
14022 SDValue Load = DAG.getMemIntrinsicNode(PPCISD::LXVD2X, dl,
14023 DAG.getVTList(MVT::v2f64, MVT::Other),
14024 LoadOps, MVT::v2f64, MMO);
14025
14026 DCI.AddToWorklist(Load.getNode());
14027 Chain = Load.getValue(1);
14028 SDValue Swap = DAG.getNode(
14029 PPCISD::XXSWAPD, dl, DAG.getVTList(MVT::v2f64, MVT::Other), Chain, Load);
14030 DCI.AddToWorklist(Swap.getNode());
14031
14032 // Add a bitcast if the resulting load type doesn't match v2f64.
14033 if (VecTy != MVT::v2f64) {
14034 SDValue N = DAG.getNode(ISD::BITCAST, dl, VecTy, Swap);
14035 DCI.AddToWorklist(N.getNode());
14036 // Package {bitcast value, swap's chain} to match Load's shape.
14037 return DAG.getNode(ISD::MERGE_VALUES, dl, DAG.getVTList(VecTy, MVT::Other),
14038 N, Swap.getValue(1));
14039 }
14040
14041 return Swap;
14042}
14043
14044// expandVSXStoreForLE - Convert VSX stores (which may be intrinsics for
14045// builtins) into stores with swaps.
14046SDValue PPCTargetLowering::expandVSXStoreForLE(SDNode *N,
14047 DAGCombinerInfo &DCI) const {
14048 SelectionDAG &DAG = DCI.DAG;
14049 SDLoc dl(N);
14050 SDValue Chain;
14051 SDValue Base;
14052 unsigned SrcOpnd;
14053 MachineMemOperand *MMO;
14054
14055 switch (N->getOpcode()) {
14056 default:
14057    llvm_unreachable("Unexpected opcode for little endian VSX store");
14058 case ISD::STORE: {
14059 StoreSDNode *ST = cast<StoreSDNode>(N);
14060 Chain = ST->getChain();
14061 Base = ST->getBasePtr();
14062 MMO = ST->getMemOperand();
14063 SrcOpnd = 1;
14064 // If the MMO suggests this isn't a store of a full vector, leave
14065 // things alone. For a built-in, we have to make the change for
14066 // correctness, so if there is a size problem that will be a bug.
14067 if (MMO->getSize() < 16)
14068 return SDValue();
14069 break;
14070 }
14071 case ISD::INTRINSIC_VOID: {
14072 MemIntrinsicSDNode *Intrin = cast<MemIntrinsicSDNode>(N);
14073 Chain = Intrin->getChain();
14074 // Intrin->getBasePtr() oddly does not get what we want.
14075 Base = Intrin->getOperand(3);
14076 MMO = Intrin->getMemOperand();
14077 SrcOpnd = 2;
14078 break;
14079 }
14080 }
14081
14082 SDValue Src = N->getOperand(SrcOpnd);
14083 MVT VecTy = Src.getValueType().getSimpleVT();
14084
14085 // Do not expand to PPCISD::XXSWAPD and PPCISD::STXVD2X when the store is
14086 // aligned and the type is a vector with elements up to 4 bytes
14087 if (Subtarget.needsSwapsForVSXMemOps() && MMO->getAlign() >= Align(16) &&
14088 VecTy.getScalarSizeInBits() <= 32) {
14089 return SDValue();
14090 }
14091
14092 // All stores are done as v2f64 and possible bit cast.
14093 if (VecTy != MVT::v2f64) {
14094 Src = DAG.getNode(ISD::BITCAST, dl, MVT::v2f64, Src);
14095 DCI.AddToWorklist(Src.getNode());
14096 }
14097
14098 SDValue Swap = DAG.getNode(PPCISD::XXSWAPD, dl,
14099 DAG.getVTList(MVT::v2f64, MVT::Other), Chain, Src);
14100 DCI.AddToWorklist(Swap.getNode());
14101 Chain = Swap.getValue(1);
14102 SDValue StoreOps[] = { Chain, Swap, Base };
14103 SDValue Store = DAG.getMemIntrinsicNode(PPCISD::STXVD2X, dl,
14104 DAG.getVTList(MVT::Other),
14105 StoreOps, VecTy, MMO);
14106 DCI.AddToWorklist(Store.getNode());
14107 return Store;
14108}
14109
14110// Handle DAG combine for STORE (FP_TO_INT F).
14111SDValue PPCTargetLowering::combineStoreFPToInt(SDNode *N,
14112 DAGCombinerInfo &DCI) const {
14113
14114 SelectionDAG &DAG = DCI.DAG;
14115 SDLoc dl(N);
14116 unsigned Opcode = N->getOperand(1).getOpcode();
14117
14118   assert((Opcode == ISD::FP_TO_SINT || Opcode == ISD::FP_TO_UINT)
14119          && "Not a FP_TO_INT Instruction!");
14120
14121 SDValue Val = N->getOperand(1).getOperand(0);
14122 EVT Op1VT = N->getOperand(1).getValueType();
14123 EVT ResVT = Val.getValueType();
14124
14125 if (!isTypeLegal(ResVT))
14126 return SDValue();
14127
14128 // Only perform combine for conversion to i64/i32 or power9 i16/i8.
14129 bool ValidTypeForStoreFltAsInt =
14130 (Op1VT == MVT::i32 || Op1VT == MVT::i64 ||
14131 (Subtarget.hasP9Vector() && (Op1VT == MVT::i16 || Op1VT == MVT::i8)));
14132
14133 if (ResVT == MVT::ppcf128 || !Subtarget.hasP8Vector() ||
14134 cast<StoreSDNode>(N)->isTruncatingStore() || !ValidTypeForStoreFltAsInt)
14135 return SDValue();
14136
14137 // Extend f32 values to f64
14138 if (ResVT.getScalarSizeInBits() == 32) {
14139 Val = DAG.getNode(ISD::FP_EXTEND, dl, MVT::f64, Val);
14140 DCI.AddToWorklist(Val.getNode());
14141 }
14142
14143 // Set signed or unsigned conversion opcode.
14144 unsigned ConvOpcode = (Opcode == ISD::FP_TO_SINT) ?
14145 PPCISD::FP_TO_SINT_IN_VSR :
14146 PPCISD::FP_TO_UINT_IN_VSR;
14147
14148 Val = DAG.getNode(ConvOpcode,
14149 dl, ResVT == MVT::f128 ? MVT::f128 : MVT::f64, Val);
14150 DCI.AddToWorklist(Val.getNode());
14151
14152 // Set number of bytes being converted.
14153 unsigned ByteSize = Op1VT.getScalarSizeInBits() / 8;
14154 SDValue Ops[] = { N->getOperand(0), Val, N->getOperand(2),
14155 DAG.getIntPtrConstant(ByteSize, dl, false),
14156 DAG.getValueType(Op1VT) };
14157
14158 Val = DAG.getMemIntrinsicNode(PPCISD::ST_VSR_SCAL_INT, dl,
14159 DAG.getVTList(MVT::Other), Ops,
14160 cast<StoreSDNode>(N)->getMemoryVT(),
14161 cast<StoreSDNode>(N)->getMemOperand());
14162
14163 DCI.AddToWorklist(Val.getNode());
14164 return Val;
14165}
14166
14167static bool isAlternatingShuffMask(const ArrayRef<int> &Mask, int NumElts) {
14168 // Check that the source of the element keeps flipping
14169 // (i.e. Mask[i] < NumElts -> Mask[i+i] >= NumElts).
14170 bool PrevElemFromFirstVec = Mask[0] < NumElts;
14171 for (int i = 1, e = Mask.size(); i < e; i++) {
14172 if (PrevElemFromFirstVec && Mask[i] < NumElts)
14173 return false;
14174 if (!PrevElemFromFirstVec && Mask[i] >= NumElts)
14175 return false;
14176 PrevElemFromFirstVec = !PrevElemFromFirstVec;
14177 }
14178 return true;
14179}
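// A minimal standalone sketch (separate from this file) of the alternating
// check above, using std::vector in place of ArrayRef; the masks below are
// illustrative values only.
#include <cassert>
#include <vector>

static bool isAlternating(const std::vector<int> &Mask, int NumElts) {
  bool PrevFromFirstVec = Mask[0] < NumElts;
  for (size_t i = 1; i < Mask.size(); ++i) {
    bool FromFirstVec = Mask[i] < NumElts;
    if (FromFirstVec == PrevFromFirstVec) // two picks in a row from the same
      return false;                       // source vector -> not alternating
    PrevFromFirstVec = FromFirstVec;
  }
  return true;
}

int main() {
  // Even elements from the first v8i16 input, odd elements from the second.
  assert(isAlternating({0, 9, 1, 11, 2, 13, 3, 15}, 8));
  // Two consecutive elements taken from the first input.
  assert(!isAlternating({0, 1, 9, 11, 2, 13, 3, 15}, 8));
}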
14180
14181static bool isSplatBV(SDValue Op) {
14182 if (Op.getOpcode() != ISD::BUILD_VECTOR)
14183 return false;
14184 SDValue FirstOp;
14185
14186 // Find first non-undef input.
14187 for (int i = 0, e = Op.getNumOperands(); i < e; i++) {
14188 FirstOp = Op.getOperand(i);
14189 if (!FirstOp.isUndef())
14190 break;
14191 }
14192
14193 // All inputs are undef or the same as the first non-undef input.
14194 for (int i = 1, e = Op.getNumOperands(); i < e; i++)
14195 if (Op.getOperand(i) != FirstOp && !Op.getOperand(i).isUndef())
14196 return false;
14197 return true;
14198}
14199
14200static SDValue isScalarToVec(SDValue Op) {
14201 if (Op.getOpcode() == ISD::SCALAR_TO_VECTOR)
14202 return Op;
14203 if (Op.getOpcode() != ISD::BITCAST)
14204 return SDValue();
14205 Op = Op.getOperand(0);
14206 if (Op.getOpcode() == ISD::SCALAR_TO_VECTOR)
14207 return Op;
14208 return SDValue();
14209}
14210
14211static void fixupShuffleMaskForPermutedSToV(SmallVectorImpl<int> &ShuffV,
14212 int LHSMaxIdx, int RHSMinIdx,
14213 int RHSMaxIdx, int HalfVec) {
14214 for (int i = 0, e = ShuffV.size(); i < e; i++) {
14215 int Idx = ShuffV[i];
14216 if ((Idx >= 0 && Idx < LHSMaxIdx) || (Idx >= RHSMinIdx && Idx < RHSMaxIdx))
14217 ShuffV[i] += HalfVec;
14218 }
14219}
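// A minimal standalone sketch (separate from this file) mirroring the loop
// above: any mask entry that refers to element zero of a permuted input is
// bumped by HalfVec.  The v4i32 values below are illustrative only.
#include <cassert>
#include <vector>

static void fixupMask(std::vector<int> &ShuffV, int LHSMaxIdx, int RHSMinIdx,
                      int RHSMaxIdx, int HalfVec) {
  for (int &Idx : ShuffV)
    if ((Idx >= 0 && Idx < LHSMaxIdx) || (Idx >= RHSMinIdx && Idx < RHSMaxIdx))
      Idx += HalfVec;
}

int main() {
  // LHS came from a scalar_to_vector: its element 0 now lives at element
  // HalfVec (= 2) of the permuted vector, so mask entry 0 becomes 2.
  std::vector<int> Mask = {0, 4, 5, 6};
  fixupMask(Mask, /*LHSMaxIdx=*/1, /*RHSMinIdx=*/-1, /*RHSMaxIdx=*/-1,
            /*HalfVec=*/2);
  assert((Mask == std::vector<int>{2, 4, 5, 6}));
}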
14220
14221// Replace a SCALAR_TO_VECTOR with a SCALAR_TO_VECTOR_PERMUTED except if
14222// the original is:
14223// (<n x Ty> (scalar_to_vector (Ty (extract_elt <n x Ty> %a, C))))
14224// In such a case, just change the shuffle mask to extract the element
14225// from the permuted index.
14226static SDValue getSToVPermuted(SDValue OrigSToV, SelectionDAG &DAG) {
14227 SDLoc dl(OrigSToV);
14228 EVT VT = OrigSToV.getValueType();
14229   assert(OrigSToV.getOpcode() == ISD::SCALAR_TO_VECTOR &&
14230          "Expecting a SCALAR_TO_VECTOR here");
14231 SDValue Input = OrigSToV.getOperand(0);
14232
14233 if (Input.getOpcode() == ISD::EXTRACT_VECTOR_ELT) {
14234 ConstantSDNode *Idx = dyn_cast<ConstantSDNode>(Input.getOperand(1));
14235 SDValue OrigVector = Input.getOperand(0);
14236
14237 // Can't handle non-const element indices or different vector types
14238 // for the input to the extract and the output of the scalar_to_vector.
14239 if (Idx && VT == OrigVector.getValueType()) {
14240 SmallVector<int, 16> NewMask(VT.getVectorNumElements(), -1);
14241 NewMask[VT.getVectorNumElements() / 2] = Idx->getZExtValue();
14242 return DAG.getVectorShuffle(VT, dl, OrigVector, OrigVector, NewMask);
14243 }
14244 }
14245 return DAG.getNode(PPCISD::SCALAR_TO_VECTOR_PERMUTED, dl, VT,
14246 OrigSToV.getOperand(0));
14247}
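// A minimal standalone sketch (separate from this file) of the mask built by
// the extract_elt special case above: the permuted scalar lives at element
// NumElts / 2, so a single shuffle entry at that position pulls out the
// extracted lane.  Values are illustrative only.
#include <cassert>
#include <vector>

int main() {
  unsigned NumElts = 4;    // e.g. a v4i32 scalar_to_vector
  unsigned ExtractIdx = 3; // (extract_elt %a, 3)
  std::vector<int> NewMask(NumElts, -1);
  NewMask[NumElts / 2] = ExtractIdx;
  assert((NewMask == std::vector<int>{-1, -1, 3, -1}));
}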
14248
14249// On little endian subtargets, combine shuffles such as:
14250// vector_shuffle<16,1,17,3,18,5,19,7,20,9,21,11,22,13,23,15>, <zero>, %b
14251// into:
14252// vector_shuffle<16,0,17,1,18,2,19,3,20,4,21,5,22,6,23,7>, <zero>, %b
14253// because the latter can be matched to a single instruction merge.
14254// Furthermore, SCALAR_TO_VECTOR on little endian always involves a permute
14255// to put the value into element zero. Adjust the shuffle mask so that the
14256// vector can remain in permuted form (to prevent a swap prior to a shuffle).
14257SDValue PPCTargetLowering::combineVectorShuffle(ShuffleVectorSDNode *SVN,
14258 SelectionDAG &DAG) const {
14259 SDValue LHS = SVN->getOperand(0);
14260 SDValue RHS = SVN->getOperand(1);
14261 auto Mask = SVN->getMask();
14262 int NumElts = LHS.getValueType().getVectorNumElements();
14263 SDValue Res(SVN, 0);
14264 SDLoc dl(SVN);
14265
14266 // None of these combines are useful on big endian systems since the ISA
14267 // already has a big endian bias.
14268 if (!Subtarget.isLittleEndian() || !Subtarget.hasVSX())
14269 return Res;
14270
14271 // If this is not a shuffle of a shuffle and the first element comes from
14272 // the second vector, canonicalize to the commuted form. This will make it
14273 // more likely to match one of the single instruction patterns.
14274 if (Mask[0] >= NumElts && LHS.getOpcode() != ISD::VECTOR_SHUFFLE &&
14275 RHS.getOpcode() != ISD::VECTOR_SHUFFLE) {
14276 std::swap(LHS, RHS);
14277 Res = DAG.getCommutedVectorShuffle(*SVN);
14278 Mask = cast<ShuffleVectorSDNode>(Res)->getMask();
14279 }
14280
14281 // Adjust the shuffle mask if either input vector comes from a
14282 // SCALAR_TO_VECTOR and keep the respective input vector in permuted
14283 // form (to prevent the need for a swap).
14284 SmallVector<int, 16> ShuffV(Mask.begin(), Mask.end());
14285 SDValue SToVLHS = isScalarToVec(LHS);
14286 SDValue SToVRHS = isScalarToVec(RHS);
14287 if (SToVLHS || SToVRHS) {
14288 int NumEltsIn = SToVLHS ? SToVLHS.getValueType().getVectorNumElements()
14289 : SToVRHS.getValueType().getVectorNumElements();
14290 int NumEltsOut = ShuffV.size();
14291
14292 // Initially assume that neither input is permuted. These will be adjusted
14293 // accordingly if either input is.
14294 int LHSMaxIdx = -1;
14295 int RHSMinIdx = -1;
14296 int RHSMaxIdx = -1;
14297 int HalfVec = LHS.getValueType().getVectorNumElements() / 2;
14298
14299 // Get the permuted scalar to vector nodes for the source(s) that come from
14300 // ISD::SCALAR_TO_VECTOR.
14301 if (SToVLHS) {
14302 // Set up the values for the shuffle vector fixup.
14303 LHSMaxIdx = NumEltsOut / NumEltsIn;
14304 SToVLHS = getSToVPermuted(SToVLHS, DAG);
14305 if (SToVLHS.getValueType() != LHS.getValueType())
14306 SToVLHS = DAG.getBitcast(LHS.getValueType(), SToVLHS);
14307 LHS = SToVLHS;
14308 }
14309 if (SToVRHS) {
14310 RHSMinIdx = NumEltsOut;
14311 RHSMaxIdx = NumEltsOut / NumEltsIn + RHSMinIdx;
14312 SToVRHS = getSToVPermuted(SToVRHS, DAG);
14313 if (SToVRHS.getValueType() != RHS.getValueType())
14314 SToVRHS = DAG.getBitcast(RHS.getValueType(), SToVRHS);
14315 RHS = SToVRHS;
14316 }
14317
14318 // Fix up the shuffle mask to reflect where the desired element actually is.
14319 // The minimum and maximum indices that correspond to element zero for both
14320 // the LHS and RHS are computed and will control which shuffle mask entries
14321 // are to be changed. For example, if the RHS is permuted, any shuffle mask
14322 // entries in the range [RHSMinIdx,RHSMaxIdx) will be incremented by
14323 // HalfVec to refer to the corresponding element in the permuted vector.
14324 fixupShuffleMaskForPermutedSToV(ShuffV, LHSMaxIdx, RHSMinIdx, RHSMaxIdx,
14325 HalfVec);
14326 Res = DAG.getVectorShuffle(SVN->getValueType(0), dl, LHS, RHS, ShuffV);
14327
14328 // We may have simplified away the shuffle. We won't be able to do anything
14329 // further with it here.
14330 if (!isa<ShuffleVectorSDNode>(Res))
14331 return Res;
14332 Mask = cast<ShuffleVectorSDNode>(Res)->getMask();
14333 }
14334
14335 // The common case after we commuted the shuffle is that the RHS is a splat
14336 // and we have elements coming in from the splat at indices that are not
14337 // conducive to using a merge.
14338 // Example:
14339 // vector_shuffle<0,17,1,19,2,21,3,23,4,25,5,27,6,29,7,31> t1, <zero>
14340 if (!isSplatBV(RHS))
14341 return Res;
14342
14343 // We are looking for a mask such that all even elements are from
14344 // one vector and all odd elements from the other.
14345 if (!isAlternatingShuffMask(Mask, NumElts))
14346 return Res;
14347
14348 // Adjust the mask so we are pulling in the same index from the splat
14349 // as the index from the interesting vector in consecutive elements.
14350 // Example (even elements from first vector):
14351 // vector_shuffle<0,16,1,17,2,18,3,19,4,20,5,21,6,22,7,23> t1, <zero>
14352 if (Mask[0] < NumElts)
14353 for (int i = 1, e = Mask.size(); i < e; i += 2)
14354 ShuffV[i] = (ShuffV[i - 1] + NumElts);
14355 // Example (odd elements from first vector):
14356 // vector_shuffle<16,0,17,1,18,2,19,3,20,4,21,5,22,6,23,7> t1, <zero>
14357 else
14358 for (int i = 0, e = Mask.size(); i < e; i += 2)
14359 ShuffV[i] = (ShuffV[i + 1] + NumElts);
14360
14361 // If the RHS has undefs, we need to remove them since we may have created
14362 // a shuffle that adds those instead of the splat value.
14363 SDValue SplatVal = cast<BuildVectorSDNode>(RHS.getNode())->getSplatValue();
14364 RHS = DAG.getSplatBuildVector(RHS.getValueType(), dl, SplatVal);
14365
14366 Res = DAG.getVectorShuffle(SVN->getValueType(0), dl, LHS, RHS, ShuffV);
14367 return Res;
14368}
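// A minimal standalone sketch (separate from this file) of the mask
// adjustment performed above when the even elements come from the
// interesting vector and the odd elements from the splat; an 8-element mask
// is used here purely for illustration.
#include <cassert>
#include <vector>

int main() {
  int NumElts = 8;
  std::vector<int> Mask = {0, 9, 1, 11, 2, 13, 3, 15};
  std::vector<int> ShuffV = Mask;
  if (Mask[0] < NumElts)
    for (size_t i = 1; i < Mask.size(); i += 2)
      ShuffV[i] = ShuffV[i - 1] + NumElts;
  // Every odd entry now mirrors the preceding even entry, offset into the
  // splat operand, which a single merge instruction can match.
  assert((ShuffV == std::vector<int>{0, 8, 1, 9, 2, 10, 3, 11}));
}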
14369
14370SDValue PPCTargetLowering::combineVReverseMemOP(ShuffleVectorSDNode *SVN,
14371 LSBaseSDNode *LSBase,
14372 DAGCombinerInfo &DCI) const {
14373   assert((ISD::isNormalLoad(LSBase) || ISD::isNormalStore(LSBase)) &&
14374          "Not a reverse memop pattern!");
14375
14376 auto IsElementReverse = [](const ShuffleVectorSDNode *SVN) -> bool {
14377 auto Mask = SVN->getMask();
14378 int i = 0;
14379 auto I = Mask.rbegin();
14380 auto E = Mask.rend();
14381
14382 for (; I != E; ++I) {
14383 if (*I != i)
14384 return false;
14385 i++;
14386 }
14387 return true;
14388 };
14389
14390 SelectionDAG &DAG = DCI.DAG;
14391 EVT VT = SVN->getValueType(0);
14392
14393 if (!isTypeLegal(VT) || !Subtarget.isLittleEndian() || !Subtarget.hasVSX())
14394 return SDValue();
14395
14396   // Before P9, we have the PPCVSXSwapRemoval pass to fix up the element order.
14397   // See the comment in PPCVSXSwapRemoval.cpp.
14398   // This combine conflicts with that pass, so we don't do it here.
14399 if (!Subtarget.hasP9Vector())
14400 return SDValue();
14401
14402   if (!IsElementReverse(SVN))
14403 return SDValue();
14404
14405 if (LSBase->getOpcode() == ISD::LOAD) {
14406     // If the load has any user other than the shufflevector instruction,
14407     // it is not profitable to replace the shufflevector with a reverse load.
14408 if (!LSBase->hasOneUse())
14409 return SDValue();
14410
14411 SDLoc dl(SVN);
14412 SDValue LoadOps[] = {LSBase->getChain(), LSBase->getBasePtr()};
14413 return DAG.getMemIntrinsicNode(
14414 PPCISD::LOAD_VEC_BE, dl, DAG.getVTList(VT, MVT::Other), LoadOps,
14415 LSBase->getMemoryVT(), LSBase->getMemOperand());
14416 }
14417
14418 if (LSBase->getOpcode() == ISD::STORE) {
14419 // If there are other uses of the shuffle, the swap cannot be avoided.
14420 // Forcing the use of an X-Form (since swapped stores only have
14421 // X-Forms) without removing the swap is unprofitable.
14422 if (!SVN->hasOneUse())
14423 return SDValue();
14424
14425 SDLoc dl(LSBase);
14426 SDValue StoreOps[] = {LSBase->getChain(), SVN->getOperand(0),
14427 LSBase->getBasePtr()};
14428 return DAG.getMemIntrinsicNode(
14429 PPCISD::STORE_VEC_BE, dl, DAG.getVTList(MVT::Other), StoreOps,
14430 LSBase->getMemoryVT(), LSBase->getMemOperand());
14431 }
14432
14433   llvm_unreachable("Expected a load or store node here");
14434}
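// For reference (separate from this file): the only masks that pass the
// IsElementReverse check above are full element reversals, e.g. <3,2,1,0>
// for a v4i32 shuffle or <1,0> for v2f64.  An identity mask such as <0,1,2,3>
// is rejected because, read from the end, it starts at 3 rather than 0.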
14435
14436SDValue PPCTargetLowering::PerformDAGCombine(SDNode *N,
14437 DAGCombinerInfo &DCI) const {
14438 SelectionDAG &DAG = DCI.DAG;
14439 SDLoc dl(N);
14440 switch (N->getOpcode()) {
14441 default: break;
14442 case ISD::ADD:
14443 return combineADD(N, DCI);
14444 case ISD::SHL:
14445 return combineSHL(N, DCI);
14446 case ISD::SRA:
14447 return combineSRA(N, DCI);
14448 case ISD::SRL:
14449 return combineSRL(N, DCI);
14450 case ISD::MUL:
14451 return combineMUL(N, DCI);
14452 case ISD::FMA:
14453 case PPCISD::FNMSUB:
14454 return combineFMALike(N, DCI);
14455 case PPCISD::SHL:
14456 if (isNullConstant(N->getOperand(0))) // 0 << V -> 0.
14457 return N->getOperand(0);
14458 break;
14459 case PPCISD::SRL:
14460 if (isNullConstant(N->getOperand(0))) // 0 >>u V -> 0.
14461 return N->getOperand(0);
14462 break;
14463 case PPCISD::SRA:
14464 if (ConstantSDNode *C = dyn_cast<ConstantSDNode>(N->getOperand(0))) {
14465 if (C->isNullValue() || // 0 >>s V -> 0.
14466 C->isAllOnesValue()) // -1 >>s V -> -1.
14467 return N->getOperand(0);
14468 }
14469 break;
14470 case ISD::SIGN_EXTEND:
14471 case ISD::ZERO_EXTEND:
14472 case ISD::ANY_EXTEND:
14473 return DAGCombineExtBoolTrunc(N, DCI);
14474 case ISD::TRUNCATE:
14475 return combineTRUNCATE(N, DCI);
14476 case ISD::SETCC:
14477 if (SDValue CSCC = combineSetCC(N, DCI))
14478 return CSCC;
14479     LLVM_FALLTHROUGH;
14480 case ISD::SELECT_CC:
14481 return DAGCombineTruncBoolExt(N, DCI);
14482 case ISD::SINT_TO_FP:
14483 case ISD::UINT_TO_FP:
14484 return combineFPToIntToFP(N, DCI);
14485 case ISD::VECTOR_SHUFFLE:
14486 if (ISD::isNormalLoad(N->getOperand(0).getNode())) {
14487 LSBaseSDNode* LSBase = cast<LSBaseSDNode>(N->getOperand(0));
14488 return combineVReverseMemOP(cast<ShuffleVectorSDNode>(N), LSBase, DCI);
14489 }
14490 return combineVectorShuffle(cast<ShuffleVectorSDNode>(N), DCI.DAG);
14491 case ISD::STORE: {
14492
14493 EVT Op1VT = N->getOperand(1).getValueType();
14494 unsigned Opcode = N->getOperand(1).getOpcode();
14495
14496 if (Opcode == ISD::FP_TO_SINT || Opcode == ISD::FP_TO_UINT) {
14497 SDValue Val= combineStoreFPToInt(N, DCI);
14498 if (Val)
14499 return Val;
14500 }
14501
14502 if (Opcode == ISD::VECTOR_SHUFFLE && ISD::isNormalStore(N)) {
14503 ShuffleVectorSDNode *SVN = cast<ShuffleVectorSDNode>(N->getOperand(1));
14504 SDValue Val= combineVReverseMemOP(SVN, cast<LSBaseSDNode>(N), DCI);
14505 if (Val)
14506 return Val;
14507 }
14508
14509 // Turn STORE (BSWAP) -> sthbrx/stwbrx.
14510 if (cast<StoreSDNode>(N)->isUnindexed() && Opcode == ISD::BSWAP &&
14511 N->getOperand(1).getNode()->hasOneUse() &&
14512 (Op1VT == MVT::i32 || Op1VT == MVT::i16 ||
14513 (Subtarget.hasLDBRX() && Subtarget.isPPC64() && Op1VT == MVT::i64))) {
14514
14515       // STBRX can only handle simple types, and it makes no sense to store
14516       // fewer than two bytes in byte-reversed order.
14517 EVT mVT = cast<StoreSDNode>(N)->getMemoryVT();
14518 if (mVT.isExtended() || mVT.getSizeInBits() < 16)
14519 break;
14520
14521 SDValue BSwapOp = N->getOperand(1).getOperand(0);
14522 // Do an any-extend to 32-bits if this is a half-word input.
14523 if (BSwapOp.getValueType() == MVT::i16)
14524 BSwapOp = DAG.getNode(ISD::ANY_EXTEND, dl, MVT::i32, BSwapOp);
14525
14526       // If the type of the BSWAP operand is wider than the stored memory
14527       // width, it needs to be shifted right before the STBRX.
14528 if (Op1VT.bitsGT(mVT)) {
14529 int Shift = Op1VT.getSizeInBits() - mVT.getSizeInBits();
14530 BSwapOp = DAG.getNode(ISD::SRL, dl, Op1VT, BSwapOp,
14531 DAG.getConstant(Shift, dl, MVT::i32));
14532 // Need to truncate if this is a bswap of i64 stored as i32/i16.
14533 if (Op1VT == MVT::i64)
14534 BSwapOp = DAG.getNode(ISD::TRUNCATE, dl, MVT::i32, BSwapOp);
14535 }
14536
14537 SDValue Ops[] = {
14538 N->getOperand(0), BSwapOp, N->getOperand(2), DAG.getValueType(mVT)
14539 };
14540 return
14541 DAG.getMemIntrinsicNode(PPCISD::STBRX, dl, DAG.getVTList(MVT::Other),
14542 Ops, cast<StoreSDNode>(N)->getMemoryVT(),
14543 cast<StoreSDNode>(N)->getMemOperand());
14544 }
14545
14546 // STORE Constant:i32<0> -> STORE<trunc to i32> Constant:i64<0>
14547     // This increases the chance of CSE for the constant materialization.
14548 if (Subtarget.isPPC64() && !DCI.isBeforeLegalize() &&
14549 isa<ConstantSDNode>(N->getOperand(1)) && Op1VT == MVT::i32) {
14550       // Need to sign-extend to 64 bits to handle negative values.
14551 EVT MemVT = cast<StoreSDNode>(N)->getMemoryVT();
14552 uint64_t Val64 = SignExtend64(N->getConstantOperandVal(1),
14553 MemVT.getSizeInBits());
14554 SDValue Const64 = DAG.getConstant(Val64, dl, MVT::i64);
14555
14556 // DAG.getTruncStore() can't be used here because it doesn't accept
14557 // the general (base + offset) addressing mode.
14558 // So we use UpdateNodeOperands and setTruncatingStore instead.
14559 DAG.UpdateNodeOperands(N, N->getOperand(0), Const64, N->getOperand(2),
14560 N->getOperand(3));
14561 cast<StoreSDNode>(N)->setTruncatingStore(true);
14562 return SDValue(N, 0);
14563 }
14564
14565 // For little endian, VSX stores require generating xxswapd/lxvd2x.
14566 // Not needed on ISA 3.0 based CPUs since we have a non-permuting store.
14567 if (Op1VT.isSimple()) {
14568 MVT StoreVT = Op1VT.getSimpleVT();
14569 if (Subtarget.needsSwapsForVSXMemOps() &&
14570 (StoreVT == MVT::v2f64 || StoreVT == MVT::v2i64 ||
14571 StoreVT == MVT::v4f32 || StoreVT == MVT::v4i32))
14572 return expandVSXStoreForLE(N, DCI);
14573 }
14574 break;
14575 }
14576 case ISD::LOAD: {
14577 LoadSDNode *LD = cast<LoadSDNode>(N);
14578 EVT VT = LD->getValueType(0);
14579
14580 // For little endian, VSX loads require generating lxvd2x/xxswapd.
14581 // Not needed on ISA 3.0 based CPUs since we have a non-permuting load.
14582 if (VT.isSimple()) {
14583 MVT LoadVT = VT.getSimpleVT();
14584 if (Subtarget.needsSwapsForVSXMemOps() &&
14585 (LoadVT == MVT::v2f64 || LoadVT == MVT::v2i64 ||
14586 LoadVT == MVT::v4f32 || LoadVT == MVT::v4i32))
14587 return expandVSXLoadForLE(N, DCI);
14588 }
14589
14590 // We sometimes end up with a 64-bit integer load, from which we extract
14591 // two single-precision floating-point numbers. This happens with
14592 // std::complex<float>, and other similar structures, because of the way we
14593 // canonicalize structure copies. However, if we lack direct moves,
14594 // then the final bitcasts from the extracted integer values to the
14595 // floating-point numbers turn into store/load pairs. Even with direct moves,
14596 // just loading the two floating-point numbers is likely better.
14597 auto ReplaceTwoFloatLoad = [&]() {
14598 if (VT != MVT::i64)
14599 return false;
14600
14601 if (LD->getExtensionType() != ISD::NON_EXTLOAD ||
14602 LD->isVolatile())
14603 return false;
14604
14605 // We're looking for a sequence like this:
14606 // t13: i64,ch = load<LD8[%ref.tmp]> t0, t6, undef:i64
14607 // t16: i64 = srl t13, Constant:i32<32>
14608 // t17: i32 = truncate t16
14609 // t18: f32 = bitcast t17
14610 // t19: i32 = truncate t13
14611 // t20: f32 = bitcast t19
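      // As a concrete (hypothetical) source-level example, a pattern like
      //   std::complex<float> C = *P;
      //   float Re = C.real(), Im = C.imag();
      // can reach here as a single i64 load whose two halves are produced by
      // the srl/truncate pair above and then bitcast back to f32.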
14612
14613 if (!LD->hasNUsesOfValue(2, 0))
14614 return false;
14615
14616 auto UI = LD->use_begin();
14617 while (UI.getUse().getResNo() != 0) ++UI;
14618 SDNode *Trunc = *UI++;
14619 while (UI.getUse().getResNo() != 0) ++UI;
14620 SDNode *RightShift = *UI;
14621 if (Trunc->getOpcode() != ISD::TRUNCATE)
14622 std::swap(Trunc, RightShift);
14623
14624 if (Trunc->getOpcode() != ISD::TRUNCATE ||
14625 Trunc->getValueType(0) != MVT::i32 ||
14626 !Trunc->hasOneUse())
14627 return false;
14628 if (RightShift->getOpcode() != ISD::SRL ||
14629 !isa<ConstantSDNode>(RightShift->getOperand(1)) ||
14630 RightShift->getConstantOperandVal(1) != 32 ||
14631 !RightShift->hasOneUse())
14632 return false;
14633
14634 SDNode *Trunc2 = *RightShift->use_begin();
14635 if (Trunc2->getOpcode() != ISD::TRUNCATE ||
14636 Trunc2->getValueType(0) != MVT::i32 ||
14637 !Trunc2->hasOneUse())
14638 return false;
14639
14640 SDNode *Bitcast = *Trunc->use_begin();
14641 SDNode *Bitcast2 = *Trunc2->use_begin();
14642
14643 if (Bitcast->getOpcode() != ISD::BITCAST ||
14644 Bitcast->getValueType(0) != MVT::f32)
14645 return false;
14646 if (Bitcast2->getOpcode() != ISD::BITCAST ||
14647 Bitcast2->getValueType(0) != MVT::f32)
14648 return false;
14649
14650 if (Subtarget.isLittleEndian())
14651 std::swap(Bitcast, Bitcast2);
14652
14653 // Bitcast has the second float (in memory-layout order) and Bitcast2
14654 // has the first one.
14655
14656 SDValue BasePtr = LD->getBasePtr();
14657 if (LD->isIndexed()) {
14658       assert(LD->getAddressingMode() == ISD::PRE_INC &&
14659              "Non-pre-inc AM on PPC?");
14660 BasePtr =
14661 DAG.getNode(ISD::ADD, dl, BasePtr.getValueType(), BasePtr,
14662 LD->getOffset());
14663 }
14664
14665 auto MMOFlags =
14666 LD->getMemOperand()->getFlags() & ~MachineMemOperand::MOVolatile;
14667 SDValue FloatLoad = DAG.getLoad(MVT::f32, dl, LD->getChain(), BasePtr,
14668 LD->getPointerInfo(), LD->getAlignment(),
14669 MMOFlags, LD->getAAInfo());
14670 SDValue AddPtr =
14671 DAG.getNode(ISD::ADD, dl, BasePtr.getValueType(),
14672 BasePtr, DAG.getIntPtrConstant(4, dl));
14673 SDValue FloatLoad2 = DAG.getLoad(
14674 MVT::f32, dl, SDValue(FloatLoad.getNode(), 1), AddPtr,
14675 LD->getPointerInfo().getWithOffset(4),
14676 MinAlign(LD->getAlignment(), 4), MMOFlags, LD->getAAInfo());
14677
14678 if (LD->isIndexed()) {
14679 // Note that DAGCombine should re-form any pre-increment load(s) from
14680 // what is produced here if that makes sense.
14681 DAG.ReplaceAllUsesOfValueWith(SDValue(LD, 1), BasePtr);
14682 }
14683
14684 DCI.CombineTo(Bitcast2, FloatLoad);
14685 DCI.CombineTo(Bitcast, FloatLoad2);
14686
14687 DAG.ReplaceAllUsesOfValueWith(SDValue(LD, LD->isIndexed() ? 2 : 1),
14688 SDValue(FloatLoad2.getNode(), 1));
14689 return true;
14690 };
14691
14692 if (ReplaceTwoFloatLoad())
14693 return SDValue(N, 0);
14694
14695 EVT MemVT = LD->getMemoryVT();
14696 Type *Ty = MemVT.getTypeForEVT(*DAG.getContext());
14697 Align ABIAlignment = DAG.getDataLayout().getABITypeAlign(Ty);
14698 if (LD->isUnindexed() && VT.isVector() &&
14699 ((Subtarget.hasAltivec() && ISD::isNON_EXTLoad(N) &&
14700 // P8 and later hardware should just use LOAD.
14701 !Subtarget.hasP8Vector() &&
14702 (VT == MVT::v16i8 || VT == MVT::v8i16 || VT == MVT::v4i32 ||
14703 VT == MVT::v4f32))) &&
14704 LD->getAlign() < ABIAlignment) {
14705 // This is a type-legal unaligned Altivec load.
14706 SDValue Chain = LD->getChain();
14707 SDValue Ptr = LD->getBasePtr();
14708 bool isLittleEndian = Subtarget.isLittleEndian();
14709
14710 // This implements the loading of unaligned vectors as described in
14711 // the venerable Apple Velocity Engine overview. Specifically:
14712 // https://developer.apple.com/hardwaredrivers/ve/alignment.html
14713 // https://developer.apple.com/hardwaredrivers/ve/code_optimization.html
14714 //
14715 // The general idea is to expand a sequence of one or more unaligned
14716 // loads into an alignment-based permutation-control instruction (lvsl
14717 // or lvsr), a series of regular vector loads (which always truncate
14718 // their input address to an aligned address), and a series of
14719 // permutations. The results of these permutations are the requested
14720 // loaded values. The trick is that the last "extra" load is not taken
14721 // from the address you might suspect (sizeof(vector) bytes after the
14722 // last requested load), but rather sizeof(vector) - 1 bytes after the
14723 // last requested vector. The point of this is to avoid a page fault if
14724 // the base address happened to be aligned. This works because if the
14725 // base address is aligned, then adding less than a full vector length
14726 // will cause the last vector in the sequence to be (re)loaded.
14727       // Otherwise, the next vector is fetched from the address you would
14728       // expect.
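      // As a worked (hypothetical) example: a 16-byte load from 0x1006 uses
      // lvx at 0x1006 (truncated to 0x1000) and at 0x1006 + 15 = 0x1015
      // (truncated to 0x1010), and vperm stitches bytes 6..21 back together.
      // Had the base been aligned at 0x1000, the extra access at 0x1000 + 15
      // would truncate back to 0x1000, so no byte beyond the requested 16
      // would ever be touched.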
14729
14730 // We might be able to reuse the permutation generation from
14731 // a different base address offset from this one by an aligned amount.
14732 // The INTRINSIC_WO_CHAIN DAG combine will attempt to perform this
14733 // optimization later.
14734 Intrinsic::ID Intr, IntrLD, IntrPerm;
14735 MVT PermCntlTy, PermTy, LDTy;
14736 Intr = isLittleEndian ? Intrinsic::ppc_altivec_lvsr
14737 : Intrinsic::ppc_altivec_lvsl;
14738 IntrLD = Intrinsic::ppc_altivec_lvx;
14739 IntrPerm = Intrinsic::ppc_altivec_vperm;
14740 PermCntlTy = MVT::v16i8;
14741 PermTy = MVT::v4i32;
14742 LDTy = MVT::v4i32;
14743
14744 SDValue PermCntl = BuildIntrinsicOp(Intr, Ptr, DAG, dl, PermCntlTy);
14745
14746 // Create the new MMO for the new base load. It is like the original MMO,
14747 // but represents an area in memory almost twice the vector size centered
14748 // on the original address. If the address is unaligned, we might start
14749 // reading up to (sizeof(vector)-1) bytes below the address of the
14750 // original unaligned load.
14751 MachineFunction &MF = DAG.getMachineFunction();
14752 MachineMemOperand *BaseMMO =
14753 MF.getMachineMemOperand(LD->getMemOperand(),
14754 -(long)MemVT.getStoreSize()+1,
14755 2*MemVT.getStoreSize()-1);
14756
14757 // Create the new base load.
14758 SDValue LDXIntID =
14759 DAG.getTargetConstant(IntrLD, dl, getPointerTy(MF.getDataLayout()));
14760 SDValue BaseLoadOps[] = { Chain, LDXIntID, Ptr };
14761 SDValue BaseLoad =
14762 DAG.getMemIntrinsicNode(ISD::INTRINSIC_W_CHAIN, dl,
14763 DAG.getVTList(PermTy, MVT::Other),
14764 BaseLoadOps, LDTy, BaseMMO);
14765
14766 // Note that the value of IncOffset (which is provided to the next
14767 // load's pointer info offset value, and thus used to calculate the
14768 // alignment), and the value of IncValue (which is actually used to
14769 // increment the pointer value) are different! This is because we
14770 // require the next load to appear to be aligned, even though it
14771 // is actually offset from the base pointer by a lesser amount.
14772 int IncOffset = VT.getSizeInBits() / 8;
14773 int IncValue = IncOffset;
14774
14775 // Walk (both up and down) the chain looking for another load at the real
14776 // (aligned) offset (the alignment of the other load does not matter in
14777 // this case). If found, then do not use the offset reduction trick, as
14778 // that will prevent the loads from being later combined (as they would
14779 // otherwise be duplicates).
14780 if (!findConsecutiveLoad(LD, DAG))
14781 --IncValue;
14782
14783 SDValue Increment =
14784 DAG.getConstant(IncValue, dl, getPointerTy(MF.getDataLayout()));
14785 Ptr = DAG.getNode(ISD::ADD, dl, Ptr.getValueType(), Ptr, Increment);
14786
14787 MachineMemOperand *ExtraMMO =
14788 MF.getMachineMemOperand(LD->getMemOperand(),
14789 1, 2*MemVT.getStoreSize()-1);
14790 SDValue ExtraLoadOps[] = { Chain, LDXIntID, Ptr };
14791 SDValue ExtraLoad =
14792 DAG.getMemIntrinsicNode(ISD::INTRINSIC_W_CHAIN, dl,
14793 DAG.getVTList(PermTy, MVT::Other),
14794 ExtraLoadOps, LDTy, ExtraMMO);
14795
14796 SDValue TF = DAG.getNode(ISD::TokenFactor, dl, MVT::Other,
14797 BaseLoad.getValue(1), ExtraLoad.getValue(1));
14798
14799 // Because vperm has a big-endian bias, we must reverse the order
14800 // of the input vectors and complement the permute control vector
14801 // when generating little endian code. We have already handled the
14802 // latter by using lvsr instead of lvsl, so just reverse BaseLoad
14803 // and ExtraLoad here.
14804 SDValue Perm;
14805 if (isLittleEndian)
14806 Perm = BuildIntrinsicOp(IntrPerm,
14807 ExtraLoad, BaseLoad, PermCntl, DAG, dl);
14808 else
14809 Perm = BuildIntrinsicOp(IntrPerm,
14810 BaseLoad, ExtraLoad, PermCntl, DAG, dl);
14811
14812 if (VT != PermTy)
14813 Perm = Subtarget.hasAltivec()
14814 ? DAG.getNode(ISD::BITCAST, dl, VT, Perm)
14815 : DAG.getNode(ISD::FP_ROUND, dl, VT, Perm,
14816 DAG.getTargetConstant(1, dl, MVT::i64));
14817 // second argument is 1 because this rounding
14818 // is always exact.
14819
14820 // The output of the permutation is our loaded result, the TokenFactor is
14821 // our new chain.
14822 DCI.CombineTo(N, Perm, TF);
14823 return SDValue(N, 0);
14824 }
14825 }
14826 break;
14827 case ISD::INTRINSIC_WO_CHAIN: {
14828 bool isLittleEndian = Subtarget.isLittleEndian();
14829 unsigned IID = cast<ConstantSDNode>(N->getOperand(0))->getZExtValue();
14830 Intrinsic::ID Intr = (isLittleEndian ? Intrinsic::ppc_altivec_lvsr
14831 : Intrinsic::ppc_altivec_lvsl);
14832 if (IID == Intr && N->getOperand(1)->getOpcode() == ISD::ADD) {
14833 SDValue Add = N->getOperand(1);
14834
14835 int Bits = 4 /* 16 byte alignment */;
14836
14837 if (DAG.MaskedValueIsZero(Add->getOperand(1),
14838 APInt::getAllOnesValue(Bits /* alignment */)
14839 .zext(Add.getScalarValueSizeInBits()))) {
14840 SDNode *BasePtr = Add->getOperand(0).getNode();
14841 for (SDNode::use_iterator UI = BasePtr->use_begin(),
14842 UE = BasePtr->use_end();
14843 UI != UE; ++UI) {
14844 if (UI->getOpcode() == ISD::INTRINSIC_WO_CHAIN &&
14845 cast<ConstantSDNode>(UI->getOperand(0))->getZExtValue() ==
14846 IID) {
14847 // We've found another LVSL/LVSR, and this address is an aligned
14848 // multiple of that one. The results will be the same, so use the
14849 // one we've just found instead.
14850
14851 return SDValue(*UI, 0);
14852 }
14853 }
14854 }
14855
14856 if (isa<ConstantSDNode>(Add->getOperand(1))) {
14857 SDNode *BasePtr = Add->getOperand(0).getNode();
14858 for (SDNode::use_iterator UI = BasePtr->use_begin(),
14859 UE = BasePtr->use_end(); UI != UE; ++UI) {
14860 if (UI->getOpcode() == ISD::ADD &&
14861 isa<ConstantSDNode>(UI->getOperand(1)) &&
14862 (cast<ConstantSDNode>(Add->getOperand(1))->getZExtValue() -
14863 cast<ConstantSDNode>(UI->getOperand(1))->getZExtValue()) %
14864 (1ULL << Bits) == 0) {
14865 SDNode *OtherAdd = *UI;
14866 for (SDNode::use_iterator VI = OtherAdd->use_begin(),
14867 VE = OtherAdd->use_end(); VI != VE; ++VI) {
14868 if (VI->getOpcode() == ISD::INTRINSIC_WO_CHAIN &&
14869 cast<ConstantSDNode>(VI->getOperand(0))->getZExtValue() == IID) {
14870 return SDValue(*VI, 0);
14871 }
14872 }
14873 }
14874 }
14875 }
14876 }
14877
14878     // Combine vmaxsw/h/b(a, a's negation) into abs(a).
14879     // This exposes the vabsduw/h/b opportunity to downstream combines.
14880 if (!DCI.isAfterLegalizeDAG() && Subtarget.hasP9Altivec() &&
14881 (IID == Intrinsic::ppc_altivec_vmaxsw ||
14882 IID == Intrinsic::ppc_altivec_vmaxsh ||
14883 IID == Intrinsic::ppc_altivec_vmaxsb)) {
14884 SDValue V1 = N->getOperand(1);
14885 SDValue V2 = N->getOperand(2);
14886 if ((V1.getSimpleValueType() == MVT::v4i32 ||
14887 V1.getSimpleValueType() == MVT::v8i16 ||
14888 V1.getSimpleValueType() == MVT::v16i8) &&
14889 V1.getSimpleValueType() == V2.getSimpleValueType()) {
14890 // (0-a, a)
14891 if (V1.getOpcode() == ISD::SUB &&
14892 ISD::isBuildVectorAllZeros(V1.getOperand(0).getNode()) &&
14893 V1.getOperand(1) == V2) {
14894 return DAG.getNode(ISD::ABS, dl, V2.getValueType(), V2);
14895 }
14896 // (a, 0-a)
14897 if (V2.getOpcode() == ISD::SUB &&
14898 ISD::isBuildVectorAllZeros(V2.getOperand(0).getNode()) &&
14899 V2.getOperand(1) == V1) {
14900 return DAG.getNode(ISD::ABS, dl, V1.getValueType(), V1);
14901 }
14902 // (x-y, y-x)
14903 if (V1.getOpcode() == ISD::SUB && V2.getOpcode() == ISD::SUB &&
14904 V1.getOperand(0) == V2.getOperand(1) &&
14905 V1.getOperand(1) == V2.getOperand(0)) {
14906 return DAG.getNode(ISD::ABS, dl, V1.getValueType(), V1);
14907 }
14908 }
14909 }
14910 }
14911
14912 break;
14913 case ISD::INTRINSIC_W_CHAIN:
14914 // For little endian, VSX loads require generating lxvd2x/xxswapd.
14915 // Not needed on ISA 3.0 based CPUs since we have a non-permuting load.
14916 if (Subtarget.needsSwapsForVSXMemOps()) {
14917 switch (cast<ConstantSDNode>(N->getOperand(1))->getZExtValue()) {
14918 default:
14919 break;
14920 case Intrinsic::ppc_vsx_lxvw4x:
14921 case Intrinsic::ppc_vsx_lxvd2x:
14922 return expandVSXLoadForLE(N, DCI);
14923 }
14924 }
14925 break;
14926 case ISD::INTRINSIC_VOID:
14927 // For little endian, VSX stores require generating xxswapd/stxvd2x.
14928 // Not needed on ISA 3.0 based CPUs since we have a non-permuting store.
14929 if (Subtarget.needsSwapsForVSXMemOps()) {
14930 switch (cast<ConstantSDNode>(N->getOperand(1))->getZExtValue()) {
14931 default:
14932 break;
14933 case Intrinsic::ppc_vsx_stxvw4x:
14934 case Intrinsic::ppc_vsx_stxvd2x:
14935 return expandVSXStoreForLE(N, DCI);
14936 }
14937 }
14938 break;
14939 case ISD::BSWAP:
14940 // Turn BSWAP (LOAD) -> lhbrx/lwbrx.
14941 if (ISD::isNON_EXTLoad(N->getOperand(0).getNode()) &&
14942 N->getOperand(0).hasOneUse() &&
14943 (N->getValueType(0) == MVT::i32 || N->getValueType(0) == MVT::i16 ||
14944 (Subtarget.hasLDBRX() && Subtarget.isPPC64() &&
14945 N->getValueType(0) == MVT::i64))) {
14946 SDValue Load = N->getOperand(0);
14947 LoadSDNode *LD = cast<LoadSDNode>(Load);
14948 // Create the byte-swapping load.
14949 SDValue Ops[] = {
14950 LD->getChain(), // Chain
14951 LD->getBasePtr(), // Ptr
14952 DAG.getValueType(N->getValueType(0)) // VT
14953 };
14954 SDValue BSLoad =
14955 DAG.getMemIntrinsicNode(PPCISD::LBRX, dl,
14956 DAG.getVTList(N->getValueType(0) == MVT::i64 ?
14957 MVT::i64 : MVT::i32, MVT::Other),
14958 Ops, LD->getMemoryVT(), LD->getMemOperand());
14959
14960 // If this is an i16 load, insert the truncate.
14961 SDValue ResVal = BSLoad;
14962 if (N->getValueType(0) == MVT::i16)
14963 ResVal = DAG.getNode(ISD::TRUNCATE, dl, MVT::i16, BSLoad);
14964
14965 // First, combine the bswap away. This makes the value produced by the
14966 // load dead.
14967 DCI.CombineTo(N, ResVal);
14968
14969 // Next, combine the load away, we give it a bogus result value but a real
14970 // chain result. The result value is dead because the bswap is dead.
14971 DCI.CombineTo(Load.getNode(), ResVal, BSLoad.getValue(1));
14972
14973 // Return N so it doesn't get rechecked!
14974 return SDValue(N, 0);
14975 }
14976 break;
14977 case PPCISD::VCMP:
14978 // If a VCMP_rec node already exists with exactly the same operands as this
14979 // node, use its result instead of this node (VCMP_rec computes both a CR6
14980 // and a normal output).
14981 //
14982 if (!N->getOperand(0).hasOneUse() &&
14983 !N->getOperand(1).hasOneUse() &&
14984 !N->getOperand(2).hasOneUse()) {
14985
14986 // Scan all of the users of the LHS, looking for VCMP_rec's that match.
14987 SDNode *VCMPrecNode = nullptr;
14988
14989 SDNode *LHSN = N->getOperand(0).getNode();
14990 for (SDNode::use_iterator UI = LHSN->use_begin(), E = LHSN->use_end();
14991 UI != E; ++UI)
14992 if (UI->getOpcode() == PPCISD::VCMP_rec &&
14993 UI->getOperand(1) == N->getOperand(1) &&
14994 UI->getOperand(2) == N->getOperand(2) &&
14995 UI->getOperand(0) == N->getOperand(0)) {
14996 VCMPrecNode = *UI;
14997 break;
14998 }
14999
15000 // If there is no VCMP_rec node, or if the flag value has a single use,
15001 // don't transform this.
15002 if (!VCMPrecNode || VCMPrecNode->hasNUsesOfValue(0, 1))
15003 break;
15004
15005 // Look at the (necessarily single) use of the flag value. If it has a
15006 // chain, this transformation is more complex. Note that multiple things
15007 // could use the value result, which we should ignore.
15008 SDNode *FlagUser = nullptr;
15009 for (SDNode::use_iterator UI = VCMPrecNode->use_begin();
15010 FlagUser == nullptr; ++UI) {
15011         assert(UI != VCMPrecNode->use_end() && "Didn't find user!");
15012 SDNode *User = *UI;
15013 for (unsigned i = 0, e = User->getNumOperands(); i != e; ++i) {
15014 if (User->getOperand(i) == SDValue(VCMPrecNode, 1)) {
15015 FlagUser = User;
15016 break;
15017 }
15018 }
15019 }
15020
15021 // If the user is a MFOCRF instruction, we know this is safe.
15022 // Otherwise we give up for right now.
15023 if (FlagUser->getOpcode() == PPCISD::MFOCRF)
15024 return SDValue(VCMPrecNode, 0);
15025 }
15026 break;
15027 case ISD::BRCOND: {
15028 SDValue Cond = N->getOperand(1);
15029 SDValue Target = N->getOperand(2);
15030
15031 if (Cond.getOpcode() == ISD::INTRINSIC_W_CHAIN &&
15032 cast<ConstantSDNode>(Cond.getOperand(1))->getZExtValue() ==
15033 Intrinsic::loop_decrement) {
15034
15035 // We now need to make the intrinsic dead (it cannot be instruction
15036 // selected).
15037 DAG.ReplaceAllUsesOfValueWith(Cond.getValue(1), Cond.getOperand(0));
15038       assert(Cond.getNode()->hasOneUse() &&
15039              "Counter decrement has more than one use");
15040
15041 return DAG.getNode(PPCISD::BDNZ, dl, MVT::Other,
15042 N->getOperand(0), Target);
15043 }
15044 }
15045 break;
15046 case ISD::BR_CC: {
15047 // If this is a branch on an altivec predicate comparison, lower this so
15048 // that we don't have to do a MFOCRF: instead, branch directly on CR6. This
15049 // lowering is done pre-legalize, because the legalizer lowers the predicate
15050 // compare down to code that is difficult to reassemble.
15051 ISD::CondCode CC = cast<CondCodeSDNode>(N->getOperand(1))->get();
15052 SDValue LHS = N->getOperand(2), RHS = N->getOperand(3);
15053
15054 // Sometimes the promoted value of the intrinsic is ANDed by some non-zero
15055 // value. If so, pass-through the AND to get to the intrinsic.
15056 if (LHS.getOpcode() == ISD::AND &&
15057 LHS.getOperand(0).getOpcode() == ISD::INTRINSIC_W_CHAIN &&
15058 cast<ConstantSDNode>(LHS.getOperand(0).getOperand(1))->getZExtValue() ==
15059 Intrinsic::loop_decrement &&
15060 isa<ConstantSDNode>(LHS.getOperand(1)) &&
15061 !isNullConstant(LHS.getOperand(1)))
15062 LHS = LHS.getOperand(0);
15063
15064 if (LHS.getOpcode() == ISD::INTRINSIC_W_CHAIN &&
15065 cast<ConstantSDNode>(LHS.getOperand(1))->getZExtValue() ==
15066 Intrinsic::loop_decrement &&
15067 isa<ConstantSDNode>(RHS)) {
15068       assert((CC == ISD::SETEQ || CC == ISD::SETNE) &&
15069              "Counter decrement comparison is not EQ or NE");
15070
15071 unsigned Val = cast<ConstantSDNode>(RHS)->getZExtValue();
15072 bool isBDNZ = (CC == ISD::SETEQ && Val) ||
15073 (CC == ISD::SETNE && !Val);
15074
15075 // We now need to make the intrinsic dead (it cannot be instruction
15076 // selected).
15077 DAG.ReplaceAllUsesOfValueWith(LHS.getValue(1), LHS.getOperand(0));
15078       assert(LHS.getNode()->hasOneUse() &&
15079              "Counter decrement has more than one use");
15080
15081 return DAG.getNode(isBDNZ ? PPCISD::BDNZ : PPCISD::BDZ, dl, MVT::Other,
15082 N->getOperand(0), N->getOperand(4));
15083 }
15084
15085 int CompareOpc;
15086 bool isDot;
15087
15088 if (LHS.getOpcode() == ISD::INTRINSIC_WO_CHAIN &&
15089 isa<ConstantSDNode>(RHS) && (CC == ISD::SETEQ || CC == ISD::SETNE) &&
15090 getVectorCompareInfo(LHS, CompareOpc, isDot, Subtarget)) {
15091       assert(isDot && "Can't compare against a vector result!");
15092
15093 // If this is a comparison against something other than 0/1, then we know
15094 // that the condition is never/always true.
15095 unsigned Val = cast<ConstantSDNode>(RHS)->getZExtValue();
15096 if (Val != 0 && Val != 1) {
15097 if (CC == ISD::SETEQ) // Cond never true, remove branch.
15098 return N->getOperand(0);
15099 // Always !=, turn it into an unconditional branch.
15100 return DAG.getNode(ISD::BR, dl, MVT::Other,
15101 N->getOperand(0), N->getOperand(4));
15102 }
15103
15104 bool BranchOnWhenPredTrue = (CC == ISD::SETEQ) ^ (Val == 0);
15105
15106 // Create the PPCISD altivec 'dot' comparison node.
15107 SDValue Ops[] = {
15108 LHS.getOperand(2), // LHS of compare
15109 LHS.getOperand(3), // RHS of compare
15110 DAG.getConstant(CompareOpc, dl, MVT::i32)
15111 };
15112 EVT VTs[] = { LHS.getOperand(2).getValueType(), MVT::Glue };
15113 SDValue CompNode = DAG.getNode(PPCISD::VCMP_rec, dl, VTs, Ops);
15114
15115 // Unpack the result based on how the target uses it.
15116 PPC::Predicate CompOpc;
15117 switch (cast<ConstantSDNode>(LHS.getOperand(1))->getZExtValue()) {
15118 default: // Can't happen, don't crash on invalid number though.
15119 case 0: // Branch on the value of the EQ bit of CR6.
15120 CompOpc = BranchOnWhenPredTrue ? PPC::PRED_EQ : PPC::PRED_NE;
15121 break;
15122 case 1: // Branch on the inverted value of the EQ bit of CR6.
15123 CompOpc = BranchOnWhenPredTrue ? PPC::PRED_NE : PPC::PRED_EQ;
15124 break;
15125 case 2: // Branch on the value of the LT bit of CR6.
15126 CompOpc = BranchOnWhenPredTrue ? PPC::PRED_LT : PPC::PRED_GE;
15127 break;
15128 case 3: // Branch on the inverted value of the LT bit of CR6.
15129 CompOpc = BranchOnWhenPredTrue ? PPC::PRED_GE : PPC::PRED_LT;
15130 break;
15131 }
15132
15133 return DAG.getNode(PPCISD::COND_BRANCH, dl, MVT::Other, N->getOperand(0),
15134 DAG.getConstant(CompOpc, dl, MVT::i32),
15135 DAG.getRegister(PPC::CR6, MVT::i32),
15136 N->getOperand(4), CompNode.getValue(1));
15137 }
15138 break;
15139 }
15140 case ISD::BUILD_VECTOR:
15141 return DAGCombineBuildVector(N, DCI);
15142 case ISD::ABS:
15143 return combineABS(N, DCI);
15144 case ISD::VSELECT:
15145 return combineVSelect(N, DCI);
15146 }
15147
15148 return SDValue();
15149}
15150
15151SDValue
15152PPCTargetLowering::BuildSDIVPow2(SDNode *N, const APInt &Divisor,
15153 SelectionDAG &DAG,
15154 SmallVectorImpl<SDNode *> &Created) const {
15155 // fold (sdiv X, pow2)
15156 EVT VT = N->getValueType(0);
15157 if (VT == MVT::i64 && !Subtarget.isPPC64())
15158 return SDValue();
15159 if ((VT != MVT::i32 && VT != MVT::i64) ||
15160 !(Divisor.isPowerOf2() || (-Divisor).isPowerOf2()))
15161 return SDValue();
15162
15163 SDLoc DL(N);
15164 SDValue N0 = N->getOperand(0);
15165
15166 bool IsNegPow2 = (-Divisor).isPowerOf2();
15167 unsigned Lg2 = (IsNegPow2 ? -Divisor : Divisor).countTrailingZeros();
15168 SDValue ShiftAmt = DAG.getConstant(Lg2, DL, VT);
15169
15170 SDValue Op = DAG.getNode(PPCISD::SRA_ADDZE, DL, VT, N0, ShiftAmt);
15171 Created.push_back(Op.getNode());
15172
15173 if (IsNegPow2) {
15174 Op = DAG.getNode(ISD::SUB, DL, VT, DAG.getConstant(0, DL, VT), Op);
15175 Created.push_back(Op.getNode());
15176 }
15177
15178 return Op;
15179}
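// A minimal standalone sketch (separate from this file) of the arithmetic
// the srawi + addze pair (PPCISD::SRA_ADDZE) performs for a signed divide by
// a power of two: the shift records, in the carry, whether any one-bits were
// shifted out of a negative dividend, and the add-with-zero-extend folds that
// carry back in so the quotient rounds toward zero.  Modeled on int32_t and
// assuming an arithmetic right shift for negative values, as on typical
// targets; this is not the DAG lowering itself.
#include <cassert>
#include <cstdint>

static int32_t sdivPow2(int32_t X, unsigned Lg2) {
  int32_t Shifted = X >> Lg2;                                  // srawi
  uint32_t Mask = Lg2 ? ((1u << Lg2) - 1) : 0;
  int32_t Carry = (X < 0 && (static_cast<uint32_t>(X) & Mask)) ? 1 : 0;
  return Shifted + Carry;                                      // addze
}

int main() {
  assert(sdivPow2(20, 2) == 5);
  assert(sdivPow2(-20, 2) == -5);
  assert(sdivPow2(-21, 2) == -5); // rounds toward zero, unlike a plain >>
  assert(sdivPow2(21, 2) == 5);
}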
15180
15181//===----------------------------------------------------------------------===//
15182// Inline Assembly Support
15183//===----------------------------------------------------------------------===//
15184
15185void PPCTargetLowering::computeKnownBitsForTargetNode(const SDValue Op,
15186 KnownBits &Known,
15187 const APInt &DemandedElts,
15188 const SelectionDAG &DAG,
15189 unsigned Depth) const {
15190 Known.resetAll();
15191 switch (Op.getOpcode()) {
15192 default: break;
15193 case PPCISD::LBRX: {
15194 // lhbrx is known to have the top bits cleared out.
15195 if (cast<VTSDNode>(Op.getOperand(2))->getVT() == MVT::i16)
15196 Known.Zero = 0xFFFF0000;
15197 break;
15198 }
15199 case ISD::INTRINSIC_WO_CHAIN: {
15200 switch (cast<ConstantSDNode>(Op.getOperand(0))->getZExtValue()) {
15201 default: break;
15202 case Intrinsic::ppc_altivec_vcmpbfp_p:
15203 case Intrinsic::ppc_altivec_vcmpeqfp_p:
15204 case Intrinsic::ppc_altivec_vcmpequb_p:
15205 case Intrinsic::ppc_altivec_vcmpequh_p:
15206 case Intrinsic::ppc_altivec_vcmpequw_p:
15207 case Intrinsic::ppc_altivec_vcmpequd_p:
15208 case Intrinsic::ppc_altivec_vcmpequq_p:
15209 case Intrinsic::ppc_altivec_vcmpgefp_p:
15210 case Intrinsic::ppc_altivec_vcmpgtfp_p:
15211 case Intrinsic::ppc_altivec_vcmpgtsb_p:
15212 case Intrinsic::ppc_altivec_vcmpgtsh_p:
15213 case Intrinsic::ppc_altivec_vcmpgtsw_p:
15214 case Intrinsic::ppc_altivec_vcmpgtsd_p:
15215 case Intrinsic::ppc_altivec_vcmpgtsq_p:
15216 case Intrinsic::ppc_altivec_vcmpgtub_p:
15217 case Intrinsic::ppc_altivec_vcmpgtuh_p:
15218 case Intrinsic::ppc_altivec_vcmpgtuw_p:
15219 case Intrinsic::ppc_altivec_vcmpgtud_p:
15220 case Intrinsic::ppc_altivec_vcmpgtuq_p:
15221 Known.Zero = ~1U; // All bits but the low one are known to be zero.
15222 break;
15223 }
15224 }
15225 }
15226}
15227
15228Align PPCTargetLowering::getPrefLoopAlignment(MachineLoop *ML) const {
15229 switch (Subtarget.getCPUDirective()) {
15230 default: break;
15231 case PPC::DIR_970:
15232 case PPC::DIR_PWR4:
15233 case PPC::DIR_PWR5:
15234 case PPC::DIR_PWR5X:
15235 case PPC::DIR_PWR6:
15236 case PPC::DIR_PWR6X:
15237 case PPC::DIR_PWR7:
15238 case PPC::DIR_PWR8:
15239 case PPC::DIR_PWR9:
15240 case PPC::DIR_PWR10:
15241 case PPC::DIR_PWR_FUTURE: {
15242 if (!ML)
15243 break;
15244
15245 if (!DisableInnermostLoopAlign32) {
15246       // If the nested loop is an innermost loop, prefer a 32-byte alignment
15247       // so that we can decrease cache misses and branch-prediction misses.
15248 // Actual alignment of the loop will depend on the hotness check and other
15249 // logic in alignBlocks.
15250 if (ML->getLoopDepth() > 1 && ML->getSubLoops().empty())
15251 return Align(32);
15252 }
15253
15254 const PPCInstrInfo *TII = Subtarget.getInstrInfo();
15255
15256 // For small loops (between 5 and 8 instructions), align to a 32-byte
15257 // boundary so that the entire loop fits in one instruction-cache line.
15258 uint64_t LoopSize = 0;
15259 for (auto I = ML->block_begin(), IE = ML->block_end(); I != IE; ++I)
15260 for (auto J = (*I)->begin(), JE = (*I)->end(); J != JE; ++J) {
15261 LoopSize += TII->getInstSizeInBytes(*J);
15262 if (LoopSize > 32)
15263 break;
15264 }
15265
15266 if (LoopSize > 16 && LoopSize <= 32)
15267 return Align(32);
15268
15269 break;
15270 }
15271 }
15272
15273 return TargetLowering::getPrefLoopAlignment(ML);
15274}
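// A minimal standalone check (separate from this file) of the size window
// used above, assuming the usual 4-byte PPC instruction encoding: the
// 16 < LoopSize <= 32 test corresponds to loops of 5 to 8 instructions,
// which can be made to fit in a single 32-byte instruction-cache block.
#include <cassert>

int main() {
  for (unsigned NumInsts = 1; NumInsts <= 10; ++NumInsts) {
    unsigned LoopSize = NumInsts * 4;
    bool Align32 = LoopSize > 16 && LoopSize <= 32;
    assert(Align32 == (NumInsts >= 5 && NumInsts <= 8));
  }
}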
15275
15276/// getConstraintType - Given a constraint, return the type of
15277/// constraint it is for this target.
15278PPCTargetLowering::ConstraintType
15279PPCTargetLowering::getConstraintType(StringRef Constraint) const {
15280 if (Constraint.size() == 1) {
15281 switch (Constraint[0]) {
15282 default: break;
15283 case 'b':
15284 case 'r':
15285 case 'f':
15286 case 'd':
15287 case 'v':
15288 case 'y':
15289 return C_RegisterClass;
15290 case 'Z':
15291 // FIXME: While Z does indicate a memory constraint, it specifically
15292 // indicates an r+r address (used in conjunction with the 'y' modifier
15293 // in the replacement string). Currently, we're forcing the base
15294 // register to be r0 in the asm printer (which is interpreted as zero)
15295 // and forming the complete address in the second register. This is
15296 // suboptimal.
15297 return C_Memory;
15298 }
15299 } else if (Constraint == "wc") { // individual CR bits.
15300 return C_RegisterClass;
15301 } else if (Constraint == "wa" || Constraint == "wd" ||
15302 Constraint == "wf" || Constraint == "ws" ||
15303 Constraint == "wi" || Constraint == "ww") {
15304 return C_RegisterClass; // VSX registers.
15305 }
15306 return TargetLowering::getConstraintType(Constraint);
15307}
15308
15309/// Examine constraint type and operand type and determine a weight value.
15310/// This object must already have been set up with the operand type
15311/// and the current alternative constraint selected.
15312TargetLowering::ConstraintWeight
15313PPCTargetLowering::getSingleConstraintMatchWeight(
15314 AsmOperandInfo &info, const char *constraint) const {
15315 ConstraintWeight weight = CW_Invalid;
15316 Value *CallOperandVal = info.CallOperandVal;
15317 // If we don't have a value, we can't do a match,
15318 // but allow it at the lowest weight.
15319 if (!CallOperandVal)
15320 return CW_Default;
15321 Type *type = CallOperandVal->getType();
15322
15323 // Look at the constraint type.
15324 if (StringRef(constraint) == "wc" && type->isIntegerTy(1))
15325 return CW_Register; // an individual CR bit.
15326 else if ((StringRef(constraint) == "wa" ||
15327 StringRef(constraint) == "wd" ||
15328 StringRef(constraint) == "wf") &&
15329 type->isVectorTy())
15330 return CW_Register;
15331 else if (StringRef(constraint) == "wi" && type->isIntegerTy(64))
15332     return CW_Register; // just holds 64-bit integer data.
15333 else if (StringRef(constraint) == "ws" && type->isDoubleTy())
15334 return CW_Register;
15335 else if (StringRef(constraint) == "ww" && type->isFloatTy())
15336 return CW_Register;
15337
15338 switch (*constraint) {
15339 default:
15340 weight = TargetLowering::getSingleConstraintMatchWeight(info, constraint);
15341 break;
15342 case 'b':
15343 if (type->isIntegerTy())
15344 weight = CW_Register;
15345 break;
15346 case 'f':
15347 if (type->isFloatTy())
15348 weight = CW_Register;
15349 break;
15350 case 'd':
15351 if (type->isDoubleTy())
15352 weight = CW_Register;
15353 break;
15354 case 'v':
15355 if (type->isVectorTy())
15356 weight = CW_Register;
15357 break;
15358 case 'y':
15359 weight = CW_Register;
15360 break;
15361 case 'Z':
15362 weight = CW_Memory;
15363 break;
15364 }
15365 return weight;
15366}
15367
15368std::pair<unsigned, const TargetRegisterClass *>
15369PPCTargetLowering::getRegForInlineAsmConstraint(const TargetRegisterInfo *TRI,
15370 StringRef Constraint,
15371 MVT VT) const {
15372 if (Constraint.size() == 1) {
15373 // GCC RS6000 Constraint Letters
15374 switch (Constraint[0]) {
15375 case 'b': // R1-R31
15376 if (VT == MVT::i64 && Subtarget.isPPC64())
15377 return std::make_pair(0U, &PPC::G8RC_NOX0RegClass);
15378 return std::make_pair(0U, &PPC::GPRC_NOR0RegClass);
15379 case 'r': // R0-R31
15380 if (VT == MVT::i64 && Subtarget.isPPC64())
15381 return std::make_pair(0U, &PPC::G8RCRegClass);
15382 return std::make_pair(0U, &PPC::GPRCRegClass);
15383 // 'd' and 'f' constraints are both defined to be "the floating point
15384 // registers", where one is for 32-bit and the other for 64-bit. We don't
15385 // really care overly much here so just give them all the same reg classes.
15386 case 'd':
15387 case 'f':
15388 if (Subtarget.hasSPE()) {
15389 if (VT == MVT::f32 || VT == MVT::i32)
15390 return std::make_pair(0U, &PPC::GPRCRegClass);
15391 if (VT == MVT::f64 || VT == MVT::i64)
15392 return std::make_pair(0U, &PPC::SPERCRegClass);
15393 } else {
15394 if (VT == MVT::f32 || VT == MVT::i32)
15395 return std::make_pair(0U, &PPC::F4RCRegClass);
15396 if (VT == MVT::f64 || VT == MVT::i64)
15397 return std::make_pair(0U, &PPC::F8RCRegClass);
15398 }
15399 break;
15400 case 'v':
15401 if (Subtarget.hasAltivec())
15402 return std::make_pair(0U, &PPC::VRRCRegClass);
15403 break;
15404 case 'y': // crrc
15405 return std::make_pair(0U, &PPC::CRRCRegClass);
15406 }
15407 } else if (Constraint == "wc" && Subtarget.useCRBits()) {
15408 // An individual CR bit.
15409 return std::make_pair(0U, &PPC::CRBITRCRegClass);
15410 } else if ((Constraint == "wa" || Constraint == "wd" ||
15411 Constraint == "wf" || Constraint == "wi") &&
15412 Subtarget.hasVSX()) {
15413 return std::make_pair(0U, &PPC::VSRCRegClass);
15414 } else if ((Constraint == "ws" || Constraint == "ww") && Subtarget.hasVSX()) {
15415 if (VT == MVT::f32 && Subtarget.hasP8Vector())
15416 return std::make_pair(0U, &PPC::VSSRCRegClass);
15417 else
15418 return std::make_pair(0U, &PPC::VSFRCRegClass);
15419 }
15420
15421 // Handle special cases of physical registers that are not properly handled
15422 // by the base class.
15423 if (Constraint[0] == '{' && Constraint[Constraint.size() - 1] == '}') {
15424 // If we name a VSX register, we can't defer to the base class because it
15425 // will not recognize the correct register (their names will be VSL{0-31}
15426 // and V{0-31} so they won't match). So we match them here.
15427 if (Constraint.size() > 3 && Constraint[1] == 'v' && Constraint[2] == 's') {
15428 int VSNum = atoi(Constraint.data() + 3);
15429 assert(VSNum >= 0 && VSNum <= 63 &&
15430 "Attempted to access a vsr out of range");
15431 if (VSNum < 32)
15432 return std::make_pair(PPC::VSL0 + VSNum, &PPC::VSRCRegClass);
15433 return std::make_pair(PPC::V0 + VSNum - 32, &PPC::VSRCRegClass);
15434 }
15435
15436 // For float registers, we can't defer to the base class as it will match
15437 // the SPILLTOVSRRC class.
15438 if (Constraint.size() > 3 && Constraint[1] == 'f') {
15439 int RegNum = atoi(Constraint.data() + 2);
15440 if (RegNum > 31 || RegNum < 0)
15441 report_fatal_error("Invalid floating point register number");
15442 if (VT == MVT::f32 || VT == MVT::i32)
15443 return Subtarget.hasSPE()
15444 ? std::make_pair(PPC::R0 + RegNum, &PPC::GPRCRegClass)
15445 : std::make_pair(PPC::F0 + RegNum, &PPC::F4RCRegClass);
15446 if (VT == MVT::f64 || VT == MVT::i64)
15447 return Subtarget.hasSPE()
15448 ? std::make_pair(PPC::S0 + RegNum, &PPC::SPERCRegClass)
15449 : std::make_pair(PPC::F0 + RegNum, &PPC::F8RCRegClass);
15450 }
15451 }
15452
15453 std::pair<unsigned, const TargetRegisterClass *> R =
15454 TargetLowering::getRegForInlineAsmConstraint(TRI, Constraint, VT);
15455
15456 // r[0-9]+ are used, on PPC64, to refer to the corresponding 64-bit registers
15457 // (which we call X[0-9]+). If a 64-bit value has been requested, and a
15458 // 32-bit GPR has been selected, then 'upgrade' it to the 64-bit parent
15459 // register.
15460 // FIXME: If TargetLowering::getRegForInlineAsmConstraint could somehow use
15461 // the AsmName field from *RegisterInfo.td, then this would not be necessary.
15462 if (R.first && VT == MVT::i64 && Subtarget.isPPC64() &&
15463 PPC::GPRCRegClass.contains(R.first))
15464 return std::make_pair(TRI->getMatchingSuperReg(R.first,
15465 PPC::sub_32, &PPC::G8RCRegClass),
15466 &PPC::G8RCRegClass);
15467
15468 // GCC accepts 'cc' as an alias for 'cr0', and we need to do the same.
15469 if (!R.second && StringRef("{cc}").equals_lower(Constraint)) {
15470 R.first = PPC::CR0;
15471 R.second = &PPC::CRRCRegClass;
15472 }
15473 // FIXME: This warning should ideally be emitted in the front end.
15474 const auto &TM = getTargetMachine();
15475 if (Subtarget.isAIXABI() && !TM.getAIXExtendedAltivecABI()) {
15476 if (((R.first >= PPC::V20 && R.first <= PPC::V31) ||
15477 (R.first >= PPC::VF20 && R.first <= PPC::VF31)) &&
15478 (R.second == &PPC::VSRCRegClass || R.second == &PPC::VSFRCRegClass))
15479 errs() << "warning: vector registers 20 to 32 are reserved in the "
15480 "default AIX AltiVec ABI and cannot be used\n";
15481 }
15482
15483 return R;
15484}
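As a rough illustration (assumed usage, not taken from the report), the single-letter branches above back ordinary GCC-style operand constraints; a plain 'f' operand, for example, lands in F4RC/F8RC (or the GPR/SPE classes on SPE subtargets) depending on the value type:

  // Minimal sketch: 'f' floating-point register operands.
  double fadd_example(double a, double b) {
    double r;
    asm("fadd %0, %1, %2" : "=f"(r) : "f"(a), "f"(b)); // r = a + b
    return r;
  }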
15485
15486/// LowerAsmOperandForConstraint - Lower the specified operand into the Ops
15487/// vector. If it is invalid, don't add anything to Ops.
15488void PPCTargetLowering::LowerAsmOperandForConstraint(SDValue Op,
15489 std::string &Constraint,
15490 std::vector<SDValue>&Ops,
15491 SelectionDAG &DAG) const {
15492 SDValue Result;
15493
15494 // Only support length 1 constraints.
15495 if (Constraint.length() > 1) return;
15496
15497 char Letter = Constraint[0];
15498 switch (Letter) {
15499 default: break;
15500 case 'I':
15501 case 'J':
15502 case 'K':
15503 case 'L':
15504 case 'M':
15505 case 'N':
15506 case 'O':
15507 case 'P': {
15508 ConstantSDNode *CST = dyn_cast<ConstantSDNode>(Op);
15509 if (!CST) return; // Must be an immediate to match.
15510 SDLoc dl(Op);
15511 int64_t Value = CST->getSExtValue();
15512 EVT TCVT = MVT::i64; // All constants taken to be 64 bits so that negative
15513 // numbers are printed as such.
15514 switch (Letter) {
15515 default: llvm_unreachable("Unknown constraint letter!");
15516 case 'I': // "I" is a signed 16-bit constant.
15517 if (isInt<16>(Value))
15518 Result = DAG.getTargetConstant(Value, dl, TCVT);
15519 break;
15520 case 'J': // "J" is a constant with only the high-order 16 bits nonzero.
15521 if (isShiftedUInt<16, 16>(Value))
15522 Result = DAG.getTargetConstant(Value, dl, TCVT);
15523 break;
15524 case 'L': // "L" is a signed 16-bit constant shifted left 16 bits.
15525 if (isShiftedInt<16, 16>(Value))
15526 Result = DAG.getTargetConstant(Value, dl, TCVT);
15527 break;
15528 case 'K': // "K" is a constant with only the low-order 16 bits nonzero.
15529 if (isUInt<16>(Value))
15530 Result = DAG.getTargetConstant(Value, dl, TCVT);
15531 break;
15532 case 'M': // "M" is a constant that is greater than 31.
15533 if (Value > 31)
15534 Result = DAG.getTargetConstant(Value, dl, TCVT);
15535 break;
15536 case 'N': // "N" is a positive constant that is an exact power of two.
15537 if (Value > 0 && isPowerOf2_64(Value))
15538 Result = DAG.getTargetConstant(Value, dl, TCVT);
15539 break;
15540 case 'O': // "O" is the constant zero.
15541 if (Value == 0)
15542 Result = DAG.getTargetConstant(Value, dl, TCVT);
15543 break;
15544 case 'P': // "P" is a constant whose negation is a signed 16-bit constant.
15545 if (isInt<16>(-Value))
15546 Result = DAG.getTargetConstant(Value, dl, TCVT);
15547 break;
15548 }
15549 break;
15550 }
15551 }
15552
15553 if (Result.getNode()) {
15554 Ops.push_back(Result);
15555 return;
15556 }
15557
15558 // Handle standard constraint letters.
15559 TargetLowering::LowerAsmOperandForConstraint(Op, Constraint, Ops, DAG);
15560}
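A small sketch (assumed, not from the source) of the immediate-letter constraints handled above: 'I' only matches when the operand is a compile-time constant fitting a signed 16-bit field, which is exactly what addi requires, while 'b' restricts the base to R1-R31.

  // Illustrative: 'b' = base register (r1-r31), 'I' = signed 16-bit immediate.
  long add_sixteen(long x) {
    long r;
    asm("addi %0, %1, %2" : "=r"(r) : "b"(x), "I"(16));
    return r;
  }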
15561
15562// isLegalAddressingMode - Return true if the addressing mode represented
15563// by AM is legal for this target, for a load/store of the specified type.
15564bool PPCTargetLowering::isLegalAddressingMode(const DataLayout &DL,
15565 const AddrMode &AM, Type *Ty,
15566 unsigned AS,
15567 Instruction *I) const {
15568 // The vector-type r+i form has been supported since Power9 as the DQ form. We
15569 // don't check that the offset meets the DQ-form requirement (off % 16 == 0),
15570 // because on PowerPC the immediate form is preferred and the offset can be
15571 // adjusted to use it later in the PPCLoopInstrFormPrep pass. Also, since LSR
15572 // checks addressing-mode legality with the min and max offsets of an LSRUse,
15573 // we should be a little aggressive and accept other offsets for that LSRUse.
15574 if (Ty->isVectorTy() && AM.BaseOffs != 0 && !Subtarget.hasP9Vector())
15575 return false;
15576
15577 // PPC allows a sign-extended 16-bit immediate field.
15578 if (AM.BaseOffs <= -(1LL << 16) || AM.BaseOffs >= (1LL << 16)-1)
15579 return false;
15580
15581 // No global is ever allowed as a base.
15582 if (AM.BaseGV)
15583 return false;
15584
15585 // PPC only support r+r,
15586 switch (AM.Scale) {
15587 case 0: // "r+i" or just "i", depending on HasBaseReg.
15588 break;
15589 case 1:
15590 if (AM.HasBaseReg && AM.BaseOffs) // "r+r+i" is not allowed.
15591 return false;
15592 // Otherwise we have r+r or r+i.
15593 break;
15594 case 2:
15595 if (AM.HasBaseReg || AM.BaseOffs) // 2*r+r or 2*r+i is not allowed.
15596 return false;
15597 // Allow 2*r as r+r.
15598 break;
15599 default:
15600 // No other scales are supported.
15601 return false;
15602 }
15603
15604 return true;
15605}
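Some concrete consequences of the checks above (these cases are inferred for illustration, not quoted from the report):

  // reg + 32752     -> legal   (fits the signed 16-bit displacement, Scale == 0)
  // reg + reg       -> legal   (Scale == 1, no displacement)
  // reg + reg + 8   -> illegal (Scale == 1 with both a base register and an offset)
  // 2*reg           -> legal   (folded as reg + reg)
  // 2*reg + 8       -> illegal (Scale == 2 with an offset)
  // @global + 8     -> illegal (a global is never allowed as a base)
  // vector reg + 16 -> legal only with Power9 (DQ form), per the comment above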
15606
15607SDValue PPCTargetLowering::LowerRETURNADDR(SDValue Op,
15608 SelectionDAG &DAG) const {
15609 MachineFunction &MF = DAG.getMachineFunction();
15610 MachineFrameInfo &MFI = MF.getFrameInfo();
15611 MFI.setReturnAddressIsTaken(true);
15612
15613 if (verifyReturnAddressArgumentIsConstant(Op, DAG))
15614 return SDValue();
15615
15616 SDLoc dl(Op);
15617 unsigned Depth = cast<ConstantSDNode>(Op.getOperand(0))->getZExtValue();
15618
15619 // Make sure the function does not optimize away the store of the RA to
15620 // the stack.
15621 PPCFunctionInfo *FuncInfo = MF.getInfo<PPCFunctionInfo>();
15622 FuncInfo->setLRStoreRequired();
15623 bool isPPC64 = Subtarget.isPPC64();
15624 auto PtrVT = getPointerTy(MF.getDataLayout());
15625
15626 if (Depth > 0) {
15627 SDValue FrameAddr = LowerFRAMEADDR(Op, DAG);
15628 SDValue Offset =
15629 DAG.getConstant(Subtarget.getFrameLowering()->getReturnSaveOffset(), dl,
15630 isPPC64 ? MVT::i64 : MVT::i32);
15631 return DAG.getLoad(PtrVT, dl, DAG.getEntryNode(),
15632 DAG.getNode(ISD::ADD, dl, PtrVT, FrameAddr, Offset),
15633 MachinePointerInfo());
15634 }
15635
15636 // Just load the return address off the stack.
15637 SDValue RetAddrFI = getReturnAddrFrameIndex(DAG);
15638 return DAG.getLoad(PtrVT, dl, DAG.getEntryNode(), RetAddrFI,
15639 MachinePointerInfo());
15640}
15641
15642SDValue PPCTargetLowering::LowerFRAMEADDR(SDValue Op,
15643 SelectionDAG &DAG) const {
15644 SDLoc dl(Op);
15645 unsigned Depth = cast<ConstantSDNode>(Op.getOperand(0))->getZExtValue();
15646
15647 MachineFunction &MF = DAG.getMachineFunction();
15648 MachineFrameInfo &MFI = MF.getFrameInfo();
15649 MFI.setFrameAddressIsTaken(true);
15650
15651 EVT PtrVT = getPointerTy(MF.getDataLayout());
15652 bool isPPC64 = PtrVT == MVT::i64;
15653
15654 // Naked functions never have a frame pointer, and so we use r1. For all
15655 // other functions, this decision must be delayed until during PEI.
15656 unsigned FrameReg;
15657 if (MF.getFunction().hasFnAttribute(Attribute::Naked))
15658 FrameReg = isPPC64 ? PPC::X1 : PPC::R1;
15659 else
15660 FrameReg = isPPC64 ? PPC::FP8 : PPC::FP;
15661
15662 SDValue FrameAddr = DAG.getCopyFromReg(DAG.getEntryNode(), dl, FrameReg,
15663 PtrVT);
15664 while (Depth--)
15665 FrameAddr = DAG.getLoad(Op.getValueType(), dl, DAG.getEntryNode(),
15666 FrameAddr, MachinePointerInfo());
15667 return FrameAddr;
15668}
15669
15670// FIXME? Maybe this could be a TableGen attribute on some registers and
15671// this table could be generated automatically from RegInfo.
15672Register PPCTargetLowering::getRegisterByName(const char* RegName, LLT VT,
15673 const MachineFunction &MF) const {
15674 bool isPPC64 = Subtarget.isPPC64();
15675
15676 bool is64Bit = isPPC64 && VT == LLT::scalar(64);
15677 if (!is64Bit && VT != LLT::scalar(32))
15678 report_fatal_error("Invalid register global variable type");
15679
15680 Register Reg = StringSwitch<Register>(RegName)
15681 .Case("r1", is64Bit ? PPC::X1 : PPC::R1)
15682 .Case("r2", isPPC64 ? Register() : PPC::R2)
15683 .Case("r13", (is64Bit ? PPC::X13 : PPC::R13))
15684 .Default(Register());
15685
15686 if (Reg)
15687 return Reg;
15688 report_fatal_error("Invalid register name global variable");
15689}
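For illustration (an assumption about typical usage, not stated in the report), this hook backs clang's named-register-variable extension; the three names it resolves cover the stack pointer, the TOC pointer, and the thread pointer.

  // Sketch: named register variables mapping to the cases above on a 64-bit
  // target ("r1" -> X1, "r13" -> X13); "r2" is rejected on PPC64.
  register unsigned long stack_ptr asm("r1");
  register unsigned long thread_ptr asm("r13");

  unsigned long current_sp(void) { return stack_ptr; }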
15690
15691bool PPCTargetLowering::isAccessedAsGotIndirect(SDValue GA) const {
15692 // The 32-bit SVR4 ABI accesses everything as GOT-indirect.
15693 if (Subtarget.is32BitELFABI())
15694 return true;
15695
15696 // AIX accesses everything indirectly through the TOC, which is similar to
15697 // the GOT.
15698 if (Subtarget.isAIXABI())
15699 return true;
15700
15701 CodeModel::Model CModel = getTargetMachine().getCodeModel();
15702 // If it is small or large code model, module locals are accessed
15703 // indirectly by loading their address from .toc/.got.
15704 if (CModel == CodeModel::Small || CModel == CodeModel::Large)
15705 return true;
15706
15707 // JumpTable and BlockAddress are accessed as got-indirect.
15708 if (isa<JumpTableSDNode>(GA) || isa<BlockAddressSDNode>(GA))
15709 return true;
15710
15711 if (GlobalAddressSDNode *G = dyn_cast<GlobalAddressSDNode>(GA))
15712 return Subtarget.isGVIndirectSymbol(G->getGlobal());
15713
15714 return false;
15715}
15716
15717bool
15718PPCTargetLowering::isOffsetFoldingLegal(const GlobalAddressSDNode *GA) const {
15719 // The PowerPC target isn't yet aware of offsets.
15720 return false;
15721}
15722
15723bool PPCTargetLowering::getTgtMemIntrinsic(IntrinsicInfo &Info,
15724 const CallInst &I,
15725 MachineFunction &MF,
15726 unsigned Intrinsic) const {
15727 switch (Intrinsic) {
15728 case Intrinsic::ppc_altivec_lvx:
15729 case Intrinsic::ppc_altivec_lvxl:
15730 case Intrinsic::ppc_altivec_lvebx:
15731 case Intrinsic::ppc_altivec_lvehx:
15732 case Intrinsic::ppc_altivec_lvewx:
15733 case Intrinsic::ppc_vsx_lxvd2x:
15734 case Intrinsic::ppc_vsx_lxvw4x:
15735 case Intrinsic::ppc_vsx_lxvd2x_be:
15736 case Intrinsic::ppc_vsx_lxvw4x_be:
15737 case Intrinsic::ppc_vsx_lxvl:
15738 case Intrinsic::ppc_vsx_lxvll: {
15739 EVT VT;
15740 switch (Intrinsic) {
15741 case Intrinsic::ppc_altivec_lvebx:
15742 VT = MVT::i8;
15743 break;
15744 case Intrinsic::ppc_altivec_lvehx:
15745 VT = MVT::i16;
15746 break;
15747 case Intrinsic::ppc_altivec_lvewx:
15748 VT = MVT::i32;
15749 break;
15750 case Intrinsic::ppc_vsx_lxvd2x:
15751 case Intrinsic::ppc_vsx_lxvd2x_be:
15752 VT = MVT::v2f64;
15753 break;
15754 default:
15755 VT = MVT::v4i32;
15756 break;
15757 }
15758
15759 Info.opc = ISD::INTRINSIC_W_CHAIN;
15760 Info.memVT = VT;
15761 Info.ptrVal = I.getArgOperand(0);
15762 Info.offset = -VT.getStoreSize()+1;
15763 Info.size = 2*VT.getStoreSize()-1;
15764 Info.align = Align(1);
15765 Info.flags = MachineMemOperand::MOLoad;
15766 return true;
15767 }
15768 case Intrinsic::ppc_altivec_stvx:
15769 case Intrinsic::ppc_altivec_stvxl:
15770 case Intrinsic::ppc_altivec_stvebx:
15771 case Intrinsic::ppc_altivec_stvehx:
15772 case Intrinsic::ppc_altivec_stvewx:
15773 case Intrinsic::ppc_vsx_stxvd2x:
15774 case Intrinsic::ppc_vsx_stxvw4x:
15775 case Intrinsic::ppc_vsx_stxvd2x_be:
15776 case Intrinsic::ppc_vsx_stxvw4x_be:
15777 case Intrinsic::ppc_vsx_stxvl:
15778 case Intrinsic::ppc_vsx_stxvll: {
15779 EVT VT;
15780 switch (Intrinsic) {
15781 case Intrinsic::ppc_altivec_stvebx:
15782 VT = MVT::i8;
15783 break;
15784 case Intrinsic::ppc_altivec_stvehx:
15785 VT = MVT::i16;
15786 break;
15787 case Intrinsic::ppc_altivec_stvewx:
15788 VT = MVT::i32;
15789 break;
15790 case Intrinsic::ppc_vsx_stxvd2x:
15791 case Intrinsic::ppc_vsx_stxvd2x_be:
15792 VT = MVT::v2f64;
15793 break;
15794 default:
15795 VT = MVT::v4i32;
15796 break;
15797 }
15798
15799 Info.opc = ISD::INTRINSIC_VOID;
15800 Info.memVT = VT;
15801 Info.ptrVal = I.getArgOperand(1);
15802 Info.offset = -VT.getStoreSize()+1;
15803 Info.size = 2*VT.getStoreSize()-1;
15804 Info.align = Align(1);
15805 Info.flags = MachineMemOperand::MOStore;
15806 return true;
15807 }
15808 default:
15809 break;
15810 }
15811
15812 return false;
15813}
15814
15815/// It returns EVT::Other if the type should be determined using generic
15816/// target-independent logic.
15817EVT PPCTargetLowering::getOptimalMemOpType(
15818 const MemOp &Op, const AttributeList &FuncAttributes) const {
15819 if (getTargetMachine().getOptLevel() != CodeGenOpt::None) {
15820 // We should use Altivec/VSX loads and stores when available. For unaligned
15821 // addresses, unaligned VSX loads are only fast starting with the P8.
15822 if (Subtarget.hasAltivec() && Op.size() >= 16 &&
15823 (Op.isAligned(Align(16)) ||
15824 ((Op.isMemset() && Subtarget.hasVSX()) || Subtarget.hasP8Vector())))
15825 return MVT::v4i32;
15826 }
15827
15828 if (Subtarget.isPPC64()) {
15829 return MVT::i64;
15830 }
15831
15832 return MVT::i32;
15833}
15834
15835/// Returns true if it is beneficial to convert a load of a constant
15836/// to just the constant itself.
15837bool PPCTargetLowering::shouldConvertConstantLoadToIntImm(const APInt &Imm,
15838 Type *Ty) const {
15839 assert(Ty->isIntegerTy());
15840
15841 unsigned BitSize = Ty->getPrimitiveSizeInBits();
15842 return !(BitSize == 0 || BitSize > 64);
15843}
15844
15845bool PPCTargetLowering::isTruncateFree(Type *Ty1, Type *Ty2) const {
15846 if (!Ty1->isIntegerTy() || !Ty2->isIntegerTy())
15847 return false;
15848 unsigned NumBits1 = Ty1->getPrimitiveSizeInBits();
15849 unsigned NumBits2 = Ty2->getPrimitiveSizeInBits();
15850 return NumBits1 == 64 && NumBits2 == 32;
15851}
15852
15853bool PPCTargetLowering::isTruncateFree(EVT VT1, EVT VT2) const {
15854 if (!VT1.isInteger() || !VT2.isInteger())
15855 return false;
15856 unsigned NumBits1 = VT1.getSizeInBits();
15857 unsigned NumBits2 = VT2.getSizeInBits();
15858 return NumBits1 == 64 && NumBits2 == 32;
15859}
15860
15861bool PPCTargetLowering::isZExtFree(SDValue Val, EVT VT2) const {
15862 // Generally speaking, zexts are not free, but they are free when they can be
15863 // folded with other operations.
15864 if (LoadSDNode *LD = dyn_cast<LoadSDNode>(Val)) {
15865 EVT MemVT = LD->getMemoryVT();
15866 if ((MemVT == MVT::i1 || MemVT == MVT::i8 || MemVT == MVT::i16 ||
15867 (Subtarget.isPPC64() && MemVT == MVT::i32)) &&
15868 (LD->getExtensionType() == ISD::NON_EXTLOAD ||
15869 LD->getExtensionType() == ISD::ZEXTLOAD))
15870 return true;
15871 }
15872
15873 // FIXME: Add other cases...
15874 // - 32-bit shifts with a zext to i64
15875 // - zext after ctlz, bswap, etc.
15876 // - zext after and by a constant mask
15877
15878 return TargetLowering::isZExtFree(Val, VT2);
15879}
15880
15881bool PPCTargetLowering::isFPExtFree(EVT DestVT, EVT SrcVT) const {
15882 assert(DestVT.isFloatingPoint() && SrcVT.isFloatingPoint() &&
15883 "invalid fpext types");
15884 // Extending to float128 is not free.
15885 if (DestVT == MVT::f128)
15886 return false;
15887 return true;
15888}
15889
15890bool PPCTargetLowering::isLegalICmpImmediate(int64_t Imm) const {
15891 return isInt<16>(Imm) || isUInt<16>(Imm);
15892}
15893
15894bool PPCTargetLowering::isLegalAddImmediate(int64_t Imm) const {
15895 return isInt<16>(Imm) || isUInt<16>(Imm);
15896}
15897
15898bool PPCTargetLowering::allowsMisalignedMemoryAccesses(EVT VT, unsigned, Align,
15899 MachineMemOperand::Flags,
15900 bool *Fast) const {
15901 if (DisablePPCUnaligned)
15902 return false;
15903
15904 // PowerPC supports unaligned memory access for simple non-vector types.
15905 // Although accessing unaligned addresses is not as efficient as accessing
15906 // aligned addresses, it is generally more efficient than manual expansion,
15907 // and generally only traps for software emulation when crossing page
15908 // boundaries.
15909
15910 if (!VT.isSimple())
15911 return false;
15912
15913 if (VT.isFloatingPoint() && !VT.isVector() &&
15914 !Subtarget.allowsUnalignedFPAccess())
15915 return false;
15916
15917 if (VT.getSimpleVT().isVector()) {
15918 if (Subtarget.hasVSX()) {
15919 if (VT != MVT::v2f64 && VT != MVT::v2i64 &&
15920 VT != MVT::v4f32 && VT != MVT::v4i32)
15921 return false;
15922 } else {
15923 return false;
15924 }
15925 }
15926
15927 if (VT == MVT::ppcf128)
15928 return false;
15929
15930 if (Fast)
15931 *Fast = true;
15932
15933 return true;
15934}
15935
15936bool PPCTargetLowering::decomposeMulByConstant(LLVMContext &Context, EVT VT,
15937 SDValue C) const {
15938 // Check integral scalar types.
15939 if (!VT.isScalarInteger())
15940 return false;
15941 if (auto *ConstNode = dyn_cast<ConstantSDNode>(C.getNode())) {
15942 if (!ConstNode->getAPIntValue().isSignedIntN(64))
15943 return false;
15944 // This transformation will generate >= 2 operations. But the following
15945 // cases will generate <= 2 instructions during ISEL. So exclude them.
15946 // 1. If the constant multiplier fits 16 bits, it can be handled by one
15947 // HW instruction, i.e. MULLI
15948 // 2. If the multiplier fits 16 bits after shifting out its trailing zeros,
15949 // only one extra shift instruction is needed over case 1, i.e. MULLI and RLDICR
15950 int64_t Imm = ConstNode->getSExtValue();
15951 unsigned Shift = countTrailingZeros<uint64_t>(Imm);
15952 Imm >>= Shift;
15953 if (isInt<16>(Imm))
15954 return false;
15955 uint64_t UImm = static_cast<uint64_t>(Imm);
15956 if (isPowerOf2_64(UImm + 1) || isPowerOf2_64(UImm - 1) ||
15957 isPowerOf2_64(1 - UImm) || isPowerOf2_64(-1 - UImm))
15958 return true;
15959 }
15960 return false;
15961}
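Two worked cases (added for clarity; both are inferred from the checks above rather than quoted from the report):

  //   C = 40:        countTrailingZeros = 3, 40 >> 3 = 5 fits int16
  //                  -> return false (a single MULLI, plus at most an RLDICR,
  //                     is already cheap enough).
  //   C = (1<<20)+1: the shifted value does not fit int16, but C - 1 is a
  //                  power of two -> return true, letting the generic combiner
  //                  decompose the multiply into shl + add.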
15962
15963bool PPCTargetLowering::isFMAFasterThanFMulAndFAdd(const MachineFunction &MF,
15964 EVT VT) const {
15965 return isFMAFasterThanFMulAndFAdd(
15966 MF.getFunction(), VT.getTypeForEVT(MF.getFunction().getContext()));
15967}
15968
15969bool PPCTargetLowering::isFMAFasterThanFMulAndFAdd(const Function &F,
15970 Type *Ty) const {
15971 switch (Ty->getScalarType()->getTypeID()) {
15972 case Type::FloatTyID:
15973 case Type::DoubleTyID:
15974 return true;
15975 case Type::FP128TyID:
15976 return Subtarget.hasP9Vector();
15977 default:
15978 return false;
15979 }
15980}
15981
15982// FIXME: add more patterns which are not profitable to hoist.
15983bool PPCTargetLowering::isProfitableToHoist(Instruction *I) const {
15984 if (!I->hasOneUse())
15985 return true;
15986
15987 Instruction *User = I->user_back();
15988 assert(User && "A single use instruction with no uses.");
15989
15990 switch (I->getOpcode()) {
15991 case Instruction::FMul: {
15992 // Don't break FMA, PowerPC prefers FMA.
15993 if (User->getOpcode() != Instruction::FSub &&
15994 User->getOpcode() != Instruction::FAdd)
15995 return true;
15996
15997 const TargetOptions &Options = getTargetMachine().Options;
15998 const Function *F = I->getFunction();
15999 const DataLayout &DL = F->getParent()->getDataLayout();
16000 Type *Ty = User->getOperand(0)->getType();
16001
16002 return !(
16003 isFMAFasterThanFMulAndFAdd(*F, Ty) &&
16004 isOperationLegalOrCustom(ISD::FMA, getValueType(DL, Ty)) &&
16005 (Options.AllowFPOpFusion == FPOpFusion::Fast || Options.UnsafeFPMath));
16006 }
16007 case Instruction::Load: {
16008 // Don't break "store (load float*)" pattern, this pattern will be combined
16009 // to "store (load int32)" in later InstCombine pass. See function
16010 // combineLoadToOperationType. On PowerPC, loading a floating-point value
16011 // takes more cycles than loading a 32-bit integer.
16012 LoadInst *LI = cast<LoadInst>(I);
16013 // For the loads that combineLoadToOperationType does nothing, like
16014 // ordered load, it should be profitable to hoist them.
16015 // For swifterror load, it can only be used for pointer to pointer type, so
16016 // later type check should get rid of this case.
16017 if (!LI->isUnordered())
16018 return true;
16019
16020 if (User->getOpcode() != Instruction::Store)
16021 return true;
16022
16023 if (I->getType()->getTypeID() != Type::FloatTyID)
16024 return true;
16025
16026 return false;
16027 }
16028 default:
16029 return true;
16030 }
16031 return true;
16032}
16033
16034const MCPhysReg *
16035PPCTargetLowering::getScratchRegisters(CallingConv::ID) const {
16036 // LR is a callee-save register, but we must treat it as clobbered by any call
16037 // site. Hence we include LR in the scratch registers, which are in turn added
16038 // as implicit-defs for stackmaps and patchpoints. The same reasoning applies
16039 // to CTR, which is used by any indirect call.
16040 static const MCPhysReg ScratchRegs[] = {
16041 PPC::X12, PPC::LR8, PPC::CTR8, 0
16042 };
16043
16044 return ScratchRegs;
16045}
16046
16047Register PPCTargetLowering::getExceptionPointerRegister(
16048 const Constant *PersonalityFn) const {
16049 return Subtarget.isPPC64() ? PPC::X3 : PPC::R3;
16050}
16051
16052Register PPCTargetLowering::getExceptionSelectorRegister(
16053 const Constant *PersonalityFn) const {
16054 return Subtarget.isPPC64() ? PPC::X4 : PPC::R4;
16055}
16056
16057bool
16058PPCTargetLowering::shouldExpandBuildVectorWithShuffles(
16059 EVT VT , unsigned DefinedValues) const {
16060 if (VT == MVT::v2i64)
16061 return Subtarget.hasDirectMove(); // Don't need stack ops with direct moves
16062
16063 if (Subtarget.hasVSX())
16064 return true;
16065
16066 return TargetLowering::shouldExpandBuildVectorWithShuffles(VT, DefinedValues);
16067}
16068
16069Sched::Preference PPCTargetLowering::getSchedulingPreference(SDNode *N) const {
16070 if (DisableILPPref || Subtarget.enableMachineScheduler())
16071 return TargetLowering::getSchedulingPreference(N);
16072
16073 return Sched::ILP;
16074}
16075
16076// Create a fast isel object.
16077FastISel *
16078PPCTargetLowering::createFastISel(FunctionLoweringInfo &FuncInfo,
16079 const TargetLibraryInfo *LibInfo) const {
16080 return PPC::createFastISel(FuncInfo, LibInfo);
16081}
16082
16083// 'Inverted' means the FMA opcode after negating one multiplicand.
16084// For example, (fma -a b c) = (fnmsub a b c)
16085static unsigned invertFMAOpcode(unsigned Opc) {
16086 switch (Opc) {
16087 default:
16088 llvm_unreachable("Invalid FMA opcode for PowerPC!");
16089 case ISD::FMA:
16090 return PPCISD::FNMSUB;
16091 case PPCISD::FNMSUB:
16092 return ISD::FMA;
16093 }
16094}
16095
16096SDValue PPCTargetLowering::getNegatedExpression(SDValue Op, SelectionDAG &DAG,
16097 bool LegalOps, bool OptForSize,
16098 NegatibleCost &Cost,
16099 unsigned Depth) const {
16100 if (Depth > SelectionDAG::MaxRecursionDepth)
16101 return SDValue();
16102
16103 unsigned Opc = Op.getOpcode();
16104 EVT VT = Op.getValueType();
16105 SDNodeFlags Flags = Op.getNode()->getFlags();
16106
16107 switch (Opc) {
16108 case PPCISD::FNMSUB:
16109 if (!Op.hasOneUse() || !isTypeLegal(VT))
16110 break;
16111
16112 const TargetOptions &Options = getTargetMachine().Options;
16113 SDValue N0 = Op.getOperand(0);
16114 SDValue N1 = Op.getOperand(1);
16115 SDValue N2 = Op.getOperand(2);
16116 SDLoc Loc(Op);
16117
16118 NegatibleCost N2Cost = NegatibleCost::Expensive;
16119 SDValue NegN2 =
16120 getNegatedExpression(N2, DAG, LegalOps, OptForSize, N2Cost, Depth + 1);
16121
16122 if (!NegN2)
16123 return SDValue();
16124
16125 // (fneg (fnmsub a b c)) => (fnmsub (fneg a) b (fneg c))
16126 // (fneg (fnmsub a b c)) => (fnmsub a (fneg b) (fneg c))
16127 // These transformations may change sign of zeroes. For example,
16128 // -(-ab-(-c))=-0 while -(-(ab-c))=+0 when a=b=c=1.
16129 if (Flags.hasNoSignedZeros() || Options.NoSignedZerosFPMath) {
16130 // Try and choose the cheaper one to negate.
16131 NegatibleCost N0Cost = NegatibleCost::Expensive;
16132 SDValue NegN0 = getNegatedExpression(N0, DAG, LegalOps, OptForSize,
16133 N0Cost, Depth + 1);
16134
16135 NegatibleCost N1Cost = NegatibleCost::Expensive;
16136 SDValue NegN1 = getNegatedExpression(N1, DAG, LegalOps, OptForSize,
16137 N1Cost, Depth + 1);
16138
16139 if (NegN0 && N0Cost <= N1Cost) {
16140 Cost = std::min(N0Cost, N2Cost);
16141 return DAG.getNode(Opc, Loc, VT, NegN0, N1, NegN2, Flags);
16142 } else if (NegN1) {
16143 Cost = std::min(N1Cost, N2Cost);
16144 return DAG.getNode(Opc, Loc, VT, N0, NegN1, NegN2, Flags);
16145 }
16146 }
16147
16148 // (fneg (fnmsub a b c)) => (fma a b (fneg c))
16149 if (isOperationLegal(ISD::FMA, VT)) {
16150 Cost = N2Cost;
16151 return DAG.getNode(ISD::FMA, Loc, VT, N0, N1, NegN2, Flags);
16152 }
16153
16154 break;
16155 }
16156
16157 return TargetLowering::getNegatedExpression(Op, DAG, LegalOps, OptForSize,
16158 Cost, Depth);
16159}
16160
16161// Override to enable LOAD_STACK_GUARD lowering on Linux.
16162bool PPCTargetLowering::useLoadStackGuardNode() const {
16163 if (!Subtarget.isTargetLinux())
16164 return TargetLowering::useLoadStackGuardNode();
16165 return true;
16166}
16167
16168// Override to disable global variable loading on Linux.
16169void PPCTargetLowering::insertSSPDeclarations(Module &M) const {
16170 if (!Subtarget.isTargetLinux())
16171 return TargetLowering::insertSSPDeclarations(M);
16172}
16173
16174bool PPCTargetLowering::isFPImmLegal(const APFloat &Imm, EVT VT,
16175 bool ForCodeSize) const {
16176 if (!VT.isSimple() || !Subtarget.hasVSX())
16177 return false;
16178
16179 switch(VT.getSimpleVT().SimpleTy) {
16180 default:
16181 // For FP types that are currently not supported by PPC backend, return
16182 // false. Examples: f16, f80.
16183 return false;
16184 case MVT::f32:
16185 case MVT::f64:
16186 if (Subtarget.hasPrefixInstrs()) {
16187 // We can materialize all immediates via XXSPLTI32DX and XXSPLTIDP.
16188 return true;
16189 }
16190 LLVM_FALLTHROUGH;
16191 case MVT::ppcf128:
16192 return Imm.isPosZero();
16193 }
16194}
16195
16196// For vector shift operation op, fold
16197// (op x, (and y, ((1 << numbits(x)) - 1))) -> (target op x, y)
16198static SDValue stripModuloOnShift(const TargetLowering &TLI, SDNode *N,
16199 SelectionDAG &DAG) {
16200 SDValue N0 = N->getOperand(0);
16201 SDValue N1 = N->getOperand(1);
16202 EVT VT = N0.getValueType();
16203 unsigned OpSizeInBits = VT.getScalarSizeInBits();
16204 unsigned Opcode = N->getOpcode();
16205 unsigned TargetOpcode;
16206
16207 switch (Opcode) {
16208 default:
16209 llvm_unreachable("Unexpected shift operation");
16210 case ISD::SHL:
16211 TargetOpcode = PPCISD::SHL;
16212 break;
16213 case ISD::SRL:
16214 TargetOpcode = PPCISD::SRL;
16215 break;
16216 case ISD::SRA:
16217 TargetOpcode = PPCISD::SRA;
16218 break;
16219 }
16220
16221 if (VT.isVector() && TLI.isOperationLegal(Opcode, VT) &&
16222 N1->getOpcode() == ISD::AND)
16223 if (ConstantSDNode *Mask = isConstOrConstSplat(N1->getOperand(1)))
16224 if (Mask->getZExtValue() == OpSizeInBits - 1)
16225 return DAG.getNode(TargetOpcode, SDLoc(N), VT, N0, N1->getOperand(0));
16226
16227 return SDValue();
16228}
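An instance of the fold above (illustrative): for a v4i32 shift the element width is 32, so a mask of 31 is redundant on PowerPC and can be dropped.

  //   (srl v4i32:x, (and v4i32:y, 31))  -->  (PPCISD::SRL x, y)
  // The vector shift instructions already take the shift amount modulo the
  // element size, which is why the AND can be stripped.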
16229
16230SDValue PPCTargetLowering::combineSHL(SDNode *N, DAGCombinerInfo &DCI) const {
16231 if (auto Value = stripModuloOnShift(*this, N, DCI.DAG))
16232 return Value;
16233
16234 SDValue N0 = N->getOperand(0);
16235 ConstantSDNode *CN1 = dyn_cast<ConstantSDNode>(N->getOperand(1));
16236 if (!Subtarget.isISA3_0() || !Subtarget.isPPC64() ||
16237 N0.getOpcode() != ISD::SIGN_EXTEND ||
16238 N0.getOperand(0).getValueType() != MVT::i32 || CN1 == nullptr ||
16239 N->getValueType(0) != MVT::i64)
16240 return SDValue();
16241
16242 // We can't save an operation here if the value is already extended, and
16243 // the existing shift is easier to combine.
16244 SDValue ExtsSrc = N0.getOperand(0);
16245 if (ExtsSrc.getOpcode() == ISD::TRUNCATE &&
16246 ExtsSrc.getOperand(0).getOpcode() == ISD::AssertSext)
16247 return SDValue();
16248
16249 SDLoc DL(N0);
16250 SDValue ShiftBy = SDValue(CN1, 0);
16251 // We want the shift amount to be i32 on the extswli, but the shift could
16252 // have an i64.
16253 if (ShiftBy.getValueType() == MVT::i64)
16254 ShiftBy = DCI.DAG.getConstant(CN1->getZExtValue(), DL, MVT::i32);
16255
16256 return DCI.DAG.getNode(PPCISD::EXTSWSLI, DL, MVT::i64, N0->getOperand(0),
16257 ShiftBy);
16258}
16259
16260SDValue PPCTargetLowering::combineSRA(SDNode *N, DAGCombinerInfo &DCI) const {
16261 if (auto Value = stripModuloOnShift(*this, N, DCI.DAG))
16262 return Value;
16263
16264 return SDValue();
16265}
16266
16267SDValue PPCTargetLowering::combineSRL(SDNode *N, DAGCombinerInfo &DCI) const {
16268 if (auto Value = stripModuloOnShift(*this, N, DCI.DAG))
16269 return Value;
16270
16271 return SDValue();
16272}
16273
16274// Transform (add X, (zext(setne Z, C))) -> (addze X, (addic (addi Z, -C), -1))
16275// Transform (add X, (zext(sete Z, C))) -> (addze X, (subfic (addi Z, -C), 0))
16276// When C is zero, the equation (addi Z, -C) can be simplified to Z
16277// Requirement: -C in [-32768, 32767], X and Z are MVT::i64 types
16278static SDValue combineADDToADDZE(SDNode *N, SelectionDAG &DAG,
16279 const PPCSubtarget &Subtarget) {
16280 if (!Subtarget.isPPC64())
16281 return SDValue();
16282
16283 SDValue LHS = N->getOperand(0);
16284 SDValue RHS = N->getOperand(1);
16285
16286 auto isZextOfCompareWithConstant = [](SDValue Op) {
16287 if (Op.getOpcode() != ISD::ZERO_EXTEND || !Op.hasOneUse() ||
16288 Op.getValueType() != MVT::i64)
16289 return false;
16290
16291 SDValue Cmp = Op.getOperand(0);
16292 if (Cmp.getOpcode() != ISD::SETCC || !Cmp.hasOneUse() ||
16293 Cmp.getOperand(0).getValueType() != MVT::i64)
16294 return false;
16295
16296 if (auto *Constant = dyn_cast<ConstantSDNode>(Cmp.getOperand(1))) {
16297 int64_t NegConstant = 0 - Constant->getSExtValue();
16298 // Due to the limitations of the addi instruction,
16299 // -C is required to be in [-32768, 32767].
16300 return isInt<16>(NegConstant);
16301 }
16302
16303 return false;
16304 };
16305
16306 bool LHSHasPattern = isZextOfCompareWithConstant(LHS);
16307 bool RHSHasPattern = isZextOfCompareWithConstant(RHS);
16308
16309 // If there is a pattern, canonicalize a zext operand to the RHS.
16310 if (LHSHasPattern && !RHSHasPattern)
16311 std::swap(LHS, RHS);
16312 else if (!LHSHasPattern && !RHSHasPattern)
16313 return SDValue();
16314
16315 SDLoc DL(N);
16316 SDVTList VTs = DAG.getVTList(MVT::i64, MVT::Glue);
16317 SDValue Cmp = RHS.getOperand(0);
16318 SDValue Z = Cmp.getOperand(0);
16319 auto *Constant = dyn_cast<ConstantSDNode>(Cmp.getOperand(1));
16320
16321 assert(Constant && "Constant Should not be a null pointer.");
16322 int64_t NegConstant = 0 - Constant->getSExtValue();
16323
16324 switch(cast<CondCodeSDNode>(Cmp.getOperand(2))->get()) {
16325 default: break;
16326 case ISD::SETNE: {
16327 // when C == 0
16328 // --> addze X, (addic Z, -1).carry
16329 // /
16330 // add X, (zext(setne Z, C))--
16331 // \ when -32768 <= -C <= 32767 && C != 0
16332 // --> addze X, (addic (addi Z, -C), -1).carry
16333 SDValue Add = DAG.getNode(ISD::ADD, DL, MVT::i64, Z,
16334 DAG.getConstant(NegConstant, DL, MVT::i64));
16335 SDValue AddOrZ = NegConstant != 0 ? Add : Z;
16336 SDValue Addc = DAG.getNode(ISD::ADDC, DL, DAG.getVTList(MVT::i64, MVT::Glue),
16337 AddOrZ, DAG.getConstant(-1ULL, DL, MVT::i64));
16338 return DAG.getNode(ISD::ADDE, DL, VTs, LHS, DAG.getConstant(0, DL, MVT::i64),
16339 SDValue(Addc.getNode(), 1));
16340 }
16341 case ISD::SETEQ: {
16342 // when C == 0
16343 // --> addze X, (subfic Z, 0).carry
16344 // /
16345 // add X, (zext(sete Z, C))--
16346 // \ when -32768 <= -C <= 32767 && C != 0
16347 // --> addze X, (subfic (addi Z, -C), 0).carry
16348 SDValue Add = DAG.getNode(ISD::ADD, DL, MVT::i64, Z,
16349 DAG.getConstant(NegConstant, DL, MVT::i64));
16350 SDValue AddOrZ = NegConstant != 0 ? Add : Z;
16351 SDValue Subc = DAG.getNode(ISD::SUBC, DL, DAG.getVTList(MVT::i64, MVT::Glue),
16352 DAG.getConstant(0, DL, MVT::i64), AddOrZ);
16353 return DAG.getNode(ISD::ADDE, DL, VTs, LHS, DAG.getConstant(0, DL, MVT::i64),
16354 SDValue(Subc.getNode(), 1));
16355 }
16356 }
16357
16358 return SDValue();
16359}
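A worked instance of the SETNE case (the concrete constant 5 is an assumption chosen for illustration):

  //   (add X, (zext (setne Z, 5)))
  //     addi  t, Z, -5      ; t == 0 iff Z == 5
  //     addic t, t, -1      ; carry = (t != 0)
  //     addze X', X         ; X' = X + carry
  // i.e. the result is X + 1 when Z != 5 and X when Z == 5.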
16360
16361// Transform
16362// (add C1, (MAT_PCREL_ADDR GlobalAddr+C2)) to
16363// (MAT_PCREL_ADDR GlobalAddr+(C1+C2))
16364// In this case both C1 and C2 must be known constants.
16365// C1+C2 must fit into a 34 bit signed integer.
16366static SDValue combineADDToMAT_PCREL_ADDR(SDNode *N, SelectionDAG &DAG,
16367 const PPCSubtarget &Subtarget) {
16368 if (!Subtarget.isUsingPCRelativeCalls())
16369 return SDValue();
16370
16371 // Check both Operand 0 and Operand 1 of the ADD node for the PCRel node.
16372 // If we find that node try to cast the Global Address and the Constant.
16373 SDValue LHS = N->getOperand(0);
16374 SDValue RHS = N->getOperand(1);
16375
16376 if (LHS.getOpcode() != PPCISD::MAT_PCREL_ADDR)
16377 std::swap(LHS, RHS);
16378
16379 if (LHS.getOpcode() != PPCISD::MAT_PCREL_ADDR)
16380 return SDValue();
16381
16382 // Operand zero of PPCISD::MAT_PCREL_ADDR is the GA node.
16383 GlobalAddressSDNode *GSDN = dyn_cast<GlobalAddressSDNode>(LHS.getOperand(0));
16384 ConstantSDNode* ConstNode = dyn_cast<ConstantSDNode>(RHS);
16385
16386 // Check that both casts succeeded.
16387 if (!GSDN || !ConstNode)
16388 return SDValue();
16389
16390 int64_t NewOffset = GSDN->getOffset() + ConstNode->getSExtValue();
16391 SDLoc DL(GSDN);
16392
16393 // The signed int offset needs to fit in 34 bits.
16394 if (!isInt<34>(NewOffset))
16395 return SDValue();
16396
16397 // The new global address is a copy of the old global address except
16398 // that it has the updated Offset.
16399 SDValue GA =
16400 DAG.getTargetGlobalAddress(GSDN->getGlobal(), DL, GSDN->getValueType(0),
16401 NewOffset, GSDN->getTargetFlags());
16402 SDValue MatPCRel =
16403 DAG.getNode(PPCISD::MAT_PCREL_ADDR, DL, GSDN->getValueType(0), GA);
16404 return MatPCRel;
16405}
16406
16407SDValue PPCTargetLowering::combineADD(SDNode *N, DAGCombinerInfo &DCI) const {
16408 if (auto Value = combineADDToADDZE(N, DCI.DAG, Subtarget))
16409 return Value;
16410
16411 if (auto Value = combineADDToMAT_PCREL_ADDR(N, DCI.DAG, Subtarget))
16412 return Value;
16413
16414 return SDValue();
16415}
16416
16417// Detect TRUNCATE operations on bitcasts of float128 values.
16418 // What we are looking for here is the situation where we extract a subset
16419// of bits from a 128 bit float.
16420// This can be of two forms:
16421// 1) BITCAST of f128 feeding TRUNCATE
16422// 2) BITCAST of f128 feeding SRL (a shift) feeding TRUNCATE
16423// The reason this is required is because we do not have a legal i128 type
16424// and so we want to prevent having to store the f128 and then reload part
16425// of it.
16426SDValue PPCTargetLowering::combineTRUNCATE(SDNode *N,
16427 DAGCombinerInfo &DCI) const {
16428 // If we are using CRBits then try that first.
16429 if (Subtarget.useCRBits()) {
16430 // Check if CRBits did anything and return that if it did.
16431 if (SDValue CRTruncValue = DAGCombineTruncBoolExt(N, DCI))
16432 return CRTruncValue;
16433 }
16434
16435 SDLoc dl(N);
16436 SDValue Op0 = N->getOperand(0);
16437
16438 // fold (truncate (abs (sub (zext a), (zext b)))) -> (vabsd a, b)
16439 if (Subtarget.hasP9Altivec() && Op0.getOpcode() == ISD::ABS) {
16440 EVT VT = N->getValueType(0);
16441 if (VT != MVT::v4i32 && VT != MVT::v8i16 && VT != MVT::v16i8)
16442 return SDValue();
16443 SDValue Sub = Op0.getOperand(0);
16444 if (Sub.getOpcode() == ISD::SUB) {
16445 SDValue SubOp0 = Sub.getOperand(0);
16446 SDValue SubOp1 = Sub.getOperand(1);
16447 if ((SubOp0.getOpcode() == ISD::ZERO_EXTEND) &&
16448 (SubOp1.getOpcode() == ISD::ZERO_EXTEND)) {
16449 return DCI.DAG.getNode(PPCISD::VABSD, dl, VT, SubOp0.getOperand(0),
16450 SubOp1.getOperand(0),
16451 DCI.DAG.getTargetConstant(0, dl, MVT::i32));
16452 }
16453 }
16454 }
16455
16456 // Looking for a truncate of i128 to i64.
16457 if (Op0.getValueType() != MVT::i128 || N->getValueType(0) != MVT::i64)
16458 return SDValue();
16459
16460 int EltToExtract = DCI.DAG.getDataLayout().isBigEndian() ? 1 : 0;
16461
16462 // SRL feeding TRUNCATE.
16463 if (Op0.getOpcode() == ISD::SRL) {
16464 ConstantSDNode *ConstNode = dyn_cast<ConstantSDNode>(Op0.getOperand(1));
16465 // The right shift has to be by 64 bits.
16466 if (!ConstNode || ConstNode->getZExtValue() != 64)
16467 return SDValue();
16468
16469 // Switch the element number to extract.
16470 EltToExtract = EltToExtract ? 0 : 1;
16471 // Update Op0 past the SRL.
16472 Op0 = Op0.getOperand(0);
16473 }
16474
16475 // BITCAST feeding a TRUNCATE possibly via SRL.
16476 if (Op0.getOpcode() == ISD::BITCAST &&
16477 Op0.getValueType() == MVT::i128 &&
16478 Op0.getOperand(0).getValueType() == MVT::f128) {
16479 SDValue Bitcast = DCI.DAG.getBitcast(MVT::v2i64, Op0.getOperand(0));
16480 return DCI.DAG.getNode(
16481 ISD::EXTRACT_VECTOR_ELT, dl, MVT::i64, Bitcast,
16482 DCI.DAG.getTargetConstant(EltToExtract, dl, MVT::i32));
16483 }
16484 return SDValue();
16485}
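For illustration, C code of the shape that produces pattern 2 above (the function name and the use of __int128/memcpy are assumptions; on PPC64 with float128 support this is the bitcast + srl + truncate sequence described):

  unsigned long long f128_high_bits(__float128 f) {
    unsigned __int128 bits;
    __builtin_memcpy(&bits, &f, sizeof bits);   // bitcast f128 -> i128
    return (unsigned long long)(bits >> 64);    // srl by 64, truncate to i64
  }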
16486
16487SDValue PPCTargetLowering::combineMUL(SDNode *N, DAGCombinerInfo &DCI) const {
16488 SelectionDAG &DAG = DCI.DAG;
16489
16490 ConstantSDNode *ConstOpOrElement = isConstOrConstSplat(N->getOperand(1));
16491 if (!ConstOpOrElement)
16492 return SDValue();
16493
16494 // An imul is usually smaller than the alternative sequence for legal type.
16495 if (DAG.getMachineFunction().getFunction().hasMinSize() &&
16496 isOperationLegal(ISD::MUL, N->getValueType(0)))
16497 return SDValue();
16498
16499 auto IsProfitable = [this](bool IsNeg, bool IsAddOne, EVT VT) -> bool {
16500 switch (this->Subtarget.getCPUDirective()) {
16501 default:
16502 // TODO: enhance the condition for subtarget before pwr8
16503 return false;
16504 case PPC::DIR_PWR8:
16505 // type mul add shl
16506 // scalar 4 1 1
16507 // vector 7 2 2
16508 return true;
16509 case PPC::DIR_PWR9:
16510 case PPC::DIR_PWR10:
16511 case PPC::DIR_PWR_FUTURE:
16512 // type mul add shl
16513 // scalar 5 2 2
16514 // vector 7 2 2
16515
16516 // The cycle ratios of the related operations are shown in the table above.
16517 // Because mul costs 5 (scalar) / 7 (vector) while add/sub/shl all cost 2 for
16518 // both scalar and vector types, the 2-instruction patterns (add/sub + shl,
16519 // total 4) are always profitable; but for the 3-instruction pattern
16520 // (mul x, -(2^N + 1)) => -(add (shl x, N), x), sub + add + shl total 6,
16521 // so we should only do it for vector types.
16522 return IsAddOne && IsNeg ? VT.isVector() : true;
16523 }
16524 };
16525
16526 EVT VT = N->getValueType(0);
16527 SDLoc DL(N);
16528
16529 const APInt &MulAmt = ConstOpOrElement->getAPIntValue();
16530 bool IsNeg = MulAmt.isNegative();
16531 APInt MulAmtAbs = MulAmt.abs();
16532
16533 if ((MulAmtAbs - 1).isPowerOf2()) {
16534 // (mul x, 2^N + 1) => (add (shl x, N), x)
16535 // (mul x, -(2^N + 1)) => -(add (shl x, N), x)
16536
16537 if (!IsProfitable(IsNeg, true, VT))
16538 return SDValue();
16539
16540 SDValue Op0 = N->getOperand(0);
16541 SDValue Op1 =
16542 DAG.getNode(ISD::SHL, DL, VT, N->getOperand(0),
16543 DAG.getConstant((MulAmtAbs - 1).logBase2(), DL, VT));
16544 SDValue Res = DAG.getNode(ISD::ADD, DL, VT, Op0, Op1);
16545
16546 if (!IsNeg)
16547 return Res;
16548
16549 return DAG.getNode(ISD::SUB, DL, VT, DAG.getConstant(0, DL, VT), Res);
16550 } else if ((MulAmtAbs + 1).isPowerOf2()) {
16551 // (mul x, 2^N - 1) => (sub (shl x, N), x)
16552 // (mul x, -(2^N - 1)) => (sub x, (shl x, N))
16553
16554 if (!IsProfitable(IsNeg, false, VT))
16555 return SDValue();
16556
16557 SDValue Op0 = N->getOperand(0);
16558 SDValue Op1 =
16559 DAG.getNode(ISD::SHL, DL, VT, N->getOperand(0),
16560 DAG.getConstant((MulAmtAbs + 1).logBase2(), DL, VT));
16561
16562 if (!IsNeg)
16563 return DAG.getNode(ISD::SUB, DL, VT, Op1, Op0);
16564 else
16565 return DAG.getNode(ISD::SUB, DL, VT, Op0, Op1);
16566
16567 } else {
16568 return SDValue();
16569 }
16570}
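Worked examples of the two branches above (illustrative; they assume IsProfitable returns true for the type in question):

  //   (mul x, 9)   =>  (add (shl x, 3), x)
  //   (mul x, -9)  =>  (sub 0, (add (shl x, 3), x))
  //   (mul x, 7)   =>  (sub (shl x, 3), x)
  //   (mul x, -7)  =>  (sub x, (shl x, 3))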
16571
16572// Combine fma-like op (like fnmsub) with fnegs to appropriate op. Do this
16573// in combiner since we need to check SD flags and other subtarget features.
16574SDValue PPCTargetLowering::combineFMALike(SDNode *N,
16575 DAGCombinerInfo &DCI) const {
16576 SDValue N0 = N->getOperand(0);
16577 SDValue N1 = N->getOperand(1);
16578 SDValue N2 = N->getOperand(2);
16579 SDNodeFlags Flags = N->getFlags();
16580 EVT VT = N->getValueType(0);
16581 SelectionDAG &DAG = DCI.DAG;
16582 const TargetOptions &Options = getTargetMachine().Options;
16583 unsigned Opc = N->getOpcode();
16584 bool CodeSize = DAG.getMachineFunction().getFunction().hasOptSize();
16585 bool LegalOps = !DCI.isBeforeLegalizeOps();
16586 SDLoc Loc(N);
16587
16588 if (!isOperationLegal(ISD::FMA, VT))
16589 return SDValue();
16590
16591 // Allowing transformation to FNMSUB may change sign of zeroes when ab-c=0
16592 // since (fnmsub a b c)=-0 while c-ab=+0.
16593 if (!Flags.hasNoSignedZeros() && !Options.NoSignedZerosFPMath)
16594 return SDValue();
16595
16596 // (fma (fneg a) b c) => (fnmsub a b c)
16597 // (fnmsub (fneg a) b c) => (fma a b c)
16598 if (SDValue NegN0 = getCheaperNegatedExpression(N0, DAG, LegalOps, CodeSize))
16599 return DAG.getNode(invertFMAOpcode(Opc), Loc, VT, NegN0, N1, N2, Flags);
16600
16601 // (fma a (fneg b) c) => (fnmsub a b c)
16602 // (fnmsub a (fneg b) c) => (fma a b c)
16603 if (SDValue NegN1 = getCheaperNegatedExpression(N1, DAG, LegalOps, CodeSize))
16604 return DAG.getNode(invertFMAOpcode(Opc), Loc, VT, N0, NegN1, N2, Flags);
16605
16606 return SDValue();
16607}
16608
16609bool PPCTargetLowering::mayBeEmittedAsTailCall(const CallInst *CI) const {
16610 // Only duplicate to increase tail-calls for the 64bit SysV ABIs.
16611 if (!Subtarget.is64BitELFABI())
16612 return false;
16613
16614 // If not a tail call then no need to proceed.
16615 if (!CI->isTailCall())
16616 return false;
16617
16618 // If sibling calls have been disabled and tail-calls aren't guaranteed
16619 // there is no reason to duplicate.
16620 auto &TM = getTargetMachine();
16621 if (!TM.Options.GuaranteedTailCallOpt && DisableSCO)
16622 return false;
16623
16624 // Can't tail call a function called indirectly, or if it has variadic args.
16625 const Function *Callee = CI->getCalledFunction();
16626 if (!Callee || Callee->isVarArg())
16627 return false;
16628
16629 // Make sure the callee and caller calling conventions are eligible for tco.
16630 const Function *Caller = CI->getParent()->getParent();
16631 if (!areCallingConvEligibleForTCO_64SVR4(Caller->getCallingConv(),
16632 CI->getCallingConv()))
16633 return false;
16634
16635 // If the function is local then we have a good chance at tail-calling it
16636 return getTargetMachine().shouldAssumeDSOLocal(*Caller->getParent(), Callee);
16637}
16638
16639bool PPCTargetLowering::hasBitPreservingFPLogic(EVT VT) const {
16640 if (!Subtarget.hasVSX())
16641 return false;
16642 if (Subtarget.hasP9Vector() && VT == MVT::f128)
16643 return true;
16644 return VT == MVT::f32 || VT == MVT::f64 ||
16645 VT == MVT::v4f32 || VT == MVT::v2f64;
16646}
16647
16648bool PPCTargetLowering::
16649isMaskAndCmp0FoldingBeneficial(const Instruction &AndI) const {
16650 const Value *Mask = AndI.getOperand(1);
16651 // If the mask is suitable for andi. or andis. we should sink the and.
16652 if (const ConstantInt *CI = dyn_cast<ConstantInt>(Mask)) {
16653 // Can't handle constants wider than 64-bits.
16654 if (CI->getBitWidth() > 64)
16655 return false;
16656 int64_t ConstVal = CI->getZExtValue();
16657 return isUInt<16>(ConstVal) ||
16658 (isUInt<16>(ConstVal >> 16) && !(ConstVal & 0xFFFF));
16659 }
16660
16661 // For non-constant masks, we can always use the record-form and.
16662 return true;
16663}
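Some concrete masks and how the test above classifies them (illustrative):

  //   0x00FF       -> beneficial (fits andi.)
  //   0xFFFF0000   -> beneficial (fits andis.)
  //   0x1FFFF      -> not beneficial (needs bits from both halfwords)
  //   non-constant -> beneficial (the record-form and. is always available)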
16664
16665// Transform (abs (sub (zext a), (zext b))) to (vabsd a b 0)
16666// Transform (abs (sub (zext a), (zext_invec b))) to (vabsd a b 0)
16667// Transform (abs (sub (zext_invec a), (zext_invec b))) to (vabsd a b 0)
16668// Transform (abs (sub (zext_invec a), (zext b))) to (vabsd a b 0)
16669// Transform (abs (sub a, b) to (vabsd a b 1)) if a & b of type v4i32
16670SDValue PPCTargetLowering::combineABS(SDNode *N, DAGCombinerInfo &DCI) const {
16671 assert((N->getOpcode() == ISD::ABS) && "Need ABS node here");
16672 assert(Subtarget.hasP9Altivec() &&
16673 "Only combine this when P9 altivec supported!");
16674 EVT VT = N->getValueType(0);
16675 if (VT != MVT::v4i32 && VT != MVT::v8i16 && VT != MVT::v16i8)
16676 return SDValue();
16677
16678 SelectionDAG &DAG = DCI.DAG;
16679 SDLoc dl(N);
16680 if (N->getOperand(0).getOpcode() == ISD::SUB) {
16681 // This holds even for signed integers, because the zero-extended inputs are
16682 // known to be non-negative when viewed as signed integers.
16683 unsigned SubOpcd0 = N->getOperand(0)->getOperand(0).getOpcode();
16684 unsigned SubOpcd1 = N->getOperand(0)->getOperand(1).getOpcode();
16685 if ((SubOpcd0 == ISD::ZERO_EXTEND ||
16686 SubOpcd0 == ISD::ZERO_EXTEND_VECTOR_INREG) &&
16687 (SubOpcd1 == ISD::ZERO_EXTEND ||
16688 SubOpcd1 == ISD::ZERO_EXTEND_VECTOR_INREG)) {
16689 return DAG.getNode(PPCISD::VABSD, dl, N->getOperand(0).getValueType(),
16690 N->getOperand(0)->getOperand(0),
16691 N->getOperand(0)->getOperand(1),
16692 DAG.getTargetConstant(0, dl, MVT::i32));
16693 }
16694
16695 // For type v4i32, it can be optimized with xvnegsp + vabsduw
16696 if (N->getOperand(0).getValueType() == MVT::v4i32 &&
16697 N->getOperand(0).hasOneUse()) {
16698 return DAG.getNode(PPCISD::VABSD, dl, N->getOperand(0).getValueType(),
16699 N->getOperand(0)->getOperand(0),
16700 N->getOperand(0)->getOperand(1),
16701 DAG.getTargetConstant(1, dl, MVT::i32));
16702 }
16703 }
16704
16705 return SDValue();
16706}
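An example of the second case above (illustrative): for v4i32 operands the signedness issue is handled by the swap flag, so

  //   (abs (sub v4i32:x, v4i32:y))  =>  (VABSD x, y, 1)
  // which, per the comment above, is later emitted using xvnegsp on the
  // operands followed by vabsduw.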
16707
16708 // For type v4i32/v8i16/v16i8, transform
16709// from (vselect (setcc a, b, setugt), (sub a, b), (sub b, a)) to (vabsd a, b)
16710// from (vselect (setcc a, b, setuge), (sub a, b), (sub b, a)) to (vabsd a, b)
16711// from (vselect (setcc a, b, setult), (sub b, a), (sub a, b)) to (vabsd a, b)
16712// from (vselect (setcc a, b, setule), (sub b, a), (sub a, b)) to (vabsd a, b)
16713SDValue PPCTargetLowering::combineVSelect(SDNode *N,
16714 DAGCombinerInfo &DCI) const {
16715 assert((N->getOpcode() == ISD::VSELECT) && "Need VSELECT node here");
16716 assert(Subtarget.hasP9Altivec() &&
16717 "Only combine this when P9 altivec supported!");
16718
16719 SelectionDAG &DAG = DCI.DAG;
16720 SDLoc dl(N);
16721 SDValue Cond = N->getOperand(0);
16722 SDValue TrueOpnd = N->getOperand(1);
16723 SDValue FalseOpnd = N->getOperand(2);
16724 EVT VT = N->getOperand(1).getValueType();
16725
16726 if (Cond.getOpcode() != ISD::SETCC || TrueOpnd.getOpcode() != ISD::SUB ||
16727 FalseOpnd.getOpcode() != ISD::SUB)
16728 return SDValue();
16729
16730 // ABSD only available for type v4i32/v8i16/v16i8
16731 if (VT != MVT::v4i32 && VT != MVT::v8i16 && VT != MVT::v16i8)
16732 return SDValue();
16733
16734 // At least to save one more dependent computation
16735 if (!(Cond.hasOneUse() || TrueOpnd.hasOneUse() || FalseOpnd.hasOneUse()))
16736 return SDValue();
16737
16738 ISD::CondCode CC = cast<CondCodeSDNode>(Cond.getOperand(2))->get();
16739
16740 // Can only handle unsigned comparison here
16741 switch (CC) {
16742 default:
16743 return SDValue();
16744 case ISD::SETUGT:
16745 case ISD::SETUGE:
16746 break;
16747 case ISD::SETULT:
16748 case ISD::SETULE:
16749 std::swap(TrueOpnd, FalseOpnd);
16750 break;
16751 }
16752
16753 SDValue CmpOpnd1 = Cond.getOperand(0);
16754 SDValue CmpOpnd2 = Cond.getOperand(1);
16755
16756 // SETCC CmpOpnd1 CmpOpnd2 cond
16757 // TrueOpnd = CmpOpnd1 - CmpOpnd2
16758 // FalseOpnd = CmpOpnd2 - CmpOpnd1
16759 if (TrueOpnd.getOperand(0) == CmpOpnd1 &&
16760 TrueOpnd.getOperand(1) == CmpOpnd2 &&
16761 FalseOpnd.getOperand(0) == CmpOpnd2 &&
16762 FalseOpnd.getOperand(1) == CmpOpnd1) {
16763 return DAG.getNode(PPCISD::VABSD, dl, N->getOperand(1).getValueType(),
16764 CmpOpnd1, CmpOpnd2,
16765 DAG.getTargetConstant(0, dl, MVT::i32));
16766 }
16767
16768 return SDValue();
16769}

/build/llvm-toolchain-snapshot-13~++20210413100635+64c24f493e5f/llvm/include/llvm/CodeGen/SelectionDAGNodes.h

1//===- llvm/CodeGen/SelectionDAGNodes.h - SelectionDAG Nodes ----*- C++ -*-===//
2//
3// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
4// See https://llvm.org/LICENSE.txt for license information.
5// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
6//
7//===----------------------------------------------------------------------===//
8//
9// This file declares the SDNode class and derived classes, which are used to
10// represent the nodes and operations present in a SelectionDAG. These nodes
11// and operations are machine code level operations, with some similarities to
12// the GCC RTL representation.
13//
14// Clients should include the SelectionDAG.h file instead of this file directly.
15//
16//===----------------------------------------------------------------------===//
17
18#ifndef LLVM_CODEGEN_SELECTIONDAGNODES_H
19#define LLVM_CODEGEN_SELECTIONDAGNODES_H
20
21#include "llvm/ADT/APFloat.h"
22#include "llvm/ADT/ArrayRef.h"
23#include "llvm/ADT/BitVector.h"
24#include "llvm/ADT/FoldingSet.h"
25#include "llvm/ADT/GraphTraits.h"
26#include "llvm/ADT/SmallPtrSet.h"
27#include "llvm/ADT/SmallVector.h"
28#include "llvm/ADT/ilist_node.h"
29#include "llvm/ADT/iterator.h"
30#include "llvm/ADT/iterator_range.h"
31#include "llvm/CodeGen/ISDOpcodes.h"
32#include "llvm/CodeGen/MachineMemOperand.h"
33#include "llvm/CodeGen/Register.h"
34#include "llvm/CodeGen/ValueTypes.h"
35#include "llvm/IR/Constants.h"
36#include "llvm/IR/DebugLoc.h"
37#include "llvm/IR/Instruction.h"
38#include "llvm/IR/Instructions.h"
39#include "llvm/IR/Metadata.h"
40#include "llvm/IR/Operator.h"
41#include "llvm/Support/AlignOf.h"
42#include "llvm/Support/AtomicOrdering.h"
43#include "llvm/Support/Casting.h"
44#include "llvm/Support/ErrorHandling.h"
45#include "llvm/Support/MachineValueType.h"
46#include "llvm/Support/TypeSize.h"
47#include <algorithm>
48#include <cassert>
49#include <climits>
50#include <cstddef>
51#include <cstdint>
52#include <cstring>
53#include <iterator>
54#include <string>
55#include <tuple>
56
57namespace llvm {
58
59class APInt;
60class Constant;
61template <typename T> struct DenseMapInfo;
62class GlobalValue;
63class MachineBasicBlock;
64class MachineConstantPoolValue;
65class MCSymbol;
66class raw_ostream;
67class SDNode;
68class SelectionDAG;
69class Type;
70class Value;
71
72void checkForCycles(const SDNode *N, const SelectionDAG *DAG = nullptr,
73 bool force = false);
74
75/// This represents a list of ValueType's that has been intern'd by
76/// a SelectionDAG. Instances of this simple value class are returned by
77/// SelectionDAG::getVTList(...).
78///
79struct SDVTList {
80 const EVT *VTs;
81 unsigned int NumVTs;
82};
83
84namespace ISD {
85
86 /// Node predicates
87
88/// If N is a BUILD_VECTOR or SPLAT_VECTOR node whose elements are all the
89/// same constant or undefined, return true and return the constant value in
90/// \p SplatValue.
91bool isConstantSplatVector(const SDNode *N, APInt &SplatValue);
92
93/// Return true if the specified node is a BUILD_VECTOR or SPLAT_VECTOR where
94/// all of the elements are ~0 or undef. If \p BuildVectorOnly is set to
95/// true, it only checks BUILD_VECTOR.
96bool isConstantSplatVectorAllOnes(const SDNode *N,
97 bool BuildVectorOnly = false);
98
99/// Return true if the specified node is a BUILD_VECTOR or SPLAT_VECTOR where
100/// all of the elements are 0 or undef. If \p BuildVectorOnly is set to true, it
101/// only checks BUILD_VECTOR.
102bool isConstantSplatVectorAllZeros(const SDNode *N,
103 bool BuildVectorOnly = false);
104
105/// Return true if the specified node is a BUILD_VECTOR where all of the
106/// elements are ~0 or undef.
107bool isBuildVectorAllOnes(const SDNode *N);
108
109/// Return true if the specified node is a BUILD_VECTOR where all of the
110/// elements are 0 or undef.
111bool isBuildVectorAllZeros(const SDNode *N);
112
113/// Return true if the specified node is a BUILD_VECTOR node of all
114/// ConstantSDNode or undef.
115bool isBuildVectorOfConstantSDNodes(const SDNode *N);
116
117/// Return true if the specified node is a BUILD_VECTOR node of all
118/// ConstantFPSDNode or undef.
119bool isBuildVectorOfConstantFPSDNodes(const SDNode *N);
120
121/// Return true if the node has at least one operand and all operands of the
122/// specified node are ISD::UNDEF.
123bool allOperandsUndef(const SDNode *N);
124
125} // end namespace ISD
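These predicates are the usual entry points for querying constant vectors during combines. A minimal sketch, with a hypothetical helper name, of how one might be used on an SDValue obtained elsewhere:

    #include "llvm/CodeGen/SelectionDAGNodes.h"
    using namespace llvm;

    // Hypothetical helper: true when V is a BUILD_VECTOR whose lanes are all 0 or undef.
    static bool isTriviallyZeroVector(SDValue V) {
      return ISD::isBuildVectorAllZeros(V.getNode());
    }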
126
127//===----------------------------------------------------------------------===//
128/// Unlike LLVM values, Selection DAG nodes may return multiple
129/// values as the result of a computation. Many nodes return multiple values,
130/// from loads (which define a token and a return value) to ADDC (which returns
131/// a result and a carry value), to calls (which may return an arbitrary number
132/// of values).
133///
134/// As such, each use of a SelectionDAG computation must indicate the node that
135/// computes it as well as which return value to use from that node. This pair
136/// of information is represented with the SDValue value type.
137///
138class SDValue {
139 friend struct DenseMapInfo<SDValue>;
140
141 SDNode *Node = nullptr; // The node defining the value we are using.
142 unsigned ResNo = 0; // Which return value of the node we are using.
143
144public:
145 SDValue() = default;
146 SDValue(SDNode *node, unsigned resno);
147
148 /// get the index which selects a specific result in the SDNode
149 unsigned getResNo() const { return ResNo; }
150
151 /// get the SDNode which holds the desired result
152 SDNode *getNode() const { return Node; }
153
154 /// set the SDNode
155 void setNode(SDNode *N) { Node = N; }
156
157 inline SDNode *operator->() const { return Node; }
158
159 bool operator==(const SDValue &O) const {
160 return Node == O.Node && ResNo == O.ResNo;
161 }
162 bool operator!=(const SDValue &O) const {
163 return !operator==(O);
164 }
165 bool operator<(const SDValue &O) const {
166 return std::tie(Node, ResNo) < std::tie(O.Node, O.ResNo);
167 }
168 explicit operator bool() const {
169 return Node != nullptr;
2
Assuming the condition is false
3
Returning zero, which participates in a condition later
170 }
171
172 SDValue getValue(unsigned R) const {
173 return SDValue(Node, R);
174 }
175
176 /// Return true if this node is an operand of N.
177 bool isOperandOf(const SDNode *N) const;
178
179 /// Return the ValueType of the referenced return value.
180 inline EVT getValueType() const;
181
182 /// Return the simple ValueType of the referenced return value.
183 MVT getSimpleValueType() const {
184 return getValueType().getSimpleVT();
185 }
186
187 /// Returns the size of the value in bits.
188 ///
189 /// If the value type is a scalable vector type, the scalable property will
190 /// be set and the runtime size will be a positive integer multiple of the
191 /// base size.
192 TypeSize getValueSizeInBits() const {
193 return getValueType().getSizeInBits();
194 }
195
196 uint64_t getScalarValueSizeInBits() const {
197 return getValueType().getScalarType().getFixedSizeInBits();
198 }
199
200 // Forwarding methods - These forward to the corresponding methods in SDNode.
201 inline unsigned getOpcode() const;
202 inline unsigned getNumOperands() const;
203 inline const SDValue &getOperand(unsigned i) const;
204 inline uint64_t getConstantOperandVal(unsigned i) const;
205 inline const APInt &getConstantOperandAPInt(unsigned i) const;
206 inline bool isTargetMemoryOpcode() const;
207 inline bool isTargetOpcode() const;
208 inline bool isMachineOpcode() const;
209 inline bool isUndef() const;
210 inline unsigned getMachineOpcode() const;
211 inline const DebugLoc &getDebugLoc() const;
212 inline void dump() const;
213 inline void dump(const SelectionDAG *G) const;
214 inline void dumpr() const;
215 inline void dumpr(const SelectionDAG *G) const;
216
217 /// Return true if this operand (which must be a chain) reaches the
218 /// specified operand without crossing any side-effecting instructions.
219 /// In practice, this looks through token factors and non-volatile loads.
220 /// In order to remain efficient, this only
221 /// looks a couple of nodes in; it does not do an exhaustive search.
222 bool reachesChainWithoutSideEffects(SDValue Dest,
223 unsigned Depth = 2) const;
224
225 /// Return true if there are no nodes using value ResNo of Node.
226 inline bool use_empty() const;
227
228 /// Return true if there is exactly one node using value ResNo of Node.
229 inline bool hasOneUse() const;
230};
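An SDValue is therefore just a (defining node, result number) pair. A minimal sketch of the accessors above in use; the helper name is hypothetical:

    #include "llvm/CodeGen/SelectionDAGNodes.h"
    using namespace llvm;

    static void describe(SDValue V) {
      if (!V)                         // explicit operator bool(): null value
        return;
      SDNode *N   = V.getNode();      // node computing the value
      unsigned R  = V.getResNo();     // which of N's results is referenced
      EVT      VT = V.getValueType(); // type of that result
      (void)N; (void)R; (void)VT;
    }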
231
232template<> struct DenseMapInfo<SDValue> {
233 static inline SDValue getEmptyKey() {
234 SDValue V;
235 V.ResNo = -1U;
236 return V;
237 }
238
239 static inline SDValue getTombstoneKey() {
240 SDValue V;
241 V.ResNo = -2U;
242 return V;
243 }
244
245 static unsigned getHashValue(const SDValue &Val) {
246 return ((unsigned)((uintptr_t)Val.getNode() >> 4) ^
247 (unsigned)((uintptr_t)Val.getNode() >> 9)) + Val.getResNo();
248 }
249
250 static bool isEqual(const SDValue &LHS, const SDValue &RHS) {
251 return LHS == RHS;
252 }
253};
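This DenseMapInfo specialization is what lets SDValue act as a DenseMap or DenseSet key; ResNo values -1U and -2U are reserved for the empty and tombstone keys. A minimal usage sketch (illustrative names only):

    #include "llvm/ADT/DenseMap.h"
    #include "llvm/CodeGen/SelectionDAGNodes.h"
    using namespace llvm;

    // Assign each distinct SDValue a small ordinal as it is first seen.
    static unsigned numberValue(DenseMap<SDValue, unsigned> &Ids, SDValue V) {
      return Ids.try_emplace(V, Ids.size()).first->second;
    }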
254
255/// Allow casting operators to work directly on
256/// SDValues as if they were SDNode*'s.
257template<> struct simplify_type<SDValue> {
258 using SimpleType = SDNode *;
259
260 static SimpleType getSimplifiedValue(SDValue &Val) {
261 return Val.getNode();
262 }
263};
264template<> struct simplify_type<const SDValue> {
265 using SimpleType = /*const*/ SDNode *;
266
267 static SimpleType getSimplifiedValue(const SDValue &Val) {
268 return Val.getNode();
269 }
270};
271
272/// Represents a use of a SDNode. This class holds an SDValue,
273/// which records the SDNode being used and the result number, a
274/// pointer to the SDNode using the value, and Next and Prev pointers,
275/// which link together all the uses of an SDNode.
276///
277class SDUse {
278 /// Val - The value being used.
279 SDValue Val;
280 /// User - The user of this value.
281 SDNode *User = nullptr;
282 /// Prev, Next - Pointers to the uses list of the SDNode referred by
283 /// this operand.
284 SDUse **Prev = nullptr;
285 SDUse *Next = nullptr;
286
287public:
288 SDUse() = default;
289 SDUse(const SDUse &U) = delete;
290 SDUse &operator=(const SDUse &) = delete;
291
292 /// Normally SDUse will just implicitly convert to an SDValue that it holds.
293 operator const SDValue&() const { return Val; }
294
295 /// If implicit conversion to SDValue doesn't work, the get() method returns
296 /// the SDValue.
297 const SDValue &get() const { return Val; }
298
299 /// This returns the SDNode that contains this Use.
300 SDNode *getUser() { return User; }
301
302 /// Get the next SDUse in the use list.
303 SDUse *getNext() const { return Next; }
304
305 /// Convenience function for get().getNode().
306 SDNode *getNode() const { return Val.getNode(); }
307 /// Convenience function for get().getResNo().
308 unsigned getResNo() const { return Val.getResNo(); }
309 /// Convenience function for get().getValueType().
310 EVT getValueType() const { return Val.getValueType(); }
311
312 /// Convenience function for get().operator==
313 bool operator==(const SDValue &V) const {
314 return Val == V;
315 }
316
317 /// Convenience function for get().operator!=
318 bool operator!=(const SDValue &V) const {
319 return Val != V;
320 }
321
322 /// Convenience function for get().operator<
323 bool operator<(const SDValue &V) const {
324 return Val < V;
325 }
326
327private:
328 friend class SelectionDAG;
329 friend class SDNode;
330 // TODO: unfriend HandleSDNode once we fix its operand handling.
331 friend class HandleSDNode;
332
333 void setUser(SDNode *p) { User = p; }
334
335 /// Remove this use from its existing use list, assign it the
336 /// given value, and add it to the new value's node's use list.
337 inline void set(const SDValue &V);
338 /// Like set, but only supports initializing a newly-allocated
339 /// SDUse with a non-null value.
340 inline void setInitial(const SDValue &V);
341 /// Like set, but only sets the Node portion of the value,
342 /// leaving the ResNo portion unmodified.
343 inline void setNode(SDNode *N);
344
345 void addToList(SDUse **List) {
346 Next = *List;
347 if (Next) Next->Prev = &Next;
348 Prev = List;
349 *List = this;
350 }
351
352 void removeFromList() {
353 *Prev = Next;
354 if (Next) Next->Prev = Prev;
355 }
356};
357
358/// simplify_type specializations - Allow casting operators to work directly on
359/// SDValues as if they were SDNode*'s.
360template<> struct simplify_type<SDUse> {
361 using SimpleType = SDNode *;
362
363 static SimpleType getSimplifiedValue(SDUse &Val) {
364 return Val.getNode();
365 }
366};
367
368/// These are IR-level optimization flags that may be propagated to SDNodes.
369/// TODO: This data structure should be shared by the IR optimizer and
370/// the backend.
371struct SDNodeFlags {
372private:
373 bool NoUnsignedWrap : 1;
374 bool NoSignedWrap : 1;
375 bool Exact : 1;
376 bool NoNaNs : 1;
377 bool NoInfs : 1;
378 bool NoSignedZeros : 1;
379 bool AllowReciprocal : 1;
380 bool AllowContract : 1;
381 bool ApproximateFuncs : 1;
382 bool AllowReassociation : 1;
383
384 // We assume instructions do not raise floating-point exceptions by default,
385 // and only those marked explicitly may do so. We could choose to represent
386 // this via a positive "FPExcept" flag like on the MI level, but having a
387 // negative "NoFPExcept" flag here (that defaults to true) makes the flag
388 // intersection logic more straightforward.
389 bool NoFPExcept : 1;
390
391public:
392 /// Default constructor turns off all optimization flags.
393 SDNodeFlags()
394 : NoUnsignedWrap(false), NoSignedWrap(false), Exact(false), NoNaNs(false),
395 NoInfs(false), NoSignedZeros(false), AllowReciprocal(false),
396 AllowContract(false), ApproximateFuncs(false),
397 AllowReassociation(false), NoFPExcept(false) {}
398
399 /// Propagate the fast-math-flags from an IR FPMathOperator.
400 void copyFMF(const FPMathOperator &FPMO) {
401 setNoNaNs(FPMO.hasNoNaNs());
402 setNoInfs(FPMO.hasNoInfs());
403 setNoSignedZeros(FPMO.hasNoSignedZeros());
404 setAllowReciprocal(FPMO.hasAllowReciprocal());
405 setAllowContract(FPMO.hasAllowContract());
406 setApproximateFuncs(FPMO.hasApproxFunc());
407 setAllowReassociation(FPMO.hasAllowReassoc());
408 }
409
410 // These are mutators for each flag.
411 void setNoUnsignedWrap(bool b) { NoUnsignedWrap = b; }
412 void setNoSignedWrap(bool b) { NoSignedWrap = b; }
413 void setExact(bool b) { Exact = b; }
414 void setNoNaNs(bool b) { NoNaNs = b; }
415 void setNoInfs(bool b) { NoInfs = b; }
416 void setNoSignedZeros(bool b) { NoSignedZeros = b; }
417 void setAllowReciprocal(bool b) { AllowReciprocal = b; }
418 void setAllowContract(bool b) { AllowContract = b; }
419 void setApproximateFuncs(bool b) { ApproximateFuncs = b; }
420 void setAllowReassociation(bool b) { AllowReassociation = b; }
421 void setNoFPExcept(bool b) { NoFPExcept = b; }
422
423 // These are accessors for each flag.
424 bool hasNoUnsignedWrap() const { return NoUnsignedWrap; }
425 bool hasNoSignedWrap() const { return NoSignedWrap; }
426 bool hasExact() const { return Exact; }
427 bool hasNoNaNs() const { return NoNaNs; }
428 bool hasNoInfs() const { return NoInfs; }
429 bool hasNoSignedZeros() const { return NoSignedZeros; }
430 bool hasAllowReciprocal() const { return AllowReciprocal; }
431 bool hasAllowContract() const { return AllowContract; }
432 bool hasApproximateFuncs() const { return ApproximateFuncs; }
433 bool hasAllowReassociation() const { return AllowReassociation; }
434 bool hasNoFPExcept() const { return NoFPExcept; }
435
436 /// Clear any flags in this flag set that aren't also set in Flags. All
437 /// flags will be cleared if Flags are undefined.
438 void intersectWith(const SDNodeFlags Flags) {
439 NoUnsignedWrap &= Flags.NoUnsignedWrap;
440 NoSignedWrap &= Flags.NoSignedWrap;
441 Exact &= Flags.Exact;
442 NoNaNs &= Flags.NoNaNs;
443 NoInfs &= Flags.NoInfs;
444 NoSignedZeros &= Flags.NoSignedZeros;
445 AllowReciprocal &= Flags.AllowReciprocal;
446 AllowContract &= Flags.AllowContract;
447 ApproximateFuncs &= Flags.ApproximateFuncs;
448 AllowReassociation &= Flags.AllowReassociation;
449 NoFPExcept &= Flags.NoFPExcept;
450 }
451};
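A short, hedged sketch of how these flags typically travel: copied from the IR's fast-math flags when a node is built, and intersected when two equivalent nodes are merged. The helper names are hypothetical:

    #include "llvm/CodeGen/SelectionDAGNodes.h"
    #include "llvm/IR/Operator.h"
    using namespace llvm;

    // Flags for a node lowered from IR instruction I.
    static SDNodeFlags flagsFromIR(const Instruction &I) {
      SDNodeFlags Flags;                      // default: everything off
      if (const auto *FPMO = dyn_cast<FPMathOperator>(&I))
        Flags.copyFMF(*FPMO);                 // nnan/ninf/nsz/arcp/contract/afn/reassoc
      return Flags;
    }

    // Keep only the flags both nodes agree on when Kept replaces Removed.
    static void mergeFlags(SDNode *Kept, const SDNode *Removed) {
      SDNodeFlags Flags = Kept->getFlags();
      Flags.intersectWith(Removed->getFlags());
      Kept->setFlags(Flags);
    }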
452
453/// Represents one node in the SelectionDAG.
454///
455class SDNode : public FoldingSetNode, public ilist_node<SDNode> {
456private:
457 /// The operation that this node performs.
458 int16_t NodeType;
459
460protected:
461 // We define a set of mini-helper classes to help us interpret the bits in our
462 // SubclassData. These are designed to fit within a uint16_t so they pack
463 // with NodeType.
464
465#if defined(_AIX) && (!defined(__GNUC__) || defined(__ibmxl__))
466// Except for GCC; by default, AIX compilers store bit-fields in 4-byte words
467// and give the `pack` pragma push semantics.
468#define BEGIN_TWO_BYTE_PACK() _Pragma("pack(2)")pack(2)
469#define END_TWO_BYTE_PACK() _Pragma("pack(pop)")pack(pop)
470#else
471#define BEGIN_TWO_BYTE_PACK()
472#define END_TWO_BYTE_PACK()
473#endif
474
475BEGIN_TWO_BYTE_PACK()
476 class SDNodeBitfields {
477 friend class SDNode;
478 friend class MemIntrinsicSDNode;
479 friend class MemSDNode;
480 friend class SelectionDAG;
481
482 uint16_t HasDebugValue : 1;
483 uint16_t IsMemIntrinsic : 1;
484 uint16_t IsDivergent : 1;
485 };
486 enum { NumSDNodeBits = 3 };
487
488 class ConstantSDNodeBitfields {
489 friend class ConstantSDNode;
490
491 uint16_t : NumSDNodeBits;
492
493 uint16_t IsOpaque : 1;
494 };
495
496 class MemSDNodeBitfields {
497 friend class MemSDNode;
498 friend class MemIntrinsicSDNode;
499 friend class AtomicSDNode;
500
501 uint16_t : NumSDNodeBits;
502
503 uint16_t IsVolatile : 1;
504 uint16_t IsNonTemporal : 1;
505 uint16_t IsDereferenceable : 1;
506 uint16_t IsInvariant : 1;
507 };
508 enum { NumMemSDNodeBits = NumSDNodeBits + 4 };
509
510 class LSBaseSDNodeBitfields {
511 friend class LSBaseSDNode;
512 friend class MaskedLoadStoreSDNode;
513 friend class MaskedGatherScatterSDNode;
514
515 uint16_t : NumMemSDNodeBits;
516
517 // This storage is shared between disparate class hierarchies to hold an
518 // enumeration specific to the class hierarchy in use.
519 // LSBaseSDNode => enum ISD::MemIndexedMode
520 // MaskedLoadStoreBaseSDNode => enum ISD::MemIndexedMode
521 // MaskedGatherScatterSDNode => enum ISD::MemIndexType
522 uint16_t AddressingMode : 3;
523 };
524 enum { NumLSBaseSDNodeBits = NumMemSDNodeBits + 3 };
525
526 class LoadSDNodeBitfields {
527 friend class LoadSDNode;
528 friend class MaskedLoadSDNode;
529 friend class MaskedGatherSDNode;
530
531 uint16_t : NumLSBaseSDNodeBits;
532
533 uint16_t ExtTy : 2; // enum ISD::LoadExtType
534 uint16_t IsExpanding : 1;
535 };
536
537 class StoreSDNodeBitfields {
538 friend class StoreSDNode;
539 friend class MaskedStoreSDNode;
540 friend class MaskedScatterSDNode;
541
542 uint16_t : NumLSBaseSDNodeBits;
543
544 uint16_t IsTruncating : 1;
545 uint16_t IsCompressing : 1;
546 };
547
548 union {
549 char RawSDNodeBits[sizeof(uint16_t)];
550 SDNodeBitfields SDNodeBits;
551 ConstantSDNodeBitfields ConstantSDNodeBits;
552 MemSDNodeBitfields MemSDNodeBits;
553 LSBaseSDNodeBitfields LSBaseSDNodeBits;
554 LoadSDNodeBitfields LoadSDNodeBits;
555 StoreSDNodeBitfields StoreSDNodeBits;
556 };
557END_TWO_BYTE_PACK()
558#undef BEGIN_TWO_BYTE_PACK
559#undef END_TWO_BYTE_PACK
560
561 // RawSDNodeBits must cover the entirety of the union. This means that all of
562 // the union's members must have size <= RawSDNodeBits. We write the RHS as
563 // "2" instead of sizeof(RawSDNodeBits) because MSVC can't handle the latter.
564 static_assert(sizeof(SDNodeBitfields) <= 2, "field too wide");
565 static_assert(sizeof(ConstantSDNodeBitfields) <= 2, "field too wide");
566 static_assert(sizeof(MemSDNodeBitfields) <= 2, "field too wide");
567 static_assert(sizeof(LSBaseSDNodeBitfields) <= 2, "field too wide");
568 static_assert(sizeof(LoadSDNodeBitfields) <= 2, "field too wide");
569 static_assert(sizeof(StoreSDNodeBitfields) <= 2, "field too wide");
570
571private:
572 friend class SelectionDAG;
573 // TODO: unfriend HandleSDNode once we fix its operand handling.
574 friend class HandleSDNode;
575
576 /// Unique id per SDNode in the DAG.
577 int NodeId = -1;
578
579 /// The values that are used by this operation.
580 SDUse *OperandList = nullptr;
581
582 /// The types of the values this node defines. SDNode's may
583 /// define multiple values simultaneously.
584 const EVT *ValueList;
585
586 /// List of uses for this SDNode.
587 SDUse *UseList = nullptr;
588
589 /// The number of entries in the Operand/Value list.
590 unsigned short NumOperands = 0;
591 unsigned short NumValues;
592
593 // The ordering of the SDNodes. It roughly corresponds to the ordering of the
594 // original LLVM instructions.
595 // This is used for turning off scheduling, because we'll forgo
596 // the normal scheduling algorithms and output the instructions according to
597 // this ordering.
598 unsigned IROrder;
599
600 /// Source line information.
601 DebugLoc debugLoc;
602
603 /// Return a pointer to the specified value type.
604 static const EVT *getValueTypeList(EVT VT);
605
606 SDNodeFlags Flags;
607
608public:
609 /// Unique and persistent id per SDNode in the DAG.
610 /// Used for debug printing.
611 uint16_t PersistentId;
612
613 //===--------------------------------------------------------------------===//
614 // Accessors
615 //
616
617 /// Return the SelectionDAG opcode value for this node. For
618 /// pre-isel nodes (those for which isMachineOpcode returns false), these
619 /// are the opcode values in the ISD and <target>ISD namespaces. For
620 /// post-isel opcodes, see getMachineOpcode.
621 unsigned getOpcode() const { return (unsigned short)NodeType; }
622
623 /// Test if this node has a target-specific opcode (in the
624 /// \<target\>ISD namespace).
625 bool isTargetOpcode() const { return NodeType >= ISD::BUILTIN_OP_END; }
626
627 /// Test if this node has a target-specific opcode that may raise
628 /// FP exceptions (in the \<target\>ISD namespace and greater than
629 /// FIRST_TARGET_STRICTFP_OPCODE). Note that all target memory
630 /// opcodes are currently automatically considered to possibly raise
631 /// FP exceptions as well.
632 bool isTargetStrictFPOpcode() const {
633 return NodeType >= ISD::FIRST_TARGET_STRICTFP_OPCODE;
634 }
635
636 /// Test if this node has a target-specific
637 /// memory-referencing opcode (in the \<target\>ISD namespace and
638 /// greater than FIRST_TARGET_MEMORY_OPCODE).
639 bool isTargetMemoryOpcode() const {
640 return NodeType >= ISD::FIRST_TARGET_MEMORY_OPCODE;
641 }
642
643 /// Return true if this node is an ISD::UNDEF node.
644 bool isUndef() const { return NodeType == ISD::UNDEF; }
30
Assuming field 'NodeType' is not equal to UNDEF
31
Returning zero, which participates in a condition later
645
646 /// Test if this node is a memory intrinsic (with valid pointer information).
647 /// INTRINSIC_W_CHAIN and INTRINSIC_VOID nodes are sometimes created for
648 /// non-memory intrinsics (with chains) that are not really instances of
649 /// MemSDNode. For such nodes, we need some extra state to determine the
650 /// proper classof relationship.
651 bool isMemIntrinsic() const {
652 return (NodeType == ISD::INTRINSIC_W_CHAIN ||
653 NodeType == ISD::INTRINSIC_VOID) &&
654 SDNodeBits.IsMemIntrinsic;
655 }
656
657 /// Test if this node is a strict floating point pseudo-op.
658 bool isStrictFPOpcode() {
659 switch (NodeType) {
660 default:
661 return false;
662 case ISD::STRICT_FP16_TO_FP:
663 case ISD::STRICT_FP_TO_FP16:
664#define DAG_INSTRUCTION(NAME, NARG, ROUND_MODE, INTRINSIC, DAGN) \
665 case ISD::STRICT_##DAGN:
666#include "llvm/IR/ConstrainedOps.def"
667 return true;
668 }
669 }
670
671 /// Test if this node has a post-isel opcode, directly
672 /// corresponding to a MachineInstr opcode.
673 bool isMachineOpcode() const { return NodeType < 0; }
674
675 /// This may only be called if isMachineOpcode returns
676 /// true. It returns the MachineInstr opcode value that the node's opcode
677 /// corresponds to.
678 unsigned getMachineOpcode() const {
679 assert(isMachineOpcode() && "Not a MachineInstr opcode!")((isMachineOpcode() && "Not a MachineInstr opcode!") ?
static_cast<void> (0) : __assert_fail ("isMachineOpcode() && \"Not a MachineInstr opcode!\""
, "/build/llvm-toolchain-snapshot-13~++20210413100635+64c24f493e5f/llvm/include/llvm/CodeGen/SelectionDAGNodes.h"
, 679, __PRETTY_FUNCTION__))
;
680 return ~NodeType;
681 }
682
683 bool getHasDebugValue() const { return SDNodeBits.HasDebugValue; }
684 void setHasDebugValue(bool b) { SDNodeBits.HasDebugValue = b; }
685
686 bool isDivergent() const { return SDNodeBits.IsDivergent; }
687
688 /// Return true if there are no uses of this node.
689 bool use_empty() const { return UseList == nullptr; }
690
691 /// Return true if there is exactly one use of this node.
692 bool hasOneUse() const { return hasSingleElement(uses()); }
693
694 /// Return the number of uses of this node. This method takes
695 /// time proportional to the number of uses.
696 size_t use_size() const { return std::distance(use_begin(), use_end()); }
697
698 /// Return the unique node id.
699 int getNodeId() const { return NodeId; }
700
701 /// Set unique node id.
702 void setNodeId(int Id) { NodeId = Id; }
703
704 /// Return the node ordering.
705 unsigned getIROrder() const { return IROrder; }
706
707 /// Set the node ordering.
708 void setIROrder(unsigned Order) { IROrder = Order; }
709
710 /// Return the source location info.
711 const DebugLoc &getDebugLoc() const { return debugLoc; }
712
713 /// Set source location info. Try to avoid this, putting
714 /// it in the constructor is preferable.
715 void setDebugLoc(DebugLoc dl) { debugLoc = std::move(dl); }
716
717 /// This class provides iterator support for SDUse
718 /// operands that use a specific SDNode.
719 class use_iterator {
720 friend class SDNode;
721
722 SDUse *Op = nullptr;
723
724 explicit use_iterator(SDUse *op) : Op(op) {}
725
726 public:
727 using iterator_category = std::forward_iterator_tag;
728 using value_type = SDUse;
729 using difference_type = std::ptrdiff_t;
730 using pointer = value_type *;
731 using reference = value_type &;
732
733 use_iterator() = default;
734 use_iterator(const use_iterator &I) : Op(I.Op) {}
735
736 bool operator==(const use_iterator &x) const {
737 return Op == x.Op;
738 }
739 bool operator!=(const use_iterator &x) const {
740 return !operator==(x);
741 }
742
743 /// Return true if this iterator is at the end of uses list.
744 bool atEnd() const { return Op == nullptr; }
745
746 // Iterator traversal: forward iteration only.
747 use_iterator &operator++() { // Preincrement
748 assert(Op && "Cannot increment end iterator!")((Op && "Cannot increment end iterator!") ? static_cast
<void> (0) : __assert_fail ("Op && \"Cannot increment end iterator!\""
, "/build/llvm-toolchain-snapshot-13~++20210413100635+64c24f493e5f/llvm/include/llvm/CodeGen/SelectionDAGNodes.h"
, 748, __PRETTY_FUNCTION__))
;
749 Op = Op->getNext();
750 return *this;
751 }
752
753 use_iterator operator++(int) { // Postincrement
754 use_iterator tmp = *this; ++*this; return tmp;
755 }
756
757 /// Retrieve a pointer to the current user node.
758 SDNode *operator*() const {
759 assert(Op && "Cannot dereference end iterator!")((Op && "Cannot dereference end iterator!") ? static_cast
<void> (0) : __assert_fail ("Op && \"Cannot dereference end iterator!\""
, "/build/llvm-toolchain-snapshot-13~++20210413100635+64c24f493e5f/llvm/include/llvm/CodeGen/SelectionDAGNodes.h"
, 759, __PRETTY_FUNCTION__))
;
760 return Op->getUser();
761 }
762
763 SDNode *operator->() const { return operator*(); }
764
765 SDUse &getUse() const { return *Op; }
766
767 /// Retrieve the operand # of this use in its user.
768 unsigned getOperandNo() const {
769 assert(Op && "Cannot dereference end iterator!")((Op && "Cannot dereference end iterator!") ? static_cast
<void> (0) : __assert_fail ("Op && \"Cannot dereference end iterator!\""
, "/build/llvm-toolchain-snapshot-13~++20210413100635+64c24f493e5f/llvm/include/llvm/CodeGen/SelectionDAGNodes.h"
, 769, __PRETTY_FUNCTION__))
;
770 return (unsigned)(Op - Op->getUser()->OperandList);
771 }
772 };
773
774 /// Provide iteration support to walk over all uses of an SDNode.
775 use_iterator use_begin() const {
776 return use_iterator(UseList);
777 }
778
779 static use_iterator use_end() { return use_iterator(nullptr); }
780
781 inline iterator_range<use_iterator> uses() {
782 return make_range(use_begin(), use_end());
783 }
784 inline iterator_range<use_iterator> uses() const {
785 return make_range(use_begin(), use_end());
786 }
787
788 /// Return true if there are exactly NUSES uses of the indicated value.
789 /// This method ignores uses of other values defined by this operation.
790 bool hasNUsesOfValue(unsigned NUses, unsigned Value) const;
791
792 /// Return true if there are any use of the indicated value.
793 /// This method ignores uses of other values defined by this operation.
794 bool hasAnyUseOfValue(unsigned Value) const;
795
796 /// Return true if this node is the only use of N.
797 bool isOnlyUserOf(const SDNode *N) const;
798
799 /// Return true if this node is an operand of N.
800 bool isOperandOf(const SDNode *N) const;
801
802 /// Return true if this node is a predecessor of N.
803 /// NOTE: Implemented on top of hasPredecessor and every bit as
804 /// expensive. Use carefully.
805 bool isPredecessorOf(const SDNode *N) const {
806 return N->hasPredecessor(this);
807 }
808
809 /// Return true if N is a predecessor of this node.
810 /// N is either an operand of this node, or can be reached by recursively
811 /// traversing up the operands.
812 /// NOTE: This is an expensive method. Use it carefully.
813 bool hasPredecessor(const SDNode *N) const;
814
815 /// Returns true if N is a predecessor of any node in Worklist. This
816 /// helper keeps Visited and Worklist sets externally to allow union
817 /// searches to be performed in parallel, caching of results across
818 /// queries and incremental addition to Worklist. Stops early if N is
819 /// found but will resume. Remember to clear Visited and Worklist
820 /// if the DAG changes. MaxSteps gives a maximum number of nodes to visit before
821 /// giving up. The TopologicalPrune flag signals that positive NodeIds are
822 /// topologically ordered (Operands have strictly smaller node id) and search
823 /// can be pruned leveraging this.
824 static bool hasPredecessorHelper(const SDNode *N,
825 SmallPtrSetImpl<const SDNode *> &Visited,
826 SmallVectorImpl<const SDNode *> &Worklist,
827 unsigned int MaxSteps = 0,
828 bool TopologicalPrune = false) {
829 SmallVector<const SDNode *, 8> DeferredNodes;
830 if (Visited.count(N))
831 return true;
832
833 // Node Ids are assigned in three places: as a topological
834 // ordering (> 0), during legalization (results in values set to
835 // 0), and for new nodes (set to -1). If N has a topological id then we
836 // know that all nodes with ids smaller than it cannot be
837 // successors and we need not check them. Filter out all nodes
838 // that can't be matched. We add them to the worklist before exit
839 // in case of multiple calls. Note that during selection the topological id
840 // may be violated if a node's predecessor is selected before it. We mark
841 // this at selection by negating the id of unselected successors and
842 // restricting topological pruning to positive ids.
843
844 int NId = N->getNodeId();
845 // If we Invalidated the Id, reconstruct original NId.
846 if (NId < -1)
847 NId = -(NId + 1);
848
849 bool Found = false;
850 while (!Worklist.empty()) {
851 const SDNode *M = Worklist.pop_back_val();
852 int MId = M->getNodeId();
853 if (TopologicalPrune && M->getOpcode() != ISD::TokenFactor && (NId > 0) &&
854 (MId > 0) && (MId < NId)) {
855 DeferredNodes.push_back(M);
856 continue;
857 }
858 for (const SDValue &OpV : M->op_values()) {
859 SDNode *Op = OpV.getNode();
860 if (Visited.insert(Op).second)
861 Worklist.push_back(Op);
862 if (Op == N)
863 Found = true;
864 }
865 if (Found)
866 break;
867 if (MaxSteps != 0 && Visited.size() >= MaxSteps)
868 break;
869 }
870 // Push deferred nodes back on worklist.
871 Worklist.append(DeferredNodes.begin(), DeferredNodes.end());
872 // If we bailed early, conservatively return found.
873 if (MaxSteps != 0 && Visited.size() >= MaxSteps)
874 return true;
875 return Found;
876 }
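A minimal usage sketch of the helper above, with the Visited and Worklist sets owned by the caller so repeated queries can reuse them; the function name is hypothetical:

    #include "llvm/CodeGen/SelectionDAGNodes.h"
    using namespace llvm;

    // Illustrative only: is N reachable by walking up Root's operands?
    static bool feedsInto(const SDNode *N, const SDNode *Root) {
      SmallPtrSet<const SDNode *, 32> Visited;
      SmallVector<const SDNode *, 16> Worklist;
      Worklist.push_back(Root);
      return SDNode::hasPredecessorHelper(N, Visited, Worklist);
    }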
877
878 /// Return true if all the users of N are contained in Nodes.
879 /// NOTE: Requires at least one match, but doesn't require them all.
880 static bool areOnlyUsersOf(ArrayRef<const SDNode *> Nodes, const SDNode *N);
881
882 /// Return the number of values used by this operation.
883 unsigned getNumOperands() const { return NumOperands; }
884
885 /// Return the maximum number of operands that a SDNode can hold.
886 static constexpr size_t getMaxNumOperands() {
887 return std::numeric_limits<decltype(SDNode::NumOperands)>::max();
888 }
889
890 /// Helper method returns the integer value of a ConstantSDNode operand.
891 inline uint64_t getConstantOperandVal(unsigned Num) const;
892
893 /// Helper method returns the APInt of a ConstantSDNode operand.
894 inline const APInt &getConstantOperandAPInt(unsigned Num) const;
895
896 const SDValue &getOperand(unsigned Num) const {
897 assert(Num < NumOperands && "Invalid child # of SDNode!")((Num < NumOperands && "Invalid child # of SDNode!"
) ? static_cast<void> (0) : __assert_fail ("Num < NumOperands && \"Invalid child # of SDNode!\""
, "/build/llvm-toolchain-snapshot-13~++20210413100635+64c24f493e5f/llvm/include/llvm/CodeGen/SelectionDAGNodes.h"
, 897, __PRETTY_FUNCTION__))
;
898 return OperandList[Num];
899 }
900
901 using op_iterator = SDUse *;
902
903 op_iterator op_begin() const { return OperandList; }
904 op_iterator op_end() const { return OperandList+NumOperands; }
905 ArrayRef<SDUse> ops() const { return makeArrayRef(op_begin(), op_end()); }
906
907 /// Iterator for directly iterating over the operand SDValue's.
908 struct value_op_iterator
909 : iterator_adaptor_base<value_op_iterator, op_iterator,
910 std::random_access_iterator_tag, SDValue,
911 ptrdiff_t, value_op_iterator *,
912 value_op_iterator *> {
913 explicit value_op_iterator(SDUse *U = nullptr)
914 : iterator_adaptor_base(U) {}
915
916 const SDValue &operator*() const { return I->get(); }
917 };
918
919 iterator_range<value_op_iterator> op_values() const {
920 return make_range(value_op_iterator(op_begin()),
921 value_op_iterator(op_end()));
922 }
923
924 SDVTList getVTList() const {
925 SDVTList X = { ValueList, NumValues };
926 return X;
927 }
928
929 /// If this node has a glue operand, return the node
930 /// to which the glue operand points. Otherwise return NULL.
931 SDNode *getGluedNode() const {
932 if (getNumOperands() != 0 &&
933 getOperand(getNumOperands()-1).getValueType() == MVT::Glue)
934 return getOperand(getNumOperands()-1).getNode();
935 return nullptr;
936 }
937
938 /// If this node has a glue value with a user, return
939 /// the user (there is at most one). Otherwise return NULL.
940 SDNode *getGluedUser() const {
941 for (use_iterator UI = use_begin(), UE = use_end(); UI != UE; ++UI)
942 if (UI.getUse().get().getValueType() == MVT::Glue)
943 return *UI;
944 return nullptr;
945 }
946
947 SDNodeFlags getFlags() const { return Flags; }
948 void setFlags(SDNodeFlags NewFlags) { Flags = NewFlags; }
949
950 /// Clear any flags in this node that aren't also set in Flags.
951 /// If Flags is not in a defined state then this has no effect.
952 void intersectFlagsWith(const SDNodeFlags Flags);
953
954 /// Return the number of values defined/returned by this operator.
955 unsigned getNumValues() const { return NumValues; }
956
957 /// Return the type of a specified result.
958 EVT getValueType(unsigned ResNo) const {
959 assert(ResNo < NumValues && "Illegal result number!")((ResNo < NumValues && "Illegal result number!") ?
static_cast<void> (0) : __assert_fail ("ResNo < NumValues && \"Illegal result number!\""
, "/build/llvm-toolchain-snapshot-13~++20210413100635+64c24f493e5f/llvm/include/llvm/CodeGen/SelectionDAGNodes.h"
, 959, __PRETTY_FUNCTION__))
;
960 return ValueList[ResNo];
961 }
962
963 /// Return the type of a specified result as a simple type.
964 MVT getSimpleValueType(unsigned ResNo) const {
965 return getValueType(ResNo).getSimpleVT();
966 }
967
968 /// Returns MVT::getSizeInBits(getValueType(ResNo)).
969 ///
970 /// If the value type is a scalable vector type, the scalable property will
971 /// be set and the runtime size will be a positive integer multiple of the
972 /// base size.
973 TypeSize getValueSizeInBits(unsigned ResNo) const {
974 return getValueType(ResNo).getSizeInBits();
975 }
976
977 using value_iterator = const EVT *;
978
979 value_iterator value_begin() const { return ValueList; }
980 value_iterator value_end() const { return ValueList+NumValues; }
981 iterator_range<value_iterator> values() const {
982 return llvm::make_range(value_begin(), value_end());
983 }
984
985 /// Return the opcode of this operation for printing.
986 std::string getOperationName(const SelectionDAG *G = nullptr) const;
987 static const char* getIndexedModeName(ISD::MemIndexedMode AM);
988 void print_types(raw_ostream &OS, const SelectionDAG *G) const;
989 void print_details(raw_ostream &OS, const SelectionDAG *G) const;
990 void print(raw_ostream &OS, const SelectionDAG *G = nullptr) const;
991 void printr(raw_ostream &OS, const SelectionDAG *G = nullptr) const;
992
993 /// Print a SelectionDAG node and all children down to
994 /// the leaves. The given SelectionDAG allows target-specific nodes
995 /// to be printed in human-readable form. Unlike printr, this will
996 /// print the whole DAG, including children that appear multiple
997 /// times.
998 ///
999 void printrFull(raw_ostream &O, const SelectionDAG *G = nullptr) const;
1000
1001 /// Print a SelectionDAG node and children up to
1002 /// depth "depth." The given SelectionDAG allows target-specific
1003 /// nodes to be printed in human-readable form. Unlike printr, this
1004 /// will print children that appear multiple times wherever they are
1005 /// used.
1006 ///
1007 void printrWithDepth(raw_ostream &O, const SelectionDAG *G = nullptr,
1008 unsigned depth = 100) const;
1009
1010 /// Dump this node, for debugging.
1011 void dump() const;
1012
1013 /// Dump (recursively) this node and its use-def subgraph.
1014 void dumpr() const;
1015
1016 /// Dump this node, for debugging.
1017 /// The given SelectionDAG allows target-specific nodes to be printed
1018 /// in human-readable form.
1019 void dump(const SelectionDAG *G) const;
1020
1021 /// Dump (recursively) this node and its use-def subgraph.
1022 /// The given SelectionDAG allows target-specific nodes to be printed
1023 /// in human-readable form.
1024 void dumpr(const SelectionDAG *G) const;
1025
1026 /// printrFull to dbgs(). The given SelectionDAG allows
1027 /// target-specific nodes to be printed in human-readable form.
1028 /// Unlike dumpr, this will print the whole DAG, including children
1029 /// that appear multiple times.
1030 void dumprFull(const SelectionDAG *G = nullptr) const;
1031
1032 /// printrWithDepth to dbgs(). The given
1033 /// SelectionDAG allows target-specific nodes to be printed in
1034 /// human-readable form. Unlike dumpr, this will print children
1035 /// that appear multiple times wherever they are used.
1036 ///
1037 void dumprWithDepth(const SelectionDAG *G = nullptr,
1038 unsigned depth = 100) const;
1039
1040 /// Gather unique data for the node.
1041 void Profile(FoldingSetNodeID &ID) const;
1042
1043 /// This method should only be used by the SDUse class.
1044 void addUse(SDUse &U) { U.addToList(&UseList); }
1045
1046protected:
1047 static SDVTList getSDVTList(EVT VT) {
1048 SDVTList Ret = { getValueTypeList(VT), 1 };
1049 return Ret;
1050 }
1051
1052 /// Create an SDNode.
1053 ///
1054 /// SDNodes are created without any operands, and never own the operand
1055 /// storage. To add operands, see SelectionDAG::createOperands.
1056 SDNode(unsigned Opc, unsigned Order, DebugLoc dl, SDVTList VTs)
1057 : NodeType(Opc), ValueList(VTs.VTs), NumValues(VTs.NumVTs),
1058 IROrder(Order), debugLoc(std::move(dl)) {
1059 memset(&RawSDNodeBits, 0, sizeof(RawSDNodeBits));
1060 assert(debugLoc.hasTrivialDestructor() && "Expected trivial destructor")((debugLoc.hasTrivialDestructor() && "Expected trivial destructor"
) ? static_cast<void> (0) : __assert_fail ("debugLoc.hasTrivialDestructor() && \"Expected trivial destructor\""
, "/build/llvm-toolchain-snapshot-13~++20210413100635+64c24f493e5f/llvm/include/llvm/CodeGen/SelectionDAGNodes.h"
, 1060, __PRETTY_FUNCTION__))
;
1061 assert(NumValues == VTs.NumVTs &&((NumValues == VTs.NumVTs && "NumValues wasn't wide enough for its operands!"
) ? static_cast<void> (0) : __assert_fail ("NumValues == VTs.NumVTs && \"NumValues wasn't wide enough for its operands!\""
, "/build/llvm-toolchain-snapshot-13~++20210413100635+64c24f493e5f/llvm/include/llvm/CodeGen/SelectionDAGNodes.h"
, 1062, __PRETTY_FUNCTION__))
1062 "NumValues wasn't wide enough for its operands!")((NumValues == VTs.NumVTs && "NumValues wasn't wide enough for its operands!"
) ? static_cast<void> (0) : __assert_fail ("NumValues == VTs.NumVTs && \"NumValues wasn't wide enough for its operands!\""
, "/build/llvm-toolchain-snapshot-13~++20210413100635+64c24f493e5f/llvm/include/llvm/CodeGen/SelectionDAGNodes.h"
, 1062, __PRETTY_FUNCTION__))
;
1063 }
1064
1065 /// Release the operands and set this node to have zero operands.
1066 void DropOperands();
1067};
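Tying the iteration APIs above together, a small hedged sketch that walks a node's operands and its users; the helper name is hypothetical and assumes N comes from an existing DAG:

    #include "llvm/CodeGen/SelectionDAGNodes.h"
    using namespace llvm;

    static void visitNeighbours(const SDNode *N) {
      for (const SDValue &Op : N->op_values())   // operands, as SDValues
        (void)Op.getValueType();
      for (const SDNode *User : N->uses())       // nodes consuming a result of N
        (void)User->getOpcode();
    }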
1068
1069/// Wrapper class for IR location info (IR ordering and DebugLoc) to be passed
1070/// into SDNode creation functions.
1071/// When an SDNode is created from the DAGBuilder, the DebugLoc is extracted
1072/// from the original Instruction, and IROrder is the ordinal position of
1073/// the instruction.
1074/// When an SDNode is created after the DAG is being built, both DebugLoc and
1075/// the IROrder are propagated from the original SDNode.
1076/// So SDLoc class provides two constructors besides the default one, one to
1077/// be used by the DAGBuilder, the other to be used by others.
1078class SDLoc {
1079private:
1080 DebugLoc DL;
1081 int IROrder = 0;
1082
1083public:
1084 SDLoc() = default;
1085 SDLoc(const SDNode *N) : DL(N->getDebugLoc()), IROrder(N->getIROrder()) {}
1086 SDLoc(const SDValue V) : SDLoc(V.getNode()) {}
1087 SDLoc(const Instruction *I, int Order) : IROrder(Order) {
1088 assert(Order >= 0 && "bad IROrder")((Order >= 0 && "bad IROrder") ? static_cast<void
> (0) : __assert_fail ("Order >= 0 && \"bad IROrder\""
, "/build/llvm-toolchain-snapshot-13~++20210413100635+64c24f493e5f/llvm/include/llvm/CodeGen/SelectionDAGNodes.h"
, 1088, __PRETTY_FUNCTION__))
;
1089 if (I)
1090 DL = I->getDebugLoc();
1091 }
1092
1093 unsigned getIROrder() const { return IROrder; }
1094 const DebugLoc &getDebugLoc() const { return DL; }
1095};
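In combines, an SDLoc is normally constructed from the node (or value) being rewritten so that replacement nodes inherit its DebugLoc and IR order, exactly as the PPCISelLowering snippet earlier in this report does with SDLoc dl(N). A trivial sketch of the two non-default constructors; the names are illustrative:

    #include "llvm/CodeGen/SelectionDAGNodes.h"
    using namespace llvm;

    static SDLoc locFromNode(const SDNode *N) { return SDLoc(N); } // DebugLoc + IROrder of N
    static SDLoc locFromValue(SDValue V)      { return SDLoc(V); } // same, via V.getNode()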
1096
1097// Define inline functions from the SDValue class.
1098
1099inline SDValue::SDValue(SDNode *node, unsigned resno)
1100 : Node(node), ResNo(resno) {
1101 // Explicitly check for !ResNo to avoid use-after-free, because there are
1102 // callers that use SDValue(N, 0) with a deleted N to indicate successful
1103 // combines.
1104 assert((!Node || !ResNo || ResNo < Node->getNumValues()) &&(((!Node || !ResNo || ResNo < Node->getNumValues()) &&
"Invalid result number for the given node!") ? static_cast<
void> (0) : __assert_fail ("(!Node || !ResNo || ResNo < Node->getNumValues()) && \"Invalid result number for the given node!\""
, "/build/llvm-toolchain-snapshot-13~++20210413100635+64c24f493e5f/llvm/include/llvm/CodeGen/SelectionDAGNodes.h"
, 1105, __PRETTY_FUNCTION__))
1105 "Invalid result number for the given node!")(((!Node || !ResNo || ResNo < Node->getNumValues()) &&
"Invalid result number for the given node!") ? static_cast<
void> (0) : __assert_fail ("(!Node || !ResNo || ResNo < Node->getNumValues()) && \"Invalid result number for the given node!\""
, "/build/llvm-toolchain-snapshot-13~++20210413100635+64c24f493e5f/llvm/include/llvm/CodeGen/SelectionDAGNodes.h"
, 1105, __PRETTY_FUNCTION__))
;
1106 assert(ResNo < -2U && "Cannot use result numbers reserved for DenseMaps.")((ResNo < -2U && "Cannot use result numbers reserved for DenseMaps."
) ? static_cast<void> (0) : __assert_fail ("ResNo < -2U && \"Cannot use result numbers reserved for DenseMaps.\""
, "/build/llvm-toolchain-snapshot-13~++20210413100635+64c24f493e5f/llvm/include/llvm/CodeGen/SelectionDAGNodes.h"
, 1106, __PRETTY_FUNCTION__))
;
1107}
1108
1109inline unsigned SDValue::getOpcode() const {
1110 return Node->getOpcode();
1111}
1112
1113inline EVT SDValue::getValueType() const {
1114 return Node->getValueType(ResNo);
1115}
1116
1117inline unsigned SDValue::getNumOperands() const {
1118 return Node->getNumOperands();
1119}
1120
1121inline const SDValue &SDValue::getOperand(unsigned i) const {
1122 return Node->getOperand(i);
1123}
1124
1125inline uint64_t SDValue::getConstantOperandVal(unsigned i) const {
1126 return Node->getConstantOperandVal(i);
1127}
1128
1129inline const APInt &SDValue::getConstantOperandAPInt(unsigned i) const {
1130 return Node->getConstantOperandAPInt(i);
1131}
1132
1133inline bool SDValue::isTargetOpcode() const {
1134 return Node->isTargetOpcode();
1135}
1136
1137inline bool SDValue::isTargetMemoryOpcode() const {
1138 return Node->isTargetMemoryOpcode();
1139}
1140
1141inline bool SDValue::isMachineOpcode() const {
1142 return Node->isMachineOpcode();
1143}
1144
1145inline unsigned SDValue::getMachineOpcode() const {
1146 return Node->getMachineOpcode();
1147}
1148
1149inline bool SDValue::isUndef() const {
1150 return Node->isUndef();
29
Calling 'SDNode::isUndef'
32
Returning from 'SDNode::isUndef'
33
Returning zero, which participates in a condition later
1151}
1152
1153inline bool SDValue::use_empty() const {
1154 return !Node->hasAnyUseOfValue(ResNo);
1155}
1156
1157inline bool SDValue::hasOneUse() const {
1158 return Node->hasNUsesOfValue(1, ResNo);
1159}
1160
1161inline const DebugLoc &SDValue::getDebugLoc() const {
1162 return Node->getDebugLoc();
1163}
1164
1165inline void SDValue::dump() const {
1166 return Node->dump();
1167}
1168
1169inline void SDValue::dump(const SelectionDAG *G) const {
1170 return Node->dump(G);
1171}
1172
1173inline void SDValue::dumpr() const {
1174 return Node->dumpr();
1175}
1176
1177inline void SDValue::dumpr(const SelectionDAG *G) const {
1178 return Node->dumpr(G);
1179}
1180
1181// Define inline functions from the SDUse class.
1182
1183inline void SDUse::set(const SDValue &V) {
1184 if (Val.getNode()) removeFromList();
1185 Val = V;
1186 if (V.getNode()) V.getNode()->addUse(*this);
1187}
1188
1189inline void SDUse::setInitial(const SDValue &V) {
1190 Val = V;
1191 V.getNode()->addUse(*this);
1192}
1193
1194inline void SDUse::setNode(SDNode *N) {
1195 if (Val.getNode()) removeFromList();
1196 Val.setNode(N);
1197 if (N) N->addUse(*this);
1198}
1199
1200/// This class is used to form a handle around another node that
1201/// is persistent and is updated across invocations of replaceAllUsesWith on its
1202/// operand. This node should be directly created by end-users and not added to
1203/// the AllNodes list.
1204class HandleSDNode : public SDNode {
1205 SDUse Op;
1206
1207public:
1208 explicit HandleSDNode(SDValue X)
1209 : SDNode(ISD::HANDLENODE, 0, DebugLoc(), getSDVTList(MVT::Other)) {
1210 // HandleSDNodes are never inserted into the DAG, so they won't be
1211 // auto-numbered. Use ID 65535 as a sentinel.
1212 PersistentId = 0xffff;
1213
1214 // Manually set up the operand list. This node type is special in that it's
1215 // always stack allocated and SelectionDAG does not manage its operands.
1216 // TODO: This should either (a) not be in the SDNode hierarchy, or (b) not
1217 // be so special.
1218 Op.setUser(this);
1219 Op.setInitial(X);
1220 NumOperands = 1;
1221 OperandList = &Op;
1222 }
1223 ~HandleSDNode();
1224
1225 const SDValue &getValue() const { return Op; }
1226};
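A minimal sketch of the intended pattern: a stack-allocated HandleSDNode keeps an SDValue alive (and updated) while surrounding nodes are replaced, and the possibly-updated value is read back afterwards. The replacement step itself is elided; the helper name is hypothetical:

    #include "llvm/CodeGen/SelectionDAGNodes.h"
    using namespace llvm;

    static SDValue protectAcrossReplacement(SDValue Val) {
      HandleSDNode Handle(Val);   // stack-allocated, never added to the DAG
      // ... replacements that may rewrite Val's defining node go here ...
      return Handle.getValue();   // the value as updated by those replacements
    }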
1227
1228class AddrSpaceCastSDNode : public SDNode {
1229private:
1230 unsigned SrcAddrSpace;
1231 unsigned DestAddrSpace;
1232
1233public:
1234 AddrSpaceCastSDNode(unsigned Order, const DebugLoc &dl, EVT VT,
1235 unsigned SrcAS, unsigned DestAS);
1236
1237 unsigned getSrcAddressSpace() const { return SrcAddrSpace; }
1238 unsigned getDestAddressSpace() const { return DestAddrSpace; }
1239
1240 static bool classof(const SDNode *N) {
1241 return N->getOpcode() == ISD::ADDRSPACECAST;
1242 }
1243};
1244
1245/// This is an abstract virtual class for memory operations.
1246class MemSDNode : public SDNode {
1247private:
1248 // VT of in-memory value.
1249 EVT MemoryVT;
1250
1251protected:
1252 /// Memory reference information.
1253 MachineMemOperand *MMO;
1254
1255public:
1256 MemSDNode(unsigned Opc, unsigned Order, const DebugLoc &dl, SDVTList VTs,
1257 EVT memvt, MachineMemOperand *MMO);
1258
1259 bool readMem() const { return MMO->isLoad(); }
1260 bool writeMem() const { return MMO->isStore(); }
1261
1262 /// Returns alignment and volatility of the memory access
1263 Align getOriginalAlign() const { return MMO->getBaseAlign(); }
1264 Align getAlign() const { return MMO->getAlign(); }
1265 LLVM_ATTRIBUTE_DEPRECATED(unsigned getOriginalAlignment() const,[[deprecated("Use getOriginalAlign() instead")]] unsigned getOriginalAlignment
() const
1266 "Use getOriginalAlign() instead")[[deprecated("Use getOriginalAlign() instead")]] unsigned getOriginalAlignment
() const
{
1267 return MMO->getBaseAlign().value();
1268 }
1269 // FIXME: Remove once transition to getAlign is over.
1270 unsigned getAlignment() const { return MMO->getAlign().value(); }
1271
1272 /// Return the SubclassData value, without HasDebugValue. This contains an
1273 /// encoding of the volatile flag, as well as bits used by subclasses. This
1274 /// function should only be used to compute a FoldingSetNodeID value.
1275 /// The HasDebugValue bit is masked out because CSE map needs to match
1276 /// nodes with debug info with nodes without debug info. Same is about
1277 /// isDivergent bit.
1278 unsigned getRawSubclassData() const {
1279 uint16_t Data;
1280 union {
1281 char RawSDNodeBits[sizeof(uint16_t)];
1282 SDNodeBitfields SDNodeBits;
1283 };
1284 memcpy(&RawSDNodeBits, &this->RawSDNodeBits, sizeof(this->RawSDNodeBits));
1285 SDNodeBits.HasDebugValue = 0;
1286 SDNodeBits.IsDivergent = false;
1287 memcpy(&Data, &RawSDNodeBits, sizeof(RawSDNodeBits));
1288 return Data;
1289 }
1290
1291 bool isVolatile() const { return MemSDNodeBits.IsVolatile; }
1292 bool isNonTemporal() const { return MemSDNodeBits.IsNonTemporal; }
1293 bool isDereferenceable() const { return MemSDNodeBits.IsDereferenceable; }
1294 bool isInvariant() const { return MemSDNodeBits.IsInvariant; }
1295
1296 // Returns the offset from the location of the access.
1297 int64_t getSrcValueOffset() const { return MMO->getOffset(); }
1298
1299 /// Returns the AA info that describes the dereference.
1300 AAMDNodes getAAInfo() const { return MMO->getAAInfo(); }
1301
1302 /// Returns the Ranges that describes the dereference.
1303 const MDNode *getRanges() const { return MMO->getRanges(); }
1304
1305 /// Returns the synchronization scope ID for this memory operation.
1306 SyncScope::ID getSyncScopeID() const { return MMO->getSyncScopeID(); }
1307
1308 /// Return the atomic ordering requirements for this memory operation. For
1309 /// cmpxchg atomic operations, return the atomic ordering requirements when
1310 /// store occurs.
1311 AtomicOrdering getOrdering() const { return MMO->getOrdering(); }
1312
1313 /// Return true if the memory operation ordering is Unordered or higher.
1314 bool isAtomic() const { return MMO->isAtomic(); }
1315
1316 /// Returns true if the memory operation doesn't imply any ordering
1317 /// constraints on surrounding memory operations beyond the normal memory
1318 /// aliasing rules.
1319 bool isUnordered() const { return MMO->isUnordered(); }
1320
1321 /// Returns true if the memory operation is neither atomic nor volatile.
1322 bool isSimple() const { return !isAtomic() && !isVolatile(); }
1323
1324 /// Return the type of the in-memory value.
1325 EVT getMemoryVT() const { return MemoryVT; }
1326
1327 /// Return a MachineMemOperand object describing the memory
1328 /// reference performed by operation.
1329 MachineMemOperand *getMemOperand() const { return MMO; }
1330
1331 const MachinePointerInfo &getPointerInfo() const {
1332 return MMO->getPointerInfo();
1333 }
1334
1335 /// Return the address space for the associated pointer
1336 unsigned getAddressSpace() const {
1337 return getPointerInfo().getAddrSpace();
1338 }
1339
1340 /// Update this MemSDNode's MachineMemOperand information
1341 /// to reflect the alignment of NewMMO, if it has a greater alignment.
1342 /// This must only be used when the new alignment applies to all users of
1343 /// this MachineMemOperand.
1344 void refineAlignment(const MachineMemOperand *NewMMO) {
1345 MMO->refineAlignment(NewMMO);
1346 }
1347
1348 const SDValue &getChain() const { return getOperand(0); }
1349
1350 const SDValue &getBasePtr() const {
1351 switch (getOpcode()) {
1352 case ISD::STORE:
1353 case ISD::MSTORE:
1354 return getOperand(2);
1355 case ISD::MGATHER:
1356 case ISD::MSCATTER:
1357 return getOperand(3);
1358 default:
1359 return getOperand(1);
1360 }
1361 }
1362
1363 // Methods to support isa and dyn_cast
1364 static bool classof(const SDNode *N) {
1365 // For some targets, we lower some target intrinsics to a MemIntrinsicNode
1366 // with either an intrinsic or a target opcode.
1367 return N->getOpcode() == ISD::LOAD ||
1368 N->getOpcode() == ISD::STORE ||
1369 N->getOpcode() == ISD::PREFETCH ||
1370 N->getOpcode() == ISD::ATOMIC_CMP_SWAP ||
1371 N->getOpcode() == ISD::ATOMIC_CMP_SWAP_WITH_SUCCESS ||
1372 N->getOpcode() == ISD::ATOMIC_SWAP ||
1373 N->getOpcode() == ISD::ATOMIC_LOAD_ADD ||
1374 N->getOpcode() == ISD::ATOMIC_LOAD_SUB ||
1375 N->getOpcode() == ISD::ATOMIC_LOAD_AND ||
1376 N->getOpcode() == ISD::ATOMIC_LOAD_CLR ||
1377 N->getOpcode() == ISD::ATOMIC_LOAD_OR ||
1378 N->getOpcode() == ISD::ATOMIC_LOAD_XOR ||
1379 N->getOpcode() == ISD::ATOMIC_LOAD_NAND ||
1380 N->getOpcode() == ISD::ATOMIC_LOAD_MIN ||
1381 N->getOpcode() == ISD::ATOMIC_LOAD_MAX ||
1382 N->getOpcode() == ISD::ATOMIC_LOAD_UMIN ||
1383 N->getOpcode() == ISD::ATOMIC_LOAD_UMAX ||
1384 N->getOpcode() == ISD::ATOMIC_LOAD_FADD ||
1385 N->getOpcode() == ISD::ATOMIC_LOAD_FSUB ||
1386 N->getOpcode() == ISD::ATOMIC_LOAD ||
1387 N->getOpcode() == ISD::ATOMIC_STORE ||
1388 N->getOpcode() == ISD::MLOAD ||
1389 N->getOpcode() == ISD::MSTORE ||
1390 N->getOpcode() == ISD::MGATHER ||
1391 N->getOpcode() == ISD::MSCATTER ||
1392 N->isMemIntrinsic() ||
1393 N->isTargetMemoryOpcode();
1394 }
1395};
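Because classof accepts every opcode listed above, memory-touching nodes can be queried generically through a dyn_cast. A small hedged sketch (hypothetical helper):

    #include "llvm/CodeGen/SelectionDAGNodes.h"
    #include "llvm/Support/Casting.h"
    using namespace llvm;

    // True for loads/stores/atomics etc. that are neither atomic nor volatile
    // and live in the given address space.
    static bool isSimpleAccessInAS(const SDNode *N, unsigned AS) {
      const auto *MemN = dyn_cast<MemSDNode>(N);
      return MemN && MemN->isSimple() && MemN->getAddressSpace() == AS;
    }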
1396
1397/// This is an SDNode representing atomic operations.
1398class AtomicSDNode : public MemSDNode {
1399public:
1400 AtomicSDNode(unsigned Opc, unsigned Order, const DebugLoc &dl, SDVTList VTL,
1401 EVT MemVT, MachineMemOperand *MMO)
1402 : MemSDNode(Opc, Order, dl, VTL, MemVT, MMO) {
1403 assert(((Opc != ISD::ATOMIC_LOAD && Opc != ISD::ATOMIC_STORE) ||((((Opc != ISD::ATOMIC_LOAD && Opc != ISD::ATOMIC_STORE
) || MMO->isAtomic()) && "then why are we using an AtomicSDNode?"
) ? static_cast<void> (0) : __assert_fail ("((Opc != ISD::ATOMIC_LOAD && Opc != ISD::ATOMIC_STORE) || MMO->isAtomic()) && \"then why are we using an AtomicSDNode?\""
, "/build/llvm-toolchain-snapshot-13~++20210413100635+64c24f493e5f/llvm/include/llvm/CodeGen/SelectionDAGNodes.h"
, 1404, __PRETTY_FUNCTION__))
1404 MMO->isAtomic()) && "then why are we using an AtomicSDNode?")((((Opc != ISD::ATOMIC_LOAD && Opc != ISD::ATOMIC_STORE
) || MMO->isAtomic()) && "then why are we using an AtomicSDNode?"
) ? static_cast<void> (0) : __assert_fail ("((Opc != ISD::ATOMIC_LOAD && Opc != ISD::ATOMIC_STORE) || MMO->isAtomic()) && \"then why are we using an AtomicSDNode?\""
, "/build/llvm-toolchain-snapshot-13~++20210413100635+64c24f493e5f/llvm/include/llvm/CodeGen/SelectionDAGNodes.h"
, 1404, __PRETTY_FUNCTION__))
;
1405 }
1406
1407 const SDValue &getBasePtr() const { return getOperand(1); }
1408 const SDValue &getVal() const { return getOperand(2); }
1409
1410 /// Returns true if this SDNode represents cmpxchg atomic operation, false
1411 /// otherwise.
1412 bool isCompareAndSwap() const {
1413 unsigned Op = getOpcode();
1414 return Op == ISD::ATOMIC_CMP_SWAP ||
1415 Op == ISD::ATOMIC_CMP_SWAP_WITH_SUCCESS;
1416 }
1417
1418 /// For cmpxchg atomic operations, return the atomic ordering requirements
1419 /// when store does not occur.
1420 AtomicOrdering getFailureOrdering() const {
1421 assert(isCompareAndSwap() && "Must be cmpxchg operation")((isCompareAndSwap() && "Must be cmpxchg operation") ?
static_cast<void> (0) : __assert_fail ("isCompareAndSwap() && \"Must be cmpxchg operation\""
, "/build/llvm-toolchain-snapshot-13~++20210413100635+64c24f493e5f/llvm/include/llvm/CodeGen/SelectionDAGNodes.h"
, 1421, __PRETTY_FUNCTION__))
;
1422 return MMO->getFailureOrdering();
1423 }
1424
1425 // Methods to support isa and dyn_cast
1426 static bool classof(const SDNode *N) {
1427 return N->getOpcode() == ISD::ATOMIC_CMP_SWAP ||
1428 N->getOpcode() == ISD::ATOMIC_CMP_SWAP_WITH_SUCCESS ||
1429 N->getOpcode() == ISD::ATOMIC_SWAP ||
1430 N->getOpcode() == ISD::ATOMIC_LOAD_ADD ||
1431 N->getOpcode() == ISD::ATOMIC_LOAD_SUB ||
1432 N->getOpcode() == ISD::ATOMIC_LOAD_AND ||
1433 N->getOpcode() == ISD::ATOMIC_LOAD_CLR ||
1434 N->getOpcode() == ISD::ATOMIC_LOAD_OR ||
1435 N->getOpcode() == ISD::ATOMIC_LOAD_XOR ||
1436 N->getOpcode() == ISD::ATOMIC_LOAD_NAND ||
1437 N->getOpcode() == ISD::ATOMIC_LOAD_MIN ||
1438 N->getOpcode() == ISD::ATOMIC_LOAD_MAX ||
1439 N->getOpcode() == ISD::ATOMIC_LOAD_UMIN ||
1440 N->getOpcode() == ISD::ATOMIC_LOAD_UMAX ||
1441 N->getOpcode() == ISD::ATOMIC_LOAD_FADD ||
1442 N->getOpcode() == ISD::ATOMIC_LOAD_FSUB ||
1443 N->getOpcode() == ISD::ATOMIC_LOAD ||
1444 N->getOpcode() == ISD::ATOMIC_STORE;
1445 }
1446};
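For cmpxchg nodes the success ordering comes from MemSDNode::getOrdering and the failure ordering from the accessor above. A short sketch, illustrative only:

    #include "llvm/CodeGen/SelectionDAGNodes.h"
    #include <cassert>
    #include <utility>
    using namespace llvm;

    static std::pair<AtomicOrdering, AtomicOrdering>
    cmpxchgOrderings(const AtomicSDNode *AN) {
      assert(AN->isCompareAndSwap() && "expected ATOMIC_CMP_SWAP[_WITH_SUCCESS]");
      return {AN->getOrdering(), AN->getFailureOrdering()};
    }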
1447
1448/// This SDNode is used for target intrinsics that touch
1449/// memory and need an associated MachineMemOperand. Its opcode may be
1450/// INTRINSIC_VOID, INTRINSIC_W_CHAIN, PREFETCH, or a target-specific opcode
1451/// with a value not less than FIRST_TARGET_MEMORY_OPCODE.
1452class MemIntrinsicSDNode : public MemSDNode {
1453public:
1454 MemIntrinsicSDNode(unsigned Opc, unsigned Order, const DebugLoc &dl,
1455 SDVTList VTs, EVT MemoryVT, MachineMemOperand *MMO)
1456 : MemSDNode(Opc, Order, dl, VTs, MemoryVT, MMO) {
1457 SDNodeBits.IsMemIntrinsic = true;
1458 }
1459
1460 // Methods to support isa and dyn_cast
1461 static bool classof(const SDNode *N) {
1462 // We lower some target intrinsics to their target opcode
1463 // early, so a node with a target opcode can be of this class.
1464 return N->isMemIntrinsic() ||
1465 N->getOpcode() == ISD::PREFETCH ||
1466 N->isTargetMemoryOpcode();
1467 }
1468};
1469
1470/// This SDNode is used to implement the code generator
1471/// support for the llvm IR shufflevector instruction. It combines elements
1472/// from two input vectors into a new vector, with the selection and
1473/// ordering of elements determined by an array of integers, referred to as
1474/// the shuffle mask. For input vectors of width N, mask indices of 0..N-1
1475/// refer to elements from the LHS input, and indices from N to 2N-1 refer to the RHS.
1476/// An index of -1 is treated as undef, such that the code generator may put
1477/// any value in the corresponding element of the result.
1478class ShuffleVectorSDNode : public SDNode {
1479 // The memory for Mask is owned by the SelectionDAG's OperandAllocator, and
1480 // is freed when the SelectionDAG object is destroyed.
1481 const int *Mask;
1482
1483protected:
1484 friend class SelectionDAG;
1485
1486 ShuffleVectorSDNode(EVT VT, unsigned Order, const DebugLoc &dl, const int *M)
1487 : SDNode(ISD::VECTOR_SHUFFLE, Order, dl, getSDVTList(VT)), Mask(M) {}
1488
1489public:
1490 ArrayRef<int> getMask() const {
1491 EVT VT = getValueType(0);
1492 return makeArrayRef(Mask, VT.getVectorNumElements());
1493 }
1494
1495 int getMaskElt(unsigned Idx) const {
1496 assert(Idx < getValueType(0).getVectorNumElements() && "Idx out of range!");
1497 return Mask[Idx];
1498 }
1499
1500 bool isSplat() const { return isSplatMask(Mask, getValueType(0)); }
1501
1502 int getSplatIndex() const {
1503 assert(isSplat() && "Cannot get splat index for non-splat!");
1504 EVT VT = getValueType(0);
1505 for (unsigned i = 0, e = VT.getVectorNumElements(); i != e; ++i)
1506 if (Mask[i] >= 0)
1507 return Mask[i];
1508
1509 // We can choose any index value here and be correct because all elements
1510 // are undefined. Return 0 for better potential for callers to simplify.
1511 return 0;
1512 }
1513
1514 static bool isSplatMask(const int *Mask, EVT VT);
1515
1516 /// Change values in a shuffle permute mask assuming
1517 /// the two vector operands have swapped position.
1518 static void commuteMask(MutableArrayRef<int> Mask) {
1519 unsigned NumElems = Mask.size();
1520 for (unsigned i = 0; i != NumElems; ++i) {
1521 int idx = Mask[i];
1522 if (idx < 0)
1523 continue;
1524 else if (idx < (int)NumElems)
1525 Mask[i] = idx + NumElems;
1526 else
1527 Mask[i] = idx - NumElems;
1528 }
1529 }
1530
1531 static bool classof(const SDNode *N) {
1532 return N->getOpcode() == ISD::VECTOR_SHUFFLE;
1533 }
1534};
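The commuteMask() transformation above is small enough to illustrate standalone. A self-contained sketch (plain C++, no LLVM dependencies; the vector width and mask values are made up for illustration):

#include <cassert>
#include <vector>

// Same remapping commuteMask() performs: after swapping the two input
// vectors, LHS indices (0..N-1) must point into the RHS range (N..2N-1)
// and vice versa; -1 (undef) stays -1.
static void commuteShuffleMask(std::vector<int> &Mask) {
  int NumElems = static_cast<int>(Mask.size());
  for (int &Idx : Mask) {
    if (Idx < 0)
      continue;                              // undef element, leave as-is
    Idx = Idx < NumElems ? Idx + NumElems : Idx - NumElems;
  }
}

int main() {
  std::vector<int> Mask = {0, 5, -1, 2};     // width-4 shuffle of two vectors
  commuteShuffleMask(Mask);
  assert((Mask == std::vector<int>{4, 1, -1, 6}));
  return 0;
}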
1535
1536class ConstantSDNode : public SDNode {
1537 friend class SelectionDAG;
1538
1539 const ConstantInt *Value;
1540
1541 ConstantSDNode(bool isTarget, bool isOpaque, const ConstantInt *val, EVT VT)
1542 : SDNode(isTarget ? ISD::TargetConstant : ISD::Constant, 0, DebugLoc(),
1543 getSDVTList(VT)),
1544 Value(val) {
1545 ConstantSDNodeBits.IsOpaque = isOpaque;
1546 }
1547
1548public:
1549 const ConstantInt *getConstantIntValue() const { return Value; }
1550 const APInt &getAPIntValue() const { return Value->getValue(); }
1551 uint64_t getZExtValue() const { return Value->getZExtValue(); }
1552 int64_t getSExtValue() const { return Value->getSExtValue(); }
1553 uint64_t getLimitedValue(uint64_t Limit = UINT64_MAX) {
1554 return Value->getLimitedValue(Limit);
1555 }
1556 MaybeAlign getMaybeAlignValue() const { return Value->getMaybeAlignValue(); }
1557 Align getAlignValue() const { return Value->getAlignValue(); }
1558
1559 bool isOne() const { return Value->isOne(); }
1560 bool isNullValue() const { return Value->isZero(); }
1561 bool isAllOnesValue() const { return Value->isMinusOne(); }
1562
1563 bool isOpaque() const { return ConstantSDNodeBits.IsOpaque; }
1564
1565 static bool classof(const SDNode *N) {
1566 return N->getOpcode() == ISD::Constant ||
1567 N->getOpcode() == ISD::TargetConstant;
1568 }
1569};
1570
1571uint64_t SDNode::getConstantOperandVal(unsigned Num) const {
1572 return cast<ConstantSDNode>(getOperand(Num))->getZExtValue();
1573}
1574
1575const APInt &SDNode::getConstantOperandAPInt(unsigned Num) const {
1576 return cast<ConstantSDNode>(getOperand(Num))->getAPIntValue();
1577}
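A minimal sketch of the helper just defined (assuming LLVM headers; the node comes from a hypothetical caller):

#include "llvm/CodeGen/SelectionDAGNodes.h"
using namespace llvm;

// Hypothetical check: does N shift by a compile-time constant amount of 1?
// getConstantOperandVal() casts operand 1 to ConstantSDNode, so we guard
// with isa<> first.
static bool isShiftByOne(const SDNode *N) {
  return (N->getOpcode() == ISD::SHL || N->getOpcode() == ISD::SRL) &&
         isa<ConstantSDNode>(N->getOperand(1)) &&
         N->getConstantOperandVal(1) == 1;
}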
1578
1579class ConstantFPSDNode : public SDNode {
1580 friend class SelectionDAG;
1581
1582 const ConstantFP *Value;
1583
1584 ConstantFPSDNode(bool isTarget, const ConstantFP *val, EVT VT)
1585 : SDNode(isTarget ? ISD::TargetConstantFP : ISD::ConstantFP, 0,
1586 DebugLoc(), getSDVTList(VT)),
1587 Value(val) {}
1588
1589public:
1590 const APFloat& getValueAPF() const { return Value->getValueAPF(); }
1591 const ConstantFP *getConstantFPValue() const { return Value; }
1592
1593 /// Return true if the value is positive or negative zero.
1594 bool isZero() const { return Value->isZero(); }
1595
1596 /// Return true if the value is a NaN.
1597 bool isNaN() const { return Value->isNaN(); }
1598
1599 /// Return true if the value is an infinity
1600 bool isInfinity() const { return Value->isInfinity(); }
1601
1602 /// Return true if the value is negative.
1603 bool isNegative() const { return Value->isNegative(); }
1604
1605 /// We don't rely on operator== working on double values, as
1606 /// it returns true for things that are clearly not equal, like -0.0 and 0.0.
1607 /// As such, this method can be used to do an exact bit-for-bit comparison of
1608 /// two floating point values.
1609
1610 /// We leave the version with the double argument here because it's just so
1611 /// convenient to write "2.0" and the like. Without this function we'd
1612 /// have to duplicate its logic everywhere it's called.
1613 bool isExactlyValue(double V) const {
1614 return Value->getValueAPF().isExactlyValue(V);
1615 }
1616 bool isExactlyValue(const APFloat& V) const;
1617
1618 static bool isValueValidForType(EVT VT, const APFloat& Val);
1619
1620 static bool classof(const SDNode *N) {
1621 return N->getOpcode() == ISD::ConstantFP ||
1622 N->getOpcode() == ISD::TargetConstantFP;
1623 }
1624};
1625
1626/// Returns true if \p V is a constant integer zero.
1627bool isNullConstant(SDValue V);
1628
1629/// Returns true if \p V is an FP constant with a value of positive zero.
1630bool isNullFPConstant(SDValue V);
1631
1632/// Returns true if \p V is an integer constant with all bits set.
1633bool isAllOnesConstant(SDValue V);
1634
1635/// Returns true if \p V is a constant integer one.
1636bool isOneConstant(SDValue V);
1637
1638/// Return the non-bitcasted source operand of \p V if it exists.
1639/// If \p V is not a bitcasted value, it is returned as-is.
1640SDValue peekThroughBitcasts(SDValue V);
1641
1642/// Return the non-bitcasted and one-use source operand of \p V if it exists.
1643/// If \p V is not a bitcasted one-use value, it is returned as-is.
1644SDValue peekThroughOneUseBitcasts(SDValue V);
1645
1646/// Return the non-extracted vector source operand of \p V if it exists.
1647/// If \p V is not an extracted subvector, it is returned as-is.
1648SDValue peekThroughExtractSubvectors(SDValue V);
1649
1650/// Returns true if \p V is a bitwise not operation. Assumes that an all ones
1651/// constant is canonicalized to be operand 1.
1652bool isBitwiseNot(SDValue V, bool AllowUndefs = false);
1653
1654/// Returns the SDNode if it is a constant splat BuildVector or constant int.
1655ConstantSDNode *isConstOrConstSplat(SDValue N, bool AllowUndefs = false,
1656 bool AllowTruncation = false);
1657
1658/// Returns the SDNode if it is a demanded constant splat BuildVector or
1659/// constant int.
1660ConstantSDNode *isConstOrConstSplat(SDValue N, const APInt &DemandedElts,
1661 bool AllowUndefs = false,
1662 bool AllowTruncation = false);
1663
1664/// Returns the SDNode if it is a constant splat BuildVector or constant float.
1665ConstantFPSDNode *isConstOrConstSplatFP(SDValue N, bool AllowUndefs = false);
1666
1667/// Returns the SDNode if it is a demanded constant splat BuildVector or
1668/// constant float.
1669ConstantFPSDNode *isConstOrConstSplatFP(SDValue N, const APInt &DemandedElts,
1670 bool AllowUndefs = false);
1671
1672/// Return true if the value is a constant 0 integer or a splatted vector of
1673/// a constant 0 integer (with no undefs by default).
1674/// Build vector implicit truncation is not an issue for null values.
1675bool isNullOrNullSplat(SDValue V, bool AllowUndefs = false);
1676
1677/// Return true if the value is a constant 1 integer or a splatted vector of a
1678/// constant 1 integer (with no undefs).
1679/// Does not permit build vector implicit truncation.
1680bool isOneOrOneSplat(SDValue V, bool AllowUndefs = false);
1681
1682/// Return true if the value is a constant -1 integer or a splatted vector of a
1683/// constant -1 integer (with no undefs).
1684/// Does not permit build vector implicit truncation.
1685bool isAllOnesOrAllOnesSplat(SDValue V, bool AllowUndefs = false);
1686
1687/// Return true if \p V is either an integer or FP constant.
1688inline bool isIntOrFPConstant(SDValue V) {
1689 return isa<ConstantSDNode>(V) || isa<ConstantFPSDNode>(V);
1690}
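A minimal sketch of how the constant predicates above are commonly used in a combine (assuming LLVM headers; the surrounding combine is hypothetical):

#include "llvm/CodeGen/SelectionDAGNodes.h"
using namespace llvm;

// Hypothetical peephole: recognize (or X, 0) / (or 0, X) and return X.
// isNullOrNullSplat() also matches a splatted vector of zeros.
static SDValue simplifyTrivialOr(SDValue N0, SDValue N1) {
  if (isNullOrNullSplat(N0))
    return N1;
  if (isNullOrNullSplat(N1))
    return N0;
  return SDValue(); // no simplification
}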
1691
1692class GlobalAddressSDNode : public SDNode {
1693 friend class SelectionDAG;
1694
1695 const GlobalValue *TheGlobal;
1696 int64_t Offset;
1697 unsigned TargetFlags;
1698
1699 GlobalAddressSDNode(unsigned Opc, unsigned Order, const DebugLoc &DL,
1700 const GlobalValue *GA, EVT VT, int64_t o,
1701 unsigned TF);
1702
1703public:
1704 const GlobalValue *getGlobal() const { return TheGlobal; }
1705 int64_t getOffset() const { return Offset; }
1706 unsigned getTargetFlags() const { return TargetFlags; }
1707 // Return the address space this GlobalAddress belongs to.
1708 unsigned getAddressSpace() const;
1709
1710 static bool classof(const SDNode *N) {
1711 return N->getOpcode() == ISD::GlobalAddress ||
1712 N->getOpcode() == ISD::TargetGlobalAddress ||
1713 N->getOpcode() == ISD::GlobalTLSAddress ||
1714 N->getOpcode() == ISD::TargetGlobalTLSAddress;
1715 }
1716};
1717
1718class FrameIndexSDNode : public SDNode {
1719 friend class SelectionDAG;
1720
1721 int FI;
1722
1723 FrameIndexSDNode(int fi, EVT VT, bool isTarg)
1724 : SDNode(isTarg ? ISD::TargetFrameIndex : ISD::FrameIndex,
1725 0, DebugLoc(), getSDVTList(VT)), FI(fi) {
1726 }
1727
1728public:
1729 int getIndex() const { return FI; }
1730
1731 static bool classof(const SDNode *N) {
1732 return N->getOpcode() == ISD::FrameIndex ||
1733 N->getOpcode() == ISD::TargetFrameIndex;
1734 }
1735};
1736
1737/// This SDNode is used for LIFETIME_START/LIFETIME_END values, which indicate
1738/// the offset and size that are started/ended in the underlying FrameIndex.
1739class LifetimeSDNode : public SDNode {
1740 friend class SelectionDAG;
1741 int64_t Size;
1742 int64_t Offset; // -1 if offset is unknown.
1743
1744 LifetimeSDNode(unsigned Opcode, unsigned Order, const DebugLoc &dl,
1745 SDVTList VTs, int64_t Size, int64_t Offset)
1746 : SDNode(Opcode, Order, dl, VTs), Size(Size), Offset(Offset) {}
1747public:
1748 int64_t getFrameIndex() const {
1749 return cast<FrameIndexSDNode>(getOperand(1))->getIndex();
1750 }
1751
1752 bool hasOffset() const { return Offset >= 0; }
1753 int64_t getOffset() const {
1754 assert(hasOffset() && "offset is unknown");
1755 return Offset;
1756 }
1757 int64_t getSize() const {
1758 assert(hasOffset() && "offset is unknown");
1759 return Size;
1760 }
1761
1762 // Methods to support isa and dyn_cast
1763 static bool classof(const SDNode *N) {
1764 return N->getOpcode() == ISD::LIFETIME_START ||
1765 N->getOpcode() == ISD::LIFETIME_END;
1766 }
1767};
1768
1769/// This SDNode is used for PSEUDO_PROBE values, which hold the function GUID and
1770/// the index of the basic block being probed. A pseudo probe serves as a
1771/// placeholder and will be removed at the end of compilation. It has no
1772/// operands because we do not want instruction selection to deal with any.
1773class PseudoProbeSDNode : public SDNode {
1774 friend class SelectionDAG;
1775 uint64_t Guid;
1776 uint64_t Index;
1777 uint32_t Attributes;
1778
1779 PseudoProbeSDNode(unsigned Opcode, unsigned Order, const DebugLoc &Dl,
1780 SDVTList VTs, uint64_t Guid, uint64_t Index, uint32_t Attr)
1781 : SDNode(Opcode, Order, Dl, VTs), Guid(Guid), Index(Index),
1782 Attributes(Attr) {}
1783
1784public:
1785 uint64_t getGuid() const { return Guid; }
1786 uint64_t getIndex() const { return Index; }
1787 uint32_t getAttributes() const { return Attributes; }
1788
1789 // Methods to support isa and dyn_cast
1790 static bool classof(const SDNode *N) {
1791 return N->getOpcode() == ISD::PSEUDO_PROBE;
1792 }
1793};
1794
1795class JumpTableSDNode : public SDNode {
1796 friend class SelectionDAG;
1797
1798 int JTI;
1799 unsigned TargetFlags;
1800
1801 JumpTableSDNode(int jti, EVT VT, bool isTarg, unsigned TF)
1802 : SDNode(isTarg ? ISD::TargetJumpTable : ISD::JumpTable,
1803 0, DebugLoc(), getSDVTList(VT)), JTI(jti), TargetFlags(TF) {
1804 }
1805
1806public:
1807 int getIndex() const { return JTI; }
1808 unsigned getTargetFlags() const { return TargetFlags; }
1809
1810 static bool classof(const SDNode *N) {
1811 return N->getOpcode() == ISD::JumpTable ||
1812 N->getOpcode() == ISD::TargetJumpTable;
1813 }
1814};
1815
1816class ConstantPoolSDNode : public SDNode {
1817 friend class SelectionDAG;
1818
1819 union {
1820 const Constant *ConstVal;
1821 MachineConstantPoolValue *MachineCPVal;
1822 } Val;
1823 int Offset; // It's a MachineConstantPoolValue if top bit is set.
1824 Align Alignment; // Minimum alignment requirement of CP.
1825 unsigned TargetFlags;
1826
1827 ConstantPoolSDNode(bool isTarget, const Constant *c, EVT VT, int o,
1828 Align Alignment, unsigned TF)
1829 : SDNode(isTarget ? ISD::TargetConstantPool : ISD::ConstantPool, 0,
1830 DebugLoc(), getSDVTList(VT)),
1831 Offset(o), Alignment(Alignment), TargetFlags(TF) {
1832 assert(Offset >= 0 && "Offset is too large");
1833 Val.ConstVal = c;
1834 }
1835
1836 ConstantPoolSDNode(bool isTarget, MachineConstantPoolValue *v, EVT VT, int o,
1837 Align Alignment, unsigned TF)
1838 : SDNode(isTarget ? ISD::TargetConstantPool : ISD::ConstantPool, 0,
1839 DebugLoc(), getSDVTList(VT)),
1840 Offset(o), Alignment(Alignment), TargetFlags(TF) {
1841 assert(Offset >= 0 && "Offset is too large");
1842 Val.MachineCPVal = v;
1843 Offset |= 1 << (sizeof(unsigned)*CHAR_BIT-1);
1844 }
1845
1846public:
1847 bool isMachineConstantPoolEntry() const {
1848 return Offset < 0;
1849 }
1850
1851 const Constant *getConstVal() const {
1852 assert(!isMachineConstantPoolEntry() && "Wrong constantpool type");
1853 return Val.ConstVal;
1854 }
1855
1856 MachineConstantPoolValue *getMachineCPVal() const {
1857 assert(isMachineConstantPoolEntry() && "Wrong constantpool type");
1858 return Val.MachineCPVal;
1859 }
1860
1861 int getOffset() const {
1862 return Offset & ~(1 << (sizeof(unsigned)*CHAR_BIT-1));
1863 }
1864
1865 // Return the alignment of this constant pool object, which is either 0 (for
1866 // default alignment) or the desired value.
1867 Align getAlign() const { return Alignment; }
1868 unsigned getTargetFlags() const { return TargetFlags; }
1869
1870 Type *getType() const;
1871
1872 static bool classof(const SDNode *N) {
1873 return N->getOpcode() == ISD::ConstantPool ||
1874 N->getOpcode() == ISD::TargetConstantPool;
1875 }
1876};
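The Offset field above doubles as a tag: the top bit marks a MachineConstantPoolValue entry and is masked off when reading the offset. A self-contained sketch of that encoding (plain C++; the names are illustrative only):

#include <cassert>
#include <climits>

// Same trick as ConstantPoolSDNode: offsets are non-negative, so the sign
// bit of the int is free to act as an "is machine constant pool entry" flag.
static const unsigned kMachineCPBit = 1u << (sizeof(unsigned) * CHAR_BIT - 1);

static int tagMachineCP(int Offset)   { return int(unsigned(Offset) | kMachineCPBit); }
static bool isMachineCP(int Tagged)   { return Tagged < 0; }
static int untaggedOffset(int Tagged) { return int(unsigned(Tagged) & ~kMachineCPBit); }

int main() {
  int Tagged = tagMachineCP(16);
  assert(isMachineCP(Tagged));
  assert(untaggedOffset(Tagged) == 16);
  assert(!isMachineCP(16));
  return 0;
}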
1877
1878/// Completely target-dependent object reference.
1879class TargetIndexSDNode : public SDNode {
1880 friend class SelectionDAG;
1881
1882 unsigned TargetFlags;
1883 int Index;
1884 int64_t Offset;
1885
1886public:
1887 TargetIndexSDNode(int Idx, EVT VT, int64_t Ofs, unsigned TF)
1888 : SDNode(ISD::TargetIndex, 0, DebugLoc(), getSDVTList(VT)),
1889 TargetFlags(TF), Index(Idx), Offset(Ofs) {}
1890
1891 unsigned getTargetFlags() const { return TargetFlags; }
1892 int getIndex() const { return Index; }
1893 int64_t getOffset() const { return Offset; }
1894
1895 static bool classof(const SDNode *N) {
1896 return N->getOpcode() == ISD::TargetIndex;
1897 }
1898};
1899
1900class BasicBlockSDNode : public SDNode {
1901 friend class SelectionDAG;
1902
1903 MachineBasicBlock *MBB;
1904
1905 /// Debug info is meaningful and potentially useful here, but we create
1906 /// blocks out of order when they're jumped to, which makes it a bit
1907 /// harder. Let's see if we need it first.
1908 explicit BasicBlockSDNode(MachineBasicBlock *mbb)
1909 : SDNode(ISD::BasicBlock, 0, DebugLoc(), getSDVTList(MVT::Other)), MBB(mbb)
1910 {}
1911
1912public:
1913 MachineBasicBlock *getBasicBlock() const { return MBB; }
1914
1915 static bool classof(const SDNode *N) {
1916 return N->getOpcode() == ISD::BasicBlock;
1917 }
1918};
1919
1920/// A "pseudo-class" with methods for operating on BUILD_VECTORs.
1921class BuildVectorSDNode : public SDNode {
1922public:
1923 // These are constructed as SDNodes and then cast to BuildVectorSDNodes.
1924 explicit BuildVectorSDNode() = delete;
1925
1926 /// Check if this is a constant splat, and if so, find the
1927 /// smallest element size that splats the vector. If MinSplatBits is
1928 /// nonzero, the element size must be at least that large. Note that the
1929 /// splat element may be the entire vector (i.e., a one element vector).
1930 /// Returns the splat element value in SplatValue. Any undefined bits in
1931 /// that value are zero, and the corresponding bits in the SplatUndef mask
1932 /// are set. The SplatBitSize value is set to the splat element size in
1933 /// bits. HasAnyUndefs is set to true if any bits in the vector are
1934 /// undefined. isBigEndian describes the endianness of the target.
1935 bool isConstantSplat(APInt &SplatValue, APInt &SplatUndef,
1936 unsigned &SplatBitSize, bool &HasAnyUndefs,
1937 unsigned MinSplatBits = 0,
1938 bool isBigEndian = false) const;
1939
1940 /// Returns the demanded splatted value or a null value if this is not a
1941 /// splat.
1942 ///
1943 /// The DemandedElts mask indicates the elements that must be in the splat.
1944 /// If passed a non-null UndefElements bitvector, it will resize it to match
1945 /// the vector width and set the bits where elements are undef.
1946 SDValue getSplatValue(const APInt &DemandedElts,
1947 BitVector *UndefElements = nullptr) const;
1948
1949 /// Returns the splatted value or a null value if this is not a splat.
1950 ///
1951 /// If passed a non-null UndefElements bitvector, it will resize it to match
1952 /// the vector width and set the bits where elements are undef.
1953 SDValue getSplatValue(BitVector *UndefElements = nullptr) const;
1954
1955 /// Find the shortest repeating sequence of values in the build vector.
1956 ///
1957 /// e.g. { u, X, u, X, u, u, X, u } -> { X }
1958 /// { X, Y, u, Y, u, u, X, u } -> { X, Y }
1959 ///
1960 /// Currently this must be a power-of-2 build vector.
1961 /// The DemandedElts mask indicates the elements that must be present,
1962 /// undemanded elements in Sequence may be null (SDValue()). If passed a
1963 /// non-null UndefElements bitvector, it will resize it to match the original
1964 /// vector width and set the bits where elements are undef. If result is
1965 /// false, Sequence will be empty.
1966 bool getRepeatedSequence(const APInt &DemandedElts,
1967 SmallVectorImpl<SDValue> &Sequence,
1968 BitVector *UndefElements = nullptr) const;
1969
1970 /// Find the shortest repeating sequence of values in the build vector.
1971 ///
1972 /// e.g. { u, X, u, X, u, u, X, u } -> { X }
1973 /// { X, Y, u, Y, u, u, X, u } -> { X, Y }
1974 ///
1975 /// Currently this must be a power-of-2 build vector.
1976 /// If passed a non-null UndefElements bitvector, it will resize it to match
1977 /// the original vector width and set the bits where elements are undef.
1978 /// If result is false, Sequence will be empty.
1979 bool getRepeatedSequence(SmallVectorImpl<SDValue> &Sequence,
1980 BitVector *UndefElements = nullptr) const;
1981
1982 /// Returns the demanded splatted constant or null if this is not a constant
1983 /// splat.
1984 ///
1985 /// The DemandedElts mask indicates the elements that must be in the splat.
1986 /// If passed a non-null UndefElements bitvector, it will resize it to match
1987 /// the vector width and set the bits where elements are undef.
1988 ConstantSDNode *
1989 getConstantSplatNode(const APInt &DemandedElts,
1990 BitVector *UndefElements = nullptr) const;
1991
1992 /// Returns the splatted constant or null if this is not a constant
1993 /// splat.
1994 ///
1995 /// If passed a non-null UndefElements bitvector, it will resize it to match
1996 /// the vector width and set the bits where elements are undef.
1997 ConstantSDNode *
1998 getConstantSplatNode(BitVector *UndefElements = nullptr) const;
1999
2000 /// Returns the demanded splatted constant FP or null if this is not a
2001 /// constant FP splat.
2002 ///
2003 /// The DemandedElts mask indicates the elements that must be in the splat.
2004 /// If passed a non-null UndefElements bitvector, it will resize it to match
2005 /// the vector width and set the bits where elements are undef.
2006 ConstantFPSDNode *
2007 getConstantFPSplatNode(const APInt &DemandedElts,
2008 BitVector *UndefElements = nullptr) const;
2009
2010 /// Returns the splatted constant FP or null if this is not a constant
2011 /// FP splat.
2012 ///
2013 /// If passed a non-null UndefElements bitvector, it will resize it to match
2014 /// the vector width and set the bits where elements are undef.
2015 ConstantFPSDNode *
2016 getConstantFPSplatNode(BitVector *UndefElements = nullptr) const;
2017
2018 /// If this is a constant FP splat and the splatted constant FP is an
2019 /// exact power of 2, return the log base 2 integer value. Otherwise,
2020 /// return -1.
2021 ///
2022 /// The BitWidth specifies the necessary bit precision.
2023 int32_t getConstantFPSplatPow2ToLog2Int(BitVector *UndefElements,
2024 uint32_t BitWidth) const;
2025
2026 bool isConstant() const;
2027
2028 static bool classof(const SDNode *N) {
2029 return N->getOpcode() == ISD::BUILD_VECTOR;
2030 }
2031};
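A minimal sketch of querying a build vector for a constant splat, using only the isConstantSplat() signature shown above (assuming LLVM headers; the endianness handling and DAG context are elided):

#include "llvm/CodeGen/SelectionDAGNodes.h"
using namespace llvm;

// Hypothetical check: does this BUILD_VECTOR splat a value of at least
// 8 significant bits? (isBigEndian is left at its default of false here.)
static bool isByteSplat(const BuildVectorSDNode *BV, APInt &SplatValue) {
  APInt SplatUndef;
  unsigned SplatBitSize;
  bool HasAnyUndefs;
  return BV->isConstantSplat(SplatValue, SplatUndef, SplatBitSize,
                             HasAnyUndefs, /*MinSplatBits=*/8) &&
         !HasAnyUndefs;
}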
2032
2033/// An SDNode that holds an arbitrary LLVM IR Value. This is
2034/// used when the SelectionDAG needs to make a simple reference to something
2035/// in the LLVM IR representation.
2036///
2037class SrcValueSDNode : public SDNode {
2038 friend class SelectionDAG;
2039
2040 const Value *V;
2041
2042 /// Create a SrcValue for a general value.
2043 explicit SrcValueSDNode(const Value *v)
2044 : SDNode(ISD::SRCVALUE, 0, DebugLoc(), getSDVTList(MVT::Other)), V(v) {}
2045
2046public:
2047 /// Return the contained Value.
2048 const Value *getValue() const { return V; }
2049
2050 static bool classof(const SDNode *N) {
2051 return N->getOpcode() == ISD::SRCVALUE;
2052 }
2053};
2054
2055class MDNodeSDNode : public SDNode {
2056 friend class SelectionDAG;
2057
2058 const MDNode *MD;
2059
2060 explicit MDNodeSDNode(const MDNode *md)
2061 : SDNode(ISD::MDNODE_SDNODE, 0, DebugLoc(), getSDVTList(MVT::Other)), MD(md)
2062 {}
2063
2064public:
2065 const MDNode *getMD() const { return MD; }
2066
2067 static bool classof(const SDNode *N) {
2068 return N->getOpcode() == ISD::MDNODE_SDNODE;
2069 }
2070};
2071
2072class RegisterSDNode : public SDNode {
2073 friend class SelectionDAG;
2074
2075 Register Reg;
2076
2077 RegisterSDNode(Register reg, EVT VT)
2078 : SDNode(ISD::Register, 0, DebugLoc(), getSDVTList(VT)), Reg(reg) {}
2079
2080public:
2081 Register getReg() const { return Reg; }
2082
2083 static bool classof(const SDNode *N) {
2084 return N->getOpcode() == ISD::Register;
2085 }
2086};
2087
2088class RegisterMaskSDNode : public SDNode {
2089 friend class SelectionDAG;
2090
2091 // The memory for RegMask is not owned by the node.
2092 const uint32_t *RegMask;
2093
2094 RegisterMaskSDNode(const uint32_t *mask)
2095 : SDNode(ISD::RegisterMask, 0, DebugLoc(), getSDVTList(MVT::Untyped)),
2096 RegMask(mask) {}
2097
2098public:
2099 const uint32_t *getRegMask() const { return RegMask; }
2100
2101 static bool classof(const SDNode *N) {
2102 return N->getOpcode() == ISD::RegisterMask;
2103 }
2104};
2105
2106class BlockAddressSDNode : public SDNode {
2107 friend class SelectionDAG;
2108
2109 const BlockAddress *BA;
2110 int64_t Offset;
2111 unsigned TargetFlags;
2112
2113 BlockAddressSDNode(unsigned NodeTy, EVT VT, const BlockAddress *ba,
2114 int64_t o, unsigned Flags)
2115 : SDNode(NodeTy, 0, DebugLoc(), getSDVTList(VT)),
2116 BA(ba), Offset(o), TargetFlags(Flags) {}
2117
2118public:
2119 const BlockAddress *getBlockAddress() const { return BA; }
2120 int64_t getOffset() const { return Offset; }
2121 unsigned getTargetFlags() const { return TargetFlags; }
2122
2123 static bool classof(const SDNode *N) {
2124 return N->getOpcode() == ISD::BlockAddress ||
2125 N->getOpcode() == ISD::TargetBlockAddress;
2126 }
2127};
2128
2129class LabelSDNode : public SDNode {
2130 friend class SelectionDAG;
2131
2132 MCSymbol *Label;
2133
2134 LabelSDNode(unsigned Opcode, unsigned Order, const DebugLoc &dl, MCSymbol *L)
2135 : SDNode(Opcode, Order, dl, getSDVTList(MVT::Other)), Label(L) {
2136 assert(LabelSDNode::classof(this) && "not a label opcode");
2137 }
2138
2139public:
2140 MCSymbol *getLabel() const { return Label; }
2141
2142 static bool classof(const SDNode *N) {
2143 return N->getOpcode() == ISD::EH_LABEL ||
2144 N->getOpcode() == ISD::ANNOTATION_LABEL;
2145 }
2146};
2147
2148class ExternalSymbolSDNode : public SDNode {
2149 friend class SelectionDAG;
2150
2151 const char *Symbol;
2152 unsigned TargetFlags;
2153
2154 ExternalSymbolSDNode(bool isTarget, const char *Sym, unsigned TF, EVT VT)
2155 : SDNode(isTarget ? ISD::TargetExternalSymbol : ISD::ExternalSymbol, 0,
2156 DebugLoc(), getSDVTList(VT)),
2157 Symbol(Sym), TargetFlags(TF) {}
2158
2159public:
2160 const char *getSymbol() const { return Symbol; }
2161 unsigned getTargetFlags() const { return TargetFlags; }
2162
2163 static bool classof(const SDNode *N) {
2164 return N->getOpcode() == ISD::ExternalSymbol ||
2165 N->getOpcode() == ISD::TargetExternalSymbol;
2166 }
2167};
2168
2169class MCSymbolSDNode : public SDNode {
2170 friend class SelectionDAG;
2171
2172 MCSymbol *Symbol;
2173
2174 MCSymbolSDNode(MCSymbol *Symbol, EVT VT)
2175 : SDNode(ISD::MCSymbol, 0, DebugLoc(), getSDVTList(VT)), Symbol(Symbol) {}
2176
2177public:
2178 MCSymbol *getMCSymbol() const { return Symbol; }
2179
2180 static bool classof(const SDNode *N) {
2181 return N->getOpcode() == ISD::MCSymbol;
2182 }
2183};
2184
2185class CondCodeSDNode : public SDNode {
2186 friend class SelectionDAG;
2187
2188 ISD::CondCode Condition;
2189
2190 explicit CondCodeSDNode(ISD::CondCode Cond)
2191 : SDNode(ISD::CONDCODE, 0, DebugLoc(), getSDVTList(MVT::Other)),
2192 Condition(Cond) {}
2193
2194public:
2195 ISD::CondCode get() const { return Condition; }
2196
2197 static bool classof(const SDNode *N) {
2198 return N->getOpcode() == ISD::CONDCODE;
2199 }
2200};
2201
2202/// This class is used to represent EVT's, which are used
2203/// to parameterize some operations.
2204class VTSDNode : public SDNode {
2205 friend class SelectionDAG;
2206
2207 EVT ValueType;
2208
2209 explicit VTSDNode(EVT VT)
2210 : SDNode(ISD::VALUETYPE, 0, DebugLoc(), getSDVTList(MVT::Other)),
2211 ValueType(VT) {}
2212
2213public:
2214 EVT getVT() const { return ValueType; }
2215
2216 static bool classof(const SDNode *N) {
2217 return N->getOpcode() == ISD::VALUETYPE;
2218 }
2219};
2220
2221/// Base class for LoadSDNode and StoreSDNode
2222class LSBaseSDNode : public MemSDNode {
2223public:
2224 LSBaseSDNode(ISD::NodeType NodeTy, unsigned Order, const DebugLoc &dl,
2225 SDVTList VTs, ISD::MemIndexedMode AM, EVT MemVT,
2226 MachineMemOperand *MMO)
2227 : MemSDNode(NodeTy, Order, dl, VTs, MemVT, MMO) {
2228 LSBaseSDNodeBits.AddressingMode = AM;
2229 assert(getAddressingMode() == AM && "Value truncated");
2230 }
2231
2232 const SDValue &getOffset() const {
2233 return getOperand(getOpcode() == ISD::LOAD ? 2 : 3);
2234 }
2235
2236 /// Return the addressing mode for this load or store:
2237 /// unindexed, pre-inc, pre-dec, post-inc, or post-dec.
2238 ISD::MemIndexedMode getAddressingMode() const {
2239 return static_cast<ISD::MemIndexedMode>(LSBaseSDNodeBits.AddressingMode);
2240 }
2241
2242 /// Return true if this is a pre/post inc/dec load/store.
2243 bool isIndexed() const { return getAddressingMode() != ISD::UNINDEXED; }
2244
2245 /// Return true if this is NOT a pre/post inc/dec load/store.
2246 bool isUnindexed() const { return getAddressingMode() == ISD::UNINDEXED; }
2247
2248 static bool classof(const SDNode *N) {
2249 return N->getOpcode() == ISD::LOAD ||
2250 N->getOpcode() == ISD::STORE;
2251 }
2252};
2253
2254/// This class is used to represent ISD::LOAD nodes.
2255class LoadSDNode : public LSBaseSDNode {
2256 friend class SelectionDAG;
2257
2258 LoadSDNode(unsigned Order, const DebugLoc &dl, SDVTList VTs,
2259 ISD::MemIndexedMode AM, ISD::LoadExtType ETy, EVT MemVT,
2260 MachineMemOperand *MMO)
2261 : LSBaseSDNode(ISD::LOAD, Order, dl, VTs, AM, MemVT, MMO) {
2262 LoadSDNodeBits.ExtTy = ETy;
2263 assert(readMem() && "Load MachineMemOperand is not a load!");
2264 assert(!writeMem() && "Load MachineMemOperand is a store!");
2265 }
2266
2267public:
2268 /// Return whether this is a plain node,
2269 /// or one of the varieties of value-extending loads.
2270 ISD::LoadExtType getExtensionType() const {
2271 return static_cast<ISD::LoadExtType>(LoadSDNodeBits.ExtTy);
2272 }
2273
2274 const SDValue &getBasePtr() const { return getOperand(1); }
2275 const SDValue &getOffset() const { return getOperand(2); }
2276
2277 static bool classof(const SDNode *N) {
2278 return N->getOpcode() == ISD::LOAD;
2279 }
2280};
2281
2282/// This class is used to represent ISD::STORE nodes.
2283class StoreSDNode : public LSBaseSDNode {
2284 friend class SelectionDAG;
2285
2286 StoreSDNode(unsigned Order, const DebugLoc &dl, SDVTList VTs,
2287 ISD::MemIndexedMode AM, bool isTrunc, EVT MemVT,
2288 MachineMemOperand *MMO)
2289 : LSBaseSDNode(ISD::STORE, Order, dl, VTs, AM, MemVT, MMO) {
2290 StoreSDNodeBits.IsTruncating = isTrunc;
2291 assert(!readMem() && "Store MachineMemOperand is a load!");
2292 assert(writeMem() && "Store MachineMemOperand is not a store!");
2293 }
2294
2295public:
2296 /// Return true if the op does a truncation before store.
2297 /// For integers this is the same as doing a TRUNCATE and storing the result.
2298 /// For floats, it is the same as doing an FP_ROUND and storing the result.
2299 bool isTruncatingStore() const { return StoreSDNodeBits.IsTruncating; }
2300 void setTruncatingStore(bool Truncating) {
2301 StoreSDNodeBits.IsTruncating = Truncating;
2302 }
2303
2304 const SDValue &getValue() const { return getOperand(1); }
2305 const SDValue &getBasePtr() const { return getOperand(2); }
2306 const SDValue &getOffset() const { return getOperand(3); }
2307
2308 static bool classof(const SDNode *N) {
2309 return N->getOpcode() == ISD::STORE;
2310 }
2311};
2312
2313/// This base class is used to represent MLOAD and MSTORE nodes
2314class MaskedLoadStoreSDNode : public MemSDNode {
2315public:
2316 friend class SelectionDAG;
2317
2318 MaskedLoadStoreSDNode(ISD::NodeType NodeTy, unsigned Order,
2319 const DebugLoc &dl, SDVTList VTs,
2320 ISD::MemIndexedMode AM, EVT MemVT,
2321 MachineMemOperand *MMO)
2322 : MemSDNode(NodeTy, Order, dl, VTs, MemVT, MMO) {
2323 LSBaseSDNodeBits.AddressingMode = AM;
2324 assert(getAddressingMode() == AM && "Value truncated");
2325 }
2326
2327 // MaskedLoadSDNode (Chain, ptr, offset, mask, passthru)
2328 // MaskedStoreSDNode (Chain, data, ptr, offset, mask)
2329 // Mask is a vector of i1 elements
2330 const SDValue &getOffset() const {
2331 return getOperand(getOpcode() == ISD::MLOAD ? 2 : 3);
2332 }
2333 const SDValue &getMask() const {
2334 return getOperand(getOpcode() == ISD::MLOAD ? 3 : 4);
2335 }
2336
2337 /// Return the addressing mode for this load or store:
2338 /// unindexed, pre-inc, pre-dec, post-inc, or post-dec.
2339 ISD::MemIndexedMode getAddressingMode() const {
2340 return static_cast<ISD::MemIndexedMode>(LSBaseSDNodeBits.AddressingMode);
2341 }
2342
2343 /// Return true if this is a pre/post inc/dec load/store.
2344 bool isIndexed() const { return getAddressingMode() != ISD::UNINDEXED; }
2345
2346 /// Return true if this is NOT a pre/post inc/dec load/store.
2347 bool isUnindexed() const { return getAddressingMode() == ISD::UNINDEXED; }
2348
2349 static bool classof(const SDNode *N) {
2350 return N->getOpcode() == ISD::MLOAD ||
2351 N->getOpcode() == ISD::MSTORE;
2352 }
2353};
2354
2355/// This class is used to represent an MLOAD node
2356class MaskedLoadSDNode : public MaskedLoadStoreSDNode {
2357public:
2358 friend class SelectionDAG;
2359
2360 MaskedLoadSDNode(unsigned Order, const DebugLoc &dl, SDVTList VTs,
2361 ISD::MemIndexedMode AM, ISD::LoadExtType ETy,
2362 bool IsExpanding, EVT MemVT, MachineMemOperand *MMO)
2363 : MaskedLoadStoreSDNode(ISD::MLOAD, Order, dl, VTs, AM, MemVT, MMO) {
2364 LoadSDNodeBits.ExtTy = ETy;
2365 LoadSDNodeBits.IsExpanding = IsExpanding;
2366 }
2367
2368 ISD::LoadExtType getExtensionType() const {
2369 return static_cast<ISD::LoadExtType>(LoadSDNodeBits.ExtTy);
2370 }
2371
2372 const SDValue &getBasePtr() const { return getOperand(1); }
2373 const SDValue &getOffset() const { return getOperand(2); }
2374 const SDValue &getMask() const { return getOperand(3); }
2375 const SDValue &getPassThru() const { return getOperand(4); }
2376
2377 static bool classof(const SDNode *N) {
2378 return N->getOpcode() == ISD::MLOAD;
2379 }
2380
2381 bool isExpandingLoad() const { return LoadSDNodeBits.IsExpanding; }
2382};
2383
2384/// This class is used to represent an MSTORE node
2385class MaskedStoreSDNode : public MaskedLoadStoreSDNode {
2386public:
2387 friend class SelectionDAG;
2388
2389 MaskedStoreSDNode(unsigned Order, const DebugLoc &dl, SDVTList VTs,
2390 ISD::MemIndexedMode AM, bool isTrunc, bool isCompressing,
2391 EVT MemVT, MachineMemOperand *MMO)
2392 : MaskedLoadStoreSDNode(ISD::MSTORE, Order, dl, VTs, AM, MemVT, MMO) {
2393 StoreSDNodeBits.IsTruncating = isTrunc;
2394 StoreSDNodeBits.IsCompressing = isCompressing;
2395 }
2396
2397 /// Return true if the op does a truncation before store.
2398 /// For integers this is the same as doing a TRUNCATE and storing the result.
2399 /// For floats, it is the same as doing an FP_ROUND and storing the result.
2400 bool isTruncatingStore() const { return StoreSDNodeBits.IsTruncating; }
2401
2402 /// Returns true if the op does a compression to the vector before storing.
2403 /// The node contiguously stores the active elements (integers or floats)
2404 /// in src (those with their respective bit set in writemask k) to unaligned
2405 /// memory at base_addr.
2406 bool isCompressingStore() const { return StoreSDNodeBits.IsCompressing; }
2407
2408 const SDValue &getValue() const { return getOperand(1); }
2409 const SDValue &getBasePtr() const { return getOperand(2); }
2410 const SDValue &getOffset() const { return getOperand(3); }
2411 const SDValue &getMask() const { return getOperand(4); }
2412
2413 static bool classof(const SDNode *N) {
2414 return N->getOpcode() == ISD::MSTORE;
2415 }
2416};
2417
2418/// This is a base class used to represent
2419/// MGATHER and MSCATTER nodes
2420///
2421class MaskedGatherScatterSDNode : public MemSDNode {
2422public:
2423 friend class SelectionDAG;
2424
2425 MaskedGatherScatterSDNode(ISD::NodeType NodeTy, unsigned Order,
2426 const DebugLoc &dl, SDVTList VTs, EVT MemVT,
2427 MachineMemOperand *MMO, ISD::MemIndexType IndexType)
2428 : MemSDNode(NodeTy, Order, dl, VTs, MemVT, MMO) {
2429 LSBaseSDNodeBits.AddressingMode = IndexType;
2430 assert(getIndexType() == IndexType && "Value truncated");
2431 }
2432
2433 /// How is Index applied to BasePtr when computing addresses.
2434 ISD::MemIndexType getIndexType() const {
2435 return static_cast<ISD::MemIndexType>(LSBaseSDNodeBits.AddressingMode);
2436 }
2437 void setIndexType(ISD::MemIndexType IndexType) {
2438 LSBaseSDNodeBits.AddressingMode = IndexType;
2439 }
2440 bool isIndexScaled() const {
2441 return (getIndexType() == ISD::SIGNED_SCALED) ||
2442 (getIndexType() == ISD::UNSIGNED_SCALED);
2443 }
2444 bool isIndexSigned() const {
2445 return (getIndexType() == ISD::SIGNED_SCALED) ||
2446 (getIndexType() == ISD::SIGNED_UNSCALED);
2447 }
2448
2449 // In both nodes the mask is operand 2 and the base address is operand 3:
2450 // MaskedGatherSDNode (Chain, passthru, mask, base, index, scale)
2451 // MaskedScatterSDNode (Chain, value, mask, base, index, scale)
2452 // Mask is a vector of i1 elements
2453 const SDValue &getBasePtr() const { return getOperand(3); }
2454 const SDValue &getIndex() const { return getOperand(4); }
2455 const SDValue &getMask() const { return getOperand(2); }
2456 const SDValue &getScale() const { return getOperand(5); }
2457
2458 static bool classof(const SDNode *N) {
2459 return N->getOpcode() == ISD::MGATHER ||
2460 N->getOpcode() == ISD::MSCATTER;
2461 }
2462};
2463
2464/// This class is used to represent an MGATHER node
2465///
2466class MaskedGatherSDNode : public MaskedGatherScatterSDNode {
2467public:
2468 friend class SelectionDAG;
2469
2470 MaskedGatherSDNode(unsigned Order, const DebugLoc &dl, SDVTList VTs,
2471 EVT MemVT, MachineMemOperand *MMO,
2472 ISD::MemIndexType IndexType, ISD::LoadExtType ETy)
2473 : MaskedGatherScatterSDNode(ISD::MGATHER, Order, dl, VTs, MemVT, MMO,
2474 IndexType) {
2475 LoadSDNodeBits.ExtTy = ETy;
2476 }
2477
2478 const SDValue &getPassThru() const { return getOperand(1); }
2479
2480 ISD::LoadExtType getExtensionType() const {
2481 return ISD::LoadExtType(LoadSDNodeBits.ExtTy);
2482 }
2483
2484 static bool classof(const SDNode *N) {
2485 return N->getOpcode() == ISD::MGATHER;
2486 }
2487};
2488
2489/// This class is used to represent an MSCATTER node
2490///
2491class MaskedScatterSDNode : public MaskedGatherScatterSDNode {
2492public:
2493 friend class SelectionDAG;
2494
2495 MaskedScatterSDNode(unsigned Order, const DebugLoc &dl, SDVTList VTs,
2496 EVT MemVT, MachineMemOperand *MMO,
2497 ISD::MemIndexType IndexType, bool IsTrunc)
2498 : MaskedGatherScatterSDNode(ISD::MSCATTER, Order, dl, VTs, MemVT, MMO,
2499 IndexType) {
2500 StoreSDNodeBits.IsTruncating = IsTrunc;
2501 }
2502
2503 /// Return true if the op does a truncation before store.
2504 /// For integers this is the same as doing a TRUNCATE and storing the result.
2505 /// For floats, it is the same as doing an FP_ROUND and storing the result.
2506 bool isTruncatingStore() const { return StoreSDNodeBits.IsTruncating; }
2507
2508 const SDValue &getValue() const { return getOperand(1); }
2509
2510 static bool classof(const SDNode *N) {
2511 return N->getOpcode() == ISD::MSCATTER;
2512 }
2513};
2514
2515/// An SDNode that represents everything that will be needed
2516/// to construct a MachineInstr. These nodes are created during the
2517/// instruction selection proper phase.
2518///
2519/// Note that the only supported way to set the `memoperands` is by calling the
2520/// `SelectionDAG::setNodeMemRefs` function as the memory management happens
2521/// inside the DAG rather than in the node.
2522class MachineSDNode : public SDNode {
2523private:
2524 friend class SelectionDAG;
2525
2526 MachineSDNode(unsigned Opc, unsigned Order, const DebugLoc &DL, SDVTList VTs)
2527 : SDNode(Opc, Order, DL, VTs) {}
2528
2529 // We use a pointer union between a single `MachineMemOperand` pointer and
2530 // a pointer to an array of `MachineMemOperand` pointers. This is null when
2531 // the number of these is zero, the single pointer variant used when the
2532 // number is one, and the array is used for larger numbers.
2533 //
2534 // The array is allocated via the `SelectionDAG`'s allocator and so will
2535 // always live until the DAG is cleaned up and doesn't require ownership here.
2536 //
2537 // We can't use something simpler like `TinyPtrVector` here because `SDNode`
2538 // subclasses aren't managed in a conforming C++ manner. See the comments on
2539 // `SelectionDAG::MorphNodeTo` which details what all goes on, but the
2540 // constraint here is that these don't manage memory with their constructor or
2541 // destructor and can be initialized to a good state even if they start off
2542 // uninitialized.
2543 PointerUnion<MachineMemOperand *, MachineMemOperand **> MemRefs = {};
2544
2545 // Note that this could be folded into the above `MemRefs` member if doing so
2546 // is advantageous at some point. We don't need to store this in most cases.
2547 // However, at the moment this doesn't appear to make the allocation any
2548 // smaller and makes the code somewhat simpler to read.
2549 int NumMemRefs = 0;
2550
2551public:
2552 using mmo_iterator = ArrayRef<MachineMemOperand *>::const_iterator;
2553
2554 ArrayRef<MachineMemOperand *> memoperands() const {
2555 // Special case the common cases.
2556 if (NumMemRefs == 0)
2557 return {};
2558 if (NumMemRefs == 1)
2559 return makeArrayRef(MemRefs.getAddrOfPtr1(), 1);
2560
2561 // Otherwise we have an actual array.
2562 return makeArrayRef(MemRefs.get<MachineMemOperand **>(), NumMemRefs);
2563 }
2564 mmo_iterator memoperands_begin() const { return memoperands().begin(); }
2565 mmo_iterator memoperands_end() const { return memoperands().end(); }
2566 bool memoperands_empty() const { return memoperands().empty(); }
2567
2568 /// Clear out the memory reference descriptor list.
2569 void clearMemRefs() {
2570 MemRefs = nullptr;
2571 NumMemRefs = 0;
2572 }
2573
2574 static bool classof(const SDNode *N) {
2575 return N->isMachineOpcode();
2576 }
2577};
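The MemRefs member above is a common small-size optimization: no allocation for zero or one memory operand, an externally owned array otherwise. A self-contained sketch of the same 0/1/many pattern without LLVM's PointerUnion (plain C++; the names and types are illustrative only):

#include <cassert>

struct MemOp { int Id; };

union MemOpPtr {
  MemOp  *Single;
  MemOp **Many;
};

// 0/1/many storage in one pointer-sized slot plus a count, mirroring the
// MemRefs/NumMemRefs pair above: Count == 1 reads Single, Count > 1 reads an
// externally owned array through Many (no ownership is taken here).
struct MemOpList {
  MemOpPtr Ptr;
  int Count = 0;

  MemOp *get(int I) const {
    assert(I >= 0 && I < Count && "index out of range");
    return Count == 1 ? Ptr.Single : Ptr.Many[I];
  }
};

int main() {
  MemOp A{1}, B{2};
  MemOp *Arr[] = {&A, &B};
  MemOpList L;
  L.Ptr.Many = Arr;
  L.Count = 2;
  assert(L.get(0)->Id == 1 && L.get(1)->Id == 2);
  return 0;
}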
2578
2579/// An SDNode that records if a register contains a value that is guaranteed to
2580/// be aligned accordingly.
2581class AssertAlignSDNode : public SDNode {
2582 Align Alignment;
2583
2584public:
2585 AssertAlignSDNode(unsigned Order, const DebugLoc &DL, EVT VT, Align A)
2586 : SDNode(ISD::AssertAlign, Order, DL, getSDVTList(VT)), Alignment(A) {}
2587
2588 Align getAlign() const { return Alignment; }
2589
2590 static bool classof(const SDNode *N) {
2591 return N->getOpcode() == ISD::AssertAlign;
2592 }
2593};
2594
2595class SDNodeIterator {
2596 const SDNode *Node;
2597 unsigned Operand;
2598
2599 SDNodeIterator(const SDNode *N, unsigned Op) : Node(N), Operand(Op) {}
2600
2601public:
2602 using iterator_category = std::forward_iterator_tag;
2603 using value_type = SDNode;
2604 using difference_type = std::ptrdiff_t;
2605 using pointer = value_type *;
2606 using reference = value_type &;
2607
2608 bool operator==(const SDNodeIterator& x) const {
2609 return Operand == x.Operand;
2610 }
2611 bool operator!=(const SDNodeIterator& x) const { return !operator==(x); }
2612
2613 pointer operator*() const {
2614 return Node->getOperand(Operand).getNode();
2615 }
2616 pointer operator->() const { return operator*(); }
2617
2618 SDNodeIterator& operator++() { // Preincrement
2619 ++Operand;
2620 return *this;
2621 }
2622 SDNodeIterator operator++(int) { // Postincrement
2623 SDNodeIterator tmp = *this; ++*this; return tmp;
2624 }
2625 size_t operator-(SDNodeIterator Other) const {
2626 assert(Node == Other.Node &&
2627        "Cannot compare iterators of two different nodes!");
2628 return Operand - Other.Operand;
2629 }
2630
2631 static SDNodeIterator begin(const SDNode *N) { return SDNodeIterator(N, 0); }
2632 static SDNodeIterator end (const SDNode *N) {
2633 return SDNodeIterator(N, N->getNumOperands());
2634 }
2635
2636 unsigned getOperand() const { return Operand; }
2637 const SDNode *getNode() const { return Node; }
2638};
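A minimal sketch of walking a node's operand children with the iterator above (assuming LLVM headers; the node pointer comes from a hypothetical caller):

#include "llvm/CodeGen/SelectionDAGNodes.h"
using namespace llvm;

// Count how many operand children of N are themselves constants.
static unsigned countConstantChildren(const SDNode *N) {
  unsigned Count = 0;
  for (SDNodeIterator I = SDNodeIterator::begin(N),
                      E = SDNodeIterator::end(N);
       I != E; ++I)
    if (isa<ConstantSDNode>(*I)) // *I yields the child SDNode*
      ++Count;
  return Count;
}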
2639
2640template <> struct GraphTraits<SDNode*> {
2641 using NodeRef = SDNode *;
2642 using ChildIteratorType = SDNodeIterator;
2643
2644 static NodeRef getEntryNode(SDNode *N) { return N; }
2645
2646 static ChildIteratorType child_begin(NodeRef N) {
2647 return SDNodeIterator::begin(N);
2648 }
2649
2650 static ChildIteratorType child_end(NodeRef N) {
2651 return SDNodeIterator::end(N);
2652 }
2653};
2654
2655/// A representation of the largest SDNode, for use in sizeof().
2656///
2657/// This needs to be a union because the largest node differs on 32 bit systems
2658/// with 4 and 8 byte pointer alignment, respectively.
2659using LargestSDNode = AlignedCharArrayUnion<AtomicSDNode, TargetIndexSDNode,
2660 BlockAddressSDNode,
2661 GlobalAddressSDNode,
2662 PseudoProbeSDNode>;
2663
2664/// The SDNode class with the greatest alignment requirement.
2665using MostAlignedSDNode = GlobalAddressSDNode;
2666
2667namespace ISD {
2668
2669 /// Returns true if the specified node is a non-extending and unindexed load.
2670 inline bool isNormalLoad(const SDNode *N) {
2671 const LoadSDNode *Ld = dyn_cast<LoadSDNode>(N);
2672 return Ld && Ld->getExtensionType() == ISD::NON_EXTLOAD &&
2673 Ld->getAddressingMode() == ISD::UNINDEXED;
2674 }
2675
2676 /// Returns true if the specified node is a non-extending load.
2677 inline bool isNON_EXTLoad(const SDNode *N) {
2678 return isa<LoadSDNode>(N) &&
2679 cast<LoadSDNode>(N)->getExtensionType() == ISD::NON_EXTLOAD;
2680 }
2681
2682 /// Returns true if the specified node is an EXTLOAD.
2683 inline bool isEXTLoad(const SDNode *N) {
2684 return isa<LoadSDNode>(N) &&
2685 cast<LoadSDNode>(N)->getExtensionType() == ISD::EXTLOAD;
2686 }
2687
2688 /// Returns true if the specified node is a SEXTLOAD.
2689 inline bool isSEXTLoad(const SDNode *N) {
2690 return isa<LoadSDNode>(N) &&
2691 cast<LoadSDNode>(N)->getExtensionType() == ISD::SEXTLOAD;
2692 }
2693
2694 /// Returns true if the specified node is a ZEXTLOAD.
2695 inline bool isZEXTLoad(const SDNode *N) {
2696 return isa<LoadSDNode>(N) &&
2697 cast<LoadSDNode>(N)->getExtensionType() == ISD::ZEXTLOAD;
2698 }
2699
2700 /// Returns true if the specified node is an unindexed load.
2701 inline bool isUNINDEXEDLoad(const SDNode *N) {
2702 return isa<LoadSDNode>(N) &&
2703 cast<LoadSDNode>(N)->getAddressingMode() == ISD::UNINDEXED;
2704 }
2705
2706 /// Returns true if the specified node is a non-truncating
2707 /// and unindexed store.
2708 inline bool isNormalStore(const SDNode *N) {
2709 const StoreSDNode *St = dyn_cast<StoreSDNode>(N);
2710 return St && !St->isTruncatingStore() &&
2711 St->getAddressingMode() == ISD::UNINDEXED;
2712 }
2713
2714 /// Returns true if the specified node is a non-truncating store.
2715 inline bool isNON_TRUNCStore(const SDNode *N) {
2716 return isa<StoreSDNode>(N) && !cast<StoreSDNode>(N)->isTruncatingStore();
2717 }
2718
2719 /// Returns true if the specified node is a truncating store.
2720 inline bool isTRUNCStore(const SDNode *N) {
2721 return isa<StoreSDNode>(N) && cast<StoreSDNode>(N)->isTruncatingStore();
2722 }
2723
2724 /// Returns true if the specified node is an unindexed store.
2725 inline bool isUNINDEXEDStore(const SDNode *N) {
2726 return isa<StoreSDNode>(N) &&
2727 cast<StoreSDNode>(N)->getAddressingMode() == ISD::UNINDEXED;
2728 }
2729
2730 /// Attempt to match a unary predicate against a scalar/splat constant or
2731 /// every element of a constant BUILD_VECTOR.
2732 /// If AllowUndefs is true, then UNDEF elements will pass nullptr to Match.
2733 bool matchUnaryPredicate(SDValue Op,
2734 std::function<bool(ConstantSDNode *)> Match,
2735 bool AllowUndefs = false);
2736
2737 /// Attempt to match a binary predicate against a pair of scalar/splat
2738 /// constants or every element of a pair of constant BUILD_VECTORs.
2739 /// If AllowUndefs is true, then UNDEF elements will pass nullptr to Match.
2740 /// If AllowTypeMismatch is true then RetType + ArgTypes don't need to match.
2741 bool matchBinaryPredicate(
2742 SDValue LHS, SDValue RHS,
2743 std::function<bool(ConstantSDNode *, ConstantSDNode *)> Match,
2744 bool AllowUndefs = false, bool AllowTypeMismatch = false);
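A minimal sketch of matchUnaryPredicate() as declared above (assuming LLVM headers; the operand comes from a hypothetical combine):

#include "llvm/CodeGen/SelectionDAGNodes.h"
using namespace llvm;

// Hypothetical query: is Op a constant (or constant splat / BUILD_VECTOR)
// whose every element is a power of two?
static bool isPowerOf2Constant(SDValue Op) {
  return ISD::matchUnaryPredicate(Op, [](ConstantSDNode *C) {
    return C && C->getAPIntValue().isPowerOf2();
  });
}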
2745
2746 /// Returns true if the specified value is the overflow result from one
2747 /// of the overflow intrinsic nodes.
2748 inline bool isOverflowIntrOpRes(SDValue Op) {
2749 unsigned Opc = Op.getOpcode();
2750 return (Op.getResNo() == 1 &&
2751 (Opc == ISD::SADDO || Opc == ISD::UADDO || Opc == ISD::SSUBO ||
2752 Opc == ISD::USUBO || Opc == ISD::SMULO || Opc == ISD::UMULO));
2753 }
2754
2755} // end namespace ISD
2756
2757} // end namespace llvm
2758
2759#endif // LLVM_CODEGEN_SELECTIONDAGNODES_H