Bug Summary

File: llvm/lib/Target/SystemZ/SystemZISelLowering.cpp
Warning: line 761, column 36
The result of the left shift is undefined due to shifting by '18446744073709551615', which is greater or equal to the width of type 'uint64_t'
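
Background on the warning: the shift count comes from llvm::findFirstSet() (llvm/Support/MathExtras.h), which with its default ZB_Max zero-behavior returns std::numeric_limits<uint64_t>::max() when its argument is zero. If SplatBitsZ were 0, the shift count at line 761 would be 18446744073709551615, and shifting a uint64_t by 64 or more bits is undefined behavior in C++. On the path reported here SplatBitsZ should not actually be zero (zero and negative zero immediates are filtered out in isFPImmLegal before the constructor runs), so this is likely a false positive, but the analyzer cannot see that invariant. The minimal sketch below reproduces the hazard and shows one possible guard; it is illustrative only — findFirstSetOrMax and lowerUndefMask are stand-in names, and the guard is an assumption, not the upstream LLVM code or fix.

    #include <cstdint>
    #include <limits>

    // Stand-in for llvm::findFirstSet(Val) with ZB_Max: index of the lowest
    // set bit, or std::numeric_limits<uint64_t>::max() when Val is 0.
    static uint64_t findFirstSetOrMax(uint64_t Val) {
      if (Val == 0)
        return std::numeric_limits<uint64_t>::max();
      uint64_t Index = 0;
      while ((Val & 1) == 0) {
        Val >>= 1;
        ++Index;
      }
      return Index;
    }

    // Mirrors the expression flagged at line 761. The early return is a
    // hypothetical way to avoid the undefined shift, not the actual fix.
    static uint64_t lowerUndefMask(uint64_t SplatUndefZ, uint64_t SplatBitsZ) {
      uint64_t Shift = findFirstSetOrMax(SplatBitsZ);
      if (Shift >= 64)   // shift counts >= 64 are UB for uint64_t
        return 0;
      return SplatUndefZ & ((uint64_t(1) << Shift) - 1);
    }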

Annotated Source Code

clang -cc1 -triple x86_64-pc-linux-gnu -analyze -disable-free -disable-llvm-verifier -discard-value-names -main-file-name SystemZISelLowering.cpp -analyzer-store=region -analyzer-opt-analyze-nested-blocks -analyzer-checker=core -analyzer-checker=apiModeling -analyzer-checker=unix -analyzer-checker=deadcode -analyzer-checker=cplusplus -analyzer-checker=security.insecureAPI.UncheckedReturn -analyzer-checker=security.insecureAPI.getpw -analyzer-checker=security.insecureAPI.gets -analyzer-checker=security.insecureAPI.mktemp -analyzer-checker=security.insecureAPI.mkstemp -analyzer-checker=security.insecureAPI.vfork -analyzer-checker=nullability.NullPassedToNonnull -analyzer-checker=nullability.NullReturnedFromNonnull -analyzer-output plist -w -setup-static-analyzer -analyzer-config-compatibility-mode=true -mrelocation-model pic -pic-level 2 -mthread-model posix -mframe-pointer=none -fmath-errno -fno-rounding-math -masm-verbose -mconstructor-aliases -munwind-tables -target-cpu x86-64 -dwarf-column-info -fno-split-dwarf-inlining -debugger-tuning=gdb -ffunction-sections -fdata-sections -resource-dir /usr/lib/llvm-10/lib/clang/10.0.0 -D _DEBUG -D _GNU_SOURCE -D __STDC_CONSTANT_MACROS -D __STDC_FORMAT_MACROS -D __STDC_LIMIT_MACROS -I /build/llvm-toolchain-snapshot-10~++20200112100611+7fa5290d5bd/build-llvm/lib/Target/SystemZ -I /build/llvm-toolchain-snapshot-10~++20200112100611+7fa5290d5bd/llvm/lib/Target/SystemZ -I /build/llvm-toolchain-snapshot-10~++20200112100611+7fa5290d5bd/build-llvm/include -I /build/llvm-toolchain-snapshot-10~++20200112100611+7fa5290d5bd/llvm/include -U NDEBUG -internal-isystem /usr/lib/gcc/x86_64-linux-gnu/6.3.0/../../../../include/c++/6.3.0 -internal-isystem /usr/lib/gcc/x86_64-linux-gnu/6.3.0/../../../../include/x86_64-linux-gnu/c++/6.3.0 -internal-isystem /usr/lib/gcc/x86_64-linux-gnu/6.3.0/../../../../include/x86_64-linux-gnu/c++/6.3.0 -internal-isystem /usr/lib/gcc/x86_64-linux-gnu/6.3.0/../../../../include/c++/6.3.0/backward -internal-isystem /usr/local/include -internal-isystem /usr/lib/llvm-10/lib/clang/10.0.0/include -internal-externc-isystem /usr/include/x86_64-linux-gnu -internal-externc-isystem /include -internal-externc-isystem /usr/include -O2 -Wno-unused-parameter -Wwrite-strings -Wno-missing-field-initializers -Wno-long-long -Wno-maybe-uninitialized -Wno-comment -std=c++14 -fdeprecated-macro -fdebug-compilation-dir /build/llvm-toolchain-snapshot-10~++20200112100611+7fa5290d5bd/build-llvm/lib/Target/SystemZ -fdebug-prefix-map=/build/llvm-toolchain-snapshot-10~++20200112100611+7fa5290d5bd=. -ferror-limit 19 -fmessage-length 0 -fvisibility-inlines-hidden -stack-protector 2 -fgnuc-version=4.2.1 -fobjc-runtime=gcc -fdiagnostics-show-option -vectorize-loops -vectorize-slp -analyzer-output=html -analyzer-config stable-report-filename=true -faddrsig -o /tmp/scan-build-2020-01-13-084841-49055-1 -x c++ /build/llvm-toolchain-snapshot-10~++20200112100611+7fa5290d5bd/llvm/lib/Target/SystemZ/SystemZISelLowering.cpp

/build/llvm-toolchain-snapshot-10~++20200112100611+7fa5290d5bd/llvm/lib/Target/SystemZ/SystemZISelLowering.cpp

1//===-- SystemZISelLowering.cpp - SystemZ DAG lowering implementation -----===//
2//
3// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
4// See https://llvm.org/LICENSE.txt for license information.
5// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
6//
7//===----------------------------------------------------------------------===//
8//
9// This file implements the SystemZTargetLowering class.
10//
11//===----------------------------------------------------------------------===//
12
13#include "SystemZISelLowering.h"
14#include "SystemZCallingConv.h"
15#include "SystemZConstantPoolValue.h"
16#include "SystemZMachineFunctionInfo.h"
17#include "SystemZTargetMachine.h"
18#include "llvm/CodeGen/CallingConvLower.h"
19#include "llvm/CodeGen/MachineInstrBuilder.h"
20#include "llvm/CodeGen/MachineRegisterInfo.h"
21#include "llvm/CodeGen/TargetLoweringObjectFileImpl.h"
22#include "llvm/IR/IntrinsicInst.h"
23#include "llvm/IR/Intrinsics.h"
24#include "llvm/IR/IntrinsicsS390.h"
25#include "llvm/Support/CommandLine.h"
26#include "llvm/Support/KnownBits.h"
27#include <cctype>
28
29using namespace llvm;
30
31#define DEBUG_TYPE "systemz-lower"
32
33namespace {
34// Represents information about a comparison.
35struct Comparison {
36 Comparison(SDValue Op0In, SDValue Op1In, SDValue ChainIn)
37 : Op0(Op0In), Op1(Op1In), Chain(ChainIn),
38 Opcode(0), ICmpType(0), CCValid(0), CCMask(0) {}
39
40 // The operands to the comparison.
41 SDValue Op0, Op1;
42
43 // Chain if this is a strict floating-point comparison.
44 SDValue Chain;
45
46 // The opcode that should be used to compare Op0 and Op1.
47 unsigned Opcode;
48
49 // A SystemZICMP value. Only used for integer comparisons.
50 unsigned ICmpType;
51
52 // The mask of CC values that Opcode can produce.
53 unsigned CCValid;
54
55 // The mask of CC values for which the original condition is true.
56 unsigned CCMask;
57};
58} // end anonymous namespace
59
60// Classify VT as either 32 or 64 bit.
61static bool is32Bit(EVT VT) {
62 switch (VT.getSimpleVT().SimpleTy) {
63 case MVT::i32:
64 return true;
65 case MVT::i64:
66 return false;
67 default:
 68 llvm_unreachable("Unsupported type");
69 }
70}
71
72// Return a version of MachineOperand that can be safely used before the
73// final use.
74static MachineOperand earlyUseOperand(MachineOperand Op) {
75 if (Op.isReg())
76 Op.setIsKill(false);
77 return Op;
78}
79
80SystemZTargetLowering::SystemZTargetLowering(const TargetMachine &TM,
81 const SystemZSubtarget &STI)
82 : TargetLowering(TM), Subtarget(STI) {
83 MVT PtrVT = MVT::getIntegerVT(8 * TM.getPointerSize(0));
84
85 // Set up the register classes.
86 if (Subtarget.hasHighWord())
87 addRegisterClass(MVT::i32, &SystemZ::GRX32BitRegClass);
88 else
89 addRegisterClass(MVT::i32, &SystemZ::GR32BitRegClass);
90 addRegisterClass(MVT::i64, &SystemZ::GR64BitRegClass);
91 if (Subtarget.hasVector()) {
92 addRegisterClass(MVT::f32, &SystemZ::VR32BitRegClass);
93 addRegisterClass(MVT::f64, &SystemZ::VR64BitRegClass);
94 } else {
95 addRegisterClass(MVT::f32, &SystemZ::FP32BitRegClass);
96 addRegisterClass(MVT::f64, &SystemZ::FP64BitRegClass);
97 }
98 if (Subtarget.hasVectorEnhancements1())
99 addRegisterClass(MVT::f128, &SystemZ::VR128BitRegClass);
100 else
101 addRegisterClass(MVT::f128, &SystemZ::FP128BitRegClass);
102
103 if (Subtarget.hasVector()) {
104 addRegisterClass(MVT::v16i8, &SystemZ::VR128BitRegClass);
105 addRegisterClass(MVT::v8i16, &SystemZ::VR128BitRegClass);
106 addRegisterClass(MVT::v4i32, &SystemZ::VR128BitRegClass);
107 addRegisterClass(MVT::v2i64, &SystemZ::VR128BitRegClass);
108 addRegisterClass(MVT::v4f32, &SystemZ::VR128BitRegClass);
109 addRegisterClass(MVT::v2f64, &SystemZ::VR128BitRegClass);
110 }
111
112 // Compute derived properties from the register classes
113 computeRegisterProperties(Subtarget.getRegisterInfo());
114
115 // Set up special registers.
116 setStackPointerRegisterToSaveRestore(SystemZ::R15D);
117
118 // TODO: It may be better to default to latency-oriented scheduling, however
119 // LLVM's current latency-oriented scheduler can't handle physreg definitions
120 // such as SystemZ has with CC, so set this to the register-pressure
121 // scheduler, because it can.
122 setSchedulingPreference(Sched::RegPressure);
123
124 setBooleanContents(ZeroOrOneBooleanContent);
125 setBooleanVectorContents(ZeroOrNegativeOneBooleanContent);
126
127 // Instructions are strings of 2-byte aligned 2-byte values.
128 setMinFunctionAlignment(Align(2));
129 // For performance reasons we prefer 16-byte alignment.
130 setPrefFunctionAlignment(Align(16));
131
132 // Handle operations that are handled in a similar way for all types.
133 for (unsigned I = MVT::FIRST_INTEGER_VALUETYPE;
134 I <= MVT::LAST_FP_VALUETYPE;
135 ++I) {
136 MVT VT = MVT::SimpleValueType(I);
137 if (isTypeLegal(VT)) {
138 // Lower SET_CC into an IPM-based sequence.
139 setOperationAction(ISD::SETCC, VT, Custom);
140 setOperationAction(ISD::STRICT_FSETCC, VT, Custom);
141 setOperationAction(ISD::STRICT_FSETCCS, VT, Custom);
142
143 // Expand SELECT(C, A, B) into SELECT_CC(X, 0, A, B, NE).
144 setOperationAction(ISD::SELECT, VT, Expand);
145
146 // Lower SELECT_CC and BR_CC into separate comparisons and branches.
147 setOperationAction(ISD::SELECT_CC, VT, Custom);
148 setOperationAction(ISD::BR_CC, VT, Custom);
149 }
150 }
151
152 // Expand jump table branches as address arithmetic followed by an
153 // indirect jump.
154 setOperationAction(ISD::BR_JT, MVT::Other, Expand);
155
156 // Expand BRCOND into a BR_CC (see above).
157 setOperationAction(ISD::BRCOND, MVT::Other, Expand);
158
159 // Handle integer types.
160 for (unsigned I = MVT::FIRST_INTEGER_VALUETYPE;
161 I <= MVT::LAST_INTEGER_VALUETYPE;
162 ++I) {
163 MVT VT = MVT::SimpleValueType(I);
164 if (isTypeLegal(VT)) {
165 // Expand individual DIV and REMs into DIVREMs.
166 setOperationAction(ISD::SDIV, VT, Expand);
167 setOperationAction(ISD::UDIV, VT, Expand);
168 setOperationAction(ISD::SREM, VT, Expand);
169 setOperationAction(ISD::UREM, VT, Expand);
170 setOperationAction(ISD::SDIVREM, VT, Custom);
171 setOperationAction(ISD::UDIVREM, VT, Custom);
172
173 // Support addition/subtraction with overflow.
174 setOperationAction(ISD::SADDO, VT, Custom);
175 setOperationAction(ISD::SSUBO, VT, Custom);
176
177 // Support addition/subtraction with carry.
178 setOperationAction(ISD::UADDO, VT, Custom);
179 setOperationAction(ISD::USUBO, VT, Custom);
180
181 // Support carry in as value rather than glue.
182 setOperationAction(ISD::ADDCARRY, VT, Custom);
183 setOperationAction(ISD::SUBCARRY, VT, Custom);
184
185 // Lower ATOMIC_LOAD and ATOMIC_STORE into normal volatile loads and
186 // stores, putting a serialization instruction after the stores.
187 setOperationAction(ISD::ATOMIC_LOAD, VT, Custom);
188 setOperationAction(ISD::ATOMIC_STORE, VT, Custom);
189
190 // Lower ATOMIC_LOAD_SUB into ATOMIC_LOAD_ADD if LAA and LAAG are
191 // available, or if the operand is constant.
192 setOperationAction(ISD::ATOMIC_LOAD_SUB, VT, Custom);
193
194 // Use POPCNT on z196 and above.
195 if (Subtarget.hasPopulationCount())
196 setOperationAction(ISD::CTPOP, VT, Custom);
197 else
198 setOperationAction(ISD::CTPOP, VT, Expand);
199
200 // No special instructions for these.
201 setOperationAction(ISD::CTTZ, VT, Expand);
202 setOperationAction(ISD::ROTR, VT, Expand);
203
204 // Use *MUL_LOHI where possible instead of MULH*.
205 setOperationAction(ISD::MULHS, VT, Expand);
206 setOperationAction(ISD::MULHU, VT, Expand);
207 setOperationAction(ISD::SMUL_LOHI, VT, Custom);
208 setOperationAction(ISD::UMUL_LOHI, VT, Custom);
209
210 // Only z196 and above have native support for conversions to unsigned.
211 // On z10, promoting to i64 doesn't generate an inexact condition for
212 // values that are outside the i32 range but in the i64 range, so use
213 // the default expansion.
214 if (!Subtarget.hasFPExtension())
215 setOperationAction(ISD::FP_TO_UINT, VT, Expand);
216
217 // Mirror those settings for STRICT_FP_TO_[SU]INT. Note that these all
218 // default to Expand, so need to be modified to Legal where appropriate.
219 setOperationAction(ISD::STRICT_FP_TO_SINT, VT, Legal);
220 if (Subtarget.hasFPExtension())
221 setOperationAction(ISD::STRICT_FP_TO_UINT, VT, Legal);
222
223 // And similarly for STRICT_[SU]INT_TO_FP.
224 setOperationAction(ISD::STRICT_SINT_TO_FP, VT, Legal);
225 if (Subtarget.hasFPExtension())
226 setOperationAction(ISD::STRICT_UINT_TO_FP, VT, Legal);
227 }
228 }
229
230 // Type legalization will convert 8- and 16-bit atomic operations into
231 // forms that operate on i32s (but still keeping the original memory VT).
232 // Lower them into full i32 operations.
233 setOperationAction(ISD::ATOMIC_SWAP, MVT::i32, Custom);
234 setOperationAction(ISD::ATOMIC_LOAD_ADD, MVT::i32, Custom);
235 setOperationAction(ISD::ATOMIC_LOAD_SUB, MVT::i32, Custom);
236 setOperationAction(ISD::ATOMIC_LOAD_AND, MVT::i32, Custom);
237 setOperationAction(ISD::ATOMIC_LOAD_OR, MVT::i32, Custom);
238 setOperationAction(ISD::ATOMIC_LOAD_XOR, MVT::i32, Custom);
239 setOperationAction(ISD::ATOMIC_LOAD_NAND, MVT::i32, Custom);
240 setOperationAction(ISD::ATOMIC_LOAD_MIN, MVT::i32, Custom);
241 setOperationAction(ISD::ATOMIC_LOAD_MAX, MVT::i32, Custom);
242 setOperationAction(ISD::ATOMIC_LOAD_UMIN, MVT::i32, Custom);
243 setOperationAction(ISD::ATOMIC_LOAD_UMAX, MVT::i32, Custom);
244
245 // Even though i128 is not a legal type, we still need to custom lower
246 // the atomic operations in order to exploit SystemZ instructions.
247 setOperationAction(ISD::ATOMIC_LOAD, MVT::i128, Custom);
248 setOperationAction(ISD::ATOMIC_STORE, MVT::i128, Custom);
249
250 // We can use the CC result of compare-and-swap to implement
251 // the "success" result of ATOMIC_CMP_SWAP_WITH_SUCCESS.
252 setOperationAction(ISD::ATOMIC_CMP_SWAP_WITH_SUCCESS, MVT::i32, Custom);
253 setOperationAction(ISD::ATOMIC_CMP_SWAP_WITH_SUCCESS, MVT::i64, Custom);
254 setOperationAction(ISD::ATOMIC_CMP_SWAP_WITH_SUCCESS, MVT::i128, Custom);
255
256 setOperationAction(ISD::ATOMIC_FENCE, MVT::Other, Custom);
257
258 // Traps are legal, as we will convert them to "j .+2".
259 setOperationAction(ISD::TRAP, MVT::Other, Legal);
260
261 // z10 has instructions for signed but not unsigned FP conversion.
262 // Handle unsigned 32-bit types as signed 64-bit types.
263 if (!Subtarget.hasFPExtension()) {
264 setOperationAction(ISD::UINT_TO_FP, MVT::i32, Promote);
265 setOperationAction(ISD::UINT_TO_FP, MVT::i64, Expand);
266 setOperationAction(ISD::STRICT_UINT_TO_FP, MVT::i32, Promote);
267 setOperationAction(ISD::STRICT_UINT_TO_FP, MVT::i64, Expand);
268 }
269
270 // We have native support for a 64-bit CTLZ, via FLOGR.
271 setOperationAction(ISD::CTLZ, MVT::i32, Promote);
272 setOperationAction(ISD::CTLZ_ZERO_UNDEF, MVT::i32, Promote);
273 setOperationAction(ISD::CTLZ, MVT::i64, Legal);
274
275 // On z15 we have native support for a 64-bit CTPOP.
276 if (Subtarget.hasMiscellaneousExtensions3()) {
277 setOperationAction(ISD::CTPOP, MVT::i32, Promote);
278 setOperationAction(ISD::CTPOP, MVT::i64, Legal);
279 }
280
281 // Give LowerOperation the chance to replace 64-bit ORs with subregs.
282 setOperationAction(ISD::OR, MVT::i64, Custom);
283
284 // FIXME: Can we support these natively?
285 setOperationAction(ISD::SRL_PARTS, MVT::i64, Expand);
286 setOperationAction(ISD::SHL_PARTS, MVT::i64, Expand);
287 setOperationAction(ISD::SRA_PARTS, MVT::i64, Expand);
288
289 // We have native instructions for i8, i16 and i32 extensions, but not i1.
290 setOperationAction(ISD::SIGN_EXTEND_INREG, MVT::i1, Expand);
291 for (MVT VT : MVT::integer_valuetypes()) {
292 setLoadExtAction(ISD::SEXTLOAD, VT, MVT::i1, Promote);
293 setLoadExtAction(ISD::ZEXTLOAD, VT, MVT::i1, Promote);
294 setLoadExtAction(ISD::EXTLOAD, VT, MVT::i1, Promote);
295 }
296
297 // Handle the various types of symbolic address.
298 setOperationAction(ISD::ConstantPool, PtrVT, Custom);
299 setOperationAction(ISD::GlobalAddress, PtrVT, Custom);
300 setOperationAction(ISD::GlobalTLSAddress, PtrVT, Custom);
301 setOperationAction(ISD::BlockAddress, PtrVT, Custom);
302 setOperationAction(ISD::JumpTable, PtrVT, Custom);
303
304 // We need to handle dynamic allocations specially because of the
305 // 160-byte area at the bottom of the stack.
306 setOperationAction(ISD::DYNAMIC_STACKALLOC, PtrVT, Custom);
307 setOperationAction(ISD::GET_DYNAMIC_AREA_OFFSET, PtrVT, Custom);
308
309 // Use custom expanders so that we can force the function to use
310 // a frame pointer.
311 setOperationAction(ISD::STACKSAVE, MVT::Other, Custom);
312 setOperationAction(ISD::STACKRESTORE, MVT::Other, Custom);
313
314 // Handle prefetches with PFD or PFDRL.
315 setOperationAction(ISD::PREFETCH, MVT::Other, Custom);
316
317 for (MVT VT : MVT::fixedlen_vector_valuetypes()) {
318 // Assume by default that all vector operations need to be expanded.
319 for (unsigned Opcode = 0; Opcode < ISD::BUILTIN_OP_END; ++Opcode)
320 if (getOperationAction(Opcode, VT) == Legal)
321 setOperationAction(Opcode, VT, Expand);
322
323 // Likewise all truncating stores and extending loads.
324 for (MVT InnerVT : MVT::fixedlen_vector_valuetypes()) {
325 setTruncStoreAction(VT, InnerVT, Expand);
326 setLoadExtAction(ISD::SEXTLOAD, VT, InnerVT, Expand);
327 setLoadExtAction(ISD::ZEXTLOAD, VT, InnerVT, Expand);
328 setLoadExtAction(ISD::EXTLOAD, VT, InnerVT, Expand);
329 }
330
331 if (isTypeLegal(VT)) {
332 // These operations are legal for anything that can be stored in a
333 // vector register, even if there is no native support for the format
334 // as such. In particular, we can do these for v4f32 even though there
335 // are no specific instructions for that format.
336 setOperationAction(ISD::LOAD, VT, Legal);
337 setOperationAction(ISD::STORE, VT, Legal);
338 setOperationAction(ISD::VSELECT, VT, Legal);
339 setOperationAction(ISD::BITCAST, VT, Legal);
340 setOperationAction(ISD::UNDEF, VT, Legal);
341
342 // Likewise, except that we need to replace the nodes with something
343 // more specific.
344 setOperationAction(ISD::BUILD_VECTOR, VT, Custom);
345 setOperationAction(ISD::VECTOR_SHUFFLE, VT, Custom);
346 }
347 }
348
349 // Handle integer vector types.
350 for (MVT VT : MVT::integer_fixedlen_vector_valuetypes()) {
351 if (isTypeLegal(VT)) {
352 // These operations have direct equivalents.
353 setOperationAction(ISD::EXTRACT_VECTOR_ELT, VT, Legal);
354 setOperationAction(ISD::INSERT_VECTOR_ELT, VT, Legal);
355 setOperationAction(ISD::ADD, VT, Legal);
356 setOperationAction(ISD::SUB, VT, Legal);
357 if (VT != MVT::v2i64)
358 setOperationAction(ISD::MUL, VT, Legal);
359 setOperationAction(ISD::AND, VT, Legal);
360 setOperationAction(ISD::OR, VT, Legal);
361 setOperationAction(ISD::XOR, VT, Legal);
362 if (Subtarget.hasVectorEnhancements1())
363 setOperationAction(ISD::CTPOP, VT, Legal);
364 else
365 setOperationAction(ISD::CTPOP, VT, Custom);
366 setOperationAction(ISD::CTTZ, VT, Legal);
367 setOperationAction(ISD::CTLZ, VT, Legal);
368
369 // Convert a GPR scalar to a vector by inserting it into element 0.
370 setOperationAction(ISD::SCALAR_TO_VECTOR, VT, Custom);
371
372 // Use a series of unpacks for extensions.
373 setOperationAction(ISD::SIGN_EXTEND_VECTOR_INREG, VT, Custom);
374 setOperationAction(ISD::ZERO_EXTEND_VECTOR_INREG, VT, Custom);
375
376 // Detect shifts by a scalar amount and convert them into
377 // V*_BY_SCALAR.
378 setOperationAction(ISD::SHL, VT, Custom);
379 setOperationAction(ISD::SRA, VT, Custom);
380 setOperationAction(ISD::SRL, VT, Custom);
381
382 // At present ROTL isn't matched by DAGCombiner. ROTR should be
383 // converted into ROTL.
384 setOperationAction(ISD::ROTL, VT, Expand);
385 setOperationAction(ISD::ROTR, VT, Expand);
386
387 // Map SETCCs onto one of VCE, VCH or VCHL, swapping the operands
388 // and inverting the result as necessary.
389 setOperationAction(ISD::SETCC, VT, Custom);
390 setOperationAction(ISD::STRICT_FSETCC, VT, Custom);
391 if (Subtarget.hasVectorEnhancements1())
392 setOperationAction(ISD::STRICT_FSETCCS, VT, Custom);
393 }
394 }
395
396 if (Subtarget.hasVector()) {
397 // There should be no need to check for float types other than v2f64
398 // since <2 x f32> isn't a legal type.
399 setOperationAction(ISD::FP_TO_SINT, MVT::v2i64, Legal);
400 setOperationAction(ISD::FP_TO_SINT, MVT::v2f64, Legal);
401 setOperationAction(ISD::FP_TO_UINT, MVT::v2i64, Legal);
402 setOperationAction(ISD::FP_TO_UINT, MVT::v2f64, Legal);
403 setOperationAction(ISD::SINT_TO_FP, MVT::v2i64, Legal);
404 setOperationAction(ISD::SINT_TO_FP, MVT::v2f64, Legal);
405 setOperationAction(ISD::UINT_TO_FP, MVT::v2i64, Legal);
406 setOperationAction(ISD::UINT_TO_FP, MVT::v2f64, Legal);
407
408 setOperationAction(ISD::STRICT_FP_TO_SINT, MVT::v2i64, Legal);
409 setOperationAction(ISD::STRICT_FP_TO_SINT, MVT::v2f64, Legal);
410 setOperationAction(ISD::STRICT_FP_TO_UINT, MVT::v2i64, Legal);
411 setOperationAction(ISD::STRICT_FP_TO_UINT, MVT::v2f64, Legal);
412 setOperationAction(ISD::STRICT_SINT_TO_FP, MVT::v2i64, Legal);
413 setOperationAction(ISD::STRICT_SINT_TO_FP, MVT::v2f64, Legal);
414 setOperationAction(ISD::STRICT_UINT_TO_FP, MVT::v2i64, Legal);
415 setOperationAction(ISD::STRICT_UINT_TO_FP, MVT::v2f64, Legal);
416 }
417
418 if (Subtarget.hasVectorEnhancements2()) {
419 setOperationAction(ISD::FP_TO_SINT, MVT::v4i32, Legal);
420 setOperationAction(ISD::FP_TO_SINT, MVT::v4f32, Legal);
421 setOperationAction(ISD::FP_TO_UINT, MVT::v4i32, Legal);
422 setOperationAction(ISD::FP_TO_UINT, MVT::v4f32, Legal);
423 setOperationAction(ISD::SINT_TO_FP, MVT::v4i32, Legal);
424 setOperationAction(ISD::SINT_TO_FP, MVT::v4f32, Legal);
425 setOperationAction(ISD::UINT_TO_FP, MVT::v4i32, Legal);
426 setOperationAction(ISD::UINT_TO_FP, MVT::v4f32, Legal);
427
428 setOperationAction(ISD::STRICT_FP_TO_SINT, MVT::v4i32, Legal);
429 setOperationAction(ISD::STRICT_FP_TO_SINT, MVT::v4f32, Legal);
430 setOperationAction(ISD::STRICT_FP_TO_UINT, MVT::v4i32, Legal);
431 setOperationAction(ISD::STRICT_FP_TO_UINT, MVT::v4f32, Legal);
432 setOperationAction(ISD::STRICT_SINT_TO_FP, MVT::v4i32, Legal);
433 setOperationAction(ISD::STRICT_SINT_TO_FP, MVT::v4f32, Legal);
434 setOperationAction(ISD::STRICT_UINT_TO_FP, MVT::v4i32, Legal);
435 setOperationAction(ISD::STRICT_UINT_TO_FP, MVT::v4f32, Legal);
436 }
437
438 // Handle floating-point types.
439 for (unsigned I = MVT::FIRST_FP_VALUETYPE;
440 I <= MVT::LAST_FP_VALUETYPE;
441 ++I) {
442 MVT VT = MVT::SimpleValueType(I);
443 if (isTypeLegal(VT)) {
444 // We can use FI for FRINT.
445 setOperationAction(ISD::FRINT, VT, Legal);
446
447 // We can use the extended form of FI for other rounding operations.
448 if (Subtarget.hasFPExtension()) {
449 setOperationAction(ISD::FNEARBYINT, VT, Legal);
450 setOperationAction(ISD::FFLOOR, VT, Legal);
451 setOperationAction(ISD::FCEIL, VT, Legal);
452 setOperationAction(ISD::FTRUNC, VT, Legal);
453 setOperationAction(ISD::FROUND, VT, Legal);
454 }
455
456 // No special instructions for these.
457 setOperationAction(ISD::FSIN, VT, Expand);
458 setOperationAction(ISD::FCOS, VT, Expand);
459 setOperationAction(ISD::FSINCOS, VT, Expand);
460 setOperationAction(ISD::FREM, VT, Expand);
461 setOperationAction(ISD::FPOW, VT, Expand);
462
463 // Handle constrained floating-point operations.
464 setOperationAction(ISD::STRICT_FADD, VT, Legal);
465 setOperationAction(ISD::STRICT_FSUB, VT, Legal);
466 setOperationAction(ISD::STRICT_FMUL, VT, Legal);
467 setOperationAction(ISD::STRICT_FDIV, VT, Legal);
468 setOperationAction(ISD::STRICT_FMA, VT, Legal);
469 setOperationAction(ISD::STRICT_FSQRT, VT, Legal);
470 setOperationAction(ISD::STRICT_FRINT, VT, Legal);
471 setOperationAction(ISD::STRICT_FP_ROUND, VT, Legal);
472 setOperationAction(ISD::STRICT_FP_EXTEND, VT, Legal);
473 if (Subtarget.hasFPExtension()) {
474 setOperationAction(ISD::STRICT_FNEARBYINT, VT, Legal);
475 setOperationAction(ISD::STRICT_FFLOOR, VT, Legal);
476 setOperationAction(ISD::STRICT_FCEIL, VT, Legal);
477 setOperationAction(ISD::STRICT_FROUND, VT, Legal);
478 setOperationAction(ISD::STRICT_FTRUNC, VT, Legal);
479 }
480 }
481 }
482
483 // Handle floating-point vector types.
484 if (Subtarget.hasVector()) {
485 // Scalar-to-vector conversion is just a subreg.
486 setOperationAction(ISD::SCALAR_TO_VECTOR, MVT::v4f32, Legal);
487 setOperationAction(ISD::SCALAR_TO_VECTOR, MVT::v2f64, Legal);
488
489 // Some insertions and extractions can be done directly but others
490 // need to go via integers.
491 setOperationAction(ISD::INSERT_VECTOR_ELT, MVT::v4f32, Custom);
492 setOperationAction(ISD::INSERT_VECTOR_ELT, MVT::v2f64, Custom);
493 setOperationAction(ISD::EXTRACT_VECTOR_ELT, MVT::v4f32, Custom);
494 setOperationAction(ISD::EXTRACT_VECTOR_ELT, MVT::v2f64, Custom);
495
496 // These operations have direct equivalents.
497 setOperationAction(ISD::FADD, MVT::v2f64, Legal);
498 setOperationAction(ISD::FNEG, MVT::v2f64, Legal);
499 setOperationAction(ISD::FSUB, MVT::v2f64, Legal);
500 setOperationAction(ISD::FMUL, MVT::v2f64, Legal);
501 setOperationAction(ISD::FMA, MVT::v2f64, Legal);
502 setOperationAction(ISD::FDIV, MVT::v2f64, Legal);
503 setOperationAction(ISD::FABS, MVT::v2f64, Legal);
504 setOperationAction(ISD::FSQRT, MVT::v2f64, Legal);
505 setOperationAction(ISD::FRINT, MVT::v2f64, Legal);
506 setOperationAction(ISD::FNEARBYINT, MVT::v2f64, Legal);
507 setOperationAction(ISD::FFLOOR, MVT::v2f64, Legal);
508 setOperationAction(ISD::FCEIL, MVT::v2f64, Legal);
509 setOperationAction(ISD::FTRUNC, MVT::v2f64, Legal);
510 setOperationAction(ISD::FROUND, MVT::v2f64, Legal);
511
512 // Handle constrained floating-point operations.
513 setOperationAction(ISD::STRICT_FADD, MVT::v2f64, Legal);
514 setOperationAction(ISD::STRICT_FSUB, MVT::v2f64, Legal);
515 setOperationAction(ISD::STRICT_FMUL, MVT::v2f64, Legal);
516 setOperationAction(ISD::STRICT_FMA, MVT::v2f64, Legal);
517 setOperationAction(ISD::STRICT_FDIV, MVT::v2f64, Legal);
518 setOperationAction(ISD::STRICT_FSQRT, MVT::v2f64, Legal);
519 setOperationAction(ISD::STRICT_FRINT, MVT::v2f64, Legal);
520 setOperationAction(ISD::STRICT_FNEARBYINT, MVT::v2f64, Legal);
521 setOperationAction(ISD::STRICT_FFLOOR, MVT::v2f64, Legal);
522 setOperationAction(ISD::STRICT_FCEIL, MVT::v2f64, Legal);
523 setOperationAction(ISD::STRICT_FTRUNC, MVT::v2f64, Legal);
524 setOperationAction(ISD::STRICT_FROUND, MVT::v2f64, Legal);
525 }
526
527 // The vector enhancements facility 1 has instructions for these.
528 if (Subtarget.hasVectorEnhancements1()) {
529 setOperationAction(ISD::FADD, MVT::v4f32, Legal);
530 setOperationAction(ISD::FNEG, MVT::v4f32, Legal);
531 setOperationAction(ISD::FSUB, MVT::v4f32, Legal);
532 setOperationAction(ISD::FMUL, MVT::v4f32, Legal);
533 setOperationAction(ISD::FMA, MVT::v4f32, Legal);
534 setOperationAction(ISD::FDIV, MVT::v4f32, Legal);
535 setOperationAction(ISD::FABS, MVT::v4f32, Legal);
536 setOperationAction(ISD::FSQRT, MVT::v4f32, Legal);
537 setOperationAction(ISD::FRINT, MVT::v4f32, Legal);
538 setOperationAction(ISD::FNEARBYINT, MVT::v4f32, Legal);
539 setOperationAction(ISD::FFLOOR, MVT::v4f32, Legal);
540 setOperationAction(ISD::FCEIL, MVT::v4f32, Legal);
541 setOperationAction(ISD::FTRUNC, MVT::v4f32, Legal);
542 setOperationAction(ISD::FROUND, MVT::v4f32, Legal);
543
544 setOperationAction(ISD::FMAXNUM, MVT::f64, Legal);
545 setOperationAction(ISD::FMAXIMUM, MVT::f64, Legal);
546 setOperationAction(ISD::FMINNUM, MVT::f64, Legal);
547 setOperationAction(ISD::FMINIMUM, MVT::f64, Legal);
548
549 setOperationAction(ISD::FMAXNUM, MVT::v2f64, Legal);
550 setOperationAction(ISD::FMAXIMUM, MVT::v2f64, Legal);
551 setOperationAction(ISD::FMINNUM, MVT::v2f64, Legal);
552 setOperationAction(ISD::FMINIMUM, MVT::v2f64, Legal);
553
554 setOperationAction(ISD::FMAXNUM, MVT::f32, Legal);
555 setOperationAction(ISD::FMAXIMUM, MVT::f32, Legal);
556 setOperationAction(ISD::FMINNUM, MVT::f32, Legal);
557 setOperationAction(ISD::FMINIMUM, MVT::f32, Legal);
558
559 setOperationAction(ISD::FMAXNUM, MVT::v4f32, Legal);
560 setOperationAction(ISD::FMAXIMUM, MVT::v4f32, Legal);
561 setOperationAction(ISD::FMINNUM, MVT::v4f32, Legal);
562 setOperationAction(ISD::FMINIMUM, MVT::v4f32, Legal);
563
564 setOperationAction(ISD::FMAXNUM, MVT::f128, Legal);
565 setOperationAction(ISD::FMAXIMUM, MVT::f128, Legal);
566 setOperationAction(ISD::FMINNUM, MVT::f128, Legal);
567 setOperationAction(ISD::FMINIMUM, MVT::f128, Legal);
568
569 // Handle constrained floating-point operations.
570 setOperationAction(ISD::STRICT_FADD, MVT::v4f32, Legal);
571 setOperationAction(ISD::STRICT_FSUB, MVT::v4f32, Legal);
572 setOperationAction(ISD::STRICT_FMUL, MVT::v4f32, Legal);
573 setOperationAction(ISD::STRICT_FMA, MVT::v4f32, Legal);
574 setOperationAction(ISD::STRICT_FDIV, MVT::v4f32, Legal);
575 setOperationAction(ISD::STRICT_FSQRT, MVT::v4f32, Legal);
576 setOperationAction(ISD::STRICT_FRINT, MVT::v4f32, Legal);
577 setOperationAction(ISD::STRICT_FNEARBYINT, MVT::v4f32, Legal);
578 setOperationAction(ISD::STRICT_FFLOOR, MVT::v4f32, Legal);
579 setOperationAction(ISD::STRICT_FCEIL, MVT::v4f32, Legal);
580 setOperationAction(ISD::STRICT_FROUND, MVT::v4f32, Legal);
581 setOperationAction(ISD::STRICT_FTRUNC, MVT::v4f32, Legal);
582 for (auto VT : { MVT::f32, MVT::f64, MVT::f128,
583 MVT::v4f32, MVT::v2f64 }) {
584 setOperationAction(ISD::STRICT_FMAXNUM, VT, Legal);
585 setOperationAction(ISD::STRICT_FMINNUM, VT, Legal);
586 setOperationAction(ISD::STRICT_FMAXIMUM, VT, Legal);
587 setOperationAction(ISD::STRICT_FMINIMUM, VT, Legal);
588 }
589 }
590
591 // We only have fused f128 multiply-addition on vector registers.
592 if (!Subtarget.hasVectorEnhancements1()) {
593 setOperationAction(ISD::FMA, MVT::f128, Expand);
594 setOperationAction(ISD::STRICT_FMA, MVT::f128, Expand);
595 }
596
597 // We don't have a copysign instruction on vector registers.
598 if (Subtarget.hasVectorEnhancements1())
599 setOperationAction(ISD::FCOPYSIGN, MVT::f128, Expand);
600
601 // Needed so that we don't try to implement f128 constant loads using
602 // a load-and-extend of a f80 constant (in cases where the constant
603 // would fit in an f80).
604 for (MVT VT : MVT::fp_valuetypes())
605 setLoadExtAction(ISD::EXTLOAD, VT, MVT::f80, Expand);
606
607 // We don't have extending load instruction on vector registers.
608 if (Subtarget.hasVectorEnhancements1()) {
609 setLoadExtAction(ISD::EXTLOAD, MVT::f128, MVT::f32, Expand);
610 setLoadExtAction(ISD::EXTLOAD, MVT::f128, MVT::f64, Expand);
611 }
612
613 // Floating-point truncation and stores need to be done separately.
614 setTruncStoreAction(MVT::f64, MVT::f32, Expand);
615 setTruncStoreAction(MVT::f128, MVT::f32, Expand);
616 setTruncStoreAction(MVT::f128, MVT::f64, Expand);
617
618 // We have 64-bit FPR<->GPR moves, but need special handling for
619 // 32-bit forms.
620 if (!Subtarget.hasVector()) {
621 setOperationAction(ISD::BITCAST, MVT::i32, Custom);
622 setOperationAction(ISD::BITCAST, MVT::f32, Custom);
623 }
624
625 // VASTART and VACOPY need to deal with the SystemZ-specific varargs
626 // structure, but VAEND is a no-op.
627 setOperationAction(ISD::VASTART, MVT::Other, Custom);
628 setOperationAction(ISD::VACOPY, MVT::Other, Custom);
629 setOperationAction(ISD::VAEND, MVT::Other, Expand);
630
631 // Codes for which we want to perform some z-specific combinations.
632 setTargetDAGCombine(ISD::ZERO_EXTEND);
633 setTargetDAGCombine(ISD::SIGN_EXTEND);
634 setTargetDAGCombine(ISD::SIGN_EXTEND_INREG);
635 setTargetDAGCombine(ISD::LOAD);
636 setTargetDAGCombine(ISD::STORE);
637 setTargetDAGCombine(ISD::VECTOR_SHUFFLE);
638 setTargetDAGCombine(ISD::EXTRACT_VECTOR_ELT);
639 setTargetDAGCombine(ISD::FP_ROUND);
640 setTargetDAGCombine(ISD::STRICT_FP_ROUND);
641 setTargetDAGCombine(ISD::FP_EXTEND);
642 setTargetDAGCombine(ISD::STRICT_FP_EXTEND);
643 setTargetDAGCombine(ISD::BSWAP);
644 setTargetDAGCombine(ISD::SDIV);
645 setTargetDAGCombine(ISD::UDIV);
646 setTargetDAGCombine(ISD::SREM);
647 setTargetDAGCombine(ISD::UREM);
648
649 // Handle intrinsics.
650 setOperationAction(ISD::INTRINSIC_W_CHAIN, MVT::Other, Custom);
651 setOperationAction(ISD::INTRINSIC_WO_CHAIN, MVT::Other, Custom);
652
653 // We want to use MVC in preference to even a single load/store pair.
654 MaxStoresPerMemcpy = 0;
655 MaxStoresPerMemcpyOptSize = 0;
656
657 // The main memset sequence is a byte store followed by an MVC.
658 // Two STC or MV..I stores win over that, but the kind of fused stores
659 // generated by target-independent code don't when the byte value is
660 // variable. E.g. "STC <reg>;MHI <reg>,257;STH <reg>" is not better
661 // than "STC;MVC". Handle the choice in target-specific code instead.
662 MaxStoresPerMemset = 0;
663 MaxStoresPerMemsetOptSize = 0;
664
665 // Default to having -disable-strictnode-mutation on
666 IsStrictFPEnabled = true;
667}
668
669EVT SystemZTargetLowering::getSetCCResultType(const DataLayout &DL,
670 LLVMContext &, EVT VT) const {
671 if (!VT.isVector())
672 return MVT::i32;
673 return VT.changeVectorElementTypeToInteger();
674}
675
676bool SystemZTargetLowering::isFMAFasterThanFMulAndFAdd(
677 const MachineFunction &MF, EVT VT) const {
678 VT = VT.getScalarType();
679
680 if (!VT.isSimple())
681 return false;
682
683 switch (VT.getSimpleVT().SimpleTy) {
684 case MVT::f32:
685 case MVT::f64:
686 return true;
687 case MVT::f128:
688 return Subtarget.hasVectorEnhancements1();
689 default:
690 break;
691 }
692
693 return false;
694}
695
696// Return true if the constant can be generated with a vector instruction,
697// such as VGM, VGMB or VREPI.
698bool SystemZVectorConstantInfo::isVectorConstantLegal(
699 const SystemZSubtarget &Subtarget) {
700 const SystemZInstrInfo *TII =
701 static_cast<const SystemZInstrInfo *>(Subtarget.getInstrInfo());
 702 if (!Subtarget.hasVector() ||
   [3] Assuming the condition is false
 703     (isFP128 && !Subtarget.hasVectorEnhancements1()))
   [3.1] Field 'isFP128' is false
704 return false;
705
706 // Try using VECTOR GENERATE BYTE MASK. This is the architecturally-
707 // preferred way of creating all-zero and all-one vectors so give it
708 // priority over other methods below.
709 unsigned Mask = 0;
710 unsigned I = 0;
711 for (; I < SystemZ::VectorBytes; ++I) {
   [4] Loop condition is true. Entering loop body
712 uint64_t Byte = IntBits.lshr(I * 8).trunc(8).getZExtValue();
713 if (Byte == 0xff)
   [5] Assuming 'Byte' is not equal to 255
   [6] Taking false branch
714 Mask |= 1ULL << I;
715 else if (Byte != 0)
   [7] Assuming 'Byte' is not equal to 0
   [8] Taking true branch
716 break;
   [9] Execution continues on line 718
717 }
 718 if (I == SystemZ::VectorBytes) {
   [9.1] 'I' is not equal to 'VectorBytes'
   [10] Taking false branch
719 Opcode = SystemZISD::BYTE_MASK;
720 OpVals.push_back(Mask);
721 VecVT = MVT::getVectorVT(MVT::getIntegerVT(8), 16);
722 return true;
723 }
724
 725 if (SplatBitSize > 64)
   [10.1] Field 'SplatBitSize' is <= 64
   [11] Taking false branch
726 return false;
727
728 auto tryValue = [&](uint64_t Value) -> bool {
729 // Try VECTOR REPLICATE IMMEDIATE
730 int64_t SignedValue = SignExtend64(Value, SplatBitSize);
731 if (isInt<16>(SignedValue)) {
732 OpVals.push_back(((unsigned) SignedValue));
733 Opcode = SystemZISD::REPLICATE;
734 VecVT = MVT::getVectorVT(MVT::getIntegerVT(SplatBitSize),
735 SystemZ::VectorBits / SplatBitSize);
736 return true;
737 }
738 // Try VECTOR GENERATE MASK
739 unsigned Start, End;
740 if (TII->isRxSBGMask(Value, SplatBitSize, Start, End)) {
741 // isRxSBGMask returns the bit numbers for a full 64-bit value, with 0
742 // denoting 1 << 63 and 63 denoting 1. Convert them to bit numbers for
743 // an SplatBitSize value, so that 0 denotes 1 << (SplatBitSize-1).
744 OpVals.push_back(Start - (64 - SplatBitSize));
745 OpVals.push_back(End - (64 - SplatBitSize));
746 Opcode = SystemZISD::ROTATE_MASK;
747 VecVT = MVT::getVectorVT(MVT::getIntegerVT(SplatBitSize),
748 SystemZ::VectorBits / SplatBitSize);
749 return true;
750 }
751 return false;
752 };
753
754 // First try assuming that any undefined bits above the highest set bit
755 // and below the lowest set bit are 1s. This increases the likelihood of
756 // being able to use a sign-extended element value in VECTOR REPLICATE
757 // IMMEDIATE or a wraparound mask in VECTOR GENERATE MASK.
758 uint64_t SplatBitsZ = SplatBits.getZExtValue();
759 uint64_t SplatUndefZ = SplatUndef.getZExtValue();
760 uint64_t Lower =
761 (SplatUndefZ & ((uint64_t(1) << findFirstSet(SplatBitsZ)) - 1));
   [12] Calling 'findFirstSet<unsigned long>'
   [19] Returning from 'findFirstSet<unsigned long>'
   [20] The result of the left shift is undefined due to shifting by '18446744073709551615', which is greater or equal to the width of type 'uint64_t'
762 uint64_t Upper =
763 (SplatUndefZ & ~((uint64_t(1) << findLastSet(SplatBitsZ)) - 1));
764 if (tryValue(SplatBitsZ | Upper | Lower))
765 return true;
766
767 // Now try assuming that any undefined bits between the first and
768 // last defined set bits are set. This increases the chances of
769 // using a non-wraparound mask.
770 uint64_t Middle = SplatUndefZ & ~Upper & ~Lower;
771 return tryValue(SplatBitsZ | Middle);
772}
773
774SystemZVectorConstantInfo::SystemZVectorConstantInfo(APFloat FPImm) {
775 IntBits = FPImm.bitcastToAPInt().zextOrSelf(128);
776 isFP128 = (&FPImm.getSemantics() == &APFloat::IEEEquad());
777
778 // Find the smallest splat.
779 SplatBits = FPImm.bitcastToAPInt();
780 unsigned Width = SplatBits.getBitWidth();
781 while (Width > 8) {
782 unsigned HalfSize = Width / 2;
783 APInt HighValue = SplatBits.lshr(HalfSize).trunc(HalfSize);
784 APInt LowValue = SplatBits.trunc(HalfSize);
785
786 // If the two halves do not match, stop here.
787 if (HighValue != LowValue || 8 > HalfSize)
788 break;
789
790 SplatBits = HighValue;
791 Width = HalfSize;
792 }
793 SplatUndef = 0;
794 SplatBitSize = Width;
795}
796
797SystemZVectorConstantInfo::SystemZVectorConstantInfo(BuildVectorSDNode *BVN) {
 798 assert(BVN->isConstant() && "Expected a constant BUILD_VECTOR");
799 bool HasAnyUndefs;
800
801 // Get IntBits by finding the 128 bit splat.
802 BVN->isConstantSplat(IntBits, SplatUndef, SplatBitSize, HasAnyUndefs, 128,
803 true);
804
805 // Get SplatBits by finding the 8 bit or greater splat.
806 BVN->isConstantSplat(SplatBits, SplatUndef, SplatBitSize, HasAnyUndefs, 8,
807 true);
808}
809
810bool SystemZTargetLowering::isFPImmLegal(const APFloat &Imm, EVT VT,
811 bool ForCodeSize) const {
812 // We can load zero using LZ?R and negative zero using LZ?R;LC?BR.
813 if (Imm.isZero() || Imm.isNegZero())
   [1] Taking false branch
814 return true;
815
816 return SystemZVectorConstantInfo(Imm).isVectorConstantLegal(Subtarget);
   [2] Calling 'SystemZVectorConstantInfo::isVectorConstantLegal'
817}
818
819bool SystemZTargetLowering::isLegalICmpImmediate(int64_t Imm) const {
820 // We can use CGFI or CLGFI.
821 return isInt<32>(Imm) || isUInt<32>(Imm);
822}
823
824bool SystemZTargetLowering::isLegalAddImmediate(int64_t Imm) const {
825 // We can use ALGFI or SLGFI.
826 return isUInt<32>(Imm) || isUInt<32>(-Imm);
827}
828
829bool SystemZTargetLowering::allowsMisalignedMemoryAccesses(
830 EVT VT, unsigned, unsigned, MachineMemOperand::Flags, bool *Fast) const {
831 // Unaligned accesses should never be slower than the expanded version.
832 // We check specifically for aligned accesses in the few cases where
833 // they are required.
834 if (Fast)
835 *Fast = true;
836 return true;
837}
838
839// Information about the addressing mode for a memory access.
840struct AddressingMode {
841 // True if a long displacement is supported.
842 bool LongDisplacement;
843
844 // True if use of index register is supported.
845 bool IndexReg;
846
847 AddressingMode(bool LongDispl, bool IdxReg) :
848 LongDisplacement(LongDispl), IndexReg(IdxReg) {}
849};
850
851// Return the desired addressing mode for a Load which has only one use (in
852// the same block) which is a Store.
853static AddressingMode getLoadStoreAddrMode(bool HasVector,
854 Type *Ty) {
855 // With vector support a Load->Store combination may be combined to either
856 // an MVC or vector operations and it seems to work best to allow the
857 // vector addressing mode.
858 if (HasVector)
859 return AddressingMode(false/*LongDispl*/, true/*IdxReg*/);
860
861 // Otherwise only the MVC case is special.
862 bool MVC = Ty->isIntegerTy(8);
863 return AddressingMode(!MVC/*LongDispl*/, !MVC/*IdxReg*/);
864}
865
866// Return the addressing mode which seems most desirable given an LLVM
867// Instruction pointer.
868static AddressingMode
869supportedAddressingMode(Instruction *I, bool HasVector) {
870 if (IntrinsicInst *II = dyn_cast<IntrinsicInst>(I)) {
871 switch (II->getIntrinsicID()) {
872 default: break;
873 case Intrinsic::memset:
874 case Intrinsic::memmove:
875 case Intrinsic::memcpy:
876 return AddressingMode(false/*LongDispl*/, false/*IdxReg*/);
877 }
878 }
879
880 if (isa<LoadInst>(I) && I->hasOneUse()) {
881 auto *SingleUser = cast<Instruction>(*I->user_begin());
882 if (SingleUser->getParent() == I->getParent()) {
883 if (isa<ICmpInst>(SingleUser)) {
884 if (auto *C = dyn_cast<ConstantInt>(SingleUser->getOperand(1)))
885 if (C->getBitWidth() <= 64 &&
886 (isInt<16>(C->getSExtValue()) || isUInt<16>(C->getZExtValue())))
887 // Comparison of memory with 16 bit signed / unsigned immediate
888 return AddressingMode(false/*LongDispl*/, false/*IdxReg*/);
889 } else if (isa<StoreInst>(SingleUser))
890 // Load->Store
891 return getLoadStoreAddrMode(HasVector, I->getType());
892 }
893 } else if (auto *StoreI = dyn_cast<StoreInst>(I)) {
894 if (auto *LoadI = dyn_cast<LoadInst>(StoreI->getValueOperand()))
895 if (LoadI->hasOneUse() && LoadI->getParent() == I->getParent())
896 // Load->Store
897 return getLoadStoreAddrMode(HasVector, LoadI->getType());
898 }
899
900 if (HasVector && (isa<LoadInst>(I) || isa<StoreInst>(I))) {
901
902 // * Use LDE instead of LE/LEY for z13 to avoid partial register
903 // dependencies (LDE only supports small offsets).
904 // * Utilize the vector registers to hold floating point
905 // values (vector load / store instructions only support small
906 // offsets).
907
908 Type *MemAccessTy = (isa<LoadInst>(I) ? I->getType() :
909 I->getOperand(0)->getType());
910 bool IsFPAccess = MemAccessTy->isFloatingPointTy();
911 bool IsVectorAccess = MemAccessTy->isVectorTy();
912
913 // A store of an extracted vector element will be combined into a VSTE type
914 // instruction.
915 if (!IsVectorAccess && isa<StoreInst>(I)) {
916 Value *DataOp = I->getOperand(0);
917 if (isa<ExtractElementInst>(DataOp))
918 IsVectorAccess = true;
919 }
920
921 // A load which gets inserted into a vector element will be combined into a
922 // VLE type instruction.
923 if (!IsVectorAccess && isa<LoadInst>(I) && I->hasOneUse()) {
924 User *LoadUser = *I->user_begin();
925 if (isa<InsertElementInst>(LoadUser))
926 IsVectorAccess = true;
927 }
928
929 if (IsFPAccess || IsVectorAccess)
930 return AddressingMode(false/*LongDispl*/, true/*IdxReg*/);
931 }
932
933 return AddressingMode(true/*LongDispl*/, true/*IdxReg*/);
934}
935
936bool SystemZTargetLowering::isLegalAddressingMode(const DataLayout &DL,
937 const AddrMode &AM, Type *Ty, unsigned AS, Instruction *I) const {
938 // Punt on globals for now, although they can be used in limited
939 // RELATIVE LONG cases.
940 if (AM.BaseGV)
941 return false;
942
943 // Require a 20-bit signed offset.
944 if (!isInt<20>(AM.BaseOffs))
945 return false;
946
947 AddressingMode SupportedAM(true, true);
948 if (I != nullptr)
949 SupportedAM = supportedAddressingMode(I, Subtarget.hasVector());
950
951 if (!SupportedAM.LongDisplacement && !isUInt<12>(AM.BaseOffs))
952 return false;
953
954 if (!SupportedAM.IndexReg)
955 // No indexing allowed.
956 return AM.Scale == 0;
957 else
958 // Indexing is OK but no scale factor can be applied.
959 return AM.Scale == 0 || AM.Scale == 1;
960}
961
962bool SystemZTargetLowering::isTruncateFree(Type *FromType, Type *ToType) const {
963 if (!FromType->isIntegerTy() || !ToType->isIntegerTy())
964 return false;
965 unsigned FromBits = FromType->getPrimitiveSizeInBits();
966 unsigned ToBits = ToType->getPrimitiveSizeInBits();
967 return FromBits > ToBits;
968}
969
970bool SystemZTargetLowering::isTruncateFree(EVT FromVT, EVT ToVT) const {
971 if (!FromVT.isInteger() || !ToVT.isInteger())
972 return false;
973 unsigned FromBits = FromVT.getSizeInBits();
974 unsigned ToBits = ToVT.getSizeInBits();
975 return FromBits > ToBits;
976}
977
978//===----------------------------------------------------------------------===//
979// Inline asm support
980//===----------------------------------------------------------------------===//
981
982TargetLowering::ConstraintType
983SystemZTargetLowering::getConstraintType(StringRef Constraint) const {
984 if (Constraint.size() == 1) {
985 switch (Constraint[0]) {
986 case 'a': // Address register
987 case 'd': // Data register (equivalent to 'r')
988 case 'f': // Floating-point register
989 case 'h': // High-part register
990 case 'r': // General-purpose register
991 case 'v': // Vector register
992 return C_RegisterClass;
993
994 case 'Q': // Memory with base and unsigned 12-bit displacement
995 case 'R': // Likewise, plus an index
996 case 'S': // Memory with base and signed 20-bit displacement
997 case 'T': // Likewise, plus an index
998 case 'm': // Equivalent to 'T'.
999 return C_Memory;
1000
1001 case 'I': // Unsigned 8-bit constant
1002 case 'J': // Unsigned 12-bit constant
1003 case 'K': // Signed 16-bit constant
1004 case 'L': // Signed 20-bit displacement (on all targets we support)
1005 case 'M': // 0x7fffffff
1006 return C_Immediate;
1007
1008 default:
1009 break;
1010 }
1011 }
1012 return TargetLowering::getConstraintType(Constraint);
1013}
1014
1015TargetLowering::ConstraintWeight SystemZTargetLowering::
1016getSingleConstraintMatchWeight(AsmOperandInfo &info,
1017 const char *constraint) const {
1018 ConstraintWeight weight = CW_Invalid;
1019 Value *CallOperandVal = info.CallOperandVal;
1020 // If we don't have a value, we can't do a match,
1021 // but allow it at the lowest weight.
1022 if (!CallOperandVal)
1023 return CW_Default;
1024 Type *type = CallOperandVal->getType();
1025 // Look at the constraint type.
1026 switch (*constraint) {
1027 default:
1028 weight = TargetLowering::getSingleConstraintMatchWeight(info, constraint);
1029 break;
1030
1031 case 'a': // Address register
1032 case 'd': // Data register (equivalent to 'r')
1033 case 'h': // High-part register
1034 case 'r': // General-purpose register
1035 if (CallOperandVal->getType()->isIntegerTy())
1036 weight = CW_Register;
1037 break;
1038
1039 case 'f': // Floating-point register
1040 if (type->isFloatingPointTy())
1041 weight = CW_Register;
1042 break;
1043
1044 case 'v': // Vector register
1045 if ((type->isVectorTy() || type->isFloatingPointTy()) &&
1046 Subtarget.hasVector())
1047 weight = CW_Register;
1048 break;
1049
1050 case 'I': // Unsigned 8-bit constant
1051 if (auto *C = dyn_cast<ConstantInt>(CallOperandVal))
1052 if (isUInt<8>(C->getZExtValue()))
1053 weight = CW_Constant;
1054 break;
1055
1056 case 'J': // Unsigned 12-bit constant
1057 if (auto *C = dyn_cast<ConstantInt>(CallOperandVal))
1058 if (isUInt<12>(C->getZExtValue()))
1059 weight = CW_Constant;
1060 break;
1061
1062 case 'K': // Signed 16-bit constant
1063 if (auto *C = dyn_cast<ConstantInt>(CallOperandVal))
1064 if (isInt<16>(C->getSExtValue()))
1065 weight = CW_Constant;
1066 break;
1067
1068 case 'L': // Signed 20-bit displacement (on all targets we support)
1069 if (auto *C = dyn_cast<ConstantInt>(CallOperandVal))
1070 if (isInt<20>(C->getSExtValue()))
1071 weight = CW_Constant;
1072 break;
1073
1074 case 'M': // 0x7fffffff
1075 if (auto *C = dyn_cast<ConstantInt>(CallOperandVal))
1076 if (C->getZExtValue() == 0x7fffffff)
1077 weight = CW_Constant;
1078 break;
1079 }
1080 return weight;
1081}
1082
1083// Parse a "{tNNN}" register constraint for which the register type "t"
1084// has already been verified. MC is the class associated with "t" and
1085// Map maps 0-based register numbers to LLVM register numbers.
1086static std::pair<unsigned, const TargetRegisterClass *>
1087parseRegisterNumber(StringRef Constraint, const TargetRegisterClass *RC,
1088 const unsigned *Map, unsigned Size) {
 1089 assert(*(Constraint.end()-1) == '}' && "Missing '}'");
1090 if (isdigit(Constraint[2])) {
1091 unsigned Index;
1092 bool Failed =
1093 Constraint.slice(2, Constraint.size() - 1).getAsInteger(10, Index);
1094 if (!Failed && Index < Size && Map[Index])
1095 return std::make_pair(Map[Index], RC);
1096 }
1097 return std::make_pair(0U, nullptr);
1098}
1099
1100std::pair<unsigned, const TargetRegisterClass *>
1101SystemZTargetLowering::getRegForInlineAsmConstraint(
1102 const TargetRegisterInfo *TRI, StringRef Constraint, MVT VT) const {
1103 if (Constraint.size() == 1) {
1104 // GCC Constraint Letters
1105 switch (Constraint[0]) {
1106 default: break;
1107 case 'd': // Data register (equivalent to 'r')
1108 case 'r': // General-purpose register
1109 if (VT == MVT::i64)
1110 return std::make_pair(0U, &SystemZ::GR64BitRegClass);
1111 else if (VT == MVT::i128)
1112 return std::make_pair(0U, &SystemZ::GR128BitRegClass);
1113 return std::make_pair(0U, &SystemZ::GR32BitRegClass);
1114
1115 case 'a': // Address register
1116 if (VT == MVT::i64)
1117 return std::make_pair(0U, &SystemZ::ADDR64BitRegClass);
1118 else if (VT == MVT::i128)
1119 return std::make_pair(0U, &SystemZ::ADDR128BitRegClass);
1120 return std::make_pair(0U, &SystemZ::ADDR32BitRegClass);
1121
1122 case 'h': // High-part register (an LLVM extension)
1123 return std::make_pair(0U, &SystemZ::GRH32BitRegClass);
1124
1125 case 'f': // Floating-point register
1126 if (VT == MVT::f64)
1127 return std::make_pair(0U, &SystemZ::FP64BitRegClass);
1128 else if (VT == MVT::f128)
1129 return std::make_pair(0U, &SystemZ::FP128BitRegClass);
1130 return std::make_pair(0U, &SystemZ::FP32BitRegClass);
1131
1132 case 'v': // Vector register
1133 if (Subtarget.hasVector()) {
1134 if (VT == MVT::f32)
1135 return std::make_pair(0U, &SystemZ::VR32BitRegClass);
1136 if (VT == MVT::f64)
1137 return std::make_pair(0U, &SystemZ::VR64BitRegClass);
1138 return std::make_pair(0U, &SystemZ::VR128BitRegClass);
1139 }
1140 break;
1141 }
1142 }
1143 if (Constraint.size() > 0 && Constraint[0] == '{') {
1144 // We need to override the default register parsing for GPRs and FPRs
1145 // because the interpretation depends on VT. The internal names of
1146 // the registers are also different from the external names
1147 // (F0D and F0S instead of F0, etc.).
1148 if (Constraint[1] == 'r') {
1149 if (VT == MVT::i32)
1150 return parseRegisterNumber(Constraint, &SystemZ::GR32BitRegClass,
1151 SystemZMC::GR32Regs, 16);
1152 if (VT == MVT::i128)
1153 return parseRegisterNumber(Constraint, &SystemZ::GR128BitRegClass,
1154 SystemZMC::GR128Regs, 16);
1155 return parseRegisterNumber(Constraint, &SystemZ::GR64BitRegClass,
1156 SystemZMC::GR64Regs, 16);
1157 }
1158 if (Constraint[1] == 'f') {
1159 if (VT == MVT::f32)
1160 return parseRegisterNumber(Constraint, &SystemZ::FP32BitRegClass,
1161 SystemZMC::FP32Regs, 16);
1162 if (VT == MVT::f128)
1163 return parseRegisterNumber(Constraint, &SystemZ::FP128BitRegClass,
1164 SystemZMC::FP128Regs, 16);
1165 return parseRegisterNumber(Constraint, &SystemZ::FP64BitRegClass,
1166 SystemZMC::FP64Regs, 16);
1167 }
1168 if (Constraint[1] == 'v') {
1169 if (VT == MVT::f32)
1170 return parseRegisterNumber(Constraint, &SystemZ::VR32BitRegClass,
1171 SystemZMC::VR32Regs, 32);
1172 if (VT == MVT::f64)
1173 return parseRegisterNumber(Constraint, &SystemZ::VR64BitRegClass,
1174 SystemZMC::VR64Regs, 32);
1175 return parseRegisterNumber(Constraint, &SystemZ::VR128BitRegClass,
1176 SystemZMC::VR128Regs, 32);
1177 }
1178 }
1179 return TargetLowering::getRegForInlineAsmConstraint(TRI, Constraint, VT);
1180}
1181
1182void SystemZTargetLowering::
1183LowerAsmOperandForConstraint(SDValue Op, std::string &Constraint,
1184 std::vector<SDValue> &Ops,
1185 SelectionDAG &DAG) const {
1186 // Only support length 1 constraints for now.
1187 if (Constraint.length() == 1) {
1188 switch (Constraint[0]) {
1189 case 'I': // Unsigned 8-bit constant
1190 if (auto *C = dyn_cast<ConstantSDNode>(Op))
1191 if (isUInt<8>(C->getZExtValue()))
1192 Ops.push_back(DAG.getTargetConstant(C->getZExtValue(), SDLoc(Op),
1193 Op.getValueType()));
1194 return;
1195
1196 case 'J': // Unsigned 12-bit constant
1197 if (auto *C = dyn_cast<ConstantSDNode>(Op))
1198 if (isUInt<12>(C->getZExtValue()))
1199 Ops.push_back(DAG.getTargetConstant(C->getZExtValue(), SDLoc(Op),
1200 Op.getValueType()));
1201 return;
1202
1203 case 'K': // Signed 16-bit constant
1204 if (auto *C = dyn_cast<ConstantSDNode>(Op))
1205 if (isInt<16>(C->getSExtValue()))
1206 Ops.push_back(DAG.getTargetConstant(C->getSExtValue(), SDLoc(Op),
1207 Op.getValueType()));
1208 return;
1209
1210 case 'L': // Signed 20-bit displacement (on all targets we support)
1211 if (auto *C = dyn_cast<ConstantSDNode>(Op))
1212 if (isInt<20>(C->getSExtValue()))
1213 Ops.push_back(DAG.getTargetConstant(C->getSExtValue(), SDLoc(Op),
1214 Op.getValueType()));
1215 return;
1216
1217 case 'M': // 0x7fffffff
1218 if (auto *C = dyn_cast<ConstantSDNode>(Op))
1219 if (C->getZExtValue() == 0x7fffffff)
1220 Ops.push_back(DAG.getTargetConstant(C->getZExtValue(), SDLoc(Op),
1221 Op.getValueType()));
1222 return;
1223 }
1224 }
1225 TargetLowering::LowerAsmOperandForConstraint(Op, Constraint, Ops, DAG);
1226}
1227
1228//===----------------------------------------------------------------------===//
1229// Calling conventions
1230//===----------------------------------------------------------------------===//
1231
1232#include "SystemZGenCallingConv.inc"
1233
1234const MCPhysReg *SystemZTargetLowering::getScratchRegisters(
1235 CallingConv::ID) const {
1236 static const MCPhysReg ScratchRegs[] = { SystemZ::R0D, SystemZ::R1D,
1237 SystemZ::R14D, 0 };
1238 return ScratchRegs;
1239}
1240
1241bool SystemZTargetLowering::allowTruncateForTailCall(Type *FromType,
1242 Type *ToType) const {
1243 return isTruncateFree(FromType, ToType);
1244}
1245
1246bool SystemZTargetLowering::mayBeEmittedAsTailCall(const CallInst *CI) const {
1247 return CI->isTailCall();
1248}
1249
1250// We do not yet support 128-bit single-element vector types. If the user
1251// attempts to use such types as function argument or return type, prefer
1252// to error out instead of emitting code violating the ABI.
1253static void VerifyVectorType(MVT VT, EVT ArgVT) {
1254 if (ArgVT.isVector() && !VT.isVector())
1255 report_fatal_error("Unsupported vector argument or return type");
1256}
1257
1258static void VerifyVectorTypes(const SmallVectorImpl<ISD::InputArg> &Ins) {
1259 for (unsigned i = 0; i < Ins.size(); ++i)
1260 VerifyVectorType(Ins[i].VT, Ins[i].ArgVT);
1261}
1262
1263static void VerifyVectorTypes(const SmallVectorImpl<ISD::OutputArg> &Outs) {
1264 for (unsigned i = 0; i < Outs.size(); ++i)
1265 VerifyVectorType(Outs[i].VT, Outs[i].ArgVT);
1266}
1267
1268// Value is a value that has been passed to us in the location described by VA
1269// (and so has type VA.getLocVT()). Convert Value to VA.getValVT(), chaining
1270// any loads onto Chain.
1271static SDValue convertLocVTToValVT(SelectionDAG &DAG, const SDLoc &DL,
1272 CCValAssign &VA, SDValue Chain,
1273 SDValue Value) {
1274 // If the argument has been promoted from a smaller type, insert an
1275 // assertion to capture this.
1276 if (VA.getLocInfo() == CCValAssign::SExt)
1277 Value = DAG.getNode(ISD::AssertSext, DL, VA.getLocVT(), Value,
1278 DAG.getValueType(VA.getValVT()));
1279 else if (VA.getLocInfo() == CCValAssign::ZExt)
1280 Value = DAG.getNode(ISD::AssertZext, DL, VA.getLocVT(), Value,
1281 DAG.getValueType(VA.getValVT()));
1282
1283 if (VA.isExtInLoc())
1284 Value = DAG.getNode(ISD::TRUNCATE, DL, VA.getValVT(), Value);
1285 else if (VA.getLocInfo() == CCValAssign::BCvt) {
1286 // If this is a short vector argument loaded from the stack,
1287 // extend from i64 to full vector size and then bitcast.
 1288 assert(VA.getLocVT() == MVT::i64);
 1289 assert(VA.getValVT().isVector());
1290 Value = DAG.getBuildVector(MVT::v2i64, DL, {Value, DAG.getUNDEF(MVT::i64)});
1291 Value = DAG.getNode(ISD::BITCAST, DL, VA.getValVT(), Value);
1292 } else
1293 assert(VA.getLocInfo() == CCValAssign::Full && "Unsupported getLocInfo");
1294 return Value;
1295}
1296
1297// Value is a value of type VA.getValVT() that we need to copy into
1298// the location described by VA. Return a copy of Value converted to
1299 // VA.getLocVT(). The caller is responsible for handling indirect values.
1300static SDValue convertValVTToLocVT(SelectionDAG &DAG, const SDLoc &DL,
1301 CCValAssign &VA, SDValue Value) {
1302 switch (VA.getLocInfo()) {
1303 case CCValAssign::SExt:
1304 return DAG.getNode(ISD::SIGN_EXTEND, DL, VA.getLocVT(), Value);
1305 case CCValAssign::ZExt:
1306 return DAG.getNode(ISD::ZERO_EXTEND, DL, VA.getLocVT(), Value);
1307 case CCValAssign::AExt:
1308 return DAG.getNode(ISD::ANY_EXTEND, DL, VA.getLocVT(), Value);
1309 case CCValAssign::BCvt:
1310 // If this is a short vector argument to be stored to the stack,
1311 // bitcast to v2i64 and then extract first element.
1312 assert(VA.getLocVT() == MVT::i64);
1313 assert(VA.getValVT().isVector());
1314 Value = DAG.getNode(ISD::BITCAST, DL, MVT::v2i64, Value);
1315 return DAG.getNode(ISD::EXTRACT_VECTOR_ELT, DL, VA.getLocVT(), Value,
1316 DAG.getConstant(0, DL, MVT::i32));
1317 case CCValAssign::Full:
1318 return Value;
1319 default:
1320 llvm_unreachable("Unhandled getLocInfo()")::llvm::llvm_unreachable_internal("Unhandled getLocInfo()", "/build/llvm-toolchain-snapshot-10~++20200112100611+7fa5290d5bd/llvm/lib/Target/SystemZ/SystemZISelLowering.cpp"
, 1320)
;
1321 }
1322}
1323
1324SDValue SystemZTargetLowering::LowerFormalArguments(
1325 SDValue Chain, CallingConv::ID CallConv, bool IsVarArg,
1326 const SmallVectorImpl<ISD::InputArg> &Ins, const SDLoc &DL,
1327 SelectionDAG &DAG, SmallVectorImpl<SDValue> &InVals) const {
1328 MachineFunction &MF = DAG.getMachineFunction();
1329 MachineFrameInfo &MFI = MF.getFrameInfo();
1330 MachineRegisterInfo &MRI = MF.getRegInfo();
1331 SystemZMachineFunctionInfo *FuncInfo =
1332 MF.getInfo<SystemZMachineFunctionInfo>();
1333 auto *TFL =
1334 static_cast<const SystemZFrameLowering *>(Subtarget.getFrameLowering());
1335 EVT PtrVT = getPointerTy(DAG.getDataLayout());
1336
1337 // Detect unsupported vector argument types.
1338 if (Subtarget.hasVector())
1339 VerifyVectorTypes(Ins);
1340
1341 // Assign locations to all of the incoming arguments.
1342 SmallVector<CCValAssign, 16> ArgLocs;
1343 SystemZCCState CCInfo(CallConv, IsVarArg, MF, ArgLocs, *DAG.getContext());
1344 CCInfo.AnalyzeFormalArguments(Ins, CC_SystemZ);
1345
1346 unsigned NumFixedGPRs = 0;
1347 unsigned NumFixedFPRs = 0;
1348 for (unsigned I = 0, E = ArgLocs.size(); I != E; ++I) {
1349 SDValue ArgValue;
1350 CCValAssign &VA = ArgLocs[I];
1351 EVT LocVT = VA.getLocVT();
1352 if (VA.isRegLoc()) {
1353 // Arguments passed in registers
1354 const TargetRegisterClass *RC;
1355 switch (LocVT.getSimpleVT().SimpleTy) {
1356 default:
1357 // Integers smaller than i64 should be promoted to i64.
1358 llvm_unreachable("Unexpected argument type")::llvm::llvm_unreachable_internal("Unexpected argument type",
"/build/llvm-toolchain-snapshot-10~++20200112100611+7fa5290d5bd/llvm/lib/Target/SystemZ/SystemZISelLowering.cpp"
, 1358)
;
1359 case MVT::i32:
1360 NumFixedGPRs += 1;
1361 RC = &SystemZ::GR32BitRegClass;
1362 break;
1363 case MVT::i64:
1364 NumFixedGPRs += 1;
1365 RC = &SystemZ::GR64BitRegClass;
1366 break;
1367 case MVT::f32:
1368 NumFixedFPRs += 1;
1369 RC = &SystemZ::FP32BitRegClass;
1370 break;
1371 case MVT::f64:
1372 NumFixedFPRs += 1;
1373 RC = &SystemZ::FP64BitRegClass;
1374 break;
1375 case MVT::v16i8:
1376 case MVT::v8i16:
1377 case MVT::v4i32:
1378 case MVT::v2i64:
1379 case MVT::v4f32:
1380 case MVT::v2f64:
1381 RC = &SystemZ::VR128BitRegClass;
1382 break;
1383 }
1384
1385 Register VReg = MRI.createVirtualRegister(RC);
1386 MRI.addLiveIn(VA.getLocReg(), VReg);
1387 ArgValue = DAG.getCopyFromReg(Chain, DL, VReg, LocVT);
1388 } else {
1389 assert(VA.isMemLoc() && "Argument not register or memory");
1390
1391 // Create the frame index object for this incoming parameter.
1392 int FI = MFI.CreateFixedObject(LocVT.getSizeInBits() / 8,
1393 VA.getLocMemOffset(), true);
1394
1395 // Create the SelectionDAG nodes corresponding to a load
1396 // from this parameter. Unpromoted ints and floats are
1397 // passed as right-justified 8-byte values.
1398 SDValue FIN = DAG.getFrameIndex(FI, PtrVT);
1399 if (VA.getLocVT() == MVT::i32 || VA.getLocVT() == MVT::f32)
1400 FIN = DAG.getNode(ISD::ADD, DL, PtrVT, FIN,
1401 DAG.getIntPtrConstant(4, DL));
1402 ArgValue = DAG.getLoad(LocVT, DL, Chain, FIN,
1403 MachinePointerInfo::getFixedStack(MF, FI));
1404 }
1405
1406 // Convert the value of the argument register into the value that's
1407 // being passed.
1408 if (VA.getLocInfo() == CCValAssign::Indirect) {
1409 InVals.push_back(DAG.getLoad(VA.getValVT(), DL, Chain, ArgValue,
1410 MachinePointerInfo()));
1411 // If the original argument was split (e.g. i128), we need
1412 // to load all parts of it here (using the same address).
1413 unsigned ArgIndex = Ins[I].OrigArgIndex;
1414 assert (Ins[I].PartOffset == 0);
1415 while (I + 1 != E && Ins[I + 1].OrigArgIndex == ArgIndex) {
1416 CCValAssign &PartVA = ArgLocs[I + 1];
1417 unsigned PartOffset = Ins[I + 1].PartOffset;
1418 SDValue Address = DAG.getNode(ISD::ADD, DL, PtrVT, ArgValue,
1419 DAG.getIntPtrConstant(PartOffset, DL));
1420 InVals.push_back(DAG.getLoad(PartVA.getValVT(), DL, Chain, Address,
1421 MachinePointerInfo()));
1422 ++I;
1423 }
1424 } else
1425 InVals.push_back(convertLocVTToValVT(DAG, DL, VA, Chain, ArgValue));
1426 }
1427
1428 if (IsVarArg) {
1429 // Save the number of non-varargs registers for later use by va_start, etc.
1430 FuncInfo->setVarArgsFirstGPR(NumFixedGPRs);
1431 FuncInfo->setVarArgsFirstFPR(NumFixedFPRs);
1432
1433 // Likewise the address (in the form of a frame index) of where the
1434 // first stack vararg would be. The 1-byte size here is arbitrary.
1435 int64_t StackSize = CCInfo.getNextStackOffset();
1436 FuncInfo->setVarArgsFrameIndex(MFI.CreateFixedObject(1, StackSize, true));
1437
1438 // ...and a similar frame index for the caller-allocated save area
1439 // that will be used to store the incoming registers.
1440 int64_t RegSaveOffset = -SystemZMC::CallFrameSize;
1441 unsigned RegSaveIndex = MFI.CreateFixedObject(1, RegSaveOffset, true);
1442 FuncInfo->setRegSaveFrameIndex(RegSaveIndex);
1443
1444 // Store the FPR varargs in the reserved frame slots. (We store the
1445 // GPRs as part of the prologue.)
1446 if (NumFixedFPRs < SystemZ::NumArgFPRs) {
1447 SDValue MemOps[SystemZ::NumArgFPRs];
1448 for (unsigned I = NumFixedFPRs; I < SystemZ::NumArgFPRs; ++I) {
1449 unsigned Offset = TFL->getRegSpillOffset(SystemZ::ArgFPRs[I]);
1450 int FI = MFI.CreateFixedObject(8, RegSaveOffset + Offset, true);
1451 SDValue FIN = DAG.getFrameIndex(FI, getPointerTy(DAG.getDataLayout()));
1452 unsigned VReg = MF.addLiveIn(SystemZ::ArgFPRs[I],
1453 &SystemZ::FP64BitRegClass);
1454 SDValue ArgValue = DAG.getCopyFromReg(Chain, DL, VReg, MVT::f64);
1455 MemOps[I] = DAG.getStore(ArgValue.getValue(1), DL, ArgValue, FIN,
1456 MachinePointerInfo::getFixedStack(MF, FI));
1457 }
1458 // Join the stores, which are independent of one another.
1459 Chain = DAG.getNode(ISD::TokenFactor, DL, MVT::Other,
1460 makeArrayRef(&MemOps[NumFixedFPRs],
1461 SystemZ::NumArgFPRs-NumFixedFPRs));
1462 }
1463 }
1464
1465 return Chain;
1466}
1467
1468static bool canUseSiblingCall(const CCState &ArgCCInfo,
1469 SmallVectorImpl<CCValAssign> &ArgLocs,
1470 SmallVectorImpl<ISD::OutputArg> &Outs) {
1471 // Punt if there are any indirect or stack arguments, or if the call
1472 // needs the callee-saved argument register R6, or if the call uses
1473 // the callee-saved register arguments SwiftSelf and SwiftError.
1474 for (unsigned I = 0, E = ArgLocs.size(); I != E; ++I) {
1475 CCValAssign &VA = ArgLocs[I];
1476 if (VA.getLocInfo() == CCValAssign::Indirect)
1477 return false;
1478 if (!VA.isRegLoc())
1479 return false;
1480 Register Reg = VA.getLocReg();
1481 if (Reg == SystemZ::R6H || Reg == SystemZ::R6L || Reg == SystemZ::R6D)
1482 return false;
1483 if (Outs[I].Flags.isSwiftSelf() || Outs[I].Flags.isSwiftError())
1484 return false;
1485 }
1486 return true;
1487}
1488
1489SDValue
1490SystemZTargetLowering::LowerCall(CallLoweringInfo &CLI,
1491 SmallVectorImpl<SDValue> &InVals) const {
1492 SelectionDAG &DAG = CLI.DAG;
1493 SDLoc &DL = CLI.DL;
1494 SmallVectorImpl<ISD::OutputArg> &Outs = CLI.Outs;
1495 SmallVectorImpl<SDValue> &OutVals = CLI.OutVals;
1496 SmallVectorImpl<ISD::InputArg> &Ins = CLI.Ins;
1497 SDValue Chain = CLI.Chain;
1498 SDValue Callee = CLI.Callee;
1499 bool &IsTailCall = CLI.IsTailCall;
1500 CallingConv::ID CallConv = CLI.CallConv;
1501 bool IsVarArg = CLI.IsVarArg;
1502 MachineFunction &MF = DAG.getMachineFunction();
1503 EVT PtrVT = getPointerTy(MF.getDataLayout());
1504
1505 // Detect unsupported vector argument and return types.
1506 if (Subtarget.hasVector()) {
1507 VerifyVectorTypes(Outs);
1508 VerifyVectorTypes(Ins);
1509 }
1510
1511 // Analyze the operands of the call, assigning locations to each operand.
1512 SmallVector<CCValAssign, 16> ArgLocs;
1513 SystemZCCState ArgCCInfo(CallConv, IsVarArg, MF, ArgLocs, *DAG.getContext());
1514 ArgCCInfo.AnalyzeCallOperands(Outs, CC_SystemZ);
1515
1516 // We don't support GuaranteedTailCallOpt, only automatically-detected
1517 // sibling calls.
1518 if (IsTailCall && !canUseSiblingCall(ArgCCInfo, ArgLocs, Outs))
1519 IsTailCall = false;
1520
1521 // Get a count of how many bytes are to be pushed on the stack.
1522 unsigned NumBytes = ArgCCInfo.getNextStackOffset();
1523
1524 // Mark the start of the call.
1525 if (!IsTailCall)
1526 Chain = DAG.getCALLSEQ_START(Chain, NumBytes, 0, DL);
1527
1528 // Copy argument values to their designated locations.
1529 SmallVector<std::pair<unsigned, SDValue>, 9> RegsToPass;
1530 SmallVector<SDValue, 8> MemOpChains;
1531 SDValue StackPtr;
1532 for (unsigned I = 0, E = ArgLocs.size(); I != E; ++I) {
1533 CCValAssign &VA = ArgLocs[I];
1534 SDValue ArgValue = OutVals[I];
1535
1536 if (VA.getLocInfo() == CCValAssign::Indirect) {
1537 // Store the argument in a stack slot and pass its address.
1538 SDValue SpillSlot = DAG.CreateStackTemporary(Outs[I].ArgVT);
1539 int FI = cast<FrameIndexSDNode>(SpillSlot)->getIndex();
1540 MemOpChains.push_back(
1541 DAG.getStore(Chain, DL, ArgValue, SpillSlot,
1542 MachinePointerInfo::getFixedStack(MF, FI)));
1543 // If the original argument was split (e.g. i128), we need
1544 // to store all parts of it here (and pass just one address).
1545 unsigned ArgIndex = Outs[I].OrigArgIndex;
1546 assert (Outs[I].PartOffset == 0);
1547 while (I + 1 != E && Outs[I + 1].OrigArgIndex == ArgIndex) {
1548 SDValue PartValue = OutVals[I + 1];
1549 unsigned PartOffset = Outs[I + 1].PartOffset;
1550 SDValue Address = DAG.getNode(ISD::ADD, DL, PtrVT, SpillSlot,
1551 DAG.getIntPtrConstant(PartOffset, DL));
1552 MemOpChains.push_back(
1553 DAG.getStore(Chain, DL, PartValue, Address,
1554 MachinePointerInfo::getFixedStack(MF, FI)));
1555 ++I;
1556 }
1557 ArgValue = SpillSlot;
1558 } else
1559 ArgValue = convertValVTToLocVT(DAG, DL, VA, ArgValue);
1560
1561 if (VA.isRegLoc())
1562 // Queue up the argument copies and emit them at the end.
1563 RegsToPass.push_back(std::make_pair(VA.getLocReg(), ArgValue));
1564 else {
1565 assert(VA.isMemLoc() && "Argument not register or memory");
1566
1567 // Work out the address of the stack slot. Unpromoted ints and
1568 // floats are passed as right-justified 8-byte values.
1569 if (!StackPtr.getNode())
1570 StackPtr = DAG.getCopyFromReg(Chain, DL, SystemZ::R15D, PtrVT);
1571 unsigned Offset = SystemZMC::CallFrameSize + VA.getLocMemOffset();
1572 if (VA.getLocVT() == MVT::i32 || VA.getLocVT() == MVT::f32)
1573 Offset += 4;
1574 SDValue Address = DAG.getNode(ISD::ADD, DL, PtrVT, StackPtr,
1575 DAG.getIntPtrConstant(Offset, DL));
1576
1577 // Emit the store.
1578 MemOpChains.push_back(
1579 DAG.getStore(Chain, DL, ArgValue, Address, MachinePointerInfo()));
1580 }
1581 }
1582
1583 // Join the stores, which are independent of one another.
1584 if (!MemOpChains.empty())
1585 Chain = DAG.getNode(ISD::TokenFactor, DL, MVT::Other, MemOpChains);
1586
1587 // Accept direct calls by converting symbolic call addresses to the
1588 // associated Target* opcodes. Force %r1 to be used for indirect
1589 // tail calls.
1590 SDValue Glue;
1591 if (auto *G = dyn_cast<GlobalAddressSDNode>(Callee)) {
1592 Callee = DAG.getTargetGlobalAddress(G->getGlobal(), DL, PtrVT);
1593 Callee = DAG.getNode(SystemZISD::PCREL_WRAPPER, DL, PtrVT, Callee);
1594 } else if (auto *E = dyn_cast<ExternalSymbolSDNode>(Callee)) {
1595 Callee = DAG.getTargetExternalSymbol(E->getSymbol(), PtrVT);
1596 Callee = DAG.getNode(SystemZISD::PCREL_WRAPPER, DL, PtrVT, Callee);
1597 } else if (IsTailCall) {
1598 Chain = DAG.getCopyToReg(Chain, DL, SystemZ::R1D, Callee, Glue);
1599 Glue = Chain.getValue(1);
1600 Callee = DAG.getRegister(SystemZ::R1D, Callee.getValueType());
1601 }
1602
1603 // Build a sequence of copy-to-reg nodes, chained and glued together.
1604 for (unsigned I = 0, E = RegsToPass.size(); I != E; ++I) {
1605 Chain = DAG.getCopyToReg(Chain, DL, RegsToPass[I].first,
1606 RegsToPass[I].second, Glue);
1607 Glue = Chain.getValue(1);
1608 }
1609
1610 // The first call operand is the chain and the second is the target address.
1611 SmallVector<SDValue, 8> Ops;
1612 Ops.push_back(Chain);
1613 Ops.push_back(Callee);
1614
1615 // Add argument registers to the end of the list so that they are
1616 // known live into the call.
1617 for (unsigned I = 0, E = RegsToPass.size(); I != E; ++I)
1618 Ops.push_back(DAG.getRegister(RegsToPass[I].first,
1619 RegsToPass[I].second.getValueType()));
1620
1621 // Add a register mask operand representing the call-preserved registers.
1622 const TargetRegisterInfo *TRI = Subtarget.getRegisterInfo();
1623 const uint32_t *Mask = TRI->getCallPreservedMask(MF, CallConv);
1624 assert(Mask && "Missing call preserved mask for calling convention");
1625 Ops.push_back(DAG.getRegisterMask(Mask));
1626
1627 // Glue the call to the argument copies, if any.
1628 if (Glue.getNode())
1629 Ops.push_back(Glue);
1630
1631 // Emit the call.
1632 SDVTList NodeTys = DAG.getVTList(MVT::Other, MVT::Glue);
1633 if (IsTailCall)
1634 return DAG.getNode(SystemZISD::SIBCALL, DL, NodeTys, Ops);
1635 Chain = DAG.getNode(SystemZISD::CALL, DL, NodeTys, Ops);
1636 Glue = Chain.getValue(1);
1637
1638 // Mark the end of the call, which is glued to the call itself.
1639 Chain = DAG.getCALLSEQ_END(Chain,
1640 DAG.getConstant(NumBytes, DL, PtrVT, true),
1641 DAG.getConstant(0, DL, PtrVT, true),
1642 Glue, DL);
1643 Glue = Chain.getValue(1);
1644
1645 // Assign locations to each value returned by this call.
1646 SmallVector<CCValAssign, 16> RetLocs;
1647 CCState RetCCInfo(CallConv, IsVarArg, MF, RetLocs, *DAG.getContext());
1648 RetCCInfo.AnalyzeCallResult(Ins, RetCC_SystemZ);
1649
1650 // Copy all of the result registers out of their specified physreg.
1651 for (unsigned I = 0, E = RetLocs.size(); I != E; ++I) {
1652 CCValAssign &VA = RetLocs[I];
1653
1654 // Copy the value out, gluing the copy to the end of the call sequence.
1655 SDValue RetValue = DAG.getCopyFromReg(Chain, DL, VA.getLocReg(),
1656 VA.getLocVT(), Glue);
1657 Chain = RetValue.getValue(1);
1658 Glue = RetValue.getValue(2);
1659
1660 // Convert the value of the return register into the value that's
1661 // being returned.
1662 InVals.push_back(convertLocVTToValVT(DAG, DL, VA, Chain, RetValue));
1663 }
1664
1665 return Chain;
1666}
1667
1668bool SystemZTargetLowering::
1669CanLowerReturn(CallingConv::ID CallConv,
1670 MachineFunction &MF, bool isVarArg,
1671 const SmallVectorImpl<ISD::OutputArg> &Outs,
1672 LLVMContext &Context) const {
1673 // Detect unsupported vector return types.
1674 if (Subtarget.hasVector())
1675 VerifyVectorTypes(Outs);
1676
1677 // Special case that we cannot easily detect in RetCC_SystemZ since
1678 // i128 is not a legal type.
1679 for (auto &Out : Outs)
1680 if (Out.ArgVT == MVT::i128)
1681 return false;
1682
1683 SmallVector<CCValAssign, 16> RetLocs;
1684 CCState RetCCInfo(CallConv, isVarArg, MF, RetLocs, Context);
1685 return RetCCInfo.CheckReturn(Outs, RetCC_SystemZ);
1686}
1687
1688SDValue
1689SystemZTargetLowering::LowerReturn(SDValue Chain, CallingConv::ID CallConv,
1690 bool IsVarArg,
1691 const SmallVectorImpl<ISD::OutputArg> &Outs,
1692 const SmallVectorImpl<SDValue> &OutVals,
1693 const SDLoc &DL, SelectionDAG &DAG) const {
1694 MachineFunction &MF = DAG.getMachineFunction();
1695
1696 // Detect unsupported vector return types.
1697 if (Subtarget.hasVector())
1698 VerifyVectorTypes(Outs);
1699
1700 // Assign locations to each returned value.
1701 SmallVector<CCValAssign, 16> RetLocs;
1702 CCState RetCCInfo(CallConv, IsVarArg, MF, RetLocs, *DAG.getContext());
1703 RetCCInfo.AnalyzeReturn(Outs, RetCC_SystemZ);
1704
1705 // Quick exit for void returns
1706 if (RetLocs.empty())
1707 return DAG.getNode(SystemZISD::RET_FLAG, DL, MVT::Other, Chain);
1708
1709 if (CallConv == CallingConv::GHC)
1710 report_fatal_error("GHC functions return void only");
1711
1712 // Copy the result values into the output registers.
1713 SDValue Glue;
1714 SmallVector<SDValue, 4> RetOps;
1715 RetOps.push_back(Chain);
1716 for (unsigned I = 0, E = RetLocs.size(); I != E; ++I) {
1717 CCValAssign &VA = RetLocs[I];
1718 SDValue RetValue = OutVals[I];
1719
1720 // Make the return register live on exit.
1721 assert(VA.isRegLoc() && "Can only return in registers!");
1722
1723 // Promote the value as required.
1724 RetValue = convertValVTToLocVT(DAG, DL, VA, RetValue);
1725
1726 // Chain and glue the copies together.
1727 Register Reg = VA.getLocReg();
1728 Chain = DAG.getCopyToReg(Chain, DL, Reg, RetValue, Glue);
1729 Glue = Chain.getValue(1);
1730 RetOps.push_back(DAG.getRegister(Reg, VA.getLocVT()));
1731 }
1732
1733 // Update chain and glue.
1734 RetOps[0] = Chain;
1735 if (Glue.getNode())
1736 RetOps.push_back(Glue);
1737
1738 return DAG.getNode(SystemZISD::RET_FLAG, DL, MVT::Other, RetOps);
1739}
1740
1741// Return true if Op is an intrinsic node with chain that returns the CC value
1742// as its only (other) argument. Provide the associated SystemZISD opcode and
1743// the mask of valid CC values if so.
1744static bool isIntrinsicWithCCAndChain(SDValue Op, unsigned &Opcode,
1745 unsigned &CCValid) {
1746 unsigned Id = cast<ConstantSDNode>(Op.getOperand(1))->getZExtValue();
1747 switch (Id) {
1748 case Intrinsic::s390_tbegin:
1749 Opcode = SystemZISD::TBEGIN;
1750 CCValid = SystemZ::CCMASK_TBEGIN;
1751 return true;
1752
1753 case Intrinsic::s390_tbegin_nofloat:
1754 Opcode = SystemZISD::TBEGIN_NOFLOAT;
1755 CCValid = SystemZ::CCMASK_TBEGIN;
1756 return true;
1757
1758 case Intrinsic::s390_tend:
1759 Opcode = SystemZISD::TEND;
1760 CCValid = SystemZ::CCMASK_TEND;
1761 return true;
1762
1763 default:
1764 return false;
1765 }
1766}
1767
1768// Return true if Op is an intrinsic node without chain that returns the
1769// CC value as its final argument. Provide the associated SystemZISD
1770// opcode and the mask of valid CC values if so.
1771static bool isIntrinsicWithCC(SDValue Op, unsigned &Opcode, unsigned &CCValid) {
1772 unsigned Id = cast<ConstantSDNode>(Op.getOperand(0))->getZExtValue();
1773 switch (Id) {
1774 case Intrinsic::s390_vpkshs:
1775 case Intrinsic::s390_vpksfs:
1776 case Intrinsic::s390_vpksgs:
1777 Opcode = SystemZISD::PACKS_CC;
1778 CCValid = SystemZ::CCMASK_VCMP;
1779 return true;
1780
1781 case Intrinsic::s390_vpklshs:
1782 case Intrinsic::s390_vpklsfs:
1783 case Intrinsic::s390_vpklsgs:
1784 Opcode = SystemZISD::PACKLS_CC;
1785 CCValid = SystemZ::CCMASK_VCMP;
1786 return true;
1787
1788 case Intrinsic::s390_vceqbs:
1789 case Intrinsic::s390_vceqhs:
1790 case Intrinsic::s390_vceqfs:
1791 case Intrinsic::s390_vceqgs:
1792 Opcode = SystemZISD::VICMPES;
1793 CCValid = SystemZ::CCMASK_VCMP;
1794 return true;
1795
1796 case Intrinsic::s390_vchbs:
1797 case Intrinsic::s390_vchhs:
1798 case Intrinsic::s390_vchfs:
1799 case Intrinsic::s390_vchgs:
1800 Opcode = SystemZISD::VICMPHS;
1801 CCValid = SystemZ::CCMASK_VCMP;
1802 return true;
1803
1804 case Intrinsic::s390_vchlbs:
1805 case Intrinsic::s390_vchlhs:
1806 case Intrinsic::s390_vchlfs:
1807 case Intrinsic::s390_vchlgs:
1808 Opcode = SystemZISD::VICMPHLS;
1809 CCValid = SystemZ::CCMASK_VCMP;
1810 return true;
1811
1812 case Intrinsic::s390_vtm:
1813 Opcode = SystemZISD::VTM;
1814 CCValid = SystemZ::CCMASK_VCMP;
1815 return true;
1816
1817 case Intrinsic::s390_vfaebs:
1818 case Intrinsic::s390_vfaehs:
1819 case Intrinsic::s390_vfaefs:
1820 Opcode = SystemZISD::VFAE_CC;
1821 CCValid = SystemZ::CCMASK_ANY;
1822 return true;
1823
1824 case Intrinsic::s390_vfaezbs:
1825 case Intrinsic::s390_vfaezhs:
1826 case Intrinsic::s390_vfaezfs:
1827 Opcode = SystemZISD::VFAEZ_CC;
1828 CCValid = SystemZ::CCMASK_ANY;
1829 return true;
1830
1831 case Intrinsic::s390_vfeebs:
1832 case Intrinsic::s390_vfeehs:
1833 case Intrinsic::s390_vfeefs:
1834 Opcode = SystemZISD::VFEE_CC;
1835 CCValid = SystemZ::CCMASK_ANY;
1836 return true;
1837
1838 case Intrinsic::s390_vfeezbs:
1839 case Intrinsic::s390_vfeezhs:
1840 case Intrinsic::s390_vfeezfs:
1841 Opcode = SystemZISD::VFEEZ_CC;
1842 CCValid = SystemZ::CCMASK_ANY;
1843 return true;
1844
1845 case Intrinsic::s390_vfenebs:
1846 case Intrinsic::s390_vfenehs:
1847 case Intrinsic::s390_vfenefs:
1848 Opcode = SystemZISD::VFENE_CC;
1849 CCValid = SystemZ::CCMASK_ANY;
1850 return true;
1851
1852 case Intrinsic::s390_vfenezbs:
1853 case Intrinsic::s390_vfenezhs:
1854 case Intrinsic::s390_vfenezfs:
1855 Opcode = SystemZISD::VFENEZ_CC;
1856 CCValid = SystemZ::CCMASK_ANY;
1857 return true;
1858
1859 case Intrinsic::s390_vistrbs:
1860 case Intrinsic::s390_vistrhs:
1861 case Intrinsic::s390_vistrfs:
1862 Opcode = SystemZISD::VISTR_CC;
1863 CCValid = SystemZ::CCMASK_0 | SystemZ::CCMASK_3;
1864 return true;
1865
1866 case Intrinsic::s390_vstrcbs:
1867 case Intrinsic::s390_vstrchs:
1868 case Intrinsic::s390_vstrcfs:
1869 Opcode = SystemZISD::VSTRC_CC;
1870 CCValid = SystemZ::CCMASK_ANY;
1871 return true;
1872
1873 case Intrinsic::s390_vstrczbs:
1874 case Intrinsic::s390_vstrczhs:
1875 case Intrinsic::s390_vstrczfs:
1876 Opcode = SystemZISD::VSTRCZ_CC;
1877 CCValid = SystemZ::CCMASK_ANY;
1878 return true;
1879
1880 case Intrinsic::s390_vstrsb:
1881 case Intrinsic::s390_vstrsh:
1882 case Intrinsic::s390_vstrsf:
1883 Opcode = SystemZISD::VSTRS_CC;
1884 CCValid = SystemZ::CCMASK_ANY;
1885 return true;
1886
1887 case Intrinsic::s390_vstrszb:
1888 case Intrinsic::s390_vstrszh:
1889 case Intrinsic::s390_vstrszf:
1890 Opcode = SystemZISD::VSTRSZ_CC;
1891 CCValid = SystemZ::CCMASK_ANY;
1892 return true;
1893
1894 case Intrinsic::s390_vfcedbs:
1895 case Intrinsic::s390_vfcesbs:
1896 Opcode = SystemZISD::VFCMPES;
1897 CCValid = SystemZ::CCMASK_VCMP;
1898 return true;
1899
1900 case Intrinsic::s390_vfchdbs:
1901 case Intrinsic::s390_vfchsbs:
1902 Opcode = SystemZISD::VFCMPHS;
1903 CCValid = SystemZ::CCMASK_VCMP;
1904 return true;
1905
1906 case Intrinsic::s390_vfchedbs:
1907 case Intrinsic::s390_vfchesbs:
1908 Opcode = SystemZISD::VFCMPHES;
1909 CCValid = SystemZ::CCMASK_VCMP;
1910 return true;
1911
1912 case Intrinsic::s390_vftcidb:
1913 case Intrinsic::s390_vftcisb:
1914 Opcode = SystemZISD::VFTCI;
1915 CCValid = SystemZ::CCMASK_VCMP;
1916 return true;
1917
1918 case Intrinsic::s390_tdc:
1919 Opcode = SystemZISD::TDC;
1920 CCValid = SystemZ::CCMASK_TDC;
1921 return true;
1922
1923 default:
1924 return false;
1925 }
1926}
1927
1928// Emit an intrinsic with chain and an explicit CC register result.
1929static SDNode *emitIntrinsicWithCCAndChain(SelectionDAG &DAG, SDValue Op,
1930 unsigned Opcode) {
1931 // Copy all operands except the intrinsic ID.
1932 unsigned NumOps = Op.getNumOperands();
1933 SmallVector<SDValue, 6> Ops;
1934 Ops.reserve(NumOps - 1);
1935 Ops.push_back(Op.getOperand(0));
1936 for (unsigned I = 2; I < NumOps; ++I)
1937 Ops.push_back(Op.getOperand(I));
1938
1939 assert(Op->getNumValues() == 2 && "Expected only CC result and chain")((Op->getNumValues() == 2 && "Expected only CC result and chain"
) ? static_cast<void> (0) : __assert_fail ("Op->getNumValues() == 2 && \"Expected only CC result and chain\""
, "/build/llvm-toolchain-snapshot-10~++20200112100611+7fa5290d5bd/llvm/lib/Target/SystemZ/SystemZISelLowering.cpp"
, 1939, __PRETTY_FUNCTION__))
;
1940 SDVTList RawVTs = DAG.getVTList(MVT::i32, MVT::Other);
1941 SDValue Intr = DAG.getNode(Opcode, SDLoc(Op), RawVTs, Ops);
1942 SDValue OldChain = SDValue(Op.getNode(), 1);
1943 SDValue NewChain = SDValue(Intr.getNode(), 1);
1944 DAG.ReplaceAllUsesOfValueWith(OldChain, NewChain);
1945 return Intr.getNode();
1946}
1947
1948// Emit an intrinsic with an explicit CC register result.
1949static SDNode *emitIntrinsicWithCC(SelectionDAG &DAG, SDValue Op,
1950 unsigned Opcode) {
1951 // Copy all operands except the intrinsic ID.
1952 unsigned NumOps = Op.getNumOperands();
1953 SmallVector<SDValue, 6> Ops;
1954 Ops.reserve(NumOps - 1);
1955 for (unsigned I = 1; I < NumOps; ++I)
1956 Ops.push_back(Op.getOperand(I));
1957
1958 SDValue Intr = DAG.getNode(Opcode, SDLoc(Op), Op->getVTList(), Ops);
1959 return Intr.getNode();
1960}
1961
1962// CC is a comparison that will be implemented using an integer or
1963// floating-point comparison. Return the condition code mask for
1964// a branch on true. In the integer case, CCMASK_CMP_UO is set for
1965// unsigned comparisons and clear for signed ones. In the floating-point
1966// case, CCMASK_CMP_UO has its normal mask meaning (unordered).
1967static unsigned CCMaskForCondCode(ISD::CondCode CC) {
1968#define CONV(X) \
1969 case ISD::SET##X: return SystemZ::CCMASK_CMP_##X; \
1970 case ISD::SETO##X: return SystemZ::CCMASK_CMP_##X; \
1971 case ISD::SETU##X: return SystemZ::CCMASK_CMP_UO | SystemZ::CCMASK_CMP_##X
1972
1973 switch (CC) {
1974 default:
1975 llvm_unreachable("Invalid integer condition!")::llvm::llvm_unreachable_internal("Invalid integer condition!"
, "/build/llvm-toolchain-snapshot-10~++20200112100611+7fa5290d5bd/llvm/lib/Target/SystemZ/SystemZISelLowering.cpp"
, 1975)
;
1976
1977 CONV(EQ);
1978 CONV(NE);
1979 CONV(GT);
1980 CONV(GE);
1981 CONV(LT);
1982 CONV(LE);
1983
1984 case ISD::SETO: return SystemZ::CCMASK_CMP_O;
1985 case ISD::SETUO: return SystemZ::CCMASK_CMP_UO;
1986 }
1987#undef CONV
1988}
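// Worked example (editorial sketch, not part of the upstream source): for an
// unsigned "less than" (ISD::SETULT), CONV(LT) expands to
// CCMASK_CMP_UO | CCMASK_CMP_LT, so the branch is also taken when the operands
// are unordered, while ISD::SETLT and ISD::SETOLT both expand to just
// CCMASK_CMP_LT. Assuming the masks are disjoint single bits (the values below
// are hypothetical and only illustrate the mask algebra):
//
//   constexpr unsigned CmpLT = 1u << 2, CmpUO = 1u << 0;   // hypothetical
//   static_assert((CmpUO | CmpLT) == 0b101u, "SETULT tests LT or unordered");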
1989
1990// If C can be converted to a comparison against zero, adjust the operands
1991// as necessary.
1992static void adjustZeroCmp(SelectionDAG &DAG, const SDLoc &DL, Comparison &C) {
1993 if (C.ICmpType == SystemZICMP::UnsignedOnly)
1994 return;
1995
1996 auto *ConstOp1 = dyn_cast<ConstantSDNode>(C.Op1.getNode());
1997 if (!ConstOp1)
1998 return;
1999
2000 int64_t Value = ConstOp1->getSExtValue();
2001 if ((Value == -1 && C.CCMask == SystemZ::CCMASK_CMP_GT) ||
2002 (Value == -1 && C.CCMask == SystemZ::CCMASK_CMP_LE) ||
2003 (Value == 1 && C.CCMask == SystemZ::CCMASK_CMP_LT) ||
2004 (Value == 1 && C.CCMask == SystemZ::CCMASK_CMP_GE)) {
2005 C.CCMask ^= SystemZ::CCMASK_CMP_EQ;
2006 C.Op1 = DAG.getConstant(0, DL, C.Op1.getValueType());
2007 }
2008}
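// Editorial example (illustrative, not in the source): a signed test
// "x > -1" is rewritten here as "x >= 0".  XOR-ing CCMASK_CMP_EQ into
// CCMASK_CMP_GT adds the equal case (the GT and EQ mask bits are disjoint, so
// the XOR acts as an OR), and Op1 becomes the constant 0, which keeps the many
// later zero-comparison optimizations applicable.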
2009
2010// If a comparison described by C is suitable for CLI(Y), CHHSI or CLHHSI,
2011// adjust the operands as necessary.
2012static void adjustSubwordCmp(SelectionDAG &DAG, const SDLoc &DL,
2013 Comparison &C) {
2014 // For us to make any changes, it must be a comparison between a single-use
2015 // load and a constant.
2016 if (!C.Op0.hasOneUse() ||
2017 C.Op0.getOpcode() != ISD::LOAD ||
2018 C.Op1.getOpcode() != ISD::Constant)
2019 return;
2020
2021 // We must have an 8- or 16-bit load.
2022 auto *Load = cast<LoadSDNode>(C.Op0);
2023 unsigned NumBits = Load->getMemoryVT().getStoreSizeInBits();
2024 if (NumBits != 8 && NumBits != 16)
2025 return;
2026
2027 // The load must be an extending one and the constant must be within the
2028 // range of the unextended value.
2029 auto *ConstOp1 = cast<ConstantSDNode>(C.Op1);
2030 uint64_t Value = ConstOp1->getZExtValue();
2031 uint64_t Mask = (1 << NumBits) - 1;
2032 if (Load->getExtensionType() == ISD::SEXTLOAD) {
2033 // Make sure that ConstOp1 is in range of C.Op0.
2034 int64_t SignedValue = ConstOp1->getSExtValue();
2035 if (uint64_t(SignedValue) + (uint64_t(1) << (NumBits - 1)) > Mask)
2036 return;
2037 if (C.ICmpType != SystemZICMP::SignedOnly) {
2038 // Unsigned comparison between two sign-extended values is equivalent
2039 // to unsigned comparison between two zero-extended values.
2040 Value &= Mask;
2041 } else if (NumBits == 8) {
2042 // Try to treat the comparison as unsigned, so that we can use CLI.
2043 // Adjust CCMask and Value as necessary.
2044 if (Value == 0 && C.CCMask == SystemZ::CCMASK_CMP_LT)
2045 // Test whether the high bit of the byte is set.
2046 Value = 127, C.CCMask = SystemZ::CCMASK_CMP_GT;
2047 else if (Value == 0 && C.CCMask == SystemZ::CCMASK_CMP_GE)
2048 // Test whether the high bit of the byte is clear.
2049 Value = 128, C.CCMask = SystemZ::CCMASK_CMP_LT;
2050 else
2051 // No instruction exists for this combination.
2052 return;
2053 C.ICmpType = SystemZICMP::UnsignedOnly;
2054 }
2055 } else if (Load->getExtensionType() == ISD::ZEXTLOAD) {
2056 if (Value > Mask)
2057 return;
2058 // If the constant is in range, we can use any comparison.
2059 C.ICmpType = SystemZICMP::Any;
2060 } else
2061 return;
2062
2063 // Make sure that the first operand is an i32 of the right extension type.
2064 ISD::LoadExtType ExtType = (C.ICmpType == SystemZICMP::SignedOnly ?
2065 ISD::SEXTLOAD :
2066 ISD::ZEXTLOAD);
2067 if (C.Op0.getValueType() != MVT::i32 ||
2068 Load->getExtensionType() != ExtType) {
2069 C.Op0 = DAG.getExtLoad(ExtType, SDLoc(Load), MVT::i32, Load->getChain(),
2070 Load->getBasePtr(), Load->getPointerInfo(),
2071 Load->getMemoryVT(), Load->getAlignment(),
2072 Load->getMemOperand()->getFlags());
2073 // Update the chain uses.
2074 DAG.ReplaceAllUsesOfValueWith(SDValue(Load, 1), C.Op0.getValue(1));
2075 }
2076
2077 // Make sure that the second operand is an i32 with the right value.
2078 if (C.Op1.getValueType() != MVT::i32 ||
2079 Value != ConstOp1->getZExtValue())
2080 C.Op1 = DAG.getConstant(Value, DL, MVT::i32);
2081}
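// Worked example (editorial sketch): with an 8-bit sign-extending load,
// NumBits == 8 and Mask == 255, so the admissible signed constants are
// -128..127 and the range check above reads
// uint64_t(SignedValue) + 128 > 255.  For the unsigned CLI rewrite, a signed
// "x < 0" on the loaded byte becomes an unsigned "x > 127" (Value = 127,
// CCMASK_CMP_GT) and "x >= 0" becomes an unsigned "x < 128" (Value = 128,
// CCMASK_CMP_LT); both forms test only the sign bit of the byte.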
2082
2083// Return true if Op is either an unextended load, or a load suitable
2084// for integer register-memory comparisons of type ICmpType.
2085static bool isNaturalMemoryOperand(SDValue Op, unsigned ICmpType) {
2086 auto *Load = dyn_cast<LoadSDNode>(Op.getNode());
2087 if (Load) {
2088 // There are no instructions to compare a register with a memory byte.
2089 if (Load->getMemoryVT() == MVT::i8)
2090 return false;
2091 // Otherwise decide on extension type.
2092 switch (Load->getExtensionType()) {
2093 case ISD::NON_EXTLOAD:
2094 return true;
2095 case ISD::SEXTLOAD:
2096 return ICmpType != SystemZICMP::UnsignedOnly;
2097 case ISD::ZEXTLOAD:
2098 return ICmpType != SystemZICMP::SignedOnly;
2099 default:
2100 break;
2101 }
2102 }
2103 return false;
2104}
2105
2106// Return true if it is better to swap the operands of C.
2107static bool shouldSwapCmpOperands(const Comparison &C) {
2108 // Leave f128 comparisons alone, since they have no memory forms.
2109 if (C.Op0.getValueType() == MVT::f128)
2110 return false;
2111
2112 // Always keep a floating-point constant second, since comparisons with
2113 // zero can use LOAD TEST and comparisons with other constants make a
2114 // natural memory operand.
2115 if (isa<ConstantFPSDNode>(C.Op1))
2116 return false;
2117
2118 // Never swap comparisons with zero since there are many ways to optimize
2119 // those later.
2120 auto *ConstOp1 = dyn_cast<ConstantSDNode>(C.Op1);
2121 if (ConstOp1 && ConstOp1->getZExtValue() == 0)
2122 return false;
2123
2124 // Also keep natural memory operands second if the loaded value is
2125 // only used here. Several comparisons have memory forms.
2126 if (isNaturalMemoryOperand(C.Op1, C.ICmpType) && C.Op1.hasOneUse())
2127 return false;
2128
2129 // Look for cases where Cmp0 is a single-use load and Cmp1 isn't.
2130 // In that case we generally prefer the memory to be second.
2131 if (isNaturalMemoryOperand(C.Op0, C.ICmpType) && C.Op0.hasOneUse()) {
2132 // The only exceptions are when the second operand is a constant and
2133 // we can use things like CHHSI.
2134 if (!ConstOp1)
2135 return true;
2136 // The unsigned memory-immediate instructions can handle 16-bit
2137 // unsigned integers.
2138 if (C.ICmpType != SystemZICMP::SignedOnly &&
2139 isUInt<16>(ConstOp1->getZExtValue()))
2140 return false;
2141 // The signed memory-immediate instructions can handle 16-bit
2142 // signed integers.
2143 if (C.ICmpType != SystemZICMP::UnsignedOnly &&
2144 isInt<16>(ConstOp1->getSExtValue()))
2145 return false;
2146 return true;
2147 }
2148
2149 // Try to promote the use of CGFR and CLGFR.
2150 unsigned Opcode0 = C.Op0.getOpcode();
2151 if (C.ICmpType != SystemZICMP::UnsignedOnly && Opcode0 == ISD::SIGN_EXTEND)
2152 return true;
2153 if (C.ICmpType != SystemZICMP::SignedOnly && Opcode0 == ISD::ZERO_EXTEND)
2154 return true;
2155 if (C.ICmpType != SystemZICMP::SignedOnly &&
2156 Opcode0 == ISD::AND &&
2157 C.Op0.getOperand(1).getOpcode() == ISD::Constant &&
2158 cast<ConstantSDNode>(C.Op0.getOperand(1))->getZExtValue() == 0xffffffff)
2159 return true;
2160
2161 return false;
2162}
2163
2164// Return a version of comparison CC mask CCMask in which the LT and GT
2165// actions are swapped.
2166static unsigned reverseCCMask(unsigned CCMask) {
2167 return ((CCMask & SystemZ::CCMASK_CMP_EQ) |
2168 (CCMask & SystemZ::CCMASK_CMP_GT ? SystemZ::CCMASK_CMP_LT : 0) |
2169 (CCMask & SystemZ::CCMASK_CMP_LT ? SystemZ::CCMASK_CMP_GT : 0) |
2170 (CCMask & SystemZ::CCMASK_CMP_UO));
2171}
2172
2173// Check whether C tests for equality between X and Y and whether X - Y
2174// or Y - X is also computed. In that case it's better to compare the
2175// result of the subtraction against zero.
2176static void adjustForSubtraction(SelectionDAG &DAG, const SDLoc &DL,
2177 Comparison &C) {
2178 if (C.CCMask == SystemZ::CCMASK_CMP_EQ ||
2179 C.CCMask == SystemZ::CCMASK_CMP_NE) {
2180 for (auto I = C.Op0->use_begin(), E = C.Op0->use_end(); I != E; ++I) {
2181 SDNode *N = *I;
2182 if (N->getOpcode() == ISD::SUB &&
2183 ((N->getOperand(0) == C.Op0 && N->getOperand(1) == C.Op1) ||
2184 (N->getOperand(0) == C.Op1 && N->getOperand(1) == C.Op0))) {
2185 C.Op0 = SDValue(N, 0);
2186 C.Op1 = DAG.getConstant(0, DL, N->getValueType(0));
2187 return;
2188 }
2189 }
2190 }
2191}
2192
2193// Check whether C compares a floating-point value with zero and if that
2194// floating-point value is also negated. In this case we can use the
2195// negation to set CC, so avoiding separate LOAD AND TEST and
2196// LOAD (NEGATIVE/COMPLEMENT) instructions.
2197static void adjustForFNeg(Comparison &C) {
2198 // This optimization is invalid for strict comparisons, since FNEG
2199 // does not raise any exceptions.
2200 if (C.Chain)
2201 return;
2202 auto *C1 = dyn_cast<ConstantFPSDNode>(C.Op1);
2203 if (C1 && C1->isZero()) {
2204 for (auto I = C.Op0->use_begin(), E = C.Op0->use_end(); I != E; ++I) {
2205 SDNode *N = *I;
2206 if (N->getOpcode() == ISD::FNEG) {
2207 C.Op0 = SDValue(N, 0);
2208 C.CCMask = reverseCCMask(C.CCMask);
2209 return;
2210 }
2211 }
2212 }
2213}
2214
2215// Check whether C compares (shl X, 32) with 0 and whether X is
2216// also sign-extended. In that case it is better to test the result
2217// of the sign extension using LTGFR.
2218//
2219// This case is important because InstCombine transforms a comparison
2220// with (sext (trunc X)) into a comparison with (shl X, 32).
2221static void adjustForLTGFR(Comparison &C) {
2222 // Check for a comparison between (shl X, 32) and 0.
2223 if (C.Op0.getOpcode() == ISD::SHL &&
2224 C.Op0.getValueType() == MVT::i64 &&
2225 C.Op1.getOpcode() == ISD::Constant &&
2226 cast<ConstantSDNode>(C.Op1)->getZExtValue() == 0) {
2227 auto *C1 = dyn_cast<ConstantSDNode>(C.Op0.getOperand(1));
2228 if (C1 && C1->getZExtValue() == 32) {
2229 SDValue ShlOp0 = C.Op0.getOperand(0);
2230 // See whether X has any SIGN_EXTEND_INREG uses.
2231 for (auto I = ShlOp0->use_begin(), E = ShlOp0->use_end(); I != E; ++I) {
2232 SDNode *N = *I;
2233 if (N->getOpcode() == ISD::SIGN_EXTEND_INREG &&
2234 cast<VTSDNode>(N->getOperand(1))->getVT() == MVT::i32) {
2235 C.Op0 = SDValue(N, 0);
2236 return;
2237 }
2238 }
2239 }
2240 }
2241}
2242
2243// If C compares the truncation of an extending load, try to compare
2244// the untruncated value instead. This exposes more opportunities to
2245// reuse CC.
2246static void adjustICmpTruncate(SelectionDAG &DAG, const SDLoc &DL,
2247 Comparison &C) {
2248 if (C.Op0.getOpcode() == ISD::TRUNCATE &&
2249 C.Op0.getOperand(0).getOpcode() == ISD::LOAD &&
2250 C.Op1.getOpcode() == ISD::Constant &&
2251 cast<ConstantSDNode>(C.Op1)->getZExtValue() == 0) {
2252 auto *L = cast<LoadSDNode>(C.Op0.getOperand(0));
2253 if (L->getMemoryVT().getStoreSizeInBits() <= C.Op0.getValueSizeInBits()) {
2254 unsigned Type = L->getExtensionType();
2255 if ((Type == ISD::ZEXTLOAD && C.ICmpType != SystemZICMP::SignedOnly) ||
2256 (Type == ISD::SEXTLOAD && C.ICmpType != SystemZICMP::UnsignedOnly)) {
2257 C.Op0 = C.Op0.getOperand(0);
2258 C.Op1 = DAG.getConstant(0, DL, C.Op0.getValueType());
2259 }
2260 }
2261 }
2262}
2263
2264// Return true if shift operation N has an in-range constant shift value.
2265// Store it in ShiftVal if so.
2266static bool isSimpleShift(SDValue N, unsigned &ShiftVal) {
2267 auto *Shift = dyn_cast<ConstantSDNode>(N.getOperand(1));
2268 if (!Shift)
2269 return false;
2270
2271 uint64_t Amount = Shift->getZExtValue();
2272 if (Amount >= N.getValueSizeInBits())
2273 return false;
2274
2275 ShiftVal = Amount;
2276 return true;
2277}
2278
2279// Check whether an AND with Mask is suitable for a TEST UNDER MASK
2280// instruction and whether the CC value is descriptive enough to handle
2281// a comparison of type Opcode between the AND result and CmpVal.
2282// CCMask says which comparison result is being tested and BitSize is
2283// the number of bits in the operands. If TEST UNDER MASK can be used,
2284// return the corresponding CC mask, otherwise return 0.
2285static unsigned getTestUnderMaskCond(unsigned BitSize, unsigned CCMask,
2286 uint64_t Mask, uint64_t CmpVal,
2287 unsigned ICmpType) {
2288 assert(Mask != 0 && "ANDs with zero should have been removed by now");
2289
2290 // Check whether the mask is suitable for TMHH, TMHL, TMLH or TMLL.
2291 if (!SystemZ::isImmLL(Mask) && !SystemZ::isImmLH(Mask) &&
2292 !SystemZ::isImmHL(Mask) && !SystemZ::isImmHH(Mask))
2293 return 0;
2294
2295 // Work out the masks for the lowest and highest bits.
2296 unsigned HighShift = 63 - countLeadingZeros(Mask);
2297 uint64_t High = uint64_t(1) << HighShift;
2298 uint64_t Low = uint64_t(1) << countTrailingZeros(Mask);
2299
2300 // Signed ordered comparisons are effectively unsigned if the sign
2301 // bit is dropped.
2302 bool EffectivelyUnsigned = (ICmpType != SystemZICMP::SignedOnly);
2303
2304 // Check for equality comparisons with 0, or the equivalent.
2305 if (CmpVal == 0) {
2306 if (CCMask == SystemZ::CCMASK_CMP_EQ)
2307 return SystemZ::CCMASK_TM_ALL_0;
2308 if (CCMask == SystemZ::CCMASK_CMP_NE)
2309 return SystemZ::CCMASK_TM_SOME_1;
2310 }
2311 if (EffectivelyUnsigned && CmpVal > 0 && CmpVal <= Low) {
2312 if (CCMask == SystemZ::CCMASK_CMP_LT)
2313 return SystemZ::CCMASK_TM_ALL_0;
2314 if (CCMask == SystemZ::CCMASK_CMP_GE)
2315 return SystemZ::CCMASK_TM_SOME_1;
2316 }
2317 if (EffectivelyUnsigned && CmpVal < Low) {
2318 if (CCMask == SystemZ::CCMASK_CMP_LE)
2319 return SystemZ::CCMASK_TM_ALL_0;
2320 if (CCMask == SystemZ::CCMASK_CMP_GT)
2321 return SystemZ::CCMASK_TM_SOME_1;
2322 }
2323
2324 // Check for equality comparisons with the mask, or the equivalent.
2325 if (CmpVal == Mask) {
2326 if (CCMask == SystemZ::CCMASK_CMP_EQ)
2327 return SystemZ::CCMASK_TM_ALL_1;
2328 if (CCMask == SystemZ::CCMASK_CMP_NE)
2329 return SystemZ::CCMASK_TM_SOME_0;
2330 }
2331 if (EffectivelyUnsigned && CmpVal >= Mask - Low && CmpVal < Mask) {
2332 if (CCMask == SystemZ::CCMASK_CMP_GT)
2333 return SystemZ::CCMASK_TM_ALL_1;
2334 if (CCMask == SystemZ::CCMASK_CMP_LE)
2335 return SystemZ::CCMASK_TM_SOME_0;
2336 }
2337 if (EffectivelyUnsigned && CmpVal > Mask - Low && CmpVal <= Mask) {
2338 if (CCMask == SystemZ::CCMASK_CMP_GE)
2339 return SystemZ::CCMASK_TM_ALL_1;
2340 if (CCMask == SystemZ::CCMASK_CMP_LT)
2341 return SystemZ::CCMASK_TM_SOME_0;
2342 }
2343
2344 // Check for ordered comparisons with the top bit.
2345 if (EffectivelyUnsigned && CmpVal >= Mask - High && CmpVal < High) {
2346 if (CCMask == SystemZ::CCMASK_CMP_LE)
2347 return SystemZ::CCMASK_TM_MSB_0;
2348 if (CCMask == SystemZ::CCMASK_CMP_GT)
2349 return SystemZ::CCMASK_TM_MSB_1;
2350 }
2351 if (EffectivelyUnsigned && CmpVal > Mask - High && CmpVal <= High) {
2352 if (CCMask == SystemZ::CCMASK_CMP_LT)
2353 return SystemZ::CCMASK_TM_MSB_0;
2354 if (CCMask == SystemZ::CCMASK_CMP_GE)
2355 return SystemZ::CCMASK_TM_MSB_1;
2356 }
2357
2358 // If there are just two bits, we can do equality checks for Low and High
2359 // as well.
2360 if (Mask == Low + High) {
2361 if (CCMask == SystemZ::CCMASK_CMP_EQ && CmpVal == Low)
2362 return SystemZ::CCMASK_TM_MIXED_MSB_0;
2363 if (CCMask == SystemZ::CCMASK_CMP_NE && CmpVal == Low)
2364 return SystemZ::CCMASK_TM_MIXED_MSB_0 ^ SystemZ::CCMASK_ANY;
2365 if (CCMask == SystemZ::CCMASK_CMP_EQ && CmpVal == High)
2366 return SystemZ::CCMASK_TM_MIXED_MSB_1;
2367 if (CCMask == SystemZ::CCMASK_CMP_NE && CmpVal == High)
2368 return SystemZ::CCMASK_TM_MIXED_MSB_1 ^ SystemZ::CCMASK_ANY;
2369 }
2370
2371 // Looks like we've exhausted our options.
2372 return 0;
2373}
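// Worked example (editorial sketch): for Mask == 0x00F0,
// countLeadingZeros(Mask) == 56 on a 64-bit value, so HighShift == 7,
// High == 0x80 and Low == 0x10.  An unsigned "(x & 0xF0) < 0x10" then maps to
// CCMASK_TM_ALL_0 (all selected bits are zero) and "(x & 0xF0) >= 0x10" maps
// to CCMASK_TM_SOME_1, so either test can be implemented with a single
// TEST UNDER MASK of the 0xF0 bits.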
2374
2375// See whether C can be implemented as a TEST UNDER MASK instruction.
2376// Update the arguments with the TM version if so.
2377static void adjustForTestUnderMask(SelectionDAG &DAG, const SDLoc &DL,
2378 Comparison &C) {
2379 // Check that we have a comparison with a constant.
2380 auto *ConstOp1 = dyn_cast<ConstantSDNode>(C.Op1);
2381 if (!ConstOp1)
2382 return;
2383 uint64_t CmpVal = ConstOp1->getZExtValue();
2384
2385 // Check whether the nonconstant input is an AND with a constant mask.
2386 Comparison NewC(C);
2387 uint64_t MaskVal;
2388 ConstantSDNode *Mask = nullptr;
2389 if (C.Op0.getOpcode() == ISD::AND) {
2390 NewC.Op0 = C.Op0.getOperand(0);
2391 NewC.Op1 = C.Op0.getOperand(1);
2392 Mask = dyn_cast<ConstantSDNode>(NewC.Op1);
2393 if (!Mask)
2394 return;
2395 MaskVal = Mask->getZExtValue();
2396 } else {
2397 // There is no instruction to compare with a 64-bit immediate
2398 // so use TMHH instead if possible. We need an unsigned ordered
2399 // comparison with an i64 immediate.
2400 if (NewC.Op0.getValueType() != MVT::i64 ||
2401 NewC.CCMask == SystemZ::CCMASK_CMP_EQ ||
2402 NewC.CCMask == SystemZ::CCMASK_CMP_NE ||
2403 NewC.ICmpType == SystemZICMP::SignedOnly)
2404 return;
2405 // Convert LE and GT comparisons into LT and GE.
2406 if (NewC.CCMask == SystemZ::CCMASK_CMP_LE ||
2407 NewC.CCMask == SystemZ::CCMASK_CMP_GT) {
2408 if (CmpVal == uint64_t(-1))
2409 return;
2410 CmpVal += 1;
2411 NewC.CCMask ^= SystemZ::CCMASK_CMP_EQ;
2412 }
2413 // If the low N bits of Op1 are zero then the low N bits of Op0 can
2414 // be masked off without changing the result.
2415 MaskVal = -(CmpVal & -CmpVal);
2416 NewC.ICmpType = SystemZICMP::UnsignedOnly;
2417 }
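// Editorial note (illustrative, not in the source): CmpVal & -CmpVal isolates
// the lowest set bit of CmpVal, and negating that value gives a mask covering
// that bit and every bit above it.  For example, CmpVal == 0x1000 yields
// MaskVal == 0xFFFFFFFFFFFFF000, so the low 12 bits of Op0 cannot affect an
// unsigned ordered comparison against 0x1000 and may be masked off.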
2418 if (!MaskVal)
2419 return;
2420
2421 // Check whether the combination of mask, comparison value and comparison
2422 // type are suitable.
2423 unsigned BitSize = NewC.Op0.getValueSizeInBits();
2424 unsigned NewCCMask, ShiftVal;
2425 if (NewC.ICmpType != SystemZICMP::SignedOnly &&
2426 NewC.Op0.getOpcode() == ISD::SHL &&
2427 isSimpleShift(NewC.Op0, ShiftVal) &&
2428 (MaskVal >> ShiftVal != 0) &&
2429 ((CmpVal >> ShiftVal) << ShiftVal) == CmpVal &&
2430 (NewCCMask = getTestUnderMaskCond(BitSize, NewC.CCMask,
2431 MaskVal >> ShiftVal,
2432 CmpVal >> ShiftVal,
2433 SystemZICMP::Any))) {
2434 NewC.Op0 = NewC.Op0.getOperand(0);
2435 MaskVal >>= ShiftVal;
2436 } else if (NewC.ICmpType != SystemZICMP::SignedOnly &&
2437 NewC.Op0.getOpcode() == ISD::SRL &&
2438 isSimpleShift(NewC.Op0, ShiftVal) &&
2439 (MaskVal << ShiftVal != 0) &&
2440 ((CmpVal << ShiftVal) >> ShiftVal) == CmpVal &&
2441 (NewCCMask = getTestUnderMaskCond(BitSize, NewC.CCMask,
2442 MaskVal << ShiftVal,
2443 CmpVal << ShiftVal,
2444 SystemZICMP::UnsignedOnly))) {
2445 NewC.Op0 = NewC.Op0.getOperand(0);
2446 MaskVal <<= ShiftVal;
2447 } else {
2448 NewCCMask = getTestUnderMaskCond(BitSize, NewC.CCMask, MaskVal, CmpVal,
2449 NewC.ICmpType);
2450 if (!NewCCMask)
2451 return;
2452 }
2453
2454 // Go ahead and make the change.
2455 C.Opcode = SystemZISD::TM;
2456 C.Op0 = NewC.Op0;
2457 if (Mask && Mask->getZExtValue() == MaskVal)
2458 C.Op1 = SDValue(Mask, 0);
2459 else
2460 C.Op1 = DAG.getConstant(MaskVal, DL, C.Op0.getValueType());
2461 C.CCValid = SystemZ::CCMASK_TM;
2462 C.CCMask = NewCCMask;
2463}
2464
2465// See whether the comparison argument contains a redundant AND
2466// and remove it if so. This sometimes happens due to the generic
2467// BRCOND expansion.
2468static void adjustForRedundantAnd(SelectionDAG &DAG, const SDLoc &DL,
2469 Comparison &C) {
2470 if (C.Op0.getOpcode() != ISD::AND)
2471 return;
2472 auto *Mask = dyn_cast<ConstantSDNode>(C.Op0.getOperand(1));
2473 if (!Mask)
2474 return;
2475 KnownBits Known = DAG.computeKnownBits(C.Op0.getOperand(0));
2476 if ((~Known.Zero).getZExtValue() & ~Mask->getZExtValue())
2477 return;
2478
2479 C.Op0 = C.Op0.getOperand(0);
2480}
2481
2482// Return a Comparison that tests the condition-code result of intrinsic
2483// node Call against constant integer CC using comparison code Cond.
2484// Opcode is the opcode of the SystemZISD operation for the intrinsic
2485// and CCValid is the set of possible condition-code results.
2486static Comparison getIntrinsicCmp(SelectionDAG &DAG, unsigned Opcode,
2487 SDValue Call, unsigned CCValid, uint64_t CC,
2488 ISD::CondCode Cond) {
2489 Comparison C(Call, SDValue(), SDValue());
2490 C.Opcode = Opcode;
2491 C.CCValid = CCValid;
2492 if (Cond == ISD::SETEQ)
2493 // bit 3 for CC==0, bit 0 for CC==3, always false for CC>3.
2494 C.CCMask = CC < 4 ? 1 << (3 - CC) : 0;
2495 else if (Cond == ISD::SETNE)
2496 // ...and the inverse of that.
2497 C.CCMask = CC < 4 ? ~(1 << (3 - CC)) : -1;
2498 else if (Cond == ISD::SETLT || Cond == ISD::SETULT)
2499 // bits above bit 3 for CC==0 (always false), bits above bit 0 for CC==3,
2500 // always true for CC>3.
2501 C.CCMask = CC < 4 ? ~0U << (4 - CC) : -1;
2502 else if (Cond == ISD::SETGE || Cond == ISD::SETUGE)
2503 // ...and the inverse of that.
2504 C.CCMask = CC < 4 ? ~(~0U << (4 - CC)) : 0;
2505 else if (Cond == ISD::SETLE || Cond == ISD::SETULE)
2506 // bit 3 and above for CC==0, bit 0 and above for CC==3 (always true),
2507 // always true for CC>3.
2508 C.CCMask = CC < 4 ? ~0U << (3 - CC) : -1;
2509 else if (Cond == ISD::SETGT || Cond == ISD::SETUGT)
2510 // ...and the inverse of that.
2511 C.CCMask = CC < 4 ? ~(~0U << (3 - CC)) : 0;
2512 else
2513 llvm_unreachable("Unexpected integer comparison type")::llvm::llvm_unreachable_internal("Unexpected integer comparison type"
, "/build/llvm-toolchain-snapshot-10~++20200112100611+7fa5290d5bd/llvm/lib/Target/SystemZ/SystemZISelLowering.cpp"
, 2513)
;
2514 C.CCMask &= CCValid;
2515 return C;
2516}
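// Worked example (editorial sketch): CC values 0..3 are encoded as mask bits
// 3..0, i.e. CC == n corresponds to bit (3 - n).  Testing "CC == 0" with
// ISD::SETEQ therefore produces the mask 1 << 3 == 8, while "CC < 2" with
// ISD::SETLT produces (~0U << 2) & CCValid, whose bits 3 and 2 select the CC
// values 0 and 1.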
2517
2518 // Decide how to implement a comparison of type Cond between CmpOp0 and CmpOp1.
2519static Comparison getCmp(SelectionDAG &DAG, SDValue CmpOp0, SDValue CmpOp1,
2520 ISD::CondCode Cond, const SDLoc &DL,
2521 SDValue Chain = SDValue(),
2522 bool IsSignaling = false) {
2523 if (CmpOp1.getOpcode() == ISD::Constant) {
2524 assert(!Chain);
2525 uint64_t Constant = cast<ConstantSDNode>(CmpOp1)->getZExtValue();
2526 unsigned Opcode, CCValid;
2527 if (CmpOp0.getOpcode() == ISD::INTRINSIC_W_CHAIN &&
2528 CmpOp0.getResNo() == 0 && CmpOp0->hasNUsesOfValue(1, 0) &&
2529 isIntrinsicWithCCAndChain(CmpOp0, Opcode, CCValid))
2530 return getIntrinsicCmp(DAG, Opcode, CmpOp0, CCValid, Constant, Cond);
2531 if (CmpOp0.getOpcode() == ISD::INTRINSIC_WO_CHAIN &&
2532 CmpOp0.getResNo() == CmpOp0->getNumValues() - 1 &&
2533 isIntrinsicWithCC(CmpOp0, Opcode, CCValid))
2534 return getIntrinsicCmp(DAG, Opcode, CmpOp0, CCValid, Constant, Cond);
2535 }
2536 Comparison C(CmpOp0, CmpOp1, Chain);
2537 C.CCMask = CCMaskForCondCode(Cond);
2538 if (C.Op0.getValueType().isFloatingPoint()) {
2539 C.CCValid = SystemZ::CCMASK_FCMP;
2540 if (!C.Chain)
2541 C.Opcode = SystemZISD::FCMP;
2542 else if (!IsSignaling)
2543 C.Opcode = SystemZISD::STRICT_FCMP;
2544 else
2545 C.Opcode = SystemZISD::STRICT_FCMPS;
2546 adjustForFNeg(C);
2547 } else {
2548 assert(!C.Chain);
2549 C.CCValid = SystemZ::CCMASK_ICMP;
2550 C.Opcode = SystemZISD::ICMP;
2551 // Choose the type of comparison. Equality and inequality tests can
2552 // use either signed or unsigned comparisons. The choice also doesn't
2553 // matter if both sign bits are known to be clear. In those cases we
2554 // want to give the main isel code the freedom to choose whichever
2555 // form fits best.
2556 if (C.CCMask == SystemZ::CCMASK_CMP_EQ ||
2557 C.CCMask == SystemZ::CCMASK_CMP_NE ||
2558 (DAG.SignBitIsZero(C.Op0) && DAG.SignBitIsZero(C.Op1)))
2559 C.ICmpType = SystemZICMP::Any;
2560 else if (C.CCMask & SystemZ::CCMASK_CMP_UO)
2561 C.ICmpType = SystemZICMP::UnsignedOnly;
2562 else
2563 C.ICmpType = SystemZICMP::SignedOnly;
2564 C.CCMask &= ~SystemZ::CCMASK_CMP_UO;
2565 adjustForRedundantAnd(DAG, DL, C);
2566 adjustZeroCmp(DAG, DL, C);
2567 adjustSubwordCmp(DAG, DL, C);
2568 adjustForSubtraction(DAG, DL, C);
2569 adjustForLTGFR(C);
2570 adjustICmpTruncate(DAG, DL, C);
2571 }
2572
2573 if (shouldSwapCmpOperands(C)) {
2574 std::swap(C.Op0, C.Op1);
2575 C.CCMask = reverseCCMask(C.CCMask);
2576 }
2577
2578 adjustForTestUnderMask(DAG, DL, C);
2579 return C;
2580}
2581
2582// Emit the comparison instruction described by C.
2583static SDValue emitCmp(SelectionDAG &DAG, const SDLoc &DL, Comparison &C) {
2584 if (!C.Op1.getNode()) {
2585 SDNode *Node;
2586 switch (C.Op0.getOpcode()) {
2587 case ISD::INTRINSIC_W_CHAIN:
2588 Node = emitIntrinsicWithCCAndChain(DAG, C.Op0, C.Opcode);
2589 return SDValue(Node, 0);
2590 case ISD::INTRINSIC_WO_CHAIN:
2591 Node = emitIntrinsicWithCC(DAG, C.Op0, C.Opcode);
2592 return SDValue(Node, Node->getNumValues() - 1);
2593 default:
2594 llvm_unreachable("Invalid comparison operands")::llvm::llvm_unreachable_internal("Invalid comparison operands"
, "/build/llvm-toolchain-snapshot-10~++20200112100611+7fa5290d5bd/llvm/lib/Target/SystemZ/SystemZISelLowering.cpp"
, 2594)
;
2595 }
2596 }
2597 if (C.Opcode == SystemZISD::ICMP)
2598 return DAG.getNode(SystemZISD::ICMP, DL, MVT::i32, C.Op0, C.Op1,
2599 DAG.getTargetConstant(C.ICmpType, DL, MVT::i32));
2600 if (C.Opcode == SystemZISD::TM) {
2601 bool RegisterOnly = (bool(C.CCMask & SystemZ::CCMASK_TM_MIXED_MSB_0) !=
2602 bool(C.CCMask & SystemZ::CCMASK_TM_MIXED_MSB_1));
2603 return DAG.getNode(SystemZISD::TM, DL, MVT::i32, C.Op0, C.Op1,
2604 DAG.getTargetConstant(RegisterOnly, DL, MVT::i32));
2605 }
2606 if (C.Chain) {
2607 SDVTList VTs = DAG.getVTList(MVT::i32, MVT::Other);
2608 return DAG.getNode(C.Opcode, DL, VTs, C.Chain, C.Op0, C.Op1);
2609 }
2610 return DAG.getNode(C.Opcode, DL, MVT::i32, C.Op0, C.Op1);
2611}
2612
2613// Implement a 32-bit *MUL_LOHI operation by extending both operands to
2614// 64 bits. Extend is the extension type to use. Store the high part
2615// in Hi and the low part in Lo.
2616static void lowerMUL_LOHI32(SelectionDAG &DAG, const SDLoc &DL, unsigned Extend,
2617 SDValue Op0, SDValue Op1, SDValue &Hi,
2618 SDValue &Lo) {
2619 Op0 = DAG.getNode(Extend, DL, MVT::i64, Op0);
2620 Op1 = DAG.getNode(Extend, DL, MVT::i64, Op1);
2621 SDValue Mul = DAG.getNode(ISD::MUL, DL, MVT::i64, Op0, Op1);
2622 Hi = DAG.getNode(ISD::SRL, DL, MVT::i64, Mul,
2623 DAG.getConstant(32, DL, MVT::i64));
2624 Hi = DAG.getNode(ISD::TRUNCATE, DL, MVT::i32, Hi);
2625 Lo = DAG.getNode(ISD::TRUNCATE, DL, MVT::i32, Mul);
2626}
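// Worked example (editorial sketch): with Extend == ISD::ZERO_EXTEND and
// Op0 == Op1 == 0xFFFFFFFF, the 64-bit product is 0xFFFFFFFE00000001, so the
// shift-and-truncate above yields Hi == 0xFFFFFFFE and Lo == 0x00000001,
// exactly the UMUL_LOHI result for the original 32-bit operands.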
2627
2628// Lower a binary operation that produces two VT results, one in each
2629// half of a GR128 pair. Op0 and Op1 are the VT operands to the operation,
2630// and Opcode performs the GR128 operation. Store the even register result
2631// in Even and the odd register result in Odd.
2632static void lowerGR128Binary(SelectionDAG &DAG, const SDLoc &DL, EVT VT,
2633 unsigned Opcode, SDValue Op0, SDValue Op1,
2634 SDValue &Even, SDValue &Odd) {
2635 SDValue Result = DAG.getNode(Opcode, DL, MVT::Untyped, Op0, Op1);
2636 bool Is32Bit = is32Bit(VT);
2637 Even = DAG.getTargetExtractSubreg(SystemZ::even128(Is32Bit), DL, VT, Result);
2638 Odd = DAG.getTargetExtractSubreg(SystemZ::odd128(Is32Bit), DL, VT, Result);
2639}
2640
2641// Return an i32 value that is 1 if the CC value produced by CCReg is
2642// in the mask CCMask and 0 otherwise. CC is known to have a value
2643// in CCValid, so other values can be ignored.
2644static SDValue emitSETCC(SelectionDAG &DAG, const SDLoc &DL, SDValue CCReg,
2645 unsigned CCValid, unsigned CCMask) {
2646 SDValue Ops[] = {DAG.getConstant(1, DL, MVT::i32),
2647 DAG.getConstant(0, DL, MVT::i32),
2648 DAG.getTargetConstant(CCValid, DL, MVT::i32),
2649 DAG.getTargetConstant(CCMask, DL, MVT::i32), CCReg};
2650 return DAG.getNode(SystemZISD::SELECT_CCMASK, DL, MVT::i32, Ops);
2651}
2652
2653 // Return the SystemZISD vector comparison operation for CC, or 0 if it cannot
2654// be done directly. Mode is CmpMode::Int for integer comparisons, CmpMode::FP
2655// for regular floating-point comparisons, CmpMode::StrictFP for strict (quiet)
2656// floating-point comparisons, and CmpMode::SignalingFP for strict signaling
2657// floating-point comparisons.
2658enum class CmpMode { Int, FP, StrictFP, SignalingFP };
2659static unsigned getVectorComparison(ISD::CondCode CC, CmpMode Mode) {
2660 switch (CC) {
2661 case ISD::SETOEQ:
2662 case ISD::SETEQ:
2663 switch (Mode) {
2664 case CmpMode::Int: return SystemZISD::VICMPE;
2665 case CmpMode::FP: return SystemZISD::VFCMPE;
2666 case CmpMode::StrictFP: return SystemZISD::STRICT_VFCMPE;
2667 case CmpMode::SignalingFP: return SystemZISD::STRICT_VFCMPES;
2668 }
2669 llvm_unreachable("Bad mode");
2670
2671 case ISD::SETOGE:
2672 case ISD::SETGE:
2673 switch (Mode) {
2674 case CmpMode::Int: return 0;
2675 case CmpMode::FP: return SystemZISD::VFCMPHE;
2676 case CmpMode::StrictFP: return SystemZISD::STRICT_VFCMPHE;
2677 case CmpMode::SignalingFP: return SystemZISD::STRICT_VFCMPHES;
2678 }
2679 llvm_unreachable("Bad mode");
2680
2681 case ISD::SETOGT:
2682 case ISD::SETGT:
2683 switch (Mode) {
2684 case CmpMode::Int: return SystemZISD::VICMPH;
2685 case CmpMode::FP: return SystemZISD::VFCMPH;
2686 case CmpMode::StrictFP: return SystemZISD::STRICT_VFCMPH;
2687 case CmpMode::SignalingFP: return SystemZISD::STRICT_VFCMPHS;
2688 }
2689 llvm_unreachable("Bad mode");
2690
2691 case ISD::SETUGT:
2692 switch (Mode) {
2693 case CmpMode::Int: return SystemZISD::VICMPHL;
2694 case CmpMode::FP: return 0;
2695 case CmpMode::StrictFP: return 0;
2696 case CmpMode::SignalingFP: return 0;
2697 }
2698 llvm_unreachable("Bad mode");
2699
2700 default:
2701 return 0;
2702 }
2703}
2704
2705// Return the SystemZISD vector comparison operation for CC or its inverse,
2706// or 0 if neither can be done directly. Indicate in Invert whether the
2707// result is for the inverse of CC. Mode is as above.
2708static unsigned getVectorComparisonOrInvert(ISD::CondCode CC, CmpMode Mode,
2709 bool &Invert) {
2710 if (unsigned Opcode = getVectorComparison(CC, Mode)) {
2711 Invert = false;
2712 return Opcode;
2713 }
2714
2715 CC = ISD::getSetCCInverse(CC, Mode == CmpMode::Int ? MVT::i32 : MVT::f32);
2716 if (unsigned Opcode = getVectorComparison(CC, Mode)) {
2717 Invert = true;
2718 return Opcode;
2719 }
2720
2721 return 0;
2722}
2723
2724// Return a v2f64 that contains the extended form of elements Start and Start+1
2725// of v4f32 value Op. If Chain is nonnull, return the strict form.
2726static SDValue expandV4F32ToV2F64(SelectionDAG &DAG, int Start, const SDLoc &DL,
2727 SDValue Op, SDValue Chain) {
2728 int Mask[] = { Start, -1, Start + 1, -1 };
2729 Op = DAG.getVectorShuffle(MVT::v4f32, DL, Op, DAG.getUNDEF(MVT::v4f32), Mask);
2730 if (Chain) {
2731 SDVTList VTs = DAG.getVTList(MVT::v2f64, MVT::Other);
2732 return DAG.getNode(SystemZISD::STRICT_VEXTEND, DL, VTs, Chain, Op);
2733 }
2734 return DAG.getNode(SystemZISD::VEXTEND, DL, MVT::v2f64, Op);
2735}
2736
2737// Build a comparison of vectors CmpOp0 and CmpOp1 using opcode Opcode,
2738// producing a result of type VT. If Chain is nonnull, return the strict form.
2739SDValue SystemZTargetLowering::getVectorCmp(SelectionDAG &DAG, unsigned Opcode,
2740 const SDLoc &DL, EVT VT,
2741 SDValue CmpOp0,
2742 SDValue CmpOp1,
2743 SDValue Chain) const {
2744 // There is no hardware support for v4f32 (unless we have the vector
2745 // enhancements facility 1), so extend the vector into two v2f64s
2746 // and compare those.
2747 if (CmpOp0.getValueType() == MVT::v4f32 &&
2748 !Subtarget.hasVectorEnhancements1()) {
2749 SDValue H0 = expandV4F32ToV2F64(DAG, 0, DL, CmpOp0, Chain);
2750 SDValue L0 = expandV4F32ToV2F64(DAG, 2, DL, CmpOp0, Chain);
2751 SDValue H1 = expandV4F32ToV2F64(DAG, 0, DL, CmpOp1, Chain);
2752 SDValue L1 = expandV4F32ToV2F64(DAG, 2, DL, CmpOp1, Chain);
2753 if (Chain) {
2754 SDVTList VTs = DAG.getVTList(MVT::v2i64, MVT::Other);
2755 SDValue HRes = DAG.getNode(Opcode, DL, VTs, Chain, H0, H1);
2756 SDValue LRes = DAG.getNode(Opcode, DL, VTs, Chain, L0, L1);
2757 SDValue Res = DAG.getNode(SystemZISD::PACK, DL, VT, HRes, LRes);
2758 SDValue Chains[6] = { H0.getValue(1), L0.getValue(1),
2759 H1.getValue(1), L1.getValue(1),
2760 HRes.getValue(1), LRes.getValue(1) };
2761 SDValue NewChain = DAG.getNode(ISD::TokenFactor, DL, MVT::Other, Chains);
2762 SDValue Ops[2] = { Res, NewChain };
2763 return DAG.getMergeValues(Ops, DL);
2764 }
2765 SDValue HRes = DAG.getNode(Opcode, DL, MVT::v2i64, H0, H1);
2766 SDValue LRes = DAG.getNode(Opcode, DL, MVT::v2i64, L0, L1);
2767 return DAG.getNode(SystemZISD::PACK, DL, VT, HRes, LRes);
2768 }
2769 if (Chain) {
2770 SDVTList VTs = DAG.getVTList(VT, MVT::Other);
2771 return DAG.getNode(Opcode, DL, VTs, Chain, CmpOp0, CmpOp1);
2772 }
2773 return DAG.getNode(Opcode, DL, VT, CmpOp0, CmpOp1);
2774}
2775
2776// Lower a vector comparison of type CC between CmpOp0 and CmpOp1, producing
2777// an integer mask of type VT. If Chain is nonnull, we have a strict
2778// floating-point comparison. If in addition IsSignaling is true, we have
2779// a strict signaling floating-point comparison.
2780SDValue SystemZTargetLowering::lowerVectorSETCC(SelectionDAG &DAG,
2781 const SDLoc &DL, EVT VT,
2782 ISD::CondCode CC,
2783 SDValue CmpOp0,
2784 SDValue CmpOp1,
2785 SDValue Chain,
2786 bool IsSignaling) const {
2787 bool IsFP = CmpOp0.getValueType().isFloatingPoint();
2788 assert (!Chain || IsFP);
2789 assert (!IsSignaling || Chain);
2790 CmpMode Mode = IsSignaling ? CmpMode::SignalingFP :
2791 Chain ? CmpMode::StrictFP : IsFP ? CmpMode::FP : CmpMode::Int;
2792 bool Invert = false;
2793 SDValue Cmp;
2794 switch (CC) {
2795 // Handle tests for order using (or (ogt y x) (oge x y)).
2796 case ISD::SETUO:
2797 Invert = true;
2798 LLVM_FALLTHROUGH;
2799 case ISD::SETO: {
2800 assert(IsFP && "Unexpected integer comparison");
2801 SDValue LT = getVectorCmp(DAG, getVectorComparison(ISD::SETOGT, Mode),
2802 DL, VT, CmpOp1, CmpOp0, Chain);
2803 SDValue GE = getVectorCmp(DAG, getVectorComparison(ISD::SETOGE, Mode),
2804 DL, VT, CmpOp0, CmpOp1, Chain);
2805 Cmp = DAG.getNode(ISD::OR, DL, VT, LT, GE);
2806 if (Chain)
2807 Chain = DAG.getNode(ISD::TokenFactor, DL, MVT::Other,
2808 LT.getValue(1), GE.getValue(1));
2809 break;
2810 }
2811
2812 // Handle <> tests using (or (ogt y x) (ogt x y)).
2813 case ISD::SETUEQ:
2814 Invert = true;
2815 LLVM_FALLTHROUGH;
2816 case ISD::SETONE: {
2817 assert(IsFP && "Unexpected integer comparison");
2818 SDValue LT = getVectorCmp(DAG, getVectorComparison(ISD::SETOGT, Mode),
2819 DL, VT, CmpOp1, CmpOp0, Chain);
2820 SDValue GT = getVectorCmp(DAG, getVectorComparison(ISD::SETOGT, Mode),
2821 DL, VT, CmpOp0, CmpOp1, Chain);
2822 Cmp = DAG.getNode(ISD::OR, DL, VT, LT, GT);
2823 if (Chain)
2824 Chain = DAG.getNode(ISD::TokenFactor, DL, MVT::Other,
2825 LT.getValue(1), GT.getValue(1));
2826 break;
2827 }
2828
2829 // Otherwise a single comparison is enough. It doesn't really
2830 // matter whether we try the inversion or the swap first, since
2831 // there are no cases where both work.
2832 default:
2833 if (unsigned Opcode = getVectorComparisonOrInvert(CC, Mode, Invert))
2834 Cmp = getVectorCmp(DAG, Opcode, DL, VT, CmpOp0, CmpOp1, Chain);
2835 else {
2836 CC = ISD::getSetCCSwappedOperands(CC);
2837 if (unsigned Opcode = getVectorComparisonOrInvert(CC, Mode, Invert))
2838 Cmp = getVectorCmp(DAG, Opcode, DL, VT, CmpOp1, CmpOp0, Chain);
2839 else
2840 llvm_unreachable("Unhandled comparison");
2841 }
2842 if (Chain)
2843 Chain = Cmp.getValue(1);
2844 break;
2845 }
2846 if (Invert) {
2847 SDValue Mask =
2848 DAG.getSplatBuildVector(VT, DL, DAG.getConstant(-1, DL, MVT::i64));
2849 Cmp = DAG.getNode(ISD::XOR, DL, VT, Cmp, Mask);
2850 }
2851 if (Chain && Chain.getNode() != Cmp.getNode()) {
2852 SDValue Ops[2] = { Cmp, Chain };
2853 Cmp = DAG.getMergeValues(Ops, DL);
2854 }
2855 return Cmp;
2856}
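// Scalar sketch of the two decompositions used for SETO/SETUO and
// SETONE/SETUEQ above (illustrative only; the vector lowering applies these
// lane-wise and inverts the resulting mask for the unordered variants):
static inline bool isOrderedSketch(double X, double Y) {
  return (Y > X) || (X >= Y);        // (or (ogt y x) (oge x y))
}
static inline bool isOrderedNotEqualSketch(double X, double Y) {
  return (Y > X) || (X > Y);         // (or (ogt y x) (ogt x y))
}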
2857
2858SDValue SystemZTargetLowering::lowerSETCC(SDValue Op,
2859 SelectionDAG &DAG) const {
2860 SDValue CmpOp0 = Op.getOperand(0);
2861 SDValue CmpOp1 = Op.getOperand(1);
2862 ISD::CondCode CC = cast<CondCodeSDNode>(Op.getOperand(2))->get();
2863 SDLoc DL(Op);
2864 EVT VT = Op.getValueType();
2865 if (VT.isVector())
2866 return lowerVectorSETCC(DAG, DL, VT, CC, CmpOp0, CmpOp1);
2867
2868 Comparison C(getCmp(DAG, CmpOp0, CmpOp1, CC, DL));
2869 SDValue CCReg = emitCmp(DAG, DL, C);
2870 return emitSETCC(DAG, DL, CCReg, C.CCValid, C.CCMask);
2871}
2872
2873SDValue SystemZTargetLowering::lowerSTRICT_FSETCC(SDValue Op,
2874 SelectionDAG &DAG,
2875 bool IsSignaling) const {
2876 SDValue Chain = Op.getOperand(0);
2877 SDValue CmpOp0 = Op.getOperand(1);
2878 SDValue CmpOp1 = Op.getOperand(2);
2879 ISD::CondCode CC = cast<CondCodeSDNode>(Op.getOperand(3))->get();
2880 SDLoc DL(Op);
2881 EVT VT = Op.getNode()->getValueType(0);
2882 if (VT.isVector()) {
2883 SDValue Res = lowerVectorSETCC(DAG, DL, VT, CC, CmpOp0, CmpOp1,
2884 Chain, IsSignaling);
2885 return Res.getValue(Op.getResNo());
2886 }
2887
2888 Comparison C(getCmp(DAG, CmpOp0, CmpOp1, CC, DL, Chain, IsSignaling));
2889 SDValue CCReg = emitCmp(DAG, DL, C);
2890 CCReg->setFlags(Op->getFlags());
2891 SDValue Result = emitSETCC(DAG, DL, CCReg, C.CCValid, C.CCMask);
2892 SDValue Ops[2] = { Result, CCReg.getValue(1) };
2893 return DAG.getMergeValues(Ops, DL);
2894}
2895
2896SDValue SystemZTargetLowering::lowerBR_CC(SDValue Op, SelectionDAG &DAG) const {
2897 ISD::CondCode CC = cast<CondCodeSDNode>(Op.getOperand(1))->get();
2898 SDValue CmpOp0 = Op.getOperand(2);
2899 SDValue CmpOp1 = Op.getOperand(3);
2900 SDValue Dest = Op.getOperand(4);
2901 SDLoc DL(Op);
2902
2903 Comparison C(getCmp(DAG, CmpOp0, CmpOp1, CC, DL));
2904 SDValue CCReg = emitCmp(DAG, DL, C);
2905 return DAG.getNode(
2906 SystemZISD::BR_CCMASK, DL, Op.getValueType(), Op.getOperand(0),
2907 DAG.getTargetConstant(C.CCValid, DL, MVT::i32),
2908 DAG.getTargetConstant(C.CCMask, DL, MVT::i32), Dest, CCReg);
2909}
2910
2911// Return true if Pos is CmpOp and Neg is the negative of CmpOp,
2912// allowing Pos and Neg to be wider than CmpOp.
2913static bool isAbsolute(SDValue CmpOp, SDValue Pos, SDValue Neg) {
2914 return (Neg.getOpcode() == ISD::SUB &&
2915 Neg.getOperand(0).getOpcode() == ISD::Constant &&
2916 cast<ConstantSDNode>(Neg.getOperand(0))->getZExtValue() == 0 &&
2917 Neg.getOperand(1) == Pos &&
2918 (Pos == CmpOp ||
2919 (Pos.getOpcode() == ISD::SIGN_EXTEND &&
2920 Pos.getOperand(0) == CmpOp)));
2921}
2922
2923// Return the absolute or negative absolute of Op; IsNegative decides which.
2924static SDValue getAbsolute(SelectionDAG &DAG, const SDLoc &DL, SDValue Op,
2925 bool IsNegative) {
2926 Op = DAG.getNode(SystemZISD::IABS, DL, Op.getValueType(), Op);
2927 if (IsNegative)
2928 Op = DAG.getNode(ISD::SUB, DL, Op.getValueType(),
2929 DAG.getConstant(0, DL, Op.getValueType()), Op);
2930 return Op;
2931}
2932
2933SDValue SystemZTargetLowering::lowerSELECT_CC(SDValue Op,
2934 SelectionDAG &DAG) const {
2935 SDValue CmpOp0 = Op.getOperand(0);
2936 SDValue CmpOp1 = Op.getOperand(1);
2937 SDValue TrueOp = Op.getOperand(2);
2938 SDValue FalseOp = Op.getOperand(3);
2939 ISD::CondCode CC = cast<CondCodeSDNode>(Op.getOperand(4))->get();
2940 SDLoc DL(Op);
2941
2942 Comparison C(getCmp(DAG, CmpOp0, CmpOp1, CC, DL));
2943
2944 // Check for absolute and negative-absolute selections, including those
2945 // where the comparison value is sign-extended (for LPGFR and LNGFR).
2946 // This check supplements the one in DAGCombiner.
2947 if (C.Opcode == SystemZISD::ICMP &&
2948 C.CCMask != SystemZ::CCMASK_CMP_EQ &&
2949 C.CCMask != SystemZ::CCMASK_CMP_NE &&
2950 C.Op1.getOpcode() == ISD::Constant &&
2951 cast<ConstantSDNode>(C.Op1)->getZExtValue() == 0) {
2952 if (isAbsolute(C.Op0, TrueOp, FalseOp))
2953 return getAbsolute(DAG, DL, TrueOp, C.CCMask & SystemZ::CCMASK_CMP_LT);
2954 if (isAbsolute(C.Op0, FalseOp, TrueOp))
2955 return getAbsolute(DAG, DL, FalseOp, C.CCMask & SystemZ::CCMASK_CMP_GT);
2956 }
2957
2958 SDValue CCReg = emitCmp(DAG, DL, C);
2959 SDValue Ops[] = {TrueOp, FalseOp,
2960 DAG.getTargetConstant(C.CCValid, DL, MVT::i32),
2961 DAG.getTargetConstant(C.CCMask, DL, MVT::i32), CCReg};
2962
2963 return DAG.getNode(SystemZISD::SELECT_CCMASK, DL, Op.getValueType(), Ops);
2964}
2965
2966SDValue SystemZTargetLowering::lowerGlobalAddress(GlobalAddressSDNode *Node,
2967 SelectionDAG &DAG) const {
2968 SDLoc DL(Node);
2969 const GlobalValue *GV = Node->getGlobal();
2970 int64_t Offset = Node->getOffset();
2971 EVT PtrVT = getPointerTy(DAG.getDataLayout());
2972 CodeModel::Model CM = DAG.getTarget().getCodeModel();
2973
2974 SDValue Result;
2975 if (Subtarget.isPC32DBLSymbol(GV, CM)) {
2976 if (isInt<32>(Offset)) {
2977 // Assign anchors at 1<<12 byte boundaries.
2978 uint64_t Anchor = Offset & ~uint64_t(0xfff);
2979 Result = DAG.getTargetGlobalAddress(GV, DL, PtrVT, Anchor);
2980 Result = DAG.getNode(SystemZISD::PCREL_WRAPPER, DL, PtrVT, Result);
2981
2982 // The offset can be folded into the address if it is aligned to a
2983 // halfword.
2984 Offset -= Anchor;
2985 if (Offset != 0 && (Offset & 1) == 0) {
2986 SDValue Full =
2987 DAG.getTargetGlobalAddress(GV, DL, PtrVT, Anchor + Offset);
2988 Result = DAG.getNode(SystemZISD::PCREL_OFFSET, DL, PtrVT, Full, Result);
2989 Offset = 0;
2990 }
2991 } else {
2992 // Conservatively load a constant offset greater than 32 bits into a
2993 // register below.
2994 Result = DAG.getTargetGlobalAddress(GV, DL, PtrVT);
2995 Result = DAG.getNode(SystemZISD::PCREL_WRAPPER, DL, PtrVT, Result);
2996 }
2997 } else {
2998 Result = DAG.getTargetGlobalAddress(GV, DL, PtrVT, 0, SystemZII::MO_GOT);
2999 Result = DAG.getNode(SystemZISD::PCREL_WRAPPER, DL, PtrVT, Result);
3000 Result = DAG.getLoad(PtrVT, DL, DAG.getEntryNode(), Result,
3001 MachinePointerInfo::getGOT(DAG.getMachineFunction()));
3002 }
3003
3004 // If there was a non-zero offset that we didn't fold, create an explicit
3005 // addition for it.
3006 if (Offset != 0)
3007 Result = DAG.getNode(ISD::ADD, DL, PtrVT, Result,
3008 DAG.getConstant(Offset, DL, PtrVT));
3009
3010 return Result;
3011}
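// Worked example of the anchor arithmetic above (a sketch, not part of the
// lowering itself): for Offset = 0x1236, the anchor is 0x1000 and the
// remaining 0x236 is nonzero and halfword-aligned, so it folds into the
// PCREL_OFFSET node; an odd remainder such as 0x237 would instead be added
// back explicitly at the end.
#include <cstdint>
static inline uint64_t anchorOfSketch(uint64_t Offset) {
  return Offset & ~uint64_t(0xfff);              // 1<<12 byte boundary
}
static inline bool offsetFoldsIntoAddressSketch(uint64_t Offset) {
  uint64_t Rem = Offset - anchorOfSketch(Offset);
  return Rem != 0 && (Rem & 1) == 0;             // nonzero and halfword-aligned
}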
3012
3013SDValue SystemZTargetLowering::lowerTLSGetOffset(GlobalAddressSDNode *Node,
3014 SelectionDAG &DAG,
3015 unsigned Opcode,
3016 SDValue GOTOffset) const {
3017 SDLoc DL(Node);
3018 EVT PtrVT = getPointerTy(DAG.getDataLayout());
3019 SDValue Chain = DAG.getEntryNode();
3020 SDValue Glue;
3021
3022 if (DAG.getMachineFunction().getFunction().getCallingConv() ==
3023 CallingConv::GHC)
3024 report_fatal_error("In GHC calling convention TLS is not supported");
3025
3026 // __tls_get_offset takes the GOT offset in %r2 and the GOT in %r12.
3027 SDValue GOT = DAG.getGLOBAL_OFFSET_TABLE(PtrVT);
3028 Chain = DAG.getCopyToReg(Chain, DL, SystemZ::R12D, GOT, Glue);
3029 Glue = Chain.getValue(1);
3030 Chain = DAG.getCopyToReg(Chain, DL, SystemZ::R2D, GOTOffset, Glue);
3031 Glue = Chain.getValue(1);
3032
3033 // The first call operand is the chain and the second is the TLS symbol.
3034 SmallVector<SDValue, 8> Ops;
3035 Ops.push_back(Chain);
3036 Ops.push_back(DAG.getTargetGlobalAddress(Node->getGlobal(), DL,
3037 Node->getValueType(0),
3038 0, 0));
3039
3040 // Add argument registers to the end of the list so that they are
3041 // known live into the call.
3042 Ops.push_back(DAG.getRegister(SystemZ::R2D, PtrVT));
3043 Ops.push_back(DAG.getRegister(SystemZ::R12D, PtrVT));
3044
3045 // Add a register mask operand representing the call-preserved registers.
3046 const TargetRegisterInfo *TRI = Subtarget.getRegisterInfo();
3047 const uint32_t *Mask =
3048 TRI->getCallPreservedMask(DAG.getMachineFunction(), CallingConv::C);
3049 assert(Mask && "Missing call preserved mask for calling convention");
3050 Ops.push_back(DAG.getRegisterMask(Mask));
3051
3052 // Glue the call to the argument copies.
3053 Ops.push_back(Glue);
3054
3055 // Emit the call.
3056 SDVTList NodeTys = DAG.getVTList(MVT::Other, MVT::Glue);
3057 Chain = DAG.getNode(Opcode, DL, NodeTys, Ops);
3058 Glue = Chain.getValue(1);
3059
3060 // Copy the return value from %r2.
3061 return DAG.getCopyFromReg(Chain, DL, SystemZ::R2D, PtrVT, Glue);
3062}
3063
3064SDValue SystemZTargetLowering::lowerThreadPointer(const SDLoc &DL,
3065 SelectionDAG &DAG) const {
3066 SDValue Chain = DAG.getEntryNode();
3067 EVT PtrVT = getPointerTy(DAG.getDataLayout());
3068
3069 // The high part of the thread pointer is in access register 0.
3070 SDValue TPHi = DAG.getCopyFromReg(Chain, DL, SystemZ::A0, MVT::i32);
3071 TPHi = DAG.getNode(ISD::ANY_EXTEND, DL, PtrVT, TPHi);
3072
3073 // The low part of the thread pointer is in access register 1.
3074 SDValue TPLo = DAG.getCopyFromReg(Chain, DL, SystemZ::A1, MVT::i32);
3075 TPLo = DAG.getNode(ISD::ZERO_EXTEND, DL, PtrVT, TPLo);
3076
3077 // Merge them into a single 64-bit address.
3078 SDValue TPHiShifted = DAG.getNode(ISD::SHL, DL, PtrVT, TPHi,
3079 DAG.getConstant(32, DL, PtrVT));
3080 return DAG.getNode(ISD::OR, DL, PtrVT, TPHiShifted, TPLo);
3081}
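// The same merge written as plain C++ (a sketch, assuming the two access
// register halves are already available as 32-bit integers):
#include <cstdint>
static inline uint64_t mergeThreadPointerSketch(uint32_t TPHi, uint32_t TPLo) {
  return (uint64_t(TPHi) << 32) | uint64_t(TPLo); // SHL by 32, then OR
}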
3082
3083SDValue SystemZTargetLowering::lowerGlobalTLSAddress(GlobalAddressSDNode *Node,
3084 SelectionDAG &DAG) const {
3085 if (DAG.getTarget().useEmulatedTLS())
3086 return LowerToTLSEmulatedModel(Node, DAG);
3087 SDLoc DL(Node);
3088 const GlobalValue *GV = Node->getGlobal();
3089 EVT PtrVT = getPointerTy(DAG.getDataLayout());
3090 TLSModel::Model model = DAG.getTarget().getTLSModel(GV);
3091
3092 if (DAG.getMachineFunction().getFunction().getCallingConv() ==
3093 CallingConv::GHC)
3094 report_fatal_error("In GHC calling convention TLS is not supported");
3095
3096 SDValue TP = lowerThreadPointer(DL, DAG);
3097
3098 // Get the offset of GA from the thread pointer, based on the TLS model.
3099 SDValue Offset;
3100 switch (model) {
3101 case TLSModel::GeneralDynamic: {
3102 // Load the GOT offset of the tls_index (module ID / per-symbol offset).
3103 SystemZConstantPoolValue *CPV =
3104 SystemZConstantPoolValue::Create(GV, SystemZCP::TLSGD);
3105
3106 Offset = DAG.getConstantPool(CPV, PtrVT, 8);
3107 Offset = DAG.getLoad(
3108 PtrVT, DL, DAG.getEntryNode(), Offset,
3109 MachinePointerInfo::getConstantPool(DAG.getMachineFunction()));
3110
3111 // Call __tls_get_offset to retrieve the offset.
3112 Offset = lowerTLSGetOffset(Node, DAG, SystemZISD::TLS_GDCALL, Offset);
3113 break;
3114 }
3115
3116 case TLSModel::LocalDynamic: {
3117 // Load the GOT offset of the module ID.
3118 SystemZConstantPoolValue *CPV =
3119 SystemZConstantPoolValue::Create(GV, SystemZCP::TLSLDM);
3120
3121 Offset = DAG.getConstantPool(CPV, PtrVT, 8);
3122 Offset = DAG.getLoad(
3123 PtrVT, DL, DAG.getEntryNode(), Offset,
3124 MachinePointerInfo::getConstantPool(DAG.getMachineFunction()));
3125
3126 // Call __tls_get_offset to retrieve the module base offset.
3127 Offset = lowerTLSGetOffset(Node, DAG, SystemZISD::TLS_LDCALL, Offset);
3128
3129 // Note: The SystemZLDCleanupPass will remove redundant computations
3130 // of the module base offset. Count total number of local-dynamic
3131 // accesses to trigger execution of that pass.
3132 SystemZMachineFunctionInfo* MFI =
3133 DAG.getMachineFunction().getInfo<SystemZMachineFunctionInfo>();
3134 MFI->incNumLocalDynamicTLSAccesses();
3135
3136 // Add the per-symbol offset.
3137 CPV = SystemZConstantPoolValue::Create(GV, SystemZCP::DTPOFF);
3138
3139 SDValue DTPOffset = DAG.getConstantPool(CPV, PtrVT, 8);
3140 DTPOffset = DAG.getLoad(
3141 PtrVT, DL, DAG.getEntryNode(), DTPOffset,
3142 MachinePointerInfo::getConstantPool(DAG.getMachineFunction()));
3143
3144 Offset = DAG.getNode(ISD::ADD, DL, PtrVT, Offset, DTPOffset);
3145 break;
3146 }
3147
3148 case TLSModel::InitialExec: {
3149 // Load the offset from the GOT.
3150 Offset = DAG.getTargetGlobalAddress(GV, DL, PtrVT, 0,
3151 SystemZII::MO_INDNTPOFF);
3152 Offset = DAG.getNode(SystemZISD::PCREL_WRAPPER, DL, PtrVT, Offset);
3153 Offset =
3154 DAG.getLoad(PtrVT, DL, DAG.getEntryNode(), Offset,
3155 MachinePointerInfo::getGOT(DAG.getMachineFunction()));
3156 break;
3157 }
3158
3159 case TLSModel::LocalExec: {
3160 // Force the offset into the constant pool and load it from there.
3161 SystemZConstantPoolValue *CPV =
3162 SystemZConstantPoolValue::Create(GV, SystemZCP::NTPOFF);
3163
3164 Offset = DAG.getConstantPool(CPV, PtrVT, 8);
3165 Offset = DAG.getLoad(
3166 PtrVT, DL, DAG.getEntryNode(), Offset,
3167 MachinePointerInfo::getConstantPool(DAG.getMachineFunction()));
3168 break;
3169 }
3170 }
3171
3172 // Add the base and offset together.
3173 return DAG.getNode(ISD::ADD, DL, PtrVT, TP, Offset);
3174}
3175
3176SDValue SystemZTargetLowering::lowerBlockAddress(BlockAddressSDNode *Node,
3177 SelectionDAG &DAG) const {
3178 SDLoc DL(Node);
3179 const BlockAddress *BA = Node->getBlockAddress();
3180 int64_t Offset = Node->getOffset();
3181 EVT PtrVT = getPointerTy(DAG.getDataLayout());
3182
3183 SDValue Result = DAG.getTargetBlockAddress(BA, PtrVT, Offset);
3184 Result = DAG.getNode(SystemZISD::PCREL_WRAPPER, DL, PtrVT, Result);
3185 return Result;
3186}
3187
3188SDValue SystemZTargetLowering::lowerJumpTable(JumpTableSDNode *JT,
3189 SelectionDAG &DAG) const {
3190 SDLoc DL(JT);
3191 EVT PtrVT = getPointerTy(DAG.getDataLayout());
3192 SDValue Result = DAG.getTargetJumpTable(JT->getIndex(), PtrVT);
3193
3194 // Use LARL to load the address of the table.
3195 return DAG.getNode(SystemZISD::PCREL_WRAPPER, DL, PtrVT, Result);
3196}
3197
3198SDValue SystemZTargetLowering::lowerConstantPool(ConstantPoolSDNode *CP,
3199 SelectionDAG &DAG) const {
3200 SDLoc DL(CP);
3201 EVT PtrVT = getPointerTy(DAG.getDataLayout());
3202
3203 SDValue Result;
3204 if (CP->isMachineConstantPoolEntry())
3205 Result = DAG.getTargetConstantPool(CP->getMachineCPVal(), PtrVT,
3206 CP->getAlignment());
3207 else
3208 Result = DAG.getTargetConstantPool(CP->getConstVal(), PtrVT,
3209 CP->getAlignment(), CP->getOffset());
3210
3211 // Use LARL to load the address of the constant pool entry.
3212 return DAG.getNode(SystemZISD::PCREL_WRAPPER, DL, PtrVT, Result);
3213}
3214
3215SDValue SystemZTargetLowering::lowerFRAMEADDR(SDValue Op,
3216 SelectionDAG &DAG) const {
3217 MachineFunction &MF = DAG.getMachineFunction();
3218 MachineFrameInfo &MFI = MF.getFrameInfo();
3219 MFI.setFrameAddressIsTaken(true);
3220
3221 SDLoc DL(Op);
3222 unsigned Depth = cast<ConstantSDNode>(Op.getOperand(0))->getZExtValue();
3223 EVT PtrVT = getPointerTy(DAG.getDataLayout());
3224
3225 // By definition, the frame address is the address of the back chain.
3226 auto *TFL =
3227 static_cast<const SystemZFrameLowering *>(Subtarget.getFrameLowering());
3228 int BackChainIdx = TFL->getOrCreateFramePointerSaveIndex(MF);
3229 SDValue BackChain = DAG.getFrameIndex(BackChainIdx, PtrVT);
3230
3231 // FIXME The frontend should detect this case.
3232 if (Depth > 0) {
3233 report_fatal_error("Unsupported stack frame traversal count");
3234 }
3235
3236 return BackChain;
3237}
3238
3239SDValue SystemZTargetLowering::lowerRETURNADDR(SDValue Op,
3240 SelectionDAG &DAG) const {
3241 MachineFunction &MF = DAG.getMachineFunction();
3242 MachineFrameInfo &MFI = MF.getFrameInfo();
3243 MFI.setReturnAddressIsTaken(true);
3244
3245 if (verifyReturnAddressArgumentIsConstant(Op, DAG))
3246 return SDValue();
3247
3248 SDLoc DL(Op);
3249 unsigned Depth = cast<ConstantSDNode>(Op.getOperand(0))->getZExtValue();
3250 EVT PtrVT = getPointerTy(DAG.getDataLayout());
3251
3252 // FIXME The frontend should detect this case.
3253 if (Depth > 0) {
3254 report_fatal_error("Unsupported stack frame traversal count");
3255 }
3256
3257 // Return R14D, which has the return address. Mark it an implicit live-in.
3258 unsigned LinkReg = MF.addLiveIn(SystemZ::R14D, &SystemZ::GR64BitRegClass);
3259 return DAG.getCopyFromReg(DAG.getEntryNode(), DL, LinkReg, PtrVT);
3260}
3261
3262SDValue SystemZTargetLowering::lowerBITCAST(SDValue Op,
3263 SelectionDAG &DAG) const {
3264 SDLoc DL(Op);
3265 SDValue In = Op.getOperand(0);
3266 EVT InVT = In.getValueType();
3267 EVT ResVT = Op.getValueType();
3268
3269 // Convert loads directly. This is normally done by DAGCombiner,
3270 // but we need this case for bitcasts that are created during lowering
3271 // and which are then lowered themselves.
3272 if (auto *LoadN = dyn_cast<LoadSDNode>(In))
3273 if (ISD::isNormalLoad(LoadN)) {
3274 SDValue NewLoad = DAG.getLoad(ResVT, DL, LoadN->getChain(),
3275 LoadN->getBasePtr(), LoadN->getMemOperand());
3276 // Update the chain uses.
3277 DAG.ReplaceAllUsesOfValueWith(SDValue(LoadN, 1), NewLoad.getValue(1));
3278 return NewLoad;
3279 }
3280
3281 if (InVT == MVT::i32 && ResVT == MVT::f32) {
3282 SDValue In64;
3283 if (Subtarget.hasHighWord()) {
3284 SDNode *U64 = DAG.getMachineNode(TargetOpcode::IMPLICIT_DEF, DL,
3285 MVT::i64);
3286 In64 = DAG.getTargetInsertSubreg(SystemZ::subreg_h32, DL,
3287 MVT::i64, SDValue(U64, 0), In);
3288 } else {
3289 In64 = DAG.getNode(ISD::ANY_EXTEND, DL, MVT::i64, In);
3290 In64 = DAG.getNode(ISD::SHL, DL, MVT::i64, In64,
3291 DAG.getConstant(32, DL, MVT::i64));
3292 }
3293 SDValue Out64 = DAG.getNode(ISD::BITCAST, DL, MVT::f64, In64);
3294 return DAG.getTargetExtractSubreg(SystemZ::subreg_h32,
3295 DL, MVT::f32, Out64);
3296 }
3297 if (InVT == MVT::f32 && ResVT == MVT::i32) {
3298 SDNode *U64 = DAG.getMachineNode(TargetOpcode::IMPLICIT_DEF, DL, MVT::f64);
3299 SDValue In64 = DAG.getTargetInsertSubreg(SystemZ::subreg_h32, DL,
3300 MVT::f64, SDValue(U64, 0), In);
3301 SDValue Out64 = DAG.getNode(ISD::BITCAST, DL, MVT::i64, In64);
3302 if (Subtarget.hasHighWord())
3303 return DAG.getTargetExtractSubreg(SystemZ::subreg_h32, DL,
3304 MVT::i32, Out64);
3305 SDValue Shift = DAG.getNode(ISD::SRL, DL, MVT::i64, Out64,
3306 DAG.getConstant(32, DL, MVT::i64));
3307 return DAG.getNode(ISD::TRUNCATE, DL, MVT::i32, Shift);
3308 }
3309 llvm_unreachable("Unexpected bitcast combination");
3310}
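// Sketch of the non-high-word path above, using integer arithmetic only:
// the 32-bit payload travels in the upper half of a 64-bit container, since
// SystemZ keeps f32 values in the high half of an FPR (function names here
// are ad hoc, for illustration).
#include <cstdint>
static inline uint64_t i32ToContainerSketch(uint32_t V) {
  return uint64_t(V) << 32;          // ANY_EXTEND + SHL by 32
}
static inline uint32_t containerToI32Sketch(uint64_t C) {
  return uint32_t(C >> 32);          // SRL by 32 + TRUNCATE
}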
3311
3312SDValue SystemZTargetLowering::lowerVASTART(SDValue Op,
3313 SelectionDAG &DAG) const {
3314 MachineFunction &MF = DAG.getMachineFunction();
3315 SystemZMachineFunctionInfo *FuncInfo =
3316 MF.getInfo<SystemZMachineFunctionInfo>();
3317 EVT PtrVT = getPointerTy(DAG.getDataLayout());
3318
3319 SDValue Chain = Op.getOperand(0);
3320 SDValue Addr = Op.getOperand(1);
3321 const Value *SV = cast<SrcValueSDNode>(Op.getOperand(2))->getValue();
3322 SDLoc DL(Op);
3323
3324 // The initial values of each field.
3325 const unsigned NumFields = 4;
3326 SDValue Fields[NumFields] = {
3327 DAG.getConstant(FuncInfo->getVarArgsFirstGPR(), DL, PtrVT),
3328 DAG.getConstant(FuncInfo->getVarArgsFirstFPR(), DL, PtrVT),
3329 DAG.getFrameIndex(FuncInfo->getVarArgsFrameIndex(), PtrVT),
3330 DAG.getFrameIndex(FuncInfo->getRegSaveFrameIndex(), PtrVT)
3331 };
3332
3333 // Store each field into its respective slot.
3334 SDValue MemOps[NumFields];
3335 unsigned Offset = 0;
3336 for (unsigned I = 0; I < NumFields; ++I) {
3337 SDValue FieldAddr = Addr;
3338 if (Offset != 0)
3339 FieldAddr = DAG.getNode(ISD::ADD, DL, PtrVT, FieldAddr,
3340 DAG.getIntPtrConstant(Offset, DL));
3341 MemOps[I] = DAG.getStore(Chain, DL, Fields[I], FieldAddr,
3342 MachinePointerInfo(SV, Offset));
3343 Offset += 8;
3344 }
3345 return DAG.getNode(ISD::TokenFactor, DL, MVT::Other, MemOps);
3346}
3347
3348SDValue SystemZTargetLowering::lowerVACOPY(SDValue Op,
3349 SelectionDAG &DAG) const {
3350 SDValue Chain = Op.getOperand(0);
3351 SDValue DstPtr = Op.getOperand(1);
3352 SDValue SrcPtr = Op.getOperand(2);
3353 const Value *DstSV = cast<SrcValueSDNode>(Op.getOperand(3))->getValue();
3354 const Value *SrcSV = cast<SrcValueSDNode>(Op.getOperand(4))->getValue();
3355 SDLoc DL(Op);
3356
3357 return DAG.getMemcpy(Chain, DL, DstPtr, SrcPtr, DAG.getIntPtrConstant(32, DL),
3358 /*Align*/8, /*isVolatile*/false, /*AlwaysInline*/false,
3359 /*isTailCall*/false,
3360 MachinePointerInfo(DstSV), MachinePointerInfo(SrcSV));
3361}
3362
3363SDValue SystemZTargetLowering::
3364lowerDYNAMIC_STACKALLOC(SDValue Op, SelectionDAG &DAG) const {
3365 const TargetFrameLowering *TFI = Subtarget.getFrameLowering();
3366 MachineFunction &MF = DAG.getMachineFunction();
3367 bool RealignOpt = !MF.getFunction().hasFnAttribute("no-realign-stack");
3368 bool StoreBackchain = MF.getFunction().hasFnAttribute("backchain");
3369
3370 SDValue Chain = Op.getOperand(0);
3371 SDValue Size = Op.getOperand(1);
3372 SDValue Align = Op.getOperand(2);
3373 SDLoc DL(Op);
3374
3375 // If user has set the no alignment function attribute, ignore
3376 // alloca alignments.
3377 uint64_t AlignVal = (RealignOpt ?
3378 dyn_cast<ConstantSDNode>(Align)->getZExtValue() : 0);
3379
3380 uint64_t StackAlign = TFI->getStackAlignment();
3381 uint64_t RequiredAlign = std::max(AlignVal, StackAlign);
3382 uint64_t ExtraAlignSpace = RequiredAlign - StackAlign;
3383
3384 unsigned SPReg = getStackPointerRegisterToSaveRestore();
3385 SDValue NeededSpace = Size;
3386
3387 // Get a reference to the stack pointer.
3388 SDValue OldSP = DAG.getCopyFromReg(Chain, DL, SPReg, MVT::i64);
3389
3390 // If we need a backchain, save it now.
3391 SDValue Backchain;
3392 if (StoreBackchain)
3393 Backchain = DAG.getLoad(MVT::i64, DL, Chain, OldSP, MachinePointerInfo());
3394
3395 // Add extra space for alignment if needed.
3396 if (ExtraAlignSpace)
3397 NeededSpace = DAG.getNode(ISD::ADD, DL, MVT::i64, NeededSpace,
3398 DAG.getConstant(ExtraAlignSpace, DL, MVT::i64));
3399
3400 // Get the new stack pointer value.
3401 SDValue NewSP = DAG.getNode(ISD::SUB, DL, MVT::i64, OldSP, NeededSpace);
3402
3403 // Copy the new stack pointer back.
3404 Chain = DAG.getCopyToReg(Chain, DL, SPReg, NewSP);
3405
3406 // The allocated data lives above the 160 bytes allocated for the standard
3407 // frame, plus any outgoing stack arguments. We don't know how much that
3408 // amounts to yet, so emit a special ADJDYNALLOC placeholder.
3409 SDValue ArgAdjust = DAG.getNode(SystemZISD::ADJDYNALLOC, DL, MVT::i64);
3410 SDValue Result = DAG.getNode(ISD::ADD, DL, MVT::i64, NewSP, ArgAdjust);
3411
3412 // Dynamically realign if needed.
3413 if (RequiredAlign > StackAlign) {
3414 Result =
3415 DAG.getNode(ISD::ADD, DL, MVT::i64, Result,
3416 DAG.getConstant(ExtraAlignSpace, DL, MVT::i64));
3417 Result =
3418 DAG.getNode(ISD::AND, DL, MVT::i64, Result,
3419 DAG.getConstant(~(RequiredAlign - 1), DL, MVT::i64));
3420 }
3421
3422 if (StoreBackchain)
3423 Chain = DAG.getStore(Chain, DL, Backchain, NewSP, MachinePointerInfo());
3424
3425 SDValue Ops[2] = { Result, Chain };
3426 return DAG.getMergeValues(Ops, DL);
3427}
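// The dynamic realignment above, reduced to its pointer arithmetic (a sketch,
// assuming Ptr is already StackAlign-aligned and RequiredAlign is a power of
// two no smaller than StackAlign, as the surrounding code guarantees):
#include <cstdint>
static inline uint64_t realignSketch(uint64_t Ptr, uint64_t StackAlign,
                                     uint64_t RequiredAlign) {
  return (Ptr + (RequiredAlign - StackAlign)) & ~(RequiredAlign - 1);
}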
3428
3429SDValue SystemZTargetLowering::lowerGET_DYNAMIC_AREA_OFFSET(
3430 SDValue Op, SelectionDAG &DAG) const {
3431 SDLoc DL(Op);
3432
3433 return DAG.getNode(SystemZISD::ADJDYNALLOC, DL, MVT::i64);
3434}
3435
3436SDValue SystemZTargetLowering::lowerSMUL_LOHI(SDValue Op,
3437 SelectionDAG &DAG) const {
3438 EVT VT = Op.getValueType();
3439 SDLoc DL(Op);
3440 SDValue Ops[2];
3441 if (is32Bit(VT))
3442 // Just do a normal 64-bit multiplication and extract the results.
3443 // We define this so that it can be used for constant division.
3444 lowerMUL_LOHI32(DAG, DL, ISD::SIGN_EXTEND, Op.getOperand(0),
3445 Op.getOperand(1), Ops[1], Ops[0]);
3446 else if (Subtarget.hasMiscellaneousExtensions2())
3447 // SystemZISD::SMUL_LOHI returns the low result in the odd register and
3448 // the high result in the even register. ISD::SMUL_LOHI is defined to
3449 // return the low half first, so the results are in reverse order.
3450 lowerGR128Binary(DAG, DL, VT, SystemZISD::SMUL_LOHI,
3451 Op.getOperand(0), Op.getOperand(1), Ops[1], Ops[0]);
3452 else {
3453 // Do a full 128-bit multiplication based on SystemZISD::UMUL_LOHI:
3454 //
3455 // (ll * rl) + ((lh * rl) << 64) + ((ll * rh) << 64)
3456 //
3457 // but using the fact that the upper halves are either all zeros
3458 // or all ones:
3459 //
3460 // (ll * rl) - ((lh & rl) << 64) - ((ll & rh) << 64)
3461 //
3462 // and grouping the right terms together since they are quicker than the
3463 // multiplication:
3464 //
3465 // (ll * rl) - (((lh & rl) + (ll & rh)) << 64)
3466 SDValue C63 = DAG.getConstant(63, DL, MVT::i64);
3467 SDValue LL = Op.getOperand(0);
3468 SDValue RL = Op.getOperand(1);
3469 SDValue LH = DAG.getNode(ISD::SRA, DL, VT, LL, C63);
3470 SDValue RH = DAG.getNode(ISD::SRA, DL, VT, RL, C63);
3471 // SystemZISD::UMUL_LOHI returns the low result in the odd register and
3472 // the high result in the even register. ISD::SMUL_LOHI is defined to
3473 // return the low half first, so the results are in reverse order.
3474 lowerGR128Binary(DAG, DL, VT, SystemZISD::UMUL_LOHI,
3475 LL, RL, Ops[1], Ops[0]);
3476 SDValue NegLLTimesRH = DAG.getNode(ISD::AND, DL, VT, LL, RH);
3477 SDValue NegLHTimesRL = DAG.getNode(ISD::AND, DL, VT, LH, RL);
3478 SDValue NegSum = DAG.getNode(ISD::ADD, DL, VT, NegLLTimesRH, NegLHTimesRL);
3479 Ops[1] = DAG.getNode(ISD::SUB, DL, VT, Ops[1], NegSum);
3480 }
3481 return DAG.getMergeValues(Ops, DL);
3482}
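// The identity used in the non-miscellaneous-extensions-2 path above,
// demonstrated at 32x32->64 bits with portable C++ (a sketch; the lowering
// applies the same algebra at 64x64->128 bits): the signed high half equals
// the unsigned high half minus ((lh & rl) + (ll & rh)), where lh and rh are
// the operands' sign masks (all zeros or all ones).
#include <cstdint>
static inline uint32_t signedMulHigh32Sketch(int32_t L, int32_t R) {
  uint32_t LL = uint32_t(L), RL = uint32_t(R);
  uint32_t UnsignedHi = uint32_t((uint64_t(LL) * RL) >> 32);
  uint32_t LH = (L < 0) ? 0xffffffffu : 0u;   // sign mask of L
  uint32_t RH = (R < 0) ? 0xffffffffu : 0u;   // sign mask of R
  return UnsignedHi - ((LH & RL) + (LL & RH));
}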
3483
3484SDValue SystemZTargetLowering::lowerUMUL_LOHI(SDValue Op,
3485 SelectionDAG &DAG) const {
3486 EVT VT = Op.getValueType();
3487 SDLoc DL(Op);
3488 SDValue Ops[2];
3489 if (is32Bit(VT))
3490 // Just do a normal 64-bit multiplication and extract the results.
3491 // We define this so that it can be used for constant division.
3492 lowerMUL_LOHI32(DAG, DL, ISD::ZERO_EXTEND, Op.getOperand(0),
3493 Op.getOperand(1), Ops[1], Ops[0]);
3494 else
3495 // SystemZISD::UMUL_LOHI returns the low result in the odd register and
3496 // the high result in the even register. ISD::UMUL_LOHI is defined to
3497 // return the low half first, so the results are in reverse order.
3498 lowerGR128Binary(DAG, DL, VT, SystemZISD::UMUL_LOHI,
3499 Op.getOperand(0), Op.getOperand(1), Ops[1], Ops[0]);
3500 return DAG.getMergeValues(Ops, DL);
3501}
3502
3503SDValue SystemZTargetLowering::lowerSDIVREM(SDValue Op,
3504 SelectionDAG &DAG) const {
3505 SDValue Op0 = Op.getOperand(0);
3506 SDValue Op1 = Op.getOperand(1);
3507 EVT VT = Op.getValueType();
3508 SDLoc DL(Op);
3509
3510 // We use DSGF for 32-bit division. This means the first operand must
3511 // always be 64-bit, and the second operand should be 32-bit whenever
3512 // that is possible, to improve performance.
3513 if (is32Bit(VT))
3514 Op0 = DAG.getNode(ISD::SIGN_EXTEND, DL, MVT::i64, Op0);
3515 else if (DAG.ComputeNumSignBits(Op1) > 32)
3516 Op1 = DAG.getNode(ISD::TRUNCATE, DL, MVT::i32, Op1);
3517
3518 // DSG(F) returns the remainder in the even register and the
3519 // quotient in the odd register.
3520 SDValue Ops[2];
3521 lowerGR128Binary(DAG, DL, VT, SystemZISD::SDIVREM, Op0, Op1, Ops[1], Ops[0]);
3522 return DAG.getMergeValues(Ops, DL);
3523}
3524
3525SDValue SystemZTargetLowering::lowerUDIVREM(SDValue Op,
3526 SelectionDAG &DAG) const {
3527 EVT VT = Op.getValueType();
3528 SDLoc DL(Op);
3529
3530 // DL(G) returns the remainder in the even register and the
3531 // quotient in the odd register.
3532 SDValue Ops[2];
3533 lowerGR128Binary(DAG, DL, VT, SystemZISD::UDIVREM,
3534 Op.getOperand(0), Op.getOperand(1), Ops[1], Ops[0]);
3535 return DAG.getMergeValues(Ops, DL);
3536}
3537
3538SDValue SystemZTargetLowering::lowerOR(SDValue Op, SelectionDAG &DAG) const {
3539 assert(Op.getValueType() == MVT::i64 && "Should be 64-bit operation");
3540
3541 // Get the known-zero masks for each operand.
3542 SDValue Ops[] = {Op.getOperand(0), Op.getOperand(1)};
3543 KnownBits Known[2] = {DAG.computeKnownBits(Ops[0]),
3544 DAG.computeKnownBits(Ops[1])};
3545
3546 // See if the upper 32 bits of one operand and the lower 32 bits of the
3547 // other are known zero. They are the low and high operands respectively.
3548 uint64_t Masks[] = { Known[0].Zero.getZExtValue(),
3549 Known[1].Zero.getZExtValue() };
3550 unsigned High, Low;
3551 if ((Masks[0] >> 32) == 0xffffffff && uint32_t(Masks[1]) == 0xffffffff)
3552 High = 1, Low = 0;
3553 else if ((Masks[1] >> 32) == 0xffffffff && uint32_t(Masks[0]) == 0xffffffff)
3554 High = 0, Low = 1;
3555 else
3556 return Op;
3557
3558 SDValue LowOp = Ops[Low];
3559 SDValue HighOp = Ops[High];
3560
3561 // If the high part is a constant, we're better off using IILH.
3562 if (HighOp.getOpcode() == ISD::Constant)
3563 return Op;
3564
3565 // If the low part is a constant that is outside the range of LHI,
3566 // then we're better off using IILF.
3567 if (LowOp.getOpcode() == ISD::Constant) {
3568 int64_t Value = int32_t(cast<ConstantSDNode>(LowOp)->getZExtValue());
3569 if (!isInt<16>(Value))
3570 return Op;
3571 }
3572
3573 // Check whether the high part is an AND that doesn't change the
3574 // high 32 bits and just masks out low bits. We can skip it if so.
3575 if (HighOp.getOpcode() == ISD::AND &&
3576 HighOp.getOperand(1).getOpcode() == ISD::Constant) {
3577 SDValue HighOp0 = HighOp.getOperand(0);
3578 uint64_t Mask = cast<ConstantSDNode>(HighOp.getOperand(1))->getZExtValue();
3579 if (DAG.MaskedValueIsZero(HighOp0, APInt(64, ~(Mask | 0xffffffff))))
3580 HighOp = HighOp0;
3581 }
3582
3583 // Take advantage of the fact that all GR32 operations only change the
3584 // low 32 bits by truncating Low to an i32 and inserting it directly
3585 // using a subreg. The interesting cases are those where the truncation
3586 // can be folded.
3587 SDLoc DL(Op);
3588 SDValue Low32 = DAG.getNode(ISD::TRUNCATE, DL, MVT::i32, LowOp);
3589 return DAG.getTargetInsertSubreg(SystemZ::subreg_l32, DL,
3590 MVT::i64, HighOp, Low32);
3591}
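// What the final subreg insertion amounts to, assuming the known-bits
// conditions established above hold (HighOp contributes only the upper
// 32 bits, LowOp only the lower 32 bits); a plain C++ sketch:
#include <cstdint>
static inline uint64_t mergeHighLowSketch(uint64_t HighOp, uint64_t LowOp) {
  return (HighOp & 0xffffffff00000000ULL) | uint32_t(LowOp);
}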
3592
3593// Lower SADDO/SSUBO/UADDO/USUBO nodes.
3594SDValue SystemZTargetLowering::lowerXALUO(SDValue Op,
3595 SelectionDAG &DAG) const {
3596 SDNode *N = Op.getNode();
3597 SDValue LHS = N->getOperand(0);
3598 SDValue RHS = N->getOperand(1);
3599 SDLoc DL(N);
3600 unsigned BaseOp = 0;
3601 unsigned CCValid = 0;
3602 unsigned CCMask = 0;
3603
3604 switch (Op.getOpcode()) {
3605 default: llvm_unreachable("Unknown instruction!");
3606 case ISD::SADDO:
3607 BaseOp = SystemZISD::SADDO;
3608 CCValid = SystemZ::CCMASK_ARITH;
3609 CCMask = SystemZ::CCMASK_ARITH_OVERFLOW;
3610 break;
3611 case ISD::SSUBO:
3612 BaseOp = SystemZISD::SSUBO;
3613 CCValid = SystemZ::CCMASK_ARITH;
3614 CCMask = SystemZ::CCMASK_ARITH_OVERFLOW;
3615 break;
3616 case ISD::UADDO:
3617 BaseOp = SystemZISD::UADDO;
3618 CCValid = SystemZ::CCMASK_LOGICAL;
3619 CCMask = SystemZ::CCMASK_LOGICAL_CARRY;
3620 break;
3621 case ISD::USUBO:
3622 BaseOp = SystemZISD::USUBO;
3623 CCValid = SystemZ::CCMASK_LOGICAL;
3624 CCMask = SystemZ::CCMASK_LOGICAL_BORROW;
3625 break;
3626 }
3627
3628 SDVTList VTs = DAG.getVTList(N->getValueType(0), MVT::i32);
3629 SDValue Result = DAG.getNode(BaseOp, DL, VTs, LHS, RHS);
3630
3631 SDValue SetCC = emitSETCC(DAG, DL, Result.getValue(1), CCValid, CCMask);
3632 if (N->getValueType(1) == MVT::i1)
3633 SetCC = DAG.getNode(ISD::TRUNCATE, DL, MVT::i1, SetCC);
3634
3635 return DAG.getNode(ISD::MERGE_VALUES, DL, N->getVTList(), Result, SetCC);
3636}
3637
3638static bool isAddCarryChain(SDValue Carry) {
3639 while (Carry.getOpcode() == ISD::ADDCARRY)
3640 Carry = Carry.getOperand(2);
3641 return Carry.getOpcode() == ISD::UADDO;
3642}
3643
3644static bool isSubBorrowChain(SDValue Carry) {
3645 while (Carry.getOpcode() == ISD::SUBCARRY)
3646 Carry = Carry.getOperand(2);
3647 return Carry.getOpcode() == ISD::USUBO;
3648}
3649
3650// Lower ADDCARRY/SUBCARRY nodes.
3651SDValue SystemZTargetLowering::lowerADDSUBCARRY(SDValue Op,
3652 SelectionDAG &DAG) const {
3653
3654 SDNode *N = Op.getNode();
3655 MVT VT = N->getSimpleValueType(0);
3656
3657 // Let legalize expand this if it isn't a legal type yet.
3658 if (!DAG.getTargetLoweringInfo().isTypeLegal(VT))
3659 return SDValue();
3660
3661 SDValue LHS = N->getOperand(0);
3662 SDValue RHS = N->getOperand(1);
3663 SDValue Carry = Op.getOperand(2);
3664 SDLoc DL(N);
3665 unsigned BaseOp = 0;
3666 unsigned CCValid = 0;
3667 unsigned CCMask = 0;
3668
3669 switch (Op.getOpcode()) {
3670 default: llvm_unreachable("Unknown instruction!");
3671 case ISD::ADDCARRY:
3672 if (!isAddCarryChain(Carry))
3673 return SDValue();
3674
3675 BaseOp = SystemZISD::ADDCARRY;
3676 CCValid = SystemZ::CCMASK_LOGICAL;
3677 CCMask = SystemZ::CCMASK_LOGICAL_CARRY;
3678 break;
3679 case ISD::SUBCARRY:
3680 if (!isSubBorrowChain(Carry))
3681 return SDValue();
3682
3683 BaseOp = SystemZISD::SUBCARRY;
3684 CCValid = SystemZ::CCMASK_LOGICAL;
3685 CCMask = SystemZ::CCMASK_LOGICAL_BORROW;
3686 break;
3687 }
3688
3689 // Set the condition code from the carry flag.
3690 Carry = DAG.getNode(SystemZISD::GET_CCMASK, DL, MVT::i32, Carry,
3691 DAG.getConstant(CCValid, DL, MVT::i32),
3692 DAG.getConstant(CCMask, DL, MVT::i32));
3693
3694 SDVTList VTs = DAG.getVTList(VT, MVT::i32);
3695 SDValue Result = DAG.getNode(BaseOp, DL, VTs, LHS, RHS, Carry);
3696
3697 SDValue SetCC = emitSETCC(DAG, DL, Result.getValue(1), CCValid, CCMask);
3698 if (N->getValueType(1) == MVT::i1)
3699 SetCC = DAG.getNode(ISD::TRUNCATE, DL, MVT::i1, SetCC);
3700
3701 return DAG.getNode(ISD::MERGE_VALUES, DL, N->getVTList(), Result, SetCC);
3702}
3703
3704SDValue SystemZTargetLowering::lowerCTPOP(SDValue Op,
3705 SelectionDAG &DAG) const {
3706 EVT VT = Op.getValueType();
3707 SDLoc DL(Op);
3708 Op = Op.getOperand(0);
3709
3710 // Handle vector types via VPOPCT.
3711 if (VT.isVector()) {
3712 Op = DAG.getNode(ISD::BITCAST, DL, MVT::v16i8, Op);
3713 Op = DAG.getNode(SystemZISD::POPCNT, DL, MVT::v16i8, Op);
3714 switch (VT.getScalarSizeInBits()) {
3715 case 8:
3716 break;
3717 case 16: {
3718 Op = DAG.getNode(ISD::BITCAST, DL, VT, Op);
3719 SDValue Shift = DAG.getConstant(8, DL, MVT::i32);
3720 SDValue Tmp = DAG.getNode(SystemZISD::VSHL_BY_SCALAR, DL, VT, Op, Shift);
3721 Op = DAG.getNode(ISD::ADD, DL, VT, Op, Tmp);
3722 Op = DAG.getNode(SystemZISD::VSRL_BY_SCALAR, DL, VT, Op, Shift);
3723 break;
3724 }
3725 case 32: {
3726 SDValue Tmp = DAG.getSplatBuildVector(MVT::v16i8, DL,
3727 DAG.getConstant(0, DL, MVT::i32));
3728 Op = DAG.getNode(SystemZISD::VSUM, DL, VT, Op, Tmp);
3729 break;
3730 }
3731 case 64: {
3732 SDValue Tmp = DAG.getSplatBuildVector(MVT::v16i8, DL,
3733 DAG.getConstant(0, DL, MVT::i32));
3734 Op = DAG.getNode(SystemZISD::VSUM, DL, MVT::v4i32, Op, Tmp);
3735 Op = DAG.getNode(SystemZISD::VSUM, DL, VT, Op, Tmp);
3736 break;
3737 }
3738 default:
3739 llvm_unreachable("Unexpected type");
3740 }
3741 return Op;
3742 }
3743
3744 // Get the known-zero mask for the operand.
3745 KnownBits Known = DAG.computeKnownBits(Op);
3746 unsigned NumSignificantBits = Known.getMaxValue().getActiveBits();
3747 if (NumSignificantBits == 0)
3748 return DAG.getConstant(0, DL, VT);
3749
3750 // Skip known-zero high parts of the operand.
3751 int64_t OrigBitSize = VT.getSizeInBits();
3752 int64_t BitSize = (int64_t)1 << Log2_32_Ceil(NumSignificantBits);
3753 BitSize = std::min(BitSize, OrigBitSize);
3754
3755 // The POPCNT instruction counts the number of bits in each byte.
3756 Op = DAG.getNode(ISD::ANY_EXTEND, DL, MVT::i64, Op);
3757 Op = DAG.getNode(SystemZISD::POPCNT, DL, MVT::i64, Op);
3758 Op = DAG.getNode(ISD::TRUNCATE, DL, VT, Op);
3759
3760 // Add up per-byte counts in a binary tree. All bits of Op at
3761 // position larger than BitSize remain zero throughout.
3762 for (int64_t I = BitSize / 2; I >= 8; I = I / 2) {
3763 SDValue Tmp = DAG.getNode(ISD::SHL, DL, VT, Op, DAG.getConstant(I, DL, VT));
3764 if (BitSize != OrigBitSize)
3765 Tmp = DAG.getNode(ISD::AND, DL, VT, Tmp,
3766 DAG.getConstant(((uint64_t)1 << BitSize) - 1, DL, VT));
3767 Op = DAG.getNode(ISD::ADD, DL, VT, Op, Tmp);
3768 }
3769
3770 // Extract overall result from high byte.
3771 if (BitSize > 8)
3772 Op = DAG.getNode(ISD::SRL, DL, VT, Op,
3773 DAG.getConstant(BitSize - 8, DL, VT));
3774
3775 return Op;
3776}
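// A standalone sketch of the same reduction for a full 64-bit value, using
// the GCC/Clang __builtin_popcount intrinsic to model the per-byte counts
// that POPCNT produces; the loop mirrors the binary tree above and the
// overall count ends up in the top byte.
#include <cstdint>
static inline uint64_t popcntPerByteSketch(uint64_t V) {
  uint64_t R = 0;
  for (int I = 0; I < 8; ++I)
    R |= uint64_t(__builtin_popcount(unsigned((V >> (8 * I)) & 0xff)))
         << (8 * I);
  return R;
}
static inline unsigned ctpop64Sketch(uint64_t V) {
  uint64_t Op = popcntPerByteSketch(V);
  for (int I = 32; I >= 8; I /= 2)  // add up per-byte counts in a binary tree
    Op = Op + (Op << I);
  return unsigned(Op >> 56);        // extract overall result from high byte
}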
3777
3778SDValue SystemZTargetLowering::lowerATOMIC_FENCE(SDValue Op,
3779 SelectionDAG &DAG) const {
3780 SDLoc DL(Op);
3781 AtomicOrdering FenceOrdering = static_cast<AtomicOrdering>(
3782 cast<ConstantSDNode>(Op.getOperand(1))->getZExtValue());
3783 SyncScope::ID FenceSSID = static_cast<SyncScope::ID>(
3784 cast<ConstantSDNode>(Op.getOperand(2))->getZExtValue());
3785
3786 // The only fence that needs an instruction is a sequentially-consistent
3787 // cross-thread fence.
3788 if (FenceOrdering == AtomicOrdering::SequentiallyConsistent &&
3789 FenceSSID == SyncScope::System) {
3790 return SDValue(DAG.getMachineNode(SystemZ::Serialize, DL, MVT::Other,
3791 Op.getOperand(0)),
3792 0);
3793 }
3794
3795 // MEMBARRIER is a compiler barrier; it codegens to a no-op.
3796 return DAG.getNode(SystemZISD::MEMBARRIER, DL, MVT::Other, Op.getOperand(0));
3797}
3798
3799// Op is an atomic load. Lower it into a normal volatile load.
3800SDValue SystemZTargetLowering::lowerATOMIC_LOAD(SDValue Op,
3801 SelectionDAG &DAG) const {
3802 auto *Node = cast<AtomicSDNode>(Op.getNode());
3803 return DAG.getExtLoad(ISD::EXTLOAD, SDLoc(Op), Op.getValueType(),
3804 Node->getChain(), Node->getBasePtr(),
3805 Node->getMemoryVT(), Node->getMemOperand());
3806}
3807
3808// Op is an atomic store. Lower it into a normal volatile store.
3809SDValue SystemZTargetLowering::lowerATOMIC_STORE(SDValue Op,
3810 SelectionDAG &DAG) const {
3811 auto *Node = cast<AtomicSDNode>(Op.getNode());
3812 SDValue Chain = DAG.getTruncStore(Node->getChain(), SDLoc(Op), Node->getVal(),
3813 Node->getBasePtr(), Node->getMemoryVT(),
3814 Node->getMemOperand());
3815 // We have to enforce sequential consistency by performing a
3816 // serialization operation after the store.
3817 if (Node->getOrdering() == AtomicOrdering::SequentiallyConsistent)
3818 Chain = SDValue(DAG.getMachineNode(SystemZ::Serialize, SDLoc(Op),
3819 MVT::Other, Chain), 0);
3820 return Chain;
3821}
3822
3823// Op is an 8-, 16-bit or 32-bit ATOMIC_LOAD_* operation. Lower the first
3824// two into the fullword ATOMIC_LOADW_* operation given by Opcode.
3825SDValue SystemZTargetLowering::lowerATOMIC_LOAD_OP(SDValue Op,
3826 SelectionDAG &DAG,
3827 unsigned Opcode) const {
3828 auto *Node = cast<AtomicSDNode>(Op.getNode());
3829
3830 // 32-bit operations need no code outside the main loop.
3831 EVT NarrowVT = Node->getMemoryVT();
3832 EVT WideVT = MVT::i32;
3833 if (NarrowVT == WideVT)
3834 return Op;
3835
3836 int64_t BitSize = NarrowVT.getSizeInBits();
3837 SDValue ChainIn = Node->getChain();
3838 SDValue Addr = Node->getBasePtr();
3839 SDValue Src2 = Node->getVal();
3840 MachineMemOperand *MMO = Node->getMemOperand();
3841 SDLoc DL(Node);
3842 EVT PtrVT = Addr.getValueType();
3843
3844 // Convert atomic subtracts of constants into additions.
3845 if (Opcode == SystemZISD::ATOMIC_LOADW_SUB)
3846 if (auto *Const = dyn_cast<ConstantSDNode>(Src2)) {
3847 Opcode = SystemZISD::ATOMIC_LOADW_ADD;
3848 Src2 = DAG.getConstant(-Const->getSExtValue(), DL, Src2.getValueType());
3849 }
3850
3851 // Get the address of the containing word.
3852 SDValue AlignedAddr = DAG.getNode(ISD::AND, DL, PtrVT, Addr,
3853 DAG.getConstant(-4, DL, PtrVT));
3854
3855 // Get the number of bits that the word must be rotated left in order
3856 // to bring the field to the top bits of a GR32.
3857 SDValue BitShift = DAG.getNode(ISD::SHL, DL, PtrVT, Addr,
3858 DAG.getConstant(3, DL, PtrVT));
3859 BitShift = DAG.getNode(ISD::TRUNCATE, DL, WideVT, BitShift);
3860
3861 // Get the complementing shift amount, for rotating a field in the top
3862 // bits back to its proper position.
3863 SDValue NegBitShift = DAG.getNode(ISD::SUB, DL, WideVT,
3864 DAG.getConstant(0, DL, WideVT), BitShift);
3865
3866 // Extend the source operand to 32 bits and prepare it for the inner loop.
3867 // ATOMIC_SWAPW uses RISBG to rotate the field left, but all other
3868 // operations require the source to be shifted in advance. (This shift
3869 // can be folded if the source is constant.) For AND and NAND, the lower
3870 // bits must be set, while for other opcodes they should be left clear.
3871 if (Opcode != SystemZISD::ATOMIC_SWAPW)
3872 Src2 = DAG.getNode(ISD::SHL, DL, WideVT, Src2,
3873 DAG.getConstant(32 - BitSize, DL, WideVT));
3874 if (Opcode == SystemZISD::ATOMIC_LOADW_AND ||
3875 Opcode == SystemZISD::ATOMIC_LOADW_NAND)
3876 Src2 = DAG.getNode(ISD::OR, DL, WideVT, Src2,
3877 DAG.getConstant(uint32_t(-1) >> BitSize, DL, WideVT));
3878
3879 // Construct the ATOMIC_LOADW_* node.
3880 SDVTList VTList = DAG.getVTList(WideVT, MVT::Other);
3881 SDValue Ops[] = { ChainIn, AlignedAddr, Src2, BitShift, NegBitShift,
3882 DAG.getConstant(BitSize, DL, WideVT) };
3883 SDValue AtomicOp = DAG.getMemIntrinsicNode(Opcode, DL, VTList, Ops,
3884 NarrowVT, MMO);
3885
3886 // Rotate the result of the final CS so that the field is in the lower
3887 // bits of a GR32, then truncate it.
3888 SDValue ResultShift = DAG.getNode(ISD::ADD, DL, WideVT, BitShift,
3889 DAG.getConstant(BitSize, DL, WideVT));
3890 SDValue Result = DAG.getNode(ISD::ROTL, DL, WideVT, AtomicOp, ResultShift);
3891
3892 SDValue RetOps[2] = { Result, AtomicOp.getValue(1) };
3893 return DAG.getMergeValues(RetOps, DL);
3894}
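// Illustrative address arithmetic for the subword lowering above (a sketch,
// assuming Addr is a plain byte address; the helper names are ad hoc): the
// containing word sits at Addr & -4, and rotating that word left by
// 8 * (Addr & 3) bits brings the addressed field to the top of a GR32 on
// this big-endian target.
#include <cstdint>
static inline uint64_t alignedAddrSketch(uint64_t Addr) {
  return Addr & ~uint64_t(3);
}
static inline unsigned bitShiftSketch(uint64_t Addr) {
  return unsigned(Addr << 3) & 31;              // rotate-left amount
}
static inline unsigned negBitShiftSketch(uint64_t Addr) {
  return (32 - bitShiftSketch(Addr)) & 31;      // complementing rotate
}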
3895
3896// Op is an ATOMIC_LOAD_SUB operation. Lower 8- and 16-bit operations
3897// into ATOMIC_LOADW_SUBs and decide whether to convert 32- and 64-bit
3898// operations into additions.
3899SDValue SystemZTargetLowering::lowerATOMIC_LOAD_SUB(SDValue Op,
3900 SelectionDAG &DAG) const {
3901 auto *Node = cast<AtomicSDNode>(Op.getNode());
3902 EVT MemVT = Node->getMemoryVT();
3903 if (MemVT == MVT::i32 || MemVT == MVT::i64) {
3904 // A full-width operation.
3905 assert(Op.getValueType() == MemVT && "Mismatched VTs");
3906 SDValue Src2 = Node->getVal();
3907 SDValue NegSrc2;
3908 SDLoc DL(Src2);
3909
3910 if (auto *Op2 = dyn_cast<ConstantSDNode>(Src2)) {
3911 // Use an addition if the operand is constant and either LAA(G) is
3912 // available or the negative value is in the range of A(G)FHI.
3913 int64_t Value = (-Op2->getAPIntValue()).getSExtValue();
3914 if (isInt<32>(Value) || Subtarget.hasInterlockedAccess1())
3915 NegSrc2 = DAG.getConstant(Value, DL, MemVT);
3916 } else if (Subtarget.hasInterlockedAccess1())
3917 // Use LAA(G) if available.
3918 NegSrc2 = DAG.getNode(ISD::SUB, DL, MemVT, DAG.getConstant(0, DL, MemVT),
3919 Src2);
3920
3921 if (NegSrc2.getNode())
3922 return DAG.getAtomic(ISD::ATOMIC_LOAD_ADD, DL, MemVT,
3923 Node->getChain(), Node->getBasePtr(), NegSrc2,
3924 Node->getMemOperand());
3925
3926 // Use the node as-is.
3927 return Op;
3928 }
3929
3930 return lowerATOMIC_LOAD_OP(Op, DAG, SystemZISD::ATOMIC_LOADW_SUB);
3931}
3932
3933// Lower 8/16/32/64-bit ATOMIC_CMP_SWAP_WITH_SUCCESS node.
3934SDValue SystemZTargetLowering::lowerATOMIC_CMP_SWAP(SDValue Op,
3935 SelectionDAG &DAG) const {
3936 auto *Node = cast<AtomicSDNode>(Op.getNode());
3937 SDValue ChainIn = Node->getOperand(0);
3938 SDValue Addr = Node->getOperand(1);
3939 SDValue CmpVal = Node->getOperand(2);
3940 SDValue SwapVal = Node->getOperand(3);
3941 MachineMemOperand *MMO = Node->getMemOperand();
3942 SDLoc DL(Node);
3943
3944 // We have native support for 32-bit and 64-bit compare and swap, but we
3945 // still need to expand extracting the "success" result from the CC.
3946 EVT NarrowVT = Node->getMemoryVT();
3947 EVT WideVT = NarrowVT == MVT::i64 ? MVT::i64 : MVT::i32;
3948 if (NarrowVT == WideVT) {
3949 SDVTList Tys = DAG.getVTList(WideVT, MVT::i32, MVT::Other);
3950 SDValue Ops[] = { ChainIn, Addr, CmpVal, SwapVal };
3951 SDValue AtomicOp = DAG.getMemIntrinsicNode(SystemZISD::ATOMIC_CMP_SWAP,
3952 DL, Tys, Ops, NarrowVT, MMO);
3953 SDValue Success = emitSETCC(DAG, DL, AtomicOp.getValue(1),
3954 SystemZ::CCMASK_CS, SystemZ::CCMASK_CS_EQ);
3955
3956 DAG.ReplaceAllUsesOfValueWith(Op.getValue(0), AtomicOp.getValue(0));
3957 DAG.ReplaceAllUsesOfValueWith(Op.getValue(1), Success);
3958 DAG.ReplaceAllUsesOfValueWith(Op.getValue(2), AtomicOp.getValue(2));
3959 return SDValue();
3960 }
3961
3962 // Convert 8-bit and 16-bit compare and swap to a loop, implemented
3963 // via a fullword ATOMIC_CMP_SWAPW operation.
3964 int64_t BitSize = NarrowVT.getSizeInBits();
3965 EVT PtrVT = Addr.getValueType();
3966
3967 // Get the address of the containing word.
3968 SDValue AlignedAddr = DAG.getNode(ISD::AND, DL, PtrVT, Addr,
3969 DAG.getConstant(-4, DL, PtrVT));
3970
3971 // Get the number of bits that the word must be rotated left in order
3972 // to bring the field to the top bits of a GR32.
3973 SDValue BitShift = DAG.getNode(ISD::SHL, DL, PtrVT, Addr,
3974 DAG.getConstant(3, DL, PtrVT));
3975 BitShift = DAG.getNode(ISD::TRUNCATE, DL, WideVT, BitShift);
3976
3977 // Get the complementing shift amount, for rotating a field in the top
3978 // bits back to its proper position.
3979 SDValue NegBitShift = DAG.getNode(ISD::SUB, DL, WideVT,
3980 DAG.getConstant(0, DL, WideVT), BitShift);
3981
3982 // Construct the ATOMIC_CMP_SWAPW node.
3983 SDVTList VTList = DAG.getVTList(WideVT, MVT::i32, MVT::Other);
3984 SDValue Ops[] = { ChainIn, AlignedAddr, CmpVal, SwapVal, BitShift,
3985 NegBitShift, DAG.getConstant(BitSize, DL, WideVT) };
3986 SDValue AtomicOp = DAG.getMemIntrinsicNode(SystemZISD::ATOMIC_CMP_SWAPW, DL,
3987 VTList, Ops, NarrowVT, MMO);
3988 SDValue Success = emitSETCC(DAG, DL, AtomicOp.getValue(1),
3989 SystemZ::CCMASK_ICMP, SystemZ::CCMASK_CMP_EQ);
3990
3991 DAG.ReplaceAllUsesOfValueWith(Op.getValue(0), AtomicOp.getValue(0));
3992 DAG.ReplaceAllUsesOfValueWith(Op.getValue(1), Success);
3993 DAG.ReplaceAllUsesOfValueWith(Op.getValue(2), AtomicOp.getValue(2));
3994 return SDValue();
3995}
3996
3997MachineMemOperand::Flags
3998SystemZTargetLowering::getMMOFlags(const Instruction &I) const {
3999 // Because of how we convert atomic_load and atomic_store to normal loads and
4000 // stores in the DAG, we need to ensure that the MMOs are marked volatile
4001  // since DAGCombine hasn't been updated to account for atomic but
4002  // non-volatile loads. (See D57601)
4003 if (auto *SI = dyn_cast<StoreInst>(&I))
4004 if (SI->isAtomic())
4005 return MachineMemOperand::MOVolatile;
4006 if (auto *LI = dyn_cast<LoadInst>(&I))
4007 if (LI->isAtomic())
4008 return MachineMemOperand::MOVolatile;
4009 if (auto *AI = dyn_cast<AtomicRMWInst>(&I))
4010 if (AI->isAtomic())
4011 return MachineMemOperand::MOVolatile;
4012 if (auto *AI = dyn_cast<AtomicCmpXchgInst>(&I))
4013 if (AI->isAtomic())
4014 return MachineMemOperand::MOVolatile;
4015 return MachineMemOperand::MONone;
4016}
4017
4018SDValue SystemZTargetLowering::lowerSTACKSAVE(SDValue Op,
4019 SelectionDAG &DAG) const {
4020 MachineFunction &MF = DAG.getMachineFunction();
4021 MF.getInfo<SystemZMachineFunctionInfo>()->setManipulatesSP(true);
4022 if (MF.getFunction().getCallingConv() == CallingConv::GHC)
4023 report_fatal_error("Variable-sized stack allocations are not supported "
4024 "in GHC calling convention");
4025 return DAG.getCopyFromReg(Op.getOperand(0), SDLoc(Op),
4026 SystemZ::R15D, Op.getValueType());
4027}
4028
4029SDValue SystemZTargetLowering::lowerSTACKRESTORE(SDValue Op,
4030 SelectionDAG &DAG) const {
4031 MachineFunction &MF = DAG.getMachineFunction();
4032 MF.getInfo<SystemZMachineFunctionInfo>()->setManipulatesSP(true);
4033 bool StoreBackchain = MF.getFunction().hasFnAttribute("backchain");
4034
4035 if (MF.getFunction().getCallingConv() == CallingConv::GHC)
4036 report_fatal_error("Variable-sized stack allocations are not supported "
4037 "in GHC calling convention");
4038
4039 SDValue Chain = Op.getOperand(0);
4040 SDValue NewSP = Op.getOperand(1);
4041 SDValue Backchain;
4042 SDLoc DL(Op);
4043
4044 if (StoreBackchain) {
4045 SDValue OldSP = DAG.getCopyFromReg(Chain, DL, SystemZ::R15D, MVT::i64);
4046 Backchain = DAG.getLoad(MVT::i64, DL, Chain, OldSP, MachinePointerInfo());
4047 }
4048
4049 Chain = DAG.getCopyToReg(Chain, DL, SystemZ::R15D, NewSP);
4050
4051 if (StoreBackchain)
4052 Chain = DAG.getStore(Chain, DL, Backchain, NewSP, MachinePointerInfo());
4053
4054 return Chain;
4055}
4056
4057SDValue SystemZTargetLowering::lowerPREFETCH(SDValue Op,
4058 SelectionDAG &DAG) const {
4059 bool IsData = cast<ConstantSDNode>(Op.getOperand(4))->getZExtValue();
4060 if (!IsData)
4061 // Just preserve the chain.
4062 return Op.getOperand(0);
4063
4064 SDLoc DL(Op);
4065 bool IsWrite = cast<ConstantSDNode>(Op.getOperand(2))->getZExtValue();
4066 unsigned Code = IsWrite ? SystemZ::PFD_WRITE : SystemZ::PFD_READ;
4067 auto *Node = cast<MemIntrinsicSDNode>(Op.getNode());
4068 SDValue Ops[] = {Op.getOperand(0), DAG.getTargetConstant(Code, DL, MVT::i32),
4069 Op.getOperand(1)};
4070 return DAG.getMemIntrinsicNode(SystemZISD::PREFETCH, DL,
4071 Node->getVTList(), Ops,
4072 Node->getMemoryVT(), Node->getMemOperand());
4073}
4074
4075// Convert condition code in CCReg to an i32 value.
4076static SDValue getCCResult(SelectionDAG &DAG, SDValue CCReg) {
4077 SDLoc DL(CCReg);
4078 SDValue IPM = DAG.getNode(SystemZISD::IPM, DL, MVT::i32, CCReg);
4079 return DAG.getNode(ISD::SRL, DL, MVT::i32, IPM,
4080 DAG.getConstant(SystemZ::IPM_CC, DL, MVT::i32));
4081}
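A minimal standalone sketch of the extraction that getCCResult models, assuming IPM has left the condition code in bits 29:28 of an otherwise zero 32-bit value and that SystemZ::IPM_CC is the corresponding shift amount of 28; the sample CC value is illustrative.

#include <cassert>
#include <cstdint>

int main() {
  const unsigned IPM_CC = 28;        // assumed value of SystemZ::IPM_CC
  uint32_t IPMResult = 2u << IPM_CC; // hypothetical IPM result for CC == 2
  uint32_t CC = IPMResult >> IPM_CC; // what the SRL in getCCResult produces
  assert(CC == 2);
  return 0;
}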
4082
4083SDValue
4084SystemZTargetLowering::lowerINTRINSIC_W_CHAIN(SDValue Op,
4085 SelectionDAG &DAG) const {
4086 unsigned Opcode, CCValid;
4087 if (isIntrinsicWithCCAndChain(Op, Opcode, CCValid)) {
4088     assert(Op->getNumValues() == 2 && "Expected only CC result and chain");
4089 SDNode *Node = emitIntrinsicWithCCAndChain(DAG, Op, Opcode);
4090 SDValue CC = getCCResult(DAG, SDValue(Node, 0));
4091 DAG.ReplaceAllUsesOfValueWith(SDValue(Op.getNode(), 0), CC);
4092 return SDValue();
4093 }
4094
4095 return SDValue();
4096}
4097
4098SDValue
4099SystemZTargetLowering::lowerINTRINSIC_WO_CHAIN(SDValue Op,
4100 SelectionDAG &DAG) const {
4101 unsigned Opcode, CCValid;
4102 if (isIntrinsicWithCC(Op, Opcode, CCValid)) {
4103 SDNode *Node = emitIntrinsicWithCC(DAG, Op, Opcode);
4104 if (Op->getNumValues() == 1)
4105 return getCCResult(DAG, SDValue(Node, 0));
4106     assert(Op->getNumValues() == 2 && "Expected a CC and non-CC result");
4107 return DAG.getNode(ISD::MERGE_VALUES, SDLoc(Op), Op->getVTList(),
4108 SDValue(Node, 0), getCCResult(DAG, SDValue(Node, 1)));
4109 }
4110
4111 unsigned Id = cast<ConstantSDNode>(Op.getOperand(0))->getZExtValue();
4112 switch (Id) {
4113 case Intrinsic::thread_pointer:
4114 return lowerThreadPointer(SDLoc(Op), DAG);
4115
4116 case Intrinsic::s390_vpdi:
4117 return DAG.getNode(SystemZISD::PERMUTE_DWORDS, SDLoc(Op), Op.getValueType(),
4118 Op.getOperand(1), Op.getOperand(2), Op.getOperand(3));
4119
4120 case Intrinsic::s390_vperm:
4121 return DAG.getNode(SystemZISD::PERMUTE, SDLoc(Op), Op.getValueType(),
4122 Op.getOperand(1), Op.getOperand(2), Op.getOperand(3));
4123
4124 case Intrinsic::s390_vuphb:
4125 case Intrinsic::s390_vuphh:
4126 case Intrinsic::s390_vuphf:
4127 return DAG.getNode(SystemZISD::UNPACK_HIGH, SDLoc(Op), Op.getValueType(),
4128 Op.getOperand(1));
4129
4130 case Intrinsic::s390_vuplhb:
4131 case Intrinsic::s390_vuplhh:
4132 case Intrinsic::s390_vuplhf:
4133 return DAG.getNode(SystemZISD::UNPACKL_HIGH, SDLoc(Op), Op.getValueType(),
4134 Op.getOperand(1));
4135
4136 case Intrinsic::s390_vuplb:
4137 case Intrinsic::s390_vuplhw:
4138 case Intrinsic::s390_vuplf:
4139 return DAG.getNode(SystemZISD::UNPACK_LOW, SDLoc(Op), Op.getValueType(),
4140 Op.getOperand(1));
4141
4142 case Intrinsic::s390_vupllb:
4143 case Intrinsic::s390_vupllh:
4144 case Intrinsic::s390_vupllf:
4145 return DAG.getNode(SystemZISD::UNPACKL_LOW, SDLoc(Op), Op.getValueType(),
4146 Op.getOperand(1));
4147
4148 case Intrinsic::s390_vsumb:
4149 case Intrinsic::s390_vsumh:
4150 case Intrinsic::s390_vsumgh:
4151 case Intrinsic::s390_vsumgf:
4152 case Intrinsic::s390_vsumqf:
4153 case Intrinsic::s390_vsumqg:
4154 return DAG.getNode(SystemZISD::VSUM, SDLoc(Op), Op.getValueType(),
4155 Op.getOperand(1), Op.getOperand(2));
4156 }
4157
4158 return SDValue();
4159}
4160
4161namespace {
4162// Says that SystemZISD operation Opcode can be used to perform the equivalent
4163// of a VPERM with permute vector Bytes. If Opcode takes three operands,
4164// Operand is the constant third operand, otherwise it is the number of
4165// bytes in each element of the result.
4166struct Permute {
4167 unsigned Opcode;
4168 unsigned Operand;
4169 unsigned char Bytes[SystemZ::VectorBytes];
4170};
4171}
4172
4173static const Permute PermuteForms[] = {
4174 // VMRHG
4175 { SystemZISD::MERGE_HIGH, 8,
4176 { 0, 1, 2, 3, 4, 5, 6, 7, 16, 17, 18, 19, 20, 21, 22, 23 } },
4177 // VMRHF
4178 { SystemZISD::MERGE_HIGH, 4,
4179 { 0, 1, 2, 3, 16, 17, 18, 19, 4, 5, 6, 7, 20, 21, 22, 23 } },
4180 // VMRHH
4181 { SystemZISD::MERGE_HIGH, 2,
4182 { 0, 1, 16, 17, 2, 3, 18, 19, 4, 5, 20, 21, 6, 7, 22, 23 } },
4183 // VMRHB
4184 { SystemZISD::MERGE_HIGH, 1,
4185 { 0, 16, 1, 17, 2, 18, 3, 19, 4, 20, 5, 21, 6, 22, 7, 23 } },
4186 // VMRLG
4187 { SystemZISD::MERGE_LOW, 8,
4188 { 8, 9, 10, 11, 12, 13, 14, 15, 24, 25, 26, 27, 28, 29, 30, 31 } },
4189 // VMRLF
4190 { SystemZISD::MERGE_LOW, 4,
4191 { 8, 9, 10, 11, 24, 25, 26, 27, 12, 13, 14, 15, 28, 29, 30, 31 } },
4192 // VMRLH
4193 { SystemZISD::MERGE_LOW, 2,
4194 { 8, 9, 24, 25, 10, 11, 26, 27, 12, 13, 28, 29, 14, 15, 30, 31 } },
4195 // VMRLB
4196 { SystemZISD::MERGE_LOW, 1,
4197 { 8, 24, 9, 25, 10, 26, 11, 27, 12, 28, 13, 29, 14, 30, 15, 31 } },
4198 // VPKG
4199 { SystemZISD::PACK, 4,
4200 { 4, 5, 6, 7, 12, 13, 14, 15, 20, 21, 22, 23, 28, 29, 30, 31 } },
4201 // VPKF
4202 { SystemZISD::PACK, 2,
4203 { 2, 3, 6, 7, 10, 11, 14, 15, 18, 19, 22, 23, 26, 27, 30, 31 } },
4204 // VPKH
4205 { SystemZISD::PACK, 1,
4206 { 1, 3, 5, 7, 9, 11, 13, 15, 17, 19, 21, 23, 25, 27, 29, 31 } },
4207 // VPDI V1, V2, 4 (low half of V1, high half of V2)
4208 { SystemZISD::PERMUTE_DWORDS, 4,
4209 { 8, 9, 10, 11, 12, 13, 14, 15, 16, 17, 18, 19, 20, 21, 22, 23 } },
4210 // VPDI V1, V2, 1 (high half of V1, low half of V2)
4211 { SystemZISD::PERMUTE_DWORDS, 1,
4212 { 0, 1, 2, 3, 4, 5, 6, 7, 24, 25, 26, 27, 28, 29, 30, 31 } }
4213};
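To make the encoding of the table above concrete, the standalone sketch below applies the VMRHG byte mask to two illustrative 16-byte operands in the way a VPERM-style selector would: indices 0..15 pick from the first operand and 16..31 from the second.

#include <array>
#include <cstdio>

int main() {
  // VMRHG form from the table above.
  const unsigned char Mask[16] =
      { 0, 1, 2, 3, 4, 5, 6, 7, 16, 17, 18, 19, 20, 21, 22, 23 };
  std::array<unsigned char, 16> Op0, Op1, Result;
  for (unsigned I = 0; I < 16; ++I) {
    Op0[I] = I;          // illustrative operand contents
    Op1[I] = 0x40 + I;
  }
  for (unsigned I = 0; I < 16; ++I)
    Result[I] = Mask[I] < 16 ? Op0[Mask[I]] : Op1[Mask[I] - 16];
  // Result holds bytes 0..7 of Op0 followed by bytes 0..7 of Op1, i.e. the
  // high doublewords of the two operands merged.
  for (unsigned char B : Result)
    std::printf("%02x ", B);
  std::printf("\n");
  return 0;
}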
4214
4215// Called after matching a vector shuffle against a particular pattern.
4216// Both the original shuffle and the pattern have two vector operands.
4217// OpNos[0] is the operand of the original shuffle that should be used for
4218// operand 0 of the pattern, or -1 if operand 0 of the pattern can be anything.
4219// OpNos[1] is the same for operand 1 of the pattern. Resolve these -1s and
4220// set OpNo0 and OpNo1 to the shuffle operands that should actually be used
4221// for operands 0 and 1 of the pattern.
4222static bool chooseShuffleOpNos(int *OpNos, unsigned &OpNo0, unsigned &OpNo1) {
4223 if (OpNos[0] < 0) {
4224 if (OpNos[1] < 0)
4225 return false;
4226 OpNo0 = OpNo1 = OpNos[1];
4227 } else if (OpNos[1] < 0) {
4228 OpNo0 = OpNo1 = OpNos[0];
4229 } else {
4230 OpNo0 = OpNos[0];
4231 OpNo1 = OpNos[1];
4232 }
4233 return true;
4234}
4235
4236// Bytes is a VPERM-like permute vector, except that -1 is used for
4237// undefined bytes. Return true if the VPERM can be implemented using P.
4238// When returning true set OpNo0 to the VPERM operand that should be
4239// used for operand 0 of P and likewise OpNo1 for operand 1 of P.
4240//
4241// For example, if swapping the VPERM operands allows P to match, OpNo0
4242// will be 1 and OpNo1 will be 0. If instead Bytes only refers to one
4243// operand, but rewriting it to use two duplicated operands allows it to
4244// match P, then OpNo0 and OpNo1 will be the same.
4245static bool matchPermute(const SmallVectorImpl<int> &Bytes, const Permute &P,
4246 unsigned &OpNo0, unsigned &OpNo1) {
4247 int OpNos[] = { -1, -1 };
4248 for (unsigned I = 0; I < SystemZ::VectorBytes; ++I) {
4249 int Elt = Bytes[I];
4250 if (Elt >= 0) {
4251 // Make sure that the two permute vectors use the same suboperand
4252 // byte number. Only the operand numbers (the high bits) are
4253 // allowed to differ.
4254 if ((Elt ^ P.Bytes[I]) & (SystemZ::VectorBytes - 1))
4255 return false;
4256 int ModelOpNo = P.Bytes[I] / SystemZ::VectorBytes;
4257 int RealOpNo = unsigned(Elt) / SystemZ::VectorBytes;
4258 // Make sure that the operand mappings are consistent with previous
4259 // elements.
4260 if (OpNos[ModelOpNo] == 1 - RealOpNo)
4261 return false;
4262 OpNos[ModelOpNo] = RealOpNo;
4263 }
4264 }
4265 return chooseShuffleOpNos(OpNos, OpNo0, OpNo1);
4266}
4267
4268// As above, but search for a matching permute.
4269static const Permute *matchPermute(const SmallVectorImpl<int> &Bytes,
4270 unsigned &OpNo0, unsigned &OpNo1) {
4271 for (auto &P : PermuteForms)
4272 if (matchPermute(Bytes, P, OpNo0, OpNo1))
4273 return &P;
4274 return nullptr;
4275}
4276
4277// Bytes is a VPERM-like permute vector, except that -1 is used for
4278// undefined bytes. This permute is an operand of an outer permute.
4279// See whether redistributing the -1 bytes gives a shuffle that can be
4280// implemented using P. If so, set Transform to a VPERM-like permute vector
4281// that, when applied to the result of P, gives the original permute in Bytes.
4282static bool matchDoublePermute(const SmallVectorImpl<int> &Bytes,
4283 const Permute &P,
4284 SmallVectorImpl<int> &Transform) {
4285 unsigned To = 0;
4286 for (unsigned From = 0; From < SystemZ::VectorBytes; ++From) {
4287 int Elt = Bytes[From];
4288 if (Elt < 0)
4289 // Byte number From of the result is undefined.
4290 Transform[From] = -1;
4291 else {
4292 while (P.Bytes[To] != Elt) {
4293 To += 1;
4294 if (To == SystemZ::VectorBytes)
4295 return false;
4296 }
4297 Transform[From] = To;
4298 }
4299 }
4300 return true;
4301}
4302
4303// As above, but search for a matching permute.
4304static const Permute *matchDoublePermute(const SmallVectorImpl<int> &Bytes,
4305 SmallVectorImpl<int> &Transform) {
4306 for (auto &P : PermuteForms)
4307 if (matchDoublePermute(Bytes, P, Transform))
4308 return &P;
4309 return nullptr;
4310}
4311
4312// Convert the mask of the given shuffle op into a byte-level mask,
4313// as if it had type vNi8.
4314static bool getVPermMask(SDValue ShuffleOp,
4315 SmallVectorImpl<int> &Bytes) {
4316 EVT VT = ShuffleOp.getValueType();
4317 unsigned NumElements = VT.getVectorNumElements();
4318 unsigned BytesPerElement = VT.getVectorElementType().getStoreSize();
4319
4320 if (auto *VSN = dyn_cast<ShuffleVectorSDNode>(ShuffleOp)) {
4321 Bytes.resize(NumElements * BytesPerElement, -1);
4322 for (unsigned I = 0; I < NumElements; ++I) {
4323 int Index = VSN->getMaskElt(I);
4324 if (Index >= 0)
4325 for (unsigned J = 0; J < BytesPerElement; ++J)
4326 Bytes[I * BytesPerElement + J] = Index * BytesPerElement + J;
4327 }
4328 return true;
4329 }
4330 if (SystemZISD::SPLAT == ShuffleOp.getOpcode() &&
4331 isa<ConstantSDNode>(ShuffleOp.getOperand(1))) {
4332 unsigned Index = ShuffleOp.getConstantOperandVal(1);
4333 Bytes.resize(NumElements * BytesPerElement, -1);
4334 for (unsigned I = 0; I < NumElements; ++I)
4335 for (unsigned J = 0; J < BytesPerElement; ++J)
4336 Bytes[I * BytesPerElement + J] = Index * BytesPerElement + J;
4337 return true;
4338 }
4339 return false;
4340}
4341
4342// Bytes is a VPERM-like permute vector, except that -1 is used for
4343// undefined bytes. See whether bytes [Start, Start + BytesPerElement) of
4344// the result come from a contiguous sequence of bytes from one input.
4345// Set Base to the selector for the first byte if so.
4346static bool getShuffleInput(const SmallVectorImpl<int> &Bytes, unsigned Start,
4347 unsigned BytesPerElement, int &Base) {
4348 Base = -1;
4349 for (unsigned I = 0; I < BytesPerElement; ++I) {
4350 if (Bytes[Start + I] >= 0) {
4351 unsigned Elem = Bytes[Start + I];
4352 if (Base < 0) {
4353 Base = Elem - I;
4354 // Make sure the bytes would come from one input operand.
4355 if (unsigned(Base) % Bytes.size() + BytesPerElement > Bytes.size())
4356 return false;
4357 } else if (unsigned(Base) != Elem - I)
4358 return false;
4359 }
4360 }
4361 return true;
4362}
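A small standalone sketch of the contiguity test above, using an illustrative mask in which bytes 4..7 of the result come from bytes 20..23 of the second operand, with one byte left undefined.

#include <vector>

int main() {
  std::vector<int> Bytes(16, -1);
  Bytes[4] = 20;
  Bytes[5] = 21;
  // Bytes[6] stays -1 (undefined) and is simply skipped by the loop above.
  Bytes[7] = 23;
  // getShuffleInput(Bytes, /*Start=*/4, /*BytesPerElement=*/4, Base) would
  // return true with Base == 20: 20 % 16 == 4, and 4 + 4 stays within one
  // 16-byte operand, so all defined bytes come from a single input.
  return 0;
}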
4363
4364// Bytes is a VPERM-like permute vector, except that -1 is used for
4365// undefined bytes. Return true if it can be performed using VSLDI.
4366// When returning true, set StartIndex to the shift amount and OpNo0
4367// and OpNo1 to the VPERM operands that should be used as the first
4368// and second shift operand respectively.
4369static bool isShlDoublePermute(const SmallVectorImpl<int> &Bytes,
4370 unsigned &StartIndex, unsigned &OpNo0,
4371 unsigned &OpNo1) {
4372 int OpNos[] = { -1, -1 };
4373 int Shift = -1;
4374 for (unsigned I = 0; I < 16; ++I) {
4375 int Index = Bytes[I];
4376 if (Index >= 0) {
4377 int ExpectedShift = (Index - I) % SystemZ::VectorBytes;
4378 int ModelOpNo = unsigned(ExpectedShift + I) / SystemZ::VectorBytes;
4379 int RealOpNo = unsigned(Index) / SystemZ::VectorBytes;
4380 if (Shift < 0)
4381 Shift = ExpectedShift;
4382 else if (Shift != ExpectedShift)
4383 return false;
4384 // Make sure that the operand mappings are consistent with previous
4385 // elements.
4386 if (OpNos[ModelOpNo] == 1 - RealOpNo)
4387 return false;
4388 OpNos[ModelOpNo] = RealOpNo;
4389 }
4390 }
4391 StartIndex = Shift;
4392 return chooseShuffleOpNos(OpNos, OpNo0, OpNo1);
4393}
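The mask shape that the routine above recognises can be written down directly; in the illustrative sketch below every defined byte I selects byte I + 3 of the 32-byte concatenation of the two operands, which corresponds to a VSLDI by 3.

int main() {
  int Bytes[16];
  for (unsigned I = 0; I < 16; ++I)
    Bytes[I] = int(I) + 3;  // { 3, 4, ..., 18 }: bytes 3..15 of operand 0
                            // followed by bytes 0..2 of operand 1
  // isShlDoublePermute would accept this mask with StartIndex == 3,
  // OpNo0 == 0 and OpNo1 == 1.
  (void)Bytes;
  return 0;
}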
4394
4395// Create a node that performs P on operands Op0 and Op1, casting the
4396// operands to the appropriate type. The type of the result is determined by P.
4397static SDValue getPermuteNode(SelectionDAG &DAG, const SDLoc &DL,
4398 const Permute &P, SDValue Op0, SDValue Op1) {
4399 // VPDI (PERMUTE_DWORDS) always operates on v2i64s. The input
4400 // elements of a PACK are twice as wide as the outputs.
4401 unsigned InBytes = (P.Opcode == SystemZISD::PERMUTE_DWORDS ? 8 :
4402 P.Opcode == SystemZISD::PACK ? P.Operand * 2 :
4403 P.Operand);
4404 // Cast both operands to the appropriate type.
4405 MVT InVT = MVT::getVectorVT(MVT::getIntegerVT(InBytes * 8),
4406 SystemZ::VectorBytes / InBytes);
4407 Op0 = DAG.getNode(ISD::BITCAST, DL, InVT, Op0);
4408 Op1 = DAG.getNode(ISD::BITCAST, DL, InVT, Op1);
4409 SDValue Op;
4410 if (P.Opcode == SystemZISD::PERMUTE_DWORDS) {
4411 SDValue Op2 = DAG.getTargetConstant(P.Operand, DL, MVT::i32);
4412 Op = DAG.getNode(SystemZISD::PERMUTE_DWORDS, DL, InVT, Op0, Op1, Op2);
4413 } else if (P.Opcode == SystemZISD::PACK) {
4414 MVT OutVT = MVT::getVectorVT(MVT::getIntegerVT(P.Operand * 8),
4415 SystemZ::VectorBytes / P.Operand);
4416 Op = DAG.getNode(SystemZISD::PACK, DL, OutVT, Op0, Op1);
4417 } else {
4418 Op = DAG.getNode(P.Opcode, DL, InVT, Op0, Op1);
4419 }
4420 return Op;
4421}
4422
4423// Bytes is a VPERM-like permute vector, except that -1 is used for
4424// undefined bytes. Implement it on operands Ops[0] and Ops[1] using
4425// VSLDI or VPERM.
4426static SDValue getGeneralPermuteNode(SelectionDAG &DAG, const SDLoc &DL,
4427 SDValue *Ops,
4428 const SmallVectorImpl<int> &Bytes) {
4429 for (unsigned I = 0; I < 2; ++I)
4430 Ops[I] = DAG.getNode(ISD::BITCAST, DL, MVT::v16i8, Ops[I]);
4431
4432 // First see whether VSLDI can be used.
4433 unsigned StartIndex, OpNo0, OpNo1;
4434 if (isShlDoublePermute(Bytes, StartIndex, OpNo0, OpNo1))
4435 return DAG.getNode(SystemZISD::SHL_DOUBLE, DL, MVT::v16i8, Ops[OpNo0],
4436 Ops[OpNo1],
4437 DAG.getTargetConstant(StartIndex, DL, MVT::i32));
4438
4439 // Fall back on VPERM. Construct an SDNode for the permute vector.
4440 SDValue IndexNodes[SystemZ::VectorBytes];
4441 for (unsigned I = 0; I < SystemZ::VectorBytes; ++I)
4442 if (Bytes[I] >= 0)
4443 IndexNodes[I] = DAG.getConstant(Bytes[I], DL, MVT::i32);
4444 else
4445 IndexNodes[I] = DAG.getUNDEF(MVT::i32);
4446 SDValue Op2 = DAG.getBuildVector(MVT::v16i8, DL, IndexNodes);
4447 return DAG.getNode(SystemZISD::PERMUTE, DL, MVT::v16i8, Ops[0], Ops[1], Op2);
4448}
4449
4450namespace {
4451// Describes a general N-operand vector shuffle.
4452struct GeneralShuffle {
4453 GeneralShuffle(EVT vt) : VT(vt) {}
4454 void addUndef();
4455 bool add(SDValue, unsigned);
4456 SDValue getNode(SelectionDAG &, const SDLoc &);
4457
4458 // The operands of the shuffle.
4459 SmallVector<SDValue, SystemZ::VectorBytes> Ops;
4460
4461 // Index I is -1 if byte I of the result is undefined. Otherwise the
4462 // result comes from byte Bytes[I] % SystemZ::VectorBytes of operand
4463 // Bytes[I] / SystemZ::VectorBytes.
4464 SmallVector<int, SystemZ::VectorBytes> Bytes;
4465
4466 // The type of the shuffle result.
4467 EVT VT;
4468};
4469}
4470
4471// Add an extra undefined element to the shuffle.
4472void GeneralShuffle::addUndef() {
4473 unsigned BytesPerElement = VT.getVectorElementType().getStoreSize();
4474 for (unsigned I = 0; I < BytesPerElement; ++I)
4475 Bytes.push_back(-1);
4476}
4477
4478// Add an extra element to the shuffle, taking it from element Elem of Op.
4479// A null Op indicates a vector input whose value will be calculated later;
4480// there is at most one such input per shuffle and it always has the same
4481// type as the result. Aborts and returns false if the source vector elements
4482// of an EXTRACT_VECTOR_ELT are smaller than the destination elements. Per
4483// LLVM they become implicitly extended, but this is rare and not optimized.
4484bool GeneralShuffle::add(SDValue Op, unsigned Elem) {
4485 unsigned BytesPerElement = VT.getVectorElementType().getStoreSize();
4486
4487 // The source vector can have wider elements than the result,
4488 // either through an explicit TRUNCATE or because of type legalization.
4489 // We want the least significant part.
4490 EVT FromVT = Op.getNode() ? Op.getValueType() : VT;
4491 unsigned FromBytesPerElement = FromVT.getVectorElementType().getStoreSize();
4492
4493 // Return false if the source elements are smaller than their destination
4494 // elements.
4495 if (FromBytesPerElement < BytesPerElement)
4496 return false;
4497
4498 unsigned Byte = ((Elem * FromBytesPerElement) % SystemZ::VectorBytes +
4499 (FromBytesPerElement - BytesPerElement));
4500
4501 // Look through things like shuffles and bitcasts.
4502 while (Op.getNode()) {
4503 if (Op.getOpcode() == ISD::BITCAST)
4504 Op = Op.getOperand(0);
4505 else if (Op.getOpcode() == ISD::VECTOR_SHUFFLE && Op.hasOneUse()) {
4506 // See whether the bytes we need come from a contiguous part of one
4507 // operand.
4508 SmallVector<int, SystemZ::VectorBytes> OpBytes;
4509 if (!getVPermMask(Op, OpBytes))
4510 break;
4511 int NewByte;
4512 if (!getShuffleInput(OpBytes, Byte, BytesPerElement, NewByte))
4513 break;
4514 if (NewByte < 0) {
4515 addUndef();
4516 return true;
4517 }
4518 Op = Op.getOperand(unsigned(NewByte) / SystemZ::VectorBytes);
4519 Byte = unsigned(NewByte) % SystemZ::VectorBytes;
4520 } else if (Op.isUndef()) {
4521 addUndef();
4522 return true;
4523 } else
4524 break;
4525 }
4526
4527 // Make sure that the source of the extraction is in Ops.
4528 unsigned OpNo = 0;
4529 for (; OpNo < Ops.size(); ++OpNo)
4530 if (Ops[OpNo] == Op)
4531 break;
4532 if (OpNo == Ops.size())
4533 Ops.push_back(Op);
4534
4535 // Add the element to Bytes.
4536 unsigned Base = OpNo * SystemZ::VectorBytes + Byte;
4537 for (unsigned I = 0; I < BytesPerElement; ++I)
4538 Bytes.push_back(Base + I);
4539
4540 return true;
4541}
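A worked, standalone example of the Byte computation above, assuming a v4i32 source feeding i16 result elements; the element index is illustrative.

int main() {
  const unsigned VectorBytes = 16;   // SystemZ::VectorBytes
  unsigned Elem = 1;                 // hypothetical source element index
  unsigned FromBytesPerElement = 4;  // i32 source elements
  unsigned BytesPerElement = 2;      // i16 result elements
  unsigned Byte = (Elem * FromBytesPerElement) % VectorBytes +
                  (FromBytesPerElement - BytesPerElement);
  // Byte == 6: on this big-endian target the least significant two bytes of
  // source element 1 occupy byte offsets 6 and 7 of the source vector.
  (void)Byte;
  return 0;
}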
4542
4543// Return SDNodes for the completed shuffle.
4544SDValue GeneralShuffle::getNode(SelectionDAG &DAG, const SDLoc &DL) {
4545   assert(Bytes.size() == SystemZ::VectorBytes && "Incomplete vector");
4546
4547 if (Ops.size() == 0)
4548 return DAG.getUNDEF(VT);
4549
4550 // Make sure that there are at least two shuffle operands.
4551 if (Ops.size() == 1)
4552 Ops.push_back(DAG.getUNDEF(MVT::v16i8));
4553
4554 // Create a tree of shuffles, deferring root node until after the loop.
4555 // Try to redistribute the undefined elements of non-root nodes so that
4556 // the non-root shuffles match something like a pack or merge, then adjust
4557 // the parent node's permute vector to compensate for the new order.
4558 // Among other things, this copes with vectors like <2 x i16> that were
4559 // padded with undefined elements during type legalization.
4560 //
4561 // In the best case this redistribution will lead to the whole tree
4562 // using packs and merges. It should rarely be a loss in other cases.
4563 unsigned Stride = 1;
4564 for (; Stride * 2 < Ops.size(); Stride *= 2) {
4565 for (unsigned I = 0; I < Ops.size() - Stride; I += Stride * 2) {
4566 SDValue SubOps[] = { Ops[I], Ops[I + Stride] };
4567
4568 // Create a mask for just these two operands.
4569 SmallVector<int, SystemZ::VectorBytes> NewBytes(SystemZ::VectorBytes);
4570 for (unsigned J = 0; J < SystemZ::VectorBytes; ++J) {
4571 unsigned OpNo = unsigned(Bytes[J]) / SystemZ::VectorBytes;
4572 unsigned Byte = unsigned(Bytes[J]) % SystemZ::VectorBytes;
4573 if (OpNo == I)
4574 NewBytes[J] = Byte;
4575 else if (OpNo == I + Stride)
4576 NewBytes[J] = SystemZ::VectorBytes + Byte;
4577 else
4578 NewBytes[J] = -1;
4579 }
4580       // See if it would be better to reorganize NewBytes to avoid using VPERM.
4581 SmallVector<int, SystemZ::VectorBytes> NewBytesMap(SystemZ::VectorBytes);
4582 if (const Permute *P = matchDoublePermute(NewBytes, NewBytesMap)) {
4583 Ops[I] = getPermuteNode(DAG, DL, *P, SubOps[0], SubOps[1]);
4584 // Applying NewBytesMap to Ops[I] gets back to NewBytes.
4585 for (unsigned J = 0; J < SystemZ::VectorBytes; ++J) {
4586 if (NewBytes[J] >= 0) {
4587           assert(unsigned(NewBytesMap[J]) < SystemZ::VectorBytes &&
4588                  "Invalid double permute");
4589 Bytes[J] = I * SystemZ::VectorBytes + NewBytesMap[J];
4590 } else
4591           assert(NewBytesMap[J] < 0 && "Invalid double permute");
4592 }
4593 } else {
4594 // Just use NewBytes on the operands.
4595 Ops[I] = getGeneralPermuteNode(DAG, DL, SubOps, NewBytes);
4596 for (unsigned J = 0; J < SystemZ::VectorBytes; ++J)
4597 if (NewBytes[J] >= 0)
4598 Bytes[J] = I * SystemZ::VectorBytes + J;
4599 }
4600 }
4601 }
4602
4603 // Now we just have 2 inputs. Put the second operand in Ops[1].
4604 if (Stride > 1) {
4605 Ops[1] = Ops[Stride];
4606 for (unsigned I = 0; I < SystemZ::VectorBytes; ++I)
4607 if (Bytes[I] >= int(SystemZ::VectorBytes))
4608 Bytes[I] -= (Stride - 1) * SystemZ::VectorBytes;
4609 }
4610
4611 // Look for an instruction that can do the permute without resorting
4612 // to VPERM.
4613 unsigned OpNo0, OpNo1;
4614 SDValue Op;
4615 if (const Permute *P = matchPermute(Bytes, OpNo0, OpNo1))
4616 Op = getPermuteNode(DAG, DL, *P, Ops[OpNo0], Ops[OpNo1]);
4617 else
4618 Op = getGeneralPermuteNode(DAG, DL, &Ops[0], Bytes);
4619 return DAG.getNode(ISD::BITCAST, DL, VT, Op);
4620}
4621
4622// Return true if the given BUILD_VECTOR is a scalar-to-vector conversion.
4623static bool isScalarToVector(SDValue Op) {
4624 for (unsigned I = 1, E = Op.getNumOperands(); I != E; ++I)
4625 if (!Op.getOperand(I).isUndef())
4626 return false;
4627 return true;
4628}
4629
4630// Return a vector of type VT that contains Value in the first element.
4631// The other elements don't matter.
4632static SDValue buildScalarToVector(SelectionDAG &DAG, const SDLoc &DL, EVT VT,
4633 SDValue Value) {
4634 // If we have a constant, replicate it to all elements and let the
4635 // BUILD_VECTOR lowering take care of it.
4636 if (Value.getOpcode() == ISD::Constant ||
4637 Value.getOpcode() == ISD::ConstantFP) {
4638 SmallVector<SDValue, 16> Ops(VT.getVectorNumElements(), Value);
4639 return DAG.getBuildVector(VT, DL, Ops);
4640 }
4641 if (Value.isUndef())
4642 return DAG.getUNDEF(VT);
4643 return DAG.getNode(ISD::SCALAR_TO_VECTOR, DL, VT, Value);
4644}
4645
4646// Return a vector of type VT in which Op0 is in element 0 and Op1 is in
4647// element 1. Used for cases in which replication is cheap.
4648static SDValue buildMergeScalars(SelectionDAG &DAG, const SDLoc &DL, EVT VT,
4649 SDValue Op0, SDValue Op1) {
4650 if (Op0.isUndef()) {
4651 if (Op1.isUndef())
4652 return DAG.getUNDEF(VT);
4653 return DAG.getNode(SystemZISD::REPLICATE, DL, VT, Op1);
4654 }
4655 if (Op1.isUndef())
4656 return DAG.getNode(SystemZISD::REPLICATE, DL, VT, Op0);
4657 return DAG.getNode(SystemZISD::MERGE_HIGH, DL, VT,
4658 buildScalarToVector(DAG, DL, VT, Op0),
4659 buildScalarToVector(DAG, DL, VT, Op1));
4660}
4661
4662// Extend GPR scalars Op0 and Op1 to doublewords and return a v2i64
4663// vector for them.
4664static SDValue joinDwords(SelectionDAG &DAG, const SDLoc &DL, SDValue Op0,
4665 SDValue Op1) {
4666 if (Op0.isUndef() && Op1.isUndef())
4667 return DAG.getUNDEF(MVT::v2i64);
4668 // If one of the two inputs is undefined then replicate the other one,
4669 // in order to avoid using another register unnecessarily.
4670 if (Op0.isUndef())
4671 Op0 = Op1 = DAG.getNode(ISD::ANY_EXTEND, DL, MVT::i64, Op1);
4672 else if (Op1.isUndef())
4673 Op0 = Op1 = DAG.getNode(ISD::ANY_EXTEND, DL, MVT::i64, Op0);
4674 else {
4675 Op0 = DAG.getNode(ISD::ANY_EXTEND, DL, MVT::i64, Op0);
4676 Op1 = DAG.getNode(ISD::ANY_EXTEND, DL, MVT::i64, Op1);
4677 }
4678 return DAG.getNode(SystemZISD::JOIN_DWORDS, DL, MVT::v2i64, Op0, Op1);
4679}
4680
4681// If a BUILD_VECTOR contains some EXTRACT_VECTOR_ELTs, it's usually
4682// better to use VECTOR_SHUFFLEs on them, only using BUILD_VECTOR for
4683// the non-EXTRACT_VECTOR_ELT elements. See if the given BUILD_VECTOR
4684// would benefit from this representation and return it if so.
4685static SDValue tryBuildVectorShuffle(SelectionDAG &DAG,
4686 BuildVectorSDNode *BVN) {
4687 EVT VT = BVN->getValueType(0);
4688 unsigned NumElements = VT.getVectorNumElements();
4689
4690 // Represent the BUILD_VECTOR as an N-operand VECTOR_SHUFFLE-like operation
4691 // on byte vectors. If there are non-EXTRACT_VECTOR_ELT elements that still
4692 // need a BUILD_VECTOR, add an additional placeholder operand for that
4693 // BUILD_VECTOR and store its operands in ResidueOps.
4694 GeneralShuffle GS(VT);
4695 SmallVector<SDValue, SystemZ::VectorBytes> ResidueOps;
4696 bool FoundOne = false;
4697 for (unsigned I = 0; I < NumElements; ++I) {
4698 SDValue Op = BVN->getOperand(I);
4699 if (Op.getOpcode() == ISD::TRUNCATE)
4700 Op = Op.getOperand(0);
4701 if (Op.getOpcode() == ISD::EXTRACT_VECTOR_ELT &&
4702 Op.getOperand(1).getOpcode() == ISD::Constant) {
4703 unsigned Elem = cast<ConstantSDNode>(Op.getOperand(1))->getZExtValue();
4704 if (!GS.add(Op.getOperand(0), Elem))
4705 return SDValue();
4706 FoundOne = true;
4707 } else if (Op.isUndef()) {
4708 GS.addUndef();
4709 } else {
4710 if (!GS.add(SDValue(), ResidueOps.size()))
4711 return SDValue();
4712 ResidueOps.push_back(BVN->getOperand(I));
4713 }
4714 }
4715
4716 // Nothing to do if there are no EXTRACT_VECTOR_ELTs.
4717 if (!FoundOne)
4718 return SDValue();
4719
4720 // Create the BUILD_VECTOR for the remaining elements, if any.
4721 if (!ResidueOps.empty()) {
4722 while (ResidueOps.size() < NumElements)
4723 ResidueOps.push_back(DAG.getUNDEF(ResidueOps[0].getValueType()));
4724 for (auto &Op : GS.Ops) {
4725 if (!Op.getNode()) {
4726 Op = DAG.getBuildVector(VT, SDLoc(BVN), ResidueOps);
4727 break;
4728 }
4729 }
4730 }
4731 return GS.getNode(DAG, SDLoc(BVN));
4732}
4733
4734bool SystemZTargetLowering::isVectorElementLoad(SDValue Op) const {
4735 if (Op.getOpcode() == ISD::LOAD && cast<LoadSDNode>(Op)->isUnindexed())
4736 return true;
4737 if (Subtarget.hasVectorEnhancements2() && Op.getOpcode() == SystemZISD::LRV)
4738 return true;
4739 return false;
4740}
4741
4742// Combine GPR scalar values Elems into a vector of type VT.
4743SDValue
4744SystemZTargetLowering::buildVector(SelectionDAG &DAG, const SDLoc &DL, EVT VT,
4745 SmallVectorImpl<SDValue> &Elems) const {
4746 // See whether there is a single replicated value.
4747 SDValue Single;
4748 unsigned int NumElements = Elems.size();
4749 unsigned int Count = 0;
4750 for (auto Elem : Elems) {
4751 if (!Elem.isUndef()) {
4752 if (!Single.getNode())
4753 Single = Elem;
4754 else if (Elem != Single) {
4755 Single = SDValue();
4756 break;
4757 }
4758 Count += 1;
4759 }
4760 }
4761 // There are three cases here:
4762 //
4763 // - if the only defined element is a loaded one, the best sequence
4764 // is a replicating load.
4765 //
4766 // - otherwise, if the only defined element is an i64 value, we will
4767 // end up with the same VLVGP sequence regardless of whether we short-cut
4768 // for replication or fall through to the later code.
4769 //
4770 // - otherwise, if the only defined element is an i32 or smaller value,
4771 // we would need 2 instructions to replicate it: VLVGP followed by VREPx.
4772 // This is only a win if the single defined element is used more than once.
4773 // In other cases we're better off using a single VLVGx.
4774 if (Single.getNode() && (Count > 1 || isVectorElementLoad(Single)))
4775 return DAG.getNode(SystemZISD::REPLICATE, DL, VT, Single);
4776
4777 // If all elements are loads, use VLREP/VLEs (below).
4778 bool AllLoads = true;
4779 for (auto Elem : Elems)
4780 if (!isVectorElementLoad(Elem)) {
4781 AllLoads = false;
4782 break;
4783 }
4784
4785 // The best way of building a v2i64 from two i64s is to use VLVGP.
4786 if (VT == MVT::v2i64 && !AllLoads)
4787 return joinDwords(DAG, DL, Elems[0], Elems[1]);
4788
4789 // Use a 64-bit merge high to combine two doubles.
4790 if (VT == MVT::v2f64 && !AllLoads)
4791 return buildMergeScalars(DAG, DL, VT, Elems[0], Elems[1]);
4792
4793 // Build v4f32 values directly from the FPRs:
4794 //
4795   //   <Axxx> <Bxxx> <Cxxx> <Dxxx>
4796 // V V VMRHF
4797 // <ABxx> <CDxx>
4798 // V VMRHG
4799 // <ABCD>
4800 if (VT == MVT::v4f32 && !AllLoads) {
4801 SDValue Op01 = buildMergeScalars(DAG, DL, VT, Elems[0], Elems[1]);
4802 SDValue Op23 = buildMergeScalars(DAG, DL, VT, Elems[2], Elems[3]);
4803 // Avoid unnecessary undefs by reusing the other operand.
4804 if (Op01.isUndef())
4805 Op01 = Op23;
4806 else if (Op23.isUndef())
4807 Op23 = Op01;
4808 // Merging identical replications is a no-op.
4809 if (Op01.getOpcode() == SystemZISD::REPLICATE && Op01 == Op23)
4810 return Op01;
4811 Op01 = DAG.getNode(ISD::BITCAST, DL, MVT::v2i64, Op01);
4812 Op23 = DAG.getNode(ISD::BITCAST, DL, MVT::v2i64, Op23);
4813 SDValue Op = DAG.getNode(SystemZISD::MERGE_HIGH,
4814 DL, MVT::v2i64, Op01, Op23);
4815 return DAG.getNode(ISD::BITCAST, DL, VT, Op);
4816 }
4817
4818 // Collect the constant terms.
4819 SmallVector<SDValue, SystemZ::VectorBytes> Constants(NumElements, SDValue());
4820 SmallVector<bool, SystemZ::VectorBytes> Done(NumElements, false);
4821
4822 unsigned NumConstants = 0;
4823 for (unsigned I = 0; I < NumElements; ++I) {
4824 SDValue Elem = Elems[I];
4825 if (Elem.getOpcode() == ISD::Constant ||
4826 Elem.getOpcode() == ISD::ConstantFP) {
4827 NumConstants += 1;
4828 Constants[I] = Elem;
4829 Done[I] = true;
4830 }
4831 }
4832 // If there was at least one constant, fill in the other elements of
4833 // Constants with undefs to get a full vector constant and use that
4834 // as the starting point.
4835 SDValue Result;
4836 SDValue ReplicatedVal;
4837 if (NumConstants > 0) {
4838 for (unsigned I = 0; I < NumElements; ++I)
4839 if (!Constants[I].getNode())
4840 Constants[I] = DAG.getUNDEF(Elems[I].getValueType());
4841 Result = DAG.getBuildVector(VT, DL, Constants);
4842 } else {
4843 // Otherwise try to use VLREP or VLVGP to start the sequence in order to
4844 // avoid a false dependency on any previous contents of the vector
4845 // register.
4846
4847 // Use a VLREP if at least one element is a load. Make sure to replicate
4848 // the load with the most elements having its value.
4849 std::map<const SDNode*, unsigned> UseCounts;
4850 SDNode *LoadMaxUses = nullptr;
4851 for (unsigned I = 0; I < NumElements; ++I)
4852 if (isVectorElementLoad(Elems[I])) {
4853 SDNode *Ld = Elems[I].getNode();
4854 UseCounts[Ld]++;
4855 if (LoadMaxUses == nullptr || UseCounts[LoadMaxUses] < UseCounts[Ld])
4856 LoadMaxUses = Ld;
4857 }
4858 if (LoadMaxUses != nullptr) {
4859 ReplicatedVal = SDValue(LoadMaxUses, 0);
4860 Result = DAG.getNode(SystemZISD::REPLICATE, DL, VT, ReplicatedVal);
4861 } else {
4862 // Try to use VLVGP.
4863 unsigned I1 = NumElements / 2 - 1;
4864 unsigned I2 = NumElements - 1;
4865 bool Def1 = !Elems[I1].isUndef();
4866 bool Def2 = !Elems[I2].isUndef();
4867 if (Def1 || Def2) {
4868 SDValue Elem1 = Elems[Def1 ? I1 : I2];
4869 SDValue Elem2 = Elems[Def2 ? I2 : I1];
4870 Result = DAG.getNode(ISD::BITCAST, DL, VT,
4871 joinDwords(DAG, DL, Elem1, Elem2));
4872 Done[I1] = true;
4873 Done[I2] = true;
4874 } else
4875 Result = DAG.getUNDEF(VT);
4876 }
4877 }
4878
4879 // Use VLVGx to insert the other elements.
4880 for (unsigned I = 0; I < NumElements; ++I)
4881 if (!Done[I] && !Elems[I].isUndef() && Elems[I] != ReplicatedVal)
4882 Result = DAG.getNode(ISD::INSERT_VECTOR_ELT, DL, VT, Result, Elems[I],
4883 DAG.getConstant(I, DL, MVT::i32));
4884 return Result;
4885}
4886
4887SDValue SystemZTargetLowering::lowerBUILD_VECTOR(SDValue Op,
4888 SelectionDAG &DAG) const {
4889 auto *BVN = cast<BuildVectorSDNode>(Op.getNode());
4890 SDLoc DL(Op);
4891 EVT VT = Op.getValueType();
4892
4893 if (BVN->isConstant()) {
4894 if (SystemZVectorConstantInfo(BVN).isVectorConstantLegal(Subtarget))
4895 return Op;
4896
4897 // Fall back to loading it from memory.
4898 return SDValue();
4899 }
4900
4901 // See if we should use shuffles to construct the vector from other vectors.
4902 if (SDValue Res = tryBuildVectorShuffle(DAG, BVN))
4903 return Res;
4904
4905 // Detect SCALAR_TO_VECTOR conversions.
4906 if (isOperationLegal(ISD::SCALAR_TO_VECTOR, VT) && isScalarToVector(Op))
4907 return buildScalarToVector(DAG, DL, VT, Op.getOperand(0));
4908
4909 // Otherwise use buildVector to build the vector up from GPRs.
4910 unsigned NumElements = Op.getNumOperands();
4911 SmallVector<SDValue, SystemZ::VectorBytes> Ops(NumElements);
4912 for (unsigned I = 0; I < NumElements; ++I)
4913 Ops[I] = Op.getOperand(I);
4914 return buildVector(DAG, DL, VT, Ops);
4915}
4916
4917SDValue SystemZTargetLowering::lowerVECTOR_SHUFFLE(SDValue Op,
4918 SelectionDAG &DAG) const {
4919 auto *VSN = cast<ShuffleVectorSDNode>(Op.getNode());
4920 SDLoc DL(Op);
4921 EVT VT = Op.getValueType();
4922 unsigned NumElements = VT.getVectorNumElements();
4923
4924 if (VSN->isSplat()) {
4925 SDValue Op0 = Op.getOperand(0);
4926 unsigned Index = VSN->getSplatIndex();
4927     assert(Index < VT.getVectorNumElements() &&
4928            "Splat index should be defined and in first operand");
4929 // See whether the value we're splatting is directly available as a scalar.
4930 if ((Index == 0 && Op0.getOpcode() == ISD::SCALAR_TO_VECTOR) ||
4931 Op0.getOpcode() == ISD::BUILD_VECTOR)
4932 return DAG.getNode(SystemZISD::REPLICATE, DL, VT, Op0.getOperand(Index));
4933 // Otherwise keep it as a vector-to-vector operation.
4934 return DAG.getNode(SystemZISD::SPLAT, DL, VT, Op.getOperand(0),
4935 DAG.getTargetConstant(Index, DL, MVT::i32));
4936 }
4937
4938 GeneralShuffle GS(VT);
4939 for (unsigned I = 0; I < NumElements; ++I) {
4940 int Elt = VSN->getMaskElt(I);
4941 if (Elt < 0)
4942 GS.addUndef();
4943 else if (!GS.add(Op.getOperand(unsigned(Elt) / NumElements),
4944 unsigned(Elt) % NumElements))
4945 return SDValue();
4946 }
4947 return GS.getNode(DAG, SDLoc(VSN));
4948}
4949
4950SDValue SystemZTargetLowering::lowerSCALAR_TO_VECTOR(SDValue Op,
4951 SelectionDAG &DAG) const {
4952 SDLoc DL(Op);
4953 // Just insert the scalar into element 0 of an undefined vector.
4954 return DAG.getNode(ISD::INSERT_VECTOR_ELT, DL,
4955 Op.getValueType(), DAG.getUNDEF(Op.getValueType()),
4956 Op.getOperand(0), DAG.getConstant(0, DL, MVT::i32));
4957}
4958
4959SDValue SystemZTargetLowering::lowerINSERT_VECTOR_ELT(SDValue Op,
4960 SelectionDAG &DAG) const {
4961 // Handle insertions of floating-point values.
4962 SDLoc DL(Op);
4963 SDValue Op0 = Op.getOperand(0);
4964 SDValue Op1 = Op.getOperand(1);
4965 SDValue Op2 = Op.getOperand(2);
4966 EVT VT = Op.getValueType();
4967
4968 // Insertions into constant indices of a v2f64 can be done using VPDI.
4969 // However, if the inserted value is a bitcast or a constant then it's
4970 // better to use GPRs, as below.
4971 if (VT == MVT::v2f64 &&
4972 Op1.getOpcode() != ISD::BITCAST &&
4973 Op1.getOpcode() != ISD::ConstantFP &&
4974 Op2.getOpcode() == ISD::Constant) {
4975 uint64_t Index = cast<ConstantSDNode>(Op2)->getZExtValue();
4976 unsigned Mask = VT.getVectorNumElements() - 1;
4977 if (Index <= Mask)
4978 return Op;
4979 }
4980
4981 // Otherwise bitcast to the equivalent integer form and insert via a GPR.
4982 MVT IntVT = MVT::getIntegerVT(VT.getScalarSizeInBits());
4983 MVT IntVecVT = MVT::getVectorVT(IntVT, VT.getVectorNumElements());
4984 SDValue Res = DAG.getNode(ISD::INSERT_VECTOR_ELT, DL, IntVecVT,
4985 DAG.getNode(ISD::BITCAST, DL, IntVecVT, Op0),
4986 DAG.getNode(ISD::BITCAST, DL, IntVT, Op1), Op2);
4987 return DAG.getNode(ISD::BITCAST, DL, VT, Res);
4988}
4989
4990SDValue
4991SystemZTargetLowering::lowerEXTRACT_VECTOR_ELT(SDValue Op,
4992 SelectionDAG &DAG) const {
4993 // Handle extractions of floating-point values.
4994 SDLoc DL(Op);
4995 SDValue Op0 = Op.getOperand(0);
4996 SDValue Op1 = Op.getOperand(1);
4997 EVT VT = Op.getValueType();
4998 EVT VecVT = Op0.getValueType();
4999
5000 // Extractions of constant indices can be done directly.
5001 if (auto *CIndexN = dyn_cast<ConstantSDNode>(Op1)) {
5002 uint64_t Index = CIndexN->getZExtValue();
5003 unsigned Mask = VecVT.getVectorNumElements() - 1;
5004 if (Index <= Mask)
5005 return Op;
5006 }
5007
5008 // Otherwise bitcast to the equivalent integer form and extract via a GPR.
5009 MVT IntVT = MVT::getIntegerVT(VT.getSizeInBits());
5010 MVT IntVecVT = MVT::getVectorVT(IntVT, VecVT.getVectorNumElements());
5011 SDValue Res = DAG.getNode(ISD::EXTRACT_VECTOR_ELT, DL, IntVT,
5012 DAG.getNode(ISD::BITCAST, DL, IntVecVT, Op0), Op1);
5013 return DAG.getNode(ISD::BITCAST, DL, VT, Res);
5014}
5015
5016SDValue
5017SystemZTargetLowering::lowerExtendVectorInreg(SDValue Op, SelectionDAG &DAG,
5018 unsigned UnpackHigh) const {
5019 SDValue PackedOp = Op.getOperand(0);
5020 EVT OutVT = Op.getValueType();
5021 EVT InVT = PackedOp.getValueType();
5022 unsigned ToBits = OutVT.getScalarSizeInBits();
5023 unsigned FromBits = InVT.getScalarSizeInBits();
5024 do {
5025 FromBits *= 2;
5026 EVT OutVT = MVT::getVectorVT(MVT::getIntegerVT(FromBits),
5027 SystemZ::VectorBits / FromBits);
5028 PackedOp = DAG.getNode(UnpackHigh, SDLoc(PackedOp), OutVT, PackedOp);
5029 } while (FromBits != ToBits);
5030 return PackedOp;
5031}
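A minimal sketch of the widening loop above, assuming a sign extension from 8-bit to 32-bit vector elements: each iteration corresponds to one UNPACK_HIGH (or UNPACKL_HIGH) node that doubles the element width, so two steps are needed.

int main() {
  unsigned FromBits = 8, ToBits = 32, Steps = 0;
  do {
    FromBits *= 2;  // one unpack per iteration: v16i8 -> v8i16 -> v4i32
    ++Steps;
  } while (FromBits != ToBits);
  // Steps == 2
  return 0;
}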
5032
5033SDValue SystemZTargetLowering::lowerShift(SDValue Op, SelectionDAG &DAG,
5034 unsigned ByScalar) const {
5035 // Look for cases where a vector shift can use the *_BY_SCALAR form.
5036 SDValue Op0 = Op.getOperand(0);
5037 SDValue Op1 = Op.getOperand(1);
5038 SDLoc DL(Op);
5039 EVT VT = Op.getValueType();
5040 unsigned ElemBitSize = VT.getScalarSizeInBits();
5041
5042 // See whether the shift vector is a splat represented as BUILD_VECTOR.
5043 if (auto *BVN = dyn_cast<BuildVectorSDNode>(Op1)) {
5044 APInt SplatBits, SplatUndef;
5045 unsigned SplatBitSize;
5046 bool HasAnyUndefs;
5047 // Check for constant splats. Use ElemBitSize as the minimum element
5048 // width and reject splats that need wider elements.
5049 if (BVN->isConstantSplat(SplatBits, SplatUndef, SplatBitSize, HasAnyUndefs,
5050 ElemBitSize, true) &&
5051 SplatBitSize == ElemBitSize) {
5052 SDValue Shift = DAG.getConstant(SplatBits.getZExtValue() & 0xfff,
5053 DL, MVT::i32);
5054 return DAG.getNode(ByScalar, DL, VT, Op0, Shift);
5055 }
5056 // Check for variable splats.
5057 BitVector UndefElements;
5058 SDValue Splat = BVN->getSplatValue(&UndefElements);
5059 if (Splat) {
5060 // Since i32 is the smallest legal type, we either need a no-op
5061 // or a truncation.
5062 SDValue Shift = DAG.getNode(ISD::TRUNCATE, DL, MVT::i32, Splat);
5063 return DAG.getNode(ByScalar, DL, VT, Op0, Shift);
5064 }
5065 }
5066
5067 // See whether the shift vector is a splat represented as SHUFFLE_VECTOR,
5068 // and the shift amount is directly available in a GPR.
5069 if (auto *VSN = dyn_cast<ShuffleVectorSDNode>(Op1)) {
5070 if (VSN->isSplat()) {
5071 SDValue VSNOp0 = VSN->getOperand(0);
5072 unsigned Index = VSN->getSplatIndex();
5073       assert(Index < VT.getVectorNumElements() &&
5074              "Splat index should be defined and in first operand");
5075 if ((Index == 0 && VSNOp0.getOpcode() == ISD::SCALAR_TO_VECTOR) ||
5076 VSNOp0.getOpcode() == ISD::BUILD_VECTOR) {
5077 // Since i32 is the smallest legal type, we either need a no-op
5078 // or a truncation.
5079 SDValue Shift = DAG.getNode(ISD::TRUNCATE, DL, MVT::i32,
5080 VSNOp0.getOperand(Index));
5081 return DAG.getNode(ByScalar, DL, VT, Op0, Shift);
5082 }
5083 }
5084 }
5085
5086 // Otherwise just treat the current form as legal.
5087 return Op;
5088}
5089
5090SDValue SystemZTargetLowering::LowerOperation(SDValue Op,
5091 SelectionDAG &DAG) const {
5092 switch (Op.getOpcode()) {
5093 case ISD::FRAMEADDR:
5094 return lowerFRAMEADDR(Op, DAG);
5095 case ISD::RETURNADDR:
5096 return lowerRETURNADDR(Op, DAG);
5097 case ISD::BR_CC:
5098 return lowerBR_CC(Op, DAG);
5099 case ISD::SELECT_CC:
5100 return lowerSELECT_CC(Op, DAG);
5101 case ISD::SETCC:
5102 return lowerSETCC(Op, DAG);
5103 case ISD::STRICT_FSETCC:
5104 return lowerSTRICT_FSETCC(Op, DAG, false);
5105 case ISD::STRICT_FSETCCS:
5106 return lowerSTRICT_FSETCC(Op, DAG, true);
5107 case ISD::GlobalAddress:
5108 return lowerGlobalAddress(cast<GlobalAddressSDNode>(Op), DAG);
5109 case ISD::GlobalTLSAddress:
5110 return lowerGlobalTLSAddress(cast<GlobalAddressSDNode>(Op), DAG);
5111 case ISD::BlockAddress:
5112 return lowerBlockAddress(cast<BlockAddressSDNode>(Op), DAG);
5113 case ISD::JumpTable:
5114 return lowerJumpTable(cast<JumpTableSDNode>(Op), DAG);
5115 case ISD::ConstantPool:
5116 return lowerConstantPool(cast<ConstantPoolSDNode>(Op), DAG);
5117 case ISD::BITCAST:
5118 return lowerBITCAST(Op, DAG);
5119 case ISD::VASTART:
5120 return lowerVASTART(Op, DAG);
5121 case ISD::VACOPY:
5122 return lowerVACOPY(Op, DAG);
5123 case ISD::DYNAMIC_STACKALLOC:
5124 return lowerDYNAMIC_STACKALLOC(Op, DAG);
5125 case ISD::GET_DYNAMIC_AREA_OFFSET:
5126 return lowerGET_DYNAMIC_AREA_OFFSET(Op, DAG);
5127 case ISD::SMUL_LOHI:
5128 return lowerSMUL_LOHI(Op, DAG);
5129 case ISD::UMUL_LOHI:
5130 return lowerUMUL_LOHI(Op, DAG);
5131 case ISD::SDIVREM:
5132 return lowerSDIVREM(Op, DAG);
5133 case ISD::UDIVREM:
5134 return lowerUDIVREM(Op, DAG);
5135 case ISD::SADDO:
5136 case ISD::SSUBO:
5137 case ISD::UADDO:
5138 case ISD::USUBO:
5139 return lowerXALUO(Op, DAG);
5140 case ISD::ADDCARRY:
5141 case ISD::SUBCARRY:
5142 return lowerADDSUBCARRY(Op, DAG);
5143 case ISD::OR:
5144 return lowerOR(Op, DAG);
5145 case ISD::CTPOP:
5146 return lowerCTPOP(Op, DAG);
5147 case ISD::ATOMIC_FENCE:
5148 return lowerATOMIC_FENCE(Op, DAG);
5149 case ISD::ATOMIC_SWAP:
5150 return lowerATOMIC_LOAD_OP(Op, DAG, SystemZISD::ATOMIC_SWAPW);
5151 case ISD::ATOMIC_STORE:
5152 return lowerATOMIC_STORE(Op, DAG);
5153 case ISD::ATOMIC_LOAD:
5154 return lowerATOMIC_LOAD(Op, DAG);
5155 case ISD::ATOMIC_LOAD_ADD:
5156 return lowerATOMIC_LOAD_OP(Op, DAG, SystemZISD::ATOMIC_LOADW_ADD);
5157 case ISD::ATOMIC_LOAD_SUB:
5158 return lowerATOMIC_LOAD_SUB(Op, DAG);
5159 case ISD::ATOMIC_LOAD_AND:
5160 return lowerATOMIC_LOAD_OP(Op, DAG, SystemZISD::ATOMIC_LOADW_AND);
5161 case ISD::ATOMIC_LOAD_OR:
5162 return lowerATOMIC_LOAD_OP(Op, DAG, SystemZISD::ATOMIC_LOADW_OR);
5163 case ISD::ATOMIC_LOAD_XOR:
5164 return lowerATOMIC_LOAD_OP(Op, DAG, SystemZISD::ATOMIC_LOADW_XOR);
5165 case ISD::ATOMIC_LOAD_NAND:
5166 return lowerATOMIC_LOAD_OP(Op, DAG, SystemZISD::ATOMIC_LOADW_NAND);
5167 case ISD::ATOMIC_LOAD_MIN:
5168 return lowerATOMIC_LOAD_OP(Op, DAG, SystemZISD::ATOMIC_LOADW_MIN);
5169 case ISD::ATOMIC_LOAD_MAX:
5170 return lowerATOMIC_LOAD_OP(Op, DAG, SystemZISD::ATOMIC_LOADW_MAX);
5171 case ISD::ATOMIC_LOAD_UMIN:
5172 return lowerATOMIC_LOAD_OP(Op, DAG, SystemZISD::ATOMIC_LOADW_UMIN);
5173 case ISD::ATOMIC_LOAD_UMAX:
5174 return lowerATOMIC_LOAD_OP(Op, DAG, SystemZISD::ATOMIC_LOADW_UMAX);
5175 case ISD::ATOMIC_CMP_SWAP_WITH_SUCCESS:
5176 return lowerATOMIC_CMP_SWAP(Op, DAG);
5177 case ISD::STACKSAVE:
5178 return lowerSTACKSAVE(Op, DAG);
5179 case ISD::STACKRESTORE:
5180 return lowerSTACKRESTORE(Op, DAG);
5181 case ISD::PREFETCH:
5182 return lowerPREFETCH(Op, DAG);
5183 case ISD::INTRINSIC_W_CHAIN:
5184 return lowerINTRINSIC_W_CHAIN(Op, DAG);
5185 case ISD::INTRINSIC_WO_CHAIN:
5186 return lowerINTRINSIC_WO_CHAIN(Op, DAG);
5187 case ISD::BUILD_VECTOR:
5188 return lowerBUILD_VECTOR(Op, DAG);
5189 case ISD::VECTOR_SHUFFLE:
5190 return lowerVECTOR_SHUFFLE(Op, DAG);
5191 case ISD::SCALAR_TO_VECTOR:
5192 return lowerSCALAR_TO_VECTOR(Op, DAG);
5193 case ISD::INSERT_VECTOR_ELT:
5194 return lowerINSERT_VECTOR_ELT(Op, DAG);
5195 case ISD::EXTRACT_VECTOR_ELT:
5196 return lowerEXTRACT_VECTOR_ELT(Op, DAG);
5197 case ISD::SIGN_EXTEND_VECTOR_INREG:
5198 return lowerExtendVectorInreg(Op, DAG, SystemZISD::UNPACK_HIGH);
5199 case ISD::ZERO_EXTEND_VECTOR_INREG:
5200 return lowerExtendVectorInreg(Op, DAG, SystemZISD::UNPACKL_HIGH);
5201 case ISD::SHL:
5202 return lowerShift(Op, DAG, SystemZISD::VSHL_BY_SCALAR);
5203 case ISD::SRL:
5204 return lowerShift(Op, DAG, SystemZISD::VSRL_BY_SCALAR);
5205 case ISD::SRA:
5206 return lowerShift(Op, DAG, SystemZISD::VSRA_BY_SCALAR);
5207 default:
5208     llvm_unreachable("Unexpected node to lower");
5209 }
5210}
5211
5212// Lower operations with invalid operand or result types (currently used
5213// only for 128-bit integer types).
5214
5215static SDValue lowerI128ToGR128(SelectionDAG &DAG, SDValue In) {
5216 SDLoc DL(In);
5217 SDValue Lo = DAG.getNode(ISD::EXTRACT_ELEMENT, DL, MVT::i64, In,
5218 DAG.getIntPtrConstant(0, DL));
5219 SDValue Hi = DAG.getNode(ISD::EXTRACT_ELEMENT, DL, MVT::i64, In,
5220 DAG.getIntPtrConstant(1, DL));
5221 SDNode *Pair = DAG.getMachineNode(SystemZ::PAIR128, DL,
5222 MVT::Untyped, Hi, Lo);
5223 return SDValue(Pair, 0);
5224}
5225
5226static SDValue lowerGR128ToI128(SelectionDAG &DAG, SDValue In) {
5227 SDLoc DL(In);
5228 SDValue Hi = DAG.getTargetExtractSubreg(SystemZ::subreg_h64,
5229 DL, MVT::i64, In);
5230 SDValue Lo = DAG.getTargetExtractSubreg(SystemZ::subreg_l64,
5231 DL, MVT::i64, In);
5232 return DAG.getNode(ISD::BUILD_PAIR, DL, MVT::i128, Lo, Hi);
5233}
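A standalone sketch of the 128-bit decomposition modelled by the two helpers above, using the compiler's __int128 extension and an illustrative value: element index 0 (and subreg_l64) is the low 64-bit half, element index 1 (and subreg_h64) the high half.

#include <cassert>
#include <cstdint>

int main() {
  unsigned __int128 In =
      ((unsigned __int128)0x0123456789abcdefULL << 64) | 0x1122334455667788ULL;
  uint64_t Lo = (uint64_t)In;         // EXTRACT_ELEMENT index 0 / subreg_l64
  uint64_t Hi = (uint64_t)(In >> 64); // EXTRACT_ELEMENT index 1 / subreg_h64
  assert(Lo == 0x1122334455667788ULL && Hi == 0x0123456789abcdefULL);
  return 0;
}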
5234
5235void
5236SystemZTargetLowering::LowerOperationWrapper(SDNode *N,
5237 SmallVectorImpl<SDValue> &Results,
5238 SelectionDAG &DAG) const {
5239 switch (N->getOpcode()) {
5240 case ISD::ATOMIC_LOAD: {
5241 SDLoc DL(N);
5242 SDVTList Tys = DAG.getVTList(MVT::Untyped, MVT::Other);
5243 SDValue Ops[] = { N->getOperand(0), N->getOperand(1) };
5244 MachineMemOperand *MMO = cast<AtomicSDNode>(N)->getMemOperand();
5245 SDValue Res = DAG.getMemIntrinsicNode(SystemZISD::ATOMIC_LOAD_128,
5246 DL, Tys, Ops, MVT::i128, MMO);
5247 Results.push_back(lowerGR128ToI128(DAG, Res));
5248 Results.push_back(Res.getValue(1));
5249 break;
5250 }
5251 case ISD::ATOMIC_STORE: {
5252 SDLoc DL(N);
5253 SDVTList Tys = DAG.getVTList(MVT::Other);
5254 SDValue Ops[] = { N->getOperand(0),
5255 lowerI128ToGR128(DAG, N->getOperand(2)),
5256 N->getOperand(1) };
5257 MachineMemOperand *MMO = cast<AtomicSDNode>(N)->getMemOperand();
5258 SDValue Res = DAG.getMemIntrinsicNode(SystemZISD::ATOMIC_STORE_128,
5259 DL, Tys, Ops, MVT::i128, MMO);
5260 // We have to enforce sequential consistency by performing a
5261 // serialization operation after the store.
5262 if (cast<AtomicSDNode>(N)->getOrdering() ==
5263 AtomicOrdering::SequentiallyConsistent)
5264 Res = SDValue(DAG.getMachineNode(SystemZ::Serialize, DL,
5265 MVT::Other, Res), 0);
5266 Results.push_back(Res);
5267 break;
5268 }
5269 case ISD::ATOMIC_CMP_SWAP_WITH_SUCCESS: {
5270 SDLoc DL(N);
5271 SDVTList Tys = DAG.getVTList(MVT::Untyped, MVT::i32, MVT::Other);
5272 SDValue Ops[] = { N->getOperand(0), N->getOperand(1),
5273 lowerI128ToGR128(DAG, N->getOperand(2)),
5274 lowerI128ToGR128(DAG, N->getOperand(3)) };
5275 MachineMemOperand *MMO = cast<AtomicSDNode>(N)->getMemOperand();
5276 SDValue Res = DAG.getMemIntrinsicNode(SystemZISD::ATOMIC_CMP_SWAP_128,
5277 DL, Tys, Ops, MVT::i128, MMO);
5278 SDValue Success = emitSETCC(DAG, DL, Res.getValue(1),
5279 SystemZ::CCMASK_CS, SystemZ::CCMASK_CS_EQ);
5280 Success = DAG.getZExtOrTrunc(Success, DL, N->getValueType(1));
5281 Results.push_back(lowerGR128ToI128(DAG, Res));
5282 Results.push_back(Success);
5283 Results.push_back(Res.getValue(2));
5284 break;
5285 }
5286 default:
5287     llvm_unreachable("Unexpected node to lower");
5289}
5290
5291void
5292SystemZTargetLowering::ReplaceNodeResults(SDNode *N,
5293 SmallVectorImpl<SDValue> &Results,
5294 SelectionDAG &DAG) const {
5295 return LowerOperationWrapper(N, Results, DAG);
5296}
5297
5298const char *SystemZTargetLowering::getTargetNodeName(unsigned Opcode) const {
5299#define OPCODE(NAME) case SystemZISD::NAME: return "SystemZISD::" #NAME
5300 switch ((SystemZISD::NodeType)Opcode) {
5301 case SystemZISD::FIRST_NUMBER: break;
5302 OPCODE(RET_FLAG);
5303 OPCODE(CALL);
5304 OPCODE(SIBCALL);
5305 OPCODE(TLS_GDCALL);
5306 OPCODE(TLS_LDCALL);
5307 OPCODE(PCREL_WRAPPER);
5308 OPCODE(PCREL_OFFSET);
5309 OPCODE(IABS);
5310 OPCODE(ICMP);
5311 OPCODE(FCMP);
5312 OPCODE(STRICT_FCMP);
5313 OPCODE(STRICT_FCMPS);
5314 OPCODE(TM);
5315 OPCODE(BR_CCMASK);
5316 OPCODE(SELECT_CCMASK);
5317 OPCODE(ADJDYNALLOC);
5318 OPCODE(POPCNT);
5319 OPCODE(SMUL_LOHI);
5320 OPCODE(UMUL_LOHI);
5321 OPCODE(SDIVREM);
5322 OPCODE(UDIVREM);
5323 OPCODE(SADDO);
5324 OPCODE(SSUBO);
5325 OPCODE(UADDO);
5326 OPCODE(USUBO);
5327 OPCODE(ADDCARRY);
5328 OPCODE(SUBCARRY);
5329 OPCODE(GET_CCMASK);
5330 OPCODE(MVC);
5331 OPCODE(MVC_LOOP);
5332 OPCODE(NC);
5333 OPCODE(NC_LOOP);
5334 OPCODE(OC);
5335 OPCODE(OC_LOOP);
5336 OPCODE(XC);
5337 OPCODE(XC_LOOP);
5338 OPCODE(CLC);
5339 OPCODE(CLC_LOOP);
5340 OPCODE(STPCPY);
5341 OPCODE(STRCMP);
5342 OPCODE(SEARCH_STRING);
5343 OPCODE(IPM);
5344 OPCODE(MEMBARRIER);
5345 OPCODE(TBEGIN);
5346 OPCODE(TBEGIN_NOFLOAT);
5347 OPCODE(TEND);
5348 OPCODE(BYTE_MASK);
5349 OPCODE(ROTATE_MASK);
5350 OPCODE(REPLICATE);
5351 OPCODE(JOIN_DWORDS);
5352 OPCODE(SPLAT);
5353 OPCODE(MERGE_HIGH);
5354 OPCODE(MERGE_LOW);
5355 OPCODE(SHL_DOUBLE);
5356 OPCODE(PERMUTE_DWORDS);
5357 OPCODE(PERMUTE);
5358 OPCODE(PACK);
5359 OPCODE(PACKS_CC);
5360 OPCODE(PACKLS_CC);
5361 OPCODE(UNPACK_HIGH);
5362 OPCODE(UNPACKL_HIGH);
5363 OPCODE(UNPACK_LOW);
5364 OPCODE(UNPACKL_LOW);
5365 OPCODE(VSHL_BY_SCALAR);
5366 OPCODE(VSRL_BY_SCALAR);
5367 OPCODE(VSRA_BY_SCALAR);
5368 OPCODE(VSUM);
5369 OPCODE(VICMPE);
5370 OPCODE(VICMPH);
5371 OPCODE(VICMPHL);
5372 OPCODE(VICMPES);
5373 OPCODE(VICMPHS);
5374 OPCODE(VICMPHLS);
5375 OPCODE(VFCMPE);
5376 OPCODE(STRICT_VFCMPE);
5377 OPCODE(STRICT_VFCMPES);
5378 OPCODE(VFCMPH);
5379 OPCODE(STRICT_VFCMPH);
5380 OPCODE(STRICT_VFCMPHS);
5381 OPCODE(VFCMPHE);
5382 OPCODE(STRICT_VFCMPHE);
5383 OPCODE(STRICT_VFCMPHES);
5384 OPCODE(VFCMPES);
5385 OPCODE(VFCMPHS);
5386 OPCODE(VFCMPHES);
5387 OPCODE(VFTCI);
5388 OPCODE(VEXTEND);
5389 OPCODE(STRICT_VEXTEND);
5390 OPCODE(VROUND);
5391 OPCODE(STRICT_VROUND);
5392 OPCODE(VTM);
5393 OPCODE(VFAE_CC);
5394 OPCODE(VFAEZ_CC);
5395 OPCODE(VFEE_CC);
5396 OPCODE(VFEEZ_CC);
5397 OPCODE(VFENE_CC);
5398 OPCODE(VFENEZ_CC);
5399 OPCODE(VISTR_CC);
5400 OPCODE(VSTRC_CC);
5401 OPCODE(VSTRCZ_CC);
5402 OPCODE(VSTRS_CC);
5403 OPCODE(VSTRSZ_CC);
5404 OPCODE(TDC);
5405 OPCODE(ATOMIC_SWAPW);
5406 OPCODE(ATOMIC_LOADW_ADD);
5407 OPCODE(ATOMIC_LOADW_SUB);
5408 OPCODE(ATOMIC_LOADW_AND);
5409 OPCODE(ATOMIC_LOADW_OR);
5410 OPCODE(ATOMIC_LOADW_XOR);
5411 OPCODE(ATOMIC_LOADW_NAND);
5412 OPCODE(ATOMIC_LOADW_MIN);
5413 OPCODE(ATOMIC_LOADW_MAX);
5414 OPCODE(ATOMIC_LOADW_UMIN);
5415 OPCODE(ATOMIC_LOADW_UMAX);
5416 OPCODE(ATOMIC_CMP_SWAPW);
5417 OPCODE(ATOMIC_CMP_SWAP);
5418 OPCODE(ATOMIC_LOAD_128);
5419 OPCODE(ATOMIC_STORE_128);
5420 OPCODE(ATOMIC_CMP_SWAP_128);
5421 OPCODE(LRV);
5422 OPCODE(STRV);
5423 OPCODE(VLER);
5424 OPCODE(VSTER);
5425 OPCODE(PREFETCH);
5426 }
5427 return nullptr;
5428#undef OPCODE
5429}
5430
5431// Return true if VT is a vector whose elements are a whole number of bytes
5432// in width. Also check for presence of vector support.
5433bool SystemZTargetLowering::canTreatAsByteVector(EVT VT) const {
5434 if (!Subtarget.hasVector())
5435 return false;
5436
5437 return VT.isVector() && VT.getScalarSizeInBits() % 8 == 0 && VT.isSimple();
5438}
5439
5440// Try to simplify an EXTRACT_VECTOR_ELT from a vector of type VecVT
5441// producing a result of type ResVT. Op is a possibly bitcast version
5442// of the input vector and Index is the index (based on type VecVT) that
5443// should be extracted. Return the new extraction if a simplification
5444// was possible or if Force is true.
5445SDValue SystemZTargetLowering::combineExtract(const SDLoc &DL, EVT ResVT,
5446 EVT VecVT, SDValue Op,
5447 unsigned Index,
5448 DAGCombinerInfo &DCI,
5449 bool Force) const {
5450 SelectionDAG &DAG = DCI.DAG;
5451
5452 // The number of bytes being extracted.
5453 unsigned BytesPerElement = VecVT.getVectorElementType().getStoreSize();
5454
5455 for (;;) {
5456 unsigned Opcode = Op.getOpcode();
5457 if (Opcode == ISD::BITCAST)
5458 // Look through bitcasts.
5459 Op = Op.getOperand(0);
5460 else if ((Opcode == ISD::VECTOR_SHUFFLE || Opcode == SystemZISD::SPLAT) &&
5461 canTreatAsByteVector(Op.getValueType())) {
5462 // Get a VPERM-like permute mask and see whether the bytes covered
5463 // by the extracted element are a contiguous sequence from one
5464 // source operand.
5465 SmallVector<int, SystemZ::VectorBytes> Bytes;
5466 if (!getVPermMask(Op, Bytes))
5467 break;
5468 int First;
5469 if (!getShuffleInput(Bytes, Index * BytesPerElement,
5470 BytesPerElement, First))
5471 break;
5472 if (First < 0)
5473 return DAG.getUNDEF(ResVT);
5474 // Make sure the contiguous sequence starts at a multiple of the
5475 // original element size.
5476 unsigned Byte = unsigned(First) % Bytes.size();
5477 if (Byte % BytesPerElement != 0)
5478 break;
5479 // We can get the extracted value directly from an input.
5480 Index = Byte / BytesPerElement;
5481 Op = Op.getOperand(unsigned(First) / Bytes.size());
5482 Force = true;
5483 } else if (Opcode == ISD::BUILD_VECTOR &&
5484 canTreatAsByteVector(Op.getValueType())) {
5485 // We can only optimize this case if the BUILD_VECTOR elements are
5486 // at least as wide as the extracted value.
5487 EVT OpVT = Op.getValueType();
5488 unsigned OpBytesPerElement = OpVT.getVectorElementType().getStoreSize();
5489 if (OpBytesPerElement < BytesPerElement)
5490 break;
5491 // Make sure that the least-significant bit of the extracted value
5492 // is the least significant bit of an input.
5493 unsigned End = (Index + 1) * BytesPerElement;
5494 if (End % OpBytesPerElement != 0)
5495 break;
5496 // We're extracting the low part of one operand of the BUILD_VECTOR.
5497 Op = Op.getOperand(End / OpBytesPerElement - 1);
5498 if (!Op.getValueType().isInteger()) {
5499 EVT VT = MVT::getIntegerVT(Op.getValueSizeInBits());
5500 Op = DAG.getNode(ISD::BITCAST, DL, VT, Op);
5501 DCI.AddToWorklist(Op.getNode());
5502 }
5503 EVT VT = MVT::getIntegerVT(ResVT.getSizeInBits());
5504 Op = DAG.getNode(ISD::TRUNCATE, DL, VT, Op);
5505 if (VT != ResVT) {
5506 DCI.AddToWorklist(Op.getNode());
5507 Op = DAG.getNode(ISD::BITCAST, DL, ResVT, Op);
5508 }
5509 return Op;
5510 } else if ((Opcode == ISD::SIGN_EXTEND_VECTOR_INREG ||
5511 Opcode == ISD::ZERO_EXTEND_VECTOR_INREG ||
5512 Opcode == ISD::ANY_EXTEND_VECTOR_INREG) &&
5513 canTreatAsByteVector(Op.getValueType()) &&
5514 canTreatAsByteVector(Op.getOperand(0).getValueType())) {
5515 // Make sure that only the unextended bits are significant.
5516 EVT ExtVT = Op.getValueType();
5517 EVT OpVT = Op.getOperand(0).getValueType();
5518 unsigned ExtBytesPerElement = ExtVT.getVectorElementType().getStoreSize();
5519 unsigned OpBytesPerElement = OpVT.getVectorElementType().getStoreSize();
5520 unsigned Byte = Index * BytesPerElement;
5521 unsigned SubByte = Byte % ExtBytesPerElement;
5522 unsigned MinSubByte = ExtBytesPerElement - OpBytesPerElement;
5523 if (SubByte < MinSubByte ||
5524 SubByte + BytesPerElement > ExtBytesPerElement)
5525 break;
5526 // Get the byte offset of the unextended element
5527 Byte = Byte / ExtBytesPerElement * OpBytesPerElement;
5528 // ...then add the byte offset relative to that element.
5529 Byte += SubByte - MinSubByte;
5530 if (Byte % BytesPerElement != 0)
5531 break;
5532 Op = Op.getOperand(0);
5533 Index = Byte / BytesPerElement;
5534 Force = true;
5535 } else
5536 break;
5537 }
5538 if (Force) {
5539 if (Op.getValueType() != VecVT) {
5540 Op = DAG.getNode(ISD::BITCAST, DL, VecVT, Op);
5541 DCI.AddToWorklist(Op.getNode());
5542 }
5543 return DAG.getNode(ISD::EXTRACT_VECTOR_ELT, DL, ResVT, Op,
5544 DAG.getConstant(Index, DL, MVT::i32));
5545 }
5546 return SDValue();
5547}
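
The *_EXTEND_VECTOR_INREG case above is pure byte-offset arithmetic. The following standalone sketch (illustrative only; it is not part of the analyzed source and the names are made up) mirrors that arithmetic, showing how an extract from the extended vector is remapped onto the unextended operand:

#include <cassert>

// Given an extract of BytesPerElement bytes at element Index of the
// extended vector, return the element index into the unextended operand,
// or -1 if the extracted bytes are not exactly the low-order (unextended)
// bytes of one extended element.  Big-endian layout, as on SystemZ.
static int remapThroughExtend(unsigned Index, unsigned BytesPerElement,
                              unsigned ExtBytesPerElement,
                              unsigned OpBytesPerElement) {
  unsigned Byte = Index * BytesPerElement;
  unsigned SubByte = Byte % ExtBytesPerElement;
  unsigned MinSubByte = ExtBytesPerElement - OpBytesPerElement;
  if (SubByte < MinSubByte ||
      SubByte + BytesPerElement > ExtBytesPerElement)
    return -1;
  Byte = Byte / ExtBytesPerElement * OpBytesPerElement; // start of element
  Byte += SubByte - MinSubByte;                         // offset within it
  if (Byte % BytesPerElement != 0)
    return -1;
  return int(Byte / BytesPerElement);
}

int main() {
  // v8i16 = zero_extend_vector_inreg of v16i8: byte 3 of the wide vector
  // is the low byte of wide element 1 and so maps to narrow element 1...
  assert(remapThroughExtend(3, 1, 2, 1) == 1);
  // ...while byte 2 is an extension byte and cannot be remapped.
  assert(remapThroughExtend(2, 1, 2, 1) == -1);
  return 0;
}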
5548
5549// Optimize vector operations in scalar value Op on the basis that Op
5550// is truncated to TruncVT.
5551SDValue SystemZTargetLowering::combineTruncateExtract(
5552 const SDLoc &DL, EVT TruncVT, SDValue Op, DAGCombinerInfo &DCI) const {
5553 // If we have (trunc (extract_vector_elt X, Y)), try to turn it into
5554 // (extract_vector_elt (bitcast X), Y'), where (bitcast X) has elements
5555 // of type TruncVT.
5556 if (Op.getOpcode() == ISD::EXTRACT_VECTOR_ELT &&
5557 TruncVT.getSizeInBits() % 8 == 0) {
5558 SDValue Vec = Op.getOperand(0);
5559 EVT VecVT = Vec.getValueType();
5560 if (canTreatAsByteVector(VecVT)) {
5561 if (auto *IndexN = dyn_cast<ConstantSDNode>(Op.getOperand(1))) {
5562 unsigned BytesPerElement = VecVT.getVectorElementType().getStoreSize();
5563 unsigned TruncBytes = TruncVT.getStoreSize();
5564 if (BytesPerElement % TruncBytes == 0) {
5565 // Calculate the value of Y' in the above description. We are
5566 // splitting the original elements into Scale equal-sized pieces
5567 // and for truncation purposes want the last (least-significant)
5568 // of these pieces for IndexN. This is easiest to do by calculating
5569 // the start index of the following element and then subtracting 1.
5570 unsigned Scale = BytesPerElement / TruncBytes;
5571 unsigned NewIndex = (IndexN->getZExtValue() + 1) * Scale - 1;
5572
5573 // Defer the creation of the bitcast from X to combineExtract,
5574 // which might be able to optimize the extraction.
5575 VecVT = MVT::getVectorVT(MVT::getIntegerVT(TruncBytes * 8),
5576 VecVT.getStoreSize() / TruncBytes);
5577 EVT ResVT = (TruncBytes < 4 ? MVT::i32 : TruncVT);
5578 return combineExtract(DL, ResVT, VecVT, Vec, NewIndex, DCI, true);
5579 }
5580 }
5581 }
5582 }
5583 return SDValue();
5584}
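
The Y' computation above is compact; a minimal standalone sketch (names are illustrative, not from the source) spells it out for concrete types:

// Splitting each BytesPerElement-byte vector element into equal pieces of
// TruncBytes each, the truncated value is the last piece of element Y,
// i.e. the least-significant piece on a big-endian target.
constexpr unsigned truncatedPieceIndex(unsigned Y, unsigned BytesPerElement,
                                       unsigned TruncBytes) {
  return (Y + 1) * (BytesPerElement / TruncBytes) - 1;
}

// Truncating the i64 at index 1 of a v2i64 down to i16 extracts lane 7 of
// the same 16 bytes viewed as v8i16.
static_assert(truncatedPieceIndex(1, 8, 2) == 7, "v2i64 -> v8i16");
// Truncating the i32 at index 2 of a v4i32 down to i8 extracts lane 11 of
// the vector viewed as v16i8.
static_assert(truncatedPieceIndex(2, 4, 1) == 11, "v4i32 -> v16i8");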
5585
5586SDValue SystemZTargetLowering::combineZERO_EXTEND(
5587 SDNode *N, DAGCombinerInfo &DCI) const {
5588 // Convert (zext (select_ccmask C1, C2)) into (select_ccmask C1', C2')
5589 SelectionDAG &DAG = DCI.DAG;
5590 SDValue N0 = N->getOperand(0);
5591 EVT VT = N->getValueType(0);
5592 if (N0.getOpcode() == SystemZISD::SELECT_CCMASK) {
5593 auto *TrueOp = dyn_cast<ConstantSDNode>(N0.getOperand(0));
5594 auto *FalseOp = dyn_cast<ConstantSDNode>(N0.getOperand(1));
5595 if (TrueOp && FalseOp) {
5596 SDLoc DL(N0);
5597 SDValue Ops[] = { DAG.getConstant(TrueOp->getZExtValue(), DL, VT),
5598 DAG.getConstant(FalseOp->getZExtValue(), DL, VT),
5599 N0.getOperand(2), N0.getOperand(3), N0.getOperand(4) };
5600 SDValue NewSelect = DAG.getNode(SystemZISD::SELECT_CCMASK, DL, VT, Ops);
5601 // If N0 has multiple uses, change other uses as well.
5602 if (!N0.hasOneUse()) {
5603 SDValue TruncSelect =
5604 DAG.getNode(ISD::TRUNCATE, DL, N0.getValueType(), NewSelect);
5605 DCI.CombineTo(N0.getNode(), TruncSelect);
5606 }
5607 return NewSelect;
5608 }
5609 }
5610 return SDValue();
5611}
5612
5613SDValue SystemZTargetLowering::combineSIGN_EXTEND_INREG(
5614 SDNode *N, DAGCombinerInfo &DCI) const {
5615 // Convert (sext_in_reg (setcc LHS, RHS, COND), i1)
5616 // and (sext_in_reg (any_extend (setcc LHS, RHS, COND)), i1)
5617 // into (select_cc LHS, RHS, -1, 0, COND)
5618 SelectionDAG &DAG = DCI.DAG;
5619 SDValue N0 = N->getOperand(0);
5620 EVT VT = N->getValueType(0);
5621 EVT EVT = cast<VTSDNode>(N->getOperand(1))->getVT();
5622 if (N0.hasOneUse() && N0.getOpcode() == ISD::ANY_EXTEND)
5623 N0 = N0.getOperand(0);
5624 if (EVT == MVT::i1 && N0.hasOneUse() && N0.getOpcode() == ISD::SETCC) {
5625 SDLoc DL(N0);
5626 SDValue Ops[] = { N0.getOperand(0), N0.getOperand(1),
5627 DAG.getConstant(-1, DL, VT), DAG.getConstant(0, DL, VT),
5628 N0.getOperand(2) };
5629 return DAG.getNode(ISD::SELECT_CC, DL, VT, Ops);
5630 }
5631 return SDValue();
5632}
5633
5634SDValue SystemZTargetLowering::combineSIGN_EXTEND(
5635 SDNode *N, DAGCombinerInfo &DCI) const {
5636 // Convert (sext (ashr (shl X, C1), C2)) to
5637 // (ashr (shl (anyext X), C1'), C2')), since wider shifts are as
5638 // cheap as narrower ones.
5639 SelectionDAG &DAG = DCI.DAG;
5640 SDValue N0 = N->getOperand(0);
5641 EVT VT = N->getValueType(0);
5642 if (N0.hasOneUse() && N0.getOpcode() == ISD::SRA) {
5643 auto *SraAmt = dyn_cast<ConstantSDNode>(N0.getOperand(1));
5644 SDValue Inner = N0.getOperand(0);
5645 if (SraAmt && Inner.hasOneUse() && Inner.getOpcode() == ISD::SHL) {
5646 if (auto *ShlAmt = dyn_cast<ConstantSDNode>(Inner.getOperand(1))) {
5647 unsigned Extra = (VT.getSizeInBits() - N0.getValueSizeInBits());
5648 unsigned NewShlAmt = ShlAmt->getZExtValue() + Extra;
5649 unsigned NewSraAmt = SraAmt->getZExtValue() + Extra;
5650 EVT ShiftVT = N0.getOperand(1).getValueType();
5651 SDValue Ext = DAG.getNode(ISD::ANY_EXTEND, SDLoc(Inner), VT,
5652 Inner.getOperand(0));
5653 SDValue Shl = DAG.getNode(ISD::SHL, SDLoc(Inner), VT, Ext,
5654 DAG.getConstant(NewShlAmt, SDLoc(Inner),
5655 ShiftVT));
5656 return DAG.getNode(ISD::SRA, SDLoc(N0), VT, Shl,
5657 DAG.getConstant(NewSraAmt, SDLoc(N0), ShiftVT));
5658 }
5659 }
5660 }
5661 return SDValue();
5662}
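
The rewrite above relies on an identity between the narrow shift pair followed by sign extension and the widened pair applied to the extended value. A minimal standalone check for the i32-to-i64 case (illustrative only; assumes the usual two's-complement wrap-around and arithmetic right shift of mainstream compilers):

#include <cassert>
#include <cstdint>

// Narrow form: sext64(ashr32(shl32(X, C1), C2)).
static int64_t narrowForm(int32_t X, unsigned C1, unsigned C2) {
  int32_t Shifted = int32_t(uint32_t(X) << C1) >> C2;
  return int64_t(Shifted);
}

// Widened form: ashr64(shl64(anyext(X), C1 + Extra), C2 + Extra), with
// Extra = 64 - 32.  The any-extended high bits do not matter because the
// wider left shift pushes them out of the register.
static int64_t widenedForm(int32_t X, unsigned C1, unsigned C2) {
  const unsigned Extra = 64 - 32;
  uint64_t Ext = uint64_t(uint32_t(X)); // any_extend modelled as zext
  return int64_t(Ext << (C1 + Extra)) >> (C2 + Extra);
}

int main() {
  for (int32_t X : {0, 1, -1, 0x12345678, int32_t(0x80000001)})
    for (unsigned C1 : {0u, 5u, 24u})
      for (unsigned C2 : {0u, 7u, 24u})
        assert(narrowForm(X, C1, C2) == widenedForm(X, C1, C2));
  return 0;
}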
5663
5664SDValue SystemZTargetLowering::combineMERGE(
5665 SDNode *N, DAGCombinerInfo &DCI) const {
5666 SelectionDAG &DAG = DCI.DAG;
5667 unsigned Opcode = N->getOpcode();
5668 SDValue Op0 = N->getOperand(0);
5669 SDValue Op1 = N->getOperand(1);
5670 if (Op0.getOpcode() == ISD::BITCAST)
5671 Op0 = Op0.getOperand(0);
5672 if (ISD::isBuildVectorAllZeros(Op0.getNode())) {
5673 // (z_merge_* 0, 0) -> 0. This is mostly useful for using VLLEZF
5674 // for v4f32.
5675 if (Op1 == N->getOperand(0))
5676 return Op1;
5677 // (z_merge_? 0, X) -> (z_unpackl_? 0, X).
5678 EVT VT = Op1.getValueType();
5679 unsigned ElemBytes = VT.getVectorElementType().getStoreSize();
5680 if (ElemBytes <= 4) {
5681 Opcode = (Opcode == SystemZISD::MERGE_HIGH ?
5682 SystemZISD::UNPACKL_HIGH : SystemZISD::UNPACKL_LOW);
5683 EVT InVT = VT.changeVectorElementTypeToInteger();
5684 EVT OutVT = MVT::getVectorVT(MVT::getIntegerVT(ElemBytes * 16),
5685 SystemZ::VectorBytes / ElemBytes / 2);
5686 if (VT != InVT) {
5687 Op1 = DAG.getNode(ISD::BITCAST, SDLoc(N), InVT, Op1);
5688 DCI.AddToWorklist(Op1.getNode());
5689 }
5690 SDValue Op = DAG.getNode(Opcode, SDLoc(N), OutVT, Op1);
5691 DCI.AddToWorklist(Op.getNode());
5692 return DAG.getNode(ISD::BITCAST, SDLoc(N), VT, Op);
5693 }
5694 }
5695 return SDValue();
5696}
5697
5698SDValue SystemZTargetLowering::combineLOAD(
5699 SDNode *N, DAGCombinerInfo &DCI) const {
5700 SelectionDAG &DAG = DCI.DAG;
5701 EVT LdVT = N->getValueType(0);
5702 if (LdVT.isVector() || LdVT.isInteger())
5703 return SDValue();
5704 // Transform a scalar load that is REPLICATEd as well as having other
5705 // use(s) to the form where the other use(s) use the first element of the
5706 // REPLICATE instead of the load. Otherwise instruction selection will not
5707 // produce a VLREP. Avoid extracting to a GPR, so only do this for floating
5708 // point loads.
5709
5710 SDValue Replicate;
5711 SmallVector<SDNode*, 8> OtherUses;
5712 for (SDNode::use_iterator UI = N->use_begin(), UE = N->use_end();
5713 UI != UE; ++UI) {
5714 if (UI->getOpcode() == SystemZISD::REPLICATE) {
5715 if (Replicate)
5716 return SDValue(); // Should never happen
5717 Replicate = SDValue(*UI, 0);
5718 }
5719 else if (UI.getUse().getResNo() == 0)
5720 OtherUses.push_back(*UI);
5721 }
5722 if (!Replicate || OtherUses.empty())
5723 return SDValue();
5724
5725 SDLoc DL(N);
5726 SDValue Extract0 = DAG.getNode(ISD::EXTRACT_VECTOR_ELT, DL, LdVT,
5727 Replicate, DAG.getConstant(0, DL, MVT::i32));
5728 // Update uses of the loaded Value while preserving old chains.
5729 for (SDNode *U : OtherUses) {
5730 SmallVector<SDValue, 8> Ops;
5731 for (SDValue Op : U->ops())
5732 Ops.push_back((Op.getNode() == N && Op.getResNo() == 0) ? Extract0 : Op);
5733 DAG.UpdateNodeOperands(U, Ops);
5734 }
5735 return SDValue(N, 0);
5736}
5737
5738bool SystemZTargetLowering::canLoadStoreByteSwapped(EVT VT) const {
5739 if (VT == MVT::i16 || VT == MVT::i32 || VT == MVT::i64)
5740 return true;
5741 if (Subtarget.hasVectorEnhancements2())
5742 if (VT == MVT::v8i16 || VT == MVT::v4i32 || VT == MVT::v2i64)
5743 return true;
5744 return false;
5745}
5746
5747static bool isVectorElementSwap(ArrayRef<int> M, EVT VT) {
5748 if (!VT.isVector() || !VT.isSimple() ||
5749 VT.getSizeInBits() != 128 ||
5750 VT.getScalarSizeInBits() % 8 != 0)
5751 return false;
5752
5753 unsigned NumElts = VT.getVectorNumElements();
5754 for (unsigned i = 0; i < NumElts; ++i) {
5755 if (M[i] < 0) continue; // ignore UNDEF indices
5756 if ((unsigned) M[i] != NumElts - 1 - i)
5757 return false;
5758 }
5759
5760 return true;
5761}
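
As a point of reference, a minimal standalone illustration (not from the source; names are made up) of the mask shape this predicate accepts, namely a full element reversal with UNDEF (-1) lanes ignored:

#include <cassert>
#include <initializer_list>

// Same test as isVectorElementSwap, restated over a plain list so the
// accepted masks are easy to see.
static bool isFullReversal(std::initializer_list<int> M) {
  unsigned NumElts = M.size(), i = 0;
  for (int Idx : M) {
    if (Idx >= 0 && unsigned(Idx) != NumElts - 1 - i)
      return false;
    ++i;
  }
  return true;
}

int main() {
  assert(isFullReversal({3, 2, 1, 0}));               // v4i32 swap -> VLER/VSTER
  assert(isFullReversal({7, -1, 5, 4, 3, 2, -1, 0})); // UNDEF lanes are ignored
  assert(!isFullReversal({1, 0, 3, 2}));              // pairwise swap: rejected
  return 0;
}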
5762
5763SDValue SystemZTargetLowering::combineSTORE(
5764 SDNode *N, DAGCombinerInfo &DCI) const {
5765 SelectionDAG &DAG = DCI.DAG;
5766 auto *SN = cast<StoreSDNode>(N);
5767 auto &Op1 = N->getOperand(1);
5768 EVT MemVT = SN->getMemoryVT();
5769 // If we have (truncstoreiN (extract_vector_elt X, Y), Z) then it is better
5770 // for the extraction to be done on a vMiN value, so that we can use VSTE.
5771 // If X has wider elements then convert it to:
5772 // (truncstoreiN (extract_vector_elt (bitcast X), Y2), Z).
5773 if (MemVT.isInteger() && SN->isTruncatingStore()) {
5774 if (SDValue Value =
5775 combineTruncateExtract(SDLoc(N), MemVT, SN->getValue(), DCI)) {
5776 DCI.AddToWorklist(Value.getNode());
5777
5778 // Rewrite the store with the new form of stored value.
5779 return DAG.getTruncStore(SN->getChain(), SDLoc(SN), Value,
5780 SN->getBasePtr(), SN->getMemoryVT(),
5781 SN->getMemOperand());
5782 }
5783 }
5784 // Combine STORE (BSWAP) into STRVH/STRV/STRVG/VSTBR
5785 if (!SN->isTruncatingStore() &&
5786 Op1.getOpcode() == ISD::BSWAP &&
5787 Op1.getNode()->hasOneUse() &&
5788 canLoadStoreByteSwapped(Op1.getValueType())) {
5789
5790 SDValue BSwapOp = Op1.getOperand(0);
5791
5792 if (BSwapOp.getValueType() == MVT::i16)
5793 BSwapOp = DAG.getNode(ISD::ANY_EXTEND, SDLoc(N), MVT::i32, BSwapOp);
5794
5795 SDValue Ops[] = {
5796 N->getOperand(0), BSwapOp, N->getOperand(2)
5797 };
5798
5799 return
5800 DAG.getMemIntrinsicNode(SystemZISD::STRV, SDLoc(N), DAG.getVTList(MVT::Other),
5801 Ops, MemVT, SN->getMemOperand());
5802 }
5803 // Combine STORE (element-swap) into VSTER
5804 if (!SN->isTruncatingStore() &&
5805 Op1.getOpcode() == ISD::VECTOR_SHUFFLE &&
5806 Op1.getNode()->hasOneUse() &&
5807 Subtarget.hasVectorEnhancements2()) {
5808 ShuffleVectorSDNode *SVN = cast<ShuffleVectorSDNode>(Op1.getNode());
5809 ArrayRef<int> ShuffleMask = SVN->getMask();
5810 if (isVectorElementSwap(ShuffleMask, Op1.getValueType())) {
5811 SDValue Ops[] = {
5812 N->getOperand(0), Op1.getOperand(0), N->getOperand(2)
5813 };
5814
5815 return DAG.getMemIntrinsicNode(SystemZISD::VSTER, SDLoc(N),
5816 DAG.getVTList(MVT::Other),
5817 Ops, MemVT, SN->getMemOperand());
5818 }
5819 }
5820
5821 return SDValue();
5822}
5823
5824SDValue SystemZTargetLowering::combineVECTOR_SHUFFLE(
5825 SDNode *N, DAGCombinerInfo &DCI) const {
5826 SelectionDAG &DAG = DCI.DAG;
5827 // Combine element-swap (LOAD) into VLER
5828 if (ISD::isNON_EXTLoad(N->getOperand(0).getNode()) &&
5829 N->getOperand(0).hasOneUse() &&
5830 Subtarget.hasVectorEnhancements2()) {
5831 ShuffleVectorSDNode *SVN = cast<ShuffleVectorSDNode>(N);
5832 ArrayRef<int> ShuffleMask = SVN->getMask();
5833 if (isVectorElementSwap(ShuffleMask, N->getValueType(0))) {
5834 SDValue Load = N->getOperand(0);
5835 LoadSDNode *LD = cast<LoadSDNode>(Load);
5836
5837 // Create the element-swapping load.
5838 SDValue Ops[] = {
5839 LD->getChain(), // Chain
5840 LD->getBasePtr() // Ptr
5841 };
5842 SDValue ESLoad =
5843 DAG.getMemIntrinsicNode(SystemZISD::VLER, SDLoc(N),
5844 DAG.getVTList(LD->getValueType(0), MVT::Other),
5845 Ops, LD->getMemoryVT(), LD->getMemOperand());
5846
5847 // First, combine the VECTOR_SHUFFLE away. This makes the value produced
5848 // by the load dead.
5849 DCI.CombineTo(N, ESLoad);
5850
5851    // Next, combine the load away; we give it a bogus result value but a real
5852 // chain result. The result value is dead because the shuffle is dead.
5853 DCI.CombineTo(Load.getNode(), ESLoad, ESLoad.getValue(1));
5854
5855 // Return N so it doesn't get rechecked!
5856 return SDValue(N, 0);
5857 }
5858 }
5859
5860 return SDValue();
5861}
5862
5863SDValue SystemZTargetLowering::combineEXTRACT_VECTOR_ELT(
5864 SDNode *N, DAGCombinerInfo &DCI) const {
5865 SelectionDAG &DAG = DCI.DAG;
5866
5867 if (!Subtarget.hasVector())
5868 return SDValue();
5869
5870 // Look through bitcasts that retain the number of vector elements.
5871 SDValue Op = N->getOperand(0);
5872 if (Op.getOpcode() == ISD::BITCAST &&
5873 Op.getValueType().isVector() &&
5874 Op.getOperand(0).getValueType().isVector() &&
5875 Op.getValueType().getVectorNumElements() ==
5876 Op.getOperand(0).getValueType().getVectorNumElements())
5877 Op = Op.getOperand(0);
5878
5879 // Pull BSWAP out of a vector extraction.
5880 if (Op.getOpcode() == ISD::BSWAP && Op.hasOneUse()) {
5881 EVT VecVT = Op.getValueType();
5882 EVT EltVT = VecVT.getVectorElementType();
5883 Op = DAG.getNode(ISD::EXTRACT_VECTOR_ELT, SDLoc(N), EltVT,
5884 Op.getOperand(0), N->getOperand(1));
5885 DCI.AddToWorklist(Op.getNode());
5886 Op = DAG.getNode(ISD::BSWAP, SDLoc(N), EltVT, Op);
5887 if (EltVT != N->getValueType(0)) {
5888 DCI.AddToWorklist(Op.getNode());
5889 Op = DAG.getNode(ISD::BITCAST, SDLoc(N), N->getValueType(0), Op);
5890 }
5891 return Op;
5892 }
5893
5894 // Try to simplify a vector extraction.
5895 if (auto *IndexN = dyn_cast<ConstantSDNode>(N->getOperand(1))) {
5896 SDValue Op0 = N->getOperand(0);
5897 EVT VecVT = Op0.getValueType();
5898 return combineExtract(SDLoc(N), N->getValueType(0), VecVT, Op0,
5899 IndexN->getZExtValue(), DCI, false);
5900 }
5901 return SDValue();
5902}
5903
5904SDValue SystemZTargetLowering::combineJOIN_DWORDS(
5905 SDNode *N, DAGCombinerInfo &DCI) const {
5906 SelectionDAG &DAG = DCI.DAG;
5907 // (join_dwords X, X) == (replicate X)
5908 if (N->getOperand(0) == N->getOperand(1))
5909 return DAG.getNode(SystemZISD::REPLICATE, SDLoc(N), N->getValueType(0),
5910 N->getOperand(0));
5911 return SDValue();
5912}
5913
5914static SDValue MergeInputChains(SDNode *N1, SDNode *N2) {
5915 SDValue Chain1 = N1->getOperand(0);
5916 SDValue Chain2 = N2->getOperand(0);
5917
5918 // Trivial case: both nodes take the same chain.
5919 if (Chain1 == Chain2)
5920 return Chain1;
5921
5922 // FIXME - we could handle more complex cases via TokenFactor,
5923 // assuming we can verify that this would not create a cycle.
5924 return SDValue();
5925}
5926
5927SDValue SystemZTargetLowering::combineFP_ROUND(
5928 SDNode *N, DAGCombinerInfo &DCI) const {
5929
5930 if (!Subtarget.hasVector())
5931 return SDValue();
5932
5933 // (fpround (extract_vector_elt X 0))
5934 // (fpround (extract_vector_elt X 1)) ->
5935 // (extract_vector_elt (VROUND X) 0)
5936 // (extract_vector_elt (VROUND X) 2)
5937 //
5938 // This is a special case since the target doesn't really support v2f32s.
5939 unsigned OpNo = N->isStrictFPOpcode() ? 1 : 0;
5940 SelectionDAG &DAG = DCI.DAG;
5941 SDValue Op0 = N->getOperand(OpNo);
5942 if (N->getValueType(0) == MVT::f32 &&
5943 Op0.hasOneUse() &&
5944 Op0.getOpcode() == ISD::EXTRACT_VECTOR_ELT &&
5945 Op0.getOperand(0).getValueType() == MVT::v2f64 &&
5946 Op0.getOperand(1).getOpcode() == ISD::Constant &&
5947 cast<ConstantSDNode>(Op0.getOperand(1))->getZExtValue() == 0) {
5948 SDValue Vec = Op0.getOperand(0);
5949 for (auto *U : Vec->uses()) {
5950 if (U != Op0.getNode() &&
5951 U->hasOneUse() &&
5952 U->getOpcode() == ISD::EXTRACT_VECTOR_ELT &&
5953 U->getOperand(0) == Vec &&
5954 U->getOperand(1).getOpcode() == ISD::Constant &&
5955 cast<ConstantSDNode>(U->getOperand(1))->getZExtValue() == 1) {
5956 SDValue OtherRound = SDValue(*U->use_begin(), 0);
5957 if (OtherRound.getOpcode() == N->getOpcode() &&
5958 OtherRound.getOperand(OpNo) == SDValue(U, 0) &&
5959 OtherRound.getValueType() == MVT::f32) {
5960 SDValue VRound, Chain;
5961 if (N->isStrictFPOpcode()) {
5962 Chain = MergeInputChains(N, OtherRound.getNode());
5963 if (!Chain)
5964 continue;
5965 VRound = DAG.getNode(SystemZISD::STRICT_VROUND, SDLoc(N),
5966 {MVT::v4f32, MVT::Other}, {Chain, Vec});
5967 Chain = VRound.getValue(1);
5968 } else
5969 VRound = DAG.getNode(SystemZISD::VROUND, SDLoc(N),
5970 MVT::v4f32, Vec);
5971 DCI.AddToWorklist(VRound.getNode());
5972 SDValue Extract1 =
5973 DAG.getNode(ISD::EXTRACT_VECTOR_ELT, SDLoc(U), MVT::f32,
5974 VRound, DAG.getConstant(2, SDLoc(U), MVT::i32));
5975 DCI.AddToWorklist(Extract1.getNode());
5976 DAG.ReplaceAllUsesOfValueWith(OtherRound, Extract1);
5977 if (Chain)
5978 DAG.ReplaceAllUsesOfValueWith(OtherRound.getValue(1), Chain);
5979 SDValue Extract0 =
5980 DAG.getNode(ISD::EXTRACT_VECTOR_ELT, SDLoc(Op0), MVT::f32,
5981 VRound, DAG.getConstant(0, SDLoc(Op0), MVT::i32));
5982 if (Chain)
5983 return DAG.getNode(ISD::MERGE_VALUES, SDLoc(Op0),
5984 N->getVTList(), Extract0, Chain);
5985 return Extract0;
5986 }
5987 }
5988 }
5989 }
5990 return SDValue();
5991}
5992
5993SDValue SystemZTargetLowering::combineFP_EXTEND(
5994 SDNode *N, DAGCombinerInfo &DCI) const {
5995
5996 if (!Subtarget.hasVector())
5997 return SDValue();
5998
5999 // (fpextend (extract_vector_elt X 0))
6000 // (fpextend (extract_vector_elt X 2)) ->
6001 // (extract_vector_elt (VEXTEND X) 0)
6002 // (extract_vector_elt (VEXTEND X) 1)
6003 //
6004 // This is a special case since the target doesn't really support v2f32s.
6005 unsigned OpNo = N->isStrictFPOpcode() ? 1 : 0;
6006 SelectionDAG &DAG = DCI.DAG;
6007 SDValue Op0 = N->getOperand(OpNo);
6008 if (N->getValueType(0) == MVT::f64 &&
6009 Op0.hasOneUse() &&
6010 Op0.getOpcode() == ISD::EXTRACT_VECTOR_ELT &&
6011 Op0.getOperand(0).getValueType() == MVT::v4f32 &&
6012 Op0.getOperand(1).getOpcode() == ISD::Constant &&
6013 cast<ConstantSDNode>(Op0.getOperand(1))->getZExtValue() == 0) {
6014 SDValue Vec = Op0.getOperand(0);
6015 for (auto *U : Vec->uses()) {
6016 if (U != Op0.getNode() &&
6017 U->hasOneUse() &&
6018 U->getOpcode() == ISD::EXTRACT_VECTOR_ELT &&
6019 U->getOperand(0) == Vec &&
6020 U->getOperand(1).getOpcode() == ISD::Constant &&
6021 cast<ConstantSDNode>(U->getOperand(1))->getZExtValue() == 2) {
6022 SDValue OtherExtend = SDValue(*U->use_begin(), 0);
6023 if (OtherExtend.getOpcode() == N->getOpcode() &&
6024 OtherExtend.getOperand(OpNo) == SDValue(U, 0) &&
6025 OtherExtend.getValueType() == MVT::f64) {
6026 SDValue VExtend, Chain;
6027 if (N->isStrictFPOpcode()) {
6028 Chain = MergeInputChains(N, OtherExtend.getNode());
6029 if (!Chain)
6030 continue;
6031 VExtend = DAG.getNode(SystemZISD::STRICT_VEXTEND, SDLoc(N),
6032 {MVT::v2f64, MVT::Other}, {Chain, Vec});
6033 Chain = VExtend.getValue(1);
6034 } else
6035 VExtend = DAG.getNode(SystemZISD::VEXTEND, SDLoc(N),
6036 MVT::v2f64, Vec);
6037 DCI.AddToWorklist(VExtend.getNode());
6038 SDValue Extract1 =
6039 DAG.getNode(ISD::EXTRACT_VECTOR_ELT, SDLoc(U), MVT::f64,
6040 VExtend, DAG.getConstant(1, SDLoc(U), MVT::i32));
6041 DCI.AddToWorklist(Extract1.getNode());
6042 DAG.ReplaceAllUsesOfValueWith(OtherExtend, Extract1);
6043 if (Chain)
6044 DAG.ReplaceAllUsesOfValueWith(OtherExtend.getValue(1), Chain);
6045 SDValue Extract0 =
6046 DAG.getNode(ISD::EXTRACT_VECTOR_ELT, SDLoc(Op0), MVT::f64,
6047 VExtend, DAG.getConstant(0, SDLoc(Op0), MVT::i32));
6048 if (Chain)
6049 return DAG.getNode(ISD::MERGE_VALUES, SDLoc(Op0),
6050 N->getVTList(), Extract0, Chain);
6051 return Extract0;
6052 }
6053 }
6054 }
6055 }
6056 return SDValue();
6057}
6058
6059SDValue SystemZTargetLowering::combineBSWAP(
6060 SDNode *N, DAGCombinerInfo &DCI) const {
6061 SelectionDAG &DAG = DCI.DAG;
6062 // Combine BSWAP (LOAD) into LRVH/LRV/LRVG/VLBR
6063 if (ISD::isNON_EXTLoad(N->getOperand(0).getNode()) &&
6064 N->getOperand(0).hasOneUse() &&
6065 canLoadStoreByteSwapped(N->getValueType(0))) {
6066 SDValue Load = N->getOperand(0);
6067 LoadSDNode *LD = cast<LoadSDNode>(Load);
6068
6069 // Create the byte-swapping load.
6070 SDValue Ops[] = {
6071 LD->getChain(), // Chain
6072 LD->getBasePtr() // Ptr
6073 };
6074 EVT LoadVT = N->getValueType(0);
6075 if (LoadVT == MVT::i16)
6076 LoadVT = MVT::i32;
6077 SDValue BSLoad =
6078 DAG.getMemIntrinsicNode(SystemZISD::LRV, SDLoc(N),
6079 DAG.getVTList(LoadVT, MVT::Other),
6080 Ops, LD->getMemoryVT(), LD->getMemOperand());
6081
6082 // If this is an i16 load, insert the truncate.
6083 SDValue ResVal = BSLoad;
6084 if (N->getValueType(0) == MVT::i16)
6085 ResVal = DAG.getNode(ISD::TRUNCATE, SDLoc(N), MVT::i16, BSLoad);
6086
6087 // First, combine the bswap away. This makes the value produced by the
6088 // load dead.
6089 DCI.CombineTo(N, ResVal);
6090
6091    // Next, combine the load away; we give it a bogus result value but a real
6092 // chain result. The result value is dead because the bswap is dead.
6093 DCI.CombineTo(Load.getNode(), ResVal, BSLoad.getValue(1));
6094
6095 // Return N so it doesn't get rechecked!
6096 return SDValue(N, 0);
6097 }
6098
6099 // Look through bitcasts that retain the number of vector elements.
6100 SDValue Op = N->getOperand(0);
6101 if (Op.getOpcode() == ISD::BITCAST &&
6102 Op.getValueType().isVector() &&
6103 Op.getOperand(0).getValueType().isVector() &&
6104 Op.getValueType().getVectorNumElements() ==
6105 Op.getOperand(0).getValueType().getVectorNumElements())
6106 Op = Op.getOperand(0);
6107
6108 // Push BSWAP into a vector insertion if at least one side then simplifies.
6109 if (Op.getOpcode() == ISD::INSERT_VECTOR_ELT && Op.hasOneUse()) {
6110 SDValue Vec = Op.getOperand(0);
6111 SDValue Elt = Op.getOperand(1);
6112 SDValue Idx = Op.getOperand(2);
6113
6114 if (DAG.isConstantIntBuildVectorOrConstantInt(Vec) ||
6115 Vec.getOpcode() == ISD::BSWAP || Vec.isUndef() ||
6116 DAG.isConstantIntBuildVectorOrConstantInt(Elt) ||
6117 Elt.getOpcode() == ISD::BSWAP || Elt.isUndef() ||
6118 (canLoadStoreByteSwapped(N->getValueType(0)) &&
6119 ISD::isNON_EXTLoad(Elt.getNode()) && Elt.hasOneUse())) {
6120 EVT VecVT = N->getValueType(0);
6121 EVT EltVT = N->getValueType(0).getVectorElementType();
6122 if (VecVT != Vec.getValueType()) {
6123 Vec = DAG.getNode(ISD::BITCAST, SDLoc(N), VecVT, Vec);
6124 DCI.AddToWorklist(Vec.getNode());
6125 }
6126 if (EltVT != Elt.getValueType()) {
6127 Elt = DAG.getNode(ISD::BITCAST, SDLoc(N), EltVT, Elt);
6128 DCI.AddToWorklist(Elt.getNode());
6129 }
6130 Vec = DAG.getNode(ISD::BSWAP, SDLoc(N), VecVT, Vec);
6131 DCI.AddToWorklist(Vec.getNode());
6132 Elt = DAG.getNode(ISD::BSWAP, SDLoc(N), EltVT, Elt);
6133 DCI.AddToWorklist(Elt.getNode());
6134 return DAG.getNode(ISD::INSERT_VECTOR_ELT, SDLoc(N), VecVT,
6135 Vec, Elt, Idx);
6136 }
6137 }
6138
6139 // Push BSWAP into a vector shuffle if at least one side then simplifies.
6140 ShuffleVectorSDNode *SV = dyn_cast<ShuffleVectorSDNode>(Op);
6141 if (SV && Op.hasOneUse()) {
6142 SDValue Op0 = Op.getOperand(0);
6143 SDValue Op1 = Op.getOperand(1);
6144
6145 if (DAG.isConstantIntBuildVector