Bug Summary

File: llvm/include/llvm/CodeGen/SelectionDAGNodes.h
Warning: line 1163, column 10
Called C++ object pointer is null

Annotated Source Code

clang -cc1 -triple x86_64-pc-linux-gnu -analyze -disable-free -disable-llvm-verifier -discard-value-names -main-file-name SystemZISelLowering.cpp -analyzer-store=region -analyzer-opt-analyze-nested-blocks -analyzer-checker=core -analyzer-checker=apiModeling -analyzer-checker=unix -analyzer-checker=deadcode -analyzer-checker=cplusplus -analyzer-checker=security.insecureAPI.UncheckedReturn -analyzer-checker=security.insecureAPI.getpw -analyzer-checker=security.insecureAPI.gets -analyzer-checker=security.insecureAPI.mktemp -analyzer-checker=security.insecureAPI.mkstemp -analyzer-checker=security.insecureAPI.vfork -analyzer-checker=nullability.NullPassedToNonnull -analyzer-checker=nullability.NullReturnedFromNonnull -analyzer-output plist -w -setup-static-analyzer -analyzer-config-compatibility-mode=true -mrelocation-model pic -pic-level 2 -mthread-model posix -mframe-pointer=none -fmath-errno -fno-rounding-math -masm-verbose -mconstructor-aliases -munwind-tables -fuse-init-array -target-cpu x86-64 -dwarf-column-info -debugger-tuning=gdb -ffunction-sections -fdata-sections -resource-dir /usr/lib/llvm-10/lib/clang/10.0.0 -D _DEBUG -D _GNU_SOURCE -D __STDC_CONSTANT_MACROS -D __STDC_FORMAT_MACROS -D __STDC_LIMIT_MACROS -I /build/llvm-toolchain-snapshot-10~+201911111502510600c19528f1809/build-llvm/lib/Target/SystemZ -I /build/llvm-toolchain-snapshot-10~+201911111502510600c19528f1809/llvm/lib/Target/SystemZ -I /build/llvm-toolchain-snapshot-10~+201911111502510600c19528f1809/build-llvm/include -I /build/llvm-toolchain-snapshot-10~+201911111502510600c19528f1809/llvm/include -U NDEBUG -internal-isystem /usr/lib/gcc/x86_64-linux-gnu/6.3.0/../../../../include/c++/6.3.0 -internal-isystem /usr/lib/gcc/x86_64-linux-gnu/6.3.0/../../../../include/x86_64-linux-gnu/c++/6.3.0 -internal-isystem /usr/lib/gcc/x86_64-linux-gnu/6.3.0/../../../../include/x86_64-linux-gnu/c++/6.3.0 -internal-isystem /usr/lib/gcc/x86_64-linux-gnu/6.3.0/../../../../include/c++/6.3.0/backward -internal-isystem /usr/local/include -internal-isystem /usr/lib/llvm-10/lib/clang/10.0.0/include -internal-externc-isystem /usr/include/x86_64-linux-gnu -internal-externc-isystem /include -internal-externc-isystem /usr/include -O2 -Wno-unused-parameter -Wwrite-strings -Wno-missing-field-initializers -Wno-long-long -Wno-maybe-uninitialized -Wno-comment -std=c++14 -fdeprecated-macro -fdebug-compilation-dir /build/llvm-toolchain-snapshot-10~+201911111502510600c19528f1809/build-llvm/lib/Target/SystemZ -fdebug-prefix-map=/build/llvm-toolchain-snapshot-10~+201911111502510600c19528f1809=. -ferror-limit 19 -fmessage-length 0 -fvisibility-inlines-hidden -stack-protector 2 -fgnuc-version=4.2.1 -fobjc-runtime=gcc -fdiagnostics-show-option -vectorize-loops -vectorize-slp -analyzer-output=html -analyzer-config stable-report-filename=true -faddrsig -o /tmp/scan-build-2019-12-11-181444-25759-1 -x c++ /build/llvm-toolchain-snapshot-10~+201911111502510600c19528f1809/llvm/lib/Target/SystemZ/SystemZISelLowering.cpp

/build/llvm-toolchain-snapshot-10~+201911111502510600c19528f1809/llvm/lib/Target/SystemZ/SystemZISelLowering.cpp

1//===-- SystemZISelLowering.cpp - SystemZ DAG lowering implementation -----===//
2//
3// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
4// See https://llvm.org/LICENSE.txt for license information.
5// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
6//
7//===----------------------------------------------------------------------===//
8//
9// This file implements the SystemZTargetLowering class.
10//
11//===----------------------------------------------------------------------===//
12
13#include "SystemZISelLowering.h"
14#include "SystemZCallingConv.h"
15#include "SystemZConstantPoolValue.h"
16#include "SystemZMachineFunctionInfo.h"
17#include "SystemZTargetMachine.h"
18#include "llvm/CodeGen/CallingConvLower.h"
19#include "llvm/CodeGen/MachineInstrBuilder.h"
20#include "llvm/CodeGen/MachineRegisterInfo.h"
21#include "llvm/CodeGen/TargetLoweringObjectFileImpl.h"
22#include "llvm/IR/Intrinsics.h"
23#include "llvm/IR/IntrinsicInst.h"
24#include "llvm/Support/CommandLine.h"
25#include "llvm/Support/KnownBits.h"
26#include <cctype>
27
28using namespace llvm;
29
30#define DEBUG_TYPE "systemz-lower"
31
32namespace {
33// Represents information about a comparison.
34struct Comparison {
35 Comparison(SDValue Op0In, SDValue Op1In)
36 : Op0(Op0In), Op1(Op1In), Opcode(0), ICmpType(0), CCValid(0), CCMask(0) {}
37
38 // The operands to the comparison.
39 SDValue Op0, Op1;
40
41 // The opcode that should be used to compare Op0 and Op1.
42 unsigned Opcode;
43
44 // A SystemZICMP value. Only used for integer comparisons.
45 unsigned ICmpType;
46
47 // The mask of CC values that Opcode can produce.
48 unsigned CCValid;
49
50 // The mask of CC values for which the original condition is true.
51 unsigned CCMask;
52};
53} // end anonymous namespace
54
55// Classify VT as either 32 or 64 bit.
56static bool is32Bit(EVT VT) {
57 switch (VT.getSimpleVT().SimpleTy) {
58 case MVT::i32:
59 return true;
60 case MVT::i64:
61 return false;
62 default:
63 llvm_unreachable("Unsupported type");
64 }
65}
66
67// Return a version of MachineOperand that can be safely used before the
68// final use.
69static MachineOperand earlyUseOperand(MachineOperand Op) {
70 if (Op.isReg())
71 Op.setIsKill(false);
72 return Op;
73}
74
75SystemZTargetLowering::SystemZTargetLowering(const TargetMachine &TM,
76 const SystemZSubtarget &STI)
77 : TargetLowering(TM), Subtarget(STI) {
78 MVT PtrVT = MVT::getIntegerVT(8 * TM.getPointerSize(0));
79
80 // Set up the register classes.
81 if (Subtarget.hasHighWord())
82 addRegisterClass(MVT::i32, &SystemZ::GRX32BitRegClass);
83 else
84 addRegisterClass(MVT::i32, &SystemZ::GR32BitRegClass);
85 addRegisterClass(MVT::i64, &SystemZ::GR64BitRegClass);
86 if (Subtarget.hasVector()) {
87 addRegisterClass(MVT::f32, &SystemZ::VR32BitRegClass);
88 addRegisterClass(MVT::f64, &SystemZ::VR64BitRegClass);
89 } else {
90 addRegisterClass(MVT::f32, &SystemZ::FP32BitRegClass);
91 addRegisterClass(MVT::f64, &SystemZ::FP64BitRegClass);
92 }
93 if (Subtarget.hasVectorEnhancements1())
94 addRegisterClass(MVT::f128, &SystemZ::VR128BitRegClass);
95 else
96 addRegisterClass(MVT::f128, &SystemZ::FP128BitRegClass);
97
98 if (Subtarget.hasVector()) {
99 addRegisterClass(MVT::v16i8, &SystemZ::VR128BitRegClass);
100 addRegisterClass(MVT::v8i16, &SystemZ::VR128BitRegClass);
101 addRegisterClass(MVT::v4i32, &SystemZ::VR128BitRegClass);
102 addRegisterClass(MVT::v2i64, &SystemZ::VR128BitRegClass);
103 addRegisterClass(MVT::v4f32, &SystemZ::VR128BitRegClass);
104 addRegisterClass(MVT::v2f64, &SystemZ::VR128BitRegClass);
105 }
106
107 // Compute derived properties from the register classes
108 computeRegisterProperties(Subtarget.getRegisterInfo());
109
110 // Set up special registers.
111 setStackPointerRegisterToSaveRestore(SystemZ::R15D);
112
113 // TODO: It may be better to default to latency-oriented scheduling, however
114 // LLVM's current latency-oriented scheduler can't handle physreg definitions
115 // such as SystemZ has with CC, so set this to the register-pressure
116 // scheduler, because it can.
117 setSchedulingPreference(Sched::RegPressure);
118
119 setBooleanContents(ZeroOrOneBooleanContent);
120 setBooleanVectorContents(ZeroOrNegativeOneBooleanContent);
121
122 // Instructions are strings of 2-byte aligned 2-byte values.
123 setMinFunctionAlignment(Align(2));
124 // For performance reasons we prefer 16-byte alignment.
125 setPrefFunctionAlignment(Align(16));
126
127 // Handle operations that are handled in a similar way for all types.
128 for (unsigned I = MVT::FIRST_INTEGER_VALUETYPE;
129 I <= MVT::LAST_FP_VALUETYPE;
130 ++I) {
131 MVT VT = MVT::SimpleValueType(I);
132 if (isTypeLegal(VT)) {
133 // Lower SET_CC into an IPM-based sequence.
134 setOperationAction(ISD::SETCC, VT, Custom);
135
136 // Expand SELECT(C, A, B) into SELECT_CC(X, 0, A, B, NE).
137 setOperationAction(ISD::SELECT, VT, Expand);
138
139 // Lower SELECT_CC and BR_CC into separate comparisons and branches.
140 setOperationAction(ISD::SELECT_CC, VT, Custom);
141 setOperationAction(ISD::BR_CC, VT, Custom);
142 }
143 }
144
145 // Expand jump table branches as address arithmetic followed by an
146 // indirect jump.
147 setOperationAction(ISD::BR_JT, MVT::Other, Expand);
148
149 // Expand BRCOND into a BR_CC (see above).
150 setOperationAction(ISD::BRCOND, MVT::Other, Expand);
151
152 // Handle integer types.
153 for (unsigned I = MVT::FIRST_INTEGER_VALUETYPE;
154 I <= MVT::LAST_INTEGER_VALUETYPE;
155 ++I) {
156 MVT VT = MVT::SimpleValueType(I);
157 if (isTypeLegal(VT)) {
158 // Expand individual DIV and REMs into DIVREMs.
159 setOperationAction(ISD::SDIV, VT, Expand);
160 setOperationAction(ISD::UDIV, VT, Expand);
161 setOperationAction(ISD::SREM, VT, Expand);
162 setOperationAction(ISD::UREM, VT, Expand);
163 setOperationAction(ISD::SDIVREM, VT, Custom);
164 setOperationAction(ISD::UDIVREM, VT, Custom);
165
166 // Support addition/subtraction with overflow.
167 setOperationAction(ISD::SADDO, VT, Custom);
168 setOperationAction(ISD::SSUBO, VT, Custom);
169
170 // Support addition/subtraction with carry.
171 setOperationAction(ISD::UADDO, VT, Custom);
172 setOperationAction(ISD::USUBO, VT, Custom);
173
174 // Support carry in as value rather than glue.
175 setOperationAction(ISD::ADDCARRY, VT, Custom);
176 setOperationAction(ISD::SUBCARRY, VT, Custom);
177
178 // Lower ATOMIC_LOAD and ATOMIC_STORE into normal volatile loads and
179 // stores, putting a serialization instruction after the stores.
180 setOperationAction(ISD::ATOMIC_LOAD, VT, Custom);
181 setOperationAction(ISD::ATOMIC_STORE, VT, Custom);
182
183 // Lower ATOMIC_LOAD_SUB into ATOMIC_LOAD_ADD if LAA and LAAG are
184 // available, or if the operand is constant.
185 setOperationAction(ISD::ATOMIC_LOAD_SUB, VT, Custom);
186
187 // Use POPCNT on z196 and above.
188 if (Subtarget.hasPopulationCount())
189 setOperationAction(ISD::CTPOP, VT, Custom);
190 else
191 setOperationAction(ISD::CTPOP, VT, Expand);
192
193 // No special instructions for these.
194 setOperationAction(ISD::CTTZ, VT, Expand);
195 setOperationAction(ISD::ROTR, VT, Expand);
196
197 // Use *MUL_LOHI where possible instead of MULH*.
198 setOperationAction(ISD::MULHS, VT, Expand);
199 setOperationAction(ISD::MULHU, VT, Expand);
200 setOperationAction(ISD::SMUL_LOHI, VT, Custom);
201 setOperationAction(ISD::UMUL_LOHI, VT, Custom);
202
203 // Only z196 and above have native support for conversions to unsigned.
204 // On z10, promoting to i64 doesn't generate an inexact condition for
205 // values that are outside the i32 range but in the i64 range, so use
206 // the default expansion.
207 if (!Subtarget.hasFPExtension())
208 setOperationAction(ISD::FP_TO_UINT, VT, Expand);
209
210 // Mirror those settings for STRICT_FP_TO_[SU]INT. Note that these all
211 // default to Expand, so need to be modified to Legal where appropriate.
212 setOperationAction(ISD::STRICT_FP_TO_SINT, VT, Legal);
213 if (Subtarget.hasFPExtension())
214 setOperationAction(ISD::STRICT_FP_TO_UINT, VT, Legal);
215 }
216 }
217
218 // Type legalization will convert 8- and 16-bit atomic operations into
219 // forms that operate on i32s (but still keeping the original memory VT).
220 // Lower them into full i32 operations.
221 setOperationAction(ISD::ATOMIC_SWAP, MVT::i32, Custom);
222 setOperationAction(ISD::ATOMIC_LOAD_ADD, MVT::i32, Custom);
223 setOperationAction(ISD::ATOMIC_LOAD_SUB, MVT::i32, Custom);
224 setOperationAction(ISD::ATOMIC_LOAD_AND, MVT::i32, Custom);
225 setOperationAction(ISD::ATOMIC_LOAD_OR, MVT::i32, Custom);
226 setOperationAction(ISD::ATOMIC_LOAD_XOR, MVT::i32, Custom);
227 setOperationAction(ISD::ATOMIC_LOAD_NAND, MVT::i32, Custom);
228 setOperationAction(ISD::ATOMIC_LOAD_MIN, MVT::i32, Custom);
229 setOperationAction(ISD::ATOMIC_LOAD_MAX, MVT::i32, Custom);
230 setOperationAction(ISD::ATOMIC_LOAD_UMIN, MVT::i32, Custom);
231 setOperationAction(ISD::ATOMIC_LOAD_UMAX, MVT::i32, Custom);
232
233 // Even though i128 is not a legal type, we still need to custom lower
234 // the atomic operations in order to exploit SystemZ instructions.
235 setOperationAction(ISD::ATOMIC_LOAD, MVT::i128, Custom);
236 setOperationAction(ISD::ATOMIC_STORE, MVT::i128, Custom);
237
238 // We can use the CC result of compare-and-swap to implement
239 // the "success" result of ATOMIC_CMP_SWAP_WITH_SUCCESS.
240 setOperationAction(ISD::ATOMIC_CMP_SWAP_WITH_SUCCESS, MVT::i32, Custom);
241 setOperationAction(ISD::ATOMIC_CMP_SWAP_WITH_SUCCESS, MVT::i64, Custom);
242 setOperationAction(ISD::ATOMIC_CMP_SWAP_WITH_SUCCESS, MVT::i128, Custom);
243
244 setOperationAction(ISD::ATOMIC_FENCE, MVT::Other, Custom);
245
246 // Traps are legal, as we will convert them to "j .+2".
247 setOperationAction(ISD::TRAP, MVT::Other, Legal);
248
249 // z10 has instructions for signed but not unsigned FP conversion.
250 // Handle unsigned 32-bit types as signed 64-bit types.
251 if (!Subtarget.hasFPExtension()) {
252 setOperationAction(ISD::UINT_TO_FP, MVT::i32, Promote);
253 setOperationAction(ISD::UINT_TO_FP, MVT::i64, Expand);
254 }
255
256 // We have native support for a 64-bit CTLZ, via FLOGR.
257 setOperationAction(ISD::CTLZ, MVT::i32, Promote);
258 setOperationAction(ISD::CTLZ_ZERO_UNDEF, MVT::i32, Promote);
259 setOperationAction(ISD::CTLZ, MVT::i64, Legal);
260
261 // On z15 we have native support for a 64-bit CTPOP.
262 if (Subtarget.hasMiscellaneousExtensions3()) {
263 setOperationAction(ISD::CTPOP, MVT::i32, Promote);
264 setOperationAction(ISD::CTPOP, MVT::i64, Legal);
265 }
266
267 // Give LowerOperation the chance to replace 64-bit ORs with subregs.
268 setOperationAction(ISD::OR, MVT::i64, Custom);
269
270 // FIXME: Can we support these natively?
271 setOperationAction(ISD::SRL_PARTS, MVT::i64, Expand);
272 setOperationAction(ISD::SHL_PARTS, MVT::i64, Expand);
273 setOperationAction(ISD::SRA_PARTS, MVT::i64, Expand);
274
275 // We have native instructions for i8, i16 and i32 extensions, but not i1.
276 setOperationAction(ISD::SIGN_EXTEND_INREG, MVT::i1, Expand);
277 for (MVT VT : MVT::integer_valuetypes()) {
278 setLoadExtAction(ISD::SEXTLOAD, VT, MVT::i1, Promote);
279 setLoadExtAction(ISD::ZEXTLOAD, VT, MVT::i1, Promote);
280 setLoadExtAction(ISD::EXTLOAD, VT, MVT::i1, Promote);
281 }
282
283 // Handle the various types of symbolic address.
284 setOperationAction(ISD::ConstantPool, PtrVT, Custom);
285 setOperationAction(ISD::GlobalAddress, PtrVT, Custom);
286 setOperationAction(ISD::GlobalTLSAddress, PtrVT, Custom);
287 setOperationAction(ISD::BlockAddress, PtrVT, Custom);
288 setOperationAction(ISD::JumpTable, PtrVT, Custom);
289
290 // We need to handle dynamic allocations specially because of the
291 // 160-byte area at the bottom of the stack.
292 setOperationAction(ISD::DYNAMIC_STACKALLOC, PtrVT, Custom);
293 setOperationAction(ISD::GET_DYNAMIC_AREA_OFFSET, PtrVT, Custom);
294
295 // Use custom expanders so that we can force the function to use
296 // a frame pointer.
297 setOperationAction(ISD::STACKSAVE, MVT::Other, Custom);
298 setOperationAction(ISD::STACKRESTORE, MVT::Other, Custom);
299
300 // Handle prefetches with PFD or PFDRL.
301 setOperationAction(ISD::PREFETCH, MVT::Other, Custom);
302
303 for (MVT VT : MVT::fixedlen_vector_valuetypes()) {
304 // Assume by default that all vector operations need to be expanded.
305 for (unsigned Opcode = 0; Opcode < ISD::BUILTIN_OP_END; ++Opcode)
306 if (getOperationAction(Opcode, VT) == Legal)
307 setOperationAction(Opcode, VT, Expand);
308
309 // Likewise all truncating stores and extending loads.
310 for (MVT InnerVT : MVT::fixedlen_vector_valuetypes()) {
311 setTruncStoreAction(VT, InnerVT, Expand);
312 setLoadExtAction(ISD::SEXTLOAD, VT, InnerVT, Expand);
313 setLoadExtAction(ISD::ZEXTLOAD, VT, InnerVT, Expand);
314 setLoadExtAction(ISD::EXTLOAD, VT, InnerVT, Expand);
315 }
316
317 if (isTypeLegal(VT)) {
318 // These operations are legal for anything that can be stored in a
319 // vector register, even if there is no native support for the format
320 // as such. In particular, we can do these for v4f32 even though there
321 // are no specific instructions for that format.
322 setOperationAction(ISD::LOAD, VT, Legal);
323 setOperationAction(ISD::STORE, VT, Legal);
324 setOperationAction(ISD::VSELECT, VT, Legal);
325 setOperationAction(ISD::BITCAST, VT, Legal);
326 setOperationAction(ISD::UNDEF, VT, Legal);
327
328 // Likewise, except that we need to replace the nodes with something
329 // more specific.
330 setOperationAction(ISD::BUILD_VECTOR, VT, Custom);
331 setOperationAction(ISD::VECTOR_SHUFFLE, VT, Custom);
332 }
333 }
334
335 // Handle integer vector types.
336 for (MVT VT : MVT::integer_fixedlen_vector_valuetypes()) {
337 if (isTypeLegal(VT)) {
338 // These operations have direct equivalents.
339 setOperationAction(ISD::EXTRACT_VECTOR_ELT, VT, Legal);
340 setOperationAction(ISD::INSERT_VECTOR_ELT, VT, Legal);
341 setOperationAction(ISD::ADD, VT, Legal);
342 setOperationAction(ISD::SUB, VT, Legal);
343 if (VT != MVT::v2i64)
344 setOperationAction(ISD::MUL, VT, Legal);
345 setOperationAction(ISD::AND, VT, Legal);
346 setOperationAction(ISD::OR, VT, Legal);
347 setOperationAction(ISD::XOR, VT, Legal);
348 if (Subtarget.hasVectorEnhancements1())
349 setOperationAction(ISD::CTPOP, VT, Legal);
350 else
351 setOperationAction(ISD::CTPOP, VT, Custom);
352 setOperationAction(ISD::CTTZ, VT, Legal);
353 setOperationAction(ISD::CTLZ, VT, Legal);
354
355 // Convert a GPR scalar to a vector by inserting it into element 0.
356 setOperationAction(ISD::SCALAR_TO_VECTOR, VT, Custom);
357
358 // Use a series of unpacks for extensions.
359 setOperationAction(ISD::SIGN_EXTEND_VECTOR_INREG, VT, Custom);
360 setOperationAction(ISD::ZERO_EXTEND_VECTOR_INREG, VT, Custom);
361
362 // Detect shifts by a scalar amount and convert them into
363 // V*_BY_SCALAR.
364 setOperationAction(ISD::SHL, VT, Custom);
365 setOperationAction(ISD::SRA, VT, Custom);
366 setOperationAction(ISD::SRL, VT, Custom);
367
368 // At present ROTL isn't matched by DAGCombiner. ROTR should be
369 // converted into ROTL.
370 setOperationAction(ISD::ROTL, VT, Expand);
371 setOperationAction(ISD::ROTR, VT, Expand);
372
373 // Map SETCCs onto one of VCE, VCH or VCHL, swapping the operands
374 // and inverting the result as necessary.
375 setOperationAction(ISD::SETCC, VT, Custom);
376 }
377 }
378
379 if (Subtarget.hasVector()) {
380 // There should be no need to check for float types other than v2f64
381 // since <2 x f32> isn't a legal type.
382 setOperationAction(ISD::FP_TO_SINT, MVT::v2i64, Legal);
383 setOperationAction(ISD::FP_TO_SINT, MVT::v2f64, Legal);
384 setOperationAction(ISD::FP_TO_UINT, MVT::v2i64, Legal);
385 setOperationAction(ISD::FP_TO_UINT, MVT::v2f64, Legal);
386 setOperationAction(ISD::SINT_TO_FP, MVT::v2i64, Legal);
387 setOperationAction(ISD::SINT_TO_FP, MVT::v2f64, Legal);
388 setOperationAction(ISD::UINT_TO_FP, MVT::v2i64, Legal);
389 setOperationAction(ISD::UINT_TO_FP, MVT::v2f64, Legal);
390
391 setOperationAction(ISD::STRICT_FP_TO_SINT, MVT::v2i64, Legal);
392 setOperationAction(ISD::STRICT_FP_TO_SINT, MVT::v2f64, Legal);
393 setOperationAction(ISD::STRICT_FP_TO_UINT, MVT::v2i64, Legal);
394 setOperationAction(ISD::STRICT_FP_TO_UINT, MVT::v2f64, Legal);
395 }
396
397 if (Subtarget.hasVectorEnhancements2()) {
398 setOperationAction(ISD::FP_TO_SINT, MVT::v4i32, Legal);
399 setOperationAction(ISD::FP_TO_SINT, MVT::v4f32, Legal);
400 setOperationAction(ISD::FP_TO_UINT, MVT::v4i32, Legal);
401 setOperationAction(ISD::FP_TO_UINT, MVT::v4f32, Legal);
402 setOperationAction(ISD::SINT_TO_FP, MVT::v4i32, Legal);
403 setOperationAction(ISD::SINT_TO_FP, MVT::v4f32, Legal);
404 setOperationAction(ISD::UINT_TO_FP, MVT::v4i32, Legal);
405 setOperationAction(ISD::UINT_TO_FP, MVT::v4f32, Legal);
406
407 setOperationAction(ISD::STRICT_FP_TO_SINT, MVT::v4i32, Legal);
408 setOperationAction(ISD::STRICT_FP_TO_SINT, MVT::v4f32, Legal);
409 setOperationAction(ISD::STRICT_FP_TO_UINT, MVT::v4i32, Legal);
410 setOperationAction(ISD::STRICT_FP_TO_UINT, MVT::v4f32, Legal);
411 }
412
413 // Handle floating-point types.
414 for (unsigned I = MVT::FIRST_FP_VALUETYPE;
415 I <= MVT::LAST_FP_VALUETYPE;
416 ++I) {
417 MVT VT = MVT::SimpleValueType(I);
418 if (isTypeLegal(VT)) {
419 // We can use FI for FRINT.
420 setOperationAction(ISD::FRINT, VT, Legal);
421
422 // We can use the extended form of FI for other rounding operations.
423 if (Subtarget.hasFPExtension()) {
424 setOperationAction(ISD::FNEARBYINT, VT, Legal);
425 setOperationAction(ISD::FFLOOR, VT, Legal);
426 setOperationAction(ISD::FCEIL, VT, Legal);
427 setOperationAction(ISD::FTRUNC, VT, Legal);
428 setOperationAction(ISD::FROUND, VT, Legal);
429 }
430
431 // No special instructions for these.
432 setOperationAction(ISD::FSIN, VT, Expand);
433 setOperationAction(ISD::FCOS, VT, Expand);
434 setOperationAction(ISD::FSINCOS, VT, Expand);
435 setOperationAction(ISD::FREM, VT, Expand);
436 setOperationAction(ISD::FPOW, VT, Expand);
437
438 // Handle constrained floating-point operations.
439 setOperationAction(ISD::STRICT_FADD, VT, Legal);
440 setOperationAction(ISD::STRICT_FSUB, VT, Legal);
441 setOperationAction(ISD::STRICT_FMUL, VT, Legal);
442 setOperationAction(ISD::STRICT_FDIV, VT, Legal);
443 setOperationAction(ISD::STRICT_FMA, VT, Legal);
444 setOperationAction(ISD::STRICT_FSQRT, VT, Legal);
445 setOperationAction(ISD::STRICT_FRINT, VT, Legal);
446 setOperationAction(ISD::STRICT_FP_ROUND, VT, Legal);
447 setOperationAction(ISD::STRICT_FP_EXTEND, VT, Legal);
448 if (Subtarget.hasFPExtension()) {
449 setOperationAction(ISD::STRICT_FNEARBYINT, VT, Legal);
450 setOperationAction(ISD::STRICT_FFLOOR, VT, Legal);
451 setOperationAction(ISD::STRICT_FCEIL, VT, Legal);
452 setOperationAction(ISD::STRICT_FROUND, VT, Legal);
453 setOperationAction(ISD::STRICT_FTRUNC, VT, Legal);
454 }
455 }
456 }
457
458 // Handle floating-point vector types.
459 if (Subtarget.hasVector()) {
460 // Scalar-to-vector conversion is just a subreg.
461 setOperationAction(ISD::SCALAR_TO_VECTOR, MVT::v4f32, Legal);
462 setOperationAction(ISD::SCALAR_TO_VECTOR, MVT::v2f64, Legal);
463
464 // Some insertions and extractions can be done directly but others
465 // need to go via integers.
466 setOperationAction(ISD::INSERT_VECTOR_ELT, MVT::v4f32, Custom);
467 setOperationAction(ISD::INSERT_VECTOR_ELT, MVT::v2f64, Custom);
468 setOperationAction(ISD::EXTRACT_VECTOR_ELT, MVT::v4f32, Custom);
469 setOperationAction(ISD::EXTRACT_VECTOR_ELT, MVT::v2f64, Custom);
470
471 // These operations have direct equivalents.
472 setOperationAction(ISD::FADD, MVT::v2f64, Legal);
473 setOperationAction(ISD::FNEG, MVT::v2f64, Legal);
474 setOperationAction(ISD::FSUB, MVT::v2f64, Legal);
475 setOperationAction(ISD::FMUL, MVT::v2f64, Legal);
476 setOperationAction(ISD::FMA, MVT::v2f64, Legal);
477 setOperationAction(ISD::FDIV, MVT::v2f64, Legal);
478 setOperationAction(ISD::FABS, MVT::v2f64, Legal);
479 setOperationAction(ISD::FSQRT, MVT::v2f64, Legal);
480 setOperationAction(ISD::FRINT, MVT::v2f64, Legal);
481 setOperationAction(ISD::FNEARBYINT, MVT::v2f64, Legal);
482 setOperationAction(ISD::FFLOOR, MVT::v2f64, Legal);
483 setOperationAction(ISD::FCEIL, MVT::v2f64, Legal);
484 setOperationAction(ISD::FTRUNC, MVT::v2f64, Legal);
485 setOperationAction(ISD::FROUND, MVT::v2f64, Legal);
486
487 // Handle constrained floating-point operations.
488 setOperationAction(ISD::STRICT_FADD, MVT::v2f64, Legal);
489 setOperationAction(ISD::STRICT_FSUB, MVT::v2f64, Legal);
490 setOperationAction(ISD::STRICT_FMUL, MVT::v2f64, Legal);
491 setOperationAction(ISD::STRICT_FMA, MVT::v2f64, Legal);
492 setOperationAction(ISD::STRICT_FDIV, MVT::v2f64, Legal);
493 setOperationAction(ISD::STRICT_FSQRT, MVT::v2f64, Legal);
494 setOperationAction(ISD::STRICT_FRINT, MVT::v2f64, Legal);
495 setOperationAction(ISD::STRICT_FNEARBYINT, MVT::v2f64, Legal);
496 setOperationAction(ISD::STRICT_FFLOOR, MVT::v2f64, Legal);
497 setOperationAction(ISD::STRICT_FCEIL, MVT::v2f64, Legal);
498 setOperationAction(ISD::STRICT_FTRUNC, MVT::v2f64, Legal);
499 setOperationAction(ISD::STRICT_FROUND, MVT::v2f64, Legal);
500 }
501
502 // The vector enhancements facility 1 has instructions for these.
503 if (Subtarget.hasVectorEnhancements1()) {
504 setOperationAction(ISD::FADD, MVT::v4f32, Legal);
505 setOperationAction(ISD::FNEG, MVT::v4f32, Legal);
506 setOperationAction(ISD::FSUB, MVT::v4f32, Legal);
507 setOperationAction(ISD::FMUL, MVT::v4f32, Legal);
508 setOperationAction(ISD::FMA, MVT::v4f32, Legal);
509 setOperationAction(ISD::FDIV, MVT::v4f32, Legal);
510 setOperationAction(ISD::FABS, MVT::v4f32, Legal);
511 setOperationAction(ISD::FSQRT, MVT::v4f32, Legal);
512 setOperationAction(ISD::FRINT, MVT::v4f32, Legal);
513 setOperationAction(ISD::FNEARBYINT, MVT::v4f32, Legal);
514 setOperationAction(ISD::FFLOOR, MVT::v4f32, Legal);
515 setOperationAction(ISD::FCEIL, MVT::v4f32, Legal);
516 setOperationAction(ISD::FTRUNC, MVT::v4f32, Legal);
517 setOperationAction(ISD::FROUND, MVT::v4f32, Legal);
518
519 setOperationAction(ISD::FMAXNUM, MVT::f64, Legal);
520 setOperationAction(ISD::FMAXIMUM, MVT::f64, Legal);
521 setOperationAction(ISD::FMINNUM, MVT::f64, Legal);
522 setOperationAction(ISD::FMINIMUM, MVT::f64, Legal);
523
524 setOperationAction(ISD::FMAXNUM, MVT::v2f64, Legal);
525 setOperationAction(ISD::FMAXIMUM, MVT::v2f64, Legal);
526 setOperationAction(ISD::FMINNUM, MVT::v2f64, Legal);
527 setOperationAction(ISD::FMINIMUM, MVT::v2f64, Legal);
528
529 setOperationAction(ISD::FMAXNUM, MVT::f32, Legal);
530 setOperationAction(ISD::FMAXIMUM, MVT::f32, Legal);
531 setOperationAction(ISD::FMINNUM, MVT::f32, Legal);
532 setOperationAction(ISD::FMINIMUM, MVT::f32, Legal);
533
534 setOperationAction(ISD::FMAXNUM, MVT::v4f32, Legal);
535 setOperationAction(ISD::FMAXIMUM, MVT::v4f32, Legal);
536 setOperationAction(ISD::FMINNUM, MVT::v4f32, Legal);
537 setOperationAction(ISD::FMINIMUM, MVT::v4f32, Legal);
538
539 setOperationAction(ISD::FMAXNUM, MVT::f128, Legal);
540 setOperationAction(ISD::FMAXIMUM, MVT::f128, Legal);
541 setOperationAction(ISD::FMINNUM, MVT::f128, Legal);
542 setOperationAction(ISD::FMINIMUM, MVT::f128, Legal);
543
544 // Handle constrained floating-point operations.
545 setOperationAction(ISD::STRICT_FADD, MVT::v4f32, Legal);
546 setOperationAction(ISD::STRICT_FSUB, MVT::v4f32, Legal);
547 setOperationAction(ISD::STRICT_FMUL, MVT::v4f32, Legal);
548 setOperationAction(ISD::STRICT_FMA, MVT::v4f32, Legal);
549 setOperationAction(ISD::STRICT_FDIV, MVT::v4f32, Legal);
550 setOperationAction(ISD::STRICT_FSQRT, MVT::v4f32, Legal);
551 setOperationAction(ISD::STRICT_FRINT, MVT::v4f32, Legal);
552 setOperationAction(ISD::STRICT_FNEARBYINT, MVT::v4f32, Legal);
553 setOperationAction(ISD::STRICT_FFLOOR, MVT::v4f32, Legal);
554 setOperationAction(ISD::STRICT_FCEIL, MVT::v4f32, Legal);
555 setOperationAction(ISD::STRICT_FROUND, MVT::v4f32, Legal);
556 setOperationAction(ISD::STRICT_FTRUNC, MVT::v4f32, Legal);
557 for (auto VT : { MVT::f32, MVT::f64, MVT::f128,
558 MVT::v4f32, MVT::v2f64 }) {
559 setOperationAction(ISD::STRICT_FMAXNUM, VT, Legal);
560 setOperationAction(ISD::STRICT_FMINNUM, VT, Legal);
561 }
562 }
563
564 // We have fused multiply-addition for f32 and f64 but not f128.
565 setOperationAction(ISD::FMA, MVT::f32, Legal);
566 setOperationAction(ISD::FMA, MVT::f64, Legal);
567 if (Subtarget.hasVectorEnhancements1())
568 setOperationAction(ISD::FMA, MVT::f128, Legal);
569 else
570 setOperationAction(ISD::FMA, MVT::f128, Expand);
571
572 // We don't have a copysign instruction on vector registers.
573 if (Subtarget.hasVectorEnhancements1())
574 setOperationAction(ISD::FCOPYSIGN, MVT::f128, Expand);
575
576 // Needed so that we don't try to implement f128 constant loads using
577 // a load-and-extend of a f80 constant (in cases where the constant
578 // would fit in an f80).
579 for (MVT VT : MVT::fp_valuetypes())
580 setLoadExtAction(ISD::EXTLOAD, VT, MVT::f80, Expand);
581
582 // We don't have extending load instruction on vector registers.
583 if (Subtarget.hasVectorEnhancements1()) {
584 setLoadExtAction(ISD::EXTLOAD, MVT::f128, MVT::f32, Expand);
585 setLoadExtAction(ISD::EXTLOAD, MVT::f128, MVT::f64, Expand);
586 }
587
588 // Floating-point truncation and stores need to be done separately.
589 setTruncStoreAction(MVT::f64, MVT::f32, Expand);
590 setTruncStoreAction(MVT::f128, MVT::f32, Expand);
591 setTruncStoreAction(MVT::f128, MVT::f64, Expand);
592
593 // We have 64-bit FPR<->GPR moves, but need special handling for
594 // 32-bit forms.
595 if (!Subtarget.hasVector()) {
596 setOperationAction(ISD::BITCAST, MVT::i32, Custom);
597 setOperationAction(ISD::BITCAST, MVT::f32, Custom);
598 }
599
600 // VASTART and VACOPY need to deal with the SystemZ-specific varargs
601 // structure, but VAEND is a no-op.
602 setOperationAction(ISD::VASTART, MVT::Other, Custom);
603 setOperationAction(ISD::VACOPY, MVT::Other, Custom);
604 setOperationAction(ISD::VAEND, MVT::Other, Expand);
605
606 // Codes for which we want to perform some z-specific combinations.
607 setTargetDAGCombine(ISD::ZERO_EXTEND);
608 setTargetDAGCombine(ISD::SIGN_EXTEND);
609 setTargetDAGCombine(ISD::SIGN_EXTEND_INREG);
610 setTargetDAGCombine(ISD::LOAD);
611 setTargetDAGCombine(ISD::STORE);
612 setTargetDAGCombine(ISD::VECTOR_SHUFFLE);
613 setTargetDAGCombine(ISD::EXTRACT_VECTOR_ELT);
614 setTargetDAGCombine(ISD::FP_ROUND);
615 setTargetDAGCombine(ISD::FP_EXTEND);
616 setTargetDAGCombine(ISD::BSWAP);
617 setTargetDAGCombine(ISD::SDIV);
618 setTargetDAGCombine(ISD::UDIV);
619 setTargetDAGCombine(ISD::SREM);
620 setTargetDAGCombine(ISD::UREM);
621
622 // Handle intrinsics.
623 setOperationAction(ISD::INTRINSIC_W_CHAIN, MVT::Other, Custom);
624 setOperationAction(ISD::INTRINSIC_WO_CHAIN, MVT::Other, Custom);
625
626 // We want to use MVC in preference to even a single load/store pair.
627 MaxStoresPerMemcpy = 0;
628 MaxStoresPerMemcpyOptSize = 0;
629
630 // The main memset sequence is a byte store followed by an MVC.
631 // Two STC or MV..I stores win over that, but the kind of fused stores
632 // generated by target-independent code don't when the byte value is
633 // variable. E.g. "STC <reg>;MHI <reg>,257;STH <reg>" is not better
634 // than "STC;MVC". Handle the choice in target-specific code instead.
635 MaxStoresPerMemset = 0;
636 MaxStoresPerMemsetOptSize = 0;
637}
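
The constructor above is one long sequence of setOperationAction calls. Conceptually they fill a per-(opcode, value type) action table that the SelectionDAG legalizer consults later. A minimal standalone sketch of that table pattern, using hypothetical opcode/type ids rather than LLVM's real data structures:

  #include <cstdio>
  #include <map>
  #include <utility>

  enum Action { Legal, Promote, Expand, Custom }; // mirrors the four actions used above

  struct ActionTable {
    std::map<std::pair<int, int>, Action> Table;  // (opcode, type) -> action

    void setOperationAction(int Op, int VT, Action A) { Table[{Op, VT}] = A; }

    // Unlisted combinations default to Legal in this sketch.
    Action getOperationAction(int Op, int VT) const {
      auto It = Table.find({Op, VT});
      return It == Table.end() ? Legal : It->second;
    }
  };

  int main() {
    enum { ISD_SETCC, ISD_SELECT };               // hypothetical opcode ids
    enum { MVT_i32 };                             // hypothetical type id
    ActionTable TLI;
    TLI.setOperationAction(ISD_SETCC, MVT_i32, Custom);  // "lower it in target code"
    TLI.setOperationAction(ISD_SELECT, MVT_i32, Expand); // "rewrite it generically"
    std::printf("SETCC on i32 -> action %d\n",
                TLI.getOperationAction(ISD_SETCC, MVT_i32)); // prints 3 (Custom)
  }

During legalization, Expand nodes are rewritten into other nodes (e.g. SELECT into SELECT_CC, as the comment at line 136 says), Promote nodes are widened to a larger type, and Custom nodes are handed to the target's LowerOperation hook.
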
638
639EVT SystemZTargetLowering::getSetCCResultType(const DataLayout &DL,
640 LLVMContext &, EVT VT) const {
641 if (!VT.isVector())
642 return MVT::i32;
643 return VT.changeVectorElementTypeToInteger();
644}
645
646bool SystemZTargetLowering::isFMAFasterThanFMulAndFAdd(EVT VT) const {
647 VT = VT.getScalarType();
648
649 if (!VT.isSimple())
650 return false;
651
652 switch (VT.getSimpleVT().SimpleTy) {
653 case MVT::f32:
654 case MVT::f64:
655 return true;
656 case MVT::f128:
657 return Subtarget.hasVectorEnhancements1();
658 default:
659 break;
660 }
661
662 return false;
663}
664
665// Return true if the constant can be generated with a vector instruction,
666// such as VGM, VGMB or VREPI.
667bool SystemZVectorConstantInfo::isVectorConstantLegal(
668 const SystemZSubtarget &Subtarget) {
669 const SystemZInstrInfo *TII =
670 static_cast<const SystemZInstrInfo *>(Subtarget.getInstrInfo());
671 if (!Subtarget.hasVector() ||
672 (isFP128 && !Subtarget.hasVectorEnhancements1()))
673 return false;
674
675 // Try using VECTOR GENERATE BYTE MASK. This is the architecturally-
676 // preferred way of creating all-zero and all-one vectors so give it
677 // priority over other methods below.
678 unsigned Mask = 0;
679 unsigned I = 0;
680 for (; I < SystemZ::VectorBytes; ++I) {
681 uint64_t Byte = IntBits.lshr(I * 8).trunc(8).getZExtValue();
682 if (Byte == 0xff)
683 Mask |= 1ULL << I;
684 else if (Byte != 0)
685 break;
686 }
687 if (I == SystemZ::VectorBytes) {
688 Opcode = SystemZISD::BYTE_MASK;
689 OpVals.push_back(Mask);
690 VecVT = MVT::getVectorVT(MVT::getIntegerVT(8), 16);
691 return true;
692 }
693
694 if (SplatBitSize > 64)
695 return false;
696
697 auto tryValue = [&](uint64_t Value) -> bool {
698 // Try VECTOR REPLICATE IMMEDIATE
699 int64_t SignedValue = SignExtend64(Value, SplatBitSize);
700 if (isInt<16>(SignedValue)) {
701 OpVals.push_back(((unsigned) SignedValue));
702 Opcode = SystemZISD::REPLICATE;
703 VecVT = MVT::getVectorVT(MVT::getIntegerVT(SplatBitSize),
704 SystemZ::VectorBits / SplatBitSize);
705 return true;
706 }
707 // Try VECTOR GENERATE MASK
708 unsigned Start, End;
709 if (TII->isRxSBGMask(Value, SplatBitSize, Start, End)) {
710 // isRxSBGMask returns the bit numbers for a full 64-bit value, with 0
711 // denoting 1 << 63 and 63 denoting 1. Convert them to bit numbers for
712 // an SplatBitSize value, so that 0 denotes 1 << (SplatBitSize-1).
713 OpVals.push_back(Start - (64 - SplatBitSize));
714 OpVals.push_back(End - (64 - SplatBitSize));
715 Opcode = SystemZISD::ROTATE_MASK;
716 VecVT = MVT::getVectorVT(MVT::getIntegerVT(SplatBitSize),
717 SystemZ::VectorBits / SplatBitSize);
718 return true;
719 }
720 return false;
721 };
722
723 // First try assuming that any undefined bits above the highest set bit
724 // and below the lowest set bit are 1s. This increases the likelihood of
725 // being able to use a sign-extended element value in VECTOR REPLICATE
726 // IMMEDIATE or a wraparound mask in VECTOR GENERATE MASK.
727 uint64_t SplatBitsZ = SplatBits.getZExtValue();
728 uint64_t SplatUndefZ = SplatUndef.getZExtValue();
729 uint64_t Lower =
730 (SplatUndefZ & ((uint64_t(1) << findFirstSet(SplatBitsZ)) - 1));
731 uint64_t Upper =
732 (SplatUndefZ & ~((uint64_t(1) << findLastSet(SplatBitsZ)) - 1));
733 if (tryValue(SplatBitsZ | Upper | Lower))
734 return true;
735
736 // Now try assuming that any undefined bits between the first and
737 // last defined set bits are set. This increases the chances of
738 // using a non-wraparound mask.
739 uint64_t Middle = SplatUndefZ & ~Upper & ~Lower;
740 return tryValue(SplatBitsZ | Middle);
741}
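
A standalone worked example (plain C++, not LLVM's APInt code) of the two undef-fill attempts made at the end of isVectorConstantLegal:

  #include <cstdint>
  #include <cstdio>

  // Portable stand-ins for llvm::findFirstSet / findLastSet on a nonzero value.
  static unsigned firstSet(uint64_t V) { unsigned I = 0; while (!(V & 1)) { V >>= 1; ++I; } return I; }
  static unsigned lastSet(uint64_t V)  { unsigned I = 0; while (V >>= 1) ++I; return I; }

  int main() {
    uint64_t SplatBits  = 0x18; // defined set bits: 0b00011000
    uint64_t SplatUndef = 0xC3; // undefined bits:   0b11000011

    // Attempt 1: treat undef bits below the lowest set bit and above the
    // highest set bit as 1s (helps sign-extended VREPI / wraparound VGM).
    uint64_t Lower = SplatUndef & ((uint64_t(1) << firstSet(SplatBits)) - 1);
    uint64_t Upper = SplatUndef & ~((uint64_t(1) << lastSet(SplatBits)) - 1);
    std::printf("attempt 1 value: 0x%llx\n",
                (unsigned long long)(SplatBits | Upper | Lower)); // 0xdb

    // Attempt 2: set only the undef bits strictly between the outermost set
    // bits, hoping for a contiguous (non-wraparound) mask.
    uint64_t Middle = SplatUndef & ~Upper & ~Lower;
    std::printf("attempt 2 value: 0x%llx\n",
                (unsigned long long)(SplatBits | Middle)); // 0x18
  }
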
742
743SystemZVectorConstantInfo::SystemZVectorConstantInfo(APFloat FPImm) {
744 IntBits = FPImm.bitcastToAPInt().zextOrSelf(128);
745 isFP128 = (&FPImm.getSemantics() == &APFloat::IEEEquad());
746
747 // Find the smallest splat.
748 SplatBits = FPImm.bitcastToAPInt();
749 unsigned Width = SplatBits.getBitWidth();
750 while (Width > 8) {
751 unsigned HalfSize = Width / 2;
752 APInt HighValue = SplatBits.lshr(HalfSize).trunc(HalfSize);
753 APInt LowValue = SplatBits.trunc(HalfSize);
754
755 // If the two halves do not match, stop here.
756 if (HighValue != LowValue || 8 > HalfSize)
757 break;
758
759 SplatBits = HighValue;
760 Width = HalfSize;
761 }
762 SplatUndef = 0;
763 SplatBitSize = Width;
764}
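
The halving loop above finds the smallest power-of-two splat width. A hypothetical standalone sketch with a plain 64-bit value in place of APInt:

  #include <cstdint>
  #include <cstdio>

  int main() {
    // 0x0001000100010001 splits into matching halves twice, so the smallest
    // splat is the 16-bit value 0x0001.
    uint64_t Bits = 0x0001000100010001ULL;
    unsigned Width = 64;
    while (Width > 8) {
      unsigned Half = Width / 2;
      uint64_t Mask = (uint64_t(1) << Half) - 1; // Half is at most 32 here
      uint64_t High = (Bits >> Half) & Mask;
      uint64_t Low  = Bits & Mask;
      if (High != Low || Half < 8) // halves differ, or below the 8-bit floor
        break;
      Bits = High;
      Width = Half;
    }
    std::printf("smallest splat: width %u, value 0x%llx\n",
                Width, (unsigned long long)Bits); // width 16, value 0x1
  }
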
765
766SystemZVectorConstantInfo::SystemZVectorConstantInfo(BuildVectorSDNode *BVN) {
767 assert(BVN->isConstant() && "Expected a constant BUILD_VECTOR");
768 bool HasAnyUndefs;
769
770 // Get IntBits by finding the 128 bit splat.
771 BVN->isConstantSplat(IntBits, SplatUndef, SplatBitSize, HasAnyUndefs, 128,
772 true);
773
774 // Get SplatBits by finding the 8 bit or greater splat.
775 BVN->isConstantSplat(SplatBits, SplatUndef, SplatBitSize, HasAnyUndefs, 8,
776 true);
777}
778
779bool SystemZTargetLowering::isFPImmLegal(const APFloat &Imm, EVT VT,
780 bool ForCodeSize) const {
781 // We can load zero using LZ?R and negative zero using LZ?R;LC?BR.
782 if (Imm.isZero() || Imm.isNegZero())
783 return true;
784
785 return SystemZVectorConstantInfo(Imm).isVectorConstantLegal(Subtarget);
786}
787
788bool SystemZTargetLowering::isLegalICmpImmediate(int64_t Imm) const {
789 // We can use CGFI or CLGFI.
790 return isInt<32>(Imm) || isUInt<32>(Imm);
791}
792
793bool SystemZTargetLowering::isLegalAddImmediate(int64_t Imm) const {
794 // We can use ALGFI or SLGFI.
795 return isUInt<32>(Imm) || isUInt<32>(-Imm);
796}
797
798bool SystemZTargetLowering::allowsMisalignedMemoryAccesses(
799 EVT VT, unsigned, unsigned, MachineMemOperand::Flags, bool *Fast) const {
800 // Unaligned accesses should never be slower than the expanded version.
801 // We check specifically for aligned accesses in the few cases where
802 // they are required.
803 if (Fast)
804 *Fast = true;
805 return true;
806}
807
808// Information about the addressing mode for a memory access.
809struct AddressingMode {
810 // True if a long displacement is supported.
811 bool LongDisplacement;
812
813 // True if use of index register is supported.
814 bool IndexReg;
815
816 AddressingMode(bool LongDispl, bool IdxReg) :
817 LongDisplacement(LongDispl), IndexReg(IdxReg) {}
818};
819
820// Return the desired addressing mode for a Load which has only one use (in
821// the same block) which is a Store.
822static AddressingMode getLoadStoreAddrMode(bool HasVector,
823 Type *Ty) {
824 // With vector support a Load->Store combination may be combined to either
825 // an MVC or vector operations and it seems to work best to allow the
826 // vector addressing mode.
827 if (HasVector)
828 return AddressingMode(false/*LongDispl*/, true/*IdxReg*/);
829
830 // Otherwise only the MVC case is special.
831 bool MVC = Ty->isIntegerTy(8);
832 return AddressingMode(!MVC/*LongDispl*/, !MVC/*IdxReg*/);
833}
834
835// Return the addressing mode which seems most desirable given an LLVM
836// Instruction pointer.
837static AddressingMode
838supportedAddressingMode(Instruction *I, bool HasVector) {
839 if (IntrinsicInst *II = dyn_cast<IntrinsicInst>(I)) {
840 switch (II->getIntrinsicID()) {
841 default: break;
842 case Intrinsic::memset:
843 case Intrinsic::memmove:
844 case Intrinsic::memcpy:
845 return AddressingMode(false/*LongDispl*/, false/*IdxReg*/);
846 }
847 }
848
849 if (isa<LoadInst>(I) && I->hasOneUse()) {
850 auto *SingleUser = cast<Instruction>(*I->user_begin());
851 if (SingleUser->getParent() == I->getParent()) {
852 if (isa<ICmpInst>(SingleUser)) {
853 if (auto *C = dyn_cast<ConstantInt>(SingleUser->getOperand(1)))
854 if (C->getBitWidth() <= 64 &&
855 (isInt<16>(C->getSExtValue()) || isUInt<16>(C->getZExtValue())))
856 // Comparison of memory with 16 bit signed / unsigned immediate
857 return AddressingMode(false/*LongDispl*/, false/*IdxReg*/);
858 } else if (isa<StoreInst>(SingleUser))
859 // Load->Store
860 return getLoadStoreAddrMode(HasVector, I->getType());
861 }
862 } else if (auto *StoreI = dyn_cast<StoreInst>(I)) {
863 if (auto *LoadI = dyn_cast<LoadInst>(StoreI->getValueOperand()))
864 if (LoadI->hasOneUse() && LoadI->getParent() == I->getParent())
865 // Load->Store
866 return getLoadStoreAddrMode(HasVector, LoadI->getType());
867 }
868
869 if (HasVector && (isa<LoadInst>(I) || isa<StoreInst>(I))) {
870
871 // * Use LDE instead of LE/LEY for z13 to avoid partial register
872 // dependencies (LDE only supports small offsets).
873 // * Utilize the vector registers to hold floating point
874 // values (vector load / store instructions only support small
875 // offsets).
876
877 Type *MemAccessTy = (isa<LoadInst>(I) ? I->getType() :
878 I->getOperand(0)->getType());
879 bool IsFPAccess = MemAccessTy->isFloatingPointTy();
880 bool IsVectorAccess = MemAccessTy->isVectorTy();
881
882 // A store of an extracted vector element will be combined into a VSTE type
883 // instruction.
884 if (!IsVectorAccess && isa<StoreInst>(I)) {
885 Value *DataOp = I->getOperand(0);
886 if (isa<ExtractElementInst>(DataOp))
887 IsVectorAccess = true;
888 }
889
890 // A load which gets inserted into a vector element will be combined into a
891 // VLE type instruction.
892 if (!IsVectorAccess && isa<LoadInst>(I) && I->hasOneUse()) {
893 User *LoadUser = *I->user_begin();
894 if (isa<InsertElementInst>(LoadUser))
895 IsVectorAccess = true;
896 }
897
898 if (IsFPAccess || IsVectorAccess)
899 return AddressingMode(false/*LongDispl*/, true/*IdxReg*/);
900 }
901
902 return AddressingMode(true/*LongDispl*/, true/*IdxReg*/);
903}
904
905bool SystemZTargetLowering::isLegalAddressingMode(const DataLayout &DL,
906 const AddrMode &AM, Type *Ty, unsigned AS, Instruction *I) const {
907 // Punt on globals for now, although they can be used in limited
908 // RELATIVE LONG cases.
909 if (AM.BaseGV)
910 return false;
911
912 // Require a 20-bit signed offset.
913 if (!isInt<20>(AM.BaseOffs))
914 return false;
915
916 AddressingMode SupportedAM(true, true);
917 if (I != nullptr)
918 SupportedAM = supportedAddressingMode(I, Subtarget.hasVector());
919
920 if (!SupportedAM.LongDisplacement && !isUInt<12>(AM.BaseOffs))
921 return false;
922
923 if (!SupportedAM.IndexReg)
924 // No indexing allowed.
925 return AM.Scale == 0;
926 else
927 // Indexing is OK but no scale factor can be applied.
928 return AM.Scale == 0 || AM.Scale == 1;
929}
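
The isInt<20> test above enforces the 20-bit signed D field of the z/Architecture long-displacement address forms; short-displacement forms only accept an unsigned 12-bit field, hence the isUInt<12> fallback. A minimal sketch of the range check (portable C++, not LLVM's templates):

  #include <cstdint>
  #include <cstdio>

  // Does X fit in an N-bit signed field? Equivalent in spirit to llvm::isInt<N>.
  template <unsigned N> bool fitsSigned(int64_t X) {
    return X >= -(int64_t(1) << (N - 1)) && X < (int64_t(1) << (N - 1));
  }

  int main() {
    std::printf("%d %d %d\n",
                fitsSigned<20>(524287),   // 1: largest representable offset
                fitsSigned<20>(-524288),  // 1: smallest representable offset
                fitsSigned<20>(524288));  // 0: isLegalAddressingMode rejects it
  }
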
930
931bool SystemZTargetLowering::isTruncateFree(Type *FromType, Type *ToType) const {
932 if (!FromType->isIntegerTy() || !ToType->isIntegerTy())
933 return false;
934 unsigned FromBits = FromType->getPrimitiveSizeInBits();
935 unsigned ToBits = ToType->getPrimitiveSizeInBits();
936 return FromBits > ToBits;
937}
938
939bool SystemZTargetLowering::isTruncateFree(EVT FromVT, EVT ToVT) const {
940 if (!FromVT.isInteger() || !ToVT.isInteger())
941 return false;
942 unsigned FromBits = FromVT.getSizeInBits();
943 unsigned ToBits = ToVT.getSizeInBits();
944 return FromBits > ToBits;
945}
946
947//===----------------------------------------------------------------------===//
948// Inline asm support
949//===----------------------------------------------------------------------===//
950
951TargetLowering::ConstraintType
952SystemZTargetLowering::getConstraintType(StringRef Constraint) const {
953 if (Constraint.size() == 1) {
954 switch (Constraint[0]) {
955 case 'a': // Address register
956 case 'd': // Data register (equivalent to 'r')
957 case 'f': // Floating-point register
958 case 'h': // High-part register
959 case 'r': // General-purpose register
960 case 'v': // Vector register
961 return C_RegisterClass;
962
963 case 'Q': // Memory with base and unsigned 12-bit displacement
964 case 'R': // Likewise, plus an index
965 case 'S': // Memory with base and signed 20-bit displacement
966 case 'T': // Likewise, plus an index
967 case 'm': // Equivalent to 'T'.
968 return C_Memory;
969
970 case 'I': // Unsigned 8-bit constant
971 case 'J': // Unsigned 12-bit constant
972 case 'K': // Signed 16-bit constant
973 case 'L': // Signed 20-bit displacement (on all targets we support)
974 case 'M': // 0x7fffffff
975 return C_Immediate;
976
977 default:
978 break;
979 }
980 }
981 return TargetLowering::getConstraintType(Constraint);
982}
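
For reference, these constraint letters come from user-level inline asm. A small example using the 'd' (data register) constraint, which the switch above classifies as C_RegisterClass; it builds only when targeting s390x, and AGR is the 64-bit register-register add:

  #include <cstdio>

  int main() {
    long X = 40, Y = 2;
    asm("agr %0,%1" : "+d"(X) : "d"(Y)); // force both operands into GPRs
    std::printf("%ld\n", X);             // prints 42
  }
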
983
984TargetLowering::ConstraintWeight SystemZTargetLowering::
985getSingleConstraintMatchWeight(AsmOperandInfo &info,
986 const char *constraint) const {
987 ConstraintWeight weight = CW_Invalid;
988 Value *CallOperandVal = info.CallOperandVal;
989 // If we don't have a value, we can't do a match,
990 // but allow it at the lowest weight.
991 if (!CallOperandVal)
992 return CW_Default;
993 Type *type = CallOperandVal->getType();
994 // Look at the constraint type.
995 switch (*constraint) {
996 default:
997 weight = TargetLowering::getSingleConstraintMatchWeight(info, constraint);
998 break;
999
1000 case 'a': // Address register
1001 case 'd': // Data register (equivalent to 'r')
1002 case 'h': // High-part register
1003 case 'r': // General-purpose register
1004 if (CallOperandVal->getType()->isIntegerTy())
1005 weight = CW_Register;
1006 break;
1007
1008 case 'f': // Floating-point register
1009 if (type->isFloatingPointTy())
1010 weight = CW_Register;
1011 break;
1012
1013 case 'v': // Vector register
1014 if ((type->isVectorTy() || type->isFloatingPointTy()) &&
1015 Subtarget.hasVector())
1016 weight = CW_Register;
1017 break;
1018
1019 case 'I': // Unsigned 8-bit constant
1020 if (auto *C = dyn_cast<ConstantInt>(CallOperandVal))
1021 if (isUInt<8>(C->getZExtValue()))
1022 weight = CW_Constant;
1023 break;
1024
1025 case 'J': // Unsigned 12-bit constant
1026 if (auto *C = dyn_cast<ConstantInt>(CallOperandVal))
1027 if (isUInt<12>(C->getZExtValue()))
1028 weight = CW_Constant;
1029 break;
1030
1031 case 'K': // Signed 16-bit constant
1032 if (auto *C = dyn_cast<ConstantInt>(CallOperandVal))
1033 if (isInt<16>(C->getSExtValue()))
1034 weight = CW_Constant;
1035 break;
1036
1037 case 'L': // Signed 20-bit displacement (on all targets we support)
1038 if (auto *C = dyn_cast<ConstantInt>(CallOperandVal))
1039 if (isInt<20>(C->getSExtValue()))
1040 weight = CW_Constant;
1041 break;
1042
1043 case 'M': // 0x7fffffff
1044 if (auto *C = dyn_cast<ConstantInt>(CallOperandVal))
1045 if (C->getZExtValue() == 0x7fffffff)
1046 weight = CW_Constant;
1047 break;
1048 }
1049 return weight;
1050}
1051
1052// Parse a "{tNNN}" register constraint for which the register type "t"
1053// has already been verified. MC is the class associated with "t" and
1054// Map maps 0-based register numbers to LLVM register numbers.
1055static std::pair<unsigned, const TargetRegisterClass *>
1056parseRegisterNumber(StringRef Constraint, const TargetRegisterClass *RC,
1057 const unsigned *Map, unsigned Size) {
1058 assert(*(Constraint.end()-1) == '}' && "Missing '}'");
1059 if (isdigit(Constraint[2])) {
1060 unsigned Index;
1061 bool Failed =
1062 Constraint.slice(2, Constraint.size() - 1).getAsInteger(10, Index);
1063 if (!Failed && Index < Size && Map[Index])
1064 return std::make_pair(Map[Index], RC);
1065 }
1066 return std::make_pair(0U, nullptr);
1067}
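
A standalone sketch of the "{tNNN}" parsing that parseRegisterNumber performs, using std::string in place of StringRef (a hypothetical helper, not the LLVM version; the -1 result plays the role of the (0U, nullptr) pair, and the real code additionally requires a nonzero Map entry):

  #include <cctype>
  #include <cstdio>
  #include <string>

  static int parseRegNumber(const std::string &Constraint, unsigned Size) {
    // Expect "{", a type letter, at least one digit, then "}".
    if (Constraint.size() < 4 || Constraint.front() != '{' ||
        Constraint.back() != '}' || !isdigit((unsigned char)Constraint[2]))
      return -1;
    unsigned Index = 0;
    for (size_t I = 2; I + 1 < Constraint.size(); ++I) { // digits end before '}'
      if (!isdigit((unsigned char)Constraint[I]))
        return -1;
      Index = Index * 10 + (Constraint[I] - '0');
    }
    return Index < Size ? (int)Index : -1;
  }

  int main() {
    std::printf("%d %d %d\n",
                parseRegNumber("{r5}", 16),   //  5: valid register number
                parseRegNumber("{r19}", 16),  // -1: past the end of the map
                parseRegNumber("{rx}", 16));  // -1: not a register number
  }
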
1068
1069std::pair<unsigned, const TargetRegisterClass *>
1070SystemZTargetLowering::getRegForInlineAsmConstraint(
1071 const TargetRegisterInfo *TRI, StringRef Constraint, MVT VT) const {
1072 if (Constraint.size() == 1) {
1073 // GCC Constraint Letters
1074 switch (Constraint[0]) {
1075 default: break;
1076 case 'd': // Data register (equivalent to 'r')
1077 case 'r': // General-purpose register
1078 if (VT == MVT::i64)
1079 return std::make_pair(0U, &SystemZ::GR64BitRegClass);
1080 else if (VT == MVT::i128)
1081 return std::make_pair(0U, &SystemZ::GR128BitRegClass);
1082 return std::make_pair(0U, &SystemZ::GR32BitRegClass);
1083
1084 case 'a': // Address register
1085 if (VT == MVT::i64)
1086 return std::make_pair(0U, &SystemZ::ADDR64BitRegClass);
1087 else if (VT == MVT::i128)
1088 return std::make_pair(0U, &SystemZ::ADDR128BitRegClass);
1089 return std::make_pair(0U, &SystemZ::ADDR32BitRegClass);
1090
1091 case 'h': // High-part register (an LLVM extension)
1092 return std::make_pair(0U, &SystemZ::GRH32BitRegClass);
1093
1094 case 'f': // Floating-point register
1095 if (VT == MVT::f64)
1096 return std::make_pair(0U, &SystemZ::FP64BitRegClass);
1097 else if (VT == MVT::f128)
1098 return std::make_pair(0U, &SystemZ::FP128BitRegClass);
1099 return std::make_pair(0U, &SystemZ::FP32BitRegClass);
1100
1101 case 'v': // Vector register
1102 if (Subtarget.hasVector()) {
1103 if (VT == MVT::f32)
1104 return std::make_pair(0U, &SystemZ::VR32BitRegClass);
1105 if (VT == MVT::f64)
1106 return std::make_pair(0U, &SystemZ::VR64BitRegClass);
1107 return std::make_pair(0U, &SystemZ::VR128BitRegClass);
1108 }
1109 break;
1110 }
1111 }
1112 if (Constraint.size() > 0 && Constraint[0] == '{') {
1113 // We need to override the default register parsing for GPRs and FPRs
1114 // because the interpretation depends on VT. The internal names of
1115 // the registers are also different from the external names
1116 // (F0D and F0S instead of F0, etc.).
1117 if (Constraint[1] == 'r') {
1118 if (VT == MVT::i32)
1119 return parseRegisterNumber(Constraint, &SystemZ::GR32BitRegClass,
1120 SystemZMC::GR32Regs, 16);
1121 if (VT == MVT::i128)
1122 return parseRegisterNumber(Constraint, &SystemZ::GR128BitRegClass,
1123 SystemZMC::GR128Regs, 16);
1124 return parseRegisterNumber(Constraint, &SystemZ::GR64BitRegClass,
1125 SystemZMC::GR64Regs, 16);
1126 }
1127 if (Constraint[1] == 'f') {
1128 if (VT == MVT::f32)
1129 return parseRegisterNumber(Constraint, &SystemZ::FP32BitRegClass,
1130 SystemZMC::FP32Regs, 16);
1131 if (VT == MVT::f128)
1132 return parseRegisterNumber(Constraint, &SystemZ::FP128BitRegClass,
1133 SystemZMC::FP128Regs, 16);
1134 return parseRegisterNumber(Constraint, &SystemZ::FP64BitRegClass,
1135 SystemZMC::FP64Regs, 16);
1136 }
1137 if (Constraint[1] == 'v') {
1138 if (VT == MVT::f32)
1139 return parseRegisterNumber(Constraint, &SystemZ::VR32BitRegClass,
1140 SystemZMC::VR32Regs, 32);
1141 if (VT == MVT::f64)
1142 return parseRegisterNumber(Constraint, &SystemZ::VR64BitRegClass,
1143 SystemZMC::VR64Regs, 32);
1144 return parseRegisterNumber(Constraint, &SystemZ::VR128BitRegClass,
1145 SystemZMC::VR128Regs, 32);
1146 }
1147 }
1148 return TargetLowering::getRegForInlineAsmConstraint(TRI, Constraint, VT);
1149}
1150
1151void SystemZTargetLowering::
1152LowerAsmOperandForConstraint(SDValue Op, std::string &Constraint,
1153 std::vector<SDValue> &Ops,
1154 SelectionDAG &DAG) const {
1155 // Only support length 1 constraints for now.
1156 if (Constraint.length() == 1) {
1157 switch (Constraint[0]) {
1158 case 'I': // Unsigned 8-bit constant
1159 if (auto *C = dyn_cast<ConstantSDNode>(Op))
1160 if (isUInt<8>(C->getZExtValue()))
1161 Ops.push_back(DAG.getTargetConstant(C->getZExtValue(), SDLoc(Op),
1162 Op.getValueType()));
1163 return;
1164
1165 case 'J': // Unsigned 12-bit constant
1166 if (auto *C = dyn_cast<ConstantSDNode>(Op))
1167 if (isUInt<12>(C->getZExtValue()))
1168 Ops.push_back(DAG.getTargetConstant(C->getZExtValue(), SDLoc(Op),
1169 Op.getValueType()));
1170 return;
1171
1172 case 'K': // Signed 16-bit constant
1173 if (auto *C = dyn_cast<ConstantSDNode>(Op))
1174 if (isInt<16>(C->getSExtValue()))
1175 Ops.push_back(DAG.getTargetConstant(C->getSExtValue(), SDLoc(Op),
1176 Op.getValueType()));
1177 return;
1178
1179 case 'L': // Signed 20-bit displacement (on all targets we support)
1180 if (auto *C = dyn_cast<ConstantSDNode>(Op))
1181 if (isInt<20>(C->getSExtValue()))
1182 Ops.push_back(DAG.getTargetConstant(C->getSExtValue(), SDLoc(Op),
1183 Op.getValueType()));
1184 return;
1185
1186 case 'M': // 0x7fffffff
1187 if (auto *C = dyn_cast<ConstantSDNode>(Op))
1188 if (C->getZExtValue() == 0x7fffffff)
1189 Ops.push_back(DAG.getTargetConstant(C->getZExtValue(), SDLoc(Op),
1190 Op.getValueType()));
1191 return;
1192 }
1193 }
1194 TargetLowering::LowerAsmOperandForConstraint(Op, Constraint, Ops, DAG);
1195}
1196
1197//===----------------------------------------------------------------------===//
1198// Calling conventions
1199//===----------------------------------------------------------------------===//
1200
1201#include "SystemZGenCallingConv.inc"
1202
1203const MCPhysReg *SystemZTargetLowering::getScratchRegisters(
1204 CallingConv::ID) const {
1205 static const MCPhysReg ScratchRegs[] = { SystemZ::R0D, SystemZ::R1D,
1206 SystemZ::R14D, 0 };
1207 return ScratchRegs;
1208}
1209
1210bool SystemZTargetLowering::allowTruncateForTailCall(Type *FromType,
1211 Type *ToType) const {
1212 return isTruncateFree(FromType, ToType);
1213}
1214
1215bool SystemZTargetLowering::mayBeEmittedAsTailCall(const CallInst *CI) const {
1216 return CI->isTailCall();
1217}
1218
1219// We do not yet support 128-bit single-element vector types. If the user
1220// attempts to use such types as function argument or return type, prefer
1221// to error out instead of emitting code violating the ABI.
1222static void VerifyVectorType(MVT VT, EVT ArgVT) {
1223 if (ArgVT.isVector() && !VT.isVector())
1224 report_fatal_error("Unsupported vector argument or return type");
1225}
1226
1227static void VerifyVectorTypes(const SmallVectorImpl<ISD::InputArg> &Ins) {
1228 for (unsigned i = 0; i < Ins.size(); ++i)
1229 VerifyVectorType(Ins[i].VT, Ins[i].ArgVT);
1230}
1231
1232static void VerifyVectorTypes(const SmallVectorImpl<ISD::OutputArg> &Outs) {
1233 for (unsigned i = 0; i < Outs.size(); ++i)
1234 VerifyVectorType(Outs[i].VT, Outs[i].ArgVT);
1235}
1236
1237// Value is a value that has been passed to us in the location described by VA
1238// (and so has type VA.getLocVT()). Convert Value to VA.getValVT(), chaining
1239// any loads onto Chain.
1240static SDValue convertLocVTToValVT(SelectionDAG &DAG, const SDLoc &DL,
1241 CCValAssign &VA, SDValue Chain,
1242 SDValue Value) {
1243 // If the argument has been promoted from a smaller type, insert an
1244 // assertion to capture this.
1245 if (VA.getLocInfo() == CCValAssign::SExt)
1246 Value = DAG.getNode(ISD::AssertSext, DL, VA.getLocVT(), Value,
1247 DAG.getValueType(VA.getValVT()));
1248 else if (VA.getLocInfo() == CCValAssign::ZExt)
1249 Value = DAG.getNode(ISD::AssertZext, DL, VA.getLocVT(), Value,
1250 DAG.getValueType(VA.getValVT()));
1251
1252 if (VA.isExtInLoc())
1253 Value = DAG.getNode(ISD::TRUNCATE, DL, VA.getValVT(), Value);
1254 else if (VA.getLocInfo() == CCValAssign::BCvt) {
1255 // If this is a short vector argument loaded from the stack,
1256 // extend from i64 to full vector size and then bitcast.
1257 assert(VA.getLocVT() == MVT::i64);
1258 assert(VA.getValVT().isVector());
1259 Value = DAG.getBuildVector(MVT::v2i64, DL, {Value, DAG.getUNDEF(MVT::i64)});
1260 Value = DAG.getNode(ISD::BITCAST, DL, VA.getValVT(), Value);
1261 } else
1262 assert(VA.getLocInfo() == CCValAssign::Full && "Unsupported getLocInfo");
1263 return Value;
1264}
1265
1266// Value is a value of type VA.getValVT() that we need to copy into
1267// the location described by VA. Return a copy of Value converted to
1268// VA.getValVT(). The caller is responsible for handling indirect values.
1269static SDValue convertValVTToLocVT(SelectionDAG &DAG, const SDLoc &DL,
1270 CCValAssign &VA, SDValue Value) {
1271 switch (VA.getLocInfo()) {
1272 case CCValAssign::SExt:
1273 return DAG.getNode(ISD::SIGN_EXTEND, DL, VA.getLocVT(), Value);
1274 case CCValAssign::ZExt:
1275 return DAG.getNode(ISD::ZERO_EXTEND, DL, VA.getLocVT(), Value);
1276 case CCValAssign::AExt:
1277 return DAG.getNode(ISD::ANY_EXTEND, DL, VA.getLocVT(), Value);
1278 case CCValAssign::BCvt:
1279 // If this is a short vector argument to be stored to the stack,
1280 // bitcast to v2i64 and then extract first element.
1281 assert(VA.getLocVT() == MVT::i64);
1282 assert(VA.getValVT().isVector());
1283 Value = DAG.getNode(ISD::BITCAST, DL, MVT::v2i64, Value);
1284 return DAG.getNode(ISD::EXTRACT_VECTOR_ELT, DL, VA.getLocVT(), Value,
1285 DAG.getConstant(0, DL, MVT::i32));
1286 case CCValAssign::Full:
1287 return Value;
1288 default:
1289 llvm_unreachable("Unhandled getLocInfo()");
1290 }
1291}
1292
1293SDValue SystemZTargetLowering::LowerFormalArguments(
1294 SDValue Chain, CallingConv::ID CallConv, bool IsVarArg,
1295 const SmallVectorImpl<ISD::InputArg> &Ins, const SDLoc &DL,
1296 SelectionDAG &DAG, SmallVectorImpl<SDValue> &InVals) const {
1297 MachineFunction &MF = DAG.getMachineFunction();
1298 MachineFrameInfo &MFI = MF.getFrameInfo();
1299 MachineRegisterInfo &MRI = MF.getRegInfo();
1300 SystemZMachineFunctionInfo *FuncInfo =
1301 MF.getInfo<SystemZMachineFunctionInfo>();
1302 auto *TFL =
1303 static_cast<const SystemZFrameLowering *>(Subtarget.getFrameLowering());
1304 EVT PtrVT = getPointerTy(DAG.getDataLayout());
1305
1306 // Detect unsupported vector argument types.
1307 if (Subtarget.hasVector())
1308 VerifyVectorTypes(Ins);
1309
1310 // Assign locations to all of the incoming arguments.
1311 SmallVector<CCValAssign, 16> ArgLocs;
1312 SystemZCCState CCInfo(CallConv, IsVarArg, MF, ArgLocs, *DAG.getContext());
1313 CCInfo.AnalyzeFormalArguments(Ins, CC_SystemZ);
1314
1315 unsigned NumFixedGPRs = 0;
1316 unsigned NumFixedFPRs = 0;
1317 for (unsigned I = 0, E = ArgLocs.size(); I != E; ++I) {
1318 SDValue ArgValue;
1319 CCValAssign &VA = ArgLocs[I];
1320 EVT LocVT = VA.getLocVT();
1321 if (VA.isRegLoc()) {
1322 // Arguments passed in registers
1323 const TargetRegisterClass *RC;
1324 switch (LocVT.getSimpleVT().SimpleTy) {
1325 default:
1326 // Integers smaller than i64 should be promoted to i64.
1327 llvm_unreachable("Unexpected argument type");
1328 case MVT::i32:
1329 NumFixedGPRs += 1;
1330 RC = &SystemZ::GR32BitRegClass;
1331 break;
1332 case MVT::i64:
1333 NumFixedGPRs += 1;
1334 RC = &SystemZ::GR64BitRegClass;
1335 break;
1336 case MVT::f32:
1337 NumFixedFPRs += 1;
1338 RC = &SystemZ::FP32BitRegClass;
1339 break;
1340 case MVT::f64:
1341 NumFixedFPRs += 1;
1342 RC = &SystemZ::FP64BitRegClass;
1343 break;
1344 case MVT::v16i8:
1345 case MVT::v8i16:
1346 case MVT::v4i32:
1347 case MVT::v2i64:
1348 case MVT::v4f32:
1349 case MVT::v2f64:
1350 RC = &SystemZ::VR128BitRegClass;
1351 break;
1352 }
1353
1354 Register VReg = MRI.createVirtualRegister(RC);
1355 MRI.addLiveIn(VA.getLocReg(), VReg);
1356 ArgValue = DAG.getCopyFromReg(Chain, DL, VReg, LocVT);
1357 } else {
1358 assert(VA.isMemLoc() && "Argument not register or memory");
1359
1360 // Create the frame index object for this incoming parameter.
1361 int FI = MFI.CreateFixedObject(LocVT.getSizeInBits() / 8,
1362 VA.getLocMemOffset(), true);
1363
1364 // Create the SelectionDAG nodes corresponding to a load
1365 // from this parameter. Unpromoted ints and floats are
1366 // passed as right-justified 8-byte values.
1367 SDValue FIN = DAG.getFrameIndex(FI, PtrVT);
1368 if (VA.getLocVT() == MVT::i32 || VA.getLocVT() == MVT::f32)
1369 FIN = DAG.getNode(ISD::ADD, DL, PtrVT, FIN,
1370 DAG.getIntPtrConstant(4, DL));
1371 ArgValue = DAG.getLoad(LocVT, DL, Chain, FIN,
1372 MachinePointerInfo::getFixedStack(MF, FI));
1373 }
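// [Editor's note] Hedged illustration of the right-justification rule in
// the load above: on this big-endian target a memory i32/f32 argument
// occupies an 8-byte slot with the 4 significant bytes at the higher
// addresses (offsets +4..+7 of the slot), so the frame-index address is
// advanced by 4 before the 4-byte load.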
1374
1375 // Convert the value of the argument register into the value that's
1376 // being passed.
1377 if (VA.getLocInfo() == CCValAssign::Indirect) {
1378 InVals.push_back(DAG.getLoad(VA.getValVT(), DL, Chain, ArgValue,
1379 MachinePointerInfo()));
1380 // If the original argument was split (e.g. i128), we need
1381 // to load all parts of it here (using the same address).
1382 unsigned ArgIndex = Ins[I].OrigArgIndex;
1383 assert(Ins[I].PartOffset == 0);
1384 while (I + 1 != E && Ins[I + 1].OrigArgIndex == ArgIndex) {
1385 CCValAssign &PartVA = ArgLocs[I + 1];
1386 unsigned PartOffset = Ins[I + 1].PartOffset;
1387 SDValue Address = DAG.getNode(ISD::ADD, DL, PtrVT, ArgValue,
1388 DAG.getIntPtrConstant(PartOffset, DL));
1389 InVals.push_back(DAG.getLoad(PartVA.getValVT(), DL, Chain, Address,
1390 MachinePointerInfo()));
1391 ++I;
1392 }
1393 } else
1394 InVals.push_back(convertLocVTToValVT(DAG, DL, VA, Chain, ArgValue));
1395 }
1396
1397 if (IsVarArg) {
1398 // Save the number of non-varargs registers for later use by va_start, etc.
1399 FuncInfo->setVarArgsFirstGPR(NumFixedGPRs);
1400 FuncInfo->setVarArgsFirstFPR(NumFixedFPRs);
1401
1402 // Likewise the address (in the form of a frame index) of where the
1403 // first stack vararg would be. The 1-byte size here is arbitrary.
1404 int64_t StackSize = CCInfo.getNextStackOffset();
1405 FuncInfo->setVarArgsFrameIndex(MFI.CreateFixedObject(1, StackSize, true));
1406
1407 // ...and a similar frame index for the caller-allocated save area
1408 // that will be used to store the incoming registers.
1409 int64_t RegSaveOffset = TFL->getOffsetOfLocalArea();
1410 unsigned RegSaveIndex = MFI.CreateFixedObject(1, RegSaveOffset, true);
1411 FuncInfo->setRegSaveFrameIndex(RegSaveIndex);
1412
1413 // Store the FPR varargs in the reserved frame slots. (We store the
1414 // GPRs as part of the prologue.)
1415 if (NumFixedFPRs < SystemZ::NumArgFPRs) {
1416 SDValue MemOps[SystemZ::NumArgFPRs];
1417 for (unsigned I = NumFixedFPRs; I < SystemZ::NumArgFPRs; ++I) {
1418 unsigned Offset = TFL->getRegSpillOffset(SystemZ::ArgFPRs[I]);
1419 int FI = MFI.CreateFixedObject(8, RegSaveOffset + Offset, true);
1420 SDValue FIN = DAG.getFrameIndex(FI, getPointerTy(DAG.getDataLayout()));
1421 unsigned VReg = MF.addLiveIn(SystemZ::ArgFPRs[I],
1422 &SystemZ::FP64BitRegClass);
1423 SDValue ArgValue = DAG.getCopyFromReg(Chain, DL, VReg, MVT::f64);
1424 MemOps[I] = DAG.getStore(ArgValue.getValue(1), DL, ArgValue, FIN,
1425 MachinePointerInfo::getFixedStack(MF, FI));
1426 }
1427 // Join the stores, which are independent of one another.
1428 Chain = DAG.getNode(ISD::TokenFactor, DL, MVT::Other,
1429 makeArrayRef(&MemOps[NumFixedFPRs],
1430 SystemZ::NumArgFPRs-NumFixedFPRs));
1431 }
1432 }
1433
1434 return Chain;
1435}
1436
1437static bool canUseSiblingCall(const CCState &ArgCCInfo,
1438 SmallVectorImpl<CCValAssign> &ArgLocs,
1439 SmallVectorImpl<ISD::OutputArg> &Outs) {
1440 // Punt if there are any indirect or stack arguments, or if the call
1441 // needs the callee-saved argument register R6, or if the call uses
1442 // the callee-saved register arguments SwiftSelf and SwiftError.
1443 for (unsigned I = 0, E = ArgLocs.size(); I != E; ++I) {
1444 CCValAssign &VA = ArgLocs[I];
1445 if (VA.getLocInfo() == CCValAssign::Indirect)
1446 return false;
1447 if (!VA.isRegLoc())
1448 return false;
1449 Register Reg = VA.getLocReg();
1450 if (Reg == SystemZ::R6H || Reg == SystemZ::R6L || Reg == SystemZ::R6D)
1451 return false;
1452 if (Outs[I].Flags.isSwiftSelf() || Outs[I].Flags.isSwiftError())
1453 return false;
1454 }
1455 return true;
1456}
1457
1458SDValue
1459SystemZTargetLowering::LowerCall(CallLoweringInfo &CLI,
1460 SmallVectorImpl<SDValue> &InVals) const {
1461 SelectionDAG &DAG = CLI.DAG;
1462 SDLoc &DL = CLI.DL;
1463 SmallVectorImpl<ISD::OutputArg> &Outs = CLI.Outs;
1464 SmallVectorImpl<SDValue> &OutVals = CLI.OutVals;
1465 SmallVectorImpl<ISD::InputArg> &Ins = CLI.Ins;
1466 SDValue Chain = CLI.Chain;
1467 SDValue Callee = CLI.Callee;
1468 bool &IsTailCall = CLI.IsTailCall;
1469 CallingConv::ID CallConv = CLI.CallConv;
1470 bool IsVarArg = CLI.IsVarArg;
1471 MachineFunction &MF = DAG.getMachineFunction();
1472 EVT PtrVT = getPointerTy(MF.getDataLayout());
1473
1474 // Detect unsupported vector argument and return types.
1475 if (Subtarget.hasVector()) {
1
Assuming the condition is false
2
Taking false branch
1476 VerifyVectorTypes(Outs);
1477 VerifyVectorTypes(Ins);
1478 }
1479
1480 // Analyze the operands of the call, assigning locations to each operand.
1481 SmallVector<CCValAssign, 16> ArgLocs;
1482 SystemZCCState ArgCCInfo(CallConv, IsVarArg, MF, ArgLocs, *DAG.getContext());
1483 ArgCCInfo.AnalyzeCallOperands(Outs, CC_SystemZ);
1484
1485 // We don't support GuaranteedTailCallOpt, only automatically-detected
1486 // sibling calls.
1487 if (IsTailCall && !canUseSiblingCall(ArgCCInfo, ArgLocs, Outs))
3
Assuming 'IsTailCall' is true
4
Taking false branch
1488 IsTailCall = false;
1489
1490 // Get a count of how many bytes are to be pushed on the stack.
1491 unsigned NumBytes = ArgCCInfo.getNextStackOffset();
1492
1493 // Mark the start of the call.
1494 if (!IsTailCall)
4.1
'IsTailCall' is true
5
Taking false branch
1495 Chain = DAG.getCALLSEQ_START(Chain, NumBytes, 0, DL);
1496
1497 // Copy argument values to their designated locations.
1498 SmallVector<std::pair<unsigned, SDValue>, 9> RegsToPass;
1499 SmallVector<SDValue, 8> MemOpChains;
1500 SDValue StackPtr;
1501 for (unsigned I = 0, E = ArgLocs.size(); I != E; ++I) {
6
Loop condition is false. Execution continues on line 1553
1502 CCValAssign &VA = ArgLocs[I];
1503 SDValue ArgValue = OutVals[I];
1504
1505 if (VA.getLocInfo() == CCValAssign::Indirect) {
1506 // Store the argument in a stack slot and pass its address.
1507 SDValue SpillSlot = DAG.CreateStackTemporary(Outs[I].ArgVT);
1508 int FI = cast<FrameIndexSDNode>(SpillSlot)->getIndex();
1509 MemOpChains.push_back(
1510 DAG.getStore(Chain, DL, ArgValue, SpillSlot,
1511 MachinePointerInfo::getFixedStack(MF, FI)));
1512 // If the original argument was split (e.g. i128), we need
1513 // to store all parts of it here (and pass just one address).
1514 unsigned ArgIndex = Outs[I].OrigArgIndex;
1515 assert(Outs[I].PartOffset == 0);
1516 while (I + 1 != E && Outs[I + 1].OrigArgIndex == ArgIndex) {
1517 SDValue PartValue = OutVals[I + 1];
1518 unsigned PartOffset = Outs[I + 1].PartOffset;
1519 SDValue Address = DAG.getNode(ISD::ADD, DL, PtrVT, SpillSlot,
1520 DAG.getIntPtrConstant(PartOffset, DL));
1521 MemOpChains.push_back(
1522 DAG.getStore(Chain, DL, PartValue, Address,
1523 MachinePointerInfo::getFixedStack(MF, FI)));
1524 ++I;
1525 }
1526 ArgValue = SpillSlot;
1527 } else
1528 ArgValue = convertValVTToLocVT(DAG, DL, VA, ArgValue);
1529
1530 if (VA.isRegLoc())
1531 // Queue up the argument copies and emit them at the end.
1532 RegsToPass.push_back(std::make_pair(VA.getLocReg(), ArgValue));
1533 else {
1534 assert(VA.isMemLoc() && "Argument not register or memory");
1535
1536 // Work out the address of the stack slot. Unpromoted ints and
1537 // floats are passed as right-justified 8-byte values.
1538 if (!StackPtr.getNode())
1539 StackPtr = DAG.getCopyFromReg(Chain, DL, SystemZ::R15D, PtrVT);
1540 unsigned Offset = SystemZMC::CallFrameSize + VA.getLocMemOffset();
1541 if (VA.getLocVT() == MVT::i32 || VA.getLocVT() == MVT::f32)
1542 Offset += 4;
1543 SDValue Address = DAG.getNode(ISD::ADD, DL, PtrVT, StackPtr,
1544 DAG.getIntPtrConstant(Offset, DL));
1545
1546 // Emit the store.
1547 MemOpChains.push_back(
1548 DAG.getStore(Chain, DL, ArgValue, Address, MachinePointerInfo()));
1549 }
1550 }
1551
1552 // Join the stores, which are independent of one another.
1553 if (!MemOpChains.empty())
7
Taking false branch
1554 Chain = DAG.getNode(ISD::TokenFactor, DL, MVT::Other, MemOpChains);
1555
1556 // Accept direct calls by converting symbolic call addresses to the
1557 // associated Target* opcodes. Force %r1 to be used for indirect
1558 // tail calls.
1559 SDValue Glue;
1560 if (auto *G = dyn_cast<GlobalAddressSDNode>(Callee)) {
7.1
'G' is null
8
Taking false branch
1561 Callee = DAG.getTargetGlobalAddress(G->getGlobal(), DL, PtrVT);
1562 Callee = DAG.getNode(SystemZISD::PCREL_WRAPPER, DL, PtrVT, Callee);
1563 } else if (auto *E = dyn_cast<ExternalSymbolSDNode>(Callee)) {
9
Assuming 'E' is null
10
Taking false branch
1564 Callee = DAG.getTargetExternalSymbol(E->getSymbol(), PtrVT);
1565 Callee = DAG.getNode(SystemZISD::PCREL_WRAPPER, DL, PtrVT, Callee);
1563 } else if (IsTailCall) {
10.1
'IsTailCall' is true
11
Taking true branch
1567 Chain = DAG.getCopyToReg(Chain, DL, SystemZ::R1D, Callee, Glue);
12
Value assigned to 'N.Node'
13
Calling 'SelectionDAG::getCopyToReg'
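// [Editor's note] Context for steps 12-13 above, not original source: at
// this call, Glue is still the default-constructed SDValue declared at
// line 1559 (its node pointer is null), and the analyzer records the
// SDValue arguments being bound to getCopyToReg's parameters ("Value
// assigned to 'N.Node'") before stepping into that function.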
1568 Glue = Chain.getValue(1);
1569 Callee = DAG.getRegister(SystemZ::R1D, Callee.getValueType());
1570 }
1571
1572 // Build a sequence of copy-to-reg nodes, chained and glued together.
1573 for (unsigned I = 0, E = RegsToPass.size(); I != E; ++I) {
1574 Chain = DAG.getCopyToReg(Chain, DL, RegsToPass[I].first,
1575 RegsToPass[I].second, Glue);
1576 Glue = Chain.getValue(1);
1577 }
1578
1579 // The first call operand is the chain and the second is the target address.
1580 SmallVector<SDValue, 8> Ops;
1581 Ops.push_back(Chain);
1582 Ops.push_back(Callee);
1583
1584 // Add argument registers to the end of the list so that they are
1585 // known live into the call.
1586 for (unsigned I = 0, E = RegsToPass.size(); I != E; ++I)
1587 Ops.push_back(DAG.getRegister(RegsToPass[I].first,
1588 RegsToPass[I].second.getValueType()));
1589
1590 // Add a register mask operand representing the call-preserved registers.
1591 const TargetRegisterInfo *TRI = Subtarget.getRegisterInfo();
1592 const uint32_t *Mask = TRI->getCallPreservedMask(MF, CallConv);
1593 assert(Mask && "Missing call preserved mask for calling convention");
1594 Ops.push_back(DAG.getRegisterMask(Mask));
1595
1596 // Glue the call to the argument copies, if any.
1597 if (Glue.getNode())
1598 Ops.push_back(Glue);
1599
1600 // Emit the call.
1601 SDVTList NodeTys = DAG.getVTList(MVT::Other, MVT::Glue);
1602 if (IsTailCall)
1603 return DAG.getNode(SystemZISD::SIBCALL, DL, NodeTys, Ops);
1604 Chain = DAG.getNode(SystemZISD::CALL, DL, NodeTys, Ops);
1605 Glue = Chain.getValue(1);
1606
1607 // Mark the end of the call, which is glued to the call itself.
1608 Chain = DAG.getCALLSEQ_END(Chain,
1609 DAG.getConstant(NumBytes, DL, PtrVT, true),
1610 DAG.getConstant(0, DL, PtrVT, true),
1611 Glue, DL);
1612 Glue = Chain.getValue(1);
1613
1614 // Assign locations to each value returned by this call.
1615 SmallVector<CCValAssign, 16> RetLocs;
1616 CCState RetCCInfo(CallConv, IsVarArg, MF, RetLocs, *DAG.getContext());
1617 RetCCInfo.AnalyzeCallResult(Ins, RetCC_SystemZ);
1618
1619 // Copy all of the result registers out of their specified physreg.
1620 for (unsigned I = 0, E = RetLocs.size(); I != E; ++I) {
1621 CCValAssign &VA = RetLocs[I];
1622
1623 // Copy the value out, gluing the copy to the end of the call sequence.
1624 SDValue RetValue = DAG.getCopyFromReg(Chain, DL, VA.getLocReg(),
1625 VA.getLocVT(), Glue);
1626 Chain = RetValue.getValue(1);
1627 Glue = RetValue.getValue(2);
1628
1629 // Convert the value of the return register into the value that's
1630 // being returned.
1631 InVals.push_back(convertLocVTToValVT(DAG, DL, VA, Chain, RetValue));
1632 }
1633
1634 return Chain;
1635}
1636
1637bool SystemZTargetLowering::
1638CanLowerReturn(CallingConv::ID CallConv,
1639 MachineFunction &MF, bool isVarArg,
1640 const SmallVectorImpl<ISD::OutputArg> &Outs,
1641 LLVMContext &Context) const {
1642 // Detect unsupported vector return types.
1643 if (Subtarget.hasVector())
1644 VerifyVectorTypes(Outs);
1645
1646 // Special case that we cannot easily detect in RetCC_SystemZ since
1647 // i128 is not a legal type.
1648 for (auto &Out : Outs)
1649 if (Out.ArgVT == MVT::i128)
1650 return false;
1651
1652 SmallVector<CCValAssign, 16> RetLocs;
1653 CCState RetCCInfo(CallConv, isVarArg, MF, RetLocs, Context);
1654 return RetCCInfo.CheckReturn(Outs, RetCC_SystemZ);
1655}
1656
1657SDValue
1658SystemZTargetLowering::LowerReturn(SDValue Chain, CallingConv::ID CallConv,
1659 bool IsVarArg,
1660 const SmallVectorImpl<ISD::OutputArg> &Outs,
1661 const SmallVectorImpl<SDValue> &OutVals,
1662 const SDLoc &DL, SelectionDAG &DAG) const {
1663 MachineFunction &MF = DAG.getMachineFunction();
1664
1665 // Detect unsupported vector return types.
1666 if (Subtarget.hasVector())
1667 VerifyVectorTypes(Outs);
1668
1669 // Assign locations to each returned value.
1670 SmallVector<CCValAssign, 16> RetLocs;
1671 CCState RetCCInfo(CallConv, IsVarArg, MF, RetLocs, *DAG.getContext());
1672 RetCCInfo.AnalyzeReturn(Outs, RetCC_SystemZ);
1673
1674 // Quick exit for void returns
1675 if (RetLocs.empty())
1676 return DAG.getNode(SystemZISD::RET_FLAG, DL, MVT::Other, Chain);
1677
1678 if (CallConv == CallingConv::GHC)
1679 report_fatal_error("GHC functions return void only");
1680
1681 // Copy the result values into the output registers.
1682 SDValue Glue;
1683 SmallVector<SDValue, 4> RetOps;
1684 RetOps.push_back(Chain);
1685 for (unsigned I = 0, E = RetLocs.size(); I != E; ++I) {
1686 CCValAssign &VA = RetLocs[I];
1687 SDValue RetValue = OutVals[I];
1688
1689 // Make the return register live on exit.
1690 assert(VA.isRegLoc() && "Can only return in registers!");
1691
1692 // Promote the value as required.
1693 RetValue = convertValVTToLocVT(DAG, DL, VA, RetValue);
1694
1695 // Chain and glue the copies together.
1696 Register Reg = VA.getLocReg();
1697 Chain = DAG.getCopyToReg(Chain, DL, Reg, RetValue, Glue);
1698 Glue = Chain.getValue(1);
1699 RetOps.push_back(DAG.getRegister(Reg, VA.getLocVT()));
1700 }
1701
1702 // Update chain and glue.
1703 RetOps[0] = Chain;
1704 if (Glue.getNode())
1705 RetOps.push_back(Glue);
1706
1707 return DAG.getNode(SystemZISD::RET_FLAG, DL, MVT::Other, RetOps);
1708}
1709
1710// Return true if Op is an intrinsic node with chain that returns the CC value
1711// as its only (other) argument. Provide the associated SystemZISD opcode and
1712// the mask of valid CC values if so.
1713static bool isIntrinsicWithCCAndChain(SDValue Op, unsigned &Opcode,
1714 unsigned &CCValid) {
1715 unsigned Id = cast<ConstantSDNode>(Op.getOperand(1))->getZExtValue();
1716 switch (Id) {
1717 case Intrinsic::s390_tbegin:
1718 Opcode = SystemZISD::TBEGIN;
1719 CCValid = SystemZ::CCMASK_TBEGIN;
1720 return true;
1721
1722 case Intrinsic::s390_tbegin_nofloat:
1723 Opcode = SystemZISD::TBEGIN_NOFLOAT;
1724 CCValid = SystemZ::CCMASK_TBEGIN;
1725 return true;
1726
1727 case Intrinsic::s390_tend:
1728 Opcode = SystemZISD::TEND;
1729 CCValid = SystemZ::CCMASK_TEND;
1730 return true;
1731
1732 default:
1733 return false;
1734 }
1735}
1736
1737// Return true if Op is an intrinsic node without chain that returns the
1738// CC value as its final argument. Provide the associated SystemZISD
1739// opcode and the mask of valid CC values if so.
1740static bool isIntrinsicWithCC(SDValue Op, unsigned &Opcode, unsigned &CCValid) {
1741 unsigned Id = cast<ConstantSDNode>(Op.getOperand(0))->getZExtValue();
1742 switch (Id) {
1743 case Intrinsic::s390_vpkshs:
1744 case Intrinsic::s390_vpksfs:
1745 case Intrinsic::s390_vpksgs:
1746 Opcode = SystemZISD::PACKS_CC;
1747 CCValid = SystemZ::CCMASK_VCMP;
1748 return true;
1749
1750 case Intrinsic::s390_vpklshs:
1751 case Intrinsic::s390_vpklsfs:
1752 case Intrinsic::s390_vpklsgs:
1753 Opcode = SystemZISD::PACKLS_CC;
1754 CCValid = SystemZ::CCMASK_VCMP;
1755 return true;
1756
1757 case Intrinsic::s390_vceqbs:
1758 case Intrinsic::s390_vceqhs:
1759 case Intrinsic::s390_vceqfs:
1760 case Intrinsic::s390_vceqgs:
1761 Opcode = SystemZISD::VICMPES;
1762 CCValid = SystemZ::CCMASK_VCMP;
1763 return true;
1764
1765 case Intrinsic::s390_vchbs:
1766 case Intrinsic::s390_vchhs:
1767 case Intrinsic::s390_vchfs:
1768 case Intrinsic::s390_vchgs:
1769 Opcode = SystemZISD::VICMPHS;
1770 CCValid = SystemZ::CCMASK_VCMP;
1771 return true;
1772
1773 case Intrinsic::s390_vchlbs:
1774 case Intrinsic::s390_vchlhs:
1775 case Intrinsic::s390_vchlfs:
1776 case Intrinsic::s390_vchlgs:
1777 Opcode = SystemZISD::VICMPHLS;
1778 CCValid = SystemZ::CCMASK_VCMP;
1779 return true;
1780
1781 case Intrinsic::s390_vtm:
1782 Opcode = SystemZISD::VTM;
1783 CCValid = SystemZ::CCMASK_VCMP;
1784 return true;
1785
1786 case Intrinsic::s390_vfaebs:
1787 case Intrinsic::s390_vfaehs:
1788 case Intrinsic::s390_vfaefs:
1789 Opcode = SystemZISD::VFAE_CC;
1790 CCValid = SystemZ::CCMASK_ANY;
1791 return true;
1792
1793 case Intrinsic::s390_vfaezbs:
1794 case Intrinsic::s390_vfaezhs:
1795 case Intrinsic::s390_vfaezfs:
1796 Opcode = SystemZISD::VFAEZ_CC;
1797 CCValid = SystemZ::CCMASK_ANY;
1798 return true;
1799
1800 case Intrinsic::s390_vfeebs:
1801 case Intrinsic::s390_vfeehs:
1802 case Intrinsic::s390_vfeefs:
1803 Opcode = SystemZISD::VFEE_CC;
1804 CCValid = SystemZ::CCMASK_ANY;
1805 return true;
1806
1807 case Intrinsic::s390_vfeezbs:
1808 case Intrinsic::s390_vfeezhs:
1809 case Intrinsic::s390_vfeezfs:
1810 Opcode = SystemZISD::VFEEZ_CC;
1811 CCValid = SystemZ::CCMASK_ANY;
1812 return true;
1813
1814 case Intrinsic::s390_vfenebs:
1815 case Intrinsic::s390_vfenehs:
1816 case Intrinsic::s390_vfenefs:
1817 Opcode = SystemZISD::VFENE_CC;
1818 CCValid = SystemZ::CCMASK_ANY;
1819 return true;
1820
1821 case Intrinsic::s390_vfenezbs:
1822 case Intrinsic::s390_vfenezhs:
1823 case Intrinsic::s390_vfenezfs:
1824 Opcode = SystemZISD::VFENEZ_CC;
1825 CCValid = SystemZ::CCMASK_ANY;
1826 return true;
1827
1828 case Intrinsic::s390_vistrbs:
1829 case Intrinsic::s390_vistrhs:
1830 case Intrinsic::s390_vistrfs:
1831 Opcode = SystemZISD::VISTR_CC;
1832 CCValid = SystemZ::CCMASK_0 | SystemZ::CCMASK_3;
1833 return true;
1834
1835 case Intrinsic::s390_vstrcbs:
1836 case Intrinsic::s390_vstrchs:
1837 case Intrinsic::s390_vstrcfs:
1838 Opcode = SystemZISD::VSTRC_CC;
1839 CCValid = SystemZ::CCMASK_ANY;
1840 return true;
1841
1842 case Intrinsic::s390_vstrczbs:
1843 case Intrinsic::s390_vstrczhs:
1844 case Intrinsic::s390_vstrczfs:
1845 Opcode = SystemZISD::VSTRCZ_CC;
1846 CCValid = SystemZ::CCMASK_ANY;
1847 return true;
1848
1849 case Intrinsic::s390_vstrsb:
1850 case Intrinsic::s390_vstrsh:
1851 case Intrinsic::s390_vstrsf:
1852 Opcode = SystemZISD::VSTRS_CC;
1853 CCValid = SystemZ::CCMASK_ANY;
1854 return true;
1855
1856 case Intrinsic::s390_vstrszb:
1857 case Intrinsic::s390_vstrszh:
1858 case Intrinsic::s390_vstrszf:
1859 Opcode = SystemZISD::VSTRSZ_CC;
1860 CCValid = SystemZ::CCMASK_ANY;
1861 return true;
1862
1863 case Intrinsic::s390_vfcedbs:
1864 case Intrinsic::s390_vfcesbs:
1865 Opcode = SystemZISD::VFCMPES;
1866 CCValid = SystemZ::CCMASK_VCMP;
1867 return true;
1868
1869 case Intrinsic::s390_vfchdbs:
1870 case Intrinsic::s390_vfchsbs:
1871 Opcode = SystemZISD::VFCMPHS;
1872 CCValid = SystemZ::CCMASK_VCMP;
1873 return true;
1874
1875 case Intrinsic::s390_vfchedbs:
1876 case Intrinsic::s390_vfchesbs:
1877 Opcode = SystemZISD::VFCMPHES;
1878 CCValid = SystemZ::CCMASK_VCMP;
1879 return true;
1880
1881 case Intrinsic::s390_vftcidb:
1882 case Intrinsic::s390_vftcisb:
1883 Opcode = SystemZISD::VFTCI;
1884 CCValid = SystemZ::CCMASK_VCMP;
1885 return true;
1886
1887 case Intrinsic::s390_tdc:
1888 Opcode = SystemZISD::TDC;
1889 CCValid = SystemZ::CCMASK_TDC;
1890 return true;
1891
1892 default:
1893 return false;
1894 }
1895}
1896
1897// Emit an intrinsic with chain and an explicit CC register result.
1898static SDNode *emitIntrinsicWithCCAndChain(SelectionDAG &DAG, SDValue Op,
1899 unsigned Opcode) {
1900 // Copy all operands except the intrinsic ID.
1901 unsigned NumOps = Op.getNumOperands();
1902 SmallVector<SDValue, 6> Ops;
1903 Ops.reserve(NumOps - 1);
1904 Ops.push_back(Op.getOperand(0));
1905 for (unsigned I = 2; I < NumOps; ++I)
1906 Ops.push_back(Op.getOperand(I));
1907
1908 assert(Op->getNumValues() == 2 && "Expected only CC result and chain");
1909 SDVTList RawVTs = DAG.getVTList(MVT::i32, MVT::Other);
1910 SDValue Intr = DAG.getNode(Opcode, SDLoc(Op), RawVTs, Ops);
1911 SDValue OldChain = SDValue(Op.getNode(), 1);
1912 SDValue NewChain = SDValue(Intr.getNode(), 1);
1913 DAG.ReplaceAllUsesOfValueWith(OldChain, NewChain);
1914 return Intr.getNode();
1915}
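// [Editor's note] On the ReplaceAllUsesOfValueWith call above: both the
// original intrinsic node and its replacement produce (i32 CC, chain), so
// every node chained after the old intrinsic must be redirected to result 1
// of the new node before the old one becomes dead.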
1916
1917// Emit an intrinsic with an explicit CC register result.
1918static SDNode *emitIntrinsicWithCC(SelectionDAG &DAG, SDValue Op,
1919 unsigned Opcode) {
1920 // Copy all operands except the intrinsic ID.
1921 unsigned NumOps = Op.getNumOperands();
1922 SmallVector<SDValue, 6> Ops;
1923 Ops.reserve(NumOps - 1);
1924 for (unsigned I = 1; I < NumOps; ++I)
1925 Ops.push_back(Op.getOperand(I));
1926
1927 SDValue Intr = DAG.getNode(Opcode, SDLoc(Op), Op->getVTList(), Ops);
1928 return Intr.getNode();
1929}
1930
1931// CC is a comparison that will be implemented using an integer or
1932// floating-point comparison. Return the condition code mask for
1933// a branch on true. In the integer case, CCMASK_CMP_UO is set for
1934// unsigned comparisons and clear for signed ones. In the floating-point
1935// case, CCMASK_CMP_UO has its normal mask meaning (unordered).
1936static unsigned CCMaskForCondCode(ISD::CondCode CC) {
1937#define CONV(X) \
1938 case ISD::SET##X: return SystemZ::CCMASK_CMP_##X; \
1939 case ISD::SETO##X: return SystemZ::CCMASK_CMP_##X; \
1940 case ISD::SETU##X: return SystemZ::CCMASK_CMP_UO | SystemZ::CCMASK_CMP_##X
1941
1942 switch (CC) {
1943 default:
1944 llvm_unreachable("Invalid integer condition!");
1945
1946 CONV(EQ);
1947 CONV(NE);
1948 CONV(GT);
1949 CONV(GE);
1950 CONV(LT);
1951 CONV(LE);
1952
1953 case ISD::SETO: return SystemZ::CCMASK_CMP_O;
1954 case ISD::SETUO: return SystemZ::CCMASK_CMP_UO;
1955 }
1956#undef CONV
1957}
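// [Editor's note] A hand expansion of one CONV instance, for reference
// (illustration only); CONV(GT) produces:
//   case ISD::SETGT:  return SystemZ::CCMASK_CMP_GT;
//   case ISD::SETOGT: return SystemZ::CCMASK_CMP_GT;
//   case ISD::SETUGT: return SystemZ::CCMASK_CMP_UO | SystemZ::CCMASK_CMP_GT;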
1958
1959// If C can be converted to a comparison against zero, adjust the operands
1960// as necessary.
1961static void adjustZeroCmp(SelectionDAG &DAG, const SDLoc &DL, Comparison &C) {
1962 if (C.ICmpType == SystemZICMP::UnsignedOnly)
1963 return;
1964
1965 auto *ConstOp1 = dyn_cast<ConstantSDNode>(C.Op1.getNode());
1966 if (!ConstOp1)
1967 return;
1968
1969 int64_t Value = ConstOp1->getSExtValue();
1970 if ((Value == -1 && C.CCMask == SystemZ::CCMASK_CMP_GT) ||
1971 (Value == -1 && C.CCMask == SystemZ::CCMASK_CMP_LE) ||
1972 (Value == 1 && C.CCMask == SystemZ::CCMASK_CMP_LT) ||
1973 (Value == 1 && C.CCMask == SystemZ::CCMASK_CMP_GE)) {
1974 C.CCMask ^= SystemZ::CCMASK_CMP_EQ;
1975 C.Op1 = DAG.getConstant(0, DL, C.Op1.getValueType());
1976 }
1977}
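// [Editor's note] A worked case of the rewrite above (illustration only):
// a signed "x > -1" arrives with Value == -1 and CCMask == CCMASK_CMP_GT;
// XOR-ing in CCMASK_CMP_EQ turns GT into GE, giving the equivalent
// "x >= 0", which can be implemented as a comparison against zero.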
1978
1979// If a comparison described by C is suitable for CLI(Y), CHHSI or CLHHSI,
1980// adjust the operands as necessary.
1981static void adjustSubwordCmp(SelectionDAG &DAG, const SDLoc &DL,
1982 Comparison &C) {
1983 // For us to make any changes, it must be a comparison between a single-use
1984 // load and a constant.
1985 if (!C.Op0.hasOneUse() ||
1986 C.Op0.getOpcode() != ISD::LOAD ||
1987 C.Op1.getOpcode() != ISD::Constant)
1988 return;
1989
1990 // We must have an 8- or 16-bit load.
1991 auto *Load = cast<LoadSDNode>(C.Op0);
1992 unsigned NumBits = Load->getMemoryVT().getStoreSizeInBits();
1993 if (NumBits != 8 && NumBits != 16)
1994 return;
1995
1996 // The load must be an extending one and the constant must be within the
1997 // range of the unextended value.
1998 auto *ConstOp1 = cast<ConstantSDNode>(C.Op1);
1999 uint64_t Value = ConstOp1->getZExtValue();
2000 uint64_t Mask = (1 << NumBits) - 1;
2001 if (Load->getExtensionType() == ISD::SEXTLOAD) {
2002 // Make sure that ConstOp1 is in range of C.Op0.
2003 int64_t SignedValue = ConstOp1->getSExtValue();
2004 if (uint64_t(SignedValue) + (uint64_t(1) << (NumBits - 1)) > Mask)
2005 return;
2006 if (C.ICmpType != SystemZICMP::SignedOnly) {
2007 // Unsigned comparison between two sign-extended values is equivalent
2008 // to unsigned comparison between two zero-extended values.
2009 Value &= Mask;
2010 } else if (NumBits == 8) {
2011 // Try to treat the comparison as unsigned, so that we can use CLI.
2012 // Adjust CCMask and Value as necessary.
2013 if (Value == 0 && C.CCMask == SystemZ::CCMASK_CMP_LT)
2014 // Test whether the high bit of the byte is set.
2015 Value = 127, C.CCMask = SystemZ::CCMASK_CMP_GT;
2016 else if (Value == 0 && C.CCMask == SystemZ::CCMASK_CMP_GE)
2017 // Test whether the high bit of the byte is clear.
2018 Value = 128, C.CCMask = SystemZ::CCMASK_CMP_LT;
2019 else
2020 // No instruction exists for this combination.
2021 return;
2022 C.ICmpType = SystemZICMP::UnsignedOnly;
2023 }
2024 } else if (Load->getExtensionType() == ISD::ZEXTLOAD) {
2025 if (Value > Mask)
2026 return;
2027 // If the constant is in range, we can use any comparison.
2028 C.ICmpType = SystemZICMP::Any;
2029 } else
2030 return;
2031
2032 // Make sure that the first operand is an i32 of the right extension type.
2033 ISD::LoadExtType ExtType = (C.ICmpType == SystemZICMP::SignedOnly ?
2034 ISD::SEXTLOAD :
2035 ISD::ZEXTLOAD);
2036 if (C.Op0.getValueType() != MVT::i32 ||
2037 Load->getExtensionType() != ExtType) {
2038 C.Op0 = DAG.getExtLoad(ExtType, SDLoc(Load), MVT::i32, Load->getChain(),
2039 Load->getBasePtr(), Load->getPointerInfo(),
2040 Load->getMemoryVT(), Load->getAlignment(),
2041 Load->getMemOperand()->getFlags());
2042 // Update the chain uses.
2043 DAG.ReplaceAllUsesOfValueWith(SDValue(Load, 1), C.Op0.getValue(1));
2044 }
2045
2046 // Make sure that the second operand is an i32 with the right value.
2047 if (C.Op1.getValueType() != MVT::i32 ||
2048 Value != ConstOp1->getZExtValue())
2049 C.Op1 = DAG.getConstant(Value, DL, MVT::i32);
2050}
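// [Editor's note] Illustration of the CLI trick above: for a sign-extending
// byte load, "x < 0" (Value 0, CCMASK_CMP_LT) holds exactly when the high
// bit of the byte is set, i.e. when the byte viewed as unsigned exceeds
// 127, hence the rewrite to Value 127 with CCMASK_CMP_GT under
// SystemZICMP::UnsignedOnly.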
2051
2052// Return true if Op is either an unextended load, or a load suitable
2053// for integer register-memory comparisons of type ICmpType.
2054static bool isNaturalMemoryOperand(SDValue Op, unsigned ICmpType) {
2055 auto *Load = dyn_cast<LoadSDNode>(Op.getNode());
2056 if (Load) {
2057 // There are no instructions to compare a register with a memory byte.
2058 if (Load->getMemoryVT() == MVT::i8)
2059 return false;
2060 // Otherwise decide on extension type.
2061 switch (Load->getExtensionType()) {
2062 case ISD::NON_EXTLOAD:
2063 return true;
2064 case ISD::SEXTLOAD:
2065 return ICmpType != SystemZICMP::UnsignedOnly;
2066 case ISD::ZEXTLOAD:
2067 return ICmpType != SystemZICMP::SignedOnly;
2068 default:
2069 break;
2070 }
2071 }
2072 return false;
2073}
2074
2075// Return true if it is better to swap the operands of C.
2076static bool shouldSwapCmpOperands(const Comparison &C) {
2077 // Leave f128 comparisons alone, since they have no memory forms.
2078 if (C.Op0.getValueType() == MVT::f128)
2079 return false;
2080
2081 // Always keep a floating-point constant second, since comparisons with
2082 // zero can use LOAD TEST and comparisons with other constants make a
2083 // natural memory operand.
2084 if (isa<ConstantFPSDNode>(C.Op1))
2085 return false;
2086
2087 // Never swap comparisons with zero since there are many ways to optimize
2088 // those later.
2089 auto *ConstOp1 = dyn_cast<ConstantSDNode>(C.Op1);
2090 if (ConstOp1 && ConstOp1->getZExtValue() == 0)
2091 return false;
2092
2093 // Also keep natural memory operands second if the loaded value is
2094 // only used here. Several comparisons have memory forms.
2095 if (isNaturalMemoryOperand(C.Op1, C.ICmpType) && C.Op1.hasOneUse())
2096 return false;
2097
2098 // Look for cases where Cmp0 is a single-use load and Cmp1 isn't.
2099 // In that case we generally prefer the memory to be second.
2100 if (isNaturalMemoryOperand(C.Op0, C.ICmpType) && C.Op0.hasOneUse()) {
2101 // The only exceptions are when the second operand is a constant and
2102 // we can use things like CHHSI.
2103 if (!ConstOp1)
2104 return true;
2105 // The unsigned memory-immediate instructions can handle 16-bit
2106 // unsigned integers.
2107 if (C.ICmpType != SystemZICMP::SignedOnly &&
2108 isUInt<16>(ConstOp1->getZExtValue()))
2109 return false;
2110 // The signed memory-immediate instructions can handle 16-bit
2111 // signed integers.
2112 if (C.ICmpType != SystemZICMP::UnsignedOnly &&
2113 isInt<16>(ConstOp1->getSExtValue()))
2114 return false;
2115 return true;
2116 }
2117
2118 // Try to promote the use of CGFR and CLGFR.
2119 unsigned Opcode0 = C.Op0.getOpcode();
2120 if (C.ICmpType != SystemZICMP::UnsignedOnly && Opcode0 == ISD::SIGN_EXTEND)
2121 return true;
2122 if (C.ICmpType != SystemZICMP::SignedOnly && Opcode0 == ISD::ZERO_EXTEND)
2123 return true;
2124 if (C.ICmpType != SystemZICMP::SignedOnly &&
2125 Opcode0 == ISD::AND &&
2126 C.Op0.getOperand(1).getOpcode() == ISD::Constant &&
2127 cast<ConstantSDNode>(C.Op0.getOperand(1))->getZExtValue() == 0xffffffff)
2128 return true;
2129
2130 return false;
2131}
2132
2133// Return a version of comparison CC mask CCMask in which the LT and GT
2134// actions are swapped.
2135static unsigned reverseCCMask(unsigned CCMask) {
2136 return ((CCMask & SystemZ::CCMASK_CMP_EQ) |
2137 (CCMask & SystemZ::CCMASK_CMP_GT ? SystemZ::CCMASK_CMP_LT : 0) |
2138 (CCMask & SystemZ::CCMASK_CMP_LT ? SystemZ::CCMASK_CMP_GT : 0) |
2139 (CCMask & SystemZ::CCMASK_CMP_UO));
2140}
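// [Editor's note] Example (illustration only): reversing CCMASK_CMP_LE,
// i.e. CCMASK_CMP_EQ | CCMASK_CMP_LT, yields CCMASK_CMP_EQ |
// CCMASK_CMP_GT, which is CCMASK_CMP_GE, exactly the mask needed after
// swapping the compared operands.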
2141
2142// Check whether C tests for equality between X and Y and whether X - Y
2143// or Y - X is also computed. In that case it's better to compare the
2144// result of the subtraction against zero.
2145static void adjustForSubtraction(SelectionDAG &DAG, const SDLoc &DL,
2146 Comparison &C) {
2147 if (C.CCMask == SystemZ::CCMASK_CMP_EQ ||
2148 C.CCMask == SystemZ::CCMASK_CMP_NE) {
2149 for (auto I = C.Op0->use_begin(), E = C.Op0->use_end(); I != E; ++I) {
2150 SDNode *N = *I;
2151 if (N->getOpcode() == ISD::SUB &&
2152 ((N->getOperand(0) == C.Op0 && N->getOperand(1) == C.Op1) ||
2153 (N->getOperand(0) == C.Op1 && N->getOperand(1) == C.Op0))) {
2154 C.Op0 = SDValue(N, 0);
2155 C.Op1 = DAG.getConstant(0, DL, N->getValueType(0));
2156 return;
2157 }
2158 }
2159 }
2160}
2161
2162// Check whether C compares a floating-point value with zero and if that
2163// floating-point value is also negated. In this case we can use the
2164// negation to set CC, so avoiding separate LOAD AND TEST and
2165// LOAD (NEGATIVE/COMPLEMENT) instructions.
2166static void adjustForFNeg(Comparison &C) {
2167 auto *C1 = dyn_cast<ConstantFPSDNode>(C.Op1);
2168 if (C1 && C1->isZero()) {
2169 for (auto I = C.Op0->use_begin(), E = C.Op0->use_end(); I != E; ++I) {
2170 SDNode *N = *I;
2171 if (N->getOpcode() == ISD::FNEG) {
2172 C.Op0 = SDValue(N, 0);
2173 C.CCMask = reverseCCMask(C.CCMask);
2174 return;
2175 }
2176 }
2177 }
2178}
2179
2180// Check whether C compares (shl X, 32) with 0 and whether X is
2181// also sign-extended. In that case it is better to test the result
2182// of the sign extension using LTGFR.
2183//
2184// This case is important because InstCombine transforms a comparison
2185// with (sext (trunc X)) into a comparison with (shl X, 32).
2186static void adjustForLTGFR(Comparison &C) {
2187 // Check for a comparison between (shl X, 32) and 0.
2188 if (C.Op0.getOpcode() == ISD::SHL &&
2189 C.Op0.getValueType() == MVT::i64 &&
2190 C.Op1.getOpcode() == ISD::Constant &&
2191 cast<ConstantSDNode>(C.Op1)->getZExtValue() == 0) {
2192 auto *C1 = dyn_cast<ConstantSDNode>(C.Op0.getOperand(1));
2193 if (C1 && C1->getZExtValue() == 32) {
2194 SDValue ShlOp0 = C.Op0.getOperand(0);
2195 // See whether X has any SIGN_EXTEND_INREG uses.
2196 for (auto I = ShlOp0->use_begin(), E = ShlOp0->use_end(); I != E; ++I) {
2197 SDNode *N = *I;
2198 if (N->getOpcode() == ISD::SIGN_EXTEND_INREG &&
2199 cast<VTSDNode>(N->getOperand(1))->getVT() == MVT::i32) {
2200 C.Op0 = SDValue(N, 0);
2201 return;
2202 }
2203 }
2204 }
2205 }
2206}
2207
2208// If C compares the truncation of an extending load, try to compare
2209// the untruncated value instead. This exposes more opportunities to
2210// reuse CC.
2211static void adjustICmpTruncate(SelectionDAG &DAG, const SDLoc &DL,
2212 Comparison &C) {
2213 if (C.Op0.getOpcode() == ISD::TRUNCATE &&
2214 C.Op0.getOperand(0).getOpcode() == ISD::LOAD &&
2215 C.Op1.getOpcode() == ISD::Constant &&
2216 cast<ConstantSDNode>(C.Op1)->getZExtValue() == 0) {
2217 auto *L = cast<LoadSDNode>(C.Op0.getOperand(0));
2218 if (L->getMemoryVT().getStoreSizeInBits() <= C.Op0.getValueSizeInBits()) {
2219 unsigned Type = L->getExtensionType();
2220 if ((Type == ISD::ZEXTLOAD && C.ICmpType != SystemZICMP::SignedOnly) ||
2221 (Type == ISD::SEXTLOAD && C.ICmpType != SystemZICMP::UnsignedOnly)) {
2222 C.Op0 = C.Op0.getOperand(0);
2223 C.Op1 = DAG.getConstant(0, DL, C.Op0.getValueType());
2224 }
2225 }
2226 }
2227}
2228
2229// Return true if shift operation N has an in-range constant shift value.
2230// Store it in ShiftVal if so.
2231static bool isSimpleShift(SDValue N, unsigned &ShiftVal) {
2232 auto *Shift = dyn_cast<ConstantSDNode>(N.getOperand(1));
2233 if (!Shift)
2234 return false;
2235
2236 uint64_t Amount = Shift->getZExtValue();
2237 if (Amount >= N.getValueSizeInBits())
2238 return false;
2239
2240 ShiftVal = Amount;
2241 return true;
2242}
2243
2244// Check whether an AND with Mask is suitable for a TEST UNDER MASK
2245// instruction and whether the CC value is descriptive enough to handle
2246// a comparison of type Opcode between the AND result and CmpVal.
2247// CCMask says which comparison result is being tested and BitSize is
2248// the number of bits in the operands. If TEST UNDER MASK can be used,
2249// return the corresponding CC mask, otherwise return 0.
2250static unsigned getTestUnderMaskCond(unsigned BitSize, unsigned CCMask,
2251 uint64_t Mask, uint64_t CmpVal,
2252 unsigned ICmpType) {
2253 assert(Mask != 0 && "ANDs with zero should have been removed by now");
2254
2255 // Check whether the mask is suitable for TMHH, TMHL, TMLH or TMLL.
2256 if (!SystemZ::isImmLL(Mask) && !SystemZ::isImmLH(Mask) &&
2257 !SystemZ::isImmHL(Mask) && !SystemZ::isImmHH(Mask))
2258 return 0;
2259
2260 // Work out the masks for the lowest and highest bits.
2261 unsigned HighShift = 63 - countLeadingZeros(Mask);
2262 uint64_t High = uint64_t(1) << HighShift;
2263 uint64_t Low = uint64_t(1) << countTrailingZeros(Mask);
2264
2265 // Signed ordered comparisons are effectively unsigned if the sign
2266 // bit is dropped.
2267 bool EffectivelyUnsigned = (ICmpType != SystemZICMP::SignedOnly);
2268
2269 // Check for equality comparisons with 0, or the equivalent.
2270 if (CmpVal == 0) {
2271 if (CCMask == SystemZ::CCMASK_CMP_EQ)
2272 return SystemZ::CCMASK_TM_ALL_0;
2273 if (CCMask == SystemZ::CCMASK_CMP_NE)
2274 return SystemZ::CCMASK_TM_SOME_1;
2275 }
2276 if (EffectivelyUnsigned && CmpVal > 0 && CmpVal <= Low) {
2277 if (CCMask == SystemZ::CCMASK_CMP_LT)
2278 return SystemZ::CCMASK_TM_ALL_0;
2279 if (CCMask == SystemZ::CCMASK_CMP_GE)
2280 return SystemZ::CCMASK_TM_SOME_1;
2281 }
2282 if (EffectivelyUnsigned && CmpVal < Low) {
2283 if (CCMask == SystemZ::CCMASK_CMP_LE)
2284 return SystemZ::CCMASK_TM_ALL_0;
2285 if (CCMask == SystemZ::CCMASK_CMP_GT)
2286 return SystemZ::CCMASK_TM_SOME_1;
2287 }
2288
2289 // Check for equality comparisons with the mask, or the equivalent.
2290 if (CmpVal == Mask) {
2291 if (CCMask == SystemZ::CCMASK_CMP_EQ)
2292 return SystemZ::CCMASK_TM_ALL_1;
2293 if (CCMask == SystemZ::CCMASK_CMP_NE)
2294 return SystemZ::CCMASK_TM_SOME_0;
2295 }
2296 if (EffectivelyUnsigned && CmpVal >= Mask - Low && CmpVal < Mask) {
2297 if (CCMask == SystemZ::CCMASK_CMP_GT)
2298 return SystemZ::CCMASK_TM_ALL_1;
2299 if (CCMask == SystemZ::CCMASK_CMP_LE)
2300 return SystemZ::CCMASK_TM_SOME_0;
2301 }
2302 if (EffectivelyUnsigned && CmpVal > Mask - Low && CmpVal <= Mask) {
2303 if (CCMask == SystemZ::CCMASK_CMP_GE)
2304 return SystemZ::CCMASK_TM_ALL_1;
2305 if (CCMask == SystemZ::CCMASK_CMP_LT)
2306 return SystemZ::CCMASK_TM_SOME_0;
2307 }
2308
2309 // Check for ordered comparisons with the top bit.
2310 if (EffectivelyUnsigned && CmpVal >= Mask - High && CmpVal < High) {
2311 if (CCMask == SystemZ::CCMASK_CMP_LE)
2312 return SystemZ::CCMASK_TM_MSB_0;
2313 if (CCMask == SystemZ::CCMASK_CMP_GT)
2314 return SystemZ::CCMASK_TM_MSB_1;
2315 }
2316 if (EffectivelyUnsigned && CmpVal > Mask - High && CmpVal <= High) {
2317 if (CCMask == SystemZ::CCMASK_CMP_LT)
2318 return SystemZ::CCMASK_TM_MSB_0;
2319 if (CCMask == SystemZ::CCMASK_CMP_GE)
2320 return SystemZ::CCMASK_TM_MSB_1;
2321 }
2322
2323 // If there are just two bits, we can do equality checks for Low and High
2324 // as well.
2325 if (Mask == Low + High) {
2326 if (CCMask == SystemZ::CCMASK_CMP_EQ && CmpVal == Low)
2327 return SystemZ::CCMASK_TM_MIXED_MSB_0;
2328 if (CCMask == SystemZ::CCMASK_CMP_NE && CmpVal == Low)
2329 return SystemZ::CCMASK_TM_MIXED_MSB_0 ^ SystemZ::CCMASK_ANY;
2330 if (CCMask == SystemZ::CCMASK_CMP_EQ && CmpVal == High)
2331 return SystemZ::CCMASK_TM_MIXED_MSB_1;
2332 if (CCMask == SystemZ::CCMASK_CMP_NE && CmpVal == High)
2333 return SystemZ::CCMASK_TM_MIXED_MSB_1 ^ SystemZ::CCMASK_ANY;
2334 }
2335
2336 // Looks like we've exhausted our options.
2337 return 0;
2338}
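// [Editor's note] A worked call (illustration only): Mask 0x8000,
// CmpVal 0, CCMask CCMASK_CMP_NE, any ICmpType. The mask fits TMLL, and
// the equality-with-0 rule maps NE to CCMASK_TM_SOME_1, so
// "(x & 0x8000) != 0" becomes a TEST UNDER MASK with that CC mask.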
2339
2340// See whether C can be implemented as a TEST UNDER MASK instruction.
2341// Update the arguments with the TM version if so.
2342static void adjustForTestUnderMask(SelectionDAG &DAG, const SDLoc &DL,
2343 Comparison &C) {
2344 // Check that we have a comparison with a constant.
2345 auto *ConstOp1 = dyn_cast<ConstantSDNode>(C.Op1);
2346 if (!ConstOp1)
2347 return;
2348 uint64_t CmpVal = ConstOp1->getZExtValue();
2349
2350 // Check whether the nonconstant input is an AND with a constant mask.
2351 Comparison NewC(C);
2352 uint64_t MaskVal;
2353 ConstantSDNode *Mask = nullptr;
2354 if (C.Op0.getOpcode() == ISD::AND) {
2355 NewC.Op0 = C.Op0.getOperand(0);
2356 NewC.Op1 = C.Op0.getOperand(1);
2357 Mask = dyn_cast<ConstantSDNode>(NewC.Op1);
2358 if (!Mask)
2359 return;
2360 MaskVal = Mask->getZExtValue();
2361 } else {
2362 // There is no instruction to compare with a 64-bit immediate
2363 // so use TMHH instead if possible. We need an unsigned ordered
2364 // comparison with an i64 immediate.
2365 if (NewC.Op0.getValueType() != MVT::i64 ||
2366 NewC.CCMask == SystemZ::CCMASK_CMP_EQ ||
2367 NewC.CCMask == SystemZ::CCMASK_CMP_NE ||
2368 NewC.ICmpType == SystemZICMP::SignedOnly)
2369 return;
2370 // Convert LE and GT comparisons into LT and GE.
2371 if (NewC.CCMask == SystemZ::CCMASK_CMP_LE ||
2372 NewC.CCMask == SystemZ::CCMASK_CMP_GT) {
2373 if (CmpVal == uint64_t(-1))
2374 return;
2375 CmpVal += 1;
2376 NewC.CCMask ^= SystemZ::CCMASK_CMP_EQ;
2377 }
2378 // If the low N bits of Op1 are zero then the low N bits of Op0 can
2379 // be masked off without changing the result.
2380 MaskVal = -(CmpVal & -CmpVal);
2381 NewC.ICmpType = SystemZICMP::UnsignedOnly;
2382 }
2383 if (!MaskVal)
2384 return;
2385
2386 // Check whether the combination of mask, comparison value and comparison
2387 // type are suitable.
2388 unsigned BitSize = NewC.Op0.getValueSizeInBits();
2389 unsigned NewCCMask, ShiftVal;
2390 if (NewC.ICmpType != SystemZICMP::SignedOnly &&
2391 NewC.Op0.getOpcode() == ISD::SHL &&
2392 isSimpleShift(NewC.Op0, ShiftVal) &&
2393 (MaskVal >> ShiftVal != 0) &&
2394 ((CmpVal >> ShiftVal) << ShiftVal) == CmpVal &&
2395 (NewCCMask = getTestUnderMaskCond(BitSize, NewC.CCMask,
2396 MaskVal >> ShiftVal,
2397 CmpVal >> ShiftVal,
2398 SystemZICMP::Any))) {
2399 NewC.Op0 = NewC.Op0.getOperand(0);
2400 MaskVal >>= ShiftVal;
2401 } else if (NewC.ICmpType != SystemZICMP::SignedOnly &&
2402 NewC.Op0.getOpcode() == ISD::SRL &&
2403 isSimpleShift(NewC.Op0, ShiftVal) &&
2404 (MaskVal << ShiftVal != 0) &&
2405 ((CmpVal << ShiftVal) >> ShiftVal) == CmpVal &&
2406 (NewCCMask = getTestUnderMaskCond(BitSize, NewC.CCMask,
2407 MaskVal << ShiftVal,
2408 CmpVal << ShiftVal,
2409 SystemZICMP::UnsignedOnly))) {
2410 NewC.Op0 = NewC.Op0.getOperand(0);
2411 MaskVal <<= ShiftVal;
2412 } else {
2413 NewCCMask = getTestUnderMaskCond(BitSize, NewC.CCMask, MaskVal, CmpVal,
2414 NewC.ICmpType);
2415 if (!NewCCMask)
2416 return;
2417 }
2418
2419 // Go ahead and make the change.
2420 C.Opcode = SystemZISD::TM;
2421 C.Op0 = NewC.Op0;
2422 if (Mask && Mask->getZExtValue() == MaskVal)
2423 C.Op1 = SDValue(Mask, 0);
2424 else
2425 C.Op1 = DAG.getConstant(MaskVal, DL, C.Op0.getValueType());
2426 C.CCValid = SystemZ::CCMASK_TM;
2427 C.CCMask = NewCCMask;
2428}
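// [Editor's note] On the "MaskVal = -(CmpVal & -CmpVal)" step at line 2380
// (illustration only): CmpVal & -CmpVal isolates the lowest set bit of
// CmpVal, and negating it produces a mask of that bit and all higher bits.
// E.g. CmpVal 0x100 gives MaskVal 0xffffffffffffff00, since an unsigned
// ordered compare against 0x100 only depends on bits 8 and above.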
2429
2430// See whether the comparison argument contains a redundant AND
2431// and remove it if so. This sometimes happens due to the generic
2432// BRCOND expansion.
2433static void adjustForRedundantAnd(SelectionDAG &DAG, const SDLoc &DL,
2434 Comparison &C) {
2435 if (C.Op0.getOpcode() != ISD::AND)
2436 return;
2437 auto *Mask = dyn_cast<ConstantSDNode>(C.Op0.getOperand(1));
2438 if (!Mask)
2439 return;
2440 KnownBits Known = DAG.computeKnownBits(C.Op0.getOperand(0));
2441 if ((~Known.Zero).getZExtValue() & ~Mask->getZExtValue())
2442 return;
2443
2444 C.Op0 = C.Op0.getOperand(0);
2445}
2446
2447// Return a Comparison that tests the condition-code result of intrinsic
2448// node Call against constant integer CC using comparison code Cond.
2449// Opcode is the opcode of the SystemZISD operation for the intrinsic
2450// and CCValid is the set of possible condition-code results.
2451static Comparison getIntrinsicCmp(SelectionDAG &DAG, unsigned Opcode,
2452 SDValue Call, unsigned CCValid, uint64_t CC,
2453 ISD::CondCode Cond) {
2454 Comparison C(Call, SDValue());
2455 C.Opcode = Opcode;
2456 C.CCValid = CCValid;
2457 if (Cond == ISD::SETEQ)
2458 // bit 3 for CC==0, bit 0 for CC==3, always false for CC>3.
2459 C.CCMask = CC < 4 ? 1 << (3 - CC) : 0;
2460 else if (Cond == ISD::SETNE)
2461 // ...and the inverse of that.
2462 C.CCMask = CC < 4 ? ~(1 << (3 - CC)) : -1;
2463 else if (Cond == ISD::SETLT || Cond == ISD::SETULT)
2464 // bits above bit 3 for CC==0 (always false), bits above bit 0 for CC==3,
2465 // always true for CC>3.
2466 C.CCMask = CC < 4 ? ~0U << (4 - CC) : -1;
2467 else if (Cond == ISD::SETGE || Cond == ISD::SETUGE)
2468 // ...and the inverse of that.
2469 C.CCMask = CC < 4 ? ~(~0U << (4 - CC)) : 0;
2470 else if (Cond == ISD::SETLE || Cond == ISD::SETULE)
2471 // bit 3 and above for CC==0, bit 0 and above for CC==3 (always true),
2472 // always true for CC>3.
2473 C.CCMask = CC < 4 ? ~0U << (3 - CC) : -1;
2474 else if (Cond == ISD::SETGT || Cond == ISD::SETUGT)
2475 // ...and the inverse of that.
2476 C.CCMask = CC < 4 ? ~(~0U << (3 - CC)) : 0;
2477 else
2478 llvm_unreachable("Unexpected integer comparison type");
2479 C.CCMask &= CCValid;
2480 return C;
2481}
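// [Editor's note] The mask arithmetic above in a worked case (illustration
// only): the four CC values map to mask bits 8, 4, 2 and 1, CC==0 being
// bit 3. For Cond == SETEQ with CC == 1, CCMask = 1 << (3 - 1) = 4, the
// bit selecting exactly the CC==1 outcome; SETNE with CC == 1 gives the
// complement 8 | 2 | 1 once masked with CCValid.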
2482
2483// Decide how to implement a comparison of type Cond between CmpOp0 and CmpOp1.
2484static Comparison getCmp(SelectionDAG &DAG, SDValue CmpOp0, SDValue CmpOp1,
2485 ISD::CondCode Cond, const SDLoc &DL) {
2486 if (CmpOp1.getOpcode() == ISD::Constant) {
2487 uint64_t Constant = cast<ConstantSDNode>(CmpOp1)->getZExtValue();
2488 unsigned Opcode, CCValid;
2489 if (CmpOp0.getOpcode() == ISD::INTRINSIC_W_CHAIN &&
2490 CmpOp0.getResNo() == 0 && CmpOp0->hasNUsesOfValue(1, 0) &&
2491 isIntrinsicWithCCAndChain(CmpOp0, Opcode, CCValid))
2492 return getIntrinsicCmp(DAG, Opcode, CmpOp0, CCValid, Constant, Cond);
2493 if (CmpOp0.getOpcode() == ISD::INTRINSIC_WO_CHAIN &&
2494 CmpOp0.getResNo() == CmpOp0->getNumValues() - 1 &&
2495 isIntrinsicWithCC(CmpOp0, Opcode, CCValid))
2496 return getIntrinsicCmp(DAG, Opcode, CmpOp0, CCValid, Constant, Cond);
2497 }
2498 Comparison C(CmpOp0, CmpOp1);
2499 C.CCMask = CCMaskForCondCode(Cond);
2500 if (C.Op0.getValueType().isFloatingPoint()) {
2501 C.CCValid = SystemZ::CCMASK_FCMP;
2502 C.Opcode = SystemZISD::FCMP;
2503 adjustForFNeg(C);
2504 } else {
2505 C.CCValid = SystemZ::CCMASK_ICMP;
2506 C.Opcode = SystemZISD::ICMP;
2507 // Choose the type of comparison. Equality and inequality tests can
2508 // use either signed or unsigned comparisons. The choice also doesn't
2509 // matter if both sign bits are known to be clear. In those cases we
2510 // want to give the main isel code the freedom to choose whichever
2511 // form fits best.
2512 if (C.CCMask == SystemZ::CCMASK_CMP_EQ ||
2513 C.CCMask == SystemZ::CCMASK_CMP_NE ||
2514 (DAG.SignBitIsZero(C.Op0) && DAG.SignBitIsZero(C.Op1)))
2515 C.ICmpType = SystemZICMP::Any;
2516 else if (C.CCMask & SystemZ::CCMASK_CMP_UO)
2517 C.ICmpType = SystemZICMP::UnsignedOnly;
2518 else
2519 C.ICmpType = SystemZICMP::SignedOnly;
2520 C.CCMask &= ~SystemZ::CCMASK_CMP_UO;
2521 adjustForRedundantAnd(DAG, DL, C);
2522 adjustZeroCmp(DAG, DL, C);
2523 adjustSubwordCmp(DAG, DL, C);
2524 adjustForSubtraction(DAG, DL, C);
2525 adjustForLTGFR(C);
2526 adjustICmpTruncate(DAG, DL, C);
2527 }
2528
2529 if (shouldSwapCmpOperands(C)) {
2530 std::swap(C.Op0, C.Op1);
2531 C.CCMask = reverseCCMask(C.CCMask);
2532 }
2533
2534 adjustForTestUnderMask(DAG, DL, C);
2535 return C;
2536}
2537
2538// Emit the comparison instruction described by C.
2539static SDValue emitCmp(SelectionDAG &DAG, const SDLoc &DL, Comparison &C) {
2540 if (!C.Op1.getNode()) {
2541 SDNode *Node;
2542 switch (C.Op0.getOpcode()) {
2543 case ISD::INTRINSIC_W_CHAIN:
2544 Node = emitIntrinsicWithCCAndChain(DAG, C.Op0, C.Opcode);
2545 return SDValue(Node, 0);
2546 case ISD::INTRINSIC_WO_CHAIN:
2547 Node = emitIntrinsicWithCC(DAG, C.Op0, C.Opcode);
2548 return SDValue(Node, Node->getNumValues() - 1);
2549 default:
2550 llvm_unreachable("Invalid comparison operands");
2551 }
2552 }
2553 if (C.Opcode == SystemZISD::ICMP)
2554 return DAG.getNode(SystemZISD::ICMP, DL, MVT::i32, C.Op0, C.Op1,
2555 DAG.getTargetConstant(C.ICmpType, DL, MVT::i32));
2556 if (C.Opcode == SystemZISD::TM) {
2557 bool RegisterOnly = (bool(C.CCMask & SystemZ::CCMASK_TM_MIXED_MSB_0) !=
2558 bool(C.CCMask & SystemZ::CCMASK_TM_MIXED_MSB_1));
2559 return DAG.getNode(SystemZISD::TM, DL, MVT::i32, C.Op0, C.Op1,
2560 DAG.getTargetConstant(RegisterOnly, DL, MVT::i32));
2561 }
2562 return DAG.getNode(C.Opcode, DL, MVT::i32, C.Op0, C.Op1);
2563}
2564
2565// Implement a 32-bit *MUL_LOHI operation by extending both operands to
2566// 64 bits. Extend is the extension type to use. Store the high part
2567// in Hi and the low part in Lo.
2568static void lowerMUL_LOHI32(SelectionDAG &DAG, const SDLoc &DL, unsigned Extend,
2569 SDValue Op0, SDValue Op1, SDValue &Hi,
2570 SDValue &Lo) {
2571 Op0 = DAG.getNode(Extend, DL, MVT::i64, Op0);
2572 Op1 = DAG.getNode(Extend, DL, MVT::i64, Op1);
2573 SDValue Mul = DAG.getNode(ISD::MUL, DL, MVT::i64, Op0, Op1);
2574 Hi = DAG.getNode(ISD::SRL, DL, MVT::i64, Mul,
2575 DAG.getConstant(32, DL, MVT::i64));
2576 Hi = DAG.getNode(ISD::TRUNCATE, DL, MVT::i32, Hi);
2577 Lo = DAG.getNode(ISD::TRUNCATE, DL, MVT::i32, Mul);
2578}
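// [Editor's note] Worked example (illustration only) with Extend ==
// ISD::ZERO_EXTEND: Op0 = Op1 = 0xffffffff widens to i64 and multiplies to
// 0xfffffffe00000001, so Hi = 0xfffffffe and Lo = 0x00000001, the expected
// UMUL_LOHI result pair.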
2579
2580// Lower a binary operation that produces two VT results, one in each
2581// half of a GR128 pair. Op0 and Op1 are the VT operands to the operation,
2582// and Opcode performs the GR128 operation. Store the even register result
2583// in Even and the odd register result in Odd.
2584static void lowerGR128Binary(SelectionDAG &DAG, const SDLoc &DL, EVT VT,
2585 unsigned Opcode, SDValue Op0, SDValue Op1,
2586 SDValue &Even, SDValue &Odd) {
2587 SDValue Result = DAG.getNode(Opcode, DL, MVT::Untyped, Op0, Op1);
2588 bool Is32Bit = is32Bit(VT);
2589 Even = DAG.getTargetExtractSubreg(SystemZ::even128(Is32Bit), DL, VT, Result);
2590 Odd = DAG.getTargetExtractSubreg(SystemZ::odd128(Is32Bit), DL, VT, Result);
2591}
2592
2593// Return an i32 value that is 1 if the CC value produced by CCReg is
2594// in the mask CCMask and 0 otherwise. CC is known to have a value
2595// in CCValid, so other values can be ignored.
2596static SDValue emitSETCC(SelectionDAG &DAG, const SDLoc &DL, SDValue CCReg,
2597 unsigned CCValid, unsigned CCMask) {
2598 SDValue Ops[] = {DAG.getConstant(1, DL, MVT::i32),
2599 DAG.getConstant(0, DL, MVT::i32),
2600 DAG.getTargetConstant(CCValid, DL, MVT::i32),
2601 DAG.getTargetConstant(CCMask, DL, MVT::i32), CCReg};
2602 return DAG.getNode(SystemZISD::SELECT_CCMASK, DL, MVT::i32, Ops);
2603}
2604
2605// Return the SystemZISD vector comparison operation for CC, or 0 if it cannot
2606// be done directly. IsFP is true if CC is for a floating-point rather than
2607// integer comparison.
2608static unsigned getVectorComparison(ISD::CondCode CC, bool IsFP) {
2609 switch (CC) {
2610 case ISD::SETOEQ:
2611 case ISD::SETEQ:
2612 return IsFP ? SystemZISD::VFCMPE : SystemZISD::VICMPE;
2613
2614 case ISD::SETOGE:
2615 case ISD::SETGE:
2616 return IsFP ? SystemZISD::VFCMPHE : static_cast<SystemZISD::NodeType>(0);
2617
2618 case ISD::SETOGT:
2619 case ISD::SETGT:
2620 return IsFP ? SystemZISD::VFCMPH : SystemZISD::VICMPH;
2621
2622 case ISD::SETUGT:
2623 return IsFP ? static_cast<SystemZISD::NodeType>(0) : SystemZISD::VICMPHL;
2624
2625 default:
2626 return 0;
2627 }
2628}
2629
2630// Return the SystemZISD vector comparison operation for CC or its inverse,
2631// or 0 if neither can be done directly. Indicate in Invert whether the
2632// result is for the inverse of CC. IsFP is true if CC is for a
2633// floating-point rather than integer comparison.
2634static unsigned getVectorComparisonOrInvert(ISD::CondCode CC, bool IsFP,
2635 bool &Invert) {
2636 if (unsigned Opcode = getVectorComparison(CC, IsFP)) {
2637 Invert = false;
2638 return Opcode;
2639 }
2640
2641 CC = ISD::getSetCCInverse(CC, !IsFP);
2642 if (unsigned Opcode = getVectorComparison(CC, IsFP)) {
2643 Invert = true;
2644 return Opcode;
2645 }
2646
2647 return 0;
2648}
2649
2650// Return a v2f64 that contains the extended form of elements Start and Start+1
2651// of v4f32 value Op.
2652static SDValue expandV4F32ToV2F64(SelectionDAG &DAG, int Start, const SDLoc &DL,
2653 SDValue Op) {
2654 int Mask[] = { Start, -1, Start + 1, -1 };
2655 Op = DAG.getVectorShuffle(MVT::v4f32, DL, Op, DAG.getUNDEF(MVT::v4f32), Mask);
2656 return DAG.getNode(SystemZISD::VEXTEND, DL, MVT::v2f64, Op);
2657}
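// [Editor's note] Editorial reading of the shuffle above, not original
// commentary: for Start == 0 the mask {0, -1, 1, -1} places elements 0 and
// 1 of Op in the even lanes (odd lanes undef), and VEXTEND then widens the
// even f32 lanes into the two f64 results; Start == 2 does the same for
// elements 2 and 3.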
2658
2659// Build a comparison of vectors CmpOp0 and CmpOp1 using opcode Opcode,
2660// producing a result of type VT.
2661SDValue SystemZTargetLowering::getVectorCmp(SelectionDAG &DAG, unsigned Opcode,
2662 const SDLoc &DL, EVT VT,
2663 SDValue CmpOp0,
2664 SDValue CmpOp1) const {
2665 // There is no hardware support for v4f32 (unless we have the vector
2666 // enhancements facility 1), so extend the vector into two v2f64s
2667 // and compare those.
2668 if (CmpOp0.getValueType() == MVT::v4f32 &&
2669 !Subtarget.hasVectorEnhancements1()) {
2670 SDValue H0 = expandV4F32ToV2F64(DAG, 0, DL, CmpOp0);
2671 SDValue L0 = expandV4F32ToV2F64(DAG, 2, DL, CmpOp0);
2672 SDValue H1 = expandV4F32ToV2F64(DAG, 0, DL, CmpOp1);
2673 SDValue L1 = expandV4F32ToV2F64(DAG, 2, DL, CmpOp1);
2674 SDValue HRes = DAG.getNode(Opcode, DL, MVT::v2i64, H0, H1);
2675 SDValue LRes = DAG.getNode(Opcode, DL, MVT::v2i64, L0, L1);
2676 return DAG.getNode(SystemZISD::PACK, DL, VT, HRes, LRes);
2677 }
2678 return DAG.getNode(Opcode, DL, VT, CmpOp0, CmpOp1);
2679}
2680
2681// Lower a vector comparison of type CC between CmpOp0 and CmpOp1, producing
2682// an integer mask of type VT.
2683SDValue SystemZTargetLowering::lowerVectorSETCC(SelectionDAG &DAG,
2684 const SDLoc &DL, EVT VT,
2685 ISD::CondCode CC,
2686 SDValue CmpOp0,
2687 SDValue CmpOp1) const {
2688 bool IsFP = CmpOp0.getValueType().isFloatingPoint();
2689 bool Invert = false;
2690 SDValue Cmp;
2691 switch (CC) {
2692 // Handle tests for order using (or (ogt y x) (oge x y)).
2693 case ISD::SETUO:
2694 Invert = true;
2695 LLVM_FALLTHROUGH;
2696 case ISD::SETO: {
2697 assert(IsFP && "Unexpected integer comparison");
2698 SDValue LT = getVectorCmp(DAG, SystemZISD::VFCMPH, DL, VT, CmpOp1, CmpOp0);
2699 SDValue GE = getVectorCmp(DAG, SystemZISD::VFCMPHE, DL, VT, CmpOp0, CmpOp1);
2700 Cmp = DAG.getNode(ISD::OR, DL, VT, LT, GE);
2701 break;
2702 }
2703
2704 // Handle <> tests using (or (ogt y x) (ogt x y)).
2705 case ISD::SETUEQ:
2706 Invert = true;
2707 LLVM_FALLTHROUGH;
2708 case ISD::SETONE: {
2709 assert(IsFP && "Unexpected integer comparison");
2710 SDValue LT = getVectorCmp(DAG, SystemZISD::VFCMPH, DL, VT, CmpOp1, CmpOp0);
2711 SDValue GT = getVectorCmp(DAG, SystemZISD::VFCMPH, DL, VT, CmpOp0, CmpOp1);
2712 Cmp = DAG.getNode(ISD::OR, DL, VT, LT, GT);
2713 break;
2714 }
2715
2716 // Otherwise a single comparison is enough. It doesn't really
2717 // matter whether we try the inversion or the swap first, since
2718 // there are no cases where both work.
2719 default:
2720 if (unsigned Opcode = getVectorComparisonOrInvert(CC, IsFP, Invert))
2721 Cmp = getVectorCmp(DAG, Opcode, DL, VT, CmpOp0, CmpOp1);
2722 else {
2723 CC = ISD::getSetCCSwappedOperands(CC);
2724 if (unsigned Opcode = getVectorComparisonOrInvert(CC, IsFP, Invert))
2725 Cmp = getVectorCmp(DAG, Opcode, DL, VT, CmpOp1, CmpOp0);
2726 else
2727 llvm_unreachable("Unhandled comparison");
2728 }
2729 break;
2730 }
2731 if (Invert) {
2732 SDValue Mask =
2733 DAG.getSplatBuildVector(VT, DL, DAG.getConstant(-1, DL, MVT::i64));
2734 Cmp = DAG.getNode(ISD::XOR, DL, VT, Cmp, Mask);
2735 }
2736 return Cmp;
2737}
2738
2739SDValue SystemZTargetLowering::lowerSETCC(SDValue Op,
2740 SelectionDAG &DAG) const {
2741 SDValue CmpOp0 = Op.getOperand(0);
2742 SDValue CmpOp1 = Op.getOperand(1);
2743 ISD::CondCode CC = cast<CondCodeSDNode>(Op.getOperand(2))->get();
2744 SDLoc DL(Op);
2745 EVT VT = Op.getValueType();
2746 if (VT.isVector())
2747 return lowerVectorSETCC(DAG, DL, VT, CC, CmpOp0, CmpOp1);
2748
2749 Comparison C(getCmp(DAG, CmpOp0, CmpOp1, CC, DL));
2750 SDValue CCReg = emitCmp(DAG, DL, C);
2751 return emitSETCC(DAG, DL, CCReg, C.CCValid, C.CCMask);
2752}
2753
2754SDValue SystemZTargetLowering::lowerBR_CC(SDValue Op, SelectionDAG &DAG) const {
2755 ISD::CondCode CC = cast<CondCodeSDNode>(Op.getOperand(1))->get();
2756 SDValue CmpOp0 = Op.getOperand(2);
2757 SDValue CmpOp1 = Op.getOperand(3);
2758 SDValue Dest = Op.getOperand(4);
2759 SDLoc DL(Op);
2760
2761 Comparison C(getCmp(DAG, CmpOp0, CmpOp1, CC, DL));
2762 SDValue CCReg = emitCmp(DAG, DL, C);
2763 return DAG.getNode(
2764 SystemZISD::BR_CCMASK, DL, Op.getValueType(), Op.getOperand(0),
2765 DAG.getTargetConstant(C.CCValid, DL, MVT::i32),
2766 DAG.getTargetConstant(C.CCMask, DL, MVT::i32), Dest, CCReg);
2767}
2768
2769// Return true if Pos is CmpOp and Neg is the negative of CmpOp,
2770// allowing Pos and Neg to be wider than CmpOp.
2771static bool isAbsolute(SDValue CmpOp, SDValue Pos, SDValue Neg) {
2772 return (Neg.getOpcode() == ISD::SUB &&
2773 Neg.getOperand(0).getOpcode() == ISD::Constant &&
2774 cast<ConstantSDNode>(Neg.getOperand(0))->getZExtValue() == 0 &&
2775 Neg.getOperand(1) == Pos &&
2776 (Pos == CmpOp ||
2777 (Pos.getOpcode() == ISD::SIGN_EXTEND &&
2778 Pos.getOperand(0) == CmpOp)));
2779}
2780
2781// Return the absolute or negative absolute of Op; IsNegative decides which.
2782static SDValue getAbsolute(SelectionDAG &DAG, const SDLoc &DL, SDValue Op,
2783 bool IsNegative) {
2784 Op = DAG.getNode(SystemZISD::IABS, DL, Op.getValueType(), Op);
2785 if (IsNegative)
2786 Op = DAG.getNode(ISD::SUB, DL, Op.getValueType(),
2787 DAG.getConstant(0, DL, Op.getValueType()), Op);
2788 return Op;
2789}
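In scalar terms the two nodes compute |x| and optionally 0 - |x| (the LPGFR and LNGFR forms respectively); a one-function sketch with an illustrative name:

#include <cstdint>

// Scalar sketch of getAbsolute: IABS computes |x|; the optional
// 0 - |x| step yields the negative-absolute form.
static int64_t getAbsolute(int64_t X, bool IsNegative) {
  int64_t Abs = X < 0 ? -X : X; // SystemZISD::IABS
  return IsNegative ? -Abs : Abs;
}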
2790
2791SDValue SystemZTargetLowering::lowerSELECT_CC(SDValue Op,
2792 SelectionDAG &DAG) const {
2793 SDValue CmpOp0 = Op.getOperand(0);
2794 SDValue CmpOp1 = Op.getOperand(1);
2795 SDValue TrueOp = Op.getOperand(2);
2796 SDValue FalseOp = Op.getOperand(3);
2797 ISD::CondCode CC = cast<CondCodeSDNode>(Op.getOperand(4))->get();
2798 SDLoc DL(Op);
2799
2800 Comparison C(getCmp(DAG, CmpOp0, CmpOp1, CC, DL));
2801
2802 // Check for absolute and negative-absolute selections, including those
2803 // where the comparison value is sign-extended (for LPGFR and LNGFR).
2804 // This check supplements the one in DAGCombiner.
2805 if (C.Opcode == SystemZISD::ICMP &&
2806 C.CCMask != SystemZ::CCMASK_CMP_EQ &&
2807 C.CCMask != SystemZ::CCMASK_CMP_NE &&
2808 C.Op1.getOpcode() == ISD::Constant &&
2809 cast<ConstantSDNode>(C.Op1)->getZExtValue() == 0) {
2810 if (isAbsolute(C.Op0, TrueOp, FalseOp))
2811 return getAbsolute(DAG, DL, TrueOp, C.CCMask & SystemZ::CCMASK_CMP_LT);
2812 if (isAbsolute(C.Op0, FalseOp, TrueOp))
2813 return getAbsolute(DAG, DL, FalseOp, C.CCMask & SystemZ::CCMASK_CMP_GT);
2814 }
2815
2816 SDValue CCReg = emitCmp(DAG, DL, C);
2817 SDValue Ops[] = {TrueOp, FalseOp,
2818 DAG.getTargetConstant(C.CCValid, DL, MVT::i32),
2819 DAG.getTargetConstant(C.CCMask, DL, MVT::i32), CCReg};
2820
2821 return DAG.getNode(SystemZISD::SELECT_CCMASK, DL, Op.getValueType(), Ops);
2822}
2823
2824SDValue SystemZTargetLowering::lowerGlobalAddress(GlobalAddressSDNode *Node,
2825 SelectionDAG &DAG) const {
2826 SDLoc DL(Node);
2827 const GlobalValue *GV = Node->getGlobal();
2828 int64_t Offset = Node->getOffset();
2829 EVT PtrVT = getPointerTy(DAG.getDataLayout());
2830 CodeModel::Model CM = DAG.getTarget().getCodeModel();
2831
2832 SDValue Result;
2833 if (Subtarget.isPC32DBLSymbol(GV, CM)) {
2834 if (isInt<32>(Offset)) {
2835 // Assign anchors at 1<<12 byte boundaries.
2836 uint64_t Anchor = Offset & ~uint64_t(0xfff);
2837 Result = DAG.getTargetGlobalAddress(GV, DL, PtrVT, Anchor);
2838 Result = DAG.getNode(SystemZISD::PCREL_WRAPPER, DL, PtrVT, Result);
2839
2840 // The offset can be folded into the address if it is aligned to a
2841 // halfword.
2842 Offset -= Anchor;
2843 if (Offset != 0 && (Offset & 1) == 0) {
2844 SDValue Full =
2845 DAG.getTargetGlobalAddress(GV, DL, PtrVT, Anchor + Offset);
2846 Result = DAG.getNode(SystemZISD::PCREL_OFFSET, DL, PtrVT, Full, Result);
2847 Offset = 0;
2848 }
2849 } else {
2850 // Conservatively load a constant offset greater than 32 bits into a
2851 // register below.
2852 Result = DAG.getTargetGlobalAddress(GV, DL, PtrVT);
2853 Result = DAG.getNode(SystemZISD::PCREL_WRAPPER, DL, PtrVT, Result);
2854 }
2855 } else {
2856 Result = DAG.getTargetGlobalAddress(GV, DL, PtrVT, 0, SystemZII::MO_GOT);
2857 Result = DAG.getNode(SystemZISD::PCREL_WRAPPER, DL, PtrVT, Result);
2858 Result = DAG.getLoad(PtrVT, DL, DAG.getEntryNode(), Result,
2859 MachinePointerInfo::getGOT(DAG.getMachineFunction()));
2860 }
2861
2862 // If there was a non-zero offset that we didn't fold, create an explicit
2863 // addition for it.
2864 if (Offset != 0)
2865 Result = DAG.getNode(ISD::ADD, DL, PtrVT, Result,
2866 DAG.getConstant(Offset, DL, PtrVT));
2867
2868 return Result;
2869}
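Numerically, the anchoring works as in this sketch (hypothetical offset; the PC-relative instruction reaches the 4K-aligned anchor, and a non-zero residual folds into the address only when it is halfword-aligned):

#include <cstdint>
#include <cstdio>

int main() {
  // Hypothetical symbol offset; values are illustrative only.
  int64_t Offset = 0x1802a;
  uint64_t Anchor = Offset & ~uint64_t(0xfff); // 4K-aligned anchor: 0x18000
  int64_t Residual = Offset - (int64_t)Anchor; // 0x2a
  // Only an even (halfword-aligned) residual can fold into the address;
  // anything else becomes an explicit ADD at the end of the function.
  bool Foldable = Residual != 0 && (Residual & 1) == 0;
  std::printf("anchor=%#llx residual=%#llx foldable=%d\n",
              (unsigned long long)Anchor, (unsigned long long)Residual,
              (int)Foldable);
}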
2870
2871SDValue SystemZTargetLowering::lowerTLSGetOffset(GlobalAddressSDNode *Node,
2872 SelectionDAG &DAG,
2873 unsigned Opcode,
2874 SDValue GOTOffset) const {
2875 SDLoc DL(Node);
2876 EVT PtrVT = getPointerTy(DAG.getDataLayout());
2877 SDValue Chain = DAG.getEntryNode();
2878 SDValue Glue;
2879
2880 if (DAG.getMachineFunction().getFunction().getCallingConv() ==
2881 CallingConv::GHC)
2882 report_fatal_error("In GHC calling convention TLS is not supported");
2883
2884 // __tls_get_offset takes the GOT offset in %r2 and the GOT in %r12.
2885 SDValue GOT = DAG.getGLOBAL_OFFSET_TABLE(PtrVT);
2886 Chain = DAG.getCopyToReg(Chain, DL, SystemZ::R12D, GOT, Glue);
2887 Glue = Chain.getValue(1);
2888 Chain = DAG.getCopyToReg(Chain, DL, SystemZ::R2D, GOTOffset, Glue);
2889 Glue = Chain.getValue(1);
2890
2891 // The first call operand is the chain and the second is the TLS symbol.
2892 SmallVector<SDValue, 8> Ops;
2893 Ops.push_back(Chain);
2894 Ops.push_back(DAG.getTargetGlobalAddress(Node->getGlobal(), DL,
2895 Node->getValueType(0),
2896 0, 0));
2897
2898 // Add argument registers to the end of the list so that they are
2899 // known live into the call.
2900 Ops.push_back(DAG.getRegister(SystemZ::R2D, PtrVT));
2901 Ops.push_back(DAG.getRegister(SystemZ::R12D, PtrVT));
2902
2903 // Add a register mask operand representing the call-preserved registers.
2904 const TargetRegisterInfo *TRI = Subtarget.getRegisterInfo();
2905 const uint32_t *Mask =
2906 TRI->getCallPreservedMask(DAG.getMachineFunction(), CallingConv::C);
2907 assert(Mask && "Missing call preserved mask for calling convention");
2908 Ops.push_back(DAG.getRegisterMask(Mask));
2909
2910 // Glue the call to the argument copies.
2911 Ops.push_back(Glue);
2912
2913 // Emit the call.
2914 SDVTList NodeTys = DAG.getVTList(MVT::Other, MVT::Glue);
2915 Chain = DAG.getNode(Opcode, DL, NodeTys, Ops);
2916 Glue = Chain.getValue(1);
2917
2918 // Copy the return value from %r2.
2919 return DAG.getCopyFromReg(Chain, DL, SystemZ::R2D, PtrVT, Glue);
2920}
2921
2922SDValue SystemZTargetLowering::lowerThreadPointer(const SDLoc &DL,
2923 SelectionDAG &DAG) const {
2924 SDValue Chain = DAG.getEntryNode();
2925 EVT PtrVT = getPointerTy(DAG.getDataLayout());
2926
2927 // The high part of the thread pointer is in access register 0.
2928 SDValue TPHi = DAG.getCopyFromReg(Chain, DL, SystemZ::A0, MVT::i32);
2929 TPHi = DAG.getNode(ISD::ANY_EXTEND, DL, PtrVT, TPHi);
2930
2931 // The low part of the thread pointer is in access register 1.
2932 SDValue TPLo = DAG.getCopyFromReg(Chain, DL, SystemZ::A1, MVT::i32);
2933 TPLo = DAG.getNode(ISD::ZERO_EXTEND, DL, PtrVT, TPLo);
2934
2935 // Merge them into a single 64-bit address.
2936 SDValue TPHiShifted = DAG.getNode(ISD::SHL, DL, PtrVT, TPHi,
2937 DAG.getConstant(32, DL, PtrVT));
2938 return DAG.getNode(ISD::OR, DL, PtrVT, TPHiShifted, TPLo);
2939}
2940
2941SDValue SystemZTargetLowering::lowerGlobalTLSAddress(GlobalAddressSDNode *Node,
2942 SelectionDAG &DAG) const {
2943 if (DAG.getTarget().useEmulatedTLS())
2944 return LowerToTLSEmulatedModel(Node, DAG);
2945 SDLoc DL(Node);
2946 const GlobalValue *GV = Node->getGlobal();
2947 EVT PtrVT = getPointerTy(DAG.getDataLayout());
2948 TLSModel::Model model = DAG.getTarget().getTLSModel(GV);
2949
2950 if (DAG.getMachineFunction().getFunction().getCallingConv() ==
2951 CallingConv::GHC)
2952 report_fatal_error("In GHC calling convention TLS is not supported");
2953
2954 SDValue TP = lowerThreadPointer(DL, DAG);
2955
2956 // Get the offset of GA from the thread pointer, based on the TLS model.
2957 SDValue Offset;
2958 switch (model) {
2959 case TLSModel::GeneralDynamic: {
2960 // Load the GOT offset of the tls_index (module ID / per-symbol offset).
2961 SystemZConstantPoolValue *CPV =
2962 SystemZConstantPoolValue::Create(GV, SystemZCP::TLSGD);
2963
2964 Offset = DAG.getConstantPool(CPV, PtrVT, 8);
2965 Offset = DAG.getLoad(
2966 PtrVT, DL, DAG.getEntryNode(), Offset,
2967 MachinePointerInfo::getConstantPool(DAG.getMachineFunction()));
2968
2969 // Call __tls_get_offset to retrieve the offset.
2970 Offset = lowerTLSGetOffset(Node, DAG, SystemZISD::TLS_GDCALL, Offset);
2971 break;
2972 }
2973
2974 case TLSModel::LocalDynamic: {
2975 // Load the GOT offset of the module ID.
2976 SystemZConstantPoolValue *CPV =
2977 SystemZConstantPoolValue::Create(GV, SystemZCP::TLSLDM);
2978
2979 Offset = DAG.getConstantPool(CPV, PtrVT, 8);
2980 Offset = DAG.getLoad(
2981 PtrVT, DL, DAG.getEntryNode(), Offset,
2982 MachinePointerInfo::getConstantPool(DAG.getMachineFunction()));
2983
2984 // Call __tls_get_offset to retrieve the module base offset.
2985 Offset = lowerTLSGetOffset(Node, DAG, SystemZISD::TLS_LDCALL, Offset);
2986
2987 // Note: The SystemZLDCleanupPass will remove redundant computations
2988 // of the module base offset. Count total number of local-dynamic
2989 // accesses to trigger execution of that pass.
2990 SystemZMachineFunctionInfo* MFI =
2991 DAG.getMachineFunction().getInfo<SystemZMachineFunctionInfo>();
2992 MFI->incNumLocalDynamicTLSAccesses();
2993
2994 // Add the per-symbol offset.
2995 CPV = SystemZConstantPoolValue::Create(GV, SystemZCP::DTPOFF);
2996
2997 SDValue DTPOffset = DAG.getConstantPool(CPV, PtrVT, 8);
2998 DTPOffset = DAG.getLoad(
2999 PtrVT, DL, DAG.getEntryNode(), DTPOffset,
3000 MachinePointerInfo::getConstantPool(DAG.getMachineFunction()));
3001
3002 Offset = DAG.getNode(ISD::ADD, DL, PtrVT, Offset, DTPOffset);
3003 break;
3004 }
3005
3006 case TLSModel::InitialExec: {
3007 // Load the offset from the GOT.
3008 Offset = DAG.getTargetGlobalAddress(GV, DL, PtrVT, 0,
3009 SystemZII::MO_INDNTPOFF);
3010 Offset = DAG.getNode(SystemZISD::PCREL_WRAPPER, DL, PtrVT, Offset);
3011 Offset =
3012 DAG.getLoad(PtrVT, DL, DAG.getEntryNode(), Offset,
3013 MachinePointerInfo::getGOT(DAG.getMachineFunction()));
3014 break;
3015 }
3016
3017 case TLSModel::LocalExec: {
3018 // Force the offset into the constant pool and load it from there.
3019 SystemZConstantPoolValue *CPV =
3020 SystemZConstantPoolValue::Create(GV, SystemZCP::NTPOFF);
3021
3022 Offset = DAG.getConstantPool(CPV, PtrVT, 8);
3023 Offset = DAG.getLoad(
3024 PtrVT, DL, DAG.getEntryNode(), Offset,
3025 MachinePointerInfo::getConstantPool(DAG.getMachineFunction()));
3026 break;
3027 }
3028 }
3029
3030 // Add the base and offset together.
3031 return DAG.getNode(ISD::ADD, DL, PtrVT, TP, Offset);
3032}
3033
3034SDValue SystemZTargetLowering::lowerBlockAddress(BlockAddressSDNode *Node,
3035 SelectionDAG &DAG) const {
3036 SDLoc DL(Node);
3037 const BlockAddress *BA = Node->getBlockAddress();
3038 int64_t Offset = Node->getOffset();
3039 EVT PtrVT = getPointerTy(DAG.getDataLayout());
3040
3041 SDValue Result = DAG.getTargetBlockAddress(BA, PtrVT, Offset);
3042 Result = DAG.getNode(SystemZISD::PCREL_WRAPPER, DL, PtrVT, Result);
3043 return Result;
3044}
3045
3046SDValue SystemZTargetLowering::lowerJumpTable(JumpTableSDNode *JT,
3047 SelectionDAG &DAG) const {
3048 SDLoc DL(JT);
3049 EVT PtrVT = getPointerTy(DAG.getDataLayout());
3050 SDValue Result = DAG.getTargetJumpTable(JT->getIndex(), PtrVT);
3051
3052 // Use LARL to load the address of the table.
3053 return DAG.getNode(SystemZISD::PCREL_WRAPPER, DL, PtrVT, Result);
3054}
3055
3056SDValue SystemZTargetLowering::lowerConstantPool(ConstantPoolSDNode *CP,
3057 SelectionDAG &DAG) const {
3058 SDLoc DL(CP);
3059 EVT PtrVT = getPointerTy(DAG.getDataLayout());
3060
3061 SDValue Result;
3062 if (CP->isMachineConstantPoolEntry())
3063 Result = DAG.getTargetConstantPool(CP->getMachineCPVal(), PtrVT,
3064 CP->getAlignment());
3065 else
3066 Result = DAG.getTargetConstantPool(CP->getConstVal(), PtrVT,
3067 CP->getAlignment(), CP->getOffset());
3068
3069 // Use LARL to load the address of the constant pool entry.
3070 return DAG.getNode(SystemZISD::PCREL_WRAPPER, DL, PtrVT, Result);
3071}
3072
3073SDValue SystemZTargetLowering::lowerFRAMEADDR(SDValue Op,
3074 SelectionDAG &DAG) const {
3075 MachineFunction &MF = DAG.getMachineFunction();
3076 MachineFrameInfo &MFI = MF.getFrameInfo();
3077 MFI.setFrameAddressIsTaken(true);
3078
3079 SDLoc DL(Op);
3080 unsigned Depth = cast<ConstantSDNode>(Op.getOperand(0))->getZExtValue();
3081 EVT PtrVT = getPointerTy(DAG.getDataLayout());
3082
3083 // If the back chain frame index has not been allocated yet, do so.
3084 SystemZMachineFunctionInfo *FI = MF.getInfo<SystemZMachineFunctionInfo>();
3085 int BackChainIdx = FI->getFramePointerSaveIndex();
3086 if (!BackChainIdx) {
3087 // By definition, the frame address is the address of the back chain.
3088 BackChainIdx = MFI.CreateFixedObject(8, -SystemZMC::CallFrameSize, false);
3089 FI->setFramePointerSaveIndex(BackChainIdx);
3090 }
3091 SDValue BackChain = DAG.getFrameIndex(BackChainIdx, PtrVT);
3092
3093 // FIXME The frontend should detect this case.
3094 if (Depth > 0) {
3095 report_fatal_error("Unsupported stack frame traversal count");
3096 }
3097
3098 return BackChain;
3099}
3100
3101SDValue SystemZTargetLowering::lowerRETURNADDR(SDValue Op,
3102 SelectionDAG &DAG) const {
3103 MachineFunction &MF = DAG.getMachineFunction();
3104 MachineFrameInfo &MFI = MF.getFrameInfo();
3105 MFI.setReturnAddressIsTaken(true);
3106
3107 if (verifyReturnAddressArgumentIsConstant(Op, DAG))
3108 return SDValue();
3109
3110 SDLoc DL(Op);
3111 unsigned Depth = cast<ConstantSDNode>(Op.getOperand(0))->getZExtValue();
3112 EVT PtrVT = getPointerTy(DAG.getDataLayout());
3113
3114 // FIXME The frontend should detect this case.
3115 if (Depth > 0) {
3116 report_fatal_error("Unsupported stack frame traversal count");
3117 }
3118
3119 // Return R14D, which has the return address. Mark it an implicit live-in.
3120 unsigned LinkReg = MF.addLiveIn(SystemZ::R14D, &SystemZ::GR64BitRegClass);
3121 return DAG.getCopyFromReg(DAG.getEntryNode(), DL, LinkReg, PtrVT);
3122}
3123
3124SDValue SystemZTargetLowering::lowerBITCAST(SDValue Op,
3125 SelectionDAG &DAG) const {
3126 SDLoc DL(Op);
3127 SDValue In = Op.getOperand(0);
3128 EVT InVT = In.getValueType();
3129 EVT ResVT = Op.getValueType();
3130
3131 // Convert loads directly. This is normally done by DAGCombiner,
3132 // but we need this case for bitcasts that are created during lowering
3133 // and which are then lowered themselves.
3134 if (auto *LoadN = dyn_cast<LoadSDNode>(In))
3135 if (ISD::isNormalLoad(LoadN)) {
3136 SDValue NewLoad = DAG.getLoad(ResVT, DL, LoadN->getChain(),
3137 LoadN->getBasePtr(), LoadN->getMemOperand());
3138 // Update the chain uses.
3139 DAG.ReplaceAllUsesOfValueWith(SDValue(LoadN, 1), NewLoad.getValue(1));
3140 return NewLoad;
3141 }
3142
3143 if (InVT == MVT::i32 && ResVT == MVT::f32) {
3144 SDValue In64;
3145 if (Subtarget.hasHighWord()) {
3146 SDNode *U64 = DAG.getMachineNode(TargetOpcode::IMPLICIT_DEF, DL,
3147 MVT::i64);
3148 In64 = DAG.getTargetInsertSubreg(SystemZ::subreg_h32, DL,
3149 MVT::i64, SDValue(U64, 0), In);
3150 } else {
3151 In64 = DAG.getNode(ISD::ANY_EXTEND, DL, MVT::i64, In);
3152 In64 = DAG.getNode(ISD::SHL, DL, MVT::i64, In64,
3153 DAG.getConstant(32, DL, MVT::i64));
3154 }
3155 SDValue Out64 = DAG.getNode(ISD::BITCAST, DL, MVT::f64, In64);
3156 return DAG.getTargetExtractSubreg(SystemZ::subreg_h32,
3157 DL, MVT::f32, Out64);
3158 }
3159 if (InVT == MVT::f32 && ResVT == MVT::i32) {
3160 SDNode *U64 = DAG.getMachineNode(TargetOpcode::IMPLICIT_DEF, DL, MVT::f64);
3161 SDValue In64 = DAG.getTargetInsertSubreg(SystemZ::subreg_h32, DL,
3162 MVT::f64, SDValue(U64, 0), In);
3163 SDValue Out64 = DAG.getNode(ISD::BITCAST, DL, MVT::i64, In64);
3164 if (Subtarget.hasHighWord())
3165 return DAG.getTargetExtractSubreg(SystemZ::subreg_h32, DL,
3166 MVT::i32, Out64);
3167 SDValue Shift = DAG.getNode(ISD::SRL, DL, MVT::i64, Out64,
3168 DAG.getConstant(32, DL, MVT::i64));
3169 return DAG.getNode(ISD::TRUNCATE, DL, MVT::i32, Shift);
3170 }
3171 llvm_unreachable("Unexpected bitcast combination");
3172}
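The i32 <-> f32 paths go through a 64-bit container because an f32 occupies the high word of a SystemZ FPR. A host-side bit-level sketch of the i32 -> f32 direction (memcpy stands in for ISD::BITCAST; the function name is illustrative):

#include <cstdint>
#include <cstring>

// Shift the 32 bits into the high half of an i64, reinterpret the
// container as f64, then read the f32 back out of the high word.
static float bitcastI32ToF32(uint32_t In) {
  uint64_t In64 = (uint64_t)In << 32;
  double Container;
  std::memcpy(&Container, &In64, sizeof(Container));
  uint64_t Bits;
  std::memcpy(&Bits, &Container, sizeof(Bits));
  uint32_t Hi = (uint32_t)(Bits >> 32);
  float F;
  std::memcpy(&F, &Hi, sizeof(F));
  return F;
}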
3173
3174SDValue SystemZTargetLowering::lowerVASTART(SDValue Op,
3175 SelectionDAG &DAG) const {
3176 MachineFunction &MF = DAG.getMachineFunction();
3177 SystemZMachineFunctionInfo *FuncInfo =
3178 MF.getInfo<SystemZMachineFunctionInfo>();
3179 EVT PtrVT = getPointerTy(DAG.getDataLayout());
3180
3181 SDValue Chain = Op.getOperand(0);
3182 SDValue Addr = Op.getOperand(1);
3183 const Value *SV = cast<SrcValueSDNode>(Op.getOperand(2))->getValue();
3184 SDLoc DL(Op);
3185
3186 // The initial values of each field.
3187 const unsigned NumFields = 4;
3188 SDValue Fields[NumFields] = {
3189 DAG.getConstant(FuncInfo->getVarArgsFirstGPR(), DL, PtrVT),
3190 DAG.getConstant(FuncInfo->getVarArgsFirstFPR(), DL, PtrVT),
3191 DAG.getFrameIndex(FuncInfo->getVarArgsFrameIndex(), PtrVT),
3192 DAG.getFrameIndex(FuncInfo->getRegSaveFrameIndex(), PtrVT)
3193 };
3194
3195 // Store each field into its respective slot.
3196 SDValue MemOps[NumFields];
3197 unsigned Offset = 0;
3198 for (unsigned I = 0; I < NumFields; ++I) {
3199 SDValue FieldAddr = Addr;
3200 if (Offset != 0)
3201 FieldAddr = DAG.getNode(ISD::ADD, DL, PtrVT, FieldAddr,
3202 DAG.getIntPtrConstant(Offset, DL));
3203 MemOps[I] = DAG.getStore(Chain, DL, Fields[I], FieldAddr,
3204 MachinePointerInfo(SV, Offset));
3205 Offset += 8;
3206 }
3207 return DAG.getNode(ISD::TokenFactor, DL, MVT::Other, MemOps);
3208}
3209
3210SDValue SystemZTargetLowering::lowerVACOPY(SDValue Op,
3211 SelectionDAG &DAG) const {
3212 SDValue Chain = Op.getOperand(0);
3213 SDValue DstPtr = Op.getOperand(1);
3214 SDValue SrcPtr = Op.getOperand(2);
3215 const Value *DstSV = cast<SrcValueSDNode>(Op.getOperand(3))->getValue();
3216 const Value *SrcSV = cast<SrcValueSDNode>(Op.getOperand(4))->getValue();
3217 SDLoc DL(Op);
3218
3219 return DAG.getMemcpy(Chain, DL, DstPtr, SrcPtr, DAG.getIntPtrConstant(32, DL),
3220 /*Align*/8, /*isVolatile*/false, /*AlwaysInline*/false,
3221 /*isTailCall*/false,
3222 MachinePointerInfo(DstSV), MachinePointerInfo(SrcSV));
3223}
3224
3225SDValue SystemZTargetLowering::
3226lowerDYNAMIC_STACKALLOC(SDValue Op, SelectionDAG &DAG) const {
3227 const TargetFrameLowering *TFI = Subtarget.getFrameLowering();
3228 MachineFunction &MF = DAG.getMachineFunction();
3229 bool RealignOpt = !MF.getFunction().hasFnAttribute("no-realign-stack");
3230 bool StoreBackchain = MF.getFunction().hasFnAttribute("backchain");
3231
3232 SDValue Chain = Op.getOperand(0);
3233 SDValue Size = Op.getOperand(1);
3234 SDValue Align = Op.getOperand(2);
3235 SDLoc DL(Op);
3236
3237 // If the user has set the "no-realign-stack" function attribute, ignore
3238 // alloca alignments.
3239 uint64_t AlignVal = (RealignOpt ?
3240 cast<ConstantSDNode>(Align)->getZExtValue() : 0);
3241
3242 uint64_t StackAlign = TFI->getStackAlignment();
3243 uint64_t RequiredAlign = std::max(AlignVal, StackAlign);
3244 uint64_t ExtraAlignSpace = RequiredAlign - StackAlign;
3245
3246 unsigned SPReg = getStackPointerRegisterToSaveRestore();
3247 SDValue NeededSpace = Size;
3248
3249 // Get a reference to the stack pointer.
3250 SDValue OldSP = DAG.getCopyFromReg(Chain, DL, SPReg, MVT::i64);
3251
3252 // If we need a backchain, save it now.
3253 SDValue Backchain;
3254 if (StoreBackchain)
3255 Backchain = DAG.getLoad(MVT::i64, DL, Chain, OldSP, MachinePointerInfo());
3256
3257 // Add extra space for alignment if needed.
3258 if (ExtraAlignSpace)
3259 NeededSpace = DAG.getNode(ISD::ADD, DL, MVT::i64, NeededSpace,
3260 DAG.getConstant(ExtraAlignSpace, DL, MVT::i64));
3261
3262 // Get the new stack pointer value.
3263 SDValue NewSP = DAG.getNode(ISD::SUB, DL, MVT::i64, OldSP, NeededSpace);
3264
3265 // Copy the new stack pointer back.
3266 Chain = DAG.getCopyToReg(Chain, DL, SPReg, NewSP);
3267
3268 // The allocated data lives above the 160 bytes allocated for the standard
3269 // frame, plus any outgoing stack arguments. We don't know how much that
3270 // amounts to yet, so emit a special ADJDYNALLOC placeholder.
3271 SDValue ArgAdjust = DAG.getNode(SystemZISD::ADJDYNALLOC, DL, MVT::i64);
3272 SDValue Result = DAG.getNode(ISD::ADD, DL, MVT::i64, NewSP, ArgAdjust);
3273
3274 // Dynamically realign if needed.
3275 if (RequiredAlign > StackAlign) {
3276 Result =
3277 DAG.getNode(ISD::ADD, DL, MVT::i64, Result,
3278 DAG.getConstant(ExtraAlignSpace, DL, MVT::i64));
3279 Result =
3280 DAG.getNode(ISD::AND, DL, MVT::i64, Result,
3281 DAG.getConstant(~(RequiredAlign - 1), DL, MVT::i64));
3282 }
3283
3284 if (StoreBackchain)
3285 Chain = DAG.getStore(Chain, DL, Backchain, NewSP, MachinePointerInfo());
3286
3287 SDValue Ops[2] = { Result, Chain };
3288 return DAG.getMergeValues(Ops, DL);
3289}
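The realignment arithmetic can be read off in isolation: allocate Size plus some slack, then round the result pointer up within that slack. A sketch (hypothetical helper; the ADJDYNALLOC outgoing-argument placeholder is omitted for simplicity):

#include <cstdint>

// Over-allocate, then mask the result up to the requested alignment.
static uint64_t dynAllocResult(uint64_t OldSP, uint64_t Size,
                               uint64_t RequiredAlign, uint64_t StackAlign) {
  uint64_t Extra = RequiredAlign > StackAlign ? RequiredAlign - StackAlign : 0;
  uint64_t NewSP = OldSP - (Size + Extra);  // allocate with slack
  uint64_t Result = NewSP;
  if (RequiredAlign > StackAlign)           // round up inside the slack
    Result = (Result + Extra) & ~(RequiredAlign - 1);
  return Result;
}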
3290
3291SDValue SystemZTargetLowering::lowerGET_DYNAMIC_AREA_OFFSET(
3292 SDValue Op, SelectionDAG &DAG) const {
3293 SDLoc DL(Op);
3294
3295 return DAG.getNode(SystemZISD::ADJDYNALLOC, DL, MVT::i64);
3296}
3297
3298SDValue SystemZTargetLowering::lowerSMUL_LOHI(SDValue Op,
3299 SelectionDAG &DAG) const {
3300 EVT VT = Op.getValueType();
3301 SDLoc DL(Op);
3302 SDValue Ops[2];
3303 if (is32Bit(VT))
3304 // Just do a normal 64-bit multiplication and extract the results.
3305 // We define this so that it can be used for constant division.
3306 lowerMUL_LOHI32(DAG, DL, ISD::SIGN_EXTEND, Op.getOperand(0),
3307 Op.getOperand(1), Ops[1], Ops[0]);
3308 else if (Subtarget.hasMiscellaneousExtensions2())
3309 // SystemZISD::SMUL_LOHI returns the low result in the odd register and
3310 // the high result in the even register. ISD::SMUL_LOHI is defined to
3311 // return the low half first, so the results are in reverse order.
3312 lowerGR128Binary(DAG, DL, VT, SystemZISD::SMUL_LOHI,
3313 Op.getOperand(0), Op.getOperand(1), Ops[1], Ops[0]);
3314 else {
3315 // Do a full 128-bit multiplication based on SystemZISD::UMUL_LOHI:
3316 //
3317 // (ll * rl) + ((lh * rl) << 64) + ((ll * rh) << 64)
3318 //
3319 // but using the fact that the upper halves are either all zeros
3320 // or all ones:
3321 //
3322 // (ll * rl) - ((lh & rl) << 64) - ((ll & rh) << 64)
3323 //
3324 // and grouping the right terms together since they are quicker than the
3325 // multiplication:
3326 //
3327 // (ll * rl) - (((lh & rl) + (ll & rh)) << 64)
3328 SDValue C63 = DAG.getConstant(63, DL, MVT::i64);
3329 SDValue LL = Op.getOperand(0);
3330 SDValue RL = Op.getOperand(1);
3331 SDValue LH = DAG.getNode(ISD::SRA, DL, VT, LL, C63);
3332 SDValue RH = DAG.getNode(ISD::SRA, DL, VT, RL, C63);
3333 // SystemZISD::UMUL_LOHI returns the low result in the odd register and
3334 // the high result in the even register. ISD::SMUL_LOHI is defined to
3335 // return the low half first, so the results are in reverse order.
3336 lowerGR128Binary(DAG, DL, VT, SystemZISD::UMUL_LOHI,
3337 LL, RL, Ops[1], Ops[0]);
3338 SDValue NegLLTimesRH = DAG.getNode(ISD::AND, DL, VT, LL, RH);
3339 SDValue NegLHTimesRL = DAG.getNode(ISD::AND, DL, VT, LH, RL);
3340 SDValue NegSum = DAG.getNode(ISD::ADD, DL, VT, NegLLTimesRH, NegLHTimesRL);
3341 Ops[1] = DAG.getNode(ISD::SUB, DL, VT, Ops[1], NegSum);
3342 }
3343 return DAG.getMergeValues(Ops, DL);
3344}
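The comment's derivation can be checked in isolation: with lh = ll >> 63 and rh = rl >> 63 being all-zeros or all-ones, the signed high word equals the unsigned high word minus ((lh & rl) + (ll & rh)). A standalone sketch using the GCC/Clang 128-bit extension as the reference multiplier (all names illustrative):

#include <cassert>
#include <cstdint>

// Signed 64x64 -> high 64 bits, built from an unsigned high multiply
// exactly as the fallback above does.
static int64_t smulh(int64_t L, int64_t R) {
  uint64_t LL = (uint64_t)L, RL = (uint64_t)R;
  uint64_t High = (uint64_t)(((unsigned __int128)LL * RL) >> 64); // UMUL_LOHI
  uint64_t LH = (uint64_t)(L >> 63), RH = (uint64_t)(R >> 63);    // 0 or ~0
  return (int64_t)(High - ((LH & RL) + (LL & RH)));
}

int main() {
  assert(smulh(-3, 5) == -1);                    // -15 has high word -1
  assert(smulh(INT64_MIN, 2) == -1);             // -2^64 has high word -1
  assert(smulh(1LL << 40, 1LL << 40) == (1LL << 16)); // 2^80
}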
3345
3346SDValue SystemZTargetLowering::lowerUMUL_LOHI(SDValue Op,
3347 SelectionDAG &DAG) const {
3348 EVT VT = Op.getValueType();
3349 SDLoc DL(Op);
3350 SDValue Ops[2];
3351 if (is32Bit(VT))
3352 // Just do a normal 64-bit multiplication and extract the results.
3353 // We define this so that it can be used for constant division.
3354 lowerMUL_LOHI32(DAG, DL, ISD::ZERO_EXTEND, Op.getOperand(0),
3355 Op.getOperand(1), Ops[1], Ops[0]);
3356 else
3357 // SystemZISD::UMUL_LOHI returns the low result in the odd register and
3358 // the high result in the even register. ISD::UMUL_LOHI is defined to
3359 // return the low half first, so the results are in reverse order.
3360 lowerGR128Binary(DAG, DL, VT, SystemZISD::UMUL_LOHI,
3361 Op.getOperand(0), Op.getOperand(1), Ops[1], Ops[0]);
3362 return DAG.getMergeValues(Ops, DL);
3363}
3364
3365SDValue SystemZTargetLowering::lowerSDIVREM(SDValue Op,
3366 SelectionDAG &DAG) const {
3367 SDValue Op0 = Op.getOperand(0);
3368 SDValue Op1 = Op.getOperand(1);
3369 EVT VT = Op.getValueType();
3370 SDLoc DL(Op);
3371
3372 // We use DSGF for 32-bit division. This means the first operand must
3373 // always be 64-bit, and the second operand should be 32-bit whenever
3374 // that is possible, to improve performance.
3375 if (is32Bit(VT))
3376 Op0 = DAG.getNode(ISD::SIGN_EXTEND, DL, MVT::i64, Op0);
3377 else if (DAG.ComputeNumSignBits(Op1) > 32)
3378 Op1 = DAG.getNode(ISD::TRUNCATE, DL, MVT::i32, Op1);
3379
3380 // DSG(F) returns the remainder in the even register and the
3381 // quotient in the odd register.
3382 SDValue Ops[2];
3383 lowerGR128Binary(DAG, DL, VT, SystemZISD::SDIVREM, Op0, Op1, Ops[1], Ops[0]);
3384 return DAG.getMergeValues(Ops, DL);
3385}
3386
3387SDValue SystemZTargetLowering::lowerUDIVREM(SDValue Op,
3388 SelectionDAG &DAG) const {
3389 EVT VT = Op.getValueType();
3390 SDLoc DL(Op);
3391
3392 // DL(G) returns the remainder in the even register and the
3393 // quotient in the odd register.
3394 SDValue Ops[2];
3395 lowerGR128Binary(DAG, DL, VT, SystemZISD::UDIVREM,
3396 Op.getOperand(0), Op.getOperand(1), Ops[1], Ops[0]);
3397 return DAG.getMergeValues(Ops, DL);
3398}
3399
3400SDValue SystemZTargetLowering::lowerOR(SDValue Op, SelectionDAG &DAG) const {
3401 assert(Op.getValueType() == MVT::i64 && "Should be 64-bit operation");
3402
3403 // Get the known-zero masks for each operand.
3404 SDValue Ops[] = {Op.getOperand(0), Op.getOperand(1)};
3405 KnownBits Known[2] = {DAG.computeKnownBits(Ops[0]),
3406 DAG.computeKnownBits(Ops[1])};
3407
3408 // See if the upper 32 bits of one operand and the lower 32 bits of the
3409 // other are known zero. They are the low and high operands respectively.
3410 uint64_t Masks[] = { Known[0].Zero.getZExtValue(),
3411 Known[1].Zero.getZExtValue() };
3412 unsigned High, Low;
3413 if ((Masks[0] >> 32) == 0xffffffff && uint32_t(Masks[1]) == 0xffffffff)
3414 High = 1, Low = 0;
3415 else if ((Masks[1] >> 32) == 0xffffffff && uint32_t(Masks[0]) == 0xffffffff)
3416 High = 0, Low = 1;
3417 else
3418 return Op;
3419
3420 SDValue LowOp = Ops[Low];
3421 SDValue HighOp = Ops[High];
3422
3423 // If the high part is a constant, we're better off using IILH.
3424 if (HighOp.getOpcode() == ISD::Constant)
3425 return Op;
3426
3427 // If the low part is a constant that is outside the range of LHI,
3428 // then we're better off using IILF.
3429 if (LowOp.getOpcode() == ISD::Constant) {
3430 int64_t Value = int32_t(cast<ConstantSDNode>(LowOp)->getZExtValue());
3431 if (!isInt<16>(Value))
3432 return Op;
3433 }
3434
3435 // Check whether the high part is an AND that doesn't change the
3436 // high 32 bits and just masks out low bits. We can skip it if so.
3437 if (HighOp.getOpcode() == ISD::AND &&
3438 HighOp.getOperand(1).getOpcode() == ISD::Constant) {
3439 SDValue HighOp0 = HighOp.getOperand(0);
3440 uint64_t Mask = cast<ConstantSDNode>(HighOp.getOperand(1))->getZExtValue();
3441 if (DAG.MaskedValueIsZero(HighOp0, APInt(64, ~(Mask | 0xffffffff))))
3442 HighOp = HighOp0;
3443 }
3444
3445 // Take advantage of the fact that all GR32 operations only change the
3446 // low 32 bits by truncating Low to an i32 and inserting it directly
3447 // using a subreg. The interesting cases are those where the truncation
3448 // can be folded.
3449 SDLoc DL(Op);
3450 SDValue Low32 = DAG.getNode(ISD::TRUNCATE, DL, MVT::i32, LowOp);
3451 return DAG.getTargetInsertSubreg(SystemZ::subreg_l32, DL,
3452 MVT::i64, HighOp, Low32);
3453}
3454
3455// Lower SADDO/SSUBO/UADDO/USUBO nodes.
3456SDValue SystemZTargetLowering::lowerXALUO(SDValue Op,
3457 SelectionDAG &DAG) const {
3458 SDNode *N = Op.getNode();
3459 SDValue LHS = N->getOperand(0);
3460 SDValue RHS = N->getOperand(1);
3461 SDLoc DL(N);
3462 unsigned BaseOp = 0;
3463 unsigned CCValid = 0;
3464 unsigned CCMask = 0;
3465
3466 switch (Op.getOpcode()) {
3467 default: llvm_unreachable("Unknown instruction!");
3468 case ISD::SADDO:
3469 BaseOp = SystemZISD::SADDO;
3470 CCValid = SystemZ::CCMASK_ARITH;
3471 CCMask = SystemZ::CCMASK_ARITH_OVERFLOW;
3472 break;
3473 case ISD::SSUBO:
3474 BaseOp = SystemZISD::SSUBO;
3475 CCValid = SystemZ::CCMASK_ARITH;
3476 CCMask = SystemZ::CCMASK_ARITH_OVERFLOW;
3477 break;
3478 case ISD::UADDO:
3479 BaseOp = SystemZISD::UADDO;
3480 CCValid = SystemZ::CCMASK_LOGICAL;
3481 CCMask = SystemZ::CCMASK_LOGICAL_CARRY;
3482 break;
3483 case ISD::USUBO:
3484 BaseOp = SystemZISD::USUBO;
3485 CCValid = SystemZ::CCMASK_LOGICAL;
3486 CCMask = SystemZ::CCMASK_LOGICAL_BORROW;
3487 break;
3488 }
3489
3490 SDVTList VTs = DAG.getVTList(N->getValueType(0), MVT::i32);
3491 SDValue Result = DAG.getNode(BaseOp, DL, VTs, LHS, RHS);
3492
3493 SDValue SetCC = emitSETCC(DAG, DL, Result.getValue(1), CCValid, CCMask);
3494 if (N->getValueType(1) == MVT::i1)
3495 SetCC = DAG.getNode(ISD::TRUNCATE, DL, MVT::i1, SetCC);
3496
3497 return DAG.getNode(ISD::MERGE_VALUES, DL, N->getVTList(), Result, SetCC);
3498}
3499
3500static bool isAddCarryChain(SDValue Carry) {
3501 while (Carry.getOpcode() == ISD::ADDCARRY)
3502 Carry = Carry.getOperand(2);
3503 return Carry.getOpcode() == ISD::UADDO;
3504}
3505
3506static bool isSubBorrowChain(SDValue Carry) {
3507 while (Carry.getOpcode() == ISD::SUBCARRY)
3508 Carry = Carry.getOperand(2);
3509 return Carry.getOpcode() == ISD::USUBO;
3510}
3511
3512// Lower ADDCARRY/SUBCARRY nodes.
3513SDValue SystemZTargetLowering::lowerADDSUBCARRY(SDValue Op,
3514 SelectionDAG &DAG) const {
3515
3516 SDNode *N = Op.getNode();
3517 MVT VT = N->getSimpleValueType(0);
3518
3519 // Let legalize expand this if it isn't a legal type yet.
3520 if (!DAG.getTargetLoweringInfo().isTypeLegal(VT))
3521 return SDValue();
3522
3523 SDValue LHS = N->getOperand(0);
3524 SDValue RHS = N->getOperand(1);
3525 SDValue Carry = Op.getOperand(2);
3526 SDLoc DL(N);
3527 unsigned BaseOp = 0;
3528 unsigned CCValid = 0;
3529 unsigned CCMask = 0;
3530
3531 switch (Op.getOpcode()) {
3532 default: llvm_unreachable("Unknown instruction!");
3533 case ISD::ADDCARRY:
3534 if (!isAddCarryChain(Carry))
3535 return SDValue();
3536
3537 BaseOp = SystemZISD::ADDCARRY;
3538 CCValid = SystemZ::CCMASK_LOGICAL;
3539 CCMask = SystemZ::CCMASK_LOGICAL_CARRY;
3540 break;
3541 case ISD::SUBCARRY:
3542 if (!isSubBorrowChain(Carry))
3543 return SDValue();
3544
3545 BaseOp = SystemZISD::SUBCARRY;
3546 CCValid = SystemZ::CCMASK_LOGICAL;
3547 CCMask = SystemZ::CCMASK_LOGICAL_BORROW;
3548 break;
3549 }
3550
3551 // Set the condition code from the carry flag.
3552 Carry = DAG.getNode(SystemZISD::GET_CCMASK, DL, MVT::i32, Carry,
3553 DAG.getConstant(CCValid, DL, MVT::i32),
3554 DAG.getConstant(CCMask, DL, MVT::i32));
3555
3556 SDVTList VTs = DAG.getVTList(VT, MVT::i32);
3557 SDValue Result = DAG.getNode(BaseOp, DL, VTs, LHS, RHS, Carry);
3558
3559 SDValue SetCC = emitSETCC(DAG, DL, Result.getValue(1), CCValid, CCMask);
3560 if (N->getValueType(1) == MVT::i1)
3561 SetCC = DAG.getNode(ISD::TRUNCATE, DL, MVT::i1, SetCC);
3562
3563 return DAG.getNode(ISD::MERGE_VALUES, DL, N->getVTList(), Result, SetCC);
3564}
3565
3566SDValue SystemZTargetLowering::lowerCTPOP(SDValue Op,
3567 SelectionDAG &DAG) const {
3568 EVT VT = Op.getValueType();
3569 SDLoc DL(Op);
3570 Op = Op.getOperand(0);
3571
3572 // Handle vector types via VPOPCT.
3573 if (VT.isVector()) {
3574 Op = DAG.getNode(ISD::BITCAST, DL, MVT::v16i8, Op);
3575 Op = DAG.getNode(SystemZISD::POPCNT, DL, MVT::v16i8, Op);
3576 switch (VT.getScalarSizeInBits()) {
3577 case 8:
3578 break;
3579 case 16: {
3580 Op = DAG.getNode(ISD::BITCAST, DL, VT, Op);
3581 SDValue Shift = DAG.getConstant(8, DL, MVT::i32);
3582 SDValue Tmp = DAG.getNode(SystemZISD::VSHL_BY_SCALAR, DL, VT, Op, Shift);
3583 Op = DAG.getNode(ISD::ADD, DL, VT, Op, Tmp);
3584 Op = DAG.getNode(SystemZISD::VSRL_BY_SCALAR, DL, VT, Op, Shift);
3585 break;
3586 }
3587 case 32: {
3588 SDValue Tmp = DAG.getSplatBuildVector(MVT::v16i8, DL,
3589 DAG.getConstant(0, DL, MVT::i32));
3590 Op = DAG.getNode(SystemZISD::VSUM, DL, VT, Op, Tmp);
3591 break;
3592 }
3593 case 64: {
3594 SDValue Tmp = DAG.getSplatBuildVector(MVT::v16i8, DL,
3595 DAG.getConstant(0, DL, MVT::i32));
3596 Op = DAG.getNode(SystemZISD::VSUM, DL, MVT::v4i32, Op, Tmp);
3597 Op = DAG.getNode(SystemZISD::VSUM, DL, VT, Op, Tmp);
3598 break;
3599 }
3600 default:
3601 llvm_unreachable("Unexpected type");
3602 }
3603 return Op;
3604 }
3605
3606 // Get the known-zero mask for the operand.
3607 KnownBits Known = DAG.computeKnownBits(Op);
3608 unsigned NumSignificantBits = (~Known.Zero).getActiveBits();
3609 if (NumSignificantBits == 0)
3610 return DAG.getConstant(0, DL, VT);
3611
3612 // Skip known-zero high parts of the operand.
3613 int64_t OrigBitSize = VT.getSizeInBits();
3614 int64_t BitSize = (int64_t)1 << Log2_32_Ceil(NumSignificantBits);
3615 BitSize = std::min(BitSize, OrigBitSize);
3616
3617 // The POPCNT instruction counts the number of bits in each byte.
3618 Op = DAG.getNode(ISD::ANY_EXTEND, DL, MVT::i64, Op);
3619 Op = DAG.getNode(SystemZISD::POPCNT, DL, MVT::i64, Op);
3620 Op = DAG.getNode(ISD::TRUNCATE, DL, VT, Op);
3621
3622 // Add up per-byte counts in a binary tree. All bits of Op at
3623 // position larger than BitSize remain zero throughout.
3624 for (int64_t I = BitSize / 2; I >= 8; I = I / 2) {
3625 SDValue Tmp = DAG.getNode(ISD::SHL, DL, VT, Op, DAG.getConstant(I, DL, VT));
3626 if (BitSize != OrigBitSize)
3627 Tmp = DAG.getNode(ISD::AND, DL, VT, Tmp,
3628 DAG.getConstant(((uint64_t)1 << BitSize) - 1, DL, VT));
3629 Op = DAG.getNode(ISD::ADD, DL, VT, Op, Tmp);
3630 }
3631
3632 // Extract overall result from high byte.
3633 if (BitSize > 8)
3634 Op = DAG.getNode(ISD::SRL, DL, VT, Op,
3635 DAG.getConstant(BitSize - 8, DL, VT));
3636
3637 return Op;
3638}
3639
3640SDValue SystemZTargetLowering::lowerATOMIC_FENCE(SDValue Op,
3641 SelectionDAG &DAG) const {
3642 SDLoc DL(Op);
3643 AtomicOrdering FenceOrdering = static_cast<AtomicOrdering>(
3644 cast<ConstantSDNode>(Op.getOperand(1))->getZExtValue());
3645 SyncScope::ID FenceSSID = static_cast<SyncScope::ID>(
3646 cast<ConstantSDNode>(Op.getOperand(2))->getZExtValue());
3647
3648 // The only fence that needs an instruction is a sequentially-consistent
3649 // cross-thread fence.
3650 if (FenceOrdering == AtomicOrdering::SequentiallyConsistent &&
3651 FenceSSID == SyncScope::System) {
3652 return SDValue(DAG.getMachineNode(SystemZ::Serialize, DL, MVT::Other,
3653 Op.getOperand(0)),
3654 0);
3655 }
3656
3657 // MEMBARRIER is a compiler barrier; it codegens to a no-op.
3658 return DAG.getNode(SystemZISD::MEMBARRIER, DL, MVT::Other, Op.getOperand(0));
3659}
3660
3661// Op is an atomic load. Lower it into a normal volatile load.
3662SDValue SystemZTargetLowering::lowerATOMIC_LOAD(SDValue Op,
3663 SelectionDAG &DAG) const {
3664 auto *Node = cast<AtomicSDNode>(Op.getNode());
3665 return DAG.getExtLoad(ISD::EXTLOAD, SDLoc(Op), Op.getValueType(),
3666 Node->getChain(), Node->getBasePtr(),
3667 Node->getMemoryVT(), Node->getMemOperand());
3668}
3669
3670// Op is an atomic store. Lower it into a normal volatile store.
3671SDValue SystemZTargetLowering::lowerATOMIC_STORE(SDValue Op,
3672 SelectionDAG &DAG) const {
3673 auto *Node = cast<AtomicSDNode>(Op.getNode());
3674 SDValue Chain = DAG.getTruncStore(Node->getChain(), SDLoc(Op), Node->getVal(),
3675 Node->getBasePtr(), Node->getMemoryVT(),
3676 Node->getMemOperand());
3677 // We have to enforce sequential consistency by performing a
3678 // serialization operation after the store.
3679 if (Node->getOrdering() == AtomicOrdering::SequentiallyConsistent)
3680 Chain = SDValue(DAG.getMachineNode(SystemZ::Serialize, SDLoc(Op),
3681 MVT::Other, Chain), 0);
3682 return Chain;
3683}
3684
3685 // Op is an 8-, 16- or 32-bit ATOMIC_LOAD_* operation. Lower the first
3686// two into the fullword ATOMIC_LOADW_* operation given by Opcode.
3687SDValue SystemZTargetLowering::lowerATOMIC_LOAD_OP(SDValue Op,
3688 SelectionDAG &DAG,
3689 unsigned Opcode) const {
3690 auto *Node = cast<AtomicSDNode>(Op.getNode());
3691
3692 // 32-bit operations need no code outside the main loop.
3693 EVT NarrowVT = Node->getMemoryVT();
3694 EVT WideVT = MVT::i32;
3695 if (NarrowVT == WideVT)
3696 return Op;
3697
3698 int64_t BitSize = NarrowVT.getSizeInBits();
3699 SDValue ChainIn = Node->getChain();
3700 SDValue Addr = Node->getBasePtr();
3701 SDValue Src2 = Node->getVal();
3702 MachineMemOperand *MMO = Node->getMemOperand();
3703 SDLoc DL(Node);
3704 EVT PtrVT = Addr.getValueType();
3705
3706 // Convert atomic subtracts of constants into additions.
3707 if (Opcode == SystemZISD::ATOMIC_LOADW_SUB)
3708 if (auto *Const = dyn_cast<ConstantSDNode>(Src2)) {
3709 Opcode = SystemZISD::ATOMIC_LOADW_ADD;
3710 Src2 = DAG.getConstant(-Const->getSExtValue(), DL, Src2.getValueType());
3711 }
3712
3713 // Get the address of the containing word.
3714 SDValue AlignedAddr = DAG.getNode(ISD::AND, DL, PtrVT, Addr,
3715 DAG.getConstant(-4, DL, PtrVT));
3716
3717 // Get the number of bits that the word must be rotated left in order
3718 // to bring the field to the top bits of a GR32.
3719 SDValue BitShift = DAG.getNode(ISD::SHL, DL, PtrVT, Addr,
3720 DAG.getConstant(3, DL, PtrVT));
3721 BitShift = DAG.getNode(ISD::TRUNCATE, DL, WideVT, BitShift);
3722
3723 // Get the complementing shift amount, for rotating a field in the top
3724 // bits back to its proper position.
3725 SDValue NegBitShift = DAG.getNode(ISD::SUB, DL, WideVT,
3726 DAG.getConstant(0, DL, WideVT), BitShift);
3727
3728 // Extend the source operand to 32 bits and prepare it for the inner loop.
3729 // ATOMIC_SWAPW uses RISBG to rotate the field left, but all other
3730 // operations require the source to be shifted in advance. (This shift
3731 // can be folded if the source is constant.) For AND and NAND, the lower
3732 // bits must be set, while for other opcodes they should be left clear.
3733 if (Opcode != SystemZISD::ATOMIC_SWAPW)
3734 Src2 = DAG.getNode(ISD::SHL, DL, WideVT, Src2,
3735 DAG.getConstant(32 - BitSize, DL, WideVT));
3736 if (Opcode == SystemZISD::ATOMIC_LOADW_AND ||
3737 Opcode == SystemZISD::ATOMIC_LOADW_NAND)
3738 Src2 = DAG.getNode(ISD::OR, DL, WideVT, Src2,
3739 DAG.getConstant(uint32_t(-1) >> BitSize, DL, WideVT));
3740
3741 // Construct the ATOMIC_LOADW_* node.
3742 SDVTList VTList = DAG.getVTList(WideVT, MVT::Other);
3743 SDValue Ops[] = { ChainIn, AlignedAddr, Src2, BitShift, NegBitShift,
3744 DAG.getConstant(BitSize, DL, WideVT) };
3745 SDValue AtomicOp = DAG.getMemIntrinsicNode(Opcode, DL, VTList, Ops,
3746 NarrowVT, MMO);
3747
3748 // Rotate the result of the final CS so that the field is in the lower
3749 // bits of a GR32, then truncate it.
3750 SDValue ResultShift = DAG.getNode(ISD::ADD, DL, WideVT, BitShift,
3751 DAG.getConstant(BitSize, DL, WideVT));
3752 SDValue Result = DAG.getNode(ISD::ROTL, DL, WideVT, AtomicOp, ResultShift);
3753
3754 SDValue RetOps[2] = { Result, AtomicOp.getValue(1) };
3755 return DAG.getMergeValues(RetOps, DL);
3756}
3757
3758// Op is an ATOMIC_LOAD_SUB operation. Lower 8- and 16-bit operations
3759// into ATOMIC_LOADW_SUBs and decide whether to convert 32- and 64-bit
3760// operations into additions.
3761SDValue SystemZTargetLowering::lowerATOMIC_LOAD_SUB(SDValue Op,
3762 SelectionDAG &DAG) const {
3763 auto *Node = cast<AtomicSDNode>(Op.getNode());
3764 EVT MemVT = Node->getMemoryVT();
3765 if (MemVT == MVT::i32 || MemVT == MVT::i64) {
3766 // A full-width operation.
3767 assert(Op.getValueType() == MemVT && "Mismatched VTs");
3768 SDValue Src2 = Node->getVal();
3769 SDValue NegSrc2;
3770 SDLoc DL(Src2);
3771
3772 if (auto *Op2 = dyn_cast<ConstantSDNode>(Src2)) {
3773 // Use an addition if the operand is constant and either LAA(G) is
3774 // available or the negative value is in the range of A(G)FHI.
3775 int64_t Value = (-Op2->getAPIntValue()).getSExtValue();
3776 if (isInt<32>(Value) || Subtarget.hasInterlockedAccess1())
3777 NegSrc2 = DAG.getConstant(Value, DL, MemVT);
3778 } else if (Subtarget.hasInterlockedAccess1())
3779 // Use LAA(G) if available.
3780 NegSrc2 = DAG.getNode(ISD::SUB, DL, MemVT, DAG.getConstant(0, DL, MemVT),
3781 Src2);
3782
3783 if (NegSrc2.getNode())
3784 return DAG.getAtomic(ISD::ATOMIC_LOAD_ADD, DL, MemVT,
3785 Node->getChain(), Node->getBasePtr(), NegSrc2,
3786 Node->getMemOperand());
3787
3788 // Use the node as-is.
3789 return Op;
3790 }
3791
3792 return lowerATOMIC_LOAD_OP(Op, DAG, SystemZISD::ATOMIC_LOADW_SUB);
3793}
3794
3795// Lower 8/16/32/64-bit ATOMIC_CMP_SWAP_WITH_SUCCESS node.
3796SDValue SystemZTargetLowering::lowerATOMIC_CMP_SWAP(SDValue Op,
3797 SelectionDAG &DAG) const {
3798 auto *Node = cast<AtomicSDNode>(Op.getNode());
3799 SDValue ChainIn = Node->getOperand(0);
3800 SDValue Addr = Node->getOperand(1);
3801 SDValue CmpVal = Node->getOperand(2);
3802 SDValue SwapVal = Node->getOperand(3);
3803 MachineMemOperand *MMO = Node->getMemOperand();
3804 SDLoc DL(Node);
3805
3806 // We have native support for 32-bit and 64-bit compare and swap, but we
3807 // still need to expand extracting the "success" result from the CC.
3808 EVT NarrowVT = Node->getMemoryVT();
3809 EVT WideVT = NarrowVT == MVT::i64 ? MVT::i64 : MVT::i32;
3810 if (NarrowVT == WideVT) {
3811 SDVTList Tys = DAG.getVTList(WideVT, MVT::i32, MVT::Other);
3812 SDValue Ops[] = { ChainIn, Addr, CmpVal, SwapVal };
3813 SDValue AtomicOp = DAG.getMemIntrinsicNode(SystemZISD::ATOMIC_CMP_SWAP,
3814 DL, Tys, Ops, NarrowVT, MMO);
3815 SDValue Success = emitSETCC(DAG, DL, AtomicOp.getValue(1),
3816 SystemZ::CCMASK_CS, SystemZ::CCMASK_CS_EQ);
3817
3818 DAG.ReplaceAllUsesOfValueWith(Op.getValue(0), AtomicOp.getValue(0));
3819 DAG.ReplaceAllUsesOfValueWith(Op.getValue(1), Success);
3820 DAG.ReplaceAllUsesOfValueWith(Op.getValue(2), AtomicOp.getValue(2));
3821 return SDValue();
3822 }
3823
3824 // Convert 8-bit and 16-bit compare and swap to a loop, implemented
3825 // via a fullword ATOMIC_CMP_SWAPW operation.
3826 int64_t BitSize = NarrowVT.getSizeInBits();
3827 EVT PtrVT = Addr.getValueType();
3828
3829 // Get the address of the containing word.
3830 SDValue AlignedAddr = DAG.getNode(ISD::AND, DL, PtrVT, Addr,
3831 DAG.getConstant(-4, DL, PtrVT));
3832
3833 // Get the number of bits that the word must be rotated left in order
3834 // to bring the field to the top bits of a GR32.
3835 SDValue BitShift = DAG.getNode(ISD::SHL, DL, PtrVT, Addr,
3836 DAG.getConstant(3, DL, PtrVT));
3837 BitShift = DAG.getNode(ISD::TRUNCATE, DL, WideVT, BitShift);
3838
3839 // Get the complementing shift amount, for rotating a field in the top
3840 // bits back to its proper position.
3841 SDValue NegBitShift = DAG.getNode(ISD::SUB, DL, WideVT,
3842 DAG.getConstant(0, DL, WideVT), BitShift);
3843
3844 // Construct the ATOMIC_CMP_SWAPW node.
3845 SDVTList VTList = DAG.getVTList(WideVT, MVT::i32, MVT::Other);
3846 SDValue Ops[] = { ChainIn, AlignedAddr, CmpVal, SwapVal, BitShift,
3847 NegBitShift, DAG.getConstant(BitSize, DL, WideVT) };
3848 SDValue AtomicOp = DAG.getMemIntrinsicNode(SystemZISD::ATOMIC_CMP_SWAPW, DL,
3849 VTList, Ops, NarrowVT, MMO);
3850 SDValue Success = emitSETCC(DAG, DL, AtomicOp.getValue(1),
3851 SystemZ::CCMASK_ICMP, SystemZ::CCMASK_CMP_EQ);
3852
3853 DAG.ReplaceAllUsesOfValueWith(Op.getValue(0), AtomicOp.getValue(0));
3854 DAG.ReplaceAllUsesOfValueWith(Op.getValue(1), Success);
3855 DAG.ReplaceAllUsesOfValueWith(Op.getValue(2), AtomicOp.getValue(2));
3856 return SDValue();
3857}
3858
3859MachineMemOperand::Flags
3860SystemZTargetLowering::getMMOFlags(const Instruction &I) const {
3861 // Because of how we convert atomic_load and atomic_store to normal loads and
3862 // stores in the DAG, we need to ensure that the MMOs are marked volatile
3863 // since DAGCombine hasn't been updated to account for atomic but
3864 // non-volatile loads. (See D57601)
3865 if (auto *SI = dyn_cast<StoreInst>(&I))
3866 if (SI->isAtomic())
3867 return MachineMemOperand::MOVolatile;
3868 if (auto *LI = dyn_cast<LoadInst>(&I))
3869 if (LI->isAtomic())
3870 return MachineMemOperand::MOVolatile;
3871 if (auto *AI = dyn_cast<AtomicRMWInst>(&I))
3872 if (AI->isAtomic())
3873 return MachineMemOperand::MOVolatile;
3874 if (auto *AI = dyn_cast<AtomicCmpXchgInst>(&I))
3875 if (AI->isAtomic())
3876 return MachineMemOperand::MOVolatile;
3877 return MachineMemOperand::MONone;
3878}
3879
3880SDValue SystemZTargetLowering::lowerSTACKSAVE(SDValue Op,
3881 SelectionDAG &DAG) const {
3882 MachineFunction &MF = DAG.getMachineFunction();
3883 MF.getInfo<SystemZMachineFunctionInfo>()->setManipulatesSP(true);
3884 if (MF.getFunction().getCallingConv() == CallingConv::GHC)
3885 report_fatal_error("Variable-sized stack allocations are not supported "
3886 "in GHC calling convention");
3887 return DAG.getCopyFromReg(Op.getOperand(0), SDLoc(Op),
3888 SystemZ::R15D, Op.getValueType());
3889}
3890
3891SDValue SystemZTargetLowering::lowerSTACKRESTORE(SDValue Op,
3892 SelectionDAG &DAG) const {
3893 MachineFunction &MF = DAG.getMachineFunction();
3894 MF.getInfo<SystemZMachineFunctionInfo>()->setManipulatesSP(true);
3895 bool StoreBackchain = MF.getFunction().hasFnAttribute("backchain");
3896
3897 if (MF.getFunction().getCallingConv() == CallingConv::GHC)
3898 report_fatal_error("Variable-sized stack allocations are not supported "
3899 "in GHC calling convention");
3900
3901 SDValue Chain = Op.getOperand(0);
3902 SDValue NewSP = Op.getOperand(1);
3903 SDValue Backchain;
3904 SDLoc DL(Op);
3905
3906 if (StoreBackchain) {
3907 SDValue OldSP = DAG.getCopyFromReg(Chain, DL, SystemZ::R15D, MVT::i64);
3908 Backchain = DAG.getLoad(MVT::i64, DL, Chain, OldSP, MachinePointerInfo());
3909 }
3910
3911 Chain = DAG.getCopyToReg(Chain, DL, SystemZ::R15D, NewSP);
3912
3913 if (StoreBackchain)
3914 Chain = DAG.getStore(Chain, DL, Backchain, NewSP, MachinePointerInfo());
3915
3916 return Chain;
3917}
3918
3919SDValue SystemZTargetLowering::lowerPREFETCH(SDValue Op,
3920 SelectionDAG &DAG) const {
3921 bool IsData = cast<ConstantSDNode>(Op.getOperand(4))->getZExtValue();
3922 if (!IsData)
3923 // Just preserve the chain.
3924 return Op.getOperand(0);
3925
3926 SDLoc DL(Op);
3927 bool IsWrite = cast<ConstantSDNode>(Op.getOperand(2))->getZExtValue();
3928 unsigned Code = IsWrite ? SystemZ::PFD_WRITE : SystemZ::PFD_READ;
3929 auto *Node = cast<MemIntrinsicSDNode>(Op.getNode());
3930 SDValue Ops[] = {Op.getOperand(0), DAG.getTargetConstant(Code, DL, MVT::i32),
3931 Op.getOperand(1)};
3932 return DAG.getMemIntrinsicNode(SystemZISD::PREFETCH, DL,
3933 Node->getVTList(), Ops,
3934 Node->getMemoryVT(), Node->getMemOperand());
3935}
3936
3937// Convert condition code in CCReg to an i32 value.
3938static SDValue getCCResult(SelectionDAG &DAG, SDValue CCReg) {
3939 SDLoc DL(CCReg);
3940 SDValue IPM = DAG.getNode(SystemZISD::IPM, DL, MVT::i32, CCReg);
3941 return DAG.getNode(ISD::SRL, DL, MVT::i32, IPM,
3942 DAG.getConstant(SystemZ::IPM_CC, DL, MVT::i32));
3943}
3944
3945SDValue
3946SystemZTargetLowering::lowerINTRINSIC_W_CHAIN(SDValue Op,
3947 SelectionDAG &DAG) const {
3948 unsigned Opcode, CCValid;
3949 if (isIntrinsicWithCCAndChain(Op, Opcode, CCValid)) {
3950 assert(Op->getNumValues() == 2 && "Expected only CC result and chain");
3951 SDNode *Node = emitIntrinsicWithCCAndChain(DAG, Op, Opcode);
3952 SDValue CC = getCCResult(DAG, SDValue(Node, 0));
3953 DAG.ReplaceAllUsesOfValueWith(SDValue(Op.getNode(), 0), CC);
3954 return SDValue();
3955 }
3956
3957 return SDValue();
3958}
3959
3960SDValue
3961SystemZTargetLowering::lowerINTRINSIC_WO_CHAIN(SDValue Op,
3962 SelectionDAG &DAG) const {
3963 unsigned Opcode, CCValid;
3964 if (isIntrinsicWithCC(Op, Opcode, CCValid)) {
3965 SDNode *Node = emitIntrinsicWithCC(DAG, Op, Opcode);
3966 if (Op->getNumValues() == 1)
3967 return getCCResult(DAG, SDValue(Node, 0));
3968 assert(Op->getNumValues() == 2 && "Expected a CC and non-CC result");
3969 return DAG.getNode(ISD::MERGE_VALUES, SDLoc(Op), Op->getVTList(),
3970 SDValue(Node, 0), getCCResult(DAG, SDValue(Node, 1)));
3971 }
3972
3973 unsigned Id = cast<ConstantSDNode>(Op.getOperand(0))->getZExtValue();
3974 switch (Id) {
3975 case Intrinsic::thread_pointer:
3976 return lowerThreadPointer(SDLoc(Op), DAG);
3977
3978 case Intrinsic::s390_vpdi:
3979 return DAG.getNode(SystemZISD::PERMUTE_DWORDS, SDLoc(Op), Op.getValueType(),
3980 Op.getOperand(1), Op.getOperand(2), Op.getOperand(3));
3981
3982 case Intrinsic::s390_vperm:
3983 return DAG.getNode(SystemZISD::PERMUTE, SDLoc(Op), Op.getValueType(),
3984 Op.getOperand(1), Op.getOperand(2), Op.getOperand(3));
3985
3986 case Intrinsic::s390_vuphb:
3987 case Intrinsic::s390_vuphh:
3988 case Intrinsic::s390_vuphf:
3989 return DAG.getNode(SystemZISD::UNPACK_HIGH, SDLoc(Op), Op.getValueType(),
3990 Op.getOperand(1));
3991
3992 case Intrinsic::s390_vuplhb:
3993 case Intrinsic::s390_vuplhh:
3994 case Intrinsic::s390_vuplhf:
3995 return DAG.getNode(SystemZISD::UNPACKL_HIGH, SDLoc(Op), Op.getValueType(),
3996 Op.getOperand(1));
3997
3998 case Intrinsic::s390_vuplb:
3999 case Intrinsic::s390_vuplhw:
4000 case Intrinsic::s390_vuplf:
4001 return DAG.getNode(SystemZISD::UNPACK_LOW, SDLoc(Op), Op.getValueType(),
4002 Op.getOperand(1));
4003
4004 case Intrinsic::s390_vupllb:
4005 case Intrinsic::s390_vupllh:
4006 case Intrinsic::s390_vupllf:
4007 return DAG.getNode(SystemZISD::UNPACKL_LOW, SDLoc(Op), Op.getValueType(),
4008 Op.getOperand(1));
4009
4010 case Intrinsic::s390_vsumb:
4011 case Intrinsic::s390_vsumh:
4012 case Intrinsic::s390_vsumgh:
4013 case Intrinsic::s390_vsumgf:
4014 case Intrinsic::s390_vsumqf:
4015 case Intrinsic::s390_vsumqg:
4016 return DAG.getNode(SystemZISD::VSUM, SDLoc(Op), Op.getValueType(),
4017 Op.getOperand(1), Op.getOperand(2));
4018 }
4019
4020 return SDValue();
4021}
4022
4023namespace {
4024// Says that SystemZISD operation Opcode can be used to perform the equivalent
4025// of a VPERM with permute vector Bytes. If Opcode takes three operands,
4026// Operand is the constant third operand, otherwise it is the number of
4027// bytes in each element of the result.
4028struct Permute {
4029 unsigned Opcode;
4030 unsigned Operand;
4031 unsigned char Bytes[SystemZ::VectorBytes];
4032};
4033}
4034
4035static const Permute PermuteForms[] = {
4036 // VMRHG
4037 { SystemZISD::MERGE_HIGH, 8,
4038 { 0, 1, 2, 3, 4, 5, 6, 7, 16, 17, 18, 19, 20, 21, 22, 23 } },
4039 // VMRHF
4040 { SystemZISD::MERGE_HIGH, 4,
4041 { 0, 1, 2, 3, 16, 17, 18, 19, 4, 5, 6, 7, 20, 21, 22, 23 } },
4042 // VMRHH
4043 { SystemZISD::MERGE_HIGH, 2,
4044 { 0, 1, 16, 17, 2, 3, 18, 19, 4, 5, 20, 21, 6, 7, 22, 23 } },
4045 // VMRHB
4046 { SystemZISD::MERGE_HIGH, 1,
4047 { 0, 16, 1, 17, 2, 18, 3, 19, 4, 20, 5, 21, 6, 22, 7, 23 } },
4048 // VMRLG
4049 { SystemZISD::MERGE_LOW, 8,
4050 { 8, 9, 10, 11, 12, 13, 14, 15, 24, 25, 26, 27, 28, 29, 30, 31 } },
4051 // VMRLF
4052 { SystemZISD::MERGE_LOW, 4,
4053 { 8, 9, 10, 11, 24, 25, 26, 27, 12, 13, 14, 15, 28, 29, 30, 31 } },
4054 // VMRLH
4055 { SystemZISD::MERGE_LOW, 2,
4056 { 8, 9, 24, 25, 10, 11, 26, 27, 12, 13, 28, 29, 14, 15, 30, 31 } },
4057 // VMRLB
4058 { SystemZISD::MERGE_LOW, 1,
4059 { 8, 24, 9, 25, 10, 26, 11, 27, 12, 28, 13, 29, 14, 30, 15, 31 } },
4060 // VPKG
4061 { SystemZISD::PACK, 4,
4062 { 4, 5, 6, 7, 12, 13, 14, 15, 20, 21, 22, 23, 28, 29, 30, 31 } },
4063 // VPKF
4064 { SystemZISD::PACK, 2,
4065 { 2, 3, 6, 7, 10, 11, 14, 15, 18, 19, 22, 23, 26, 27, 30, 31 } },
4066 // VPKH
4067 { SystemZISD::PACK, 1,
4068 { 1, 3, 5, 7, 9, 11, 13, 15, 17, 19, 21, 23, 25, 27, 29, 31 } },
4069 // VPDI V1, V2, 4 (low half of V1, high half of V2)
4070 { SystemZISD::PERMUTE_DWORDS, 4,
4071 { 8, 9, 10, 11, 12, 13, 14, 15, 16, 17, 18, 19, 20, 21, 22, 23 } },
4072 // VPDI V1, V2, 1 (high half of V1, low half of V2)
4073 { SystemZISD::PERMUTE_DWORDS, 1,
4074 { 0, 1, 2, 3, 4, 5, 6, 7, 24, 25, 26, 27, 28, 29, 30, 31 } }
4075};
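// Reading one entry as a worked example: in the VMRHB form above, Operand
// is 1 (one byte per element) and Bytes begins { 0, 16, 1, 17, ... }, so
// result byte 0 comes from byte 0 of operand 0 and result byte 1 from
// byte 0 of operand 1 (selectors 16-31 address the second operand),
// interleaving the high halves of the two inputs byte by byte.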
4076
4077// Called after matching a vector shuffle against a particular pattern.
4078// Both the original shuffle and the pattern have two vector operands.
4079// OpNos[0] is the operand of the original shuffle that should be used for
4080// operand 0 of the pattern, or -1 if operand 0 of the pattern can be anything.
4081// OpNos[1] is the same for operand 1 of the pattern. Resolve these -1s and
4082// set OpNo0 and OpNo1 to the shuffle operands that should actually be used
4083// for operands 0 and 1 of the pattern.
4084static bool chooseShuffleOpNos(int *OpNos, unsigned &OpNo0, unsigned &OpNo1) {
4085 if (OpNos[0] < 0) {
4086 if (OpNos[1] < 0)
4087 return false;
4088 OpNo0 = OpNo1 = OpNos[1];
4089 } else if (OpNos[1] < 0) {
4090 OpNo0 = OpNo1 = OpNos[0];
4091 } else {
4092 OpNo0 = OpNos[0];
4093 OpNo1 = OpNos[1];
4094 }
4095 return true;
4096}
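// For example, OpNos = { -1, 1 } resolves to OpNo0 = OpNo1 = 1: operand 0
// of the pattern was never constrained, so reusing the shuffle operand
// already chosen for operand 1 avoids pulling in an extra input.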
4097
4098// Bytes is a VPERM-like permute vector, except that -1 is used for
4099// undefined bytes. Return true if the VPERM can be implemented using P.
4100// When returning true set OpNo0 to the VPERM operand that should be
4101// used for operand 0 of P and likewise OpNo1 for operand 1 of P.
4102//
4103// For example, if swapping the VPERM operands allows P to match, OpNo0
4104// will be 1 and OpNo1 will be 0. If instead Bytes only refers to one
4105// operand, but rewriting it to use two duplicated operands allows it to
4106// match P, then OpNo0 and OpNo1 will be the same.
4107static bool matchPermute(const SmallVectorImpl<int> &Bytes, const Permute &P,
4108 unsigned &OpNo0, unsigned &OpNo1) {
4109 int OpNos[] = { -1, -1 };
4110 for (unsigned I = 0; I < SystemZ::VectorBytes; ++I) {
4111 int Elt = Bytes[I];
4112 if (Elt >= 0) {
4113 // Make sure that the two permute vectors use the same suboperand
4114 // byte number. Only the operand numbers (the high bits) are
4115 // allowed to differ.
4116 if ((Elt ^ P.Bytes[I]) & (SystemZ::VectorBytes - 1))
4117 return false;
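// (Selectors 0-15 and 16-31 differ only in bit 4, so masking the XOR
// with VectorBytes - 1 ignores the operand bit; any remaining set bit
// means the byte offsets within the operands disagree.)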
4118 int ModelOpNo = P.Bytes[I] / SystemZ::VectorBytes;
4119 int RealOpNo = unsigned(Elt) / SystemZ::VectorBytes;
4120 // Make sure that the operand mappings are consistent with previous
4121 // elements.
4122 if (OpNos[ModelOpNo] == 1 - RealOpNo)
4123 return false;
4124 OpNos[ModelOpNo] = RealOpNo;
4125 }
4126 }
4127 return chooseShuffleOpNos(OpNos, OpNo0, OpNo1);
4128}
4129
4130// As above, but search for a matching permute.
4131static const Permute *matchPermute(const SmallVectorImpl<int> &Bytes,
4132 unsigned &OpNo0, unsigned &OpNo1) {
4133 for (auto &P : PermuteForms)
4134 if (matchPermute(Bytes, P, OpNo0, OpNo1))
4135 return &P;
4136 return nullptr;
4137}
4138
4139// Bytes is a VPERM-like permute vector, except that -1 is used for
4140// undefined bytes. This permute is an operand of an outer permute.
4141// See whether redistributing the -1 bytes gives a shuffle that can be
4142// implemented using P. If so, set Transform to a VPERM-like permute vector
4143// that, when applied to the result of P, gives the original permute in Bytes.
4144static bool matchDoublePermute(const SmallVectorImpl<int> &Bytes,
4145 const Permute &P,
4146 SmallVectorImpl<int> &Transform) {
4147 unsigned To = 0;
4148 for (unsigned From = 0; From < SystemZ::VectorBytes; ++From) {
4149 int Elt = Bytes[From];
4150 if (Elt < 0)
4151 // Byte number From of the result is undefined.
4152 Transform[From] = -1;
4153 else {
4154 while (P.Bytes[To] != Elt) {
4155 To += 1;
4156 if (To == SystemZ::VectorBytes)
4157 return false;
4158 }
4159 Transform[From] = To;
4160 }
4161 }
4162 return true;
4163}
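// For instance, if Bytes starts { 0, 16, -1, -1, ... } and P is the VMRHB
// form ({ 0, 16, 1, 17, ... }), the scan finds byte 0 at position 0 and
// byte 16 at position 1, giving Transform = { 0, 1, -1, -1, ... }; a
// VPERM with that vector applied to the VMRHB result recreates Bytes.
// Note that To only advances, so Bytes must select elements in the same
// order in which they appear in P.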
4164
4165// As above, but search for a matching permute.
4166static const Permute *matchDoublePermute(const SmallVectorImpl<int> &Bytes,
4167 SmallVectorImpl<int> &Transform) {
4168 for (auto &P : PermuteForms)
4169 if (matchDoublePermute(Bytes, P, Transform))
4170 return &P;
4171 return nullptr;
4172}
4173
4174// Convert the mask of the given shuffle op into a byte-level mask,
4175// as if it had type vNi8.
4176static bool getVPermMask(SDValue ShuffleOp,
4177 SmallVectorImpl<int> &Bytes) {
4178 EVT VT = ShuffleOp.getValueType();
4179 unsigned NumElements = VT.getVectorNumElements();
4180 unsigned BytesPerElement = VT.getVectorElementType().getStoreSize();
4181
4182 if (auto *VSN = dyn_cast<ShuffleVectorSDNode>(ShuffleOp)) {
4183 Bytes.resize(NumElements * BytesPerElement, -1);
4184 for (unsigned I = 0; I < NumElements; ++I) {
4185 int Index = VSN->getMaskElt(I);
4186 if (Index >= 0)
4187 for (unsigned J = 0; J < BytesPerElement; ++J)
4188 Bytes[I * BytesPerElement + J] = Index * BytesPerElement + J;
4189 }
4190 return true;
4191 }
4192 if (ShuffleOp.getOpcode() == SystemZISD::SPLAT &&
4193 isa<ConstantSDNode>(ShuffleOp.getOperand(1))) {
4194 unsigned Index = ShuffleOp.getConstantOperandVal(1);
4195 Bytes.resize(NumElements * BytesPerElement, -1);
4196 for (unsigned I = 0; I < NumElements; ++I)
4197 for (unsigned J = 0; J < BytesPerElement; ++J)
4198 Bytes[I * BytesPerElement + J] = Index * BytesPerElement + J;
4199 return true;
4200 }
4201 return false;
4202}
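// For example, a v4i32 shuffle mask element of 5 expands to the byte
// selectors { 20, 21, 22, 23 }, since each i32 element covers four bytes.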
4203
4204// Bytes is a VPERM-like permute vector, except that -1 is used for
4205// undefined bytes. See whether bytes [Start, Start + BytesPerElement) of
4206// the result come from a contiguous sequence of bytes from one input.
4207// Set Base to the selector for the first byte if so.
4208static bool getShuffleInput(const SmallVectorImpl<int> &Bytes, unsigned Start,
4209 unsigned BytesPerElement, int &Base) {
4210 Base = -1;
4211 for (unsigned I = 0; I < BytesPerElement; ++I) {
4212 if (Bytes[Start + I] >= 0) {
4213 unsigned Elem = Bytes[Start + I];
4214 if (Base < 0) {
4215 Base = Elem - I;
4216 // Make sure the bytes would come from one input operand.
4217 if (unsigned(Base) % Bytes.size() + BytesPerElement > Bytes.size())
4218 return false;
4219 } else if (unsigned(Base) != Elem - I)
4220 return false;
4221 }
4222 }
4223 return true;
4224}
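// E.g. with BytesPerElement == 4, selectors { 22, 23, -1, -1 } yield
// Base = 22: the defined bytes are consecutive and the whole element
// range stays within the second input operand.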
4225
4226// Bytes is a VPERM-like permute vector, except that -1 is used for
4227// undefined bytes. Return true if it can be performed using VSLDI.
4228// When returning true, set StartIndex to the shift amount and OpNo0
4229// and OpNo1 to the VPERM operands that should be used as the first
4230// and second shift operand respectively.
4231static bool isShlDoublePermute(const SmallVectorImpl<int> &Bytes,
4232 unsigned &StartIndex, unsigned &OpNo0,
4233 unsigned &OpNo1) {
4234 int OpNos[] = { -1, -1 };
4235 int Shift = -1;
4236 for (unsigned I = 0; I < 16; ++I) {
4237 int Index = Bytes[I];
4238 if (Index >= 0) {
4239 int ExpectedShift = (Index - I) % SystemZ::VectorBytes;
4240 int ModelOpNo = unsigned(ExpectedShift + I) / SystemZ::VectorBytes;
4241 int RealOpNo = unsigned(Index) / SystemZ::VectorBytes;
4242 if (Shift < 0)
4243 Shift = ExpectedShift;
4244 else if (Shift != ExpectedShift)
4245 return false;
4246 // Make sure that the operand mappings are consistent with previous
4247 // elements.
4248 if (OpNos[ModelOpNo] == 1 - RealOpNo)
4249 return false;
4250 OpNos[ModelOpNo] = RealOpNo;
4251 }
4252 }
4253 StartIndex = Shift;
4254 return chooseShuffleOpNos(OpNos, OpNo0, OpNo1);
4255}
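// A minimal example: Bytes = { 1, 2, ..., 15, 16 } has a uniform shift of
// one byte, so StartIndex becomes 1 with OpNo0 = 0 and OpNo1 = 1, which
// is exactly a VSLDI of the two operands by one.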
4256
4257// Create a node that performs P on operands Op0 and Op1, casting the
4258// operands to the appropriate type. The type of the result is determined by P.
4259static SDValue getPermuteNode(SelectionDAG &DAG, const SDLoc &DL,
4260 const Permute &P, SDValue Op0, SDValue Op1) {
4261 // VPDI (PERMUTE_DWORDS) always operates on v2i64s. The input
4262 // elements of a PACK are twice as wide as the outputs.
4263 unsigned InBytes = (P.Opcode == SystemZISD::PERMUTE_DWORDS ? 8 :
4264 P.Opcode == SystemZISD::PACK ? P.Operand * 2 :
4265 P.Operand);
4266 // Cast both operands to the appropriate type.
4267 MVT InVT = MVT::getVectorVT(MVT::getIntegerVT(InBytes * 8),
4268 SystemZ::VectorBytes / InBytes);
4269 Op0 = DAG.getNode(ISD::BITCAST, DL, InVT, Op0);
4270 Op1 = DAG.getNode(ISD::BITCAST, DL, InVT, Op1);
4271 SDValue Op;
4272 if (P.Opcode == SystemZISD::PERMUTE_DWORDS) {
4273 SDValue Op2 = DAG.getTargetConstant(P.Operand, DL, MVT::i32);
4274 Op = DAG.getNode(SystemZISD::PERMUTE_DWORDS, DL, InVT, Op0, Op1, Op2);
4275 } else if (P.Opcode == SystemZISD::PACK) {
4276 MVT OutVT = MVT::getVectorVT(MVT::getIntegerVT(P.Operand * 8),
4277 SystemZ::VectorBytes / P.Operand);
4278 Op = DAG.getNode(SystemZISD::PACK, DL, OutVT, Op0, Op1);
4279 } else {
4280 Op = DAG.getNode(P.Opcode, DL, InVT, Op0, Op1);
4281 }
4282 return Op;
4283}
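// For example, the VPKF form (PACK with Operand == 2) casts both inputs
// to v4i32 (InBytes == 4) and produces a v8i16 result, reflecting the
// half-width output elements described above.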
4284
4285// Bytes is a VPERM-like permute vector, except that -1 is used for
4286// undefined bytes. Implement it on operands Ops[0] and Ops[1] using
4287// VSLDI or VPERM.
4288static SDValue getGeneralPermuteNode(SelectionDAG &DAG, const SDLoc &DL,
4289 SDValue *Ops,
4290 const SmallVectorImpl<int> &Bytes) {
4291 for (unsigned I = 0; I < 2; ++I)
4292 Ops[I] = DAG.getNode(ISD::BITCAST, DL, MVT::v16i8, Ops[I]);
4293
4294 // First see whether VSLDI can be used.
4295 unsigned StartIndex, OpNo0, OpNo1;
4296 if (isShlDoublePermute(Bytes, StartIndex, OpNo0, OpNo1))
4297 return DAG.getNode(SystemZISD::SHL_DOUBLE, DL, MVT::v16i8, Ops[OpNo0],
4298 Ops[OpNo1],
4299 DAG.getTargetConstant(StartIndex, DL, MVT::i32));
4300
4301 // Fall back on VPERM. Construct an SDNode for the permute vector.
4302 SDValue IndexNodes[SystemZ::VectorBytes];
4303 for (unsigned I = 0; I < SystemZ::VectorBytes; ++I)
4304 if (Bytes[I] >= 0)
4305 IndexNodes[I] = DAG.getConstant(Bytes[I], DL, MVT::i32);
4306 else
4307 IndexNodes[I] = DAG.getUNDEF(MVT::i32);
4308 SDValue Op2 = DAG.getBuildVector(MVT::v16i8, DL, IndexNodes);
4309 return DAG.getNode(SystemZISD::PERMUTE, DL, MVT::v16i8, Ops[0], Ops[1], Op2);
4310}
4311
4312namespace {
4313// Describes a general N-operand vector shuffle.
4314struct GeneralShuffle {
4315 GeneralShuffle(EVT vt) : VT(vt) {}
4316 void addUndef();
4317 bool add(SDValue, unsigned);
4318 SDValue getNode(SelectionDAG &, const SDLoc &);
4319
4320 // The operands of the shuffle.
4321 SmallVector<SDValue, SystemZ::VectorBytes> Ops;
4322
4323 // Index I is -1 if byte I of the result is undefined. Otherwise the
4324 // result comes from byte Bytes[I] % SystemZ::VectorBytes of operand
4325 // Bytes[I] / SystemZ::VectorBytes.
4326 SmallVector<int, SystemZ::VectorBytes> Bytes;
4327
4328 // The type of the shuffle result.
4329 EVT VT;
4330};
4331}
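// Under this encoding, Bytes[I] == 37 means that byte I of the result is
// byte 37 % 16 == 5 of operand 37 / 16 == 2, i.e. of Ops[2].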
4332
4333// Add an extra undefined element to the shuffle.
4334void GeneralShuffle::addUndef() {
4335 unsigned BytesPerElement = VT.getVectorElementType().getStoreSize();
4336 for (unsigned I = 0; I < BytesPerElement; ++I)
4337 Bytes.push_back(-1);
4338}
4339
4340// Add an extra element to the shuffle, taking it from element Elem of Op.
4341// A null Op indicates a vector input whose value will be calculated later;
4342// there is at most one such input per shuffle and it always has the same
4343// type as the result. Aborts and returns false if the source vector elements
4344 // of an EXTRACT_VECTOR_ELT are smaller than the destination elements. LLVM
4345 // semantics implicitly extend such values, but that case is rare and not optimized.
4346bool GeneralShuffle::add(SDValue Op, unsigned Elem) {
4347 unsigned BytesPerElement = VT.getVectorElementType().getStoreSize();
4348
4349 // The source vector can have wider elements than the result,
4350 // either through an explicit TRUNCATE or because of type legalization.
4351 // We want the least significant part.
4352 EVT FromVT = Op.getNode() ? Op.getValueType() : VT;
4353 unsigned FromBytesPerElement = FromVT.getVectorElementType().getStoreSize();
4354
4355 // Return false if the source elements are smaller than their destination
4356 // elements.
4357 if (FromBytesPerElement < BytesPerElement)
4358 return false;
4359
4360 unsigned Byte = ((Elem * FromBytesPerElement) % SystemZ::VectorBytes +
4361 (FromBytesPerElement - BytesPerElement));
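// For example, taking element 1 of a v2i64 source for a v4i32 result
// gives Byte = (1 * 8) % 16 + (8 - 4) == 12, the offset of the least
// significant four bytes of that (big-endian) element.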
4362
4363 // Look through things like shuffles and bitcasts.
4364 while (Op.getNode()) {
4365 if (Op.getOpcode() == ISD::BITCAST)
4366 Op = Op.getOperand(0);
4367 else if (Op.getOpcode() == ISD::VECTOR_SHUFFLE && Op.hasOneUse()) {
4368 // See whether the bytes we need come from a contiguous part of one
4369 // operand.
4370 SmallVector<int, SystemZ::VectorBytes> OpBytes;
4371 if (!getVPermMask(Op, OpBytes))
4372 break;
4373 int NewByte;
4374 if (!getShuffleInput(OpBytes, Byte, BytesPerElement, NewByte))
4375 break;
4376 if (NewByte < 0) {
4377 addUndef();
4378 return true;
4379 }
4380 Op = Op.getOperand(unsigned(NewByte) / SystemZ::VectorBytes);
4381 Byte = unsigned(NewByte) % SystemZ::VectorBytes;
4382 } else if (Op.isUndef()) {
4383 addUndef();
4384 return true;
4385 } else
4386 break;
4387 }
4388
4389 // Make sure that the source of the extraction is in Ops.
4390 unsigned OpNo = 0;
4391 for (; OpNo < Ops.size(); ++OpNo)
4392 if (Ops[OpNo] == Op)
4393 break;
4394 if (OpNo == Ops.size())
4395 Ops.push_back(Op);
4396
4397 // Add the element to Bytes.
4398 unsigned Base = OpNo * SystemZ::VectorBytes + Byte;
4399 for (unsigned I = 0; I < BytesPerElement; ++I)
4400 Bytes.push_back(Base + I);
4401
4402 return true;
4403}
4404
4405// Return SDNodes for the completed shuffle.
4406SDValue GeneralShuffle::getNode(SelectionDAG &DAG, const SDLoc &DL) {
4407 assert(Bytes.size() == SystemZ::VectorBytes && "Incomplete vector");
4408
4409 if (Ops.size() == 0)
4410 return DAG.getUNDEF(VT);
4411
4412 // Make sure that there are at least two shuffle operands.
4413 if (Ops.size() == 1)
4414 Ops.push_back(DAG.getUNDEF(MVT::v16i8));
4415
4416 // Create a tree of shuffles, deferring the root node until after the loop.
4417 // Try to redistribute the undefined elements of non-root nodes so that
4418 // the non-root shuffles match something like a pack or merge, then adjust
4419 // the parent node's permute vector to compensate for the new order.
4420 // Among other things, this copes with vectors like <2 x i16> that were
4421 // padded with undefined elements during type legalization.
4422 //
4423 // In the best case this redistribution will lead to the whole tree
4424 // using packs and merges. It should rarely be a loss in other cases.
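// For example, with four operands the first pass below combines Ops[0]
// with Ops[1] and Ops[2] with Ops[3]; the root permute after the loop
// then combines those two intermediate results.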
4425 unsigned Stride = 1;
4426 for (; Stride * 2 < Ops.size(); Stride *= 2) {
4427 for (unsigned I = 0; I < Ops.size() - Stride; I += Stride * 2) {
4428 SDValue SubOps[] = { Ops[I], Ops[I + Stride] };
4429
4430 // Create a mask for just these two operands.
4431 SmallVector<int, SystemZ::VectorBytes> NewBytes(SystemZ::VectorBytes);
4432 for (unsigned J = 0; J < SystemZ::VectorBytes; ++J) {
4433 unsigned OpNo = unsigned(Bytes[J]) / SystemZ::VectorBytes;
4434 unsigned Byte = unsigned(Bytes[J]) % SystemZ::VectorBytes;
4435 if (OpNo == I)
4436 NewBytes[J] = Byte;
4437 else if (OpNo == I + Stride)
4438 NewBytes[J] = SystemZ::VectorBytes + Byte;
4439 else
4440 NewBytes[J] = -1;
4441 }
4442 // See if it would be better to reorganize NewBytes to avoid using VPERM.
4443 SmallVector<int, SystemZ::VectorBytes> NewBytesMap(SystemZ::VectorBytes);
4444 if (const Permute *P = matchDoublePermute(NewBytes, NewBytesMap)) {
4445 Ops[I] = getPermuteNode(DAG, DL, *P, SubOps[0], SubOps[1]);
4446 // Applying NewBytesMap to Ops[I] gets back to NewBytes.
4447 for (unsigned J = 0; J < SystemZ::VectorBytes; ++J) {
4448 if (NewBytes[J] >= 0) {
4449 assert(unsigned(NewBytesMap[J]) < SystemZ::VectorBytes &&
4450 "Invalid double permute");
4451 Bytes[J] = I * SystemZ::VectorBytes + NewBytesMap[J];
4452 } else
4453 assert(NewBytesMap[J] < 0 && "Invalid double permute");
4454 }
4455 } else {
4456 // Just use NewBytes on the operands.
4457 Ops[I] = getGeneralPermuteNode(DAG, DL, SubOps, NewBytes);
4458 for (unsigned J = 0; J < SystemZ::VectorBytes; ++J)
4459 if (NewBytes[J] >= 0)
4460 Bytes[J] = I * SystemZ::VectorBytes + J;
4461 }
4462 }
4463 }
4464
4465 // Now we just have 2 inputs. Put the second operand in Ops[1].
4466 if (Stride > 1) {
4467 Ops[1] = Ops[Stride];
4468 for (unsigned I = 0; I < SystemZ::VectorBytes; ++I)
4469 if (Bytes[I] >= int(SystemZ::VectorBytes))
4470 Bytes[I] -= (Stride - 1) * SystemZ::VectorBytes;
4471 }
4472
4473 // Look for an instruction that can do the permute without resorting
4474 // to VPERM.
4475 unsigned OpNo0, OpNo1;
4476 SDValue Op;
4477 if (const Permute *P = matchPermute(Bytes, OpNo0, OpNo1))
4478 Op = getPermuteNode(DAG, DL, *P, Ops[OpNo0], Ops[OpNo1]);
4479 else
4480 Op = getGeneralPermuteNode(DAG, DL, &Ops[0], Bytes);
4481 return DAG.getNode(ISD::BITCAST, DL, VT, Op);
4482}
4483
4484// Return true if the given BUILD_VECTOR is a scalar-to-vector conversion.
4485static bool isScalarToVector(SDValue Op) {
4486 for (unsigned I = 1, E = Op.getNumOperands(); I != E; ++I)
4487 if (!Op.getOperand(I).isUndef())
4488 return false;
4489 return true;
4490}
4491
4492// Return a vector of type VT that contains Value in the first element.
4493// The other elements don't matter.
4494static SDValue buildScalarToVector(SelectionDAG &DAG, const SDLoc &DL, EVT VT,
4495 SDValue Value) {
4496 // If we have a constant, replicate it to all elements and let the
4497 // BUILD_VECTOR lowering take care of it.
4498 if (Value.getOpcode() == ISD::Constant ||
4499 Value.getOpcode() == ISD::ConstantFP) {
4500 SmallVector<SDValue, 16> Ops(VT.getVectorNumElements(), Value);
4501 return DAG.getBuildVector(VT, DL, Ops);
4502 }
4503 if (Value.isUndef())
4504 return DAG.getUNDEF(VT);
4505 return DAG.getNode(ISD::SCALAR_TO_VECTOR, DL, VT, Value);
4506}
4507
4508// Return a vector of type VT in which Op0 is in element 0 and Op1 is in
4509// element 1. Used for cases in which replication is cheap.
4510static SDValue buildMergeScalars(SelectionDAG &DAG, const SDLoc &DL, EVT VT,
4511 SDValue Op0, SDValue Op1) {
4512 if (Op0.isUndef()) {
4513 if (Op1.isUndef())
4514 return DAG.getUNDEF(VT);
4515 return DAG.getNode(SystemZISD::REPLICATE, DL, VT, Op1);
4516 }
4517 if (Op1.isUndef())
4518 return DAG.getNode(SystemZISD::REPLICATE, DL, VT, Op0);
4519 return DAG.getNode(SystemZISD::MERGE_HIGH, DL, VT,
4520 buildScalarToVector(DAG, DL, VT, Op0),
4521 buildScalarToVector(DAG, DL, VT, Op1));
4522}
4523
4524// Extend GPR scalars Op0 and Op1 to doublewords and return a v2i64
4525// vector for them.
4526static SDValue joinDwords(SelectionDAG &DAG, const SDLoc &DL, SDValue Op0,
4527 SDValue Op1) {
4528 if (Op0.isUndef() && Op1.isUndef())
4529 return DAG.getUNDEF(MVT::v2i64);
4530 // If one of the two inputs is undefined then replicate the other one,
4531 // in order to avoid using another register unnecessarily.
4532 if (Op0.isUndef())
4533 Op0 = Op1 = DAG.getNode(ISD::ANY_EXTEND, DL, MVT::i64, Op1);
4534 else if (Op1.isUndef())
4535 Op0 = Op1 = DAG.getNode(ISD::ANY_EXTEND, DL, MVT::i64, Op0);
4536 else {
4537 Op0 = DAG.getNode(ISD::ANY_EXTEND, DL, MVT::i64, Op0);
4538 Op1 = DAG.getNode(ISD::ANY_EXTEND, DL, MVT::i64, Op1);
4539 }
4540 return DAG.getNode(SystemZISD::JOIN_DWORDS, DL, MVT::v2i64, Op0, Op1);
4541}
4542
4543// If a BUILD_VECTOR contains some EXTRACT_VECTOR_ELTs, it's usually
4544// better to use VECTOR_SHUFFLEs on them, only using BUILD_VECTOR for
4545// the non-EXTRACT_VECTOR_ELT elements. See if the given BUILD_VECTOR
4546// would benefit from this representation and return it if so.
4547static SDValue tryBuildVectorShuffle(SelectionDAG &DAG,
4548 BuildVectorSDNode *BVN) {
4549 EVT VT = BVN->getValueType(0);
4550 unsigned NumElements = VT.getVectorNumElements();
4551
4552 // Represent the BUILD_VECTOR as an N-operand VECTOR_SHUFFLE-like operation
4553 // on byte vectors. If there are non-EXTRACT_VECTOR_ELT elements that still
4554 // need a BUILD_VECTOR, add an additional placeholder operand for that
4555 // BUILD_VECTOR and store its operands in ResidueOps.
4556 GeneralShuffle GS(VT);
4557 SmallVector<SDValue, SystemZ::VectorBytes> ResidueOps;
4558 bool FoundOne = false;
4559 for (unsigned I = 0; I < NumElements; ++I) {
4560 SDValue Op = BVN->getOperand(I);
4561 if (Op.getOpcode() == ISD::TRUNCATE)
4562 Op = Op.getOperand(0);
4563 if (Op.getOpcode() == ISD::EXTRACT_VECTOR_ELT &&
4564 Op.getOperand(1).getOpcode() == ISD::Constant) {
4565 unsigned Elem = cast<ConstantSDNode>(Op.getOperand(1))->getZExtValue();
4566 if (!GS.add(Op.getOperand(0), Elem))
4567 return SDValue();
4568 FoundOne = true;
4569 } else if (Op.isUndef()) {
4570 GS.addUndef();
4571 } else {
4572 if (!GS.add(SDValue(), ResidueOps.size()))
4573 return SDValue();
4574 ResidueOps.push_back(BVN->getOperand(I));
4575 }
4576 }
4577
4578 // Nothing to do if there are no EXTRACT_VECTOR_ELTs.
4579 if (!FoundOne)
4580 return SDValue();
4581
4582 // Create the BUILD_VECTOR for the remaining elements, if any.
4583 if (!ResidueOps.empty()) {
4584 while (ResidueOps.size() < NumElements)
4585 ResidueOps.push_back(DAG.getUNDEF(ResidueOps[0].getValueType()));
4586 for (auto &Op : GS.Ops) {
4587 if (!Op.getNode()) {
4588 Op = DAG.getBuildVector(VT, SDLoc(BVN), ResidueOps);
4589 break;
4590 }
4591 }
4592 }
4593 return GS.getNode(DAG, SDLoc(BVN));
4594}
4595
4596bool SystemZTargetLowering::isVectorElementLoad(SDValue Op) const {
4597 if (Op.getOpcode() == ISD::LOAD && cast<LoadSDNode>(Op)->isUnindexed())
4598 return true;
4599 if (Subtarget.hasVectorEnhancements2() && Op.getOpcode() == SystemZISD::LRV)
4600 return true;
4601 return false;
4602}
4603
4604// Combine GPR scalar values Elems into a vector of type VT.
4605SDValue
4606SystemZTargetLowering::buildVector(SelectionDAG &DAG, const SDLoc &DL, EVT VT,
4607 SmallVectorImpl<SDValue> &Elems) const {
4608 // See whether there is a single replicated value.
4609 SDValue Single;
4610 unsigned int NumElements = Elems.size();
4611 unsigned int Count = 0;
4612 for (auto Elem : Elems) {
4613 if (!Elem.isUndef()) {
4614 if (!Single.getNode())
4615 Single = Elem;
4616 else if (Elem != Single) {
4617 Single = SDValue();
4618 break;
4619 }
4620 Count += 1;
4621 }
4622 }
4623 // There are three cases here:
4624 //
4625 // - if the only defined element is a loaded one, the best sequence
4626 // is a replicating load.
4627 //
4628 // - otherwise, if the only defined element is an i64 value, we will
4629 // end up with the same VLVGP sequence regardless of whether we short-cut
4630 // for replication or fall through to the later code.
4631 //
4632 // - otherwise, if the only defined element is an i32 or smaller value,
4633 // we would need 2 instructions to replicate it: VLVGP followed by VREPx.
4634 // This is only a win if the single defined element is used more than once.
4635 // In other cases we're better off using a single VLVGx.
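// For example, building <4 x i32> from { X, X, X, undef } for a
// non-constant, non-loaded X gives Count == 3, so a single REPLICATE of
// X beats inserting X three times with VLVGF.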
4636 if (Single.getNode() && (Count > 1 || isVectorElementLoad(Single)))
4637 return DAG.getNode(SystemZISD::REPLICATE, DL, VT, Single);
4638
4639 // If all elements are loads, use VLREP/VLEs (below).
4640 bool AllLoads = true;
4641 for (auto Elem : Elems)
4642 if (!isVectorElementLoad(Elem)) {
4643 AllLoads = false;
4644 break;
4645 }
4646
4647 // The best way of building a v2i64 from two i64s is to use VLVGP.
4648 if (VT == MVT::v2i64 && !AllLoads)
4649 return joinDwords(DAG, DL, Elems[0], Elems[1]);
4650
4651 // Use a 64-bit merge high to combine two doubles.
4652 if (VT == MVT::v2f64 && !AllLoads)
4653 return buildMergeScalars(DAG, DL, VT, Elems[0], Elems[1]);
4654
4655 // Build v4f32 values directly from the FPRs:
4656 //
4657 //   <Axxx> <Bxxx> <Cxxx> <Dxxx>
4658 //      V             V            VMRHF
4659 //   <ABxx>        <CDxx>
4660 //         V                       VMRHG
4661 //      <ABCD>
4662 if (VT == MVT::v4f32 && !AllLoads) {
4663 SDValue Op01 = buildMergeScalars(DAG, DL, VT, Elems[0], Elems[1]);
4664 SDValue Op23 = buildMergeScalars(DAG, DL, VT, Elems[2], Elems[3]);
4665 // Avoid unnecessary undefs by reusing the other operand.
4666 if (Op01.isUndef())
4667 Op01 = Op23;
4668 else if (Op23.isUndef())
4669 Op23 = Op01;
4670 // Merging identical replications is a no-op.
4671 if (Op01.getOpcode() == SystemZISD::REPLICATE && Op01 == Op23)
4672 return Op01;
4673 Op01 = DAG.getNode(ISD::BITCAST, DL, MVT::v2i64, Op01);
4674 Op23 = DAG.getNode(ISD::BITCAST, DL, MVT::v2i64, Op23);
4675 SDValue Op = DAG.getNode(SystemZISD::MERGE_HIGH,
4676 DL, MVT::v2i64, Op01, Op23);
4677 return DAG.getNode(ISD::BITCAST, DL, VT, Op);
4678 }
4679
4680 // Collect the constant terms.
4681 SmallVector<SDValue, SystemZ::VectorBytes> Constants(NumElements, SDValue());
4682 SmallVector<bool, SystemZ::VectorBytes> Done(NumElements, false);
4683
4684 unsigned NumConstants = 0;
4685 for (unsigned I = 0; I < NumElements; ++I) {
4686 SDValue Elem = Elems[I];
4687 if (Elem.getOpcode() == ISD::Constant ||
4688 Elem.getOpcode() == ISD::ConstantFP) {
4689 NumConstants += 1;
4690 Constants[I] = Elem;
4691 Done[I] = true;
4692 }
4693 }
4694 // If there was at least one constant, fill in the other elements of
4695 // Constants with undefs to get a full vector constant and use that
4696 // as the starting point.
4697 SDValue Result;
4698 SDValue ReplicatedVal;
4699 if (NumConstants > 0) {
4700 for (unsigned I = 0; I < NumElements; ++I)
4701 if (!Constants[I].getNode())
4702 Constants[I] = DAG.getUNDEF(Elems[I].getValueType());
4703 Result = DAG.getBuildVector(VT, DL, Constants);
4704 } else {
4705 // Otherwise try to use VLREP or VLVGP to start the sequence in order to
4706 // avoid a false dependency on any previous contents of the vector
4707 // register.
4708
4709 // Use a VLREP if at least one element is a load. Make sure to replicate
4710 // the load with the most elements having its value.
4711 std::map<const SDNode*, unsigned> UseCounts;
4712 SDNode *LoadMaxUses = nullptr;
4713 for (unsigned I = 0; I < NumElements; ++I)
4714 if (isVectorElementLoad(Elems[I])) {
4715 SDNode *Ld = Elems[I].getNode();
4716 UseCounts[Ld]++;
4717 if (LoadMaxUses == nullptr || UseCounts[LoadMaxUses] < UseCounts[Ld])
4718 LoadMaxUses = Ld;
4719 }
4720 if (LoadMaxUses != nullptr) {
4721 ReplicatedVal = SDValue(LoadMaxUses, 0);
4722 Result = DAG.getNode(SystemZISD::REPLICATE, DL, VT, ReplicatedVal);
4723 } else {
4724 // Try to use VLVGP.
4725 unsigned I1 = NumElements / 2 - 1;
4726 unsigned I2 = NumElements - 1;
4727 bool Def1 = !Elems[I1].isUndef();
4728 bool Def2 = !Elems[I2].isUndef();
4729 if (Def1 || Def2) {
4730 SDValue Elem1 = Elems[Def1 ? I1 : I2];
4731 SDValue Elem2 = Elems[Def2 ? I2 : I1];
4732 Result = DAG.getNode(ISD::BITCAST, DL, VT,
4733 joinDwords(DAG, DL, Elem1, Elem2));
4734 Done[I1] = true;
4735 Done[I2] = true;
4736 } else
4737 Result = DAG.getUNDEF(VT);
4738 }
4739 }
4740
4741 // Use VLVGx to insert the other elements.
4742 for (unsigned I = 0; I < NumElements; ++I)
4743 if (!Done[I] && !Elems[I].isUndef() && Elems[I] != ReplicatedVal)
4744 Result = DAG.getNode(ISD::INSERT_VECTOR_ELT, DL, VT, Result, Elems[I],
4745 DAG.getConstant(I, DL, MVT::i32));
4746 return Result;
4747}
4748
4749SDValue SystemZTargetLowering::lowerBUILD_VECTOR(SDValue Op,
4750 SelectionDAG &DAG) const {
4751 auto *BVN = cast<BuildVectorSDNode>(Op.getNode());
4752 SDLoc DL(Op);
4753 EVT VT = Op.getValueType();
4754
4755 if (BVN->isConstant()) {
4756 if (SystemZVectorConstantInfo(BVN).isVectorConstantLegal(Subtarget))
4757 return Op;
4758
4759 // Fall back to loading it from memory.
4760 return SDValue();
4761 }
4762
4763 // See if we should use shuffles to construct the vector from other vectors.
4764 if (SDValue Res = tryBuildVectorShuffle(DAG, BVN))
4765 return Res;
4766
4767 // Detect SCALAR_TO_VECTOR conversions.
4768 if (isOperationLegal(ISD::SCALAR_TO_VECTOR, VT) && isScalarToVector(Op))
4769 return buildScalarToVector(DAG, DL, VT, Op.getOperand(0));
4770
4771 // Otherwise use buildVector to build the vector up from GPRs.
4772 unsigned NumElements = Op.getNumOperands();
4773 SmallVector<SDValue, SystemZ::VectorBytes> Ops(NumElements);
4774 for (unsigned I = 0; I < NumElements; ++I)
4775 Ops[I] = Op.getOperand(I);
4776 return buildVector(DAG, DL, VT, Ops);
4777}
4778
4779SDValue SystemZTargetLowering::lowerVECTOR_SHUFFLE(SDValue Op,
4780 SelectionDAG &DAG) const {
4781 auto *VSN = cast<ShuffleVectorSDNode>(Op.getNode());
4782 SDLoc DL(Op);
4783 EVT VT = Op.getValueType();
4784 unsigned NumElements = VT.getVectorNumElements();
4785
4786 if (VSN->isSplat()) {
4787 SDValue Op0 = Op.getOperand(0);
4788 unsigned Index = VSN->getSplatIndex();
4789 assert(Index < VT.getVectorNumElements() &&
4790 "Splat index should be defined and in first operand");
4791 // See whether the value we're splatting is directly available as a scalar.
4792 if ((Index == 0 && Op0.getOpcode() == ISD::SCALAR_TO_VECTOR) ||
4793 Op0.getOpcode() == ISD::BUILD_VECTOR)
4794 return DAG.getNode(SystemZISD::REPLICATE, DL, VT, Op0.getOperand(Index));
4795 // Otherwise keep it as a vector-to-vector operation.
4796 return DAG.getNode(SystemZISD::SPLAT, DL, VT, Op.getOperand(0),
4797 DAG.getTargetConstant(Index, DL, MVT::i32));
4798 }
4799
4800 GeneralShuffle GS(VT);
4801 for (unsigned I = 0; I < NumElements; ++I) {
4802 int Elt = VSN->getMaskElt(I);
4803 if (Elt < 0)
4804 GS.addUndef();
4805 else if (!GS.add(Op.getOperand(unsigned(Elt) / NumElements),
4806 unsigned(Elt) % NumElements))
4807 return SDValue();
4808 }
4809 return GS.getNode(DAG, SDLoc(VSN));
4810}
4811
4812SDValue SystemZTargetLowering::lowerSCALAR_TO_VECTOR(SDValue Op,
4813 SelectionDAG &DAG) const {
4814 SDLoc DL(Op);
4815 // Just insert the scalar into element 0 of an undefined vector.
4816 return DAG.getNode(ISD::INSERT_VECTOR_ELT, DL,
4817 Op.getValueType(), DAG.getUNDEF(Op.getValueType()),
4818 Op.getOperand(0), DAG.getConstant(0, DL, MVT::i32));
4819}
4820
4821SDValue SystemZTargetLowering::lowerINSERT_VECTOR_ELT(SDValue Op,
4822 SelectionDAG &DAG) const {
4823 // Handle insertions of floating-point values.
4824 SDLoc DL(Op);
4825 SDValue Op0 = Op.getOperand(0);
4826 SDValue Op1 = Op.getOperand(1);
4827 SDValue Op2 = Op.getOperand(2);
4828 EVT VT = Op.getValueType();
4829
4830 // Insertions into constant indices of a v2f64 can be done using VPDI.
4831 // However, if the inserted value is a bitcast or a constant then it's
4832 // better to use GPRs, as below.
4833 if (VT == MVT::v2f64 &&
4834 Op1.getOpcode() != ISD::BITCAST &&
4835 Op1.getOpcode() != ISD::ConstantFP &&
4836 Op2.getOpcode() == ISD::Constant) {
4837 uint64_t Index = cast<ConstantSDNode>(Op2)->getZExtValue();
4838 unsigned Mask = VT.getVectorNumElements() - 1;
4839 if (Index <= Mask)
4840 return Op;
4841 }
4842
4843 // Otherwise bitcast to the equivalent integer form and insert via a GPR.
4844 MVT IntVT = MVT::getIntegerVT(VT.getScalarSizeInBits());
4845 MVT IntVecVT = MVT::getVectorVT(IntVT, VT.getVectorNumElements());
4846 SDValue Res = DAG.getNode(ISD::INSERT_VECTOR_ELT, DL, IntVecVT,
4847 DAG.getNode(ISD::BITCAST, DL, IntVecVT, Op0),
4848 DAG.getNode(ISD::BITCAST, DL, IntVT, Op1), Op2);
4849 return DAG.getNode(ISD::BITCAST, DL, VT, Res);
4850}
4851
4852SDValue
4853SystemZTargetLowering::lowerEXTRACT_VECTOR_ELT(SDValue Op,
4854 SelectionDAG &DAG) const {
4855 // Handle extractions of floating-point values.
4856 SDLoc DL(Op);
4857 SDValue Op0 = Op.getOperand(0);
4858 SDValue Op1 = Op.getOperand(1);
4859 EVT VT = Op.getValueType();
4860 EVT VecVT = Op0.getValueType();
4861
4862 // Extractions of constant indices can be done directly.
4863 if (auto *CIndexN = dyn_cast<ConstantSDNode>(Op1)) {
4864 uint64_t Index = CIndexN->getZExtValue();
4865 unsigned Mask = VecVT.getVectorNumElements() - 1;
4866 if (Index <= Mask)
4867 return Op;
4868 }
4869
4870 // Otherwise bitcast to the equivalent integer form and extract via a GPR.
4871 MVT IntVT = MVT::getIntegerVT(VT.getSizeInBits());
4872 MVT IntVecVT = MVT::getVectorVT(IntVT, VecVT.getVectorNumElements());
4873 SDValue Res = DAG.getNode(ISD::EXTRACT_VECTOR_ELT, DL, IntVT,
4874 DAG.getNode(ISD::BITCAST, DL, IntVecVT, Op0), Op1);
4875 return DAG.getNode(ISD::BITCAST, DL, VT, Res);
4876}
4877
4878SDValue
4879SystemZTargetLowering::lowerExtendVectorInreg(SDValue Op, SelectionDAG &DAG,
4880 unsigned UnpackHigh) const {
4881 SDValue PackedOp = Op.getOperand(0);
4882 EVT OutVT = Op.getValueType();
4883 EVT InVT = PackedOp.getValueType();
4884 unsigned ToBits = OutVT.getScalarSizeInBits();
4885 unsigned FromBits = InVT.getScalarSizeInBits();
4886 do {
4887 FromBits *= 2;
4888 EVT NewVT = MVT::getVectorVT(MVT::getIntegerVT(FromBits),
4889 SystemZ::VectorBits / FromBits);
4890 PackedOp = DAG.getNode(UnpackHigh, SDLoc(PackedOp), NewVT, PackedOp);
4891 } while (FromBits != ToBits);
4892 return PackedOp;
4893}
4894
4895SDValue SystemZTargetLowering::lowerShift(SDValue Op, SelectionDAG &DAG,
4896 unsigned ByScalar) const {
4897 // Look for cases where a vector shift can use the *_BY_SCALAR form.
4898 SDValue Op0 = Op.getOperand(0);
4899 SDValue Op1 = Op.getOperand(1);
4900 SDLoc DL(Op);
4901 EVT VT = Op.getValueType();
4902 unsigned ElemBitSize = VT.getScalarSizeInBits();
4903
4904 // See whether the shift vector is a splat represented as BUILD_VECTOR.
4905 if (auto *BVN = dyn_cast<BuildVectorSDNode>(Op1)) {
4906 APInt SplatBits, SplatUndef;
4907 unsigned SplatBitSize;
4908 bool HasAnyUndefs;
4909 // Check for constant splats. Use ElemBitSize as the minimum element
4910 // width and reject splats that need wider elements.
4911 if (BVN->isConstantSplat(SplatBits, SplatUndef, SplatBitSize, HasAnyUndefs,
4912 ElemBitSize, true) &&
4913 SplatBitSize == ElemBitSize) {
4914 SDValue Shift = DAG.getConstant(SplatBits.getZExtValue() & 0xfff,
4915 DL, MVT::i32);
4916 return DAG.getNode(ByScalar, DL, VT, Op0, Shift);
4917 }
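// So (shl <8 x i16> X, splat(3)), for instance, becomes a single
// VSHL_BY_SCALAR of X by 3, which the backend can then select as one
// element-shift instruction (a VESLH-style shift) rather than a full
// per-element vector shift.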
4918 // Check for variable splats.
4919 BitVector UndefElements;
4920 SDValue Splat = BVN->getSplatValue(&UndefElements);
4921 if (Splat) {
4922 // Since i32 is the smallest legal type, we either need a no-op
4923 // or a truncation.
4924 SDValue Shift = DAG.getNode(ISD::TRUNCATE, DL, MVT::i32, Splat);
4925 return DAG.getNode(ByScalar, DL, VT, Op0, Shift);
4926 }
4927 }
4928
4929 // See whether the shift vector is a splat represented as SHUFFLE_VECTOR,
4930 // and the shift amount is directly available in a GPR.
4931 if (auto *VSN = dyn_cast<ShuffleVectorSDNode>(Op1)) {
4932 if (VSN->isSplat()) {
4933 SDValue VSNOp0 = VSN->getOperand(0);
4934 unsigned Index = VSN->getSplatIndex();
4935 assert(Index < VT.getVectorNumElements() &&
4936 "Splat index should be defined and in first operand");
4937 if ((Index == 0 && VSNOp0.getOpcode() == ISD::SCALAR_TO_VECTOR) ||
4938 VSNOp0.getOpcode() == ISD::BUILD_VECTOR) {
4939 // Since i32 is the smallest legal type, we either need a no-op
4940 // or a truncation.
4941 SDValue Shift = DAG.getNode(ISD::TRUNCATE, DL, MVT::i32,
4942 VSNOp0.getOperand(Index));
4943 return DAG.getNode(ByScalar, DL, VT, Op0, Shift);
4944 }
4945 }
4946 }
4947
4948 // Otherwise just treat the current form as legal.
4949 return Op;
4950}
4951
4952SDValue SystemZTargetLowering::LowerOperation(SDValue Op,
4953 SelectionDAG &DAG) const {
4954 switch (Op.getOpcode()) {
4955 case ISD::FRAMEADDR:
4956 return lowerFRAMEADDR(Op, DAG);
4957 case ISD::RETURNADDR:
4958 return lowerRETURNADDR(Op, DAG);
4959 case ISD::BR_CC:
4960 return lowerBR_CC(Op, DAG);
4961 case ISD::SELECT_CC:
4962 return lowerSELECT_CC(Op, DAG);
4963 case ISD::SETCC:
4964 return lowerSETCC(Op, DAG);
4965 case ISD::GlobalAddress:
4966 return lowerGlobalAddress(cast<GlobalAddressSDNode>(Op), DAG);
4967 case ISD::GlobalTLSAddress:
4968 return lowerGlobalTLSAddress(cast<GlobalAddressSDNode>(Op), DAG);
4969 case ISD::BlockAddress:
4970 return lowerBlockAddress(cast<BlockAddressSDNode>(Op), DAG);
4971 case ISD::JumpTable:
4972 return lowerJumpTable(cast<JumpTableSDNode>(Op), DAG);
4973 case ISD::ConstantPool:
4974 return lowerConstantPool(cast<ConstantPoolSDNode>(Op), DAG);
4975 case ISD::BITCAST:
4976 return lowerBITCAST(Op, DAG);
4977 case ISD::VASTART:
4978 return lowerVASTART(Op, DAG);
4979 case ISD::VACOPY:
4980 return lowerVACOPY(Op, DAG);
4981 case ISD::DYNAMIC_STACKALLOC:
4982 return lowerDYNAMIC_STACKALLOC(Op, DAG);
4983 case ISD::GET_DYNAMIC_AREA_OFFSET:
4984 return lowerGET_DYNAMIC_AREA_OFFSET(Op, DAG);
4985 case ISD::SMUL_LOHI:
4986 return lowerSMUL_LOHI(Op, DAG);
4987 case ISD::UMUL_LOHI:
4988 return lowerUMUL_LOHI(Op, DAG);
4989 case ISD::SDIVREM:
4990 return lowerSDIVREM(Op, DAG);
4991 case ISD::UDIVREM:
4992 return lowerUDIVREM(Op, DAG);
4993 case ISD::SADDO:
4994 case ISD::SSUBO:
4995 case ISD::UADDO:
4996 case ISD::USUBO:
4997 return lowerXALUO(Op, DAG);
4998 case ISD::ADDCARRY:
4999 case ISD::SUBCARRY:
5000 return lowerADDSUBCARRY(Op, DAG);
5001 case ISD::OR:
5002 return lowerOR(Op, DAG);
5003 case ISD::CTPOP:
5004 return lowerCTPOP(Op, DAG);
5005 case ISD::ATOMIC_FENCE:
5006 return lowerATOMIC_FENCE(Op, DAG);
5007 case ISD::ATOMIC_SWAP:
5008 return lowerATOMIC_LOAD_OP(Op, DAG, SystemZISD::ATOMIC_SWAPW);
5009 case ISD::ATOMIC_STORE:
5010 return lowerATOMIC_STORE(Op, DAG);
5011 case ISD::ATOMIC_LOAD:
5012 return lowerATOMIC_LOAD(Op, DAG);
5013 case ISD::ATOMIC_LOAD_ADD:
5014 return lowerATOMIC_LOAD_OP(Op, DAG, SystemZISD::ATOMIC_LOADW_ADD);
5015 case ISD::ATOMIC_LOAD_SUB:
5016 return lowerATOMIC_LOAD_SUB(Op, DAG);
5017 case ISD::ATOMIC_LOAD_AND:
5018 return lowerATOMIC_LOAD_OP(Op, DAG, SystemZISD::ATOMIC_LOADW_AND);
5019 case ISD::ATOMIC_LOAD_OR:
5020 return lowerATOMIC_LOAD_OP(Op, DAG, SystemZISD::ATOMIC_LOADW_OR);
5021 case ISD::ATOMIC_LOAD_XOR:
5022 return lowerATOMIC_LOAD_OP(Op, DAG, SystemZISD::ATOMIC_LOADW_XOR);
5023 case ISD::ATOMIC_LOAD_NAND:
5024 return lowerATOMIC_LOAD_OP(Op, DAG, SystemZISD::ATOMIC_LOADW_NAND);
5025 case ISD::ATOMIC_LOAD_MIN:
5026 return lowerATOMIC_LOAD_OP(Op, DAG, SystemZISD::ATOMIC_LOADW_MIN);
5027 case ISD::ATOMIC_LOAD_MAX:
5028 return lowerATOMIC_LOAD_OP(Op, DAG, SystemZISD::ATOMIC_LOADW_MAX);
5029 case ISD::ATOMIC_LOAD_UMIN:
5030 return lowerATOMIC_LOAD_OP(Op, DAG, SystemZISD::ATOMIC_LOADW_UMIN);
5031 case ISD::ATOMIC_LOAD_UMAX:
5032 return lowerATOMIC_LOAD_OP(Op, DAG, SystemZISD::ATOMIC_LOADW_UMAX);
5033 case ISD::ATOMIC_CMP_SWAP_WITH_SUCCESS:
5034 return lowerATOMIC_CMP_SWAP(Op, DAG);
5035 case ISD::STACKSAVE:
5036 return lowerSTACKSAVE(Op, DAG);
5037 case ISD::STACKRESTORE:
5038 return lowerSTACKRESTORE(Op, DAG);
5039 case ISD::PREFETCH:
5040 return lowerPREFETCH(Op, DAG);
5041 case ISD::INTRINSIC_W_CHAIN:
5042 return lowerINTRINSIC_W_CHAIN(Op, DAG);
5043 case ISD::INTRINSIC_WO_CHAIN:
5044 return lowerINTRINSIC_WO_CHAIN(Op, DAG);
5045 case ISD::BUILD_VECTOR:
5046 return lowerBUILD_VECTOR(Op, DAG);
5047 case ISD::VECTOR_SHUFFLE:
5048 return lowerVECTOR_SHUFFLE(Op, DAG);
5049 case ISD::SCALAR_TO_VECTOR:
5050 return lowerSCALAR_TO_VECTOR(Op, DAG);
5051 case ISD::INSERT_VECTOR_ELT:
5052 return lowerINSERT_VECTOR_ELT(Op, DAG);
5053 case ISD::EXTRACT_VECTOR_ELT:
5054 return lowerEXTRACT_VECTOR_ELT(Op, DAG);
5055 case ISD::SIGN_EXTEND_VECTOR_INREG:
5056 return lowerExtendVectorInreg(Op, DAG, SystemZISD::UNPACK_HIGH);
5057 case ISD::ZERO_EXTEND_VECTOR_INREG:
5058 return lowerExtendVectorInreg(Op, DAG, SystemZISD::UNPACKL_HIGH);
5059 case ISD::SHL:
5060 return lowerShift(Op, DAG, SystemZISD::VSHL_BY_SCALAR);
5061 case ISD::SRL:
5062 return lowerShift(Op, DAG, SystemZISD::VSRL_BY_SCALAR);
5063 case ISD::SRA:
5064 return lowerShift(Op, DAG, SystemZISD::VSRA_BY_SCALAR);
5065 default:
5066 llvm_unreachable("Unexpected node to lower");
5067 }
5068}
5069
5070// Lower operations with invalid operand or result types (currently used
5071// only for 128-bit integer types).
5072
5073static SDValue lowerI128ToGR128(SelectionDAG &DAG, SDValue In) {
5074 SDLoc DL(In);
5075 SDValue Lo = DAG.getNode(ISD::EXTRACT_ELEMENT, DL, MVT::i64, In,
5076 DAG.getIntPtrConstant(0, DL));
5077 SDValue Hi = DAG.getNode(ISD::EXTRACT_ELEMENT, DL, MVT::i64, In,
5078 DAG.getIntPtrConstant(1, DL));
5079 SDNode *Pair = DAG.getMachineNode(SystemZ::PAIR128, DL,
5080 MVT::Untyped, Hi, Lo);
5081 return SDValue(Pair, 0);
5082}
5083
5084static SDValue lowerGR128ToI128(SelectionDAG &DAG, SDValue In) {
5085 SDLoc DL(In);
5086 SDValue Hi = DAG.getTargetExtractSubreg(SystemZ::subreg_h64,
5087 DL, MVT::i64, In);
5088 SDValue Lo = DAG.getTargetExtractSubreg(SystemZ::subreg_l64,
5089 DL, MVT::i64, In);
5090 return DAG.getNode(ISD::BUILD_PAIR, DL, MVT::i128, Lo, Hi);
5091}
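// Note the operand orders: PAIR128 takes (Hi, Lo) to build the even/odd
// register pair, while ISD::BUILD_PAIR takes (Lo, Hi) to reassemble the
// i128, so the two helpers are inverses despite the mirrored argument
// lists.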
5092
5093void
5094SystemZTargetLowering::LowerOperationWrapper(SDNode *N,
5095 SmallVectorImpl<SDValue> &Results,
5096 SelectionDAG &DAG) const {
5097 switch (N->getOpcode()) {
5098 case ISD::ATOMIC_LOAD: {
5099 SDLoc DL(N);
5100 SDVTList Tys = DAG.getVTList(MVT::Untyped, MVT::Other);
5101 SDValue Ops[] = { N->getOperand(0), N->getOperand(1) };
5102 MachineMemOperand *MMO = cast<AtomicSDNode>(N)->getMemOperand();
5103 SDValue Res = DAG.getMemIntrinsicNode(SystemZISD::ATOMIC_LOAD_128,
5104 DL, Tys, Ops, MVT::i128, MMO);
5105 Results.push_back(lowerGR128ToI128(DAG, Res));
5106 Results.push_back(Res.getValue(1));
5107 break;
5108 }
5109 case ISD::ATOMIC_STORE: {
5110 SDLoc DL(N);
5111 SDVTList Tys = DAG.getVTList(MVT::Other);
5112 SDValue Ops[] = { N->getOperand(0),
5113 lowerI128ToGR128(DAG, N->getOperand(2)),
5114 N->getOperand(1) };
5115 MachineMemOperand *MMO = cast<AtomicSDNode>(N)->getMemOperand();
5116 SDValue Res = DAG.getMemIntrinsicNode(SystemZISD::ATOMIC_STORE_128,
5117 DL, Tys, Ops, MVT::i128, MMO);
5118 // We have to enforce sequential consistency by performing a
5119 // serialization operation after the store.
5120 if (cast<AtomicSDNode>(N)->getOrdering() ==
5121 AtomicOrdering::SequentiallyConsistent)
5122 Res = SDValue(DAG.getMachineNode(SystemZ::Serialize, DL,
5123 MVT::Other, Res), 0);
5124 Results.push_back(Res);
5125 break;
5126 }
5127 case ISD::ATOMIC_CMP_SWAP_WITH_SUCCESS: {
5128 SDLoc DL(N);
5129 SDVTList Tys = DAG.getVTList(MVT::Untyped, MVT::i32, MVT::Other);
5130 SDValue Ops[] = { N->getOperand(0), N->getOperand(1),
5131 lowerI128ToGR128(DAG, N->getOperand(2)),
5132 lowerI128ToGR128(DAG, N->getOperand(3)) };
5133 MachineMemOperand *MMO = cast<AtomicSDNode>(N)->getMemOperand();
5134 SDValue Res = DAG.getMemIntrinsicNode(SystemZISD::ATOMIC_CMP_SWAP_128,
5135 DL, Tys, Ops, MVT::i128, MMO);
5136 SDValue Success = emitSETCC(DAG, DL, Res.getValue(1),
5137 SystemZ::CCMASK_CS, SystemZ::CCMASK_CS_EQ);
5138 Success = DAG.getZExtOrTrunc(Success, DL, N->getValueType(1));
5139 Results.push_back(lowerGR128ToI128(DAG, Res));
5140 Results.push_back(Success);
5141 Results.push_back(Res.getValue(2));
5142 break;
5143 }
5144 default:
5145 llvm_unreachable("Unexpected node to lower");
5147}
5148
5149void
5150SystemZTargetLowering::ReplaceNodeResults(SDNode *N,
5151 SmallVectorImpl<SDValue> &Results,
5152 SelectionDAG &DAG) const {
5153 return LowerOperationWrapper(N, Results, DAG);
5154}
5155
5156const char *SystemZTargetLowering::getTargetNodeName(unsigned Opcode) const {
5157#define OPCODE(NAME) case SystemZISD::NAME: return "SystemZISD::" #NAME
5158 switch ((SystemZISD::NodeType)Opcode) {
5159 case SystemZISD::FIRST_NUMBER: break;
5160 OPCODE(RET_FLAG);
5161 OPCODE(CALL);
5162 OPCODE(SIBCALL);
5163 OPCODE(TLS_GDCALL);
5164 OPCODE(TLS_LDCALL);
5165 OPCODE(PCREL_WRAPPER);
5166 OPCODE(PCREL_OFFSET);
5167 OPCODE(IABS);
5168 OPCODE(ICMP);
5169 OPCODE(FCMP);
5170 OPCODE(TM);
5171 OPCODE(BR_CCMASK);
5172 OPCODE(SELECT_CCMASK);
5173 OPCODE(ADJDYNALLOC);
5174 OPCODE(POPCNT);
5175 OPCODE(SMUL_LOHI);
5176 OPCODE(UMUL_LOHI);
5177 OPCODE(SDIVREM);
5178 OPCODE(UDIVREM);
5179 OPCODE(SADDO);
5180 OPCODE(SSUBO);
5181 OPCODE(UADDO);
5182 OPCODE(USUBO);
5183 OPCODE(ADDCARRY);
5184 OPCODE(SUBCARRY);
5185 OPCODE(GET_CCMASK);
5186 OPCODE(MVC);
5187 OPCODE(MVC_LOOP);
5188 OPCODE(NC);
5189 OPCODE(NC_LOOP);
5190 OPCODE(OC);
5191 OPCODE(OC_LOOP);
5192 OPCODE(XC);
5193 OPCODE(XC_LOOP);
5194 OPCODE(CLC);
5195 OPCODE(CLC_LOOP);
5196 OPCODE(STPCPY);
5197 OPCODE(STRCMP);
5198 OPCODE(SEARCH_STRING);
5199 OPCODE(IPM);
5200 OPCODE(MEMBARRIER);
5201 OPCODE(TBEGIN);
5202 OPCODE(TBEGIN_NOFLOAT);
5203 OPCODE(TEND);
5204 OPCODE(BYTE_MASK);
5205 OPCODE(ROTATE_MASK);
5206 OPCODE(REPLICATE);
5207 OPCODE(JOIN_DWORDS);
5208 OPCODE(SPLAT);
5209 OPCODE(MERGE_HIGH);
5210 OPCODE(MERGE_LOW);
5211 OPCODE(SHL_DOUBLE);
5212 OPCODE(PERMUTE_DWORDS);
5213 OPCODE(PERMUTE);
5214 OPCODE(PACK);
5215 OPCODE(PACKS_CC);
5216 OPCODE(PACKLS_CC);
5217 OPCODE(UNPACK_HIGH);
5218 OPCODE(UNPACKL_HIGH);
5219 OPCODE(UNPACK_LOW);
5220 OPCODE(UNPACKL_LOW);
5221 OPCODE(VSHL_BY_SCALAR);
5222 OPCODE(VSRL_BY_SCALAR);
5223 OPCODE(VSRA_BY_SCALAR);
5224 OPCODE(VSUM);
5225 OPCODE(VICMPE);
5226 OPCODE(VICMPH);
5227 OPCODE(VICMPHL);
5228 OPCODE(VICMPES);
5229 OPCODE(VICMPHS);
5230 OPCODE(VICMPHLS);
5231 OPCODE(VFCMPE);
5232 OPCODE(VFCMPH);
5233 OPCODE(VFCMPHE);
5234 OPCODE(VFCMPES);
5235 OPCODE(VFCMPHS);
5236 OPCODE(VFCMPHES);
5237 OPCODE(VFTCI);
5238 OPCODE(VEXTEND);
5239 OPCODE(VROUND);
5240 OPCODE(VTM);
5241 OPCODE(VFAE_CC);
5242 OPCODE(VFAEZ_CC);
5243 OPCODE(VFEE_CC);
5244 OPCODE(VFEEZ_CC);
5245 OPCODE(VFENE_CC);
5246 OPCODE(VFENEZ_CC);
5247 OPCODE(VISTR_CC);
5248 OPCODE(VSTRC_CC);
5249 OPCODE(VSTRCZ_CC);
5250 OPCODE(VSTRS_CC);
5251 OPCODE(VSTRSZ_CC);
5252 OPCODE(TDC);
5253 OPCODE(ATOMIC_SWAPW);
5254 OPCODE(ATOMIC_LOADW_ADD);
5255 OPCODE(ATOMIC_LOADW_SUB);
5256 OPCODE(ATOMIC_LOADW_AND);
5257 OPCODE(ATOMIC_LOADW_OR);
5258 OPCODE(ATOMIC_LOADW_XOR);
5259 OPCODE(ATOMIC_LOADW_NAND);
5260 OPCODE(ATOMIC_LOADW_MIN);
5261 OPCODE(ATOMIC_LOADW_MAX);
5262 OPCODE(ATOMIC_LOADW_UMIN);
5263 OPCODE(ATOMIC_LOADW_UMAX);
5264 OPCODE(ATOMIC_CMP_SWAPW);
5265 OPCODE(ATOMIC_CMP_SWAP);
5266 OPCODE(ATOMIC_LOAD_128);
5267 OPCODE(ATOMIC_STORE_128);
5268 OPCODE(ATOMIC_CMP_SWAP_128);
5269 OPCODE(LRV);
5270 OPCODE(STRV);
5271 OPCODE(VLER);
5272 OPCODE(VSTER);
5273 OPCODE(PREFETCH);
5274 }
5275 return nullptr;
5276#undef OPCODE
5277}
5278
5279// Return true if VT is a vector whose elements are a whole number of bytes
5280// in width. Also check for presence of vector support.
5281bool SystemZTargetLowering::canTreatAsByteVector(EVT VT) const {
5282 if (!Subtarget.hasVector())
5283 return false;
5284
5285 return VT.isVector() && VT.getScalarSizeInBits() % 8 == 0 && VT.isSimple();
5286}
5287
5288// Try to simplify an EXTRACT_VECTOR_ELT from a vector of type VecVT
5289// producing a result of type ResVT. Op is a possibly bitcast version
5290// of the input vector and Index is the index (based on type VecVT) that
5291// should be extracted. Return the new extraction if a simplification
5292// was possible or if Force is true.
5293SDValue SystemZTargetLowering::combineExtract(const SDLoc &DL, EVT ResVT,
5294 EVT VecVT, SDValue Op,
5295 unsigned Index,
5296 DAGCombinerInfo &DCI,
5297 bool Force) const {
5298 SelectionDAG &DAG = DCI.DAG;
5299
5300 // The number of bytes being extracted.
5301 unsigned BytesPerElement = VecVT.getVectorElementType().getStoreSize();
5302
5303 for (;;) {
5304 unsigned Opcode = Op.getOpcode();
5305 if (Opcode == ISD::BITCAST)
5306 // Look through bitcasts.
5307 Op = Op.getOperand(0);
5308 else if ((Opcode == ISD::VECTOR_SHUFFLE || Opcode == SystemZISD::SPLAT) &&
5309 canTreatAsByteVector(Op.getValueType())) {
5310 // Get a VPERM-like permute mask and see whether the bytes covered
5311 // by the extracted element are a contiguous sequence from one
5312 // source operand.
5313 SmallVector<int, SystemZ::VectorBytes> Bytes;
5314 if (!getVPermMask(Op, Bytes))
5315 break;
5316 int First;
5317 if (!getShuffleInput(Bytes, Index * BytesPerElement,
5318 BytesPerElement, First))
5319 break;
5320 if (First < 0)
5321 return DAG.getUNDEF(ResVT);
5322 // Make sure the contiguous sequence starts at a multiple of the
5323 // original element size.
5324 unsigned Byte = unsigned(First) % Bytes.size();
5325 if (Byte % BytesPerElement != 0)
5326 break;
5327 // We can get the extracted value directly from an input.
5328 Index = Byte / BytesPerElement;
5329 Op = Op.getOperand(unsigned(First) / Bytes.size());
5330 Force = true;
5331 } else if (Opcode == ISD::BUILD_VECTOR &&
5332 canTreatAsByteVector(Op.getValueType())) {
5333 // We can only optimize this case if the BUILD_VECTOR elements are
5334 // at least as wide as the extracted value.
5335 EVT OpVT = Op.getValueType();
5336 unsigned OpBytesPerElement = OpVT.getVectorElementType().getStoreSize();
5337 if (OpBytesPerElement < BytesPerElement)
5338 break;
5339 // Make sure that the least-significant bit of the extracted value
5340 // is the least significant bit of an input.
5341 unsigned End = (Index + 1) * BytesPerElement;
5342 if (End % OpBytesPerElement != 0)
5343 break;
5344 // We're extracting the low part of one operand of the BUILD_VECTOR.
5345 Op = Op.getOperand(End / OpBytesPerElement - 1);
5346 if (!Op.getValueType().isInteger()) {
5347 EVT VT = MVT::getIntegerVT(Op.getValueSizeInBits());
5348 Op = DAG.getNode(ISD::BITCAST, DL, VT, Op);
5349 DCI.AddToWorklist(Op.getNode());
5350 }
5351 EVT VT = MVT::getIntegerVT(ResVT.getSizeInBits());
5352 Op = DAG.getNode(ISD::TRUNCATE, DL, VT, Op);
5353 if (VT != ResVT) {
5354 DCI.AddToWorklist(Op.getNode());
5355 Op = DAG.getNode(ISD::BITCAST, DL, ResVT, Op);
5356 }
5357 return Op;
5358 } else if ((Opcode == ISD::SIGN_EXTEND_VECTOR_INREG ||
5359 Opcode == ISD::ZERO_EXTEND_VECTOR_INREG ||
5360 Opcode == ISD::ANY_EXTEND_VECTOR_INREG) &&
5361 canTreatAsByteVector(Op.getValueType()) &&
5362 canTreatAsByteVector(Op.getOperand(0).getValueType())) {
5363 // Make sure that only the unextended bits are significant.
5364 EVT ExtVT = Op.getValueType();
5365 EVT OpVT = Op.getOperand(0).getValueType();
5366 unsigned ExtBytesPerElement = ExtVT.getVectorElementType().getStoreSize();
5367 unsigned OpBytesPerElement = OpVT.getVectorElementType().getStoreSize();
5368 unsigned Byte = Index * BytesPerElement;
5369 unsigned SubByte = Byte % ExtBytesPerElement;
5370 unsigned MinSubByte = ExtBytesPerElement - OpBytesPerElement;
5371 if (SubByte < MinSubByte ||
5372 SubByte + BytesPerElement > ExtBytesPerElement)
5373 break;
5374 // Get the byte offset of the unextended element
5375 Byte = Byte / ExtBytesPerElement * OpBytesPerElement;
5376 // ...then add the byte offset relative to that element.
5377 Byte += SubByte - MinSubByte;
5378 if (Byte % BytesPerElement != 0)
5379 break;
5380 Op = Op.getOperand(0);
5381 Index = Byte / BytesPerElement;
5382 Force = true;
5383 } else
5384 break;
5385 }
5386 if (Force) {
5387 if (Op.getValueType() != VecVT) {
5388 Op = DAG.getNode(ISD::BITCAST, DL, VecVT, Op);
5389 DCI.AddToWorklist(Op.getNode());
5390 }
5391 return DAG.getNode(ISD::EXTRACT_VECTOR_ELT, DL, ResVT, Op,
5392 DAG.getConstant(Index, DL, MVT::i32));
5393 }
5394 return SDValue();
5395}
5396
5397// Optimize vector operations in scalar value Op on the basis that Op
5398// is truncated to TruncVT.
5399SDValue SystemZTargetLowering::combineTruncateExtract(
5400 const SDLoc &DL, EVT TruncVT, SDValue Op, DAGCombinerInfo &DCI) const {
5401 // If we have (trunc (extract_vector_elt X, Y)), try to turn it into
5402 // (extract_vector_elt (bitcast X), Y'), where (bitcast X) has elements
5403 // of type TruncVT.
5404 if (Op.getOpcode() == ISD::EXTRACT_VECTOR_ELT &&
5405 TruncVT.getSizeInBits() % 8 == 0) {
5406 SDValue Vec = Op.getOperand(0);
5407 EVT VecVT = Vec.getValueType();
5408 if (canTreatAsByteVector(VecVT)) {
5409 if (auto *IndexN = dyn_cast<ConstantSDNode>(Op.getOperand(1))) {
5410 unsigned BytesPerElement = VecVT.getVectorElementType().getStoreSize();
5411 unsigned TruncBytes = TruncVT.getStoreSize();
5412 if (BytesPerElement % TruncBytes == 0) {
5413 // Calculate the value of Y' in the above description. We are
5414 // splitting the original elements into Scale equal-sized pieces
5415 // and for truncation purposes want the last (least-significant)
5416 // of these pieces for IndexN. This is easiest to do by calculating
5417 // the start index of the following element and then subtracting 1.
5418 unsigned Scale = BytesPerElement / TruncBytes;
5419 unsigned NewIndex = (IndexN->getZExtValue() + 1) * Scale - 1;
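// For example, truncating the i32 extracted from element 3 of a v4i32
// to i8 gives Scale == 4 and NewIndex == (3 + 1) * 4 - 1 == 15, the
// last (least significant) byte of that element in the v16i8 view.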
5420
5421 // Defer the creation of the bitcast from X to combineExtract,
5422 // which might be able to optimize the extraction.
5423 VecVT = MVT::getVectorVT(MVT::getIntegerVT(TruncBytes * 8),
5424 VecVT.getStoreSize() / TruncBytes);
5425 EVT ResVT = (TruncBytes < 4 ? MVT::i32 : TruncVT);
5426 return combineExtract(DL, ResVT, VecVT, Vec, NewIndex, DCI, true);
5427 }
5428 }
5429 }
5430 }
5431 return SDValue();
5432}
5433
5434SDValue SystemZTargetLowering::combineZERO_EXTEND(
5435 SDNode *N, DAGCombinerInfo &DCI) const {
5436 // Convert (zext (select_ccmask C1, C2)) into (select_ccmask C1', C2')
5437 SelectionDAG &DAG = DCI.DAG;
5438 SDValue N0 = N->getOperand(0);
5439 EVT VT = N->getValueType(0);
5440 if (N0.getOpcode() == SystemZISD::SELECT_CCMASK) {
5441 auto *TrueOp = dyn_cast<ConstantSDNode>(N0.getOperand(0));
5442 auto *FalseOp = dyn_cast<ConstantSDNode>(N0.getOperand(1));
5443 if (TrueOp && FalseOp) {
5444 SDLoc DL(N0);
5445 SDValue Ops[] = { DAG.getConstant(TrueOp->getZExtValue(), DL, VT),
5446 DAG.getConstant(FalseOp->getZExtValue(), DL, VT),
5447 N0.getOperand(2), N0.getOperand(3), N0.getOperand(4) };
5448 SDValue NewSelect = DAG.getNode(SystemZISD::SELECT_CCMASK, DL, VT, Ops);
5449 // If N0 has multiple uses, change other uses as well.
5450 if (!N0.hasOneUse()) {
5451 SDValue TruncSelect =
5452 DAG.getNode(ISD::TRUNCATE, DL, N0.getValueType(), NewSelect);
5453 DCI.CombineTo(N0.getNode(), TruncSelect);
5454 }
5455 return NewSelect;
5456 }
5457 }
5458 return SDValue();
5459}
5460
5461SDValue SystemZTargetLowering::combineSIGN_EXTEND_INREG(
5462 SDNode *N, DAGCombinerInfo &DCI) const {
5463 // Convert (sext_in_reg (setcc LHS, RHS, COND), i1)
5464 // and (sext_in_reg (any_extend (setcc LHS, RHS, COND)), i1)
5465 // into (select_cc LHS, RHS, -1, 0, COND)
5466 SelectionDAG &DAG = DCI.DAG;
5467 SDValue N0 = N->getOperand(0);
5468 EVT VT = N->getValueType(0);
5469 EVT EVT = cast<VTSDNode>(N->getOperand(1))->getVT();
5470 if (N0.hasOneUse() && N0.getOpcode() == ISD::ANY_EXTEND)
5471 N0 = N0.getOperand(0);
5472 if (EVT == MVT::i1 && N0.hasOneUse() && N0.getOpcode() == ISD::SETCC) {
5473 SDLoc DL(N0);
5474 SDValue Ops[] = { N0.getOperand(0), N0.getOperand(1),
5475 DAG.getConstant(-1, DL, VT), DAG.getConstant(0, DL, VT),
5476 N0.getOperand(2) };
5477 return DAG.getNode(ISD::SELECT_CC, DL, VT, Ops);
5478 }
5479 return SDValue();
5480}
5481
5482SDValue SystemZTargetLowering::combineSIGN_EXTEND(
5483 SDNode *N, DAGCombinerInfo &DCI) const {
5484 // Convert (sext (ashr (shl X, C1), C2)) to
5485 // (ashr (shl (anyext X), C1'), C2'), since wider shifts are as
5486 // cheap as narrower ones.
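// E.g. (illustrative) for X:i32 extended to i64, (sext:i64 (ashr (shl X,
// 24), 24)) becomes (ashr (shl (anyext:i64 X), 56), 56): Extra below is
// 64 - 32 = 32 and is added to both shift amounts.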
5487 SelectionDAG &DAG = DCI.DAG;
5488 SDValue N0 = N->getOperand(0);
5489 EVT VT = N->getValueType(0);
5490 if (N0.hasOneUse() && N0.getOpcode() == ISD::SRA) {
5491 auto *SraAmt = dyn_cast<ConstantSDNode>(N0.getOperand(1));
5492 SDValue Inner = N0.getOperand(0);
5493 if (SraAmt && Inner.hasOneUse() && Inner.getOpcode() == ISD::SHL) {
5494 if (auto *ShlAmt = dyn_cast<ConstantSDNode>(Inner.getOperand(1))) {
5495 unsigned Extra = (VT.getSizeInBits() - N0.getValueSizeInBits());
5496 unsigned NewShlAmt = ShlAmt->getZExtValue() + Extra;
5497 unsigned NewSraAmt = SraAmt->getZExtValue() + Extra;
5498 EVT ShiftVT = N0.getOperand(1).getValueType();
5499 SDValue Ext = DAG.getNode(ISD::ANY_EXTEND, SDLoc(Inner), VT,
5500 Inner.getOperand(0));
5501 SDValue Shl = DAG.getNode(ISD::SHL, SDLoc(Inner), VT, Ext,
5502 DAG.getConstant(NewShlAmt, SDLoc(Inner),
5503 ShiftVT));
5504 return DAG.getNode(ISD::SRA, SDLoc(N0), VT, Shl,
5505 DAG.getConstant(NewSraAmt, SDLoc(N0), ShiftVT));
5506 }
5507 }
5508 }
5509 return SDValue();
5510}
5511
5512SDValue SystemZTargetLowering::combineMERGE(
5513 SDNode *N, DAGCombinerInfo &DCI) const {
5514 SelectionDAG &DAG = DCI.DAG;
5515 unsigned Opcode = N->getOpcode();
5516 SDValue Op0 = N->getOperand(0);
5517 SDValue Op1 = N->getOperand(1);
5518 if (Op0.getOpcode() == ISD::BITCAST)
5519 Op0 = Op0.getOperand(0);
5520 if (ISD::isBuildVectorAllZeros(Op0.getNode())) {
5521 // (z_merge_* 0, 0) -> 0. This is mostly useful for using VLLEZF
5522 // for v4f32.
5523 if (Op1 == N->getOperand(0))
5524 return Op1;
5525 // (z_merge_? 0, X) -> (z_unpackl_? 0, X).
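// As an illustration: merging v16i8 X under a zero vector produces each
// byte of the relevant half of X zero-extended to i16, so OutVT below is
// v8i16 and the UNPACKL_HIGH/UNPACKL_LOW node supplies the zero half.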
5526 EVT VT = Op1.getValueType();
5527 unsigned ElemBytes = VT.getVectorElementType().getStoreSize();
5528 if (ElemBytes <= 4) {
5529 Opcode = (Opcode == SystemZISD::MERGE_HIGH ?
5530 SystemZISD::UNPACKL_HIGH : SystemZISD::UNPACKL_LOW);
5531 EVT InVT = VT.changeVectorElementTypeToInteger();
5532 EVT OutVT = MVT::getVectorVT(MVT::getIntegerVT(ElemBytes * 16),
5533 SystemZ::VectorBytes / ElemBytes / 2);
5534 if (VT != InVT) {
5535 Op1 = DAG.getNode(ISD::BITCAST, SDLoc(N), InVT, Op1);
5536 DCI.AddToWorklist(Op1.getNode());
5537 }
5538 SDValue Op = DAG.getNode(Opcode, SDLoc(N), OutVT, Op1);
5539 DCI.AddToWorklist(Op.getNode());
5540 return DAG.getNode(ISD::BITCAST, SDLoc(N), VT, Op);
5541 }
5542 }
5543 return SDValue();
5544}
5545
5546SDValue SystemZTargetLowering::combineLOAD(
5547 SDNode *N, DAGCombinerInfo &DCI) const {
5548 SelectionDAG &DAG = DCI.DAG;
5549 EVT LdVT = N->getValueType(0);
5550 if (LdVT.isVector() || LdVT.isInteger())
5551 return SDValue();
5552 // Transform a scalar load that is REPLICATEd and also has other use(s)
5553 // so that those other use(s) read the first element of the REPLICATE
5554 // instead of the load; otherwise instruction selection will not produce
5555 // a VLREP. Avoid extracting to a GPR, so only do this for floating-
5556 // point loads.
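// Sketch of the rewrite (illustrative): a second use such as
// (fadd (load X), Y) becomes
// (fadd (extract_vector_elt (z_replicate (load X)), 0), Y), so the load is
// consumed only through the REPLICATE and can select to VLREP.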
5557
5558 SDValue Replicate;
5559 SmallVector<SDNode*, 8> OtherUses;
5560 for (SDNode::use_iterator UI = N->use_begin(), UE = N->use_end();
5561 UI != UE; ++UI) {
5562 if (UI->getOpcode() == SystemZISD::REPLICATE) {
5563 if (Replicate)
5564 return SDValue(); // Should never happen
5565 Replicate = SDValue(*UI, 0);
5566 }
5567 else if (UI.getUse().getResNo() == 0)
5568 OtherUses.push_back(*UI);
5569 }
5570 if (!Replicate || OtherUses.empty())
5571 return SDValue();
5572
5573 SDLoc DL(N);
5574 SDValue Extract0 = DAG.getNode(ISD::EXTRACT_VECTOR_ELT, DL, LdVT,
5575 Replicate, DAG.getConstant(0, DL, MVT::i32));
5576 // Update uses of the loaded Value while preserving old chains.
5577 for (SDNode *U : OtherUses) {
5578 SmallVector<SDValue, 8> Ops;
5579 for (SDValue Op : U->ops())
5580 Ops.push_back((Op.getNode() == N && Op.getResNo() == 0) ? Extract0 : Op);
5581 DAG.UpdateNodeOperands(U, Ops);
5582 }
5583 return SDValue(N, 0);
5584}
5585
5586bool SystemZTargetLowering::canLoadStoreByteSwapped(EVT VT) const {
5587 if (VT == MVT::i16 || VT == MVT::i32 || VT == MVT::i64)
5588 return true;
5589 if (Subtarget.hasVectorEnhancements2())
5590 if (VT == MVT::v8i16 || VT == MVT::v4i32 || VT == MVT::v2i64)
5591 return true;
5592 return false;
5593}
5594
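// Return true if the shuffle mask M reverses the element order of the
// 128-bit vector type VT. Illustrative example: for v4i32 the mask
// <3, 2, 1, 0> qualifies, and UNDEF (negative) indices are ignored.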
5595static bool isVectorElementSwap(ArrayRef<int> M, EVT VT) {
5596 if (!VT.isVector() || !VT.isSimple() ||
5597 VT.getSizeInBits() != 128 ||
5598 VT.getScalarSizeInBits() % 8 != 0)
5599 return false;
5600
5601 unsigned NumElts = VT.getVectorNumElements();
5602 for (unsigned i = 0; i < NumElts; ++i) {
5603 if (M[i] < 0) continue; // ignore UNDEF indices
5604 if ((unsigned) M[i] != NumElts - 1 - i)
5605 return false;
5606 }
5607
5608 return true;
5609}
5610
5611SDValue SystemZTargetLowering::combineSTORE(
5612 SDNode *N, DAGCombinerInfo &DCI) const {
5613 SelectionDAG &DAG = DCI.DAG;
5614 auto *SN = cast<StoreSDNode>(N);
5615 auto &Op1 = N->getOperand(1);
5616 EVT MemVT = SN->getMemoryVT();
5617 // If we have (truncstoreiN (extract_vector_elt X, Y), Z) then it is better
5618 // for the extraction to be done on a vMiN value, so that we can use VSTE.
5619 // If X has wider elements then convert it to:
5620 // (truncstoreiN (extract_vector_elt (bitcast X), Y2), Z).
5621 if (MemVT.isInteger() && SN->isTruncatingStore()) {
5622 if (SDValue Value =
5623 combineTruncateExtract(SDLoc(N), MemVT, SN->getValue(), DCI)) {
5624 DCI.AddToWorklist(Value.getNode());
5625
5626 // Rewrite the store with the new form of stored value.
5627 return DAG.getTruncStore(SN->getChain(), SDLoc(SN), Value,
5628 SN->getBasePtr(), SN->getMemoryVT(),
5629 SN->getMemOperand());
5630 }
5631 }
5632 // Combine STORE (BSWAP) into STRVH/STRV/STRVG/VSTBR
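// Illustrative DAG shape: (store (bswap:i32 X), Ptr) becomes the memory
// intrinsic (z_strv X, Ptr), storing X byte-reversed in one instruction;
// an i16 value is first any-extended to i32 below.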
5633 if (!SN->isTruncatingStore() &&
5634 Op1.getOpcode() == ISD::BSWAP &&
5635 Op1.getNode()->hasOneUse() &&
5636 canLoadStoreByteSwapped(Op1.getValueType())) {
5637
5638 SDValue BSwapOp = Op1.getOperand(0);
5639
5640 if (BSwapOp.getValueType() == MVT::i16)
5641 BSwapOp = DAG.getNode(ISD::ANY_EXTEND, SDLoc(N), MVT::i32, BSwapOp);
5642
5643 SDValue Ops[] = {
5644 N->getOperand(0), BSwapOp, N->getOperand(2)
5645 };
5646
5647 return
5648 DAG.getMemIntrinsicNode(SystemZISD::STRV, SDLoc(N), DAG.getVTList(MVT::Other),
5649 Ops, MemVT, SN->getMemOperand());
5650 }
5651 // Combine STORE (element-swap) into VSTER
5652 if (!SN->isTruncatingStore() &&
5653 Op1.getOpcode() == ISD::VECTOR_SHUFFLE &&
5654 Op1.getNode()->hasOneUse() &&
5655 Subtarget.hasVectorEnhancements2()) {
5656 ShuffleVectorSDNode *SVN = cast<ShuffleVectorSDNode>(Op1.getNode());
5657 ArrayRef<int> ShuffleMask = SVN->getMask();
5658 if (isVectorElementSwap(ShuffleMask, Op1.getValueType())) {
5659 SDValue Ops[] = {
5660 N->getOperand(0), Op1.getOperand(0), N->getOperand(2)
5661 };
5662
5663 return DAG.getMemIntrinsicNode(SystemZISD::VSTER, SDLoc(N),
5664 DAG.getVTList(MVT::Other),
5665 Ops, MemVT, SN->getMemOperand());
5666 }
5667 }
5668
5669 return SDValue();
5670}
5671
5672SDValue SystemZTargetLowering::combineVECTOR_SHUFFLE(
5673 SDNode *N, DAGCombinerInfo &DCI) const {
5674 SelectionDAG &DAG = DCI.DAG;
5675 // Combine element-swap (LOAD) into VLER
5676 if (ISD::isNON_EXTLoad(N->getOperand(0).getNode()) &&
5677 N->getOperand(0).hasOneUse() &&
5678 Subtarget.hasVectorEnhancements2()) {
5679 ShuffleVectorSDNode *SVN = cast<ShuffleVectorSDNode>(N);
5680 ArrayRef<int> ShuffleMask = SVN->getMask();
5681 if (isVectorElementSwap(ShuffleMask, N->getValueType(0))) {
5682 SDValue Load = N->getOperand(0);
5683 LoadSDNode *LD = cast<LoadSDNode>(Load);
5684
5685 // Create the element-swapping load.
5686 SDValue Ops[] = {
5687 LD->getChain(), // Chain
5688 LD->getBasePtr() // Ptr
5689 };
5690 SDValue ESLoad =
5691 DAG.getMemIntrinsicNode(SystemZISD::VLER, SDLoc(N),
5692 DAG.getVTList(LD->getValueType(0), MVT::Other),
5693 Ops, LD->getMemoryVT(), LD->getMemOperand());
5694
5695 // First, combine the VECTOR_SHUFFLE away. This makes the value produced
5696 // by the load dead.
5697 DCI.CombineTo(N, ESLoad);
5698
5699 // Next, combine the load away; we give it a bogus result value but a real
5700 // chain result. The result value is dead because the shuffle is dead.
5701 DCI.CombineTo(Load.getNode(), ESLoad, ESLoad.getValue(1));
5702
5703 // Return N so it doesn't get rechecked!
5704 return SDValue(N, 0);
5705 }
5706 }
5707
5708 return SDValue();
5709}
5710
5711SDValue SystemZTargetLowering::combineEXTRACT_VECTOR_ELT(
5712 SDNode *N, DAGCombinerInfo &DCI) const {
5713 SelectionDAG &DAG = DCI.DAG;
5714
5715 if (!Subtarget.hasVector())
5716 return SDValue();
5717
5718 // Look through bitcasts that retain the number of vector elements.
5719 SDValue Op = N->getOperand(0);
5720 if (Op.getOpcode() == ISD::BITCAST &&
5721 Op.getValueType().isVector() &&
5722 Op.getOperand(0).getValueType().isVector() &&
5723 Op.getValueType().getVectorNumElements() ==
5724 Op.getOperand(0).getValueType().getVectorNumElements())
5725 Op = Op.getOperand(0);
5726
5727 // Pull BSWAP out of a vector extraction.
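// That is (illustrative): (extract_vector_elt (bswap V), I) becomes
// (bswap (extract_vector_elt V, I)), swapping only the extracted scalar.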
5728 if (Op.getOpcode() == ISD::BSWAP && Op.hasOneUse()) {
5729 EVT VecVT = Op.getValueType();
5730 EVT EltVT = VecVT.getVectorElementType();
5731 Op = DAG.getNode(ISD::EXTRACT_VECTOR_ELT, SDLoc(N), EltVT,
5732 Op.getOperand(0), N->getOperand(1));
5733 DCI.AddToWorklist(Op.getNode());
5734 Op = DAG.getNode(ISD::BSWAP, SDLoc(N), EltVT, Op);
5735 if (EltVT != N->getValueType(0)) {
5736 DCI.AddToWorklist(Op.getNode());
5737 Op = DAG.getNode(ISD::BITCAST, SDLoc(N), N->getValueType(0), Op);
5738 }
5739 return Op;
5740 }
5741
5742 // Try to simplify a vector extraction.
5743 if (auto *IndexN = dyn_cast<ConstantSDNode>(N->getOperand(1))) {
5744 SDValue Op0 = N->getOperand(0);
5745 EVT VecVT = Op0.getValueType();
5746 return combineExtract(SDLoc(N), N->getValueType(0), VecVT, Op0,
5747 IndexN->getZExtValue(), DCI, false);
5748 }
5749 return SDValue();
5750}
5751
5752SDValue SystemZTargetLowering::combineJOIN_DWORDS(
5753 SDNode *N, DAGCombinerInfo &DCI) const {
5754 SelectionDAG &DAG = DCI.DAG;
5755 // (join_dwords X, X) == (replicate X)
5756 if (N->getOperand(0) == N->getOperand(1))
5757 return DAG.getNode(SystemZISD::REPLICATE, SDLoc(N), N->getValueType(0),
5758 N->getOperand(0));
5759 return SDValue();
5760}
5761
5762SDValue SystemZTargetLowering::combineFP_ROUND(
5763 SDNode *N, DAGCombinerInfo &DCI) const {
5764
5765 if (!Subtarget.hasVector())
5766 return SDValue();
5767
5768 // (fpround (extract_vector_elt X 0))
5769 // (fpround (extract_vector_elt X 1)) ->
5770 // (extract_vector_elt (VROUND X) 0)
5771 // (extract_vector_elt (VROUND X) 2)
5772 //
5773 // This is a special case since the target doesn't really support v2f32s.
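// Note that the rounded results occupy the even-numbered lanes of the
// v4f32 result, which is why the second extraction uses index 2 rather
// than 1.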
5774 SelectionDAG &DAG = DCI.DAG;
5775 SDValue Op0 = N->getOperand(0);
5776 if (N->getValueType(0) == MVT::f32 &&
5777 Op0.hasOneUse() &&
5778 Op0.getOpcode() == ISD::EXTRACT_VECTOR_ELT &&
5779 Op0.getOperand(0).getValueType() == MVT::v2f64 &&
5780 Op0.getOperand(1).getOpcode() == ISD::Constant &&
5781 cast<ConstantSDNode>(Op0.getOperand(1))->getZExtValue() == 0) {
5782 SDValue Vec = Op0.getOperand(0);
5783 for (auto *U : Vec->uses()) {
5784 if (U != Op0.getNode() &&
5785 U->hasOneUse() &&
5786 U->getOpcode() == ISD::EXTRACT_VECTOR_ELT &&
5787 U->getOperand(0) == Vec &&
5788 U->getOperand(1).getOpcode() == ISD::Constant &&
5789 cast<ConstantSDNode>(U->getOperand(1))->getZExtValue() == 1) {
5790 SDValue OtherRound = SDValue(*U->use_begin(), 0);
5791 if (OtherRound.getOpcode() == ISD::FP_ROUND &&
5792 OtherRound.getOperand(0) == SDValue(U, 0) &&
5793 OtherRound.getValueType() == MVT::f32) {
5794 SDValue VRound = DAG.getNode(SystemZISD::VROUND, SDLoc(N),
5795 MVT::v4f32, Vec);
5796 DCI.AddToWorklist(VRound.getNode());
5797 SDValue Extract1 =
5798 DAG.getNode(ISD::EXTRACT_VECTOR_ELT, SDLoc(U), MVT::f32,
5799 VRound, DAG.getConstant(2, SDLoc(U), MVT::i32));
5800 DCI.AddToWorklist(Extract1.getNode());
5801 DAG.ReplaceAllUsesOfValueWith(OtherRound, Extract1);
5802 SDValue Extract0 =
5803 DAG.getNode(ISD::EXTRACT_VECTOR_ELT, SDLoc(Op0), MVT::f32,
5804 VRound, DAG.getConstant(0, SDLoc(Op0), MVT::i32));
5805 return Extract0;
5806 }
5807 }
5808 }
5809 }
5810 return SDValue();
5811}
5812
5813SDValue SystemZTargetLowering::combineFP_EXTEND(
5814 SDNode *N, DAGCombinerInfo &DCI) const {
5815
5816 if (!Subtarget.hasVector())
5817 return SDValue();
5818
5819 // (fpextend (extract_vector_elt X 0))
5820 // (fpextend (extract_vector_elt X 2)) ->
5821 // (extract_vector_elt (VEXTEND X) 0)
5822 // (extract_vector_elt (VEXTEND X) 1)
5823 //
5824 // This is a special case since the target doesn't really support v2f32s.
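// VEXTEND widens the even-numbered v4f32 lanes, so source indices 0 and 2
// map to result indices 0 and 1 below.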
5825 SelectionDAG &DAG = DCI.DAG;
5826 SDValue Op0 = N->getOperand(0);
5827 if (N->getValueType(0) == MVT::f64 &&
5828 Op0.hasOneUse() &&
5829 Op0.getOpcode() == ISD::EXTRACT_VECTOR_ELT &&
5830 Op0.getOperand(0).getValueType() == MVT::v4f32 &&
5831 Op0.getOperand(1).getOpcode() == ISD::Constant &&
5832 cast<ConstantSDNode>(Op0.getOperand(1))->getZExtValue() == 0) {
5833 SDValue Vec = Op0.getOperand(0);
5834 for (auto *U : Vec->uses()) {
5835 if (U != Op0.getNode() &&
5836 U->hasOneUse() &&
5837 U->getOpcode() == ISD::EXTRACT_VECTOR_ELT &&
5838 U->getOperand(0) == Vec &&
5839 U->getOperand(1).getOpcode() == ISD::Constant &&
5840 cast<ConstantSDNode>(U->getOperand(1))->getZExtValue() == 2) {
5841 SDValue OtherExtend = SDValue(*U->use_begin(), 0);
5842 if (OtherExtend.getOpcode() == ISD::FP_EXTEND &&
5843 OtherExtend.getOperand(0) == SDValue(U, 0) &&
5844 OtherExtend.getValueType() == MVT::f64) {
5845 SDValue VExtend = DAG.getNode(SystemZISD::VEXTEND, SDLoc(N),
5846 MVT::v2f64, Vec);
5847 DCI.AddToWorklist(VExtend.getNode());
5848 SDValue Extract1 =
5849 DAG.getNode(ISD::EXTRACT_VECTOR_ELT, SDLoc(U), MVT::f64,
5850 VExtend, DAG.getConstant(1, SDLoc(U), MVT::i32));
5851 DCI.AddToWorklist(Extract1.getNode());
5852 DAG.ReplaceAllUsesOfValueWith(OtherExtend, Extract1);
5853 SDValue Extract0 =
5854 DAG.getNode(ISD::EXTRACT_VECTOR_ELT, SDLoc(Op0), MVT::f64,
5855 VExtend, DAG.getConstant(0, SDLoc(Op0), MVT::i32));
5856 return Extract0;
5857 }
5858 }
5859 }
5860 }
5861 return SDValue();
5862}
5863
5864SDValue SystemZTargetLowering::combineBSWAP(
5865 SDNode *N, DAGCombinerInfo &DCI) const {
5866 SelectionDAG &DAG = DCI.DAG;
5867 // Combine BSWAP (LOAD) into LRVH/LRV/LRVG/VLBR
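// Illustrative DAG shape: (bswap:i32 (load Ptr)) becomes (z_lrv Ptr), a
// single byte-reversing load; the i16 case loads as i32 and truncates.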
5868 if (ISD::isNON_EXTLoad(N->getOperand(0).getNode()) &&
5869 N->getOperand(0).hasOneUse() &&
5870 canLoadStoreByteSwapped(N->getValueType(0))) {
5871 SDValue Load = N->getOperand(0);
5872 LoadSDNode *LD = cast<LoadSDNode>(Load);
5873
5874 // Create the byte-swapping load.
5875 SDValue Ops[] = {
5876 LD->getChain(), // Chain
5877 LD->getBasePtr() // Ptr
5878 };
5879 EVT LoadVT = N->getValueType(0);
5880 if (LoadVT == MVT::i16)
5881 LoadVT = MVT::i32;
5882 SDValue BSLoad =
5883 DAG.getMemIntrinsicNode(SystemZISD::LRV, SDLoc(N),
5884 DAG.getVTList(LoadVT, MVT::Other),
5885 Ops, LD->getMemoryVT(), LD->getMemOperand());
5886
5887 // If this is an i16 load, insert the truncate.
5888 SDValue ResVal = BSLoad;
5889 if (N->getValueType(0) == MVT::i16)
5890 ResVal = DAG.getNode(ISD::TRUNCATE, SDLoc(N), MVT::i16, BSLoad);
5891
5892 // First, combine the bswap away. This makes the value produced by the
5893 // load dead.
5894 DCI.CombineTo(N, ResVal);
5895
5896 // Next, combine the load away; we give it a bogus result value but a real
5897 // chain result. The result value is dead because the bswap is dead.
5898 DCI.CombineTo(Load.getNode(), ResVal, BSLoad.getValue(1));
5899
5900 // Return N so it doesn't get rechecked!
5901 return SDValue(N, 0);
5902 }
5903
5904 // Look through bitcasts that retain the number of vector elements.
5905 SDValue Op = N->getOperand(0);
5906 if (Op.getOpcode() == ISD::BITCAST &&
5907 Op.getValueType().isVector() &&
5908 Op.getOperand(0).getValueType().isVector() &&
5909 Op.getValueType().getVectorNumElements() ==
5910 Op.getOperand(0).getValueType().getVectorNumElements())
5911 Op = Op.getOperand(0);
5912
5913 // Push BSWAP into a vector insertion if at least one side then simplifies.
5914 if (Op.getOpcode() == ISD::INSERT_VECTOR_ELT && Op.hasOneUse()) {
5915 SDValue Vec = Op.getOperand(0);
5916 SDValue Elt = Op.getOperand(1);
5917 SDValue Idx = Op.getOperand(2);
5918
5919 if (DAG.isConstantIntBuildVectorOrConstantInt(Vec) ||
5920 Vec.getOpcode() == ISD::BSWAP || Vec.isUndef() ||
5921 DAG.isConstantIntBuildVectorOrConstantInt(Elt) ||
5922 Elt.getOpcode() == ISD::BSWAP || Elt.isUndef() ||
5923 (canLoadStoreByteSwapped(N->getValueType(0)) &&
5924 ISD::isNON_EXTLoad(Elt.getNode()) && Elt.hasOneUse())) {
5925 EVT VecVT = N->getValueType(0);
5926 EVT EltVT = N->getValueType(0).getVectorElementType();
5927 if (VecVT != Vec.getValueType()) {
5928 Vec = DAG.getNode(ISD::BITCAST, SDLoc(N), VecVT, Vec);
5929 DCI.AddToWorklist(Vec.getNode());
5930 }
5931 if (EltVT != Elt.getValueType()) {
5932 Elt = DAG.getNode(ISD::BITCAST, SDLoc(N), EltVT, Elt);
5933 DCI.AddToWorklist(Elt.getNode());
5934 }
5935 Vec = DAG.getNode(ISD::BSWAP, SDLoc(N), VecVT, Vec);
5936 DCI.AddToWorklist(Vec.getNode());
5937 Elt = DAG.getNode(ISD::BSWAP, SDLoc(N), EltVT, Elt);
5938 DCI.AddToWorklist(Elt.getNode());
5939 return DAG.getNode(ISD::INSERT_VECTOR_ELT, SDLoc(N), VecVT,
5940 Vec, Elt, Idx);
5941 }
5942 }
5943
5944 // Push BSWAP into a vector shuffle if at least one side then simplifies.
5945 ShuffleVectorSDNode *SV = dyn_cast<ShuffleVectorSDNode>(Op);
5946 if (SV && Op.hasOneUse()) {
5947 SDValue Op0 = Op.getOperand(0);
5948 SDValue Op1 = Op.getOperand(1);
5949
5950 if (DAG.isConstantIntBuildVectorOrConstantInt(Op0) ||
5951 Op0.getOpcode() == ISD::BSWAP || Op0.isUndef() ||
5952 DAG.isConstantIntBuildVectorOrConstantInt(Op1) ||
5953 Op1.getOpcode() == ISD::BSWAP || Op1.isUndef()) {
5954 EVT VecVT = N->getValueType(0);
5955 if (VecVT != Op0.getValueType()) {
5956 Op0 = DAG.getNode(ISD::BITCAST, SDLoc(N), VecVT, Op0);
5957 DCI.AddToWorklist(Op0.getNode());
5958 }
5959 if (VecVT != Op1.getValueType()) {
5960 Op1 = DAG.getNode(ISD::BITCAST, SDLoc(N), VecVT, Op1);
5961 DCI.AddToWorklist(Op1.getNode());
5962 }
5963 Op0 = DAG.getNode(ISD::BSWAP, SDLoc(N), VecVT, Op0);
5964 DCI.AddToWorklist(Op0.getNode());
5965 Op1 = DAG.getNode(ISD::BSWAP, SDLoc(N), VecVT, Op1);
5966 DCI.AddToWorklist(Op1.getNode());
5967 return DAG.getVectorShuffle(VecVT, SDLoc(N), Op0, Op1, SV->getMask());
5968 }
5969 }
5970
5971 return SDValue();
5972}
5973
5974static bool combineCCMask(SDValue &CCReg, int &CCValid, int &CCMask) {
5975 // We have a SELECT_CCMASK or BR_CCMASK comparing the condition code
5976 // set by the CCReg instruction using the CCValid / CCMask masks.
5977 // If the CCReg instruction is itself an ICMP testing the condition
5978 // code set by some other instruction, see whether we can directly
5979 // use that condition code.
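// Illustrative example: a BR_CCMASK testing
// (icmp ne (select_ccmask 1, 0, V, M, CC), 0) reduces to a BR_CCMASK with
// masks V / M applied to CC itself, eliminating the ICMP.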
5980
5981 // Verify that we have an ICMP against some constant.
5982 if (CCValid != SystemZ::CCMASK_ICMP)
5983 return false;
5984 auto *ICmp = CCReg.getNode();
5985 if (ICmp->getOpcode() != SystemZISD::ICMP)
5986 return false;
5987 auto *CompareLHS = ICmp->getOperand(0).getNode();
5988 auto *CompareRHS = dyn_cast<ConstantSDNode>(ICmp->getOperand(1));
5989 if (!CompareRHS)
5990 return false;
5991
5992 // Optimize the case where CompareLHS is a SELECT_CCMASK.
5993 if (CompareLHS->getOpcode() == SystemZISD::SELECT_CCMASK) {
5994 // Verify that we have an appropriate mask for an EQ or NE comparison.
5995 bool Invert = false;
5996 if (CCMask == SystemZ::CCMASK_CMP_NE)
5997 Invert = !Invert;
5998 else if (CCMask != SystemZ::CCMASK_CMP_EQ)
5999 return false;
6000
6001 // Verify that the ICMP compares against one of the select values.
6002 auto *TrueVal = dyn_cast<ConstantSDNode>(CompareLHS->getOperand(0));
6003 if (!TrueVal)
6004 return false;
6005 auto *FalseVal = dyn_cast<ConstantSDNode>(CompareLHS->getOperand(1));
6006 if (!FalseVal)
6007 return false;
6008 if (CompareRHS->getZExtValue() == FalseVal->getZExtValue())
6009 Invert = !Invert;
6010 else if (CompareRHS->getZExtValue() != TrueVal->getZExtValue())
6011 return false;
6012
6013 // Compute the effective CC mask for the new branch or select.
6014 auto *NewCCValid = dyn_cast<ConstantSDNode>(CompareLHS->getOperand(2));
6015 auto *NewCCMask = dyn_cast<ConstantSDNode>(CompareLHS->getOperand(3));
6016 if (!NewCCValid || !NewCCMask)
6017 return false;
6018 CCValid = NewCCValid->getZExtValue();
6019 CCMask = NewCCMask->getZExtValue();
6020 if (Invert)
6021 CCMask ^= CCValid;
6022
6023 // Return the updated CCReg link.
6024 CCReg = CompareLHS->getOperand(4);
6025 return true;
6026 }
6027
6028 // Optimize the case where CompareLHS is (SRA (SHL (IPM))).
6029 if (CompareLHS->getOpcode() == ISD::SRA) {
6030 auto *SRACount = dyn_cast<ConstantSDNode>(CompareLHS->getOperand(1));
6031 if (!SRACount || SRACount->getZExtValue() != 30)
6032 return false;
6033 auto *SHL = CompareLHS->getOperand(0).getNode();
6034 if (SHL->getOpcode() != ISD::SHL)
6035 return false;
6036 auto *SHLCount = dyn_cast<ConstantSDNode>(SHL->getOperand(1));
6037 if (!SHLCount || SHLCount->getZExtValue() != 30 - SystemZ::IPM_CC)
6038 return false;
6039 auto *IPM = SHL->getOperand(0).getNode();
6040 if (IPM->getOpcode() != SystemZISD::IPM)
6041 return false;
6042
6043 // Avoid introducing CC spills (because SRA would clobber CC).
6044 if (!CompareLHS->hasOneUse())
6045 return false;
6046 // Verify that the ICMP compares against zero.
6047 if (CompareRHS->getZExtValue() != 0)
6048 return false;
6049
6050 // Compute the effective CC mask for the new branch or select.
6051 switch (CCMask) {
6052 case SystemZ::CCMASK_CMP_EQ: break;
6053 case SystemZ::CCMASK_CMP_NE: break;
6054 case SystemZ::CCMASK_CMP_LT: CCMask = SystemZ::CCMASK_CMP_GT; break;
6055 case SystemZ::CCMASK_CMP_GT: CCMask = SystemZ::CCMASK_CMP_LT; break;
6056 case SystemZ::CCMASK_CMP_LE: CCMask = SystemZ::CCMASK_CMP_GE; break;
6057 case SystemZ::CCMASK_CMP_GE: CCMask = SystemZ::CCMASK_CMP_LE; break;
6058 default: return false;
6059 }
6060
6061 // Return the updated CCReg link.
6062 CCReg = IPM->getOperand(0);
6063 return true;
6064 }
6065
6066 return false;
6067}
6068
6069SDValue SystemZTargetLowering::combineBR_CCMASK(
6070 SDNode *N, DAGCombinerInfo &DCI) const {
6071 SelectionDAG &DAG = DCI.DAG;
6072
6073 // Combine BR_CCMASK (ICMP (SELECT_CCMASK)) into a single BR_CCMASK.
6074 auto *CCValid = dyn_cast<ConstantSDNode>(N->getOperand(1));
6075 auto *CCMask = dyn_cast<ConstantSDNode>(N->getOperand(2));
6076 if (!CCValid || !CCMask)
6077 return SDValue();
6078
6079 int CCValidVal = CCValid->getZExtValue();
6080 int CCMaskVal = CCMask->getZExtValue();
6081 SDValue Chain = N->getOperand(0);
6082 SDValue CCReg = N->getOperand(4);
6083
6084 if (combineCCMask(CCReg, CCValidVal, CCMaskVal))
6085 return DAG.getNode(SystemZISD::BR_CCMASK, SDLoc(N), N->getValueType(0),
6086 Chain,
6087 DAG.getTargetConstant(CCValidVal, SDLoc(N), MVT::i32),
6088 DAG.getTargetConstant(CCMaskVal, SDLoc(N), MVT::i32),
6089 N->getOperand(3), CCReg);
6090 return SDValue();
6091}
6092
6093SDValue SystemZTargetLowering::combineSELECT_CCMASK(
6094 SDNode *N, DAGCombinerInfo &DCI) const {
6095 SelectionDAG &DAG = DCI.DAG;
6096
6097 // Combine SELECT_CCMASK (ICMP (SELECT_CCMASK)) into a single SELECT_CCMASK.
6098 auto *CCValid = dyn_cast<ConstantSDNode>(N->getOperand(2));
6099 auto *CCMask = dyn_cast<ConstantSDNode>(N->getOperand(3));
6100 if (!CCValid || !CCMask)
6101 return SDValue();
6102
6103 int CCValidVal = CCValid->getZExtValue();
6104 int CCMaskVal = CCMask->getZExtValue();
6105 SDValue CCReg = N->getOperand(4);
6106
6107 if (combineCCMask(CCReg, CCValidVal, CCMaskVal))
6108 return DAG.getNode(SystemZISD::SELECT_CCMASK, SDLoc(N), N->getValueType(0),
6109 N->getOperand(0), N->getOperand(1),
6110 DAG.getTargetConstant(CCValidVal, SDLoc(N), MVT::i32),
6111 DAG.getTargetConstant(CCMaskVal, SDLoc(N), MVT::i32),
6112 CCReg);
6113 return SDValue();
6114}
6115
6116
6117SDValue SystemZTargetLowering::combineGET_CCMASK(
6118 SDNode *N, DAGCombinerInfo &DCI) const {
6119
6120 // Optimize away GET_CCMASK (SELECT_CCMASK) if the CC masks are compatible
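// Illustrative example: (get_ccmask (select_ccmask 1, 0, V, M, CC), V', M')
// folds to CC when M matches the restriction of M' to V, so the 0/1
// materialization and re-test disappear.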
6121 auto *CCValid = dyn_cast<ConstantSDNode>(N->getOperand(1));
6122 auto *CCMask = dyn_cast<ConstantSDNode>(N->getOperand(2));
6123 if (!CCValid || !CCMask)
6124 return SDValue();
6125 int CCValidVal = CCValid->getZExtValue();
6126 int CCMaskVal = CCMask->getZExtValue();
6127
6128 SDValue Select = N->getOperand(0);
6129 if (Select->getOpcode() != SystemZISD::SELECT_CCMASK)
6130 return SDValue();
6131
6132 auto *SelectCCValid = dyn_cast<ConstantSDNode>(Select->getOperand(2));
6133 auto *SelectCCMask = dyn_cast<ConstantSDNode>(Select->getOperand(3));
6134 if (!SelectCCValid || !SelectCCMask)
6135 return SDValue();
6136 int SelectCCValidVal = SelectCCValid->getZExtValue();
6137 int SelectCCMaskVal = SelectCCMask->getZExtValue();
6138
6139 auto *TrueVal = dyn_cast<ConstantSDNode>(Select->getOperand(0));
6140 auto *FalseVal = dyn_cast<ConstantSDNode>(Select->getOperand(1));
6141 if (!TrueVal || !FalseVal)
6142 return SDValue();
6143 if (TrueVal->getZExtValue() != 0 && FalseVal->getZExtValue() == 0)
6144 ;
6145 else if (TrueVal->getZExtValue() == 0 && FalseVal->getZExtValue() != 0)
6146 SelectCCMaskVal ^= SelectCCValidVal;
6147 else
6148 return SDValue();
6149
6150 if (SelectCCValidVal & ~CCValidVal)
6151 return SDValue();
6152 if (SelectCCMaskVal != (CCMaskVal & SelectCCValidVal))
6153 return SDValue();
6154
6155 return Select->getOperand(4);
6156}
6157
6158SDValue SystemZTargetLowering::combineIntDIVREM(
6159 SDNode *N, DAGCombinerInfo &DCI) const {
6160 SelectionDAG &DAG = DCI.DAG;
6161 EVT VT = N->getValueType(0);
6162 // In the case where the divisor is a vector of constants, a cheaper