Bug Summary

File: lib/Target/X86/X86ISelLowering.cpp
Warning: line 33034, column 3
Division by zero
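
Note: the statement flagged at line 33034 lies outside the portion of the file reproduced below. As a general illustration only (hypothetical code, not the LLVM source in question), the core.DivideZero checker reports a division that is reachable on a path where the divisor is constrained to zero, for example:

  int scale(int Num, int Den) {
    int Bonus = 0;
    if (Den != 0)                 // the analyzer also explores the branch where Den == 0
      Bonus = 1;
    return (Num + Bonus) / Den;   // Division by zero reported on the Den == 0 path
  }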

Annotated Source Code

clang -cc1 -triple x86_64-pc-linux-gnu -analyze -disable-free -disable-llvm-verifier -discard-value-names -main-file-name X86ISelLowering.cpp -analyzer-store=region -analyzer-opt-analyze-nested-blocks -analyzer-checker=core -analyzer-checker=apiModeling -analyzer-checker=unix -analyzer-checker=deadcode -analyzer-checker=cplusplus -analyzer-checker=security.insecureAPI.UncheckedReturn -analyzer-checker=security.insecureAPI.getpw -analyzer-checker=security.insecureAPI.gets -analyzer-checker=security.insecureAPI.mktemp -analyzer-checker=security.insecureAPI.mkstemp -analyzer-checker=security.insecureAPI.vfork -analyzer-checker=nullability.NullPassedToNonnull -analyzer-checker=nullability.NullReturnedFromNonnull -analyzer-output plist -w -analyzer-config-compatibility-mode=true -mrelocation-model pic -pic-level 2 -mthread-model posix -mframe-pointer=none -fmath-errno -masm-verbose -mconstructor-aliases -munwind-tables -fuse-init-array -target-cpu x86-64 -dwarf-column-info -debugger-tuning=gdb -ffunction-sections -fdata-sections -resource-dir /usr/lib/llvm-10/lib/clang/10.0.0 -D _DEBUG -D _GNU_SOURCE -D __STDC_CONSTANT_MACROS -D __STDC_FORMAT_MACROS -D __STDC_LIMIT_MACROS -I /build/llvm-toolchain-snapshot-10~svn374877/build-llvm/lib/Target/X86 -I /build/llvm-toolchain-snapshot-10~svn374877/lib/Target/X86 -I /build/llvm-toolchain-snapshot-10~svn374877/build-llvm/include -I /build/llvm-toolchain-snapshot-10~svn374877/include -U NDEBUG -internal-isystem /usr/lib/gcc/x86_64-linux-gnu/6.3.0/../../../../include/c++/6.3.0 -internal-isystem /usr/lib/gcc/x86_64-linux-gnu/6.3.0/../../../../include/x86_64-linux-gnu/c++/6.3.0 -internal-isystem /usr/lib/gcc/x86_64-linux-gnu/6.3.0/../../../../include/x86_64-linux-gnu/c++/6.3.0 -internal-isystem /usr/lib/gcc/x86_64-linux-gnu/6.3.0/../../../../include/c++/6.3.0/backward -internal-isystem /usr/local/include -internal-isystem /usr/lib/llvm-10/lib/clang/10.0.0/include -internal-externc-isystem /usr/include/x86_64-linux-gnu -internal-externc-isystem /include -internal-externc-isystem /usr/include -O2 -Wno-unused-parameter -Wwrite-strings -Wno-missing-field-initializers -Wno-long-long -Wno-maybe-uninitialized -Wno-comment -std=c++14 -fdeprecated-macro -fdebug-compilation-dir /build/llvm-toolchain-snapshot-10~svn374877/build-llvm/lib/Target/X86 -fdebug-prefix-map=/build/llvm-toolchain-snapshot-10~svn374877=. -ferror-limit 19 -fmessage-length 0 -fvisibility-inlines-hidden -stack-protector 2 -fgnuc-version=4.2.1 -fobjc-runtime=gcc -fdiagnostics-show-option -vectorize-loops -vectorize-slp -analyzer-output=html -analyzer-config stable-report-filename=true -faddrsig -o /tmp/scan-build-2019-10-15-233810-7101-1 -x c++ /build/llvm-toolchain-snapshot-10~svn374877/lib/Target/X86/X86ISelLowering.cpp

/build/llvm-toolchain-snapshot-10~svn374877/lib/Target/X86/X86ISelLowering.cpp

1//===-- X86ISelLowering.cpp - X86 DAG Lowering Implementation -------------===//
2//
3// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
4// See https://llvm.org/LICENSE.txt for license information.
5// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
6//
7//===----------------------------------------------------------------------===//
8//
9// This file defines the interfaces that X86 uses to lower LLVM code into a
10// selection DAG.
11//
12//===----------------------------------------------------------------------===//
13
14#include "X86ISelLowering.h"
15#include "Utils/X86ShuffleDecode.h"
16#include "X86CallingConv.h"
17#include "X86FrameLowering.h"
18#include "X86InstrBuilder.h"
19#include "X86IntrinsicsInfo.h"
20#include "X86MachineFunctionInfo.h"
21#include "X86TargetMachine.h"
22#include "X86TargetObjectFile.h"
23#include "llvm/ADT/SmallBitVector.h"
24#include "llvm/ADT/SmallSet.h"
25#include "llvm/ADT/Statistic.h"
26#include "llvm/ADT/StringExtras.h"
27#include "llvm/ADT/StringSwitch.h"
28#include "llvm/Analysis/EHPersonalities.h"
29#include "llvm/CodeGen/IntrinsicLowering.h"
30#include "llvm/CodeGen/MachineFrameInfo.h"
31#include "llvm/CodeGen/MachineFunction.h"
32#include "llvm/CodeGen/MachineInstrBuilder.h"
33#include "llvm/CodeGen/MachineJumpTableInfo.h"
34#include "llvm/CodeGen/MachineModuleInfo.h"
35#include "llvm/CodeGen/MachineRegisterInfo.h"
36#include "llvm/CodeGen/TargetLowering.h"
37#include "llvm/CodeGen/WinEHFuncInfo.h"
38#include "llvm/IR/CallSite.h"
39#include "llvm/IR/CallingConv.h"
40#include "llvm/IR/Constants.h"
41#include "llvm/IR/DerivedTypes.h"
42#include "llvm/IR/DiagnosticInfo.h"
43#include "llvm/IR/Function.h"
44#include "llvm/IR/GlobalAlias.h"
45#include "llvm/IR/GlobalVariable.h"
46#include "llvm/IR/Instructions.h"
47#include "llvm/IR/Intrinsics.h"
48#include "llvm/MC/MCAsmInfo.h"
49#include "llvm/MC/MCContext.h"
50#include "llvm/MC/MCExpr.h"
51#include "llvm/MC/MCSymbol.h"
52#include "llvm/Support/CommandLine.h"
53#include "llvm/Support/Debug.h"
54#include "llvm/Support/ErrorHandling.h"
55#include "llvm/Support/KnownBits.h"
56#include "llvm/Support/MathExtras.h"
57#include "llvm/Target/TargetOptions.h"
58#include <algorithm>
59#include <bitset>
60#include <cctype>
61#include <numeric>
62using namespace llvm;
63
64#define DEBUG_TYPE "x86-isel"
65
66STATISTIC(NumTailCalls, "Number of tail calls");
67
68static cl::opt<int> ExperimentalPrefLoopAlignment(
69 "x86-experimental-pref-loop-alignment", cl::init(4),
70 cl::desc(
71 "Sets the preferable loop alignment for experiments (as log2 bytes)"
72 "(the last x86-experimental-pref-loop-alignment bits"
73 " of the loop header PC will be 0)."),
74 cl::Hidden);
75
76// Added in 10.0.
77static cl::opt<bool> EnableOldKNLABI(
78 "x86-enable-old-knl-abi", cl::init(false),
79 cl::desc("Enables passing v32i16 and v64i8 in 2 YMM registers instead of "
80 "one ZMM register on AVX512F, but not AVX512BW targets."),
81 cl::Hidden);
82
83static cl::opt<bool> MulConstantOptimization(
84 "mul-constant-optimization", cl::init(true),
85 cl::desc("Replace 'mul x, Const' with more effective instructions like "
86 "SHIFT, LEA, etc."),
87 cl::Hidden);
88
89static cl::opt<bool> ExperimentalUnorderedISEL(
90 "x86-experimental-unordered-atomic-isel", cl::init(false),
91 cl::desc("Use LoadSDNode and StoreSDNode instead of "
92 "AtomicSDNode for unordered atomic loads and "
93 "stores respectively."),
94 cl::Hidden);
95
96/// Call this when the user attempts to do something unsupported, like
97/// returning a double without SSE2 enabled on x86_64. This is not fatal, unlike
98/// report_fatal_error, so calling code should attempt to recover without
99/// crashing.
100static void errorUnsupported(SelectionDAG &DAG, const SDLoc &dl,
101 const char *Msg) {
102 MachineFunction &MF = DAG.getMachineFunction();
103 DAG.getContext()->diagnose(
104 DiagnosticInfoUnsupported(MF.getFunction(), Msg, dl.getDebugLoc()));
105}
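
Illustration (hypothetical call site, not taken from this file; the message string is a placeholder):

  errorUnsupported(DAG, dl, "SSE register return with SSE disabled");

The diagnostic goes through LLVMContext::diagnose(), so code generation can continue instead of aborting.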
106
107X86TargetLowering::X86TargetLowering(const X86TargetMachine &TM,
108 const X86Subtarget &STI)
109 : TargetLowering(TM), Subtarget(STI) {
110 bool UseX87 = !Subtarget.useSoftFloat() && Subtarget.hasX87();
111 X86ScalarSSEf64 = Subtarget.hasSSE2();
112 X86ScalarSSEf32 = Subtarget.hasSSE1();
113 MVT PtrVT = MVT::getIntegerVT(TM.getPointerSizeInBits(0));
114
115 // Set up the TargetLowering object.
116
117 // X86 is weird. It always uses i8 for shift amounts and setcc results.
118 setBooleanContents(ZeroOrOneBooleanContent);
119 // X86-SSE is even stranger. It uses -1 or 0 for vector masks.
120 setBooleanVectorContents(ZeroOrNegativeOneBooleanContent);
121
122 // For 64-bit, since we have so many registers, use the ILP scheduler.
123 // For 32-bit, use the register pressure specific scheduling.
124 // For Atom, always use ILP scheduling.
125 if (Subtarget.isAtom())
126 setSchedulingPreference(Sched::ILP);
127 else if (Subtarget.is64Bit())
128 setSchedulingPreference(Sched::ILP);
129 else
130 setSchedulingPreference(Sched::RegPressure);
131 const X86RegisterInfo *RegInfo = Subtarget.getRegisterInfo();
132 setStackPointerRegisterToSaveRestore(RegInfo->getStackRegister());
133
134 // Bypass expensive divides and use cheaper ones.
135 if (TM.getOptLevel() >= CodeGenOpt::Default) {
136 if (Subtarget.hasSlowDivide32())
137 addBypassSlowDiv(32, 8);
138 if (Subtarget.hasSlowDivide64() && Subtarget.is64Bit())
139 addBypassSlowDiv(64, 32);
140 }
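
Sketch (hypothetical and simplified, not code from this file): addBypassSlowDiv(32, 8) asks the divide-bypass transform to guard each 32-bit division with a runtime check so that small operands can use the cheaper 8-bit divide, roughly:

  // if both operands fit in 8 bits, take the fast path
  //   if (((Dividend | Divisor) & ~0xFFu) == 0)
  //     Quotient = 8-bit divide of the truncated operands;
  //   else
  //     Quotient = original 32-bit divide;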
141
142 if (Subtarget.isTargetWindowsMSVC() ||
143 Subtarget.isTargetWindowsItanium()) {
144 // Setup Windows compiler runtime calls.
145 setLibcallName(RTLIB::SDIV_I64, "_alldiv");
146 setLibcallName(RTLIB::UDIV_I64, "_aulldiv");
147 setLibcallName(RTLIB::SREM_I64, "_allrem");
148 setLibcallName(RTLIB::UREM_I64, "_aullrem");
149 setLibcallName(RTLIB::MUL_I64, "_allmul");
150 setLibcallCallingConv(RTLIB::SDIV_I64, CallingConv::X86_StdCall);
151 setLibcallCallingConv(RTLIB::UDIV_I64, CallingConv::X86_StdCall);
152 setLibcallCallingConv(RTLIB::SREM_I64, CallingConv::X86_StdCall);
153 setLibcallCallingConv(RTLIB::UREM_I64, CallingConv::X86_StdCall);
154 setLibcallCallingConv(RTLIB::MUL_I64, CallingConv::X86_StdCall);
155 }
156
157 if (Subtarget.isTargetDarwin()) {
158 // Darwin should use _setjmp/_longjmp instead of setjmp/longjmp.
159 setUseUnderscoreSetJmp(false);
160 setUseUnderscoreLongJmp(false);
161 } else if (Subtarget.isTargetWindowsGNU()) {
162 // MS runtime is weird: it exports _setjmp, but longjmp!
163 setUseUnderscoreSetJmp(true);
164 setUseUnderscoreLongJmp(false);
165 } else {
166 setUseUnderscoreSetJmp(true);
167 setUseUnderscoreLongJmp(true);
168 }
169
170 // If we don't have cmpxchg8b (meaning this is a 386/486), limit atomic size to
171 // 32 bits so the AtomicExpandPass will expand it so we don't need cmpxchg8b.
172 // FIXME: Should we be limiting the atomic size on other configs? Default is
173 // 1024.
174 if (!Subtarget.hasCmpxchg8b())
175 setMaxAtomicSizeInBitsSupported(32);
176
177 // Set up the register classes.
178 addRegisterClass(MVT::i8, &X86::GR8RegClass);
179 addRegisterClass(MVT::i16, &X86::GR16RegClass);
180 addRegisterClass(MVT::i32, &X86::GR32RegClass);
181 if (Subtarget.is64Bit())
182 addRegisterClass(MVT::i64, &X86::GR64RegClass);
183
184 for (MVT VT : MVT::integer_valuetypes())
185 setLoadExtAction(ISD::SEXTLOAD, VT, MVT::i1, Promote);
186
187 // We don't accept any truncstore of integer registers.
188 setTruncStoreAction(MVT::i64, MVT::i32, Expand);
189 setTruncStoreAction(MVT::i64, MVT::i16, Expand);
190 setTruncStoreAction(MVT::i64, MVT::i8 , Expand);
191 setTruncStoreAction(MVT::i32, MVT::i16, Expand);
192 setTruncStoreAction(MVT::i32, MVT::i8 , Expand);
193 setTruncStoreAction(MVT::i16, MVT::i8, Expand);
194
195 setTruncStoreAction(MVT::f64, MVT::f32, Expand);
196
197 // SETOEQ and SETUNE require checking two conditions.
198 setCondCodeAction(ISD::SETOEQ, MVT::f32, Expand);
199 setCondCodeAction(ISD::SETOEQ, MVT::f64, Expand);
200 setCondCodeAction(ISD::SETOEQ, MVT::f80, Expand);
201 setCondCodeAction(ISD::SETUNE, MVT::f32, Expand);
202 setCondCodeAction(ISD::SETUNE, MVT::f64, Expand);
203 setCondCodeAction(ISD::SETUNE, MVT::f80, Expand);
204
205 // Integer absolute.
206 if (Subtarget.hasCMov()) {
207 setOperationAction(ISD::ABS , MVT::i16 , Custom);
208 setOperationAction(ISD::ABS , MVT::i32 , Custom);
209 }
210 setOperationAction(ISD::ABS , MVT::i64 , Custom);
211
212 // Funnel shifts.
213 for (auto ShiftOp : {ISD::FSHL, ISD::FSHR}) {
214 setOperationAction(ShiftOp , MVT::i16 , Custom);
215 setOperationAction(ShiftOp , MVT::i32 , Custom);
216 if (Subtarget.is64Bit())
217 setOperationAction(ShiftOp , MVT::i64 , Custom);
218 }
219
220 // Promote all UINT_TO_FP to larger SINT_TO_FP's, as X86 doesn't have this
221 // operation.
222 setOperationAction(ISD::UINT_TO_FP , MVT::i1 , Promote);
223 setOperationAction(ISD::UINT_TO_FP , MVT::i8 , Promote);
224 setOperationAction(ISD::UINT_TO_FP , MVT::i16 , Promote);
225
226 if (!Subtarget.useSoftFloat()) {
227 // We have an algorithm for SSE2->double, and we turn this into a
228 // 64-bit FILD followed by conditional FADD for other targets.
229 setOperationAction(ISD::UINT_TO_FP , MVT::i64 , Custom);
230 // We have an algorithm for SSE2, and we turn this into a 64-bit
231 // FILD or VCVTUSI2SS/SD for other targets.
232 setOperationAction(ISD::UINT_TO_FP , MVT::i32 , Custom);
233 } else {
234 setOperationAction(ISD::UINT_TO_FP , MVT::i32 , Expand);
235 }
236
237 // Promote i1/i8 SINT_TO_FP to larger SINT_TO_FP's, as X86 doesn't have
238 // this operation.
239 setOperationAction(ISD::SINT_TO_FP , MVT::i1 , Promote);
240 setOperationAction(ISD::SINT_TO_FP , MVT::i8 , Promote);
241
242 if (!Subtarget.useSoftFloat()) {
243 // SSE has no i16 to fp conversion, only i32.
244 if (X86ScalarSSEf32) {
245 setOperationAction(ISD::SINT_TO_FP , MVT::i16 , Promote);
246 // f32 and f64 cases are Legal, f80 case is not
247 setOperationAction(ISD::SINT_TO_FP , MVT::i32 , Custom);
248 } else {
249 setOperationAction(ISD::SINT_TO_FP , MVT::i16 , Custom);
250 setOperationAction(ISD::SINT_TO_FP , MVT::i32 , Custom);
251 }
252 } else {
253 setOperationAction(ISD::SINT_TO_FP , MVT::i16 , Promote);
254 setOperationAction(ISD::SINT_TO_FP , MVT::i32 , Expand);
255 }
256
257 // Promote i1/i8 FP_TO_SINT to larger FP_TO_SINTS's, as X86 doesn't have
258 // this operation.
259 setOperationAction(ISD::FP_TO_SINT , MVT::i1 , Promote);
260 setOperationAction(ISD::FP_TO_SINT , MVT::i8 , Promote);
261
262 if (!Subtarget.useSoftFloat()) {
263 // In 32-bit mode these are custom lowered. In 64-bit mode F32 and F64
264 // are Legal, f80 is custom lowered.
265 setOperationAction(ISD::FP_TO_SINT , MVT::i64 , Custom);
266 setOperationAction(ISD::SINT_TO_FP , MVT::i64 , Custom);
267
268 setOperationAction(ISD::FP_TO_SINT , MVT::i16 , Custom);
269 setOperationAction(ISD::FP_TO_SINT , MVT::i32 , Custom);
270 } else {
271 setOperationAction(ISD::FP_TO_SINT , MVT::i16 , Promote);
272 setOperationAction(ISD::FP_TO_SINT , MVT::i32 , Expand);
273 setOperationAction(ISD::FP_TO_SINT , MVT::i64 , Expand);
274 }
275
276 // Handle FP_TO_UINT by promoting the destination to a larger signed
277 // conversion.
278 setOperationAction(ISD::FP_TO_UINT , MVT::i1 , Promote);
279 setOperationAction(ISD::FP_TO_UINT , MVT::i8 , Promote);
280 setOperationAction(ISD::FP_TO_UINT , MVT::i16 , Promote);
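
Illustration (hypothetical, not from this file): promoting the FP_TO_UINT destination means the unsigned result is obtained through a wider signed conversion, e.g. for a value known to fit:

  #include <cstdint>
  // float-to-u16 by way of a wider signed conversion (assumes the value fits in
  // uint16_t; out-of-range values would be undefined behavior anyway).
  uint16_t ToU16(float F) { return (uint16_t)(int32_t)F; }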
281
282 if (!Subtarget.useSoftFloat()) {
283 setOperationAction(ISD::FP_TO_UINT, MVT::i32, Custom);
284 setOperationAction(ISD::FP_TO_UINT, MVT::i64, Custom);
285 }
286
287 // TODO: when we have SSE, these could be more efficient, by using movd/movq.
288 if (!X86ScalarSSEf64) {
289 setOperationAction(ISD::BITCAST , MVT::f32 , Expand);
290 setOperationAction(ISD::BITCAST , MVT::i32 , Expand);
291 if (Subtarget.is64Bit()) {
292 setOperationAction(ISD::BITCAST , MVT::f64 , Expand);
293 // Without SSE, i64->f64 goes through memory.
294 setOperationAction(ISD::BITCAST , MVT::i64 , Expand);
295 }
296 } else if (!Subtarget.is64Bit())
297 setOperationAction(ISD::BITCAST , MVT::i64 , Custom);
298
299 // Scalar integer divide and remainder are lowered to use operations that
300 // produce two results, to match the available instructions. This exposes
301 // the two-result form to trivial CSE, which is able to combine x/y and x%y
302 // into a single instruction.
303 //
304 // Scalar integer multiply-high is also lowered to use two-result
305 // operations, to match the available instructions. However, plain multiply
306 // (low) operations are left as Legal, as there are single-result
307 // instructions for this in x86. Using the two-result multiply instructions
308 // when both high and low results are needed must be arranged by dagcombine.
309 for (auto VT : { MVT::i8, MVT::i16, MVT::i32, MVT::i64 }) {
310 setOperationAction(ISD::MULHS, VT, Expand);
311 setOperationAction(ISD::MULHU, VT, Expand);
312 setOperationAction(ISD::SDIV, VT, Expand);
313 setOperationAction(ISD::UDIV, VT, Expand);
314 setOperationAction(ISD::SREM, VT, Expand);
315 setOperationAction(ISD::UREM, VT, Expand);
316 }
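
Illustration (hypothetical, not part of this file): because x/y and x%y are both expanded toward the same two-result divide node, CSE lets a function that needs both values be selected to a single x86 division:

  #include <utility>
  // The quotient and remainder of the same operands come from one divide.
  std::pair<int, int> DivMod(int X, int Y) {
    return {X / Y, X % Y};
  }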
317
318 setOperationAction(ISD::BR_JT , MVT::Other, Expand);
319 setOperationAction(ISD::BRCOND , MVT::Other, Custom);
320 for (auto VT : { MVT::f32, MVT::f64, MVT::f80, MVT::f128,
321 MVT::i8, MVT::i16, MVT::i32, MVT::i64 }) {
322 setOperationAction(ISD::BR_CC, VT, Expand);
323 setOperationAction(ISD::SELECT_CC, VT, Expand);
324 }
325 if (Subtarget.is64Bit())
326 setOperationAction(ISD::SIGN_EXTEND_INREG, MVT::i32, Legal);
327 setOperationAction(ISD::SIGN_EXTEND_INREG, MVT::i16 , Legal);
328 setOperationAction(ISD::SIGN_EXTEND_INREG, MVT::i8 , Legal);
329 setOperationAction(ISD::SIGN_EXTEND_INREG, MVT::i1 , Expand);
330
331 setOperationAction(ISD::FREM , MVT::f32 , Expand);
332 setOperationAction(ISD::FREM , MVT::f64 , Expand);
333 setOperationAction(ISD::FREM , MVT::f80 , Expand);
334 setOperationAction(ISD::FREM , MVT::f128 , Expand);
335 setOperationAction(ISD::FLT_ROUNDS_ , MVT::i32 , Custom);
336
337 // Promote the i8 variants and force them on up to i32 which has a shorter
338 // encoding.
339 setOperationPromotedToType(ISD::CTTZ , MVT::i8 , MVT::i32);
340 setOperationPromotedToType(ISD::CTTZ_ZERO_UNDEF, MVT::i8 , MVT::i32);
341 if (!Subtarget.hasBMI()) {
342 setOperationAction(ISD::CTTZ , MVT::i16 , Custom);
343 setOperationAction(ISD::CTTZ , MVT::i32 , Custom);
344 setOperationAction(ISD::CTTZ_ZERO_UNDEF, MVT::i16 , Legal);
345 setOperationAction(ISD::CTTZ_ZERO_UNDEF, MVT::i32 , Legal);
346 if (Subtarget.is64Bit()) {
347 setOperationAction(ISD::CTTZ , MVT::i64 , Custom);
348 setOperationAction(ISD::CTTZ_ZERO_UNDEF, MVT::i64, Legal);
349 }
350 }
351
352 if (Subtarget.hasLZCNT()) {
353 // When promoting the i8 variants, force them to i32 for a shorter
354 // encoding.
355 setOperationPromotedToType(ISD::CTLZ , MVT::i8 , MVT::i32);
356 setOperationPromotedToType(ISD::CTLZ_ZERO_UNDEF, MVT::i8 , MVT::i32);
357 } else {
358 setOperationAction(ISD::CTLZ , MVT::i8 , Custom);
359 setOperationAction(ISD::CTLZ , MVT::i16 , Custom);
360 setOperationAction(ISD::CTLZ , MVT::i32 , Custom);
361 setOperationAction(ISD::CTLZ_ZERO_UNDEF, MVT::i8 , Custom);
362 setOperationAction(ISD::CTLZ_ZERO_UNDEF, MVT::i16 , Custom);
363 setOperationAction(ISD::CTLZ_ZERO_UNDEF, MVT::i32 , Custom);
364 if (Subtarget.is64Bit()) {
365 setOperationAction(ISD::CTLZ , MVT::i64 , Custom);
366 setOperationAction(ISD::CTLZ_ZERO_UNDEF, MVT::i64, Custom);
367 }
368 }
369
370 // Special handling for half-precision floating point conversions.
371 // If we don't have F16C support, then lower half float conversions
372 // into library calls.
373 if (Subtarget.useSoftFloat() || !Subtarget.hasF16C()) {
374 setOperationAction(ISD::FP16_TO_FP, MVT::f32, Expand);
375 setOperationAction(ISD::FP_TO_FP16, MVT::f32, Expand);
376 }
377
378 // There's never any support for operations beyond MVT::f32.
379 setOperationAction(ISD::FP16_TO_FP, MVT::f64, Expand);
380 setOperationAction(ISD::FP16_TO_FP, MVT::f80, Expand);
381 setOperationAction(ISD::FP16_TO_FP, MVT::f128, Expand);
382 setOperationAction(ISD::FP_TO_FP16, MVT::f64, Expand);
383 setOperationAction(ISD::FP_TO_FP16, MVT::f80, Expand);
384 setOperationAction(ISD::FP_TO_FP16, MVT::f128, Expand);
385
386 setLoadExtAction(ISD::EXTLOAD, MVT::f32, MVT::f16, Expand);
387 setLoadExtAction(ISD::EXTLOAD, MVT::f64, MVT::f16, Expand);
388 setLoadExtAction(ISD::EXTLOAD, MVT::f80, MVT::f16, Expand);
389 setLoadExtAction(ISD::EXTLOAD, MVT::f128, MVT::f16, Expand);
390 setTruncStoreAction(MVT::f32, MVT::f16, Expand);
391 setTruncStoreAction(MVT::f64, MVT::f16, Expand);
392 setTruncStoreAction(MVT::f80, MVT::f16, Expand);
393 setTruncStoreAction(MVT::f128, MVT::f16, Expand);
394
395 if (Subtarget.hasPOPCNT()) {
396 setOperationPromotedToType(ISD::CTPOP, MVT::i8, MVT::i32);
397 } else {
398 setOperationAction(ISD::CTPOP , MVT::i8 , Expand);
399 setOperationAction(ISD::CTPOP , MVT::i16 , Expand);
400 setOperationAction(ISD::CTPOP , MVT::i32 , Expand);
401 if (Subtarget.is64Bit())
402 setOperationAction(ISD::CTPOP , MVT::i64 , Expand);
403 else
404 setOperationAction(ISD::CTPOP , MVT::i64 , Custom);
405 }
406
407 setOperationAction(ISD::READCYCLECOUNTER , MVT::i64 , Custom);
408
409 if (!Subtarget.hasMOVBE())
410 setOperationAction(ISD::BSWAP , MVT::i16 , Expand);
411
412 // These should be promoted to a larger select which is supported.
413 setOperationAction(ISD::SELECT , MVT::i1 , Promote);
414 // X86 wants to expand cmov itself.
415 for (auto VT : { MVT::f32, MVT::f64, MVT::f80, MVT::f128 }) {
416 setOperationAction(ISD::SELECT, VT, Custom);
417 setOperationAction(ISD::SETCC, VT, Custom);
418 }
419 for (auto VT : { MVT::i8, MVT::i16, MVT::i32, MVT::i64 }) {
420 if (VT == MVT::i64 && !Subtarget.is64Bit())
421 continue;
422 setOperationAction(ISD::SELECT, VT, Custom);
423 setOperationAction(ISD::SETCC, VT, Custom);
424 }
425
426 // Custom action for SELECT MMX and expand action for SELECT_CC MMX
427 setOperationAction(ISD::SELECT, MVT::x86mmx, Custom);
428 setOperationAction(ISD::SELECT_CC, MVT::x86mmx, Expand);
429
430 setOperationAction(ISD::EH_RETURN , MVT::Other, Custom);
431 // NOTE: EH_SJLJ_SETJMP/_LONGJMP are not recommended, since
432 // LLVM/Clang supports zero-cost DWARF and SEH exception handling.
433 setOperationAction(ISD::EH_SJLJ_SETJMP, MVT::i32, Custom);
434 setOperationAction(ISD::EH_SJLJ_LONGJMP, MVT::Other, Custom);
435 setOperationAction(ISD::EH_SJLJ_SETUP_DISPATCH, MVT::Other, Custom);
436 if (TM.Options.ExceptionModel == ExceptionHandling::SjLj)
437 setLibcallName(RTLIB::UNWIND_RESUME, "_Unwind_SjLj_Resume");
438
439 // Darwin ABI issue.
440 for (auto VT : { MVT::i32, MVT::i64 }) {
441 if (VT == MVT::i64 && !Subtarget.is64Bit())
442 continue;
443 setOperationAction(ISD::ConstantPool , VT, Custom);
444 setOperationAction(ISD::JumpTable , VT, Custom);
445 setOperationAction(ISD::GlobalAddress , VT, Custom);
446 setOperationAction(ISD::GlobalTLSAddress, VT, Custom);
447 setOperationAction(ISD::ExternalSymbol , VT, Custom);
448 setOperationAction(ISD::BlockAddress , VT, Custom);
449 }
450
451 // 64-bit shl, sra, srl (iff 32-bit x86)
452 for (auto VT : { MVT::i32, MVT::i64 }) {
453 if (VT == MVT::i64 && !Subtarget.is64Bit())
454 continue;
455 setOperationAction(ISD::SHL_PARTS, VT, Custom);
456 setOperationAction(ISD::SRA_PARTS, VT, Custom);
457 setOperationAction(ISD::SRL_PARTS, VT, Custom);
458 }
459
460 if (Subtarget.hasSSEPrefetch() || Subtarget.has3DNow())
461 setOperationAction(ISD::PREFETCH , MVT::Other, Legal);
462
463 setOperationAction(ISD::ATOMIC_FENCE , MVT::Other, Custom);
464
465 // Expand certain atomics
466 for (auto VT : { MVT::i8, MVT::i16, MVT::i32, MVT::i64 }) {
467 setOperationAction(ISD::ATOMIC_CMP_SWAP_WITH_SUCCESS, VT, Custom);
468 setOperationAction(ISD::ATOMIC_LOAD_SUB, VT, Custom);
469 setOperationAction(ISD::ATOMIC_LOAD_ADD, VT, Custom);
470 setOperationAction(ISD::ATOMIC_LOAD_OR, VT, Custom);
471 setOperationAction(ISD::ATOMIC_LOAD_XOR, VT, Custom);
472 setOperationAction(ISD::ATOMIC_LOAD_AND, VT, Custom);
473 setOperationAction(ISD::ATOMIC_STORE, VT, Custom);
474 }
475
476 if (!Subtarget.is64Bit())
477 setOperationAction(ISD::ATOMIC_LOAD, MVT::i64, Custom);
478
479 if (Subtarget.hasCmpxchg16b()) {
480 setOperationAction(ISD::ATOMIC_CMP_SWAP_WITH_SUCCESS, MVT::i128, Custom);
481 }
482
483 // FIXME - use subtarget debug flags
484 if (!Subtarget.isTargetDarwin() && !Subtarget.isTargetELF() &&
485 !Subtarget.isTargetCygMing() && !Subtarget.isTargetWin64() &&
486 TM.Options.ExceptionModel != ExceptionHandling::SjLj) {
487 setOperationAction(ISD::EH_LABEL, MVT::Other, Expand);
488 }
489
490 setOperationAction(ISD::FRAME_TO_ARGS_OFFSET, MVT::i32, Custom);
491 setOperationAction(ISD::FRAME_TO_ARGS_OFFSET, MVT::i64, Custom);
492
493 setOperationAction(ISD::INIT_TRAMPOLINE, MVT::Other, Custom);
494 setOperationAction(ISD::ADJUST_TRAMPOLINE, MVT::Other, Custom);
495
496 setOperationAction(ISD::TRAP, MVT::Other, Legal);
497 setOperationAction(ISD::DEBUGTRAP, MVT::Other, Legal);
498
499 // VASTART needs to be custom lowered to use the VarArgsFrameIndex
500 setOperationAction(ISD::VASTART , MVT::Other, Custom);
501 setOperationAction(ISD::VAEND , MVT::Other, Expand);
502 bool Is64Bit = Subtarget.is64Bit();
503 setOperationAction(ISD::VAARG, MVT::Other, Is64Bit ? Custom : Expand);
504 setOperationAction(ISD::VACOPY, MVT::Other, Is64Bit ? Custom : Expand);
505
506 setOperationAction(ISD::STACKSAVE, MVT::Other, Expand);
507 setOperationAction(ISD::STACKRESTORE, MVT::Other, Expand);
508
509 setOperationAction(ISD::DYNAMIC_STACKALLOC, PtrVT, Custom);
510
511 // GC_TRANSITION_START and GC_TRANSITION_END need custom lowering.
512 setOperationAction(ISD::GC_TRANSITION_START, MVT::Other, Custom);
513 setOperationAction(ISD::GC_TRANSITION_END, MVT::Other, Custom);
514
515 if (!Subtarget.useSoftFloat() && X86ScalarSSEf64) {
516 // f32 and f64 use SSE.
517 // Set up the FP register classes.
518 addRegisterClass(MVT::f32, Subtarget.hasAVX512() ? &X86::FR32XRegClass
519 : &X86::FR32RegClass);
520 addRegisterClass(MVT::f64, Subtarget.hasAVX512() ? &X86::FR64XRegClass
521 : &X86::FR64RegClass);
522
523 // Disable f32->f64 extload as we can only generate this in one instruction
524 // under optsize. So it's easier to pattern match (fpext (load)) for that
525 // case instead of needing to emit 2 instructions for extload in the
526 // non-optsize case.
527 setLoadExtAction(ISD::EXTLOAD, MVT::f64, MVT::f32, Expand);
528
529 for (auto VT : { MVT::f32, MVT::f64 }) {
530 // Use ANDPD to simulate FABS.
531 setOperationAction(ISD::FABS, VT, Custom);
532
533 // Use XORP to simulate FNEG.
534 setOperationAction(ISD::FNEG, VT, Custom);
535
536 // Use ANDPD and ORPD to simulate FCOPYSIGN.
537 setOperationAction(ISD::FCOPYSIGN, VT, Custom);
538
539 // These might be better off as horizontal vector ops.
540 setOperationAction(ISD::FADD, VT, Custom);
541 setOperationAction(ISD::FSUB, VT, Custom);
542
543 // We don't support sin/cos/fmod
544 setOperationAction(ISD::FSIN , VT, Expand);
545 setOperationAction(ISD::FCOS , VT, Expand);
546 setOperationAction(ISD::FSINCOS, VT, Expand);
547 }
548
549 // Lower this to MOVMSK plus an AND.
550 setOperationAction(ISD::FGETSIGN, MVT::i64, Custom);
551 setOperationAction(ISD::FGETSIGN, MVT::i32, Custom);
552
553 } else if (!Subtarget.useSoftFloat() && X86ScalarSSEf32 && (UseX87 || Is64Bit)) {
554 // Use SSE for f32, x87 for f64.
555 // Set up the FP register classes.
556 addRegisterClass(MVT::f32, &X86::FR32RegClass);
557 if (UseX87)
558 addRegisterClass(MVT::f64, &X86::RFP64RegClass);
559
560 // Use ANDPS to simulate FABS.
561 setOperationAction(ISD::FABS , MVT::f32, Custom);
562
563 // Use XORP to simulate FNEG.
564 setOperationAction(ISD::FNEG , MVT::f32, Custom);
565
566 if (UseX87)
567 setOperationAction(ISD::UNDEF, MVT::f64, Expand);
568
569 // Use ANDPS and ORPS to simulate FCOPYSIGN.
570 if (UseX87)
571 setOperationAction(ISD::FCOPYSIGN, MVT::f64, Expand);
572 setOperationAction(ISD::FCOPYSIGN, MVT::f32, Custom);
573
574 // We don't support sin/cos/fmod
575 setOperationAction(ISD::FSIN , MVT::f32, Expand);
576 setOperationAction(ISD::FCOS , MVT::f32, Expand);
577 setOperationAction(ISD::FSINCOS, MVT::f32, Expand);
578
579 if (UseX87) {
580 // Always expand sin/cos functions even though x87 has an instruction.
581 setOperationAction(ISD::FSIN, MVT::f64, Expand);
582 setOperationAction(ISD::FCOS, MVT::f64, Expand);
583 setOperationAction(ISD::FSINCOS, MVT::f64, Expand);
584 }
585 } else if (UseX87) {
586 // f32 and f64 in x87.
587 // Set up the FP register classes.
588 addRegisterClass(MVT::f64, &X86::RFP64RegClass);
589 addRegisterClass(MVT::f32, &X86::RFP32RegClass);
590
591 for (auto VT : { MVT::f32, MVT::f64 }) {
592 setOperationAction(ISD::UNDEF, VT, Expand);
593 setOperationAction(ISD::FCOPYSIGN, VT, Expand);
594
595 // Always expand sin/cos functions even though x87 has an instruction.
596 setOperationAction(ISD::FSIN , VT, Expand);
597 setOperationAction(ISD::FCOS , VT, Expand);
598 setOperationAction(ISD::FSINCOS, VT, Expand);
599 }
600 }
601
602 // Expand FP32 immediates into loads from the stack, save special cases.
603 if (isTypeLegal(MVT::f32)) {
604 if (UseX87 && (getRegClassFor(MVT::f32) == &X86::RFP32RegClass)) {
605 addLegalFPImmediate(APFloat(+0.0f)); // FLD0
606 addLegalFPImmediate(APFloat(+1.0f)); // FLD1
607 addLegalFPImmediate(APFloat(-0.0f)); // FLD0/FCHS
608 addLegalFPImmediate(APFloat(-1.0f)); // FLD1/FCHS
609 } else // SSE immediates.
610 addLegalFPImmediate(APFloat(+0.0f)); // xorps
611 }
612 // Expand FP64 immediates into loads from the stack, save special cases.
613 if (isTypeLegal(MVT::f64)) {
614 if (UseX87 && getRegClassFor(MVT::f64) == &X86::RFP64RegClass) {
615 addLegalFPImmediate(APFloat(+0.0)); // FLD0
616 addLegalFPImmediate(APFloat(+1.0)); // FLD1
617 addLegalFPImmediate(APFloat(-0.0)); // FLD0/FCHS
618 addLegalFPImmediate(APFloat(-1.0)); // FLD1/FCHS
619 } else // SSE immediates.
620 addLegalFPImmediate(APFloat(+0.0)); // xorpd
621 }
622
623 // We don't support FMA.
624 setOperationAction(ISD::FMA, MVT::f64, Expand);
625 setOperationAction(ISD::FMA, MVT::f32, Expand);
626
627 // f80 always uses X87.
628 if (UseX87) {
629 addRegisterClass(MVT::f80, &X86::RFP80RegClass);
630 setOperationAction(ISD::UNDEF, MVT::f80, Expand);
631 setOperationAction(ISD::FCOPYSIGN, MVT::f80, Expand);
632 {
633 APFloat TmpFlt = APFloat::getZero(APFloat::x87DoubleExtended());
634 addLegalFPImmediate(TmpFlt); // FLD0
635 TmpFlt.changeSign();
636 addLegalFPImmediate(TmpFlt); // FLD0/FCHS
637
638 bool ignored;
639 APFloat TmpFlt2(+1.0);
640 TmpFlt2.convert(APFloat::x87DoubleExtended(), APFloat::rmNearestTiesToEven,
641 &ignored);
642 addLegalFPImmediate(TmpFlt2); // FLD1
643 TmpFlt2.changeSign();
644 addLegalFPImmediate(TmpFlt2); // FLD1/FCHS
645 }
646
647 // Always expand sin/cos functions even though x87 has an instruction.
648 setOperationAction(ISD::FSIN , MVT::f80, Expand);
649 setOperationAction(ISD::FCOS , MVT::f80, Expand);
650 setOperationAction(ISD::FSINCOS, MVT::f80, Expand);
651
652 setOperationAction(ISD::FFLOOR, MVT::f80, Expand);
653 setOperationAction(ISD::FCEIL, MVT::f80, Expand);
654 setOperationAction(ISD::FTRUNC, MVT::f80, Expand);
655 setOperationAction(ISD::FRINT, MVT::f80, Expand);
656 setOperationAction(ISD::FNEARBYINT, MVT::f80, Expand);
657 setOperationAction(ISD::FMA, MVT::f80, Expand);
658 setOperationAction(ISD::LROUND, MVT::f80, Expand);
659 setOperationAction(ISD::LLROUND, MVT::f80, Expand);
660 setOperationAction(ISD::LRINT, MVT::f80, Expand);
661 setOperationAction(ISD::LLRINT, MVT::f80, Expand);
662 }
663
664 // f128 uses xmm registers, but most operations require libcalls.
665 if (!Subtarget.useSoftFloat() && Subtarget.is64Bit() && Subtarget.hasSSE1()) {
666 addRegisterClass(MVT::f128, Subtarget.hasVLX() ? &X86::VR128XRegClass
667 : &X86::VR128RegClass);
668
669 addLegalFPImmediate(APFloat::getZero(APFloat::IEEEquad())); // xorps
670
671 setOperationAction(ISD::FADD, MVT::f128, Custom);
672 setOperationAction(ISD::FSUB, MVT::f128, Custom);
673 setOperationAction(ISD::FDIV, MVT::f128, Custom);
674 setOperationAction(ISD::FMUL, MVT::f128, Custom);
675 setOperationAction(ISD::FMA, MVT::f128, Expand);
676
677 setOperationAction(ISD::FABS, MVT::f128, Custom);
678 setOperationAction(ISD::FNEG, MVT::f128, Custom);
679 setOperationAction(ISD::FCOPYSIGN, MVT::f128, Custom);
680
681 setOperationAction(ISD::FSIN, MVT::f128, Expand);
682 setOperationAction(ISD::FCOS, MVT::f128, Expand);
683 setOperationAction(ISD::FSINCOS, MVT::f128, Expand);
684 setOperationAction(ISD::FSQRT, MVT::f128, Expand);
685
686 setOperationAction(ISD::FP_EXTEND, MVT::f128, Custom);
687 // We need to custom handle any FP_ROUND with an f128 input, but
688 // LegalizeDAG uses the result type to know when to run a custom handler.
689 // So we have to list all legal floating point result types here.
690 if (isTypeLegal(MVT::f32)) {
691 setOperationAction(ISD::FP_ROUND, MVT::f32, Custom);
692 setOperationAction(ISD::STRICT_FP_ROUND, MVT::f32, Custom);
693 }
694 if (isTypeLegal(MVT::f64)) {
695 setOperationAction(ISD::FP_ROUND, MVT::f64, Custom);
696 setOperationAction(ISD::STRICT_FP_ROUND, MVT::f64, Custom);
697 }
698 if (isTypeLegal(MVT::f80)) {
699 setOperationAction(ISD::FP_ROUND, MVT::f80, Custom);
700 setOperationAction(ISD::STRICT_FP_ROUND, MVT::f80, Custom);
701 }
702
703 setOperationAction(ISD::SETCC, MVT::f128, Custom);
704
705 setLoadExtAction(ISD::EXTLOAD, MVT::f128, MVT::f32, Expand);
706 setLoadExtAction(ISD::EXTLOAD, MVT::f128, MVT::f64, Expand);
707 setLoadExtAction(ISD::EXTLOAD, MVT::f128, MVT::f80, Expand);
708 setTruncStoreAction(MVT::f128, MVT::f32, Expand);
709 setTruncStoreAction(MVT::f128, MVT::f64, Expand);
710 setTruncStoreAction(MVT::f128, MVT::f80, Expand);
711 }
712
713 // Always use a library call for pow.
714 setOperationAction(ISD::FPOW , MVT::f32 , Expand);
715 setOperationAction(ISD::FPOW , MVT::f64 , Expand);
716 setOperationAction(ISD::FPOW , MVT::f80 , Expand);
717 setOperationAction(ISD::FPOW , MVT::f128 , Expand);
718
719 setOperationAction(ISD::FLOG, MVT::f80, Expand);
720 setOperationAction(ISD::FLOG2, MVT::f80, Expand);
721 setOperationAction(ISD::FLOG10, MVT::f80, Expand);
722 setOperationAction(ISD::FEXP, MVT::f80, Expand);
723 setOperationAction(ISD::FEXP2, MVT::f80, Expand);
724 setOperationAction(ISD::FMINNUM, MVT::f80, Expand);
725 setOperationAction(ISD::FMAXNUM, MVT::f80, Expand);
726
727 // Some FP actions are always expanded for vector types.
728 for (auto VT : { MVT::v4f32, MVT::v8f32, MVT::v16f32,
729 MVT::v2f64, MVT::v4f64, MVT::v8f64 }) {
730 setOperationAction(ISD::FSIN, VT, Expand);
731 setOperationAction(ISD::FSINCOS, VT, Expand);
732 setOperationAction(ISD::FCOS, VT, Expand);
733 setOperationAction(ISD::FREM, VT, Expand);
734 setOperationAction(ISD::FCOPYSIGN, VT, Expand);
735 setOperationAction(ISD::FPOW, VT, Expand);
736 setOperationAction(ISD::FLOG, VT, Expand);
737 setOperationAction(ISD::FLOG2, VT, Expand);
738 setOperationAction(ISD::FLOG10, VT, Expand);
739 setOperationAction(ISD::FEXP, VT, Expand);
740 setOperationAction(ISD::FEXP2, VT, Expand);
741 }
742
743 // First set operation action for all vector types to either promote
744 // (for widening) or expand (for scalarization). Then we will selectively
745 // turn on ones that can be effectively codegen'd.
746 for (MVT VT : MVT::fixedlen_vector_valuetypes()) {
747 setOperationAction(ISD::SDIV, VT, Expand);
748 setOperationAction(ISD::UDIV, VT, Expand);
749 setOperationAction(ISD::SREM, VT, Expand);
750 setOperationAction(ISD::UREM, VT, Expand);
751 setOperationAction(ISD::EXTRACT_VECTOR_ELT, VT,Expand);
752 setOperationAction(ISD::INSERT_VECTOR_ELT, VT, Expand);
753 setOperationAction(ISD::EXTRACT_SUBVECTOR, VT,Expand);
754 setOperationAction(ISD::INSERT_SUBVECTOR, VT,Expand);
755 setOperationAction(ISD::FMA, VT, Expand);
756 setOperationAction(ISD::FFLOOR, VT, Expand);
757 setOperationAction(ISD::FCEIL, VT, Expand);
758 setOperationAction(ISD::FTRUNC, VT, Expand);
759 setOperationAction(ISD::FRINT, VT, Expand);
760 setOperationAction(ISD::FNEARBYINT, VT, Expand);
761 setOperationAction(ISD::SMUL_LOHI, VT, Expand);
762 setOperationAction(ISD::MULHS, VT, Expand);
763 setOperationAction(ISD::UMUL_LOHI, VT, Expand);
764 setOperationAction(ISD::MULHU, VT, Expand);
765 setOperationAction(ISD::SDIVREM, VT, Expand);
766 setOperationAction(ISD::UDIVREM, VT, Expand);
767 setOperationAction(ISD::CTPOP, VT, Expand);
768 setOperationAction(ISD::CTTZ, VT, Expand);
769 setOperationAction(ISD::CTLZ, VT, Expand);
770 setOperationAction(ISD::ROTL, VT, Expand);
771 setOperationAction(ISD::ROTR, VT, Expand);
772 setOperationAction(ISD::BSWAP, VT, Expand);
773 setOperationAction(ISD::SETCC, VT, Expand);
774 setOperationAction(ISD::FP_TO_UINT, VT, Expand);
775 setOperationAction(ISD::FP_TO_SINT, VT, Expand);
776 setOperationAction(ISD::UINT_TO_FP, VT, Expand);
777 setOperationAction(ISD::SINT_TO_FP, VT, Expand);
778 setOperationAction(ISD::SIGN_EXTEND_INREG, VT,Expand);
779 setOperationAction(ISD::TRUNCATE, VT, Expand);
780 setOperationAction(ISD::SIGN_EXTEND, VT, Expand);
781 setOperationAction(ISD::ZERO_EXTEND, VT, Expand);
782 setOperationAction(ISD::ANY_EXTEND, VT, Expand);
783 setOperationAction(ISD::SELECT_CC, VT, Expand);
784 for (MVT InnerVT : MVT::fixedlen_vector_valuetypes()) {
785 setTruncStoreAction(InnerVT, VT, Expand);
786
787 setLoadExtAction(ISD::SEXTLOAD, InnerVT, VT, Expand);
788 setLoadExtAction(ISD::ZEXTLOAD, InnerVT, VT, Expand);
789
790 // N.b. ISD::EXTLOAD legality is basically ignored except for i1-like
791 // types, we have to deal with them whether we ask for Expansion or not.
792 // Setting Expand causes its own optimisation problems though, so leave
793 // them legal.
794 if (VT.getVectorElementType() == MVT::i1)
795 setLoadExtAction(ISD::EXTLOAD, InnerVT, VT, Expand);
796
797 // EXTLOAD for MVT::f16 vectors is not legal because f16 vectors are
798 // split/scalarized right now.
799 if (VT.getVectorElementType() == MVT::f16)
800 setLoadExtAction(ISD::EXTLOAD, InnerVT, VT, Expand);
801 }
802 }
803
804 // FIXME: In order to prevent SSE instructions being expanded to MMX ones
805 // with -msoft-float, disable use of MMX as well.
806 if (!Subtarget.useSoftFloat() && Subtarget.hasMMX()) {
807 addRegisterClass(MVT::x86mmx, &X86::VR64RegClass);
808 // No operations on x86mmx supported, everything uses intrinsics.
809 }
810
811 if (!Subtarget.useSoftFloat() && Subtarget.hasSSE1()) {
812 addRegisterClass(MVT::v4f32, Subtarget.hasVLX() ? &X86::VR128XRegClass
813 : &X86::VR128RegClass);
814
815 setOperationAction(ISD::FNEG, MVT::v4f32, Custom);
816 setOperationAction(ISD::FABS, MVT::v4f32, Custom);
817 setOperationAction(ISD::FCOPYSIGN, MVT::v4f32, Custom);
818 setOperationAction(ISD::BUILD_VECTOR, MVT::v4f32, Custom);
819 setOperationAction(ISD::VECTOR_SHUFFLE, MVT::v4f32, Custom);
820 setOperationAction(ISD::VSELECT, MVT::v4f32, Custom);
821 setOperationAction(ISD::EXTRACT_VECTOR_ELT, MVT::v4f32, Custom);
822 setOperationAction(ISD::SELECT, MVT::v4f32, Custom);
823 setOperationAction(ISD::UINT_TO_FP, MVT::v4i32, Custom);
824
825 setOperationAction(ISD::LOAD, MVT::v2f32, Custom);
826 setOperationAction(ISD::STORE, MVT::v2f32, Custom);
827
828 setOperationAction(ISD::STRICT_FP_ROUND, MVT::v4f32, Custom);
829 }
830
831 if (!Subtarget.useSoftFloat() && Subtarget.hasSSE2()) {
832 addRegisterClass(MVT::v2f64, Subtarget.hasVLX() ? &X86::VR128XRegClass
833 : &X86::VR128RegClass);
834
835 // FIXME: Unfortunately, -soft-float and -no-implicit-float mean XMM
836 // registers cannot be used even for integer operations.
837 addRegisterClass(MVT::v16i8, Subtarget.hasVLX() ? &X86::VR128XRegClass
838 : &X86::VR128RegClass);
839 addRegisterClass(MVT::v8i16, Subtarget.hasVLX() ? &X86::VR128XRegClass
840 : &X86::VR128RegClass);
841 addRegisterClass(MVT::v4i32, Subtarget.hasVLX() ? &X86::VR128XRegClass
842 : &X86::VR128RegClass);
843 addRegisterClass(MVT::v2i64, Subtarget.hasVLX() ? &X86::VR128XRegClass
844 : &X86::VR128RegClass);
845
846 for (auto VT : { MVT::v2i8, MVT::v4i8, MVT::v8i8,
847 MVT::v2i16, MVT::v4i16, MVT::v2i32 }) {
848 setOperationAction(ISD::SDIV, VT, Custom);
849 setOperationAction(ISD::SREM, VT, Custom);
850 setOperationAction(ISD::UDIV, VT, Custom);
851 setOperationAction(ISD::UREM, VT, Custom);
852 }
853
854 setOperationAction(ISD::MUL, MVT::v2i8, Custom);
855 setOperationAction(ISD::MUL, MVT::v4i8, Custom);
856 setOperationAction(ISD::MUL, MVT::v8i8, Custom);
857
858 setOperationAction(ISD::MUL, MVT::v16i8, Custom);
859 setOperationAction(ISD::MUL, MVT::v4i32, Custom);
860 setOperationAction(ISD::MUL, MVT::v2i64, Custom);
861 setOperationAction(ISD::MULHU, MVT::v4i32, Custom);
862 setOperationAction(ISD::MULHS, MVT::v4i32, Custom);
863 setOperationAction(ISD::MULHU, MVT::v16i8, Custom);
864 setOperationAction(ISD::MULHS, MVT::v16i8, Custom);
865 setOperationAction(ISD::MULHU, MVT::v8i16, Legal);
866 setOperationAction(ISD::MULHS, MVT::v8i16, Legal);
867 setOperationAction(ISD::MUL, MVT::v8i16, Legal);
868 setOperationAction(ISD::FNEG, MVT::v2f64, Custom);
869 setOperationAction(ISD::FABS, MVT::v2f64, Custom);
870 setOperationAction(ISD::FCOPYSIGN, MVT::v2f64, Custom);
871
872 for (auto VT : { MVT::v16i8, MVT::v8i16, MVT::v4i32, MVT::v2i64 }) {
873 setOperationAction(ISD::SMAX, VT, VT == MVT::v8i16 ? Legal : Custom);
874 setOperationAction(ISD::SMIN, VT, VT == MVT::v8i16 ? Legal : Custom);
875 setOperationAction(ISD::UMAX, VT, VT == MVT::v16i8 ? Legal : Custom);
876 setOperationAction(ISD::UMIN, VT, VT == MVT::v16i8 ? Legal : Custom);
877 }
878
879 setOperationAction(ISD::UADDSAT, MVT::v16i8, Legal);
880 setOperationAction(ISD::SADDSAT, MVT::v16i8, Legal);
881 setOperationAction(ISD::USUBSAT, MVT::v16i8, Legal);
882 setOperationAction(ISD::SSUBSAT, MVT::v16i8, Legal);
883 setOperationAction(ISD::UADDSAT, MVT::v8i16, Legal);
884 setOperationAction(ISD::SADDSAT, MVT::v8i16, Legal);
885 setOperationAction(ISD::USUBSAT, MVT::v8i16, Legal);
886 setOperationAction(ISD::SSUBSAT, MVT::v8i16, Legal);
887 setOperationAction(ISD::UADDSAT, MVT::v4i32, Custom);
888 setOperationAction(ISD::USUBSAT, MVT::v4i32, Custom);
889 setOperationAction(ISD::UADDSAT, MVT::v2i64, Custom);
890 setOperationAction(ISD::USUBSAT, MVT::v2i64, Custom);
891
892 setOperationAction(ISD::INSERT_VECTOR_ELT, MVT::v8i16, Custom);
893 setOperationAction(ISD::INSERT_VECTOR_ELT, MVT::v4i32, Custom);
894 setOperationAction(ISD::INSERT_VECTOR_ELT, MVT::v4f32, Custom);
895
896 for (auto VT : { MVT::v16i8, MVT::v8i16, MVT::v4i32, MVT::v2i64 }) {
897 setOperationAction(ISD::SETCC, VT, Custom);
898 setOperationAction(ISD::CTPOP, VT, Custom);
899 setOperationAction(ISD::ABS, VT, Custom);
900
901 // The condition codes aren't legal in SSE/AVX and under AVX512 we use
902 // setcc all the way to isel and prefer SETGT in some isel patterns.
903 setCondCodeAction(ISD::SETLT, VT, Custom);
904 setCondCodeAction(ISD::SETLE, VT, Custom);
905 }
906
907 for (auto VT : { MVT::v16i8, MVT::v8i16, MVT::v4i32 }) {
908 setOperationAction(ISD::SCALAR_TO_VECTOR, VT, Custom);
909 setOperationAction(ISD::BUILD_VECTOR, VT, Custom);
910 setOperationAction(ISD::VECTOR_SHUFFLE, VT, Custom);
911 setOperationAction(ISD::VSELECT, VT, Custom);
912 setOperationAction(ISD::EXTRACT_VECTOR_ELT, VT, Custom);
913 }
914
915 for (auto VT : { MVT::v2f64, MVT::v2i64 }) {
916 setOperationAction(ISD::BUILD_VECTOR, VT, Custom);
917 setOperationAction(ISD::VECTOR_SHUFFLE, VT, Custom);
918 setOperationAction(ISD::VSELECT, VT, Custom);
919
920 if (VT == MVT::v2i64 && !Subtarget.is64Bit())
921 continue;
922
923 setOperationAction(ISD::INSERT_VECTOR_ELT, VT, Custom);
924 setOperationAction(ISD::EXTRACT_VECTOR_ELT, VT, Custom);
925 }
926
927 // Custom lower v2i64 and v2f64 selects.
928 setOperationAction(ISD::SELECT, MVT::v2f64, Custom);
929 setOperationAction(ISD::SELECT, MVT::v2i64, Custom);
930 setOperationAction(ISD::SELECT, MVT::v4i32, Custom);
931 setOperationAction(ISD::SELECT, MVT::v8i16, Custom);
932 setOperationAction(ISD::SELECT, MVT::v16i8, Custom);
933
934 setOperationAction(ISD::FP_TO_SINT, MVT::v4i32, Legal);
935 setOperationAction(ISD::FP_TO_SINT, MVT::v2i32, Custom);
936
937 // Custom legalize these to avoid over promotion or custom promotion.
938 setOperationAction(ISD::FP_TO_SINT, MVT::v2i8, Custom);
939 setOperationAction(ISD::FP_TO_SINT, MVT::v4i8, Custom);
940 setOperationAction(ISD::FP_TO_SINT, MVT::v8i8, Custom);
941 setOperationAction(ISD::FP_TO_SINT, MVT::v2i16, Custom);
942 setOperationAction(ISD::FP_TO_SINT, MVT::v4i16, Custom);
943 setOperationAction(ISD::FP_TO_UINT, MVT::v2i8, Custom);
944 setOperationAction(ISD::FP_TO_UINT, MVT::v4i8, Custom);
945 setOperationAction(ISD::FP_TO_UINT, MVT::v8i8, Custom);
946 setOperationAction(ISD::FP_TO_UINT, MVT::v2i16, Custom);
947 setOperationAction(ISD::FP_TO_UINT, MVT::v4i16, Custom);
948
949 // By marking FP_TO_SINT v8i16 as Custom, we will trick type legalization into
950 // promoting v8i8 FP_TO_UINT into FP_TO_SINT. When the v8i16 FP_TO_SINT is
951 // split again based on the input type, this will cause an AssertSExt i16 to
952 // be emitted instead of an AssertZExt. This will allow packssdw followed by
953 // packuswb to be used to truncate to v8i8. This is necessary since packusdw
954 // isn't available until sse4.1.
955 setOperationAction(ISD::FP_TO_SINT, MVT::v8i16, Custom);
956
957 setOperationAction(ISD::SINT_TO_FP, MVT::v4i32, Legal);
958 setOperationAction(ISD::SINT_TO_FP, MVT::v2i32, Custom);
959
960 setOperationAction(ISD::UINT_TO_FP, MVT::v2i32, Custom);
961
962 // Fast v2f32 UINT_TO_FP( v2i32 ) custom conversion.
963 setOperationAction(ISD::UINT_TO_FP, MVT::v2f32, Custom);
964
965 setOperationAction(ISD::FP_EXTEND, MVT::v2f32, Custom);
966 setOperationAction(ISD::FP_ROUND, MVT::v2f32, Custom);
967
968 // We want to legalize this to an f64 load rather than an i64 load on
969 // 64-bit targets and two 32-bit loads on a 32-bit target. Similar for
970 // store.
971 setOperationAction(ISD::LOAD, MVT::v2i32, Custom);
972 setOperationAction(ISD::LOAD, MVT::v4i16, Custom);
973 setOperationAction(ISD::LOAD, MVT::v8i8, Custom);
974 setOperationAction(ISD::STORE, MVT::v2i32, Custom);
975 setOperationAction(ISD::STORE, MVT::v4i16, Custom);
976 setOperationAction(ISD::STORE, MVT::v8i8, Custom);
977
978 setOperationAction(ISD::BITCAST, MVT::v2i32, Custom);
979 setOperationAction(ISD::BITCAST, MVT::v4i16, Custom);
980 setOperationAction(ISD::BITCAST, MVT::v8i8, Custom);
981 if (!Subtarget.hasAVX512())
982 setOperationAction(ISD::BITCAST, MVT::v16i1, Custom);
983
984 setOperationAction(ISD::SIGN_EXTEND_VECTOR_INREG, MVT::v2i64, Custom);
985 setOperationAction(ISD::SIGN_EXTEND_VECTOR_INREG, MVT::v4i32, Custom);
986 setOperationAction(ISD::SIGN_EXTEND_VECTOR_INREG, MVT::v8i16, Custom);
987
988 setOperationAction(ISD::SIGN_EXTEND, MVT::v4i64, Custom);
989
990 setOperationAction(ISD::TRUNCATE, MVT::v2i8, Custom);
991 setOperationAction(ISD::TRUNCATE, MVT::v2i16, Custom);
992 setOperationAction(ISD::TRUNCATE, MVT::v2i32, Custom);
993 setOperationAction(ISD::TRUNCATE, MVT::v4i8, Custom);
994 setOperationAction(ISD::TRUNCATE, MVT::v4i16, Custom);
995 setOperationAction(ISD::TRUNCATE, MVT::v8i8, Custom);
996
997 // In the customized shift lowering, the legal v4i32/v2i64 cases
998 // in AVX2 will be recognized.
999 for (auto VT : { MVT::v16i8, MVT::v8i16, MVT::v4i32, MVT::v2i64 }) {
1000 setOperationAction(ISD::SRL, VT, Custom);
1001 setOperationAction(ISD::SHL, VT, Custom);
1002 setOperationAction(ISD::SRA, VT, Custom);
1003 }
1004
1005 setOperationAction(ISD::ROTL, MVT::v4i32, Custom);
1006 setOperationAction(ISD::ROTL, MVT::v8i16, Custom);
1007
1008 // With AVX512, expanding (and promoting the shifts) is better.
1009 if (!Subtarget.hasAVX512())
1010 setOperationAction(ISD::ROTL, MVT::v16i8, Custom);
1011 }
1012
1013 if (!Subtarget.useSoftFloat() && Subtarget.hasSSSE3()) {
1014 setOperationAction(ISD::ABS, MVT::v16i8, Legal);
1015 setOperationAction(ISD::ABS, MVT::v8i16, Legal);
1016 setOperationAction(ISD::ABS, MVT::v4i32, Legal);
1017 setOperationAction(ISD::BITREVERSE, MVT::v16i8, Custom);
1018 setOperationAction(ISD::CTLZ, MVT::v16i8, Custom);
1019 setOperationAction(ISD::CTLZ, MVT::v8i16, Custom);
1020 setOperationAction(ISD::CTLZ, MVT::v4i32, Custom);
1021 setOperationAction(ISD::CTLZ, MVT::v2i64, Custom);
1022
1023 // These might be better off as horizontal vector ops.
1024 setOperationAction(ISD::ADD, MVT::i16, Custom);
1025 setOperationAction(ISD::ADD, MVT::i32, Custom);
1026 setOperationAction(ISD::SUB, MVT::i16, Custom);
1027 setOperationAction(ISD::SUB, MVT::i32, Custom);
1028 }
1029
1030 if (!Subtarget.useSoftFloat() && Subtarget.hasSSE41()) {
1031 for (MVT RoundedTy : {MVT::f32, MVT::f64, MVT::v4f32, MVT::v2f64}) {
1032 setOperationAction(ISD::FFLOOR, RoundedTy, Legal);
1033 setOperationAction(ISD::FCEIL, RoundedTy, Legal);
1034 setOperationAction(ISD::FTRUNC, RoundedTy, Legal);
1035 setOperationAction(ISD::FRINT, RoundedTy, Legal);
1036 setOperationAction(ISD::FNEARBYINT, RoundedTy, Legal);
1037 }
1038
1039 setOperationAction(ISD::SMAX, MVT::v16i8, Legal);
1040 setOperationAction(ISD::SMAX, MVT::v4i32, Legal);
1041 setOperationAction(ISD::UMAX, MVT::v8i16, Legal);
1042 setOperationAction(ISD::UMAX, MVT::v4i32, Legal);
1043 setOperationAction(ISD::SMIN, MVT::v16i8, Legal);
1044 setOperationAction(ISD::SMIN, MVT::v4i32, Legal);
1045 setOperationAction(ISD::UMIN, MVT::v8i16, Legal);
1046 setOperationAction(ISD::UMIN, MVT::v4i32, Legal);
1047
1048 // FIXME: Do we need to handle scalar-to-vector here?
1049 setOperationAction(ISD::MUL, MVT::v4i32, Legal);
1050
1051 // We directly match byte blends in the backend as they match the VSELECT
1052 // condition form.
1053 setOperationAction(ISD::VSELECT, MVT::v16i8, Legal);
1054
1055 // SSE41 brings specific instructions for doing vector sign extend even in
1056 // cases where we don't have SRA.
1057 for (auto VT : { MVT::v8i16, MVT::v4i32, MVT::v2i64 }) {
1058 setOperationAction(ISD::SIGN_EXTEND_VECTOR_INREG, VT, Legal);
1059 setOperationAction(ISD::ZERO_EXTEND_VECTOR_INREG, VT, Legal);
1060 }
1061
1062 // SSE41 also has vector sign/zero extending loads, PMOV[SZ]X
1063 for (auto LoadExtOp : { ISD::SEXTLOAD, ISD::ZEXTLOAD }) {
1064 setLoadExtAction(LoadExtOp, MVT::v8i16, MVT::v8i8, Legal);
1065 setLoadExtAction(LoadExtOp, MVT::v4i32, MVT::v4i8, Legal);
1066 setLoadExtAction(LoadExtOp, MVT::v2i64, MVT::v2i8, Legal);
1067 setLoadExtAction(LoadExtOp, MVT::v4i32, MVT::v4i16, Legal);
1068 setLoadExtAction(LoadExtOp, MVT::v2i64, MVT::v2i16, Legal);
1069 setLoadExtAction(LoadExtOp, MVT::v2i64, MVT::v2i32, Legal);
1070 }
1071
1072 // i8 vectors are custom because the source register and source
1073 // memory operand types are not the same width.
1074 setOperationAction(ISD::INSERT_VECTOR_ELT, MVT::v16i8, Custom);
1075 }
1076
1077 if (!Subtarget.useSoftFloat() && Subtarget.hasXOP()) {
1078 for (auto VT : { MVT::v16i8, MVT::v8i16, MVT::v4i32, MVT::v2i64,
1079 MVT::v32i8, MVT::v16i16, MVT::v8i32, MVT::v4i64 })
1080 setOperationAction(ISD::ROTL, VT, Custom);
1081
1082 // XOP can efficiently perform BITREVERSE with VPPERM.
1083 for (auto VT : { MVT::i8, MVT::i16, MVT::i32, MVT::i64 })
1084 setOperationAction(ISD::BITREVERSE, VT, Custom);
1085
1086 for (auto VT : { MVT::v16i8, MVT::v8i16, MVT::v4i32, MVT::v2i64,
1087 MVT::v32i8, MVT::v16i16, MVT::v8i32, MVT::v4i64 })
1088 setOperationAction(ISD::BITREVERSE, VT, Custom);
1089 }
1090
1091 if (!Subtarget.useSoftFloat() && Subtarget.hasAVX()) {
1092 bool HasInt256 = Subtarget.hasInt256();
1093
1094 addRegisterClass(MVT::v32i8, Subtarget.hasVLX() ? &X86::VR256XRegClass
1095 : &X86::VR256RegClass);
1096 addRegisterClass(MVT::v16i16, Subtarget.hasVLX() ? &X86::VR256XRegClass
1097 : &X86::VR256RegClass);
1098 addRegisterClass(MVT::v8i32, Subtarget.hasVLX() ? &X86::VR256XRegClass
1099 : &X86::VR256RegClass);
1100 addRegisterClass(MVT::v8f32, Subtarget.hasVLX() ? &X86::VR256XRegClass
1101 : &X86::VR256RegClass);
1102 addRegisterClass(MVT::v4i64, Subtarget.hasVLX() ? &X86::VR256XRegClass
1103 : &X86::VR256RegClass);
1104 addRegisterClass(MVT::v4f64, Subtarget.hasVLX() ? &X86::VR256XRegClass
1105 : &X86::VR256RegClass);
1106
1107 for (auto VT : { MVT::v8f32, MVT::v4f64 }) {
1108 setOperationAction(ISD::FFLOOR, VT, Legal);
1109 setOperationAction(ISD::FCEIL, VT, Legal);
1110 setOperationAction(ISD::FTRUNC, VT, Legal);
1111 setOperationAction(ISD::FRINT, VT, Legal);
1112 setOperationAction(ISD::FNEARBYINT, VT, Legal);
1113 setOperationAction(ISD::FNEG, VT, Custom);
1114 setOperationAction(ISD::FABS, VT, Custom);
1115 setOperationAction(ISD::FCOPYSIGN, VT, Custom);
1116 }
1117
1118 // (fp_to_int:v8i16 (v8f32 ..)) requires the result type to be promoted
1119 // even though v8i16 is a legal type.
1120 setOperationPromotedToType(ISD::FP_TO_SINT, MVT::v8i16, MVT::v8i32);
1121 setOperationPromotedToType(ISD::FP_TO_UINT, MVT::v8i16, MVT::v8i32);
1122 setOperationAction(ISD::FP_TO_SINT, MVT::v8i32, Legal);
1123
1124 setOperationAction(ISD::SINT_TO_FP, MVT::v8i32, Legal);
1125
1126 setOperationAction(ISD::STRICT_FP_ROUND, MVT::v8f32, Custom);
1127
1128 if (!Subtarget.hasAVX512())
1129 setOperationAction(ISD::BITCAST, MVT::v32i1, Custom);
1130
1131 // In the customized shift lowering, the legal v8i32/v4i64 cases
1132 // in AVX2 will be recognized.
1133 for (auto VT : { MVT::v32i8, MVT::v16i16, MVT::v8i32, MVT::v4i64 }) {
1134 setOperationAction(ISD::SRL, VT, Custom);
1135 setOperationAction(ISD::SHL, VT, Custom);
1136 setOperationAction(ISD::SRA, VT, Custom);
1137 }
1138
1139 // These types need custom splitting if their input is a 128-bit vector.
1140 setOperationAction(ISD::SIGN_EXTEND, MVT::v8i64, Custom);
1141 setOperationAction(ISD::SIGN_EXTEND, MVT::v16i32, Custom);
1142 setOperationAction(ISD::ZERO_EXTEND, MVT::v8i64, Custom);
1143 setOperationAction(ISD::ZERO_EXTEND, MVT::v16i32, Custom);
1144
1145 setOperationAction(ISD::ROTL, MVT::v8i32, Custom);
1146 setOperationAction(ISD::ROTL, MVT::v16i16, Custom);
1147
1148 // With BWI, expanding (and promoting the shifts) is better.
1149 if (!Subtarget.hasBWI())
1150 setOperationAction(ISD::ROTL, MVT::v32i8, Custom);
1151
1152 setOperationAction(ISD::SELECT, MVT::v4f64, Custom);
1153 setOperationAction(ISD::SELECT, MVT::v4i64, Custom);
1154 setOperationAction(ISD::SELECT, MVT::v8i32, Custom);
1155 setOperationAction(ISD::SELECT, MVT::v16i16, Custom);
1156 setOperationAction(ISD::SELECT, MVT::v32i8, Custom);
1157 setOperationAction(ISD::SELECT, MVT::v8f32, Custom);
1158
1159 for (auto VT : { MVT::v16i16, MVT::v8i32, MVT::v4i64 }) {
1160 setOperationAction(ISD::SIGN_EXTEND, VT, Custom);
1161 setOperationAction(ISD::ZERO_EXTEND, VT, Custom);
1162 setOperationAction(ISD::ANY_EXTEND, VT, Custom);
1163 }
1164
1165 setOperationAction(ISD::TRUNCATE, MVT::v16i8, Custom);
1166 setOperationAction(ISD::TRUNCATE, MVT::v8i16, Custom);
1167 setOperationAction(ISD::TRUNCATE, MVT::v4i32, Custom);
1168 setOperationAction(ISD::BITREVERSE, MVT::v32i8, Custom);
1169
1170 for (auto VT : { MVT::v32i8, MVT::v16i16, MVT::v8i32, MVT::v4i64 }) {
1171 setOperationAction(ISD::SETCC, VT, Custom);
1172 setOperationAction(ISD::CTPOP, VT, Custom);
1173 setOperationAction(ISD::CTLZ, VT, Custom);
1174
1175 // The condition codes aren't legal in SSE/AVX and under AVX512 we use
1176 // setcc all the way to isel and prefer SETGT in some isel patterns.
1177 setCondCodeAction(ISD::SETLT, VT, Custom);
1178 setCondCodeAction(ISD::SETLE, VT, Custom);
1179 }
1180
1181 if (Subtarget.hasAnyFMA()) {
1182 for (auto VT : { MVT::f32, MVT::f64, MVT::v4f32, MVT::v8f32,
1183 MVT::v2f64, MVT::v4f64 })
1184 setOperationAction(ISD::FMA, VT, Legal);
1185 }
1186
1187 for (auto VT : { MVT::v32i8, MVT::v16i16, MVT::v8i32, MVT::v4i64 }) {
1188 setOperationAction(ISD::ADD, VT, HasInt256 ? Legal : Custom);
1189 setOperationAction(ISD::SUB, VT, HasInt256 ? Legal : Custom);
1190 }
1191
1192 setOperationAction(ISD::MUL, MVT::v4i64, Custom);
1193 setOperationAction(ISD::MUL, MVT::v8i32, HasInt256 ? Legal : Custom);
1194 setOperationAction(ISD::MUL, MVT::v16i16, HasInt256 ? Legal : Custom);
1195 setOperationAction(ISD::MUL, MVT::v32i8, Custom);
1196
1197 setOperationAction(ISD::MULHU, MVT::v8i32, Custom);
1198 setOperationAction(ISD::MULHS, MVT::v8i32, Custom);
1199 setOperationAction(ISD::MULHU, MVT::v16i16, HasInt256 ? Legal : Custom);
1200 setOperationAction(ISD::MULHS, MVT::v16i16, HasInt256 ? Legal : Custom);
1201 setOperationAction(ISD::MULHU, MVT::v32i8, Custom);
1202 setOperationAction(ISD::MULHS, MVT::v32i8, Custom);
1203
1204 setOperationAction(ISD::ABS, MVT::v4i64, Custom);
1205 setOperationAction(ISD::SMAX, MVT::v4i64, Custom);
1206 setOperationAction(ISD::UMAX, MVT::v4i64, Custom);
1207 setOperationAction(ISD::SMIN, MVT::v4i64, Custom);
1208 setOperationAction(ISD::UMIN, MVT::v4i64, Custom);
1209
1210 setOperationAction(ISD::UADDSAT, MVT::v32i8, HasInt256 ? Legal : Custom);
1211 setOperationAction(ISD::SADDSAT, MVT::v32i8, HasInt256 ? Legal : Custom);
1212 setOperationAction(ISD::USUBSAT, MVT::v32i8, HasInt256 ? Legal : Custom);
1213 setOperationAction(ISD::SSUBSAT, MVT::v32i8, HasInt256 ? Legal : Custom);
1214 setOperationAction(ISD::UADDSAT, MVT::v16i16, HasInt256 ? Legal : Custom);
1215 setOperationAction(ISD::SADDSAT, MVT::v16i16, HasInt256 ? Legal : Custom);
1216 setOperationAction(ISD::USUBSAT, MVT::v16i16, HasInt256 ? Legal : Custom);
1217 setOperationAction(ISD::SSUBSAT, MVT::v16i16, HasInt256 ? Legal : Custom);
1218
1219 for (auto VT : { MVT::v32i8, MVT::v16i16, MVT::v8i32 }) {
1220 setOperationAction(ISD::ABS, VT, HasInt256 ? Legal : Custom);
1221 setOperationAction(ISD::SMAX, VT, HasInt256 ? Legal : Custom);
1222 setOperationAction(ISD::UMAX, VT, HasInt256 ? Legal : Custom);
1223 setOperationAction(ISD::SMIN, VT, HasInt256 ? Legal : Custom);
1224 setOperationAction(ISD::UMIN, VT, HasInt256 ? Legal : Custom);
1225 }
1226
1227 for (auto VT : {MVT::v16i16, MVT::v8i32, MVT::v4i64}) {
1228 setOperationAction(ISD::SIGN_EXTEND_VECTOR_INREG, VT, Custom);
1229 setOperationAction(ISD::ZERO_EXTEND_VECTOR_INREG, VT, Custom);
1230 }
1231
1232 if (HasInt256) {
1233 // The custom lowering for UINT_TO_FP for v8i32 becomes interesting
1234 // when we have a 256-bit-wide blend with immediate.
1235 setOperationAction(ISD::UINT_TO_FP, MVT::v8i32, Custom);
1236
1237 // AVX2 also has wider vector sign/zero extending loads, VPMOV[SZ]X
1238 for (auto LoadExtOp : { ISD::SEXTLOAD, ISD::ZEXTLOAD }) {
1239 setLoadExtAction(LoadExtOp, MVT::v16i16, MVT::v16i8, Legal);
1240 setLoadExtAction(LoadExtOp, MVT::v8i32, MVT::v8i8, Legal);
1241 setLoadExtAction(LoadExtOp, MVT::v4i64, MVT::v4i8, Legal);
1242 setLoadExtAction(LoadExtOp, MVT::v8i32, MVT::v8i16, Legal);
1243 setLoadExtAction(LoadExtOp, MVT::v4i64, MVT::v4i16, Legal);
1244 setLoadExtAction(LoadExtOp, MVT::v4i64, MVT::v4i32, Legal);
1245 }
1246 }
1247
1248 for (auto VT : { MVT::v4i32, MVT::v8i32, MVT::v2i64, MVT::v4i64,
1249 MVT::v4f32, MVT::v8f32, MVT::v2f64, MVT::v4f64 }) {
1250 setOperationAction(ISD::MLOAD, VT, Subtarget.hasVLX() ? Legal : Custom);
1251 setOperationAction(ISD::MSTORE, VT, Legal);
1252 }
1253
1254 // Extract subvector is special because the value type
1255 // (result) is 128-bit but the source is 256-bit wide.
1256 for (auto VT : { MVT::v16i8, MVT::v8i16, MVT::v4i32, MVT::v2i64,
1257 MVT::v4f32, MVT::v2f64 }) {
1258 setOperationAction(ISD::EXTRACT_SUBVECTOR, VT, Legal);
1259 }
1260
1261 // Custom lower several nodes for 256-bit types.
1262 for (MVT VT : { MVT::v32i8, MVT::v16i16, MVT::v8i32, MVT::v4i64,
1263 MVT::v8f32, MVT::v4f64 }) {
1264 setOperationAction(ISD::BUILD_VECTOR, VT, Custom);
1265 setOperationAction(ISD::VECTOR_SHUFFLE, VT, Custom);
1266 setOperationAction(ISD::VSELECT, VT, Custom);
1267 setOperationAction(ISD::INSERT_VECTOR_ELT, VT, Custom);
1268 setOperationAction(ISD::EXTRACT_VECTOR_ELT, VT, Custom);
1269 setOperationAction(ISD::SCALAR_TO_VECTOR, VT, Custom);
1270 setOperationAction(ISD::INSERT_SUBVECTOR, VT, Legal);
1271 setOperationAction(ISD::CONCAT_VECTORS, VT, Custom);
1272 setOperationAction(ISD::STORE, VT, Custom);
1273 }
1274
1275 if (HasInt256) {
1276 setOperationAction(ISD::VSELECT, MVT::v32i8, Legal);
1277
1278 // Custom legalize 2x32 to get a little better code.
1279 setOperationAction(ISD::MGATHER, MVT::v2f32, Custom);
1280 setOperationAction(ISD::MGATHER, MVT::v2i32, Custom);
1281
1282 for (auto VT : { MVT::v4i32, MVT::v8i32, MVT::v2i64, MVT::v4i64,
1283 MVT::v4f32, MVT::v8f32, MVT::v2f64, MVT::v4f64 })
1284 setOperationAction(ISD::MGATHER, VT, Custom);
1285 }
1286 }
1287
1288 // This block controls legalization of the mask vector sizes that are
1289 // available with AVX512. 512-bit vectors are in a separate block controlled
1290 // by useAVX512Regs.
1291 if (!Subtarget.useSoftFloat() && Subtarget.hasAVX512()) {
1292 addRegisterClass(MVT::v1i1, &X86::VK1RegClass);
1293 addRegisterClass(MVT::v2i1, &X86::VK2RegClass);
1294 addRegisterClass(MVT::v4i1, &X86::VK4RegClass);
1295 addRegisterClass(MVT::v8i1, &X86::VK8RegClass);
1296 addRegisterClass(MVT::v16i1, &X86::VK16RegClass);
1297
1298 setOperationAction(ISD::SELECT, MVT::v1i1, Custom);
1299 setOperationAction(ISD::EXTRACT_VECTOR_ELT, MVT::v1i1, Custom);
1300 setOperationAction(ISD::BUILD_VECTOR, MVT::v1i1, Custom);
1301
1302 setOperationPromotedToType(ISD::FP_TO_SINT, MVT::v8i1, MVT::v8i32);
1303 setOperationPromotedToType(ISD::FP_TO_UINT, MVT::v8i1, MVT::v8i32);
1304 setOperationPromotedToType(ISD::FP_TO_SINT, MVT::v4i1, MVT::v4i32);
1305 setOperationPromotedToType(ISD::FP_TO_UINT, MVT::v4i1, MVT::v4i32);
1306 setOperationAction(ISD::FP_TO_SINT, MVT::v2i1, Custom);
1307 setOperationAction(ISD::FP_TO_UINT, MVT::v2i1, Custom);
1308
1309 // There is no byte sized k-register load or store without AVX512DQ.
1310 if (!Subtarget.hasDQI()) {
1311 setOperationAction(ISD::LOAD, MVT::v1i1, Custom);
1312 setOperationAction(ISD::LOAD, MVT::v2i1, Custom);
1313 setOperationAction(ISD::LOAD, MVT::v4i1, Custom);
1314 setOperationAction(ISD::LOAD, MVT::v8i1, Custom);
1315
1316 setOperationAction(ISD::STORE, MVT::v1i1, Custom);
1317 setOperationAction(ISD::STORE, MVT::v2i1, Custom);
1318 setOperationAction(ISD::STORE, MVT::v4i1, Custom);
1319 setOperationAction(ISD::STORE, MVT::v8i1, Custom);
1320 }
1321
1322 // Extends of v16i1/v8i1/v4i1/v2i1 to 128-bit vectors.
1323 for (auto VT : { MVT::v16i8, MVT::v8i16, MVT::v4i32, MVT::v2i64 }) {
1324 setOperationAction(ISD::SIGN_EXTEND, VT, Custom);
1325 setOperationAction(ISD::ZERO_EXTEND, VT, Custom);
1326 setOperationAction(ISD::ANY_EXTEND, VT, Custom);
1327 }
1328
1329 for (auto VT : { MVT::v2i1, MVT::v4i1, MVT::v8i1, MVT::v16i1 }) {
1330 setOperationAction(ISD::ADD, VT, Custom);
1331 setOperationAction(ISD::SUB, VT, Custom);
1332 setOperationAction(ISD::MUL, VT, Custom);
1333 setOperationAction(ISD::SETCC, VT, Custom);
1334 setOperationAction(ISD::SELECT, VT, Custom);
1335 setOperationAction(ISD::TRUNCATE, VT, Custom);
1336 setOperationAction(ISD::UADDSAT, VT, Custom);
1337 setOperationAction(ISD::SADDSAT, VT, Custom);
1338 setOperationAction(ISD::USUBSAT, VT, Custom);
1339 setOperationAction(ISD::SSUBSAT, VT, Custom);
1340
1341 setOperationAction(ISD::BUILD_VECTOR, VT, Custom);
1342 setOperationAction(ISD::CONCAT_VECTORS, VT, Custom);
1343 setOperationAction(ISD::EXTRACT_VECTOR_ELT, VT, Custom);
1344 setOperationAction(ISD::INSERT_SUBVECTOR, VT, Custom);
1345 setOperationAction(ISD::INSERT_VECTOR_ELT, VT, Custom);
1346 setOperationAction(ISD::VECTOR_SHUFFLE, VT, Custom);
1347 setOperationAction(ISD::VSELECT, VT, Expand);
1348 }
1349
1350 for (auto VT : { MVT::v1i1, MVT::v2i1, MVT::v4i1, MVT::v8i1 })
1351 setOperationAction(ISD::EXTRACT_SUBVECTOR, VT, Custom);
1352 }
1353
1354 // This block controls legalization for 512-bit operations with 32/64 bit
1355 // elements. 512-bits can be disabled based on prefer-vector-width and
1356 // required-vector-width function attributes.
1357 if (!Subtarget.useSoftFloat() && Subtarget.useAVX512Regs()) {
1358 addRegisterClass(MVT::v16i32, &X86::VR512RegClass);
1359 addRegisterClass(MVT::v16f32, &X86::VR512RegClass);
1360 addRegisterClass(MVT::v8i64, &X86::VR512RegClass);
1361 addRegisterClass(MVT::v8f64, &X86::VR512RegClass);
1362
1363 for (auto ExtType : {ISD::ZEXTLOAD, ISD::SEXTLOAD}) {
1364 setLoadExtAction(ExtType, MVT::v16i32, MVT::v16i8, Legal);
1365 setLoadExtAction(ExtType, MVT::v16i32, MVT::v16i16, Legal);
1366 setLoadExtAction(ExtType, MVT::v8i64, MVT::v8i8, Legal);
1367 setLoadExtAction(ExtType, MVT::v8i64, MVT::v8i16, Legal);
1368 setLoadExtAction(ExtType, MVT::v8i64, MVT::v8i32, Legal);
1369 }
1370
1371 for (MVT VT : { MVT::v16f32, MVT::v8f64 }) {
1372 setOperationAction(ISD::FNEG, VT, Custom);
1373 setOperationAction(ISD::FABS, VT, Custom);
1374 setOperationAction(ISD::FMA, VT, Legal);
1375 setOperationAction(ISD::FCOPYSIGN, VT, Custom);
1376 }
1377
1378 setOperationAction(ISD::FP_TO_SINT, MVT::v16i32, Legal);
1379 setOperationPromotedToType(ISD::FP_TO_SINT, MVT::v16i16, MVT::v16i32);
1380 setOperationPromotedToType(ISD::FP_TO_SINT, MVT::v16i8, MVT::v16i32);
1381 setOperationPromotedToType(ISD::FP_TO_SINT, MVT::v16i1, MVT::v16i32);
1382 setOperationAction(ISD::FP_TO_UINT, MVT::v16i32, Legal);
1383 setOperationPromotedToType(ISD::FP_TO_UINT, MVT::v16i1, MVT::v16i32);
1384 setOperationPromotedToType(ISD::FP_TO_UINT, MVT::v16i8, MVT::v16i32);
1385 setOperationPromotedToType(ISD::FP_TO_UINT, MVT::v16i16, MVT::v16i32);
1386 setOperationAction(ISD::SINT_TO_FP, MVT::v16i32, Legal);
1387 setOperationAction(ISD::UINT_TO_FP, MVT::v16i32, Legal);
1388
1389 setOperationAction(ISD::STRICT_FP_ROUND, MVT::v16f32, Custom);
1390
1391 setTruncStoreAction(MVT::v8i64, MVT::v8i8, Legal);
1392 setTruncStoreAction(MVT::v8i64, MVT::v8i16, Legal);
1393 setTruncStoreAction(MVT::v8i64, MVT::v8i32, Legal);
1394 setTruncStoreAction(MVT::v16i32, MVT::v16i8, Legal);
1395 setTruncStoreAction(MVT::v16i32, MVT::v16i16, Legal);
1396
1397 // With 512-bit vectors and no VLX, we prefer to widen MLOAD/MSTORE
1398 // to 512-bit rather than use the AVX2 instructions so that we can use
1399 // k-masks.
1400 if (!Subtarget.hasVLX()) {
1401 for (auto VT : {MVT::v4i32, MVT::v8i32, MVT::v2i64, MVT::v4i64,
1402 MVT::v4f32, MVT::v8f32, MVT::v2f64, MVT::v4f64}) {
1403 setOperationAction(ISD::MLOAD, VT, Custom);
1404 setOperationAction(ISD::MSTORE, VT, Custom);
1405 }
1406 }
1407
1408 setOperationAction(ISD::TRUNCATE, MVT::v8i32, Custom);
1409 setOperationAction(ISD::TRUNCATE, MVT::v16i16, Custom);
1410 setOperationAction(ISD::ZERO_EXTEND, MVT::v16i32, Custom);
1411 setOperationAction(ISD::ZERO_EXTEND, MVT::v8i64, Custom);
1412 setOperationAction(ISD::ANY_EXTEND, MVT::v16i32, Custom);
1413 setOperationAction(ISD::ANY_EXTEND, MVT::v8i64, Custom);
1414 setOperationAction(ISD::SIGN_EXTEND, MVT::v16i32, Custom);
1415 setOperationAction(ISD::SIGN_EXTEND, MVT::v8i64, Custom);
1416
1417 // Need to custom widen this if we don't have AVX512BW.
1418 setOperationAction(ISD::ANY_EXTEND, MVT::v8i8, Custom);
1419 setOperationAction(ISD::ZERO_EXTEND, MVT::v8i8, Custom);
1420 setOperationAction(ISD::SIGN_EXTEND, MVT::v8i8, Custom);
1421
1422 for (auto VT : { MVT::v16f32, MVT::v8f64 }) {
1423 setOperationAction(ISD::FFLOOR, VT, Legal);
1424 setOperationAction(ISD::FCEIL, VT, Legal);
1425 setOperationAction(ISD::FTRUNC, VT, Legal);
1426 setOperationAction(ISD::FRINT, VT, Legal);
1427 setOperationAction(ISD::FNEARBYINT, VT, Legal);
1428
1429 setOperationAction(ISD::SELECT, VT, Custom);
1430 }
1431
1432 // Without BWI we need to use custom lowering to handle MVT::v64i8 input.
1433 for (auto VT : {MVT::v16i32, MVT::v8i64, MVT::v64i8}) {
1434 setOperationAction(ISD::SIGN_EXTEND_VECTOR_INREG, VT, Custom);
1435 setOperationAction(ISD::ZERO_EXTEND_VECTOR_INREG, VT, Custom);
1436 }
1437
1438 setOperationAction(ISD::CONCAT_VECTORS, MVT::v8f64, Custom);
1439 setOperationAction(ISD::CONCAT_VECTORS, MVT::v8i64, Custom);
1440 setOperationAction(ISD::CONCAT_VECTORS, MVT::v16f32, Custom);
1441 setOperationAction(ISD::CONCAT_VECTORS, MVT::v16i32, Custom);
1442
1443 setOperationAction(ISD::MUL, MVT::v8i64, Custom);
1444 setOperationAction(ISD::MUL, MVT::v16i32, Legal);
1445
1446 setOperationAction(ISD::MULHU, MVT::v16i32, Custom);
1447 setOperationAction(ISD::MULHS, MVT::v16i32, Custom);
1448
1449 for (auto VT : { MVT::v16i32, MVT::v8i64 }) {
1450 setOperationAction(ISD::SMAX, VT, Legal);
1451 setOperationAction(ISD::UMAX, VT, Legal);
1452 setOperationAction(ISD::SMIN, VT, Legal);
1453 setOperationAction(ISD::UMIN, VT, Legal);
1454 setOperationAction(ISD::ABS, VT, Legal);
1455 setOperationAction(ISD::SRL, VT, Custom);
1456 setOperationAction(ISD::SHL, VT, Custom);
1457 setOperationAction(ISD::SRA, VT, Custom);
1458 setOperationAction(ISD::CTPOP, VT, Custom);
1459 setOperationAction(ISD::ROTL, VT, Custom);
1460 setOperationAction(ISD::ROTR, VT, Custom);
1461 setOperationAction(ISD::SETCC, VT, Custom);
1462 setOperationAction(ISD::SELECT, VT, Custom);
1463
1464 // The condition codes aren't legal in SSE/AVX and under AVX512 we use
1465 // setcc all the way to isel and prefer SETGT in some isel patterns.
1466 setCondCodeAction(ISD::SETLT, VT, Custom);
1467 setCondCodeAction(ISD::SETLE, VT, Custom);
1468 }
1469
1470 if (Subtarget.hasDQI()) {
1471 setOperationAction(ISD::SINT_TO_FP, MVT::v8i64, Legal);
1472 setOperationAction(ISD::UINT_TO_FP, MVT::v8i64, Legal);
1473 setOperationAction(ISD::FP_TO_SINT, MVT::v8i64, Legal);
1474 setOperationAction(ISD::FP_TO_UINT, MVT::v8i64, Legal);
1475
1476 setOperationAction(ISD::MUL, MVT::v8i64, Legal);
1477 }
1478
1479 if (Subtarget.hasCDI()) {
1480 // NonVLX sub-targets extend 128/256 vectors to use the 512 version.
1481 for (auto VT : { MVT::v16i32, MVT::v8i64} ) {
1482 setOperationAction(ISD::CTLZ, VT, Legal);
1483 }
1484 } // Subtarget.hasCDI()
1485
1486 if (Subtarget.hasVPOPCNTDQ()) {
1487 for (auto VT : { MVT::v16i32, MVT::v8i64 })
1488 setOperationAction(ISD::CTPOP, VT, Legal);
1489 }
1490
1491 // Extract subvector is special because the value type
1492 // (result) is 256-bit but the source is 512-bit wide.
1493 // 128-bit was made Legal under AVX1.
1494 for (auto VT : { MVT::v32i8, MVT::v16i16, MVT::v8i32, MVT::v4i64,
1495 MVT::v8f32, MVT::v4f64 })
1496 setOperationAction(ISD::EXTRACT_SUBVECTOR, VT, Legal);
1497
1498 for (auto VT : { MVT::v16i32, MVT::v8i64, MVT::v16f32, MVT::v8f64 }) {
1499 setOperationAction(ISD::VECTOR_SHUFFLE, VT, Custom);
1500 setOperationAction(ISD::INSERT_VECTOR_ELT, VT, Custom);
1501 setOperationAction(ISD::BUILD_VECTOR, VT, Custom);
1502 setOperationAction(ISD::VSELECT, VT, Custom);
1503 setOperationAction(ISD::EXTRACT_VECTOR_ELT, VT, Custom);
1504 setOperationAction(ISD::SCALAR_TO_VECTOR, VT, Custom);
1505 setOperationAction(ISD::INSERT_SUBVECTOR, VT, Legal);
1506 setOperationAction(ISD::MLOAD, VT, Legal);
1507 setOperationAction(ISD::MSTORE, VT, Legal);
1508 setOperationAction(ISD::MGATHER, VT, Custom);
1509 setOperationAction(ISD::MSCATTER, VT, Custom);
1510 }
1511 if (!Subtarget.hasBWI()) {
1512 // Need to custom split v32i16/v64i8 bitcasts.
1513 setOperationAction(ISD::BITCAST, MVT::v32i16, Custom);
1514 setOperationAction(ISD::BITCAST, MVT::v64i8, Custom);
1515
1516 // Better to split these into two 256-bit ops.
1517 setOperationAction(ISD::BITREVERSE, MVT::v8i64, Custom);
1518 setOperationAction(ISD::BITREVERSE, MVT::v16i32, Custom);
1519 }
1520
1521 if (Subtarget.hasVBMI2()) {
1522 for (auto VT : { MVT::v16i32, MVT::v8i64 }) {
1523 setOperationAction(ISD::FSHL, VT, Custom);
1524 setOperationAction(ISD::FSHR, VT, Custom);
1525 }
1526 }
1527 }// has AVX-512
1528
1529 // This block controls legalization for operations that don't have
1530 // pre-AVX512 equivalents. Without VLX we use 512-bit operations for
1531 // narrower widths.
1532 if (!Subtarget.useSoftFloat() && Subtarget.hasAVX512()) {
1533 // These operations are handled on non-VLX by artificially widening in
1534 // isel patterns.
1535 // TODO: Custom widen in lowering on non-VLX and drop the isel patterns?
1536
1537 setOperationAction(ISD::FP_TO_UINT, MVT::v8i32, Legal);
1538 setOperationAction(ISD::FP_TO_UINT, MVT::v4i32, Legal);
1539 setOperationAction(ISD::FP_TO_UINT, MVT::v2i32, Custom);
1540 setOperationAction(ISD::UINT_TO_FP, MVT::v8i32, Legal);
1541 setOperationAction(ISD::UINT_TO_FP, MVT::v4i32, Legal);
1542
1543 for (auto VT : { MVT::v2i64, MVT::v4i64 }) {
1544 setOperationAction(ISD::SMAX, VT, Legal);
1545 setOperationAction(ISD::UMAX, VT, Legal);
1546 setOperationAction(ISD::SMIN, VT, Legal);
1547 setOperationAction(ISD::UMIN, VT, Legal);
1548 setOperationAction(ISD::ABS, VT, Legal);
1549 }
1550
1551 for (auto VT : { MVT::v4i32, MVT::v8i32, MVT::v2i64, MVT::v4i64 }) {
1552 setOperationAction(ISD::ROTL, VT, Custom);
1553 setOperationAction(ISD::ROTR, VT, Custom);
1554 }
1555
1556 // Custom legalize 2x32 to get a little better code.
1557 setOperationAction(ISD::MSCATTER, MVT::v2f32, Custom);
1558 setOperationAction(ISD::MSCATTER, MVT::v2i32, Custom);
1559
1560 for (auto VT : { MVT::v4i32, MVT::v8i32, MVT::v2i64, MVT::v4i64,
1561 MVT::v4f32, MVT::v8f32, MVT::v2f64, MVT::v4f64 })
1562 setOperationAction(ISD::MSCATTER, VT, Custom);
1563
1564 if (Subtarget.hasDQI()) {
1565 for (auto VT : { MVT::v2i64, MVT::v4i64 }) {
1566 setOperationAction(ISD::SINT_TO_FP, VT, Legal);
1567 setOperationAction(ISD::UINT_TO_FP, VT, Legal);
1568 setOperationAction(ISD::FP_TO_SINT, VT, Legal);
1569 setOperationAction(ISD::FP_TO_UINT, VT, Legal);
1570
1571 setOperationAction(ISD::MUL, VT, Legal);
1572 }
1573 }
1574
1575 if (Subtarget.hasCDI()) {
1576 for (auto VT : { MVT::v4i32, MVT::v8i32, MVT::v2i64, MVT::v4i64 }) {
1577 setOperationAction(ISD::CTLZ, VT, Legal);
1578 }
1579 } // Subtarget.hasCDI()
1580
1581 if (Subtarget.hasVPOPCNTDQ()) {
1582 for (auto VT : { MVT::v4i32, MVT::v8i32, MVT::v2i64, MVT::v4i64 })
1583 setOperationAction(ISD::CTPOP, VT, Legal);
1584 }
1585 }
1586
1587 // This block controls legalization of v32i1/v64i1, which are available with
1588 // AVX512BW. 512-bit v32i16 and v64i8 vector legalization is controlled with
1589 // useBWIRegs.
1590 if (!Subtarget.useSoftFloat() && Subtarget.hasBWI()) {
1591 addRegisterClass(MVT::v32i1, &X86::VK32RegClass);
1592 addRegisterClass(MVT::v64i1, &X86::VK64RegClass);
1593
1594 for (auto VT : { MVT::v32i1, MVT::v64i1 }) {
1595 setOperationAction(ISD::ADD, VT, Custom);
1596 setOperationAction(ISD::SUB, VT, Custom);
1597 setOperationAction(ISD::MUL, VT, Custom);
1598 setOperationAction(ISD::VSELECT, VT, Expand);
1599 setOperationAction(ISD::UADDSAT, VT, Custom);
1600 setOperationAction(ISD::SADDSAT, VT, Custom);
1601 setOperationAction(ISD::USUBSAT, VT, Custom);
1602 setOperationAction(ISD::SSUBSAT, VT, Custom);
1603
1604 setOperationAction(ISD::TRUNCATE, VT, Custom);
1605 setOperationAction(ISD::SETCC, VT, Custom);
1606 setOperationAction(ISD::EXTRACT_VECTOR_ELT, VT, Custom);
1607 setOperationAction(ISD::INSERT_VECTOR_ELT, VT, Custom);
1608 setOperationAction(ISD::SELECT, VT, Custom);
1609 setOperationAction(ISD::BUILD_VECTOR, VT, Custom);
1610 setOperationAction(ISD::VECTOR_SHUFFLE, VT, Custom);
1611 }
1612
1613 setOperationAction(ISD::CONCAT_VECTORS, MVT::v32i1, Custom);
1614 setOperationAction(ISD::CONCAT_VECTORS, MVT::v64i1, Custom);
1615 setOperationAction(ISD::INSERT_SUBVECTOR, MVT::v32i1, Custom);
1616 setOperationAction(ISD::INSERT_SUBVECTOR, MVT::v64i1, Custom);
1617 for (auto VT : { MVT::v16i1, MVT::v32i1 })
1618 setOperationAction(ISD::EXTRACT_SUBVECTOR, VT, Custom);
1619
1620 // Extends from v32i1 masks to 256-bit vectors.
1621 setOperationAction(ISD::SIGN_EXTEND, MVT::v32i8, Custom);
1622 setOperationAction(ISD::ZERO_EXTEND, MVT::v32i8, Custom);
1623 setOperationAction(ISD::ANY_EXTEND, MVT::v32i8, Custom);
1624 }
1625
1626 // This block controls legalization for v32i16 and v64i8. 512-bits can be
1627 // disabled based on prefer-vector-width and required-vector-width function
1628 // attributes.
1629 if (!Subtarget.useSoftFloat() && Subtarget.useBWIRegs()) {
1630 addRegisterClass(MVT::v32i16, &X86::VR512RegClass);
1631 addRegisterClass(MVT::v64i8, &X86::VR512RegClass);
1632
1633 // Extends from v64i1 masks to 512-bit vectors.
1634 setOperationAction(ISD::SIGN_EXTEND, MVT::v64i8, Custom);
1635 setOperationAction(ISD::ZERO_EXTEND, MVT::v64i8, Custom);
1636 setOperationAction(ISD::ANY_EXTEND, MVT::v64i8, Custom);
1637
1638 setOperationAction(ISD::MUL, MVT::v32i16, Legal);
1639 setOperationAction(ISD::MUL, MVT::v64i8, Custom);
1640 setOperationAction(ISD::MULHS, MVT::v32i16, Legal);
1641 setOperationAction(ISD::MULHU, MVT::v32i16, Legal);
1642 setOperationAction(ISD::MULHS, MVT::v64i8, Custom);
1643 setOperationAction(ISD::MULHU, MVT::v64i8, Custom);
1644 setOperationAction(ISD::CONCAT_VECTORS, MVT::v32i16, Custom);
1645 setOperationAction(ISD::CONCAT_VECTORS, MVT::v64i8, Custom);
1646 setOperationAction(ISD::INSERT_SUBVECTOR, MVT::v32i16, Legal);
1647 setOperationAction(ISD::INSERT_SUBVECTOR, MVT::v64i8, Legal);
1648 setOperationAction(ISD::EXTRACT_VECTOR_ELT, MVT::v32i16, Custom);
1649 setOperationAction(ISD::EXTRACT_VECTOR_ELT, MVT::v64i8, Custom);
1650 setOperationAction(ISD::SCALAR_TO_VECTOR, MVT::v32i16, Custom);
1651 setOperationAction(ISD::SCALAR_TO_VECTOR, MVT::v64i8, Custom);
1652 setOperationAction(ISD::SIGN_EXTEND, MVT::v32i16, Custom);
1653 setOperationAction(ISD::ZERO_EXTEND, MVT::v32i16, Custom);
1654 setOperationAction(ISD::ANY_EXTEND, MVT::v32i16, Custom);
1655 setOperationAction(ISD::VECTOR_SHUFFLE, MVT::v32i16, Custom);
1656 setOperationAction(ISD::VECTOR_SHUFFLE, MVT::v64i8, Custom);
1657 setOperationAction(ISD::INSERT_VECTOR_ELT, MVT::v32i16, Custom);
1658 setOperationAction(ISD::INSERT_VECTOR_ELT, MVT::v64i8, Custom);
1659 setOperationAction(ISD::TRUNCATE, MVT::v32i8, Custom);
1660 setOperationAction(ISD::BITREVERSE, MVT::v64i8, Custom);
1661
1662 setOperationAction(ISD::SIGN_EXTEND_VECTOR_INREG, MVT::v32i16, Custom);
1663 setOperationAction(ISD::ZERO_EXTEND_VECTOR_INREG, MVT::v32i16, Custom);
1664
1665 setTruncStoreAction(MVT::v32i16, MVT::v32i8, Legal);
1666
1667 for (auto VT : { MVT::v64i8, MVT::v32i16 }) {
1668 setOperationAction(ISD::BUILD_VECTOR, VT, Custom);
1669 setOperationAction(ISD::VSELECT, VT, Custom);
1670 setOperationAction(ISD::ABS, VT, Legal);
1671 setOperationAction(ISD::SRL, VT, Custom);
1672 setOperationAction(ISD::SHL, VT, Custom);
1673 setOperationAction(ISD::SRA, VT, Custom);
1674 setOperationAction(ISD::MLOAD, VT, Legal);
1675 setOperationAction(ISD::MSTORE, VT, Legal);
1676 setOperationAction(ISD::CTPOP, VT, Custom);
1677 setOperationAction(ISD::CTLZ, VT, Custom);
1678 setOperationAction(ISD::SMAX, VT, Legal);
1679 setOperationAction(ISD::UMAX, VT, Legal);
1680 setOperationAction(ISD::SMIN, VT, Legal);
1681 setOperationAction(ISD::UMIN, VT, Legal);
1682 setOperationAction(ISD::SETCC, VT, Custom);
1683 setOperationAction(ISD::UADDSAT, VT, Legal);
1684 setOperationAction(ISD::SADDSAT, VT, Legal);
1685 setOperationAction(ISD::USUBSAT, VT, Legal);
1686 setOperationAction(ISD::SSUBSAT, VT, Legal);
1687 setOperationAction(ISD::SELECT, VT, Custom);
1688
1689 // The condition codes aren't legal in SSE/AVX and under AVX512 we use
1690 // setcc all the way to isel and prefer SETGT in some isel patterns.
1691 setCondCodeAction(ISD::SETLT, VT, Custom);
1692 setCondCodeAction(ISD::SETLE, VT, Custom);
1693 }
1694
1695 for (auto ExtType : {ISD::ZEXTLOAD, ISD::SEXTLOAD}) {
1696 setLoadExtAction(ExtType, MVT::v32i16, MVT::v32i8, Legal);
1697 }
1698
1699 if (Subtarget.hasBITALG()) {
1700 for (auto VT : { MVT::v64i8, MVT::v32i16 })
1701 setOperationAction(ISD::CTPOP, VT, Legal);
1702 }
1703
1704 if (Subtarget.hasVBMI2()) {
1705 setOperationAction(ISD::FSHL, MVT::v32i16, Custom);
1706 setOperationAction(ISD::FSHR, MVT::v32i16, Custom);
1707 }
1708 }
1709
1710 if (!Subtarget.useSoftFloat() && Subtarget.hasBWI()) {
1711 for (auto VT : { MVT::v32i8, MVT::v16i8, MVT::v16i16, MVT::v8i16 }) {
1712 setOperationAction(ISD::MLOAD, VT, Subtarget.hasVLX() ? Legal : Custom);
1713 setOperationAction(ISD::MSTORE, VT, Subtarget.hasVLX() ? Legal : Custom);
1714 }
1715
1716 // These operations are handled on non-VLX by artificially widening in
1717 // isel patterns.
1718 // TODO: Custom widen in lowering on non-VLX and drop the isel patterns?
1719
1720 if (Subtarget.hasBITALG()) {
1721 for (auto VT : { MVT::v16i8, MVT::v32i8, MVT::v8i16, MVT::v16i16 })
1722 setOperationAction(ISD::CTPOP, VT, Legal);
1723 }
1724 }
1725
1726 if (!Subtarget.useSoftFloat() && Subtarget.hasVLX()) {
1727 setTruncStoreAction(MVT::v4i64, MVT::v4i8, Legal);
1728 setTruncStoreAction(MVT::v4i64, MVT::v4i16, Legal);
1729 setTruncStoreAction(MVT::v4i64, MVT::v4i32, Legal);
1730 setTruncStoreAction(MVT::v8i32, MVT::v8i8, Legal);
1731 setTruncStoreAction(MVT::v8i32, MVT::v8i16, Legal);
1732
1733 setTruncStoreAction(MVT::v2i64, MVT::v2i8, Legal);
1734 setTruncStoreAction(MVT::v2i64, MVT::v2i16, Legal);
1735 setTruncStoreAction(MVT::v2i64, MVT::v2i32, Legal);
1736 setTruncStoreAction(MVT::v4i32, MVT::v4i8, Legal);
1737 setTruncStoreAction(MVT::v4i32, MVT::v4i16, Legal);
1738
1739 if (Subtarget.hasDQI()) {
1740 // Fast v2f32 SINT_TO_FP( v2i64 ) custom conversion.
1741 // v2f32 UINT_TO_FP is already custom under SSE2.
1742 setOperationAction(ISD::SINT_TO_FP, MVT::v2f32, Custom);
1743 assert(isOperationCustom(ISD::UINT_TO_FP, MVT::v2f32) &&
1744        "Unexpected operation action!");
1745 // v2i64 FP_TO_S/UINT(v2f32) custom conversion.
1746 setOperationAction(ISD::FP_TO_SINT, MVT::v2f32, Custom);
1747 setOperationAction(ISD::FP_TO_UINT, MVT::v2f32, Custom);
1748 }
1749
1750 if (Subtarget.hasBWI()) {
1751 setTruncStoreAction(MVT::v16i16, MVT::v16i8, Legal);
1752 setTruncStoreAction(MVT::v8i16, MVT::v8i8, Legal);
1753 }
1754
1755 if (Subtarget.hasVBMI2()) {
1756 // TODO: Make these legal even without VLX?
1757 for (auto VT : { MVT::v8i16, MVT::v4i32, MVT::v2i64,
1758 MVT::v16i16, MVT::v8i32, MVT::v4i64 }) {
1759 setOperationAction(ISD::FSHL, VT, Custom);
1760 setOperationAction(ISD::FSHR, VT, Custom);
1761 }
1762 }
1763
1764 setOperationAction(ISD::TRUNCATE, MVT::v16i32, Custom);
1765 setOperationAction(ISD::TRUNCATE, MVT::v8i64, Custom);
1766 setOperationAction(ISD::TRUNCATE, MVT::v16i64, Custom);
1767 }
1768
1769 // We want to custom lower some of our intrinsics.
1770 setOperationAction(ISD::INTRINSIC_WO_CHAIN, MVT::Other, Custom);
1771 setOperationAction(ISD::INTRINSIC_W_CHAIN, MVT::Other, Custom);
1772 setOperationAction(ISD::INTRINSIC_VOID, MVT::Other, Custom);
1773 if (!Subtarget.is64Bit()) {
1774 setOperationAction(ISD::INTRINSIC_W_CHAIN, MVT::i64, Custom);
1775 }
1776
1777 // Only custom-lower 64-bit SADDO and friends on 64-bit because we don't
1778 // handle type legalization for these operations here.
1779 //
1780 // FIXME: We really should do custom legalization for addition and
1781 // subtraction on x86-32 once PR3203 is fixed. We really can't do much better
1782 // than generic legalization for 64-bit multiplication-with-overflow, though.
1783 for (auto VT : { MVT::i8, MVT::i16, MVT::i32, MVT::i64 }) {
1784 if (VT == MVT::i64 && !Subtarget.is64Bit())
1785 continue;
1786 // Add/Sub/Mul with overflow operations are custom lowered.
1787 setOperationAction(ISD::SADDO, VT, Custom);
1788 setOperationAction(ISD::UADDO, VT, Custom);
1789 setOperationAction(ISD::SSUBO, VT, Custom);
1790 setOperationAction(ISD::USUBO, VT, Custom);
1791 setOperationAction(ISD::SMULO, VT, Custom);
1792 setOperationAction(ISD::UMULO, VT, Custom);
1793
1794 // Support carry in as value rather than glue.
1795 setOperationAction(ISD::ADDCARRY, VT, Custom);
1796 setOperationAction(ISD::SUBCARRY, VT, Custom);
1797 setOperationAction(ISD::SETCCCARRY, VT, Custom);
1798 }
1799
1800 if (!Subtarget.is64Bit()) {
1801 // These libcalls are not available in 32-bit.
1802 setLibcallName(RTLIB::SHL_I128, nullptr);
1803 setLibcallName(RTLIB::SRL_I128, nullptr);
1804 setLibcallName(RTLIB::SRA_I128, nullptr);
1805 setLibcallName(RTLIB::MUL_I128, nullptr);
1806 }
1807
1808 // Combine sin / cos into _sincos_stret if it is available.
1809 if (getLibcallName(RTLIB::SINCOS_STRET_F32) != nullptr &&
1810 getLibcallName(RTLIB::SINCOS_STRET_F64) != nullptr) {
1811 setOperationAction(ISD::FSINCOS, MVT::f64, Custom);
1812 setOperationAction(ISD::FSINCOS, MVT::f32, Custom);
1813 }
1814
1815 if (Subtarget.isTargetWin64()) {
1816 setOperationAction(ISD::SDIV, MVT::i128, Custom);
1817 setOperationAction(ISD::UDIV, MVT::i128, Custom);
1818 setOperationAction(ISD::SREM, MVT::i128, Custom);
1819 setOperationAction(ISD::UREM, MVT::i128, Custom);
1820 setOperationAction(ISD::SDIVREM, MVT::i128, Custom);
1821 setOperationAction(ISD::UDIVREM, MVT::i128, Custom);
1822 }
1823
1824 // On 32 bit MSVC, `fmodf(f32)` is not defined - only `fmod(f64)`
1825 // is. We should promote the value to 64-bits to solve this.
1826 // This is what the CRT headers do - `fmodf` is an inline header
1827 // function casting to f64 and calling `fmod`.
1828 if (Subtarget.is32Bit() &&
1829 (Subtarget.isTargetWindowsMSVC() || Subtarget.isTargetWindowsItanium()))
1830 for (ISD::NodeType Op :
1831 {ISD::FCEIL, ISD::FCOS, ISD::FEXP, ISD::FFLOOR, ISD::FREM, ISD::FLOG,
1832 ISD::FLOG10, ISD::FPOW, ISD::FSIN})
1833 if (isOperationExpand(Op, MVT::f32))
1834 setOperationAction(Op, MVT::f32, Promote);
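Editorial note (not part of the LLVM source): the promotion above is safe because the MSVC CRT headers already implement fmodf and friends as inline wrappers that widen to double. A minimal sketch of that pattern:

    #include <cmath>

    // Illustrative only: roughly what the CRT header wrapper does, and what
    // Promote produces for the f32 libcalls listed above.
    static inline float fmodf_via_f64(float X, float Y) {
      // Widen to f64, call the double-precision routine, narrow the result.
      return static_cast<float>(std::fmod(static_cast<double>(X),
                                          static_cast<double>(Y)));
    }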
1835
1836 // We have target-specific dag combine patterns for the following nodes:
1837 setTargetDAGCombine(ISD::VECTOR_SHUFFLE);
1838 setTargetDAGCombine(ISD::SCALAR_TO_VECTOR);
1839 setTargetDAGCombine(ISD::EXTRACT_VECTOR_ELT);
1840 setTargetDAGCombine(ISD::CONCAT_VECTORS);
1841 setTargetDAGCombine(ISD::INSERT_SUBVECTOR);
1842 setTargetDAGCombine(ISD::EXTRACT_SUBVECTOR);
1843 setTargetDAGCombine(ISD::BITCAST);
1844 setTargetDAGCombine(ISD::VSELECT);
1845 setTargetDAGCombine(ISD::SELECT);
1846 setTargetDAGCombine(ISD::SHL);
1847 setTargetDAGCombine(ISD::SRA);
1848 setTargetDAGCombine(ISD::SRL);
1849 setTargetDAGCombine(ISD::OR);
1850 setTargetDAGCombine(ISD::AND);
1851 setTargetDAGCombine(ISD::ADD);
1852 setTargetDAGCombine(ISD::FADD);
1853 setTargetDAGCombine(ISD::FSUB);
1854 setTargetDAGCombine(ISD::FNEG);
1855 setTargetDAGCombine(ISD::FMA);
1856 setTargetDAGCombine(ISD::FMINNUM);
1857 setTargetDAGCombine(ISD::FMAXNUM);
1858 setTargetDAGCombine(ISD::SUB);
1859 setTargetDAGCombine(ISD::LOAD);
1860 setTargetDAGCombine(ISD::MLOAD);
1861 setTargetDAGCombine(ISD::STORE);
1862 setTargetDAGCombine(ISD::MSTORE);
1863 setTargetDAGCombine(ISD::TRUNCATE);
1864 setTargetDAGCombine(ISD::ZERO_EXTEND);
1865 setTargetDAGCombine(ISD::ANY_EXTEND);
1866 setTargetDAGCombine(ISD::SIGN_EXTEND);
1867 setTargetDAGCombine(ISD::SIGN_EXTEND_INREG);
1868 setTargetDAGCombine(ISD::ANY_EXTEND_VECTOR_INREG);
1869 setTargetDAGCombine(ISD::SIGN_EXTEND_VECTOR_INREG);
1870 setTargetDAGCombine(ISD::ZERO_EXTEND_VECTOR_INREG);
1871 setTargetDAGCombine(ISD::SINT_TO_FP);
1872 setTargetDAGCombine(ISD::UINT_TO_FP);
1873 setTargetDAGCombine(ISD::SETCC);
1874 setTargetDAGCombine(ISD::MUL);
1875 setTargetDAGCombine(ISD::XOR);
1876 setTargetDAGCombine(ISD::MSCATTER);
1877 setTargetDAGCombine(ISD::MGATHER);
1878
1879 computeRegisterProperties(Subtarget.getRegisterInfo());
1880
1881 MaxStoresPerMemset = 16; // For @llvm.memset -> sequence of stores
1882 MaxStoresPerMemsetOptSize = 8;
1883 MaxStoresPerMemcpy = 8; // For @llvm.memcpy -> sequence of stores
1884 MaxStoresPerMemcpyOptSize = 4;
1885 MaxStoresPerMemmove = 8; // For @llvm.memmove -> sequence of stores
1886 MaxStoresPerMemmoveOptSize = 4;
1887
1888 // TODO: These control memcmp expansion in CGP and could be raised higher, but
1889 // that needs to be benchmarked and balanced with the potential use of vector
1890 // load/store types (PR33329, PR33914).
1891 MaxLoadsPerMemcmp = 2;
1892 MaxLoadsPerMemcmpOptSize = 2;
1893
1894 // Set loop alignment to 2^ExperimentalPrefLoopAlignment bytes (default: 2^4).
1895 setPrefLoopAlignment(Align(1ULL << ExperimentalPrefLoopAlignment));
1896
1897 // An out-of-order CPU can speculatively execute past a predictable branch,
1898 // but a conditional move could be stalled by an expensive earlier operation.
1899 PredictableSelectIsExpensive = Subtarget.getSchedModel().isOutOfOrder();
1900 EnableExtLdPromotion = true;
1901 setPrefFunctionAlignment(Align(16));
1902
1903 verifyIntrinsicTables();
1904}
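Editorial note: the MaxStoresPerMemset/Memcpy/Memmove limits set near the end of the constructor bound how many stores an inline-expanded @llvm.memset or @llvm.memcpy may use before it stays a libcall. A hedged illustration (assumes an SSE2 target and the default limits above):

    #include <cstring>

    // Illustrative only: a small constant-size memset that the limits above
    // make a candidate for expansion into a short sequence of stores.
    void clear_small_buffer(char *Buf) {
      std::memset(Buf, 0, 64); // e.g. four 16-byte stores rather than a call
    }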
1905
1906// This has so far only been implemented for 64-bit MachO.
1907bool X86TargetLowering::useLoadStackGuardNode() const {
1908 return Subtarget.isTargetMachO() && Subtarget.is64Bit();
1909}
1910
1911bool X86TargetLowering::useStackGuardXorFP() const {
1912 // Currently only MSVC CRTs XOR the frame pointer into the stack guard value.
1913 return Subtarget.getTargetTriple().isOSMSVCRT();
1914}
1915
1916SDValue X86TargetLowering::emitStackGuardXorFP(SelectionDAG &DAG, SDValue Val,
1917 const SDLoc &DL) const {
1918 EVT PtrTy = getPointerTy(DAG.getDataLayout());
1919 unsigned XorOp = Subtarget.is64Bit() ? X86::XOR64_FP : X86::XOR32_FP;
1920 MachineSDNode *Node = DAG.getMachineNode(XorOp, DL, PtrTy, Val);
1921 return SDValue(Node, 0);
1922}
1923
1924TargetLoweringBase::LegalizeTypeAction
1925X86TargetLowering::getPreferredVectorAction(MVT VT) const {
1926 if (VT == MVT::v32i1 && Subtarget.hasAVX512() && !Subtarget.hasBWI())
1927 return TypeSplitVector;
1928
1929 if (VT.getVectorNumElements() != 1 &&
1930 VT.getVectorElementType() != MVT::i1)
1931 return TypeWidenVector;
1932
1933 return TargetLoweringBase::getPreferredVectorAction(VT);
1934}
1935
1936MVT X86TargetLowering::getRegisterTypeForCallingConv(LLVMContext &Context,
1937 CallingConv::ID CC,
1938 EVT VT) const {
1939 // v32i1 vectors should be promoted to v32i8 to match avx2.
1940 if (VT == MVT::v32i1 && Subtarget.hasAVX512() && !Subtarget.hasBWI())
1941 return MVT::v32i8;
1942 // Break wide or odd vXi1 vectors into scalars to match avx2 behavior.
1943 if (VT.isVector() && VT.getVectorElementType() == MVT::i1 &&
1944 Subtarget.hasAVX512() &&
1945 (!isPowerOf2_32(VT.getVectorNumElements()) ||
1946 (VT.getVectorNumElements() > 16 && !Subtarget.hasBWI()) ||
1947 (VT.getVectorNumElements() > 64 && Subtarget.hasBWI())))
1948 return MVT::i8;
1949 // FIXME: Should we just make these types legal and custom split operations?
1950 if ((VT == MVT::v32i16 || VT == MVT::v64i8) &&
1951 Subtarget.hasAVX512() && !Subtarget.hasBWI() && !EnableOldKNLABI)
1952 return MVT::v16i32;
1953 return TargetLowering::getRegisterTypeForCallingConv(Context, CC, VT);
1954}
1955
1956unsigned X86TargetLowering::getNumRegistersForCallingConv(LLVMContext &Context,
1957 CallingConv::ID CC,
1958 EVT VT) const {
1959 // v32i1 vectors should be promoted to v32i8 to match avx2.
1960 if (VT == MVT::v32i1 && Subtarget.hasAVX512() && !Subtarget.hasBWI())
1961 return 1;
1962 // Break wide or odd vXi1 vectors into scalars to match avx2 behavior.
1963 if (VT.isVector() && VT.getVectorElementType() == MVT::i1 &&
1964 Subtarget.hasAVX512() &&
1965 (!isPowerOf2_32(VT.getVectorNumElements()) ||
1966 (VT.getVectorNumElements() > 16 && !Subtarget.hasBWI()) ||
1967 (VT.getVectorNumElements() > 64 && Subtarget.hasBWI())))
1968 return VT.getVectorNumElements();
1969 // FIXME: Should we just make these types legal and custom split operations?
1970 if ((VT == MVT::v32i16 || VT == MVT::v64i8) &&
1971 Subtarget.hasAVX512() && !Subtarget.hasBWI() && !EnableOldKNLABI)
1972 return 1;
1973 return TargetLowering::getNumRegistersForCallingConv(Context, CC, VT);
1974}
1975
1976unsigned X86TargetLowering::getVectorTypeBreakdownForCallingConv(
1977 LLVMContext &Context, CallingConv::ID CC, EVT VT, EVT &IntermediateVT,
1978 unsigned &NumIntermediates, MVT &RegisterVT) const {
1979 // Break wide or odd vXi1 vectors into scalars to match avx2 behavior.
1980 if (VT.isVector() && VT.getVectorElementType() == MVT::i1 &&
1981 Subtarget.hasAVX512() &&
1982 (!isPowerOf2_32(VT.getVectorNumElements()) ||
1983 (VT.getVectorNumElements() > 16 && !Subtarget.hasBWI()) ||
1984 (VT.getVectorNumElements() > 64 && Subtarget.hasBWI()))) {
1985 RegisterVT = MVT::i8;
1986 IntermediateVT = MVT::i1;
1987 NumIntermediates = VT.getVectorNumElements();
1988 return NumIntermediates;
1989 }
1990
1991 return TargetLowering::getVectorTypeBreakdownForCallingConv(Context, CC, VT, IntermediateVT,
1992 NumIntermediates, RegisterVT);
1993}
1994
1995EVT X86TargetLowering::getSetCCResultType(const DataLayout &DL,
1996 LLVMContext& Context,
1997 EVT VT) const {
1998 if (!VT.isVector())
1999 return MVT::i8;
2000
2001 if (Subtarget.hasAVX512()) {
2002 const unsigned NumElts = VT.getVectorNumElements();
2003
2004 // Figure out what this type will be legalized to.
2005 EVT LegalVT = VT;
2006 while (getTypeAction(Context, LegalVT) != TypeLegal)
2007 LegalVT = getTypeToTransformTo(Context, LegalVT);
2008
2009 // If we got a 512-bit vector then we'll definitely have a vXi1 compare.
2010 if (LegalVT.getSimpleVT().is512BitVector())
2011 return EVT::getVectorVT(Context, MVT::i1, NumElts);
2012
2013 if (LegalVT.getSimpleVT().isVector() && Subtarget.hasVLX()) {
2014 // If we legalized to less than a 512-bit vector, then we will use a vXi1
2015 // compare for vXi32/vXi64 for sure. If we have BWI we will also support
2016 // vXi16/vXi8.
2017 MVT EltVT = LegalVT.getSimpleVT().getVectorElementType();
2018 if (Subtarget.hasBWI() || EltVT.getSizeInBits() >= 32)
2019 return EVT::getVectorVT(Context, MVT::i1, NumElts);
2020 }
2021 }
2022
2023 return VT.changeVectorElementTypeToInteger();
2024}
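Editorial note: a concrete picture of the vXi1 setcc result type reported above, sketched with intrinsics (assumes AVX-512F; not part of this file):

    #include <immintrin.h>

    // Illustrative only: a 512-bit integer compare yields a k-mask
    // (__mmask16, i.e. v16i1), matching the result type returned above
    // for 512-bit vectors.
    __mmask16 greater_mask(__m512i A, __m512i B) {
      return _mm512_cmpgt_epi32_mask(A, B);
    }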
2025
2026/// Helper for getByValTypeAlignment to determine
2027/// the desired ByVal argument alignment.
2028static void getMaxByValAlign(Type *Ty, unsigned &MaxAlign) {
2029 if (MaxAlign == 16)
2030 return;
2031 if (VectorType *VTy = dyn_cast<VectorType>(Ty)) {
2032 if (VTy->getBitWidth() == 128)
2033 MaxAlign = 16;
2034 } else if (ArrayType *ATy = dyn_cast<ArrayType>(Ty)) {
2035 unsigned EltAlign = 0;
2036 getMaxByValAlign(ATy->getElementType(), EltAlign);
2037 if (EltAlign > MaxAlign)
2038 MaxAlign = EltAlign;
2039 } else if (StructType *STy = dyn_cast<StructType>(Ty)) {
2040 for (auto *EltTy : STy->elements()) {
2041 unsigned EltAlign = 0;
2042 getMaxByValAlign(EltTy, EltAlign);
2043 if (EltAlign > MaxAlign)
2044 MaxAlign = EltAlign;
2045 if (MaxAlign == 16)
2046 break;
2047 }
2048 }
2049}
2050
2051/// Return the desired alignment for ByVal aggregate
2052/// function arguments in the caller parameter area. For X86, aggregates
2053/// that contain SSE vectors are placed at 16-byte boundaries while the rest
2054/// are at 4-byte boundaries.
2055unsigned X86TargetLowering::getByValTypeAlignment(Type *Ty,
2056 const DataLayout &DL) const {
2057 if (Subtarget.is64Bit()) {
2058 // Max of 8 and alignment of type.
2059 unsigned TyAlign = DL.getABITypeAlignment(Ty);
2060 if (TyAlign > 8)
2061 return TyAlign;
2062 return 8;
2063 }
2064
2065 unsigned Align = 4;
2066 if (Subtarget.hasSSE1())
2067 getMaxByValAlign(Ty, Align);
2068 return Align;
2069}
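Editorial note: an example of the two cases the doc comment above describes, assuming a 32-bit x86 target with SSE enabled:

    #include <immintrin.h>

    // Illustrative only: aggregates passed byval on 32-bit x86.
    struct HasSSEVector { __m128 Lanes; int Tag; }; // expected 16-byte boundary
    struct PlainInts    { int A, B, C; };           // default 4-byte boundary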
2070
2071/// Returns the target specific optimal type for load
2072/// and store operations as a result of memset, memcpy, and memmove
2073 /// lowering. If DstAlign is zero, the destination alignment can satisfy any
2074 /// constraint. Similarly, if SrcAlign is zero there is no need to check it
2075 /// against an alignment requirement,
2076/// probably because the source does not need to be loaded. If 'IsMemset' is
2077/// true, that means it's expanding a memset. If 'ZeroMemset' is true, that
2078/// means it's a memset of zero. 'MemcpyStrSrc' indicates whether the memcpy
2079/// source is constant so it does not need to be loaded.
2080/// It returns EVT::Other if the type should be determined using generic
2081/// target-independent logic.
2082/// For vector ops we check that the overall size isn't larger than our
2083/// preferred vector width.
2084EVT X86TargetLowering::getOptimalMemOpType(
2085 uint64_t Size, unsigned DstAlign, unsigned SrcAlign, bool IsMemset,
2086 bool ZeroMemset, bool MemcpyStrSrc,
2087 const AttributeList &FuncAttributes) const {
2088 if (!FuncAttributes.hasFnAttribute(Attribute::NoImplicitFloat)) {
2089 if (Size >= 16 && (!Subtarget.isUnalignedMem16Slow() ||
2090 ((DstAlign == 0 || DstAlign >= 16) &&
2091 (SrcAlign == 0 || SrcAlign >= 16)))) {
2092 // FIXME: Check if unaligned 64-byte accesses are slow.
2093 if (Size >= 64 && Subtarget.hasAVX512() &&
2094 (Subtarget.getPreferVectorWidth() >= 512)) {
2095 return Subtarget.hasBWI() ? MVT::v64i8 : MVT::v16i32;
2096 }
2097 // FIXME: Check if unaligned 32-byte accesses are slow.
2098 if (Size >= 32 && Subtarget.hasAVX() &&
2099 (Subtarget.getPreferVectorWidth() >= 256)) {
2100 // Although this isn't a well-supported type for AVX1, we'll let
2101 // legalization and shuffle lowering produce the optimal codegen. If we
2102 // choose an optimal type with a vector element larger than a byte,
2103 // getMemsetStores() may create an intermediate splat (using an integer
2104 // multiply) before we splat as a vector.
2105 return MVT::v32i8;
2106 }
2107 if (Subtarget.hasSSE2() && (Subtarget.getPreferVectorWidth() >= 128))
2108 return MVT::v16i8;
2109 // TODO: Can SSE1 handle a byte vector?
2110 // If we have SSE1 registers we should be able to use them.
2111 if (Subtarget.hasSSE1() && (Subtarget.is64Bit() || Subtarget.hasX87()) &&
2112 (Subtarget.getPreferVectorWidth() >= 128))
2113 return MVT::v4f32;
2114 } else if ((!IsMemset || ZeroMemset) && !MemcpyStrSrc && Size >= 8 &&
2115 !Subtarget.is64Bit() && Subtarget.hasSSE2()) {
2116 // Do not use f64 to lower memcpy if source is string constant. It's
2117 // better to use i32 to avoid the loads.
2118 // Also, do not use f64 to lower memset unless this is a memset of zeros.
2119 // The gymnastics of splatting a byte value into an XMM register and then
2120 // only using 8-byte stores (because this is a CPU with slow unaligned
2121 // 16-byte accesses) makes that a loser.
2122 return MVT::f64;
2123 }
2124 }
2125 // This is a compromise. If we reach here, unaligned accesses may be slow on
2126 // this target. However, creating smaller, aligned accesses could be even
2127 // slower and would certainly be a lot more code.
2128 if (Subtarget.is64Bit() && Size >= 8)
2129 return MVT::i64;
2130 return MVT::i32;
2131}
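Editorial note: a simplified paraphrase of the size/feature tiers above for an unconstrained copy (no NoImplicitFloat, acceptable alignment). This is not an LLVM API, just the decision tree restated as a sketch:

    // Illustrative only: rough "chunk" width in bytes the lowering above
    // aims for, ignoring the SSE1/x87 and f64 special cases.
    static unsigned roughMemcpyChunkBytes(unsigned long long Size,
                                          bool HasAVX512, bool HasAVX,
                                          bool HasSSE2, unsigned PreferBits,
                                          bool Is64Bit) {
      if (Size >= 64 && HasAVX512 && PreferBits >= 512) return 64; // zmm
      if (Size >= 32 && HasAVX    && PreferBits >= 256) return 32; // ymm
      if (Size >= 16 && HasSSE2   && PreferBits >= 128) return 16; // xmm
      return (Is64Bit && Size >= 8) ? 8 : 4;                       // i64/i32
    }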
2132
2133bool X86TargetLowering::isSafeMemOpType(MVT VT) const {
2134 if (VT == MVT::f32)
2135 return X86ScalarSSEf32;
2136 else if (VT == MVT::f64)
2137 return X86ScalarSSEf64;
2138 return true;
2139}
2140
2141bool X86TargetLowering::allowsMisalignedMemoryAccesses(
2142 EVT VT, unsigned, unsigned Align, MachineMemOperand::Flags Flags,
2143 bool *Fast) const {
2144 if (Fast) {
2145 switch (VT.getSizeInBits()) {
2146 default:
2147 // 8-byte and under are always assumed to be fast.
2148 *Fast = true;
2149 break;
2150 case 128:
2151 *Fast = !Subtarget.isUnalignedMem16Slow();
2152 break;
2153 case 256:
2154 *Fast = !Subtarget.isUnalignedMem32Slow();
2155 break;
2156 // TODO: What about AVX-512 (512-bit) accesses?
2157 }
2158 }
2159 // NonTemporal vector memory ops must be aligned.
2160 if (!!(Flags & MachineMemOperand::MONonTemporal) && VT.isVector()) {
2161 // NT loads can only be vector aligned, so if it's less aligned than the
2162 // minimum vector size (which we can split the vector down to), we might as
2163 // well use a regular unaligned vector load.
2164 // We don't have any NT loads pre-SSE41.
2165 if (!!(Flags & MachineMemOperand::MOLoad))
2166 return (Align < 16 || !Subtarget.hasSSE41());
2167 return false;
2168 }
2169 // Misaligned accesses of any size are always allowed.
2170 return true;
2171}
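Editorial note: the non-temporal constraint checked above corresponds to the alignment requirement of streaming loads; a sketch (assumes SSE4.1):

    #include <smmintrin.h>

    // Illustrative only: P must be 16-byte aligned for the non-temporal load,
    // which is the requirement the MONonTemporal check above models.
    __m128i nontemporal_load(const __m128i *P) {
      return _mm_stream_load_si128(const_cast<__m128i *>(P));
    }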
2172
2173/// Return the entry encoding for a jump table in the
2174/// current function. The returned value is a member of the
2175/// MachineJumpTableInfo::JTEntryKind enum.
2176unsigned X86TargetLowering::getJumpTableEncoding() const {
2177 // In GOT pic mode, each entry in the jump table is emitted as a @GOTOFF
2178 // symbol.
2179 if (isPositionIndependent() && Subtarget.isPICStyleGOT())
2180 return MachineJumpTableInfo::EK_Custom32;
2181
2182 // Otherwise, use the normal jump table encoding heuristics.
2183 return TargetLowering::getJumpTableEncoding();
2184}
2185
2186bool X86TargetLowering::useSoftFloat() const {
2187 return Subtarget.useSoftFloat();
2188}
2189
2190void X86TargetLowering::markLibCallAttributes(MachineFunction *MF, unsigned CC,
2191 ArgListTy &Args) const {
2192
2193 // Only relabel X86-32 for C / Stdcall CCs.
2194 if (Subtarget.is64Bit())
2195 return;
2196 if (CC != CallingConv::C && CC != CallingConv::X86_StdCall)
2197 return;
2198 unsigned ParamRegs = 0;
2199 if (auto *M = MF->getFunction().getParent())
2200 ParamRegs = M->getNumberRegisterParameters();
2201
2202 // Mark the first N integer arguments as being passed in registers.
2203 for (unsigned Idx = 0; Idx < Args.size(); Idx++) {
2204 Type *T = Args[Idx].Ty;
2205 if (T->isIntOrPtrTy())
2206 if (MF->getDataLayout().getTypeAllocSize(T) <= 8) {
2207 unsigned numRegs = 1;
2208 if (MF->getDataLayout().getTypeAllocSize(T) > 4)
2209 numRegs = 2;
2210 if (ParamRegs < numRegs)
2211 return;
2212 ParamRegs -= numRegs;
2213 Args[Idx].IsInReg = true;
2214 }
2215 }
2216}
2217
2218const MCExpr *
2219X86TargetLowering::LowerCustomJumpTableEntry(const MachineJumpTableInfo *MJTI,
2220 const MachineBasicBlock *MBB,
2221 unsigned uid,MCContext &Ctx) const{
2222 assert(isPositionIndependent() && Subtarget.isPICStyleGOT());
2223 // In 32-bit ELF systems, our jump table entries are formed with @GOTOFF
2224 // entries.
2225 return MCSymbolRefExpr::create(MBB->getSymbol(),
2226 MCSymbolRefExpr::VK_GOTOFF, Ctx);
2227}
2228
2229/// Returns relocation base for the given PIC jumptable.
2230SDValue X86TargetLowering::getPICJumpTableRelocBase(SDValue Table,
2231 SelectionDAG &DAG) const {
2232 if (!Subtarget.is64Bit())
2233 // This doesn't have SDLoc associated with it, but is not really the
2234 // same as a Register.
2235 return DAG.getNode(X86ISD::GlobalBaseReg, SDLoc(),
2236 getPointerTy(DAG.getDataLayout()));
2237 return Table;
2238}
2239
2240/// This returns the relocation base for the given PIC jumptable,
2241/// the same as getPICJumpTableRelocBase, but as an MCExpr.
2242const MCExpr *X86TargetLowering::
2243getPICJumpTableRelocBaseExpr(const MachineFunction *MF, unsigned JTI,
2244 MCContext &Ctx) const {
2245 // X86-64 uses RIP relative addressing based on the jump table label.
2246 if (Subtarget.isPICStyleRIPRel())
2247 return TargetLowering::getPICJumpTableRelocBaseExpr(MF, JTI, Ctx);
2248
2249 // Otherwise, the reference is relative to the PIC base.
2250 return MCSymbolRefExpr::create(MF->getPICBaseSymbol(), Ctx);
2251}
2252
2253std::pair<const TargetRegisterClass *, uint8_t>
2254X86TargetLowering::findRepresentativeClass(const TargetRegisterInfo *TRI,
2255 MVT VT) const {
2256 const TargetRegisterClass *RRC = nullptr;
2257 uint8_t Cost = 1;
2258 switch (VT.SimpleTy) {
2259 default:
2260 return TargetLowering::findRepresentativeClass(TRI, VT);
2261 case MVT::i8: case MVT::i16: case MVT::i32: case MVT::i64:
2262 RRC = Subtarget.is64Bit() ? &X86::GR64RegClass : &X86::GR32RegClass;
2263 break;
2264 case MVT::x86mmx:
2265 RRC = &X86::VR64RegClass;
2266 break;
2267 case MVT::f32: case MVT::f64:
2268 case MVT::v16i8: case MVT::v8i16: case MVT::v4i32: case MVT::v2i64:
2269 case MVT::v4f32: case MVT::v2f64:
2270 case MVT::v32i8: case MVT::v16i16: case MVT::v8i32: case MVT::v4i64:
2271 case MVT::v8f32: case MVT::v4f64:
2272 case MVT::v64i8: case MVT::v32i16: case MVT::v16i32: case MVT::v8i64:
2273 case MVT::v16f32: case MVT::v8f64:
2274 RRC = &X86::VR128XRegClass;
2275 break;
2276 }
2277 return std::make_pair(RRC, Cost);
2278}
2279
2280unsigned X86TargetLowering::getAddressSpace() const {
2281 if (Subtarget.is64Bit())
2282 return (getTargetMachine().getCodeModel() == CodeModel::Kernel) ? 256 : 257;
2283 return 256;
2284}
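Editorial note: the 256/257 values are the x86 segment address spaces (%gs and %fs) that Clang also exposes as a language extension. A sketch (Clang-specific, x86-64 assumed):

    // Illustrative only: address space 257 is %fs-relative on x86-64,
    // matching the non-kernel 64-bit value returned above; 256 is %gs.
    static unsigned long
    load_fs_word(const unsigned long __attribute__((address_space(257))) *P) {
      return *P; // compiles to a %fs-relative load with clang on x86-64
    }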
2285
2286static bool hasStackGuardSlotTLS(const Triple &TargetTriple) {
2287 return TargetTriple.isOSGlibc() || TargetTriple.isOSFuchsia() ||
2288 (TargetTriple.isAndroid() && !TargetTriple.isAndroidVersionLT(17));
2289}
2290
2291static Constant* SegmentOffset(IRBuilder<> &IRB,
2292 unsigned Offset, unsigned AddressSpace) {
2293 return ConstantExpr::getIntToPtr(
2294 ConstantInt::get(Type::getInt32Ty(IRB.getContext()), Offset),
2295 Type::getInt8PtrTy(IRB.getContext())->getPointerTo(AddressSpace));
2296}
2297
2298Value *X86TargetLowering::getIRStackGuard(IRBuilder<> &IRB) const {
2299 // glibc, bionic, and Fuchsia have a special slot for the stack guard in
2300 // tcbhead_t; use it instead of the usual global variable (see
2301 // sysdeps/{i386,x86_64}/nptl/tls.h)
2302 if (hasStackGuardSlotTLS(Subtarget.getTargetTriple())) {
2303 if (Subtarget.isTargetFuchsia()) {
2304 // <zircon/tls.h> defines ZX_TLS_STACK_GUARD_OFFSET with this value.
2305 return SegmentOffset(IRB, 0x10, getAddressSpace());
2306 } else {
2307 // %fs:0x28, unless we're using a Kernel code model, in which case
2308 // it's %gs:0x28. It's %gs:0x14 on i386.
2309 unsigned Offset = (Subtarget.is64Bit()) ? 0x28 : 0x14;
2310 return SegmentOffset(IRB, Offset, getAddressSpace());
2311 }
2312 }
2313
2314 return TargetLowering::getIRStackGuard(IRB);
2315}
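Editorial note: a sketch of reading the same glibc TLS stack-guard slot the hook above points the IR at (GNU inline asm, x86-64 Linux assumed):

    // Illustrative only: read the stack guard at %fs:0x28 on x86-64 glibc.
    static unsigned long read_stack_guard() {
      unsigned long Guard;
      asm volatile("movq %%fs:0x28, %0" : "=r"(Guard));
      return Guard;
    }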
2316
2317void X86TargetLowering::insertSSPDeclarations(Module &M) const {
2318 // MSVC CRT provides functionality for stack protection.
2319 if (Subtarget.getTargetTriple().isWindowsMSVCEnvironment() ||
2320 Subtarget.getTargetTriple().isWindowsItaniumEnvironment()) {
2321 // MSVC CRT has a global variable holding security cookie.
2322 M.getOrInsertGlobal("__security_cookie",
2323 Type::getInt8PtrTy(M.getContext()));
2324
2325 // MSVC CRT has a function to validate security cookie.
2326 FunctionCallee SecurityCheckCookie = M.getOrInsertFunction(
2327 "__security_check_cookie", Type::getVoidTy(M.getContext()),
2328 Type::getInt8PtrTy(M.getContext()));
2329 if (Function *F = dyn_cast<Function>(SecurityCheckCookie.getCallee())) {
2330 F->setCallingConv(CallingConv::X86_FastCall);
2331 F->addAttribute(1, Attribute::AttrKind::InReg);
2332 }
2333 return;
2334 }
2335 // glibc, bionic, and Fuchsia have a special slot for the stack guard.
2336 if (hasStackGuardSlotTLS(Subtarget.getTargetTriple()))
2337 return;
2338 TargetLowering::insertSSPDeclarations(M);
2339}
2340
2341Value *X86TargetLowering::getSDagStackGuard(const Module &M) const {
2342 // MSVC CRT has a global variable holding security cookie.
2343 if (Subtarget.getTargetTriple().isWindowsMSVCEnvironment() ||
2344 Subtarget.getTargetTriple().isWindowsItaniumEnvironment()) {
2345 return M.getGlobalVariable("__security_cookie");
2346 }
2347 return TargetLowering::getSDagStackGuard(M);
2348}
2349
2350Function *X86TargetLowering::getSSPStackGuardCheck(const Module &M) const {
2351 // MSVC CRT has a function to validate security cookie.
2352 if (Subtarget.getTargetTriple().isWindowsMSVCEnvironment() ||
2353 Subtarget.getTargetTriple().isWindowsItaniumEnvironment()) {
2354 return M.getFunction("__security_check_cookie");
2355 }
2356 return TargetLowering::getSSPStackGuardCheck(M);
2357}
2358
2359Value *X86TargetLowering::getSafeStackPointerLocation(IRBuilder<> &IRB) const {
2360 if (Subtarget.getTargetTriple().isOSContiki())
2361 return getDefaultSafeStackPointerLocation(IRB, false);
2362
2363 // Android provides a fixed TLS slot for the SafeStack pointer. See the
2364 // definition of TLS_SLOT_SAFESTACK in
2365 // https://android.googlesource.com/platform/bionic/+/master/libc/private/bionic_tls.h
2366 if (Subtarget.isTargetAndroid()) {
2367 // %fs:0x48, unless we're using a Kernel code model, in which case it's
2368 // %gs:0x48. It's %gs:0x24 on i386.
2369 unsigned Offset = (Subtarget.is64Bit()) ? 0x48 : 0x24;
2370 return SegmentOffset(IRB, Offset, getAddressSpace());
2371 }
2372
2373 // Fuchsia is similar.
2374 if (Subtarget.isTargetFuchsia()) {
2375 // <zircon/tls.h> defines ZX_TLS_UNSAFE_SP_OFFSET with this value.
2376 return SegmentOffset(IRB, 0x18, getAddressSpace());
2377 }
2378
2379 return TargetLowering::getSafeStackPointerLocation(IRB);
2380}
2381
2382bool X86TargetLowering::isNoopAddrSpaceCast(unsigned SrcAS,
2383 unsigned DestAS) const {
2384 assert(SrcAS != DestAS && "Expected different address spaces!");
2385
2386 return SrcAS < 256 && DestAS < 256;
2387}
2388
2389//===----------------------------------------------------------------------===//
2390// Return Value Calling Convention Implementation
2391//===----------------------------------------------------------------------===//
2392
2393bool X86TargetLowering::CanLowerReturn(
2394 CallingConv::ID CallConv, MachineFunction &MF, bool isVarArg,
2395 const SmallVectorImpl<ISD::OutputArg> &Outs, LLVMContext &Context) const {
2396 SmallVector<CCValAssign, 16> RVLocs;
2397 CCState CCInfo(CallConv, isVarArg, MF, RVLocs, Context);
2398 return CCInfo.CheckReturn(Outs, RetCC_X86);
2399}
2400
2401const MCPhysReg *X86TargetLowering::getScratchRegisters(CallingConv::ID) const {
2402 static const MCPhysReg ScratchRegs[] = { X86::R11, 0 };
2403 return ScratchRegs;
2404}
2405
2406 /// Lowers mask values (v*i1) to the local register values
2407/// \returns DAG node after lowering to register type
2408static SDValue lowerMasksToReg(const SDValue &ValArg, const EVT &ValLoc,
2409 const SDLoc &Dl, SelectionDAG &DAG) {
2410 EVT ValVT = ValArg.getValueType();
2411
2412 if (ValVT == MVT::v1i1)
2413 return DAG.getNode(ISD::EXTRACT_VECTOR_ELT, Dl, ValLoc, ValArg,
2414 DAG.getIntPtrConstant(0, Dl));
2415
2416 if ((ValVT == MVT::v8i1 && (ValLoc == MVT::i8 || ValLoc == MVT::i32)) ||
2417 (ValVT == MVT::v16i1 && (ValLoc == MVT::i16 || ValLoc == MVT::i32))) {
2418 // Two stage lowering might be required
2419 // bitcast: v8i1 -> i8 / v16i1 -> i16
2420 // anyextend: i8 -> i32 / i16 -> i32
2421 EVT TempValLoc = ValVT == MVT::v8i1 ? MVT::i8 : MVT::i16;
2422 SDValue ValToCopy = DAG.getBitcast(TempValLoc, ValArg);
2423 if (ValLoc == MVT::i32)
2424 ValToCopy = DAG.getNode(ISD::ANY_EXTEND, Dl, ValLoc, ValToCopy);
2425 return ValToCopy;
2426 }
2427
2428 if ((ValVT == MVT::v32i1 && ValLoc == MVT::i32) ||
2429 (ValVT == MVT::v64i1 && ValLoc == MVT::i64)) {
2430 // One stage lowering is required
2431 // bitcast: v32i1 -> i32 / v64i1 -> i64
2432 return DAG.getBitcast(ValLoc, ValArg);
2433 }
2434
2435 return DAG.getNode(ISD::ANY_EXTEND, Dl, ValLoc, ValArg);
2436}
2437
2438 /// Breaks a v64i1 value into two registers and adds the new nodes to the DAG
2439static void Passv64i1ArgInRegs(
2440 const SDLoc &Dl, SelectionDAG &DAG, SDValue &Arg,
2441 SmallVectorImpl<std::pair<unsigned, SDValue>> &RegsToPass, CCValAssign &VA,
2442 CCValAssign &NextVA, const X86Subtarget &Subtarget) {
2443 assert(Subtarget.hasBWI() && "Expected AVX512BW target!");
2444 assert(Subtarget.is32Bit() && "Expecting 32 bit target");
2445 assert(Arg.getValueType() == MVT::i64 && "Expecting 64 bit value");
2446 assert(VA.isRegLoc() && NextVA.isRegLoc() &&
2447        "The value should reside in two registers");
2448
2449 // Before splitting the value we cast it to i64
2450 Arg = DAG.getBitcast(MVT::i64, Arg);
2451
2452 // Split the value into two i32 halves
2453 SDValue Lo, Hi;
2454 Lo = DAG.getNode(ISD::EXTRACT_ELEMENT, Dl, MVT::i32, Arg,
2455 DAG.getConstant(0, Dl, MVT::i32));
2456 Hi = DAG.getNode(ISD::EXTRACT_ELEMENT, Dl, MVT::i32, Arg,
2457 DAG.getConstant(1, Dl, MVT::i32));
2458
2459 // Attach the two i32 values to their corresponding registers
2460 RegsToPass.push_back(std::make_pair(VA.getLocReg(), Lo));
2461 RegsToPass.push_back(std::make_pair(NextVA.getLocReg(), Hi));
2462}
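Editorial note: the scalar analogue of the split above, where a 64-bit mask travels to a 32-bit calling convention as a (lo, hi) pair of i32 registers:

    #include <cstdint>

    // Illustrative only: equivalent of the EXTRACT_ELEMENT 0/1 pair above.
    static void split_u64(uint64_t V, uint32_t &Lo, uint32_t &Hi) {
      Lo = static_cast<uint32_t>(V);       // low half  -> first register
      Hi = static_cast<uint32_t>(V >> 32); // high half -> second register
    }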
2463
2464SDValue
2465X86TargetLowering::LowerReturn(SDValue Chain, CallingConv::ID CallConv,
2466 bool isVarArg,
2467 const SmallVectorImpl<ISD::OutputArg> &Outs,
2468 const SmallVectorImpl<SDValue> &OutVals,
2469 const SDLoc &dl, SelectionDAG &DAG) const {
2470 MachineFunction &MF = DAG.getMachineFunction();
2471 X86MachineFunctionInfo *FuncInfo = MF.getInfo<X86MachineFunctionInfo>();
2472
2473 // In some cases we need to disable registers from the default CSR list.
2474 // For example, when they are used for argument passing.
2475 bool ShouldDisableCalleeSavedRegister =
2476 CallConv == CallingConv::X86_RegCall ||
2477 MF.getFunction().hasFnAttribute("no_caller_saved_registers");
2478
2479 if (CallConv == CallingConv::X86_INTR && !Outs.empty())
2480 report_fatal_error("X86 interrupts may not return any value");
2481
2482 SmallVector<CCValAssign, 16> RVLocs;
2483 CCState CCInfo(CallConv, isVarArg, MF, RVLocs, *DAG.getContext());
2484 CCInfo.AnalyzeReturn(Outs, RetCC_X86);
2485
2486 SDValue Flag;
2487 SmallVector<SDValue, 6> RetOps;
2488 RetOps.push_back(Chain); // Operand #0 = Chain (updated below)
2489 // Operand #1 = Bytes To Pop
2490 RetOps.push_back(DAG.getTargetConstant(FuncInfo->getBytesToPopOnReturn(), dl,
2491 MVT::i32));
2492
2493 // Copy the result values into the output registers.
2494 for (unsigned I = 0, OutsIndex = 0, E = RVLocs.size(); I != E;
2495 ++I, ++OutsIndex) {
2496 CCValAssign &VA = RVLocs[I];
2497 assert(VA.isRegLoc() && "Can only return in registers!");
2498
2499 // Add the register to the CalleeSaveDisableRegs list.
2500 if (ShouldDisableCalleeSavedRegister)
2501 MF.getRegInfo().disableCalleeSavedRegister(VA.getLocReg());
2502
2503 SDValue ValToCopy = OutVals[OutsIndex];
2504 EVT ValVT = ValToCopy.getValueType();
2505
2506 // Promote values to the appropriate types.
2507 if (VA.getLocInfo() == CCValAssign::SExt)
2508 ValToCopy = DAG.getNode(ISD::SIGN_EXTEND, dl, VA.getLocVT(), ValToCopy);
2509 else if (VA.getLocInfo() == CCValAssign::ZExt)
2510 ValToCopy = DAG.getNode(ISD::ZERO_EXTEND, dl, VA.getLocVT(), ValToCopy);
2511 else if (VA.getLocInfo() == CCValAssign::AExt) {
2512 if (ValVT.isVector() && ValVT.getVectorElementType() == MVT::i1)
2513 ValToCopy = lowerMasksToReg(ValToCopy, VA.getLocVT(), dl, DAG);
2514 else
2515 ValToCopy = DAG.getNode(ISD::ANY_EXTEND, dl, VA.getLocVT(), ValToCopy);
2516 }
2517 else if (VA.getLocInfo() == CCValAssign::BCvt)
2518 ValToCopy = DAG.getBitcast(VA.getLocVT(), ValToCopy);
2519
2520 assert(VA.getLocInfo() != CCValAssign::FPExt &&
2521        "Unexpected FP-extend for return value.");
2522
2523 // If this is x86-64, and we disabled SSE, we can't return FP values,
2524 // or SSE or MMX vectors.
2525 if ((ValVT == MVT::f32 || ValVT == MVT::f64 ||
2526 VA.getLocReg() == X86::XMM0 || VA.getLocReg() == X86::XMM1) &&
2527 (Subtarget.is64Bit() && !Subtarget.hasSSE1())) {
2528 errorUnsupported(DAG, dl, "SSE register return with SSE disabled");
2529 VA.convertToReg(X86::FP0); // Set reg to FP0, avoid hitting asserts.
2530 } else if (ValVT == MVT::f64 &&
2531 (Subtarget.is64Bit() && !Subtarget.hasSSE2())) {
2532 // Likewise we can't return F64 values with SSE1 only. gcc does so, but
2533 // llvm-gcc has never done it right and no one has noticed, so this
2534 // should be OK for now.
2535 errorUnsupported(DAG, dl, "SSE2 register return with SSE2 disabled");
2536 VA.convertToReg(X86::FP0); // Set reg to FP0, avoid hitting asserts.
2537 }
2538
2539 // Returns in ST0/ST1 are handled specially: these are pushed as operands to
2540 // the RET instruction and handled by the FP Stackifier.
2541 if (VA.getLocReg() == X86::FP0 ||
2542 VA.getLocReg() == X86::FP1) {
2543 // If this is a copy from an xmm register to ST(0), use an FPExtend to
2544 // change the value to the FP stack register class.
2545 if (isScalarFPTypeInSSEReg(VA.getValVT()))
2546 ValToCopy = DAG.getNode(ISD::FP_EXTEND, dl, MVT::f80, ValToCopy);
2547 RetOps.push_back(ValToCopy);
2548 // Don't emit a copytoreg.
2549 continue;
2550 }
2551
2552 // 64-bit vector (MMX) values are returned in XMM0 / XMM1 except for v1i64
2553 // which is returned in RAX / RDX.
2554 if (Subtarget.is64Bit()) {
2555 if (ValVT == MVT::x86mmx) {
2556 if (VA.getLocReg() == X86::XMM0 || VA.getLocReg() == X86::XMM1) {
2557 ValToCopy = DAG.getBitcast(MVT::i64, ValToCopy);
2558 ValToCopy = DAG.getNode(ISD::SCALAR_TO_VECTOR, dl, MVT::v2i64,
2559 ValToCopy);
2560 // If we don't have SSE2 available, convert to v4f32 so the generated
2561 // register is legal.
2562 if (!Subtarget.hasSSE2())
2563 ValToCopy = DAG.getBitcast(MVT::v4f32, ValToCopy);
2564 }
2565 }
2566 }
2567
2568 SmallVector<std::pair<unsigned, SDValue>, 8> RegsToPass;
2569
2570 if (VA.needsCustom()) {
2571 assert(VA.getValVT() == MVT::v64i1 &&
2572        "Currently the only custom case is when we split v64i1 to 2 regs");
2573
2574 Passv64i1ArgInRegs(dl, DAG, ValToCopy, RegsToPass, VA, RVLocs[++I],
2575 Subtarget);
2576
2577 assert(2 == RegsToPass.size() &&
2578        "Expecting two registers after Pass64BitArgInRegs");
2579
2580 // Add the second register to the CalleeSaveDisableRegs list.
2581 if (ShouldDisableCalleeSavedRegister)
2582 MF.getRegInfo().disableCalleeSavedRegister(RVLocs[I].getLocReg());
2583 } else {
2584 RegsToPass.push_back(std::make_pair(VA.getLocReg(), ValToCopy));
2585 }
2586
2587 // Add nodes to the DAG and add the values into the RetOps list
2588 for (auto &Reg : RegsToPass) {
2589 Chain = DAG.getCopyToReg(Chain, dl, Reg.first, Reg.second, Flag);
2590 Flag = Chain.getValue(1);
2591 RetOps.push_back(DAG.getRegister(Reg.first, Reg.second.getValueType()));
2592 }
2593 }
2594
2595 // Swift calling convention does not require we copy the sret argument
2596 // into %rax/%eax for the return, and SRetReturnReg is not set for Swift.
2597
2598 // All x86 ABIs require that for returning structs by value we copy
2599 // the sret argument into %rax/%eax (depending on ABI) for the return.
2600 // We saved the argument into a virtual register in the entry block,
2601 // so now we copy the value out and into %rax/%eax.
2602 //
2603 // Checking Function.hasStructRetAttr() here is insufficient because the IR
2604 // may not have an explicit sret argument. If FuncInfo.CanLowerReturn is
2605 // false, then an sret argument may be implicitly inserted in the SelDAG. In
2606 // either case FuncInfo->setSRetReturnReg() will have been called.
2607 if (unsigned SRetReg = FuncInfo->getSRetReturnReg()) {
2608 // When we have both sret and another return value, we should use the
2609 // original Chain stored in RetOps[0], instead of the current Chain updated
2610 // in the above loop. If we only have sret, RetOps[0] equals to Chain.
2611
2612 // For the case of sret and another return value, we have
2613 // Chain_0 at the function entry
2614 // Chain_1 = getCopyToReg(Chain_0) in the above loop
2615 // If we use Chain_1 in getCopyFromReg, we will have
2616 // Val = getCopyFromReg(Chain_1)
2617 // Chain_2 = getCopyToReg(Chain_1, Val) from below
2618
2619 // getCopyToReg(Chain_0) will be glued together with
2620 // getCopyToReg(Chain_1, Val) into Unit A, getCopyFromReg(Chain_1) will be
2621 // in Unit B, and we will have cyclic dependency between Unit A and Unit B:
2622 // Data dependency from Unit B to Unit A due to usage of Val in
2623 // getCopyToReg(Chain_1, Val)
2624 // Chain dependency from Unit A to Unit B
2625
2626 // So here, we use RetOps[0] (i.e Chain_0) for getCopyFromReg.
2627 SDValue Val = DAG.getCopyFromReg(RetOps[0], dl, SRetReg,
2628 getPointerTy(MF.getDataLayout()));
2629
2630 unsigned RetValReg
2631 = (Subtarget.is64Bit() && !Subtarget.isTarget64BitILP32()) ?
2632 X86::RAX : X86::EAX;
2633 Chain = DAG.getCopyToReg(Chain, dl, RetValReg, Val, Flag);
2634 Flag = Chain.getValue(1);
2635
2636 // RAX/EAX now acts like a return value.
2637 RetOps.push_back(
2638 DAG.getRegister(RetValReg, getPointerTy(DAG.getDataLayout())));
2639
2640 // Add the returned register to the CalleeSaveDisableRegs list.
2641 if (ShouldDisableCalleeSavedRegister)
2642 MF.getRegInfo().disableCalleeSavedRegister(RetValReg);
2643 }
2644
2645 const X86RegisterInfo *TRI = Subtarget.getRegisterInfo();
2646 const MCPhysReg *I =
2647 TRI->getCalleeSavedRegsViaCopy(&DAG.getMachineFunction());
2648 if (I) {
2649 for (; *I; ++I) {
2650 if (X86::GR64RegClass.contains(*I))
2651 RetOps.push_back(DAG.getRegister(*I, MVT::i64));
2652 else
2653 llvm_unreachable("Unexpected register class in CSRsViaCopy!");
2654 }
2655 }
2656
2657 RetOps[0] = Chain; // Update chain.
2658
2659 // Add the flag if we have it.
2660 if (Flag.getNode())
2661 RetOps.push_back(Flag);
2662
2663 X86ISD::NodeType opcode = X86ISD::RET_FLAG;
2664 if (CallConv == CallingConv::X86_INTR)
2665 opcode = X86ISD::IRET;
2666 return DAG.getNode(opcode, dl, MVT::Other, RetOps);
2667}
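// A small standalone sketch (not from this file) of the sret return-register
// choice made in LowerReturn above: 64-bit targets that are not ILP32 (x32)
// hand the struct-return pointer back in RAX, everything else uses EAX. The
// helper name and the plain bool flags are hypothetical stand-ins for the
// X86Subtarget queries.
#include <string>

static std::string sretReturnReg(bool Is64Bit, bool IsILP32) {
  return (Is64Bit && !IsILP32) ? "RAX" : "EAX";
}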
2668
2669bool X86TargetLowering::isUsedByReturnOnly(SDNode *N, SDValue &Chain) const {
2670 if (N->getNumValues() != 1 || !N->hasNUsesOfValue(1, 0))
2671 return false;
2672
2673 SDValue TCChain = Chain;
2674 SDNode *Copy = *N->use_begin();
2675 if (Copy->getOpcode() == ISD::CopyToReg) {
2676 // If the copy has a glue operand, we conservatively assume it isn't safe to
2677 // perform a tail call.
2678 if (Copy->getOperand(Copy->getNumOperands()-1).getValueType() == MVT::Glue)
2679 return false;
2680 TCChain = Copy->getOperand(0);
2681 } else if (Copy->getOpcode() != ISD::FP_EXTEND)
2682 return false;
2683
2684 bool HasRet = false;
2685 for (SDNode::use_iterator UI = Copy->use_begin(), UE = Copy->use_end();
2686 UI != UE; ++UI) {
2687 if (UI->getOpcode() != X86ISD::RET_FLAG)
2688 return false;
2689 // If we are returning more than one value, we can definitely
2690 // not make a tail call; see PR19530.
2691 if (UI->getNumOperands() > 4)
2692 return false;
2693 if (UI->getNumOperands() == 4 &&
2694 UI->getOperand(UI->getNumOperands()-1).getValueType() != MVT::Glue)
2695 return false;
2696 HasRet = true;
2697 }
2698
2699 if (!HasRet)
2700 return false;
2701
2702 Chain = TCChain;
2703 return true;
2704}
2705
2706EVT X86TargetLowering::getTypeForExtReturn(LLVMContext &Context, EVT VT,
2707 ISD::NodeType ExtendKind) const {
2708 MVT ReturnMVT = MVT::i32;
2709
2710 bool Darwin = Subtarget.getTargetTriple().isOSDarwin();
2711 if (VT == MVT::i1 || (!Darwin && (VT == MVT::i8 || VT == MVT::i16))) {
2712 // The ABI does not require i1, i8 or i16 to be extended.
2713 //
2714 // On Darwin, there is code in the wild relying on Clang's old behaviour of
2715 // always extending i8/i16 return values, so keep doing that for now.
2716 // (PR26665).
2717 ReturnMVT = MVT::i8;
2718 }
2719
2720 EVT MinVT = getRegisterType(Context, ReturnMVT);
2721 return VT.bitsLT(MinVT) ? MinVT : VT;
2722}
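// A standalone sketch (not from this file) of the rule getTypeForExtReturn
// implements, using bit widths in place of MVT/EVT values: i1 everywhere, and
// i8/i16 on non-Darwin targets, may be returned as i8; any value narrower than
// the chosen minimum is widened to it. The helper name extReturnBits is
// hypothetical.
#include <algorithm>

static unsigned extReturnBits(unsigned VTBits, bool Darwin) {
  unsigned MinBits = 32;  // default: extend small returns to i32
  if (VTBits == 1 || (!Darwin && (VTBits == 8 || VTBits == 16)))
    MinBits = 8;          // the ABI does not require extension here
  return std::max(VTBits, MinBits);
}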
2723
2724/// Reads two 32 bit registers and creates a 64 bit mask value.
2725/// \param VA The current 32 bit value that needs to be assigned.
2726/// \param NextVA The next 32 bit value that needs to be assigned.
2727/// \param Root The parent DAG node.
2728/// \param [in,out] InFlag Represents the SDValue in the parent DAG node used
2729///                        for glue purposes. In case the DAG is already using
2730///                        a physical register instead of a virtual one, we
2731///                        should glue our new SDValue to the InFlag SDValue.
2732/// \return a new SDValue of size 64 bit.
2733static SDValue getv64i1Argument(CCValAssign &VA, CCValAssign &NextVA,
2734 SDValue &Root, SelectionDAG &DAG,
2735 const SDLoc &Dl, const X86Subtarget &Subtarget,
2736 SDValue *InFlag = nullptr) {
2737 assert((Subtarget.hasBWI()) && "Expected AVX512BW target!");
2738 assert(Subtarget.is32Bit() && "Expecting 32 bit target");
2739 assert(VA.getValVT() == MVT::v64i1 &&
2740        "Expecting first location of 64 bit width type");
2741 assert(NextVA.getValVT() == VA.getValVT() &&
2742        "The locations should have the same type");
2743 assert(VA.isRegLoc() && NextVA.isRegLoc() &&
2744        "The values should reside in two registers");
2745
2746 SDValue Lo, Hi;
2747 SDValue ArgValueLo, ArgValueHi;
2748
2749 MachineFunction &MF = DAG.getMachineFunction();
2750 const TargetRegisterClass *RC = &X86::GR32RegClass;
2751
2752 // Read a 32 bit value from the registers.
2753 if (nullptr == InFlag) {
2754 // When no physical register is present,
2755 // create an intermediate virtual register.
2756 unsigned Reg = MF.addLiveIn(VA.getLocReg(), RC);
2757 ArgValueLo = DAG.getCopyFromReg(Root, Dl, Reg, MVT::i32);
2758 Reg = MF.addLiveIn(NextVA.getLocReg(), RC);
2759 ArgValueHi = DAG.getCopyFromReg(Root, Dl, Reg, MVT::i32);
2760 } else {
2761 // When a physical register is available read the value from it and glue
2762 // the reads together.
2763 ArgValueLo =
2764 DAG.getCopyFromReg(Root, Dl, VA.getLocReg(), MVT::i32, *InFlag);
2765 *InFlag = ArgValueLo.getValue(2);
2766 ArgValueHi =
2767 DAG.getCopyFromReg(Root, Dl, NextVA.getLocReg(), MVT::i32, *InFlag);
2768 *InFlag = ArgValueHi.getValue(2);
2769 }
2770
2771 // Convert the i32 type into v32i1 type.
2772 Lo = DAG.getBitcast(MVT::v32i1, ArgValueLo);
2773
2774 // Convert the i32 type into v32i1 type.
2775 Hi = DAG.getBitcast(MVT::v32i1, ArgValueHi);
2776
2777 // Concatenate the two values together.
2778 return DAG.getNode(ISD::CONCAT_VECTORS, Dl, MVT::v64i1, Lo, Hi);
2779}
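// A standalone sketch (not from this file) of the reassembly getv64i1Argument
// performs: two 32-bit register values are joined back into one 64-bit mask,
// the scalar analogue of bitcasting each half to v32i1 and concatenating them.
// The helper name joinMask32 is hypothetical.
#include <cstdint>

static uint64_t joinMask32(uint32_t Lo, uint32_t Hi) {
  return (static_cast<uint64_t>(Hi) << 32) | Lo;  // Lo occupies bits 0..31
}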
2780
2781/// The function will lower a register of various sizes (8/16/32/64)
2782/// to a mask value of the expected size (v8i1/v16i1/v32i1/v64i1).
2783/// \returns a DAG node containing the operand after lowering to a mask type.
2784static SDValue lowerRegToMasks(const SDValue &ValArg, const EVT &ValVT,
2785 const EVT &ValLoc, const SDLoc &Dl,
2786 SelectionDAG &DAG) {
2787 SDValue ValReturned = ValArg;
2788
2789 if (ValVT == MVT::v1i1)
2790 return DAG.getNode(ISD::SCALAR_TO_VECTOR, Dl, MVT::v1i1, ValReturned);
2791
2792 if (ValVT == MVT::v64i1) {
2793 // On a 32 bit machine, this case is handled by getv64i1Argument.
2794 assert(ValLoc == MVT::i64 && "Expecting only i64 locations");
2795 // On a 64 bit machine, there is no need to truncate the value; only bitcast it.
2796 } else {
2797 MVT maskLen;
2798 switch (ValVT.getSimpleVT().SimpleTy) {
2799 case MVT::v8i1:
2800 maskLen = MVT::i8;
2801 break;
2802 case MVT::v16i1:
2803 maskLen = MVT::i16;
2804 break;
2805 case MVT::v32i1:
2806 maskLen = MVT::i32;
2807 break;
2808 default:
2809 llvm_unreachable("Expecting a vector of i1 types");
2810 }
2811
2812 ValReturned = DAG.getNode(ISD::TRUNCATE, Dl, maskLen, ValReturned);
2813 }
2814 return DAG.getBitcast(ValVT, ValReturned);
2815}
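// A standalone sketch (not from this file) of the width selection in
// lowerRegToMasks: the incoming integer location is truncated to as many bits
// as the mask has elements (v8i1 -> i8, v16i1 -> i16, v32i1 -> i32) before the
// final bitcast; 64-element masks need no truncation. The helper name is
// hypothetical.
#include <cstdint>

static uint64_t truncateToMaskWidth(uint64_t Loc, unsigned NumElts) {
  // NumElts is assumed to be 8, 16, 32 or 64 here.
  return NumElts >= 64 ? Loc : (Loc & ((uint64_t(1) << NumElts) - 1));
}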
2816
2817/// Lower the result values of a call into the
2818/// appropriate copies out of appropriate physical registers.
2819///
2820SDValue X86TargetLowering::LowerCallResult(
2821 SDValue Chain, SDValue InFlag, CallingConv::ID CallConv, bool isVarArg,
2822 const SmallVectorImpl<ISD::InputArg> &Ins, const SDLoc &dl,
2823 SelectionDAG &DAG, SmallVectorImpl<SDValue> &InVals,
2824 uint32_t *RegMask) const {
2825
2826 const TargetRegisterInfo *TRI = Subtarget.getRegisterInfo();
2827 // Assign locations to each value returned by this call.
2828 SmallVector<CCValAssign, 16> RVLocs;
2829 bool Is64Bit = Subtarget.is64Bit();
2830 CCState CCInfo(CallConv, isVarArg, DAG.getMachineFunction(), RVLocs,
2831 *DAG.getContext());
2832 CCInfo.AnalyzeCallResult(Ins, RetCC_X86);
2833
2834 // Copy all of the result registers out of their specified physreg.
2835 for (unsigned I = 0, InsIndex = 0, E = RVLocs.size(); I != E;
2836 ++I, ++InsIndex) {
2837 CCValAssign &VA = RVLocs[I];
2838 EVT CopyVT = VA.getLocVT();
2839
2840 // In some calling conventions we need to remove the used registers
2841 // from the register mask.
2842 if (RegMask) {
2843 for (MCSubRegIterator SubRegs(VA.getLocReg(), TRI, /*IncludeSelf=*/true);
2844 SubRegs.isValid(); ++SubRegs)
2845 RegMask[*SubRegs / 32] &= ~(1u << (*SubRegs % 32));
2846 }
2847
2848 // If this is x86-64, and we disabled SSE, we can't return FP values
2849 if ((CopyVT == MVT::f32 || CopyVT == MVT::f64 || CopyVT == MVT::f128) &&
2850 ((Is64Bit || Ins[InsIndex].Flags.isInReg()) && !Subtarget.hasSSE1())) {
2851 errorUnsupported(DAG, dl, "SSE register return with SSE disabled");
2852 VA.convertToReg(X86::FP0); // Set reg to FP0, avoid hitting asserts.
2853 } else if (CopyVT == MVT::f64 &&
2854 (Is64Bit && !Subtarget.hasSSE2())) {
2855 errorUnsupported(DAG, dl, "SSE2 register return with SSE2 disabled");
2856 VA.convertToReg(X86::FP0); // Set reg to FP0, avoid hitting asserts.
2857 }
2858
2859 // If we prefer to use the value in xmm registers, copy it out as f80 and
2860 // use a truncate to move it from fp stack reg to xmm reg.
2861 bool RoundAfterCopy = false;
2862 if ((VA.getLocReg() == X86::FP0 || VA.getLocReg() == X86::FP1) &&
2863 isScalarFPTypeInSSEReg(VA.getValVT())) {
2864 if (!Subtarget.hasX87())
2865 report_fatal_error("X87 register return with X87 disabled");
2866 CopyVT = MVT::f80;
2867 RoundAfterCopy = (CopyVT != VA.getLocVT());
2868 }
2869
2870 SDValue Val;
2871 if (VA.needsCustom()) {
2872 assert(VA.getValVT() == MVT::v64i1 &&
2873        "Currently the only custom case is when we split v64i1 to 2 regs");
2874 Val =
2875 getv64i1Argument(VA, RVLocs[++I], Chain, DAG, dl, Subtarget, &InFlag);
2876 } else {
2877 Chain = DAG.getCopyFromReg(Chain, dl, VA.getLocReg(), CopyVT, InFlag)
2878 .getValue(1);
2879 Val = Chain.getValue(0);
2880 InFlag = Chain.getValue(2);
2881 }
2882
2883 if (RoundAfterCopy)
2884 Val = DAG.getNode(ISD::FP_ROUND, dl, VA.getValVT(), Val,
2885 // This truncation won't change the value.
2886 DAG.getIntPtrConstant(1, dl));
2887
2888 if (VA.isExtInLoc() && (VA.getValVT().getScalarType() == MVT::i1)) {
2889 if (VA.getValVT().isVector() &&
2890 ((VA.getLocVT() == MVT::i64) || (VA.getLocVT() == MVT::i32) ||
2891 (VA.getLocVT() == MVT::i16) || (VA.getLocVT() == MVT::i8))) {
2892 // promoting a mask type (v*i1) into a register of type i64/i32/i16/i8
2893 Val = lowerRegToMasks(Val, VA.getValVT(), VA.getLocVT(), dl, DAG);
2894 } else
2895 Val = DAG.getNode(ISD::TRUNCATE, dl, VA.getValVT(), Val);
2896 }
2897
2898 InVals.push_back(Val);
2899 }
2900
2901 return Chain;
2902}
2903
2904//===----------------------------------------------------------------------===//
2905// C & StdCall & Fast Calling Convention implementation
2906//===----------------------------------------------------------------------===//
2907// The StdCall calling convention is the standard for many Windows API
2908// routines. It differs from the C calling convention just a little: the
2909// callee cleans up the stack rather than the caller, and symbols are
2910// decorated in a special way. It doesn't support any vector arguments.
2911// For info on fast calling convention see Fast Calling Convention (tail call)
2912// implementation LowerX86_32FastCCCallTo.
2913
2914/// CallIsStructReturn - Determines whether a call uses struct return
2915/// semantics.
2916enum StructReturnType {
2917 NotStructReturn,
2918 RegStructReturn,
2919 StackStructReturn
2920};
2921static StructReturnType
2922callIsStructReturn(ArrayRef<ISD::OutputArg> Outs, bool IsMCU) {
2923 if (Outs.empty())
2924 return NotStructReturn;
2925
2926 const ISD::ArgFlagsTy &Flags = Outs[0].Flags;
2927 if (!Flags.isSRet())
2928 return NotStructReturn;
2929 if (Flags.isInReg() || IsMCU)
2930 return RegStructReturn;
2931 return StackStructReturn;
2932}
2933
2934/// Determines whether a function uses struct return semantics.
2935static StructReturnType
2936argsAreStructReturn(ArrayRef<ISD::InputArg> Ins, bool IsMCU) {
2937 if (Ins.empty())
2938 return NotStructReturn;
2939
2940 const ISD::ArgFlagsTy &Flags = Ins[0].Flags;
2941 if (!Flags.isSRet())
2942 return NotStructReturn;
2943 if (Flags.isInReg() || IsMCU)
2944 return RegStructReturn;
2945 return StackStructReturn;
2946}
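// A standalone sketch (not from this file) of the classification done by
// callIsStructReturn/argsAreStructReturn, with a plain struct standing in for
// ISD::ArgFlagsTy: no sret flag means no struct return, sret combined with
// inreg (or an MCU target) means a register, otherwise the stack. The real
// functions additionally return NotStructReturn when the argument list is
// empty. All names below are hypothetical.
enum class SRetKind { None, Reg, Stack };

struct FlagsSketch { bool IsSRet; bool IsInReg; };

static SRetKind classifySRet(const FlagsSketch &Flags, bool IsMCU) {
  if (!Flags.IsSRet)
    return SRetKind::None;
  return (Flags.IsInReg || IsMCU) ? SRetKind::Reg : SRetKind::Stack;
}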
2947
2948/// Make a copy of an aggregate at address specified by "Src" to address
2949/// "Dst" with size and alignment information specified by the specific
2950/// parameter attribute. The copy will be passed as a byval function parameter.
2951static SDValue CreateCopyOfByValArgument(SDValue Src, SDValue Dst,
2952 SDValue Chain, ISD::ArgFlagsTy Flags,
2953 SelectionDAG &DAG, const SDLoc &dl) {
2954 SDValue SizeNode = DAG.getConstant(Flags.getByValSize(), dl, MVT::i32);
2955
2956 return DAG.getMemcpy(Chain, dl, Dst, Src, SizeNode, Flags.getByValAlign(),
2957 /*isVolatile*/false, /*AlwaysInline=*/true,
2958 /*isTailCall*/false,
2959 MachinePointerInfo(), MachinePointerInfo());
2960}
2961
2962/// Return true if the calling convention is one that we can guarantee TCO for.
2963static bool canGuaranteeTCO(CallingConv::ID CC) {
2964 return (CC == CallingConv::Fast || CC == CallingConv::GHC ||
2965 CC == CallingConv::X86_RegCall || CC == CallingConv::HiPE ||
2966 CC == CallingConv::HHVM || CC == CallingConv::Tail);
2967}
2968
2969/// Return true if we might ever do TCO for calls with this calling convention.
2970static bool mayTailCallThisCC(CallingConv::ID CC) {
2971 switch (CC) {
2972 // C calling conventions:
2973 case CallingConv::C:
2974 case CallingConv::Win64:
2975 case CallingConv::X86_64_SysV:
2976 // Callee pop conventions:
2977 case CallingConv::X86_ThisCall:
2978 case CallingConv::X86_StdCall:
2979 case CallingConv::X86_VectorCall:
2980 case CallingConv::X86_FastCall:
2981 // Swift:
2982 case CallingConv::Swift:
2983 return true;
2984 default:
2985 return canGuaranteeTCO(CC);
2986 }
2987}
2988
2989/// Return true if the function is being made into a tailcall target by
2990/// changing its ABI.
2991static bool shouldGuaranteeTCO(CallingConv::ID CC, bool GuaranteedTailCallOpt) {
2992 return (GuaranteedTailCallOpt && canGuaranteeTCO(CC)) || CC == CallingConv::Tail;
2993}
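// A standalone sketch (not from this file) showing how the two predicates above
// compose: guaranteed TCO is forced either by -tailcallopt on a convention that
// canGuaranteeTCO accepts, or unconditionally by the 'tail' convention. A small
// enum stands in for CallingConv::ID; all names below are hypothetical.
enum class CCSketch { C, Fast, GHC, HiPE, HHVM, RegCall, Tail };

static bool canGuaranteeTCOSketch(CCSketch CC) {
  return CC == CCSketch::Fast || CC == CCSketch::GHC || CC == CCSketch::HiPE ||
         CC == CCSketch::HHVM || CC == CCSketch::RegCall || CC == CCSketch::Tail;
}

static bool shouldGuaranteeTCOSketch(CCSketch CC, bool GuaranteedTailCallOpt) {
  return (GuaranteedTailCallOpt && canGuaranteeTCOSketch(CC)) ||
         CC == CCSketch::Tail;
}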
2994
2995bool X86TargetLowering::mayBeEmittedAsTailCall(const CallInst *CI) const {
2996 auto Attr =
2997 CI->getParent()->getParent()->getFnAttribute("disable-tail-calls");
2998 if (!CI->isTailCall() || Attr.getValueAsString() == "true")
2999 return false;
3000
3001 ImmutableCallSite CS(CI);
3002 CallingConv::ID CalleeCC = CS.getCallingConv();
3003 if (!mayTailCallThisCC(CalleeCC))
3004 return false;
3005
3006 return true;
3007}
3008
3009SDValue
3010X86TargetLowering::LowerMemArgument(SDValue Chain, CallingConv::ID CallConv,
3011 const SmallVectorImpl<ISD::InputArg> &Ins,
3012 const SDLoc &dl, SelectionDAG &DAG,
3013 const CCValAssign &VA,
3014 MachineFrameInfo &MFI, unsigned i) const {
3015 // Create the nodes corresponding to a load from this parameter slot.
3016 ISD::ArgFlagsTy Flags = Ins[i].Flags;
3017 bool AlwaysUseMutable = shouldGuaranteeTCO(
3018 CallConv, DAG.getTarget().Options.GuaranteedTailCallOpt);
3019 bool isImmutable = !AlwaysUseMutable && !Flags.isByVal();
3020 EVT ValVT;
3021 MVT PtrVT = getPointerTy(DAG.getDataLayout());
3022
3023 // If value is passed by pointer we have address passed instead of the value
3024 // itself. No need to extend if the mask value and location share the same
3025 // absolute size.
3026 bool ExtendedInMem =
3027 VA.isExtInLoc() && VA.getValVT().getScalarType() == MVT::i1 &&
3028 VA.getValVT().getSizeInBits() != VA.getLocVT().getSizeInBits();
3029
3030 if (VA.getLocInfo() == CCValAssign::Indirect || ExtendedInMem)
3031 ValVT = VA.getLocVT();
3032 else
3033 ValVT = VA.getValVT();
3034
3035 // FIXME: For now, all byval parameter objects are marked mutable. This can be
3036 // changed with more analysis.
3037 // In case of tail call optimization, mark all arguments mutable, since they
3038 // could be overwritten by the lowering of arguments in case of a tail call.
3039 if (Flags.isByVal()) {
3040 unsigned Bytes = Flags.getByValSize();
3041 if (Bytes == 0) Bytes = 1; // Don't create zero-sized stack objects.
3042
3043 // FIXME: For now, all byval parameter objects are marked as aliasing. This
3044 // can be improved with deeper analysis.
3045 int FI = MFI.CreateFixedObject(Bytes, VA.getLocMemOffset(), isImmutable,
3046 /*isAliased=*/true);
3047 return DAG.getFrameIndex(FI, PtrVT);
3048 }
3049
3050 // This is an argument in memory. We might be able to perform copy elision.
3051 // If the argument is passed directly in memory without any extension, then we
3052 // can perform copy elision. Large vector types, for example, may be passed
3053 // indirectly by pointer.
3054 if (Flags.isCopyElisionCandidate() &&
3055 VA.getLocInfo() != CCValAssign::Indirect && !ExtendedInMem) {
3056 EVT ArgVT = Ins[i].ArgVT;
3057 SDValue PartAddr;
3058 if (Ins[i].PartOffset == 0) {
3059 // If this is a one-part value or the first part of a multi-part value,
3060 // create a stack object for the entire argument value type and return a
3061 // load from our portion of it. This assumes that if the first part of an
3062 // argument is in memory, the rest will also be in memory.
3063 int FI = MFI.CreateFixedObject(ArgVT.getStoreSize(), VA.getLocMemOffset(),
3064 /*IsImmutable=*/false);
3065 PartAddr = DAG.getFrameIndex(FI, PtrVT);
3066 return DAG.getLoad(
3067 ValVT, dl, Chain, PartAddr,
3068 MachinePointerInfo::getFixedStack(DAG.getMachineFunction(), FI));
3069 } else {
3070 // This is not the first piece of an argument in memory. See if there is
3071 // already a fixed stack object including this offset. If so, assume it
3072 // was created by the PartOffset == 0 branch above and create a load from
3073 // the appropriate offset into it.
3074 int64_t PartBegin = VA.getLocMemOffset();
3075 int64_t PartEnd = PartBegin + ValVT.getSizeInBits() / 8;
3076 int FI = MFI.getObjectIndexBegin();
3077 for (; MFI.isFixedObjectIndex(FI); ++FI) {
3078 int64_t ObjBegin = MFI.getObjectOffset(FI);
3079 int64_t ObjEnd = ObjBegin + MFI.getObjectSize(FI);
3080 if (ObjBegin <= PartBegin && PartEnd <= ObjEnd)
3081 break;
3082 }
3083 if (MFI.isFixedObjectIndex(FI)) {
3084 SDValue Addr =
3085 DAG.getNode(ISD::ADD, dl, PtrVT, DAG.getFrameIndex(FI, PtrVT),
3086 DAG.getIntPtrConstant(Ins[i].PartOffset, dl));
3087 return DAG.getLoad(
3088 ValVT, dl, Chain, Addr,
3089 MachinePointerInfo::getFixedStack(DAG.getMachineFunction(), FI,
3090 Ins[i].PartOffset));
3091 }
3092 }
3093 }
3094
3095 int FI = MFI.CreateFixedObject(ValVT.getSizeInBits() / 8,
3096 VA.getLocMemOffset(), isImmutable);
3097
3098 // Set SExt or ZExt flag.
3099 if (VA.getLocInfo() == CCValAssign::ZExt) {
3100 MFI.setObjectZExt(FI, true);
3101 } else if (VA.getLocInfo() == CCValAssign::SExt) {
3102 MFI.setObjectSExt(FI, true);
3103 }
3104
3105 SDValue FIN = DAG.getFrameIndex(FI, PtrVT);
3106 SDValue Val = DAG.getLoad(
3107 ValVT, dl, Chain, FIN,
3108 MachinePointerInfo::getFixedStack(DAG.getMachineFunction(), FI));
3109 return ExtendedInMem
3110 ? (VA.getValVT().isVector()
3111 ? DAG.getNode(ISD::SCALAR_TO_VECTOR, dl, VA.getValVT(), Val)
3112 : DAG.getNode(ISD::TRUNCATE, dl, VA.getValVT(), Val))
3113 : Val;
3114}
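// A standalone sketch (not from this file) of the interval test used in the
// copy-elision path of LowerMemArgument above: a later piece of a multi-part
// argument may reuse an existing fixed stack object only if its byte range
// [PartBegin, PartEnd) lies entirely inside the object's range
// [ObjBegin, ObjEnd). The helper name is hypothetical.
#include <cstdint>

static bool pieceFitsInObject(int64_t PartBegin, int64_t PartEnd,
                              int64_t ObjBegin, int64_t ObjEnd) {
  return ObjBegin <= PartBegin && PartEnd <= ObjEnd;
}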
3115
3116// FIXME: Get this from tablegen.
3117static ArrayRef<MCPhysReg> get64BitArgumentGPRs(CallingConv::ID CallConv,
3118 const X86Subtarget &Subtarget) {
3119 assert(Subtarget.is64Bit());
3120
3121 if (Subtarget.isCallingConvWin64(CallConv)) {
3122 static const MCPhysReg GPR64ArgRegsWin64[] = {
3123 X86::RCX, X86::RDX, X86::R8, X86::R9
3124 };
3125 return makeArrayRef(std::begin(GPR64ArgRegsWin64), std::end(GPR64ArgRegsWin64));
3126 }
3127
3128 static const MCPhysReg GPR64ArgRegs64Bit[] = {
3129 X86::RDI, X86::RSI, X86::RDX, X86::RCX, X86::R8, X86::R9
3130 };
3131 return makeArrayRef(std::begin(GPR64ArgRegs64Bit), std::end(GPR64ArgRegs64Bit));
3132}
3133
3134// FIXME: Get this from tablegen.
3135static ArrayRef<MCPhysReg> get64BitArgumentXMMs(MachineFunction &MF,
3136 CallingConv::ID CallConv,
3137 const X86Subtarget &Subtarget) {
3138 assert(Subtarget.is64Bit());
3139 if (Subtarget.isCallingConvWin64(CallConv)) {
3140 // The XMM registers which might contain var arg parameters are shadowed
3141 // in their paired GPR. So we only need to save the GPR to their home
3142 // slots.
3143 // TODO: __vectorcall will change this.
3144 return None;
3145 }
3146
3147 const Function &F = MF.getFunction();
3148 bool NoImplicitFloatOps = F.hasFnAttribute(Attribute::NoImplicitFloat);
3149 bool isSoftFloat = Subtarget.useSoftFloat();
3150 assert(!(isSoftFloat && NoImplicitFloatOps) &&
3151        "SSE register cannot be used when SSE is disabled!");
3152 if (isSoftFloat || NoImplicitFloatOps || !Subtarget.hasSSE1())
3153 // Kernel mode asks for SSE to be disabled, so there are no XMM argument
3154 // registers.
3155 return None;
3156
3157 static const MCPhysReg XMMArgRegs64Bit[] = {
3158 X86::XMM0, X86::XMM1, X86::XMM2, X86::XMM3,
3159 X86::XMM4, X86::XMM5, X86::XMM6, X86::XMM7
3160 };
3161 return makeArrayRef(std::begin(XMMArgRegs64Bit), std::end(XMMArgRegs64Bit));
3162}
3163
3164#ifndef NDEBUG
3165static bool isSortedByValueNo(ArrayRef<CCValAssign> ArgLocs) {
3166 return std::is_sorted(ArgLocs.begin(), ArgLocs.end(),
3167 [](const CCValAssign &A, const CCValAssign &B) -> bool {
3168 return A.getValNo() < B.getValNo();
3169 });
3170}
3171#endif
3172
3173SDValue X86TargetLowering::LowerFormalArguments(
3174 SDValue Chain, CallingConv::ID CallConv, bool isVarArg,
3175 const SmallVectorImpl<ISD::InputArg> &Ins, const SDLoc &dl,
3176 SelectionDAG &DAG, SmallVectorImpl<SDValue> &InVals) const {
3177 MachineFunction &MF = DAG.getMachineFunction();
3178 X86MachineFunctionInfo *FuncInfo = MF.getInfo<X86MachineFunctionInfo>();
3179 const TargetFrameLowering &TFI = *Subtarget.getFrameLowering();
3180
3181 const Function &F = MF.getFunction();
3182 if (F.hasExternalLinkage() && Subtarget.isTargetCygMing() &&
3183 F.getName() == "main")
3184 FuncInfo->setForceFramePointer(true);
3185
3186 MachineFrameInfo &MFI = MF.getFrameInfo();
3187 bool Is64Bit = Subtarget.is64Bit();
3188 bool IsWin64 = Subtarget.isCallingConvWin64(CallConv);
3189
3190 assert(
3191     !(isVarArg && canGuaranteeTCO(CallConv)) &&
3192     "Var args not supported with calling conv' regcall, fastcc, ghc or hipe");
3193
3194 // Assign locations to all of the incoming arguments.
3195 SmallVector<CCValAssign, 16> ArgLocs;
3196 CCState CCInfo(CallConv, isVarArg, MF, ArgLocs, *DAG.getContext());
3197
3198 // Allocate shadow area for Win64.
3199 if (IsWin64)
3200 CCInfo.AllocateStack(32, 8);
3201
3202 CCInfo.AnalyzeArguments(Ins, CC_X86);
3203
3204 // In vectorcall calling convention a second pass is required for the HVA
3205 // types.
3206 if (CallingConv::X86_VectorCall == CallConv) {
3207 CCInfo.AnalyzeArgumentsSecondPass(Ins, CC_X86);
3208 }
3209
3210 // The next loop assumes that the locations are in the same order of the
3211 // input arguments.
3212 assert(isSortedByValueNo(ArgLocs) &&
3213        "Argument Location list must be sorted before lowering");
3214
3215 SDValue ArgValue;
3216 for (unsigned I = 0, InsIndex = 0, E = ArgLocs.size(); I != E;
3217 ++I, ++InsIndex) {
3218 assert(InsIndex < Ins.size() && "Invalid Ins index");
3219 CCValAssign &VA = ArgLocs[I];
3220
3221 if (VA.isRegLoc()) {
3222 EVT RegVT = VA.getLocVT();
3223 if (VA.needsCustom()) {
3224 assert(
3225     VA.getValVT() == MVT::v64i1 &&
3226     "Currently the only custom case is when we split v64i1 to 2 regs");
3227
3228 // v64i1 values, in regcall calling convention, that are
3229 // compiled to 32 bit arch, are split up into two registers.
3230 ArgValue =
3231 getv64i1Argument(VA, ArgLocs[++I], Chain, DAG, dl, Subtarget);
3232 } else {
3233 const TargetRegisterClass *RC;
3234 if (RegVT == MVT::i8)
3235 RC = &X86::GR8RegClass;
3236 else if (RegVT == MVT::i16)
3237 RC = &X86::GR16RegClass;
3238 else if (RegVT == MVT::i32)
3239 RC = &X86::GR32RegClass;
3240 else if (Is64Bit && RegVT == MVT::i64)
3241 RC = &X86::GR64RegClass;
3242 else if (RegVT == MVT::f32)
3243 RC = Subtarget.hasAVX512() ? &X86::FR32XRegClass : &X86::FR32RegClass;
3244 else if (RegVT == MVT::f64)
3245 RC = Subtarget.hasAVX512() ? &X86::FR64XRegClass : &X86::FR64RegClass;
3246 else if (RegVT == MVT::f80)
3247 RC = &X86::RFP80RegClass;
3248 else if (RegVT == MVT::f128)
3249 RC = &X86::VR128RegClass;
3250 else if (RegVT.is512BitVector())
3251 RC = &X86::VR512RegClass;
3252 else if (RegVT.is256BitVector())
3253 RC = Subtarget.hasVLX() ? &X86::VR256XRegClass : &X86::VR256RegClass;
3254 else if (RegVT.is128BitVector())
3255 RC = Subtarget.hasVLX() ? &X86::VR128XRegClass : &X86::VR128RegClass;
3256 else if (RegVT == MVT::x86mmx)
3257 RC = &X86::VR64RegClass;
3258 else if (RegVT == MVT::v1i1)
3259 RC = &X86::VK1RegClass;
3260 else if (RegVT == MVT::v8i1)
3261 RC = &X86::VK8RegClass;
3262 else if (RegVT == MVT::v16i1)
3263 RC = &X86::VK16RegClass;
3264 else if (RegVT == MVT::v32i1)
3265 RC = &X86::VK32RegClass;
3266 else if (RegVT == MVT::v64i1)
3267 RC = &X86::VK64RegClass;
3268 else
3269 llvm_unreachable("Unknown argument type!");
3270
3271 unsigned Reg = MF.addLiveIn(VA.getLocReg(), RC);
3272 ArgValue = DAG.getCopyFromReg(Chain, dl, Reg, RegVT);
3273 }
3274
3275 // If this is an 8 or 16-bit value, it is really passed promoted to 32
3276 // bits. Insert an assert[sz]ext to capture this, then truncate to the
3277 // right size.
3278 if (VA.getLocInfo() == CCValAssign::SExt)
3279 ArgValue = DAG.getNode(ISD::AssertSext, dl, RegVT, ArgValue,
3280 DAG.getValueType(VA.getValVT()));
3281 else if (VA.getLocInfo() == CCValAssign::ZExt)
3282 ArgValue = DAG.getNode(ISD::AssertZext, dl, RegVT, ArgValue,
3283 DAG.getValueType(VA.getValVT()));
3284 else if (VA.getLocInfo() == CCValAssign::BCvt)
3285 ArgValue = DAG.getBitcast(VA.getValVT(), ArgValue);
3286
3287 if (VA.isExtInLoc()) {
3288 // Handle MMX values passed in XMM regs.
3289 if (RegVT.isVector() && VA.getValVT().getScalarType() != MVT::i1)
3290 ArgValue = DAG.getNode(X86ISD::MOVDQ2Q, dl, VA.getValVT(), ArgValue);
3291 else if (VA.getValVT().isVector() &&
3292 VA.getValVT().getScalarType() == MVT::i1 &&
3293 ((VA.getLocVT() == MVT::i64) || (VA.getLocVT() == MVT::i32) ||
3294 (VA.getLocVT() == MVT::i16) || (VA.getLocVT() == MVT::i8))) {
3295 // Promoting a mask type (v*i1) into a register of type i64/i32/i16/i8
3296 ArgValue = lowerRegToMasks(ArgValue, VA.getValVT(), RegVT, dl, DAG);
3297 } else
3298 ArgValue = DAG.getNode(ISD::TRUNCATE, dl, VA.getValVT(), ArgValue);
3299 }
3300 } else {
3301 assert(VA.isMemLoc());
3302 ArgValue =
3303 LowerMemArgument(Chain, CallConv, Ins, dl, DAG, VA, MFI, InsIndex);
3304 }
3305
3306 // If value is passed via pointer - do a load.
3307 if (VA.getLocInfo() == CCValAssign::Indirect && !Ins[I].Flags.isByVal())
3308 ArgValue =
3309 DAG.getLoad(VA.getValVT(), dl, Chain, ArgValue, MachinePointerInfo());
3310
3311 InVals.push_back(ArgValue);
3312 }
3313
3314 for (unsigned I = 0, E = Ins.size(); I != E; ++I) {
3315 // Swift calling convention does not require we copy the sret argument
3316 // into %rax/%eax for the return. We don't set SRetReturnReg for Swift.
3317 if (CallConv == CallingConv::Swift)
3318 continue;
3319
3320 // All x86 ABIs require that for returning structs by value we copy the
3321 // sret argument into %rax/%eax (depending on ABI) for the return. Save
3322 // the argument into a virtual register so that we can access it from the
3323 // return points.
3324 if (Ins[I].Flags.isSRet()) {
3325 unsigned Reg = FuncInfo->getSRetReturnReg();
3326 if (!Reg) {
3327 MVT PtrTy = getPointerTy(DAG.getDataLayout());
3328 Reg = MF.getRegInfo().createVirtualRegister(getRegClassFor(PtrTy));
3329 FuncInfo->setSRetReturnReg(Reg);
3330 }
3331 SDValue Copy = DAG.getCopyToReg(DAG.getEntryNode(), dl, Reg, InVals[I]);
3332 Chain = DAG.getNode(ISD::TokenFactor, dl, MVT::Other, Copy, Chain);
3333 break;
3334 }
3335 }
3336
3337 unsigned StackSize = CCInfo.getNextStackOffset();
3338 // Align stack specially for tail calls.
3339 if (shouldGuaranteeTCO(CallConv,
3340 MF.getTarget().Options.GuaranteedTailCallOpt))
3341 StackSize = GetAlignedArgumentStackSize(StackSize, DAG);
3342
3343 // If the function takes variable number of arguments, make a frame index for
3344 // the start of the first vararg value... for expansion of llvm.va_start. We
3345 // can skip this if there are no va_start calls.
3346 if (MFI.hasVAStart() &&
3347 (Is64Bit || (CallConv != CallingConv::X86_FastCall &&
3348 CallConv != CallingConv::X86_ThisCall))) {
3349 FuncInfo->setVarArgsFrameIndex(MFI.CreateFixedObject(1, StackSize, true));
3350 }
3351
3352 // Figure out if XMM registers are in use.
3353 assert(!(Subtarget.useSoftFloat() &&
3354          F.hasFnAttribute(Attribute::NoImplicitFloat)) &&
3355        "SSE register cannot be used when SSE is disabled!");
3356
3357 // 64-bit calling conventions support varargs and register parameters, so we
3358 // have to do extra work to spill them in the prologue.
3359 if (Is64Bit && isVarArg && MFI.hasVAStart()) {
3360 // Find the first unallocated argument registers.
3361 ArrayRef<MCPhysReg> ArgGPRs = get64BitArgumentGPRs(CallConv, Subtarget);
3362 ArrayRef<MCPhysReg> ArgXMMs = get64BitArgumentXMMs(MF, CallConv, Subtarget);
3363 unsigned NumIntRegs = CCInfo.getFirstUnallocated(ArgGPRs);
3364 unsigned NumXMMRegs = CCInfo.getFirstUnallocated(ArgXMMs);
3365 assert(!(NumXMMRegs && !Subtarget.hasSSE1()) &&
3366        "SSE register cannot be used when SSE is disabled!");
3367
3368 // Gather all the live in physical registers.
3369 SmallVector<SDValue, 6> LiveGPRs;
3370 SmallVector<SDValue, 8> LiveXMMRegs;
3371 SDValue ALVal;
3372 for (MCPhysReg Reg : ArgGPRs.slice(NumIntRegs)) {
3373 unsigned GPR = MF.addLiveIn(Reg, &X86::GR64RegClass);
3374 LiveGPRs.push_back(
3375 DAG.getCopyFromReg(Chain, dl, GPR, MVT::i64));
3376 }
3377 if (!ArgXMMs.empty()) {
3378 unsigned AL = MF.addLiveIn(X86::AL, &X86::GR8RegClass);
3379 ALVal = DAG.getCopyFromReg(Chain, dl, AL, MVT::i8);
3380 for (MCPhysReg Reg : ArgXMMs.slice(NumXMMRegs)) {
3381 unsigned XMMReg = MF.addLiveIn(Reg, &X86::VR128RegClass);
3382 LiveXMMRegs.push_back(
3383 DAG.getCopyFromReg(Chain, dl, XMMReg, MVT::v4f32));
3384 }
3385 }
3386
3387 if (IsWin64) {
3388 // Get to the caller-allocated home save location. Add 8 to account
3389 // for the return address.
3390 int HomeOffset = TFI.getOffsetOfLocalArea() + 8;
3391 FuncInfo->setRegSaveFrameIndex(
3392 MFI.CreateFixedObject(1, NumIntRegs * 8 + HomeOffset, false));
3393 // Fixup to set vararg frame on shadow area (4 x i64).
3394 if (NumIntRegs < 4)
3395 FuncInfo->setVarArgsFrameIndex(FuncInfo->getRegSaveFrameIndex());
3396 } else {
3397 // For X86-64, if there are vararg parameters that are passed via
3398 // registers, then we must store them to their spots on the stack so
3399 // they may be loaded by dereferencing the result of va_next.
3400 FuncInfo->setVarArgsGPOffset(NumIntRegs * 8);
3401 FuncInfo->setVarArgsFPOffset(ArgGPRs.size() * 8 + NumXMMRegs * 16);
3402 FuncInfo->setRegSaveFrameIndex(MFI.CreateStackObject(
3403 ArgGPRs.size() * 8 + ArgXMMs.size() * 16, 16, false));
3404 }
3405
3406 // Store the integer parameter registers.
3407 SmallVector<SDValue, 8> MemOps;
3408 SDValue RSFIN = DAG.getFrameIndex(FuncInfo->getRegSaveFrameIndex(),
3409 getPointerTy(DAG.getDataLayout()));
3410 unsigned Offset = FuncInfo->getVarArgsGPOffset();
3411 for (SDValue Val : LiveGPRs) {
3412 SDValue FIN = DAG.getNode(ISD::ADD, dl, getPointerTy(DAG.getDataLayout()),
3413 RSFIN, DAG.getIntPtrConstant(Offset, dl));
3414 SDValue Store =
3415 DAG.getStore(Val.getValue(1), dl, Val, FIN,
3416 MachinePointerInfo::getFixedStack(
3417 DAG.getMachineFunction(),
3418 FuncInfo->getRegSaveFrameIndex(), Offset));
3419 MemOps.push_back(Store);
3420 Offset += 8;
3421 }
3422
3423 if (!ArgXMMs.empty() && NumXMMRegs != ArgXMMs.size()) {
3424 // Now store the XMM (fp + vector) parameter registers.
3425 SmallVector<SDValue, 12> SaveXMMOps;
3426 SaveXMMOps.push_back(Chain);
3427 SaveXMMOps.push_back(ALVal);
3428 SaveXMMOps.push_back(DAG.getIntPtrConstant(
3429 FuncInfo->getRegSaveFrameIndex(), dl));
3430 SaveXMMOps.push_back(DAG.getIntPtrConstant(
3431 FuncInfo->getVarArgsFPOffset(), dl));
3432 SaveXMMOps.insert(SaveXMMOps.end(), LiveXMMRegs.begin(),
3433 LiveXMMRegs.end());
3434 MemOps.push_back(DAG.getNode(X86ISD::VASTART_SAVE_XMM_REGS, dl,
3435 MVT::Other, SaveXMMOps));
3436 }
3437
3438 if (!MemOps.empty())
3439 Chain = DAG.getNode(ISD::TokenFactor, dl, MVT::Other, MemOps);
3440 }
3441
3442 if (isVarArg && MFI.hasMustTailInVarArgFunc()) {
3443 // Find the largest legal vector type.
3444 MVT VecVT = MVT::Other;
3445 // FIXME: Only some x86_32 calling conventions support AVX512.
3446 if (Subtarget.useAVX512Regs() &&
3447 (Is64Bit || (CallConv == CallingConv::X86_VectorCall ||
3448 CallConv == CallingConv::Intel_OCL_BI)))
3449 VecVT = MVT::v16f32;
3450 else if (Subtarget.hasAVX())
3451 VecVT = MVT::v8f32;
3452 else if (Subtarget.hasSSE2())
3453 VecVT = MVT::v4f32;
3454
3455 // We forward some GPRs and some vector types.
3456 SmallVector<MVT, 2> RegParmTypes;
3457 MVT IntVT = Is64Bit ? MVT::i64 : MVT::i32;
3458 RegParmTypes.push_back(IntVT);
3459 if (VecVT != MVT::Other)
3460 RegParmTypes.push_back(VecVT);
3461
3462 // Compute the set of forwarded registers. The rest are scratch.
3463 SmallVectorImpl<ForwardedRegister> &Forwards =
3464 FuncInfo->getForwardedMustTailRegParms();
3465 CCInfo.analyzeMustTailForwardedRegisters(Forwards, RegParmTypes, CC_X86);
3466
3467 // Conservatively forward AL on x86_64, since it might be used for varargs.
3468 if (Is64Bit && !CCInfo.isAllocated(X86::AL)) {
3469 unsigned ALVReg = MF.addLiveIn(X86::AL, &X86::GR8RegClass);
3470 Forwards.push_back(ForwardedRegister(ALVReg, X86::AL, MVT::i8));
3471 }
3472
3473 // Copy all forwards from physical to virtual registers.
3474 for (ForwardedRegister &FR : Forwards) {
3475 // FIXME: Can we use a less constrained schedule?
3476 SDValue RegVal = DAG.getCopyFromReg(Chain, dl, FR.VReg, FR.VT);
3477 FR.VReg = MF.getRegInfo().createVirtualRegister(getRegClassFor(FR.VT));
3478 Chain = DAG.getCopyToReg(Chain, dl, FR.VReg, RegVal);
3479 }
3480 }
3481
3482 // Some CCs need callee pop.
3483 if (X86::isCalleePop(CallConv, Is64Bit, isVarArg,
3484 MF.getTarget().Options.GuaranteedTailCallOpt)) {
3485 FuncInfo->setBytesToPopOnReturn(StackSize); // Callee pops everything.
3486 } else if (CallConv == CallingConv::X86_INTR && Ins.size() == 2) {
3487 // X86 interrupts must pop the error code (and the alignment padding) if
3488 // present.
3489 FuncInfo->setBytesToPopOnReturn(Is64Bit ? 16 : 4);
3490 } else {
3491 FuncInfo->setBytesToPopOnReturn(0); // Callee pops nothing.
3492 // If this is an sret function, the return should pop the hidden pointer.
3493 if (!Is64Bit && !canGuaranteeTCO(CallConv) &&
3494 !Subtarget.getTargetTriple().isOSMSVCRT() &&
3495 argsAreStructReturn(Ins, Subtarget.isTargetMCU()) == StackStructReturn)
3496 FuncInfo->setBytesToPopOnReturn(4);
3497 }
3498
3499 if (!Is64Bit) {
3500 // RegSaveFrameIndex is X86-64 only.
3501 FuncInfo->setRegSaveFrameIndex(0xAAAAAAA);
3502 if (CallConv == CallingConv::X86_FastCall ||
3503 CallConv == CallingConv::X86_ThisCall)
3504 // fastcc functions can't have varargs.
3505 FuncInfo->setVarArgsFrameIndex(0xAAAAAAA);
3506 }
3507
3508 FuncInfo->setArgumentStackSize(StackSize);
3509
3510 if (WinEHFuncInfo *EHInfo = MF.getWinEHFuncInfo()) {
3511 EHPersonality Personality = classifyEHPersonality(F.getPersonalityFn());
3512 if (Personality == EHPersonality::CoreCLR) {
3513 assert(Is64Bit);
3514 // TODO: Add a mechanism to frame lowering that will allow us to indicate
3515 // that we'd prefer this slot be allocated towards the bottom of the frame
3516 // (i.e. near the stack pointer after allocating the frame). Every
3517 // funclet needs a copy of this slot in its (mostly empty) frame, and the
3518 // offset from the bottom of this and each funclet's frame must be the
3519 // same, so the size of funclets' (mostly empty) frames is dictated by
3520 // how far this slot is from the bottom (since they allocate just enough
3521 // space to accommodate holding this slot at the correct offset).
3522 int PSPSymFI = MFI.CreateStackObject(8, 8, /*isSS=*/false);
3523 EHInfo->PSPSymFrameIdx = PSPSymFI;
3524 }
3525 }
3526
3527 if (CallConv == CallingConv::X86_RegCall ||
3528 F.hasFnAttribute("no_caller_saved_registers")) {
3529 MachineRegisterInfo &MRI = MF.getRegInfo();
3530 for (std::pair<unsigned, unsigned> Pair : MRI.liveins())
3531 MRI.disableCalleeSavedRegister(Pair.first);
3532 }
3533
3534 return Chain;
3535}
3536
3537SDValue X86TargetLowering::LowerMemOpCallTo(SDValue Chain, SDValue StackPtr,
3538 SDValue Arg, const SDLoc &dl,
3539 SelectionDAG &DAG,
3540 const CCValAssign &VA,
3541 ISD::ArgFlagsTy Flags) const {
3542 unsigned LocMemOffset = VA.getLocMemOffset();
3543 SDValue PtrOff = DAG.getIntPtrConstant(LocMemOffset, dl);
3544 PtrOff = DAG.getNode(ISD::ADD, dl, getPointerTy(DAG.getDataLayout()),
3545 StackPtr, PtrOff);
3546 if (Flags.isByVal())
3547 return CreateCopyOfByValArgument(Arg, PtrOff, Chain, Flags, DAG, dl);
3548
3549 return DAG.getStore(
3550 Chain, dl, Arg, PtrOff,
3551 MachinePointerInfo::getStack(DAG.getMachineFunction(), LocMemOffset));
3552}
3553
3554/// Emit a load of return address if tail call
3555/// optimization is performed and it is required.
3556SDValue X86TargetLowering::EmitTailCallLoadRetAddr(
3557 SelectionDAG &DAG, SDValue &OutRetAddr, SDValue Chain, bool IsTailCall,
3558 bool Is64Bit, int FPDiff, const SDLoc &dl) const {
3559 // Adjust the Return address stack slot.
3560 EVT VT = getPointerTy(DAG.getDataLayout());
3561 OutRetAddr = getReturnAddressFrameIndex(DAG);
3562
3563 // Load the "old" Return address.
3564 OutRetAddr = DAG.getLoad(VT, dl, Chain, OutRetAddr, MachinePointerInfo());
3565 return SDValue(OutRetAddr.getNode(), 1);
3566}
3567
3568/// Emit a store of the return address if tail call
3569/// optimization is performed and it is required (FPDiff!=0).
3570static SDValue EmitTailCallStoreRetAddr(SelectionDAG &DAG, MachineFunction &MF,
3571 SDValue Chain, SDValue RetAddrFrIdx,
3572 EVT PtrVT, unsigned SlotSize,
3573 int FPDiff, const SDLoc &dl) {
3574 // Store the return address to the appropriate stack slot.
3575 if (!FPDiff) return Chain;
3576 // Calculate the new stack slot for the return address.
3577 int NewReturnAddrFI =
3578 MF.getFrameInfo().CreateFixedObject(SlotSize, (int64_t)FPDiff - SlotSize,
3579 false);
3580 SDValue NewRetAddrFrIdx = DAG.getFrameIndex(NewReturnAddrFI, PtrVT);
3581 Chain = DAG.getStore(Chain, dl, RetAddrFrIdx, NewRetAddrFrIdx,
3582 MachinePointerInfo::getFixedStack(
3583 DAG.getMachineFunction(), NewReturnAddrFI));
3584 return Chain;
3585}
3586
3587/// Returns a vector_shuffle mask for a movs{s|d} or movd
3588/// operation of the specified width.
3589static SDValue getMOVL(SelectionDAG &DAG, const SDLoc &dl, MVT VT, SDValue V1,
3590 SDValue V2) {
3591 unsigned NumElems = VT.getVectorNumElements();
3592 SmallVector<int, 8> Mask;
3593 Mask.push_back(NumElems);
3594 for (unsigned i = 1; i != NumElems; ++i)
3595 Mask.push_back(i);
3596 return DAG.getVectorShuffle(VT, dl, V1, V2, Mask);
3597}
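A minimal standalone sketch (not part of X86ISelLowering.cpp; the helper name is illustrative) of the mask getMOVL builds: shuffle indices >= NumElems select from the second operand, so for a 4-element type the result is {4, 1, 2, 3}, i.e. lane 0 comes from V2 and lanes 1..3 from V1, which is the movss/movsd pattern.

#include <vector>

std::vector<int> buildMovlMask(unsigned NumElems) {
  std::vector<int> Mask;
  Mask.push_back(NumElems);              // lane 0 <- element 0 of V2
  for (unsigned i = 1; i != NumElems; ++i)
    Mask.push_back(i);                   // remaining lanes <- V1
  return Mask;                           // NumElems == 4 -> {4, 1, 2, 3}
}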
3598
3599SDValue
3600X86TargetLowering::LowerCall(TargetLowering::CallLoweringInfo &CLI,
3601 SmallVectorImpl<SDValue> &InVals) const {
3602 SelectionDAG &DAG = CLI.DAG;
3603 SDLoc &dl = CLI.DL;
3604 SmallVectorImpl<ISD::OutputArg> &Outs = CLI.Outs;
3605 SmallVectorImpl<SDValue> &OutVals = CLI.OutVals;
3606 SmallVectorImpl<ISD::InputArg> &Ins = CLI.Ins;
3607 SDValue Chain = CLI.Chain;
3608 SDValue Callee = CLI.Callee;
3609 CallingConv::ID CallConv = CLI.CallConv;
3610 bool &isTailCall = CLI.IsTailCall;
3611 bool isVarArg = CLI.IsVarArg;
3612
3613 MachineFunction &MF = DAG.getMachineFunction();
3614 bool Is64Bit = Subtarget.is64Bit();
3615 bool IsWin64 = Subtarget.isCallingConvWin64(CallConv);
3616 StructReturnType SR = callIsStructReturn(Outs, Subtarget.isTargetMCU());
3617 bool IsSibcall = false;
3618 bool IsGuaranteeTCO = MF.getTarget().Options.GuaranteedTailCallOpt ||
3619 CallConv == CallingConv::Tail;
3620 X86MachineFunctionInfo *X86Info = MF.getInfo<X86MachineFunctionInfo>();
3621 auto Attr = MF.getFunction().getFnAttribute("disable-tail-calls");
3622 const auto *CI = dyn_cast_or_null<CallInst>(CLI.CS.getInstruction());
3623 const Function *Fn = CI ? CI->getCalledFunction() : nullptr;
3624 bool HasNCSR = (CI && CI->hasFnAttr("no_caller_saved_registers")) ||
3625 (Fn && Fn->hasFnAttribute("no_caller_saved_registers"));
3626 const auto *II = dyn_cast_or_null<InvokeInst>(CLI.CS.getInstruction());
3627 bool HasNoCfCheck =
3628 (CI && CI->doesNoCfCheck()) || (II && II->doesNoCfCheck());
3629 const Module *M = MF.getMMI().getModule();
3630 Metadata *IsCFProtectionSupported = M->getModuleFlag("cf-protection-branch");
3631
3632 MachineFunction::CallSiteInfo CSInfo;
3633
3634 if (CallConv == CallingConv::X86_INTR)
3635 report_fatal_error("X86 interrupts may not be called directly");
3636
3637 if (Attr.getValueAsString() == "true")
3638 isTailCall = false;
3639
3640 if (Subtarget.isPICStyleGOT() && !IsGuaranteeTCO) {
3641 // If we are using a GOT, disable tail calls to external symbols with
3642 // default visibility. Tail calling such a symbol requires using a GOT
3643 // relocation, which forces early binding of the symbol. This breaks code
3644 // that requires lazy function symbol resolution. Using musttail or
3645 // GuaranteedTailCallOpt will override this.
3646 GlobalAddressSDNode *G = dyn_cast<GlobalAddressSDNode>(Callee);
3647 if (!G || (!G->getGlobal()->hasLocalLinkage() &&
3648 G->getGlobal()->hasDefaultVisibility()))
3649 isTailCall = false;
3650 }
3651
3652 bool IsMustTail = CLI.CS && CLI.CS.isMustTailCall();
3653 if (IsMustTail) {
3654 // Force this to be a tail call. The verifier rules are enough to ensure
3655 // that we can lower this successfully without moving the return address
3656 // around.
3657 isTailCall = true;
3658 } else if (isTailCall) {
3659 // Check if it's really possible to do a tail call.
3660 isTailCall = IsEligibleForTailCallOptimization(Callee, CallConv,
3661 isVarArg, SR != NotStructReturn,
3662 MF.getFunction().hasStructRetAttr(), CLI.RetTy,
3663 Outs, OutVals, Ins, DAG);
3664
3665 // Sibcalls are automatically detected tailcalls which do not require
3666 // ABI changes.
3667 if (!IsGuaranteeTCO && isTailCall)
3668 IsSibcall = true;
3669
3670 if (isTailCall)
3671 ++NumTailCalls;
3672 }
3673
3674 assert(!(isVarArg && canGuaranteeTCO(CallConv)) &&
3675 "Var args not supported with calling convention fastcc, ghc or hipe");
3676
3677 // Analyze operands of the call, assigning locations to each operand.
3678 SmallVector<CCValAssign, 16> ArgLocs;
3679 CCState CCInfo(CallConv, isVarArg, MF, ArgLocs, *DAG.getContext());
3680
3681 // Allocate shadow area for Win64.
3682 if (IsWin64)
3683 CCInfo.AllocateStack(32, 8);
3684
3685 CCInfo.AnalyzeArguments(Outs, CC_X86);
3686
3687 // In vectorcall calling convention a second pass is required for the HVA
3688 // types.
3689 if (CallingConv::X86_VectorCall == CallConv) {
3690 CCInfo.AnalyzeArgumentsSecondPass(Outs, CC_X86);
3691 }
3692
3693 // Get a count of how many bytes are to be pushed on the stack.
3694 unsigned NumBytes = CCInfo.getAlignedCallFrameSize();
3695 if (IsSibcall)
3696 // This is a sibcall. The memory operands are already available in the
3697 // caller's incoming argument space.
3698 NumBytes = 0;
3699 else if (IsGuaranteeTCO && canGuaranteeTCO(CallConv))
3700 NumBytes = GetAlignedArgumentStackSize(NumBytes, DAG);
3701
3702 int FPDiff = 0;
3703 if (isTailCall && !IsSibcall && !IsMustTail) {
3704 // Lower arguments at fp - stackoffset + fpdiff.
3705 unsigned NumBytesCallerPushed = X86Info->getBytesToPopOnReturn();
3706
3707 FPDiff = NumBytesCallerPushed - NumBytes;
3708
3709 // Set the delta of movement of the returnaddr stackslot.
3710 // But only set if delta is greater than previous delta.
3711 if (FPDiff < X86Info->getTCReturnAddrDelta())
3712 X86Info->setTCReturnAddrDelta(FPDiff);
3713 }
3714
3715 unsigned NumBytesToPush = NumBytes;
3716 unsigned NumBytesToPop = NumBytes;
3717
3718 // If we have an inalloca argument, all stack space has already been allocated
3719 // for us and it is right at the top of the stack. We don't support multiple
3720 // arguments passed in memory when using inalloca.
3721 if (!Outs.empty() && Outs.back().Flags.isInAlloca()) {
3722 NumBytesToPush = 0;
3723 if (!ArgLocs.back().isMemLoc())
3724 report_fatal_error("cannot use inalloca attribute on a register "
3725 "parameter");
3726 if (ArgLocs.back().getLocMemOffset() != 0)
3727 report_fatal_error("any parameter with the inalloca attribute must be "
3728 "the only memory argument");
3729 }
3730
3731 if (!IsSibcall)
3732 Chain = DAG.getCALLSEQ_START(Chain, NumBytesToPush,
3733 NumBytes - NumBytesToPush, dl);
3734
3735 SDValue RetAddrFrIdx;
3736 // Load return address for tail calls.
3737 if (isTailCall && FPDiff)
3738 Chain = EmitTailCallLoadRetAddr(DAG, RetAddrFrIdx, Chain, isTailCall,
3739 Is64Bit, FPDiff, dl);
3740
3741 SmallVector<std::pair<unsigned, SDValue>, 8> RegsToPass;
3742 SmallVector<SDValue, 8> MemOpChains;
3743 SDValue StackPtr;
3744
3745 // The next loop assumes that the locations are in the same order as the
3746 // input arguments.
3747 assert(isSortedByValueNo(ArgLocs) &&
3748 "Argument Location list must be sorted before lowering");
3749
3750 // Walk the register/memloc assignments, inserting copies/loads. In the case
3751 // of tail call optimization, arguments are handled later.
3752 const X86RegisterInfo *RegInfo = Subtarget.getRegisterInfo();
3753 for (unsigned I = 0, OutIndex = 0, E = ArgLocs.size(); I != E;
3754 ++I, ++OutIndex) {
3755 assert(OutIndex < Outs.size() && "Invalid Out index");
3756 // Skip inalloca arguments, they have already been written.
3757 ISD::ArgFlagsTy Flags = Outs[OutIndex].Flags;
3758 if (Flags.isInAlloca())
3759 continue;
3760
3761 CCValAssign &VA = ArgLocs[I];
3762 EVT RegVT = VA.getLocVT();
3763 SDValue Arg = OutVals[OutIndex];
3764 bool isByVal = Flags.isByVal();
3765
3766 // Promote the value if needed.
3767 switch (VA.getLocInfo()) {
3768 default: llvm_unreachable("Unknown loc info!");
3769 case CCValAssign::Full: break;
3770 case CCValAssign::SExt:
3771 Arg = DAG.getNode(ISD::SIGN_EXTEND, dl, RegVT, Arg);
3772 break;
3773 case CCValAssign::ZExt:
3774 Arg = DAG.getNode(ISD::ZERO_EXTEND, dl, RegVT, Arg);
3775 break;
3776 case CCValAssign::AExt:
3777 if (Arg.getValueType().isVector() &&
3778 Arg.getValueType().getVectorElementType() == MVT::i1)
3779 Arg = lowerMasksToReg(Arg, RegVT, dl, DAG);
3780 else if (RegVT.is128BitVector()) {
3781 // Special case: passing MMX values in XMM registers.
3782 Arg = DAG.getBitcast(MVT::i64, Arg);
3783 Arg = DAG.getNode(ISD::SCALAR_TO_VECTOR, dl, MVT::v2i64, Arg);
3784 Arg = getMOVL(DAG, dl, MVT::v2i64, DAG.getUNDEF(MVT::v2i64), Arg);
3785 } else
3786 Arg = DAG.getNode(ISD::ANY_EXTEND, dl, RegVT, Arg);
3787 break;
3788 case CCValAssign::BCvt:
3789 Arg = DAG.getBitcast(RegVT, Arg);
3790 break;
3791 case CCValAssign::Indirect: {
3792 if (isByVal) {
3793 // Memcpy the argument to a temporary stack slot to prevent
3794 // the caller from seeing any modifications the callee may make
3795 // as guaranteed by the `byval` attribute.
3796 int FrameIdx = MF.getFrameInfo().CreateStackObject(
3797 Flags.getByValSize(), std::max(16, (int)Flags.getByValAlign()),
3798 false);
3799 SDValue StackSlot =
3800 DAG.getFrameIndex(FrameIdx, getPointerTy(DAG.getDataLayout()));
3801 Chain =
3802 CreateCopyOfByValArgument(Arg, StackSlot, Chain, Flags, DAG, dl);
3803 // From now on treat this as a regular pointer
3804 Arg = StackSlot;
3805 isByVal = false;
3806 } else {
3807 // Store the argument.
3808 SDValue SpillSlot = DAG.CreateStackTemporary(VA.getValVT());
3809 int FI = cast<FrameIndexSDNode>(SpillSlot)->getIndex();
3810 Chain = DAG.getStore(
3811 Chain, dl, Arg, SpillSlot,
3812 MachinePointerInfo::getFixedStack(DAG.getMachineFunction(), FI));
3813 Arg = SpillSlot;
3814 }
3815 break;
3816 }
3817 }
3818
3819 if (VA.needsCustom()) {
3820 assert(VA.getValVT() == MVT::v64i1 &&
3821 "Currently the only custom case is when we split v64i1 to 2 regs");
3822 // Split v64i1 value into two registers
3823 Passv64i1ArgInRegs(dl, DAG, Arg, RegsToPass, VA, ArgLocs[++I], Subtarget);
3824 } else if (VA.isRegLoc()) {
3825 RegsToPass.push_back(std::make_pair(VA.getLocReg(), Arg));
3826 const TargetOptions &Options = DAG.getTarget().Options;
3827 if (Options.EnableDebugEntryValues)
3828 CSInfo.emplace_back(VA.getLocReg(), I);
3829 if (isVarArg && IsWin64) {
3830 // The Win64 ABI requires an argument XMM reg to be copied to the
3831 // corresponding shadow reg if the callee is a varargs function.
3832 unsigned ShadowReg = 0;
3833 switch (VA.getLocReg()) {
3834 case X86::XMM0: ShadowReg = X86::RCX; break;
3835 case X86::XMM1: ShadowReg = X86::RDX; break;
3836 case X86::XMM2: ShadowReg = X86::R8; break;
3837 case X86::XMM3: ShadowReg = X86::R9; break;
3838 }
3839 if (ShadowReg)
3840 RegsToPass.push_back(std::make_pair(ShadowReg, Arg));
3841 }
3842 } else if (!IsSibcall && (!isTailCall || isByVal)) {
3843 assert(VA.isMemLoc());
3844 if (!StackPtr.getNode())
3845 StackPtr = DAG.getCopyFromReg(Chain, dl, RegInfo->getStackRegister(),
3846 getPointerTy(DAG.getDataLayout()));
3847 MemOpChains.push_back(LowerMemOpCallTo(Chain, StackPtr, Arg,
3848 dl, DAG, VA, Flags));
3849 }
3850 }
3851
3852 if (!MemOpChains.empty())
3853 Chain = DAG.getNode(ISD::TokenFactor, dl, MVT::Other, MemOpChains);
3854
3855 if (Subtarget.isPICStyleGOT()) {
3856 // ELF / PIC requires GOT in the EBX register before function calls via PLT
3857 // GOT pointer.
3858 if (!isTailCall) {
3859 RegsToPass.push_back(std::make_pair(
3860 unsigned(X86::EBX), DAG.getNode(X86ISD::GlobalBaseReg, SDLoc(),
3861 getPointerTy(DAG.getDataLayout()))));
3862 } else {
3863 // If we are tail calling and generating PIC/GOT style code load the
3864 // address of the callee into ECX. The value in ecx is used as target of
3865 // the tail jump. This is done to circumvent the ebx/callee-saved problem
3866 // for tail calls on PIC/GOT architectures. Normally we would just put the
3867 // address of GOT into ebx and then call target@PLT. But for tail calls
3868 // ebx would be restored (since ebx is callee saved) before jumping to the
3869 // target@PLT.
3870
3871 // Note: The actual moving to ECX is done further down.
3872 GlobalAddressSDNode *G = dyn_cast<GlobalAddressSDNode>(Callee);
3873 if (G && !G->getGlobal()->hasLocalLinkage() &&
3874 G->getGlobal()->hasDefaultVisibility())
3875 Callee = LowerGlobalAddress(Callee, DAG);
3876 else if (isa<ExternalSymbolSDNode>(Callee))
3877 Callee = LowerExternalSymbol(Callee, DAG);
3878 }
3879 }
3880
3881 if (Is64Bit && isVarArg && !IsWin64 && !IsMustTail) {
3882 // From AMD64 ABI document:
3883 // For calls that may call functions that use varargs or stdargs
3884 // (prototype-less calls or calls to functions containing ellipsis (...) in
3885 // the declaration) %al is used as a hidden argument to specify the number
3886 // of SSE registers used. The contents of %al do not need to match exactly
3887 // the number of registers, but must be an upper bound on the number of SSE
3888 // registers used and is in the range 0 - 8 inclusive.
3889
3890 // Count the number of XMM registers allocated.
3891 static const MCPhysReg XMMArgRegs[] = {
3892 X86::XMM0, X86::XMM1, X86::XMM2, X86::XMM3,
3893 X86::XMM4, X86::XMM5, X86::XMM6, X86::XMM7
3894 };
3895 unsigned NumXMMRegs = CCInfo.getFirstUnallocated(XMMArgRegs);
3896 assert((Subtarget.hasSSE1() || !NumXMMRegs)
3897 && "SSE registers cannot be used when SSE is disabled");
3898
3899 RegsToPass.push_back(std::make_pair(unsigned(X86::AL),
3900 DAG.getConstant(NumXMMRegs, dl,
3901 MVT::i8)));
3902 }
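A hedged illustration of the ABI rule quoted above (the caller below is hypothetical, not from this file): for a varargs call that passes one double, the lowering above loads AL with an upper bound on the number of XMM argument registers in use, and the callee's va_arg prologue reads AL to decide whether the XMM registers must be spilled.

#include <cstdio>

void demo(double x) {
  // 'x' travels in XMM0, so the call sequence built above sets AL to at
  // least 1 before the call. If AL were 0, printf's varargs prologue could
  // skip saving the XMM argument registers and the double would be lost.
  std::printf("%f\n", x);
}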
3903
3904 if (isVarArg && IsMustTail) {
3905 const auto &Forwards = X86Info->getForwardedMustTailRegParms();
3906 for (const auto &F : Forwards) {
3907 SDValue Val = DAG.getCopyFromReg(Chain, dl, F.VReg, F.VT);
3908 RegsToPass.push_back(std::make_pair(unsigned(F.PReg), Val));
3909 }
3910 }
3911
3912 // For tail calls lower the arguments to the 'real' stack slots. Sibcalls
3913 // don't need this because the eligibility check rejects calls that require
3914 // shuffling arguments passed in memory.
3915 if (!IsSibcall && isTailCall) {
3916 // Force all the incoming stack arguments to be loaded from the stack
3917 // before any new outgoing arguments are stored to the stack, because the
3918 // outgoing stack slots may alias the incoming argument stack slots, and
3919 // the alias isn't otherwise explicit. This is slightly more conservative
3920 // than necessary, because it means that each store effectively depends
3921 // on every argument instead of just those arguments it would clobber.
3922 SDValue ArgChain = DAG.getStackArgumentTokenFactor(Chain);
3923
3924 SmallVector<SDValue, 8> MemOpChains2;
3925 SDValue FIN;
3926 int FI = 0;
3927 for (unsigned I = 0, OutsIndex = 0, E = ArgLocs.size(); I != E;
3928 ++I, ++OutsIndex) {
3929 CCValAssign &VA = ArgLocs[I];
3930
3931 if (VA.isRegLoc()) {
3932 if (VA.needsCustom()) {
3933 assert((CallConv == CallingConv::X86_RegCall) &&
3934 "Expecting custom case only in regcall calling convention");
3935 // This means that we are in a special case where one argument was
3936 // passed through two register locations - skip the next location.
3937 ++I;
3938 }
3939
3940 continue;
3941 }
3942
3943 assert(VA.isMemLoc());
3944 SDValue Arg = OutVals[OutsIndex];
3945 ISD::ArgFlagsTy Flags = Outs[OutsIndex].Flags;
3946 // Skip inalloca arguments. They don't require any work.
3947 if (Flags.isInAlloca())
3948 continue;
3949 // Create frame index.
3950 int32_t Offset = VA.getLocMemOffset()+FPDiff;
3951 uint32_t OpSize = (VA.getLocVT().getSizeInBits()+7)/8;
3952 FI = MF.getFrameInfo().CreateFixedObject(OpSize, Offset, true);
3953 FIN = DAG.getFrameIndex(FI, getPointerTy(DAG.getDataLayout()));
3954
3955 if (Flags.isByVal()) {
3956 // Copy relative to framepointer.
3957 SDValue Source = DAG.getIntPtrConstant(VA.getLocMemOffset(), dl);
3958 if (!StackPtr.getNode())
3959 StackPtr = DAG.getCopyFromReg(Chain, dl, RegInfo->getStackRegister(),
3960 getPointerTy(DAG.getDataLayout()));
3961 Source = DAG.getNode(ISD::ADD, dl, getPointerTy(DAG.getDataLayout()),
3962 StackPtr, Source);
3963
3964 MemOpChains2.push_back(CreateCopyOfByValArgument(Source, FIN,
3965 ArgChain,
3966 Flags, DAG, dl));
3967 } else {
3968 // Store relative to framepointer.
3969 MemOpChains2.push_back(DAG.getStore(
3970 ArgChain, dl, Arg, FIN,
3971 MachinePointerInfo::getFixedStack(DAG.getMachineFunction(), FI)));
3972 }
3973 }
3974
3975 if (!MemOpChains2.empty())
3976 Chain = DAG.getNode(ISD::TokenFactor, dl, MVT::Other, MemOpChains2);
3977
3978 // Store the return address to the appropriate stack slot.
3979 Chain = EmitTailCallStoreRetAddr(DAG, MF, Chain, RetAddrFrIdx,
3980 getPointerTy(DAG.getDataLayout()),
3981 RegInfo->getSlotSize(), FPDiff, dl);
3982 }
3983
3984 // Build a sequence of copy-to-reg nodes chained together with token chain
3985 // and flag operands which copy the outgoing args into registers.
3986 SDValue InFlag;
3987 for (unsigned i = 0, e = RegsToPass.size(); i != e; ++i) {
3988 Chain = DAG.getCopyToReg(Chain, dl, RegsToPass[i].first,
3989 RegsToPass[i].second, InFlag);
3990 InFlag = Chain.getValue(1);
3991 }
3992
3993 if (DAG.getTarget().getCodeModel() == CodeModel::Large) {
3994 assert(Is64Bit && "Large code model is only legal in 64-bit mode.");
3995 // In the 64-bit large code model, we have to make all calls
3996 // through a register, since the call instruction's 32-bit
3997 // pc-relative offset may not be large enough to hold the whole
3998 // address.
3999 } else if (Callee->getOpcode() == ISD::GlobalAddress ||
4000 Callee->getOpcode() == ISD::ExternalSymbol) {
4001 // Lower direct calls to global addresses and external symbols. Setting
4002 // ForCall to true here has the effect of removing WrapperRIP when possible
4003 // to allow direct calls to be selected without first materializing the
4004 // address into a register.
4005 Callee = LowerGlobalOrExternal(Callee, DAG, /*ForCall=*/true);
4006 } else if (Subtarget.isTarget64BitILP32() &&
4007 Callee->getValueType(0) == MVT::i32) {
4008 // Zero-extend the 32-bit Callee address to 64 bits, as required by the x32 ABI.
4009 Callee = DAG.getNode(ISD::ZERO_EXTEND, dl, MVT::i64, Callee);
4010 }
4011
4012 // Returns a chain & a flag for retval copy to use.
4013 SDVTList NodeTys = DAG.getVTList(MVT::Other, MVT::Glue);
4014 SmallVector<SDValue, 8> Ops;
4015
4016 if (!IsSibcall && isTailCall) {
4017 Chain = DAG.getCALLSEQ_END(Chain,
4018 DAG.getIntPtrConstant(NumBytesToPop, dl, true),
4019 DAG.getIntPtrConstant(0, dl, true), InFlag, dl);
4020 InFlag = Chain.getValue(1);
4021 }
4022
4023 Ops.push_back(Chain);
4024 Ops.push_back(Callee);
4025
4026 if (isTailCall)
4027 Ops.push_back(DAG.getConstant(FPDiff, dl, MVT::i32));
4028
4029 // Add argument registers to the end of the list so that they are known live
4030 // into the call.
4031 for (unsigned i = 0, e = RegsToPass.size(); i != e; ++i)
4032 Ops.push_back(DAG.getRegister(RegsToPass[i].first,
4033 RegsToPass[i].second.getValueType()));
4034
4035 // Add a register mask operand representing the call-preserved registers.
4036 // If HasNCSR is asserted (attribute NoCallerSavedRegisters exists), then we
4037 // use the X86_INTR calling convention because it has the same CSR mask
4038 // (same preserved registers).
4039 const uint32_t *Mask = RegInfo->getCallPreservedMask(
4040 MF, HasNCSR ? (CallingConv::ID)CallingConv::X86_INTR : CallConv);
4041 assert(Mask && "Missing call preserved mask for calling convention");
4042
4043 // If this is an invoke in a 32-bit function using a funclet-based
4044 // personality, assume the function clobbers all registers. If an exception
4045 // is thrown, the runtime will not restore CSRs.
4046 // FIXME: Model this more precisely so that we can register allocate across
4047 // the normal edge and spill and fill across the exceptional edge.
4048 if (!Is64Bit && CLI.CS && CLI.CS.isInvoke()) {
4049 const Function &CallerFn = MF.getFunction();
4050 EHPersonality Pers =
4051 CallerFn.hasPersonalityFn()
4052 ? classifyEHPersonality(CallerFn.getPersonalityFn())
4053 : EHPersonality::Unknown;
4054 if (isFuncletEHPersonality(Pers))
4055 Mask = RegInfo->getNoPreservedMask();
4056 }
4057
4058 // Define a new register mask from the existing mask.
4059 uint32_t *RegMask = nullptr;
4060
4061 // In some calling conventions we need to remove the used physical registers
4062 // from the reg mask.
4063 if (CallConv == CallingConv::X86_RegCall || HasNCSR) {
4064 const TargetRegisterInfo *TRI = Subtarget.getRegisterInfo();
4065
4066 // Allocate a new Reg Mask and copy Mask.
4067 RegMask = MF.allocateRegMask();
4068 unsigned RegMaskSize = MachineOperand::getRegMaskSize(TRI->getNumRegs());
4069 memcpy(RegMask, Mask, sizeof(RegMask[0]) * RegMaskSize);
4070
4071 // Make sure all sub registers of the argument registers are reset
4072 // in the RegMask.
4073 for (auto const &RegPair : RegsToPass)
4074 for (MCSubRegIterator SubRegs(RegPair.first, TRI, /*IncludeSelf=*/true);
4075 SubRegs.isValid(); ++SubRegs)
4076 RegMask[*SubRegs / 32] &= ~(1u << (*SubRegs % 32));
4077
4078 // Create the RegMask Operand according to our updated mask.
4079 Ops.push_back(DAG.getRegisterMask(RegMask));
4080 } else {
4081 // Create the RegMask Operand according to the static mask.
4082 Ops.push_back(DAG.getRegisterMask(Mask));
4083 }
4084
4085 if (InFlag.getNode())
4086 Ops.push_back(InFlag);
4087
4088 if (isTailCall) {
4089 // We used to do:
4090 //// If this is the first return lowered for this function, add the regs
4091 //// to the liveout set for the function.
4092 // This isn't right, although it's probably harmless on x86; liveouts
4093 // should be computed from returns not tail calls. Consider a void
4094 // function making a tail call to a function returning int.
4095 MF.getFrameInfo().setHasTailCall();
4096 SDValue Ret = DAG.getNode(X86ISD::TC_RETURN, dl, NodeTys, Ops);
4097 DAG.addCallSiteInfo(Ret.getNode(), std::move(CSInfo));
4098 return Ret;
4099 }
4100
4101 if (HasNoCfCheck && IsCFProtectionSupported) {
4102 Chain = DAG.getNode(X86ISD::NT_CALL, dl, NodeTys, Ops);
4103 } else {
4104 Chain = DAG.getNode(X86ISD::CALL, dl, NodeTys, Ops);
4105 }
4106 InFlag = Chain.getValue(1);
4107 DAG.addCallSiteInfo(Chain.getNode(), std::move(CSInfo));
4108
4109 // Save heapallocsite metadata.
4110 if (CLI.CS)
4111 if (MDNode *HeapAlloc = CLI.CS->getMetadata("heapallocsite"))
4112 DAG.addHeapAllocSite(Chain.getNode(), HeapAlloc);
4113
4114 // Create the CALLSEQ_END node.
4115 unsigned NumBytesForCalleeToPop;
4116 if (X86::isCalleePop(CallConv, Is64Bit, isVarArg,
4117 DAG.getTarget().Options.GuaranteedTailCallOpt))
4118 NumBytesForCalleeToPop = NumBytes; // Callee pops everything
4119 else if (!Is64Bit && !canGuaranteeTCO(CallConv) &&
4120 !Subtarget.getTargetTriple().isOSMSVCRT() &&
4121 SR == StackStructReturn)
4122 // If this is a call to a struct-return function, the callee
4123 // pops the hidden struct pointer, so we have to push it back.
4124 // This is common for Darwin/X86, Linux & Mingw32 targets.
4125 // For MSVC Win32 targets, the caller pops the hidden struct pointer.
4126 NumBytesForCalleeToPop = 4;
4127 else
4128 NumBytesForCalleeToPop = 0; // Callee pops nothing.
4129
4130 if (CLI.DoesNotReturn && !getTargetMachine().Options.TrapUnreachable) {
4131 // No need to reset the stack after the call if the call doesn't return. To
4132 // make the MI verify, we'll pretend the callee does it for us.
4133 NumBytesForCalleeToPop = NumBytes;
4134 }
4135
4136 // Returns a flag for retval copy to use.
4137 if (!IsSibcall) {
4138 Chain = DAG.getCALLSEQ_END(Chain,
4139 DAG.getIntPtrConstant(NumBytesToPop, dl, true),
4140 DAG.getIntPtrConstant(NumBytesForCalleeToPop, dl,
4141 true),
4142 InFlag, dl);
4143 InFlag = Chain.getValue(1);
4144 }
4145
4146 // Handle result values, copying them out of physregs into vregs that we
4147 // return.
4148 return LowerCallResult(Chain, InFlag, CallConv, isVarArg, Ins, dl, DAG,
4149 InVals, RegMask);
4150}
4151
4152//===----------------------------------------------------------------------===//
4153// Fast Calling Convention (tail call) implementation
4154//===----------------------------------------------------------------------===//
4155
4156// Like stdcall, the callee cleans up the arguments, except that ECX is
4157// reserved for storing the tail-called function's address. Only 2 registers
4158// are free for argument passing (inreg). Tail call optimization is performed
4159// provided:
4160// * tailcallopt is enabled
4161// * caller/callee are fastcc
4162// On the X86_64 architecture with GOT-style position-independent code, only
4163// local (within-module) calls are supported at the moment.
4164// To keep the stack aligned according to the platform ABI, the function
4165// GetAlignedArgumentStackSize ensures that the argument delta is always a
4166// multiple of the stack alignment. (Dynamic linkers need this - darwin's dyld for example.)
4167// If a tail-called callee has more arguments than the caller, the caller
4168// needs to make sure that there is room to move the RETADDR to. This is
4169// achieved by reserving an area the size of the argument delta right after the
4170// original RETADDR, but before the saved frame pointer or the spilled registers,
4171// e.g. caller(arg1, arg2) calls callee(arg1, arg2,arg3,arg4)
4172// stack layout:
4173// arg1
4174// arg2
4175// RETADDR
4176// [ new RETADDR
4177// move area ]
4178// (possible EBP)
4179// ESI
4180// EDI
4181// local1 ..
4182
4183/// Align the stack size, e.g. to 16n + 12, to satisfy a 16-byte alignment
4184/// requirement.
4185unsigned
4186X86TargetLowering::GetAlignedArgumentStackSize(unsigned StackSize,
4187 SelectionDAG& DAG) const {
4188 const X86RegisterInfo *RegInfo = Subtarget.getRegisterInfo();
4189 const TargetFrameLowering &TFI = *Subtarget.getFrameLowering();
4190 unsigned StackAlignment = TFI.getStackAlignment();
4191 uint64_t AlignMask = StackAlignment - 1;
4192 int64_t Offset = StackSize;
4193 unsigned SlotSize = RegInfo->getSlotSize();
4194 if ( (Offset & AlignMask) <= (StackAlignment - SlotSize) ) {
4195 // Number smaller than 12 so just add the difference.
4196 Offset += ((StackAlignment - SlotSize) - (Offset & AlignMask));
4197 } else {
4198 // Mask out lower bits, add stackalignment once plus the 12 bytes.
4199 Offset = ((~AlignMask) & Offset) + StackAlignment +
4200 (StackAlignment-SlotSize);
4201 }
4202 return Offset;
4203}
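A worked example of the rounding above, as a standalone sketch (the helper below is illustrative; the real code reads StackAlignment and SlotSize from the subtarget): the result is the smallest value >= StackSize such that result + SlotSize is a multiple of StackAlignment, which for 16-byte alignment and 4-byte slots gives the "16n + 12" shape mentioned in the comment.

#include <cassert>
#include <cstdint>

uint64_t alignedArgSize(uint64_t StackSize, uint64_t StackAlignment,
                        uint64_t SlotSize) {
  uint64_t AlignMask = StackAlignment - 1;
  uint64_t Offset = StackSize;
  if ((Offset & AlignMask) <= (StackAlignment - SlotSize))
    Offset += (StackAlignment - SlotSize) - (Offset & AlignMask);
  else
    Offset = (~AlignMask & Offset) + StackAlignment + (StackAlignment - SlotSize);
  return Offset;
}

int main() {
  assert(alignedArgSize(20, 16, 4) == 28); // 28 + 4 == 32, a multiple of 16
  assert(alignedArgSize(30, 16, 4) == 44); // 44 + 4 == 48, a multiple of 16
  return 0;
}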
4204
4205/// Return true if the given stack call argument is already available in the
4206/// same position (relatively) of the caller's incoming argument stack.
4207static
4208bool MatchingStackOffset(SDValue Arg, unsigned Offset, ISD::ArgFlagsTy Flags,
4209 MachineFrameInfo &MFI, const MachineRegisterInfo *MRI,
4210 const X86InstrInfo *TII, const CCValAssign &VA) {
4211 unsigned Bytes = Arg.getValueSizeInBits() / 8;
4212
4213 for (;;) {
4214 // Look through nodes that don't alter the bits of the incoming value.
4215 unsigned Op = Arg.getOpcode();
4216 if (Op == ISD::ZERO_EXTEND || Op == ISD::ANY_EXTEND || Op == ISD::BITCAST) {
4217 Arg = Arg.getOperand(0);
4218 continue;
4219 }
4220 if (Op == ISD::TRUNCATE) {
4221 const SDValue &TruncInput = Arg.getOperand(0);
4222 if (TruncInput.getOpcode() == ISD::AssertZext &&
4223 cast<VTSDNode>(TruncInput.getOperand(1))->getVT() ==
4224 Arg.getValueType()) {
4225 Arg = TruncInput.getOperand(0);
4226 continue;
4227 }
4228 }
4229 break;
4230 }
4231
4232 int FI = INT_MAX;
4233 if (Arg.getOpcode() == ISD::CopyFromReg) {
4234 unsigned VR = cast<RegisterSDNode>(Arg.getOperand(1))->getReg();
4235 if (!Register::isVirtualRegister(VR))
4236 return false;
4237 MachineInstr *Def = MRI->getVRegDef(VR);
4238 if (!Def)
4239 return false;
4240 if (!Flags.isByVal()) {
4241 if (!TII->isLoadFromStackSlot(*Def, FI))
4242 return false;
4243 } else {
4244 unsigned Opcode = Def->getOpcode();
4245 if ((Opcode == X86::LEA32r || Opcode == X86::LEA64r ||
4246 Opcode == X86::LEA64_32r) &&
4247 Def->getOperand(1).isFI()) {
4248 FI = Def->getOperand(1).getIndex();
4249 Bytes = Flags.getByValSize();
4250 } else
4251 return false;
4252 }
4253 } else if (LoadSDNode *Ld = dyn_cast<LoadSDNode>(Arg)) {
4254 if (Flags.isByVal())
4255 // ByVal argument is passed in as a pointer but it's now being
4256 // dereferenced. e.g.
4257 // define @foo(%struct.X* %A) {
4258 // tail call @bar(%struct.X* byval %A)
4259 // }
4260 return false;
4261 SDValue Ptr = Ld->getBasePtr();
4262 FrameIndexSDNode *FINode = dyn_cast<FrameIndexSDNode>(Ptr);
4263 if (!FINode)
4264 return false;
4265 FI = FINode->getIndex();
4266 } else if (Arg.getOpcode() == ISD::FrameIndex && Flags.isByVal()) {
4267 FrameIndexSDNode *FINode = cast<FrameIndexSDNode>(Arg);
4268 FI = FINode->getIndex();
4269 Bytes = Flags.getByValSize();
4270 } else
4271 return false;
4272
4273 assert(FI != INT_MAX);
4274 if (!MFI.isFixedObjectIndex(FI))
4275 return false;
4276
4277 if (Offset != MFI.getObjectOffset(FI))
4278 return false;
4279
4280 // If this is not byval, check that the argument stack object is immutable.
4281 // inalloca and argument copy elision can create mutable argument stack
4282 // objects. Byval objects can be mutated, but a byval call intends to pass the
4283 // mutated memory.
4284 if (!Flags.isByVal() && !MFI.isImmutableObjectIndex(FI))
4285 return false;
4286
4287 if (VA.getLocVT().getSizeInBits() > Arg.getValueSizeInBits()) {
4288 // If the argument location is wider than the argument type, check that any
4289 // extension flags match.
4290 if (Flags.isZExt() != MFI.isObjectZExt(FI) ||
4291 Flags.isSExt() != MFI.isObjectSExt(FI)) {
4292 return false;
4293 }
4294 }
4295
4296 return Bytes == MFI.getObjectSize(FI);
4297}
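A hypothetical source-level example of the pattern this predicate matches (not taken from the file): on 32-bit x86 with stack-passed arguments, 'a' and 'b' already sit at the same relative offsets in the caller's incoming argument area, so forwarding them requires no stores and the call can be emitted as a sibcall.

int callee(int a, int b);

int wrapper(int a, int b) {
  return callee(a, b); // tail position, same arguments at the same offsets
}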
4298
4299/// Check whether the call is eligible for tail call optimization. Targets
4300/// that want to do tail call optimization should implement this function.
4301bool X86TargetLowering::IsEligibleForTailCallOptimization(
4302 SDValue Callee, CallingConv::ID CalleeCC, bool isVarArg,
4303 bool isCalleeStructRet, bool isCallerStructRet, Type *RetTy,
4304 const SmallVectorImpl<ISD::OutputArg> &Outs,
4305 const SmallVectorImpl<SDValue> &OutVals,
4306 const SmallVectorImpl<ISD::InputArg> &Ins, SelectionDAG &DAG) const {
4307 if (!mayTailCallThisCC(CalleeCC))
4308 return false;
4309
4310 // If -tailcallopt is specified, make fastcc functions tail-callable.
4311 MachineFunction &MF = DAG.getMachineFunction();
4312 const Function &CallerF = MF.getFunction();
4313
4314 // If the function return type is x86_fp80 and the callee return type is not,
4315 // then the FP_EXTEND of the call result is not a nop. It's not safe to
4316 // perform a tailcall optimization here.
4317 if (CallerF.getReturnType()->isX86_FP80Ty() && !RetTy->isX86_FP80Ty())
4318 return false;
4319
4320 CallingConv::ID CallerCC = CallerF.getCallingConv();
4321 bool CCMatch = CallerCC == CalleeCC;
4322 bool IsCalleeWin64 = Subtarget.isCallingConvWin64(CalleeCC);
4323 bool IsCallerWin64 = Subtarget.isCallingConvWin64(CallerCC);
4324 bool IsGuaranteeTCO = DAG.getTarget().Options.GuaranteedTailCallOpt ||
4325 CalleeCC == CallingConv::Tail;
4326
4327 // Win64 functions have extra shadow space for argument homing. Don't do the
4328 // sibcall if the caller and callee have mismatched expectations for this
4329 // space.
4330 if (IsCalleeWin64 != IsCallerWin64)
4331 return false;
4332
4333 if (IsGuaranteeTCO) {
4334 if (canGuaranteeTCO(CalleeCC) && CCMatch)
4335 return true;
4336 return false;
4337 }
4338
4339 // Look for obvious safe cases to perform tail call optimization that do not
4340 // require ABI changes. This is what gcc calls sibcall.
4341
4342 // Can't do sibcall if stack needs to be dynamically re-aligned. PEI needs to
4343 // emit a special epilogue.
4344 const X86RegisterInfo *RegInfo = Subtarget.getRegisterInfo();
4345 if (RegInfo->needsStackRealignment(MF))
4346 return false;
4347
4348 // Also avoid sibcall optimization if either caller or callee uses struct
4349 // return semantics.
4350 if (isCalleeStructRet || isCallerStructRet)
4351 return false;
4352
4353 // Do not sibcall optimize vararg calls unless all arguments are passed via
4354 // registers.
4355 LLVMContext &C = *DAG.getContext();
4356 if (isVarArg && !Outs.empty()) {
4357 // Optimizing for varargs on Win64 is unlikely to be safe without
4358 // additional testing.
4359 if (IsCalleeWin64 || IsCallerWin64)
4360 return false;
4361
4362 SmallVector<CCValAssign, 16> ArgLocs;
4363 CCState CCInfo(CalleeCC, isVarArg, MF, ArgLocs, C);
4364
4365 CCInfo.AnalyzeCallOperands(Outs, CC_X86);
4366 for (unsigned i = 0, e = ArgLocs.size(); i != e; ++i)
4367 if (!ArgLocs[i].isRegLoc())
4368 return false;
4369 }
4370
4371 // If the call result is in ST0 / ST1, it needs to be popped off the x87
4372 // stack. Therefore, if it's not used by the call it is not safe to optimize
4373 // this into a sibcall.
4374 bool Unused = false;
4375 for (unsigned i = 0, e = Ins.size(); i != e; ++i) {
4376 if (!Ins[i].Used) {
4377 Unused = true;
4378 break;
4379 }
4380 }
4381 if (Unused) {
4382 SmallVector<CCValAssign, 16> RVLocs;
4383 CCState CCInfo(CalleeCC, false, MF, RVLocs, C);
4384 CCInfo.AnalyzeCallResult(Ins, RetCC_X86);
4385 for (unsigned i = 0, e = RVLocs.size(); i != e; ++i) {
4386 CCValAssign &VA = RVLocs[i];
4387 if (VA.getLocReg() == X86::FP0 || VA.getLocReg() == X86::FP1)
4388 return false;
4389 }
4390 }
4391
4392 // Check that the call results are passed in the same way.
4393 if (!CCState::resultsCompatible(CalleeCC, CallerCC, MF, C, Ins,
4394 RetCC_X86, RetCC_X86))
4395 return false;
4396 // The callee has to preserve all registers the caller needs to preserve.
4397 const X86RegisterInfo *TRI = Subtarget.getRegisterInfo();
4398 const uint32_t *CallerPreserved = TRI->getCallPreservedMask(MF, CallerCC);
4399 if (!CCMatch) {
4400 const uint32_t *CalleePreserved = TRI->getCallPreservedMask(MF, CalleeCC);
4401 if (!TRI->regmaskSubsetEqual(CallerPreserved, CalleePreserved))
4402 return false;
4403 }
4404
4405 unsigned StackArgsSize = 0;
4406
4407 // If the callee takes no arguments then go on to check the results of the
4408 // call.
4409 if (!Outs.empty()) {
4410 // Check if stack adjustment is needed. For now, do not do this if any
4411 // argument is passed on the stack.
4412 SmallVector<CCValAssign, 16> ArgLocs;
4413 CCState CCInfo(CalleeCC, isVarArg, MF, ArgLocs, C);
4414
4415 // Allocate shadow area for Win64
4416 if (IsCalleeWin64)
4417 CCInfo.AllocateStack(32, 8);
4418
4419 CCInfo.AnalyzeCallOperands(Outs, CC_X86);
4420 StackArgsSize = CCInfo.getNextStackOffset();
4421
4422 if (CCInfo.getNextStackOffset()) {
4423 // Check if the arguments are already laid out in the right way as
4424 // the caller's fixed stack objects.
4425 MachineFrameInfo &MFI = MF.getFrameInfo();
4426 const MachineRegisterInfo *MRI = &MF.getRegInfo();
4427 const X86InstrInfo *TII = Subtarget.getInstrInfo();
4428 for (unsigned i = 0, e = ArgLocs.size(); i != e; ++i) {
4429 CCValAssign &VA = ArgLocs[i];
4430 SDValue Arg = OutVals[i];
4431 ISD::ArgFlagsTy Flags = Outs[i].Flags;
4432 if (VA.getLocInfo() == CCValAssign::Indirect)
4433 return false;
4434 if (!VA.isRegLoc()) {
4435 if (!MatchingStackOffset(Arg, VA.getLocMemOffset(), Flags,
4436 MFI, MRI, TII, VA))
4437 return false;
4438 }
4439 }
4440 }
4441
4442 bool PositionIndependent = isPositionIndependent();
4443 // If the tailcall address may be in a register, then make sure it's
4444 // possible to register allocate for it. In 32-bit, the call address can
4445 // only target EAX, EDX, or ECX since the tail call must be scheduled after
4446 // callee-saved registers are restored. These happen to be the same
4447 // registers used to pass 'inreg' arguments so watch out for those.
4448 if (!Subtarget.is64Bit() && ((!isa<GlobalAddressSDNode>(Callee) &&
4449 !isa<ExternalSymbolSDNode>(Callee)) ||
4450 PositionIndependent)) {
4451 unsigned NumInRegs = 0;
4452 // In PIC we need an extra register to formulate the address computation
4453 // for the callee.
4454 unsigned MaxInRegs = PositionIndependent ? 2 : 3;
4455
4456 for (unsigned i = 0, e = ArgLocs.size(); i != e; ++i) {
4457 CCValAssign &VA = ArgLocs[i];
4458 if (!VA.isRegLoc())
4459 continue;
4460 Register Reg = VA.getLocReg();
4461 switch (Reg) {
4462 default: break;
4463 case X86::EAX: case X86::EDX: case X86::ECX:
4464 if (++NumInRegs == MaxInRegs)
4465 return false;
4466 break;
4467 }
4468 }
4469 }
4470
4471 const MachineRegisterInfo &MRI = MF.getRegInfo();
4472 if (!parametersInCSRMatch(MRI, CallerPreserved, ArgLocs, OutVals))
4473 return false;
4474 }
4475
4476 bool CalleeWillPop =
4477 X86::isCalleePop(CalleeCC, Subtarget.is64Bit(), isVarArg,
4478 MF.getTarget().Options.GuaranteedTailCallOpt);
4479
4480 if (unsigned BytesToPop =
4481 MF.getInfo<X86MachineFunctionInfo>()->getBytesToPopOnReturn()) {
4482 // If we have bytes to pop, the callee must pop them.
4483 bool CalleePopMatches = CalleeWillPop && BytesToPop == StackArgsSize;
4484 if (!CalleePopMatches)
4485 return false;
4486 } else if (CalleeWillPop && StackArgsSize > 0) {
4487 // If we don't have bytes to pop, make sure the callee doesn't pop any.
4488 return false;
4489 }
4490
4491 return true;
4492}
4493
4494FastISel *
4495X86TargetLowering::createFastISel(FunctionLoweringInfo &funcInfo,
4496 const TargetLibraryInfo *libInfo) const {
4497 return X86::createFastISel(funcInfo, libInfo);
4498}
4499
4500//===----------------------------------------------------------------------===//
4501// Other Lowering Hooks
4502//===----------------------------------------------------------------------===//
4503
4504static bool MayFoldLoad(SDValue Op) {
4505 return Op.hasOneUse() && ISD::isNormalLoad(Op.getNode());
4506}
4507
4508static bool MayFoldIntoStore(SDValue Op) {
4509 return Op.hasOneUse() && ISD::isNormalStore(*Op.getNode()->use_begin());
4510}
4511
4512static bool MayFoldIntoZeroExtend(SDValue Op) {
4513 if (Op.hasOneUse()) {
4514 unsigned Opcode = Op.getNode()->use_begin()->getOpcode();
4515 return (ISD::ZERO_EXTEND == Opcode);
4516 }
4517 return false;
4518}
4519
4520static bool isTargetShuffle(unsigned Opcode) {
4521 switch(Opcode) {
4522 default: return false;
4523 case X86ISD::BLENDI:
4524 case X86ISD::PSHUFB:
4525 case X86ISD::PSHUFD:
4526 case X86ISD::PSHUFHW:
4527 case X86ISD::PSHUFLW:
4528 case X86ISD::SHUFP:
4529 case X86ISD::INSERTPS:
4530 case X86ISD::EXTRQI:
4531 case X86ISD::INSERTQI:
4532 case X86ISD::PALIGNR:
4533 case X86ISD::VSHLDQ:
4534 case X86ISD::VSRLDQ:
4535 case X86ISD::MOVLHPS:
4536 case X86ISD::MOVHLPS:
4537 case X86ISD::MOVSHDUP:
4538 case X86ISD::MOVSLDUP:
4539 case X86ISD::MOVDDUP:
4540 case X86ISD::MOVSS:
4541 case X86ISD::MOVSD:
4542 case X86ISD::UNPCKL:
4543 case X86ISD::UNPCKH:
4544 case X86ISD::VBROADCAST:
4545 case X86ISD::VPERMILPI:
4546 case X86ISD::VPERMILPV:
4547 case X86ISD::VPERM2X128:
4548 case X86ISD::SHUF128:
4549 case X86ISD::VPERMIL2:
4550 case X86ISD::VPERMI:
4551 case X86ISD::VPPERM:
4552 case X86ISD::VPERMV:
4553 case X86ISD::VPERMV3:
4554 case X86ISD::VZEXT_MOVL:
4555 return true;
4556 }
4557}
4558
4559static bool isTargetShuffleVariableMask(unsigned Opcode) {
4560 switch (Opcode) {
4561 default: return false;
4562 // Target Shuffles.
4563 case X86ISD::PSHUFB:
4564 case X86ISD::VPERMILPV:
4565 case X86ISD::VPERMIL2:
4566 case X86ISD::VPPERM:
4567 case X86ISD::VPERMV:
4568 case X86ISD::VPERMV3:
4569 return true;
4570 // 'Faux' Target Shuffles.
4571 case ISD::OR:
4572 case ISD::AND:
4573 case X86ISD::ANDNP:
4574 return true;
4575 }
4576}
4577
4578SDValue X86TargetLowering::getReturnAddressFrameIndex(SelectionDAG &DAG) const {
4579 MachineFunction &MF = DAG.getMachineFunction();
4580 const X86RegisterInfo *RegInfo = Subtarget.getRegisterInfo();
4581 X86MachineFunctionInfo *FuncInfo = MF.getInfo<X86MachineFunctionInfo>();
4582 int ReturnAddrIndex = FuncInfo->getRAIndex();
4583
4584 if (ReturnAddrIndex == 0) {
4585 // Set up a frame object for the return address.
4586 unsigned SlotSize = RegInfo->getSlotSize();
4587 ReturnAddrIndex = MF.getFrameInfo().CreateFixedObject(SlotSize,
4588 -(int64_t)SlotSize,
4589 false);
4590 FuncInfo->setRAIndex(ReturnAddrIndex);
4591 }
4592
4593 return DAG.getFrameIndex(ReturnAddrIndex, getPointerTy(DAG.getDataLayout()));
4594}
4595
4596bool X86::isOffsetSuitableForCodeModel(int64_t Offset, CodeModel::Model M,
4597 bool hasSymbolicDisplacement) {
4598 // Offset should fit into 32 bit immediate field.
4599 if (!isInt<32>(Offset))
4600 return false;
4601
4602 // If we don't have a symbolic displacement - we don't have any extra
4603 // restrictions.
4604 if (!hasSymbolicDisplacement)
4605 return true;
4606
4607 // FIXME: Some tweaks might be needed for medium code model.
4608 if (M != CodeModel::Small && M != CodeModel::Kernel)
4609 return false;
4610
4611 // For the small code model we assume that the last object is 16MB before the
4612 // end of the 31-bit boundary. We may also accept pretty large negative constants,
4613 // knowing that all objects are in the positive half of the address space.
4614 if (M == CodeModel::Small && Offset < 16*1024*1024)
4615 return true;
4616
4617 // For the kernel code model we know that all objects reside in the negative half
4618 // of the 32-bit address space. We must not accept negative offsets, since they may
4619 // be just out of range, but we may accept pretty large positive ones.
4620 if (M == CodeModel::Kernel && Offset >= 0)
4621 return true;
4622
4623 return false;
4624}
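A standalone restatement of the rules above, for illustration only (the enum and helper below are hypothetical stand-ins for CodeModel::Model and the real predicate):

#include <cassert>
#include <cstdint>

enum class Model { Small, Kernel, Other };

bool offsetSuitable(int64_t Offset, Model M, bool HasSymbolicDisplacement) {
  if (Offset < INT32_MIN || Offset > INT32_MAX)
    return false;                       // must fit a signed 32-bit field
  if (!HasSymbolicDisplacement)
    return true;                        // no extra restrictions
  if (M != Model::Small && M != Model::Kernel)
    return false;
  if (M == Model::Small)
    return Offset < 16 * 1024 * 1024;   // stay 16MB below the 31-bit boundary
  return Offset >= 0;                   // kernel model: reject negative offsets
}

int main() {
  assert(offsetSuitable(8 * 1024 * 1024, Model::Small, true));
  assert(!offsetSuitable(-4096, Model::Kernel, true));
  return 0;
}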
4625
4626/// Determines whether the callee is required to pop its own arguments.
4627/// Callee pop is necessary to support tail calls.
4628bool X86::isCalleePop(CallingConv::ID CallingConv,
4629 bool is64Bit, bool IsVarArg, bool GuaranteeTCO) {
4630 // If GuaranteeTCO is true, we force some calls to be callee pop so that we
4631 // can guarantee TCO.
4632 if (!IsVarArg && shouldGuaranteeTCO(CallingConv, GuaranteeTCO))
4633 return true;
4634
4635 switch (CallingConv) {
4636 default:
4637 return false;
4638 case CallingConv::X86_StdCall:
4639 case CallingConv::X86_FastCall:
4640 case CallingConv::X86_ThisCall:
4641 case CallingConv::X86_VectorCall:
4642 return !is64Bit;
4643 }
4644}
4645
4646/// Return true if the condition is an unsigned comparison operation.
4647static bool isX86CCUnsigned(unsigned X86CC) {
4648 switch (X86CC) {
4649 default:
4650 llvm_unreachable("Invalid integer condition!");
4651 case X86::COND_E:
4652 case X86::COND_NE:
4653 case X86::COND_B:
4654 case X86::COND_A:
4655 case X86::COND_BE:
4656 case X86::COND_AE:
4657 return true;
4658 case X86::COND_G:
4659 case X86::COND_GE:
4660 case X86::COND_L:
4661 case X86::COND_LE:
4662 return false;
4663 }
4664}
4665
4666static X86::CondCode TranslateIntegerX86CC(ISD::CondCode SetCCOpcode) {
4667 switch (SetCCOpcode) {
4668 default: llvm_unreachable("Invalid integer condition!");
4669 case ISD::SETEQ: return X86::COND_E;
4670 case ISD::SETGT: return X86::COND_G;
4671 case ISD::SETGE: return X86::COND_GE;
4672 case ISD::SETLT: return X86::COND_L;
4673 case ISD::SETLE: return X86::COND_LE;
4674 case ISD::SETNE: return X86::COND_NE;
4675 case ISD::SETULT: return X86::COND_B;
4676 case ISD::SETUGT: return X86::COND_A;
4677 case ISD::SETULE: return X86::COND_BE;
4678 case ISD::SETUGE: return X86::COND_AE;
4679 }
4680}
4681
4682/// Do a one-to-one translation of a ISD::CondCode to the X86-specific
4683/// condition code, returning the condition code and the LHS/RHS of the
4684/// comparison to make.
4685static X86::CondCode TranslateX86CC(ISD::CondCode SetCCOpcode, const SDLoc &DL,
4686 bool isFP, SDValue &LHS, SDValue &RHS,
4687 SelectionDAG &DAG) {
4688 if (!isFP) {
4689 if (ConstantSDNode *RHSC = dyn_cast<ConstantSDNode>(RHS)) {
4690 if (SetCCOpcode == ISD::SETGT && RHSC->isAllOnesValue()) {
4691 // X > -1 -> X == 0, jump !sign.
4692 RHS = DAG.getConstant(0, DL, RHS.getValueType());
4693 return X86::COND_NS;
4694 }
4695 if (SetCCOpcode == ISD::SETLT && RHSC->isNullValue()) {
4696 // X < 0 -> X == 0, jump on sign.
4697 return X86::COND_S;
4698 }
4699 if (SetCCOpcode == ISD::SETGE && RHSC->isNullValue()) {
4700 // X >= 0 -> X == 0, jump on !sign.
4701 return X86::COND_NS;
4702 }
4703 if (SetCCOpcode == ISD::SETLT && RHSC->getAPIntValue() == 1) {
4704 // X < 1 -> X <= 0
4705 RHS = DAG.getConstant(0, DL, RHS.getValueType());
4706 return X86::COND_LE;
4707 }
4708 }
4709
4710 return TranslateIntegerX86CC(SetCCOpcode);
4711 }
4712
4713 // First determine if it is required or is profitable to flip the operands.
4714
4715 // If LHS is a foldable load, but RHS is not, flip the condition.
4716 if (ISD::isNON_EXTLoad(LHS.getNode()) &&
4717 !ISD::isNON_EXTLoad(RHS.getNode())) {
4718 SetCCOpcode = getSetCCSwappedOperands(SetCCOpcode);
4719 std::swap(LHS, RHS);
4720 }
4721
4722 switch (SetCCOpcode) {
4723 default: break;
4724 case ISD::SETOLT:
4725 case ISD::SETOLE:
4726 case ISD::SETUGT:
4727 case ISD::SETUGE:
4728 std::swap(LHS, RHS);
4729 break;
4730 }
4731
4732 // On a floating point condition, the flags are set as follows:
4733 // ZF PF CF op
4734 // 0 | 0 | 0 | X > Y
4735 // 0 | 0 | 1 | X < Y
4736 // 1 | 0 | 0 | X == Y
4737 // 1 | 1 | 1 | unordered
4738 switch (SetCCOpcode) {
4739 default: llvm_unreachable("Condcode should be pre-legalized away");
4740 case ISD::SETUEQ:
4741 case ISD::SETEQ: return X86::COND_E;
4742 case ISD::SETOLT: // flipped
4743 case ISD::SETOGT:
4744 case ISD::SETGT: return X86::COND_A;
4745 case ISD::SETOLE: // flipped
4746 case ISD::SETOGE:
4747 case ISD::SETGE: return X86::COND_AE;
4748 case ISD::SETUGT: // flipped
4749 case ISD::SETULT:
4750 case ISD::SETLT: return X86::COND_B;
4751 case ISD::SETUGE: // flipped
4752 case ISD::SETULE:
4753 case ISD::SETLE: return X86::COND_BE;
4754 case ISD::SETONE:
4755 case ISD::SETNE: return X86::COND_NE;
4756 case ISD::SETUO: return X86::COND_P;
4757 case ISD::SETO: return X86::COND_NP;
4758 case ISD::SETOEQ:
4759 case ISD::SETUNE: return X86::COND_INVALID;
4760 }
4761}
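A brief worked trace of the floating-point path above (an illustrative aside, not from the file), for ISD::SETOLT, i.e. an ordered a < b:

// SETOLT appears in the operand-flip switch, so the compare is emitted on the
// swapped operands (b, a), and the final switch returns X86::COND_A ("above").
// For ordered a < b the swapped compare clears ZF and CF, so COND_A is taken;
// for unordered inputs ZF = PF = CF = 1 (per the flag table above), so COND_A
// is not taken. Testing COND_B on the unswapped operands would incorrectly
// fire on unordered inputs as well, since CF is also set in that case.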
4762
4763/// Is there a floating point cmov for the specific X86 condition code?
4764/// The current x86 ISA includes the following FP cmov instructions:
4765/// fcmovb, fcmovbe, fcmove, fcmovu, fcmovae, fcmova, fcmovne, fcmovnu.
4766static bool hasFPCMov(unsigned X86CC) {
4767 switch (X86CC) {
4768 default:
4769 return false;
4770 case X86::COND_B:
4771 case X86::COND_BE:
4772 case X86::COND_E:
4773 case X86::COND_P:
4774 case X86::COND_A:
4775 case X86::COND_AE:
4776 case X86::COND_NE:
4777 case X86::COND_NP:
4778 return true;
4779 }
4780}
4781
4782
4783bool X86TargetLowering::getTgtMemIntrinsic(IntrinsicInfo &Info,
4784 const CallInst &I,
4785 MachineFunction &MF,
4786 unsigned Intrinsic) const {
4787
4788 const IntrinsicData* IntrData = getIntrinsicWithChain(Intrinsic);
4789 if (!IntrData)
4790 return false;
4791
4792 Info.flags = MachineMemOperand::MONone;
4793 Info.offset = 0;
4794
4795 switch (IntrData->Type) {
4796 case TRUNCATE_TO_MEM_VI8:
4797 case TRUNCATE_TO_MEM_VI16:
4798 case TRUNCATE_TO_MEM_VI32: {
4799 Info.opc = ISD::INTRINSIC_VOID;
4800 Info.ptrVal = I.getArgOperand(0);
4801 MVT VT = MVT::getVT(I.getArgOperand(1)->getType());
4802 MVT ScalarVT = MVT::INVALID_SIMPLE_VALUE_TYPE;
4803 if (IntrData->Type == TRUNCATE_TO_MEM_VI8)
4804 ScalarVT = MVT::i8;
4805 else if (IntrData->Type == TRUNCATE_TO_MEM_VI16)
4806 ScalarVT = MVT::i16;
4807 else if (IntrData->Type == TRUNCATE_TO_MEM_VI32)
4808 ScalarVT = MVT::i32;
4809
4810 Info.memVT = MVT::getVectorVT(ScalarVT, VT.getVectorNumElements());
4811 Info.align = Align::None();
4812 Info.flags |= MachineMemOperand::MOStore;
4813 break;
4814 }
4815 case GATHER:
4816 case GATHER_AVX2: {
4817 Info.opc = ISD::INTRINSIC_W_CHAIN;
4818 Info.ptrVal = nullptr;
4819 MVT DataVT = MVT::getVT(I.getType());
4820 MVT IndexVT = MVT::getVT(I.getArgOperand(2)->getType());
4821 unsigned NumElts = std::min(DataVT.getVectorNumElements(),
4822 IndexVT.getVectorNumElements());
4823 Info.memVT = MVT::getVectorVT(DataVT.getVectorElementType(), NumElts);
4824 Info.align = Align::None();
4825 Info.flags |= MachineMemOperand::MOLoad;
4826 break;
4827 }
4828 case SCATTER: {
4829 Info.opc = ISD::INTRINSIC_VOID;
4830 Info.ptrVal = nullptr;
4831 MVT DataVT = MVT::getVT(I.getArgOperand(3)->getType());
4832 MVT IndexVT = MVT::getVT(I.getArgOperand(2)->getType());
4833 unsigned NumElts = std::min(DataVT.getVectorNumElements(),
4834 IndexVT.getVectorNumElements());
4835 Info.memVT = MVT::getVectorVT(DataVT.getVectorElementType(), NumElts);
4836 Info.align = Align::None();
4837 Info.flags |= MachineMemOperand::MOStore;
4838 break;
4839 }
4840 default:
4841 return false;
4842 }
4843
4844 return true;
4845}
4846
4847/// Returns true if the target can instruction select the
4848/// specified FP immediate natively. If false, the legalizer will
4849/// materialize the FP immediate as a load from a constant pool.
4850bool X86TargetLowering::isFPImmLegal(const APFloat &Imm, EVT VT,
4851 bool ForCodeSize) const {
4852 for (unsigned i = 0, e = LegalFPImmediates.size(); i != e; ++i) {
4853 if (Imm.bitwiseIsEqual(LegalFPImmediates[i]))
4854 return true;
4855 }
4856 return false;
4857}
4858
4859bool X86TargetLowering::shouldReduceLoadWidth(SDNode *Load,
4860 ISD::LoadExtType ExtTy,
4861 EVT NewVT) const {
4862 assert(cast<LoadSDNode>(Load)->isSimple() && "illegal to narrow");
4863
4864 // "ELF Handling for Thread-Local Storage" specifies that R_X86_64_GOTTPOFF
4865 // relocation targets a movq or addq instruction: don't let the load shrink.
4866 SDValue BasePtr = cast<LoadSDNode>(Load)->getBasePtr();
4867 if (BasePtr.getOpcode() == X86ISD::WrapperRIP)
4868 if (const auto *GA = dyn_cast<GlobalAddressSDNode>(BasePtr.getOperand(0)))
4869 return GA->getTargetFlags() != X86II::MO_GOTTPOFF;
4870
4871 // If this is an (1) AVX vector load with (2) multiple uses and (3) all of
4872 // those uses are extracted directly into a store, then the extract + store
4873 // can be store-folded. Therefore, it's probably not worth splitting the load.
4874 EVT VT = Load->getValueType(0);
4875 if ((VT.is256BitVector() || VT.is512BitVector()) && !Load->hasOneUse()) {
4876 for (auto UI = Load->use_begin(), UE = Load->use_end(); UI != UE; ++UI) {
4877 // Skip uses of the chain value. Result 0 of the node is the load value.
4878 if (UI.getUse().getResNo() != 0)
4879 continue;
4880
4881 // If this use is not an extract + store, it's probably worth splitting.
4882 if (UI->getOpcode() != ISD::EXTRACT_SUBVECTOR || !UI->hasOneUse() ||
4883 UI->use_begin()->getOpcode() != ISD::STORE)
4884 return true;
4885 }
4886 // All non-chain uses are extract + store.
4887 return false;
4888 }
4889
4890 return true;
4891}
4892
4893/// Returns true if it is beneficial to convert a load of a constant
4894/// to just the constant itself.
4895bool X86TargetLowering::shouldConvertConstantLoadToIntImm(const APInt &Imm,
4896 Type *Ty) const {
4897 assert(Ty->isIntegerTy());
4898
4899 unsigned BitSize = Ty->getPrimitiveSizeInBits();
4900 if (BitSize == 0 || BitSize > 64)
4901 return false;
4902 return true;
4903}
4904
4905bool X86TargetLowering::reduceSelectOfFPConstantLoads(EVT CmpOpVT) const {
4906 // If we are using XMM registers in the ABI and the condition of the select is
4907 // a floating-point compare and we have blendv or conditional move, then it is
4908 // cheaper to select instead of doing a cross-register move and creating a
4909 // load that depends on the compare result.
4910 bool IsFPSetCC = CmpOpVT.isFloatingPoint() && CmpOpVT != MVT::f128;
4911 return !IsFPSetCC || !Subtarget.isTarget64BitLP64() || !Subtarget.hasAVX();
4912}
4913
4914bool X86TargetLowering::convertSelectOfConstantsToMath(EVT VT) const {
4915 // TODO: It might be a win to ease or lift this restriction, but the generic
4916 // folds in DAGCombiner conflict with vector folds for an AVX512 target.
4917 if (VT.isVector() && Subtarget.hasAVX512())
4918 return false;
4919
4920 return true;
4921}
4922
4923bool X86TargetLowering::decomposeMulByConstant(LLVMContext &Context, EVT VT,
4924 SDValue C) const {
4925 // TODO: We handle scalars using custom code, but generic combining could make
4926 // that unnecessary.
4927 APInt MulC;
4928 if (!ISD::isConstantSplatVector(C.getNode(), MulC))
4929 return false;
4930
4931 // Find the type this will be legalized to. Otherwise we might prematurely
4932 // convert this to shl+add/sub and then still have to type legalize those ops.
4933 // Another choice would be to defer the decision for illegal types until
4934 // after type legalization. But constant splat vectors of i64 can't make it
4935 // through type legalization on 32-bit targets so we would need to special
4936 // case vXi64.
4937 while (getTypeAction(Context, VT) != TypeLegal)
4938 VT = getTypeToTransformTo(Context, VT);
4939
4940 // If vector multiply is legal, assume that's faster than shl + add/sub.
4941 // TODO: Multiply is a complex op with higher latency and lower throughput in
4942 // most implementations, so this check could be loosened based on type
4943 // and/or a CPU attribute.
4944 if (isOperationLegal(ISD::MUL, VT))
4945 return false;
4946
4947 // shl+add, shl+sub, shl+add+neg
4948 return (MulC + 1).isPowerOf2() || (MulC - 1).isPowerOf2() ||
4949 (1 - MulC).isPowerOf2() || (-(MulC + 1)).isPowerOf2();
4950}
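
A minimal standalone sketch of the final test above, assuming a plain int64_t splat value instead of an APInt (hypothetical helper names, not the LLVM API): a multiplier is decomposed only when it sits one power of two away from a shift, which is what the four power-of-two checks encode.

  #include <cstdint>

  // Illustrative only: mirrors the four power-of-two tests above.
  static bool isPow2(int64_t V) { return V > 0 && (V & (V - 1)) == 0; }

  static bool decomposableMul(int64_t MulC) {
    return isPow2(MulC + 1) ||    // x * 7  -> (x << 3) - x
           isPow2(MulC - 1) ||    // x * 9  -> (x << 3) + x
           isPow2(1 - MulC) ||    // x * -7 -> x - (x << 3)
           isPow2(-(MulC + 1));   // x * -9 -> -((x << 3) + x)
  }
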
4951
4952bool X86TargetLowering::shouldUseStrictFP_TO_INT(EVT FpVT, EVT IntVT,
4953 bool IsSigned) const {
4954 // f80 UINT_TO_FP is more efficient using Strict code if FCMOV is available.
4955 return !IsSigned && FpVT == MVT::f80 && Subtarget.hasCMov();
4956}
4957
4958bool X86TargetLowering::isExtractSubvectorCheap(EVT ResVT, EVT SrcVT,
4959 unsigned Index) const {
4960 if (!isOperationLegalOrCustom(ISD::EXTRACT_SUBVECTOR, ResVT))
4961 return false;
4962
4963 // Mask vectors support all subregister combinations and operations that
4964 // extract half of vector.
4965 if (ResVT.getVectorElementType() == MVT::i1)
4966 return Index == 0 || ((ResVT.getSizeInBits() == SrcVT.getSizeInBits()*2) &&
4967 (Index == ResVT.getVectorNumElements()));
4968
4969 return (Index % ResVT.getVectorNumElements()) == 0;
4970}
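
As a worked example of the index rule above (a sketch assuming a v8i32 source and a v4i32 result): indices 0 and 4 are multiples of the result's element count and are treated as cheap subregister-style extracts, while index 2 is not.

  // Hypothetical helper, not the LLVM API: the non-mask case above reduces to
  // an alignment check on the extract index.
  static bool cheapExtractIndex(unsigned ResNumElts, unsigned Index) {
    return (Index % ResNumElts) == 0;   // v4i32 from v8i32: true for 0 and 4
  }
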
4971
4972bool X86TargetLowering::shouldScalarizeBinop(SDValue VecOp) const {
4973 unsigned Opc = VecOp.getOpcode();
4974
4975 // Assume target opcodes can't be scalarized.
4976 // TODO - do we have any exceptions?
4977 if (Opc >= ISD::BUILTIN_OP_END)
4978 return false;
4979
4980 // If the vector op is not supported, try to convert to scalar.
4981 EVT VecVT = VecOp.getValueType();
4982 if (!isOperationLegalOrCustomOrPromote(Opc, VecVT))
4983 return true;
4984
4985 // If the vector op is supported, but the scalar op is not, the transform may
4986 // not be worthwhile.
4987 EVT ScalarVT = VecVT.getScalarType();
4988 return isOperationLegalOrCustomOrPromote(Opc, ScalarVT);
4989}
4990
4991bool X86TargetLowering::shouldFormOverflowOp(unsigned Opcode, EVT VT) const {
4992 // TODO: Allow vectors?
4993 if (VT.isVector())
4994 return false;
4995 return VT.isSimple() || !isOperationExpand(Opcode, VT);
4996}
4997
4998bool X86TargetLowering::isCheapToSpeculateCttz() const {
4999 // Speculate cttz only if we can directly use TZCNT.
5000 return Subtarget.hasBMI();
5001}
5002
5003bool X86TargetLowering::isCheapToSpeculateCtlz() const {
5004 // Speculate ctlz only if we can directly use LZCNT.
5005 return Subtarget.hasLZCNT();
5006}
5007
5008bool X86TargetLowering::isLoadBitCastBeneficial(EVT LoadVT, EVT BitcastVT,
5009 const SelectionDAG &DAG,
5010 const MachineMemOperand &MMO) const {
5011 if (!Subtarget.hasAVX512() && !LoadVT.isVector() && BitcastVT.isVector() &&
5012 BitcastVT.getVectorElementType() == MVT::i1)
5013 return false;
5014
5015 if (!Subtarget.hasDQI() && BitcastVT == MVT::v8i1 && LoadVT == MVT::i8)
5016 return false;
5017
5018 // If both types are legal vectors, it's always ok to convert them.
5019 if (LoadVT.isVector() && BitcastVT.isVector() &&
5020 isTypeLegal(LoadVT) && isTypeLegal(BitcastVT))
5021 return true;
5022
5023 return TargetLowering::isLoadBitCastBeneficial(LoadVT, BitcastVT, DAG, MMO);
5024}
5025
5026bool X86TargetLowering::canMergeStoresTo(unsigned AddressSpace, EVT MemVT,
5027 const SelectionDAG &DAG) const {
5028 // Do not merge to float value size (128 bytes) if no implicit
5029 // float attribute is set.
5030 bool NoFloat = DAG.getMachineFunction().getFunction().hasFnAttribute(
5031 Attribute::NoImplicitFloat);
5032
5033 if (NoFloat) {
5034 unsigned MaxIntSize = Subtarget.is64Bit() ? 64 : 32;
5035 return (MemVT.getSizeInBits() <= MaxIntSize);
5036 }
5037 // Make sure we don't merge greater than our preferred vector
5038 // width.
5039 if (MemVT.getSizeInBits() > Subtarget.getPreferVectorWidth())
5040 return false;
5041 return true;
5042}
5043
5044bool X86TargetLowering::isCtlzFast() const {
5045 return Subtarget.hasFastLZCNT();
5046}
5047
5048bool X86TargetLowering::isMaskAndCmp0FoldingBeneficial(
5049 const Instruction &AndI) const {
5050 return true;
5051}
5052
5053bool X86TargetLowering::hasAndNotCompare(SDValue Y) const {
5054 EVT VT = Y.getValueType();
5055
5056 if (VT.isVector())
5057 return false;
5058
5059 if (!Subtarget.hasBMI())
5060 return false;
5061
5062 // There are only 32-bit and 64-bit forms for 'andn'.
5063 if (VT != MVT::i32 && VT != MVT::i64)
5064 return false;
5065
5066 return !isa<ConstantSDNode>(Y);
5067}
5068
5069bool X86TargetLowering::hasAndNot(SDValue Y) const {
5070 EVT VT = Y.getValueType();
5071
5072 if (!VT.isVector())
5073 return hasAndNotCompare(Y);
5074
5075 // Vector.
5076
5077 if (!Subtarget.hasSSE1() || VT.getSizeInBits() < 128)
5078 return false;
5079
5080 if (VT == MVT::v4i32)
5081 return true;
5082
5083 return Subtarget.hasSSE2();
5084}
5085
5086bool X86TargetLowering::hasBitTest(SDValue X, SDValue Y) const {
5087 return X.getValueType().isScalarInteger(); // 'bt'
5088}
5089
5090bool X86TargetLowering::
5091 shouldProduceAndByConstByHoistingConstFromShiftsLHSOfAnd(
5092 SDValue X, ConstantSDNode *XC, ConstantSDNode *CC, SDValue Y,
5093 unsigned OldShiftOpcode, unsigned NewShiftOpcode,
5094 SelectionDAG &DAG) const {
5095 // Does baseline recommend not to perform the fold by default?
5096 if (!TargetLowering::shouldProduceAndByConstByHoistingConstFromShiftsLHSOfAnd(
5097 X, XC, CC, Y, OldShiftOpcode, NewShiftOpcode, DAG))
5098 return false;
5099 // For scalars this transform is always beneficial.
5100 if (X.getValueType().isScalarInteger())
5101 return true;
5102 // If all the shift amounts are identical, then transform is beneficial even
5103 // with rudimentary SSE2 shifts.
5104 if (DAG.isSplatValue(Y, /*AllowUndefs=*/true))
5105 return true;
5106 // If we have AVX2 with its powerful shift operations, then it's also good.
5107 if (Subtarget.hasAVX2())
5108 return true;
5109 // Pre-AVX2 vector codegen for this pattern is best for variant with 'shl'.
5110 return NewShiftOpcode == ISD::SHL;
5111}
5112
5113bool X86TargetLowering::shouldFoldConstantShiftPairToMask(
5114 const SDNode *N, CombineLevel Level) const {
5115 assert(((N->getOpcode() == ISD::SHL &&
5116 N->getOperand(0).getOpcode() == ISD::SRL) ||
5117 (N->getOpcode() == ISD::SRL &&
5118 N->getOperand(0).getOpcode() == ISD::SHL)) &&
5119 "Expected shift-shift mask");
5120 EVT VT = N->getValueType(0);
5121 if ((Subtarget.hasFastVectorShiftMasks() && VT.isVector()) ||
5122 (Subtarget.hasFastScalarShiftMasks() && !VT.isVector())) {
5123 // Only fold if the shift values are equal - so it folds to AND.
5124 // TODO - we should fold if either is a non-uniform vector but we don't do
5125 // the fold for non-splats yet.
5126 return N->getOperand(1) == N->getOperand(0).getOperand(1);
5127 }
5128 return TargetLoweringBase::shouldFoldConstantShiftPairToMask(N, Level);
5129}
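
The fold being gated above turns an srl+shl pair with equal amounts into a single AND; a scalar sketch (plain uint32_t, hypothetical helper names) shows the equivalence.

  #include <cstdint>

  // For 0 < C < 32: (x >> C) << C clears the low C bits, so equal shift
  // amounts fold to an AND with an aligned mask; the shl-then-srl form
  // clears high bits instead.
  static uint32_t shiftPairForm(uint32_t X, unsigned C) { return (X >> C) << C; }
  static uint32_t maskForm(uint32_t X, unsigned C) { return X & ~((1u << C) - 1u); }
  // shiftPairForm(0x1234, 4) == maskForm(0x1234, 4) == 0x1230
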
5130
5131bool X86TargetLowering::shouldFoldMaskToVariableShiftPair(SDValue Y) const {
5132 EVT VT = Y.getValueType();
5133
5134 // For vectors, we don't have a preference, but we probably want a mask.
5135 if (VT.isVector())
5136 return false;
5137
5138 // 64-bit shifts on 32-bit targets produce really bad bloated code.
5139 if (VT == MVT::i64 && !Subtarget.is64Bit())
5140 return false;
5141
5142 return true;
5143}
5144
5145bool X86TargetLowering::shouldExpandShift(SelectionDAG &DAG,
5146 SDNode *N) const {
5147 if (DAG.getMachineFunction().getFunction().hasMinSize() &&
5148 !Subtarget.isOSWindows())
5149 return false;
5150 return true;
5151}
5152
5153bool X86TargetLowering::shouldSplatInsEltVarIndex(EVT VT) const {
5154 // Any legal vector type can be splatted more efficiently than
5155 // loading/spilling from memory.
5156 return isTypeLegal(VT);
5157}
5158
5159MVT X86TargetLowering::hasFastEqualityCompare(unsigned NumBits) const {
5160 MVT VT = MVT::getIntegerVT(NumBits);
5161 if (isTypeLegal(VT))
5162 return VT;
5163
5164 // PMOVMSKB can handle this.
5165 if (NumBits == 128 && isTypeLegal(MVT::v16i8))
5166 return MVT::v16i8;
5167
5168 // VPMOVMSKB can handle this.
5169 if (NumBits == 256 && isTypeLegal(MVT::v32i8))
5170 return MVT::v32i8;
5171
5172 // TODO: Allow 64-bit type for 32-bit target.
5173 // TODO: 512-bit types should be allowed, but make sure that those
5174 // cases are handled in combineVectorSizedSetCCEquality().
5175
5176 return MVT::INVALID_SIMPLE_VALUE_TYPE;
5177}
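
A rough standalone sketch of the width-to-type choice above (hypothetical enum and helper; the real code defers to isTypeLegal, so treat the scalar branch as an approximation):

  enum class EqCmpKind { Scalar, V16I8, V32I8, None };

  static EqCmpKind classifyEqualityCompare(unsigned NumBits, bool HasSSE2,
                                           bool HasAVX2) {
    if (NumBits == 8 || NumBits == 16 || NumBits == 32 || NumBits == 64)
      return EqCmpKind::Scalar;          // assuming the integer type is legal
    if (NumBits == 128 && HasSSE2)
      return EqCmpKind::V16I8;           // vector compare + PMOVMSKB
    if (NumBits == 256 && HasAVX2)
      return EqCmpKind::V32I8;           // vector compare + VPMOVMSKB
    return EqCmpKind::None;              // no fast wide equality compare
  }
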
5178
5179/// Val is the undef sentinel value or equal to the specified value.
5180static bool isUndefOrEqual(int Val, int CmpVal) {
5181 return ((Val == SM_SentinelUndef) || (Val == CmpVal));
5182}
5183
5184/// Val is either the undef or zero sentinel value.
5185static bool isUndefOrZero(int Val) {
5186 return ((Val == SM_SentinelUndef) || (Val == SM_SentinelZero));
5187}
5188
5189/// Return true if every element in Mask, beginning from position Pos and ending
5190/// in Pos+Size is the undef sentinel value.
5191static bool isUndefInRange(ArrayRef<int> Mask, unsigned Pos, unsigned Size) {
5192 return llvm::all_of(Mask.slice(Pos, Size),
5193 [](int M) { return M == SM_SentinelUndef; });
5194}
5195
5196/// Return true if the mask creates a vector whose lower half is undefined.
5197static bool isUndefLowerHalf(ArrayRef<int> Mask) {
5198 unsigned NumElts = Mask.size();
5199 return isUndefInRange(Mask, 0, NumElts / 2);
5200}
5201
5202/// Return true if the mask creates a vector whose upper half is undefined.
5203static bool isUndefUpperHalf(ArrayRef<int> Mask) {
5204 unsigned NumElts = Mask.size();
5205 return isUndefInRange(Mask, NumElts / 2, NumElts / 2);
5206}
5207
5208 /// Return true if Val falls within the specified range [Low, Hi).
5209static bool isInRange(int Val, int Low, int Hi) {
5210 return (Val >= Low && Val < Hi);
5211}
5212
5213/// Return true if the value of any element in Mask falls within the specified
5214 /// range [Low, Hi).
5215static bool isAnyInRange(ArrayRef<int> Mask, int Low, int Hi) {
5216 return llvm::any_of(Mask, [Low, Hi](int M) { return isInRange(M, Low, Hi); });
5217}
5218
5219/// Return true if Val is undef or if its value falls within the
5220 /// specified range [Low, Hi).
5221static bool isUndefOrInRange(int Val, int Low, int Hi) {
5222 return (Val == SM_SentinelUndef) || isInRange(Val, Low, Hi);
5223}
5224
5225/// Return true if every element in Mask is undef or if its value
5226 /// falls within the specified range [Low, Hi).
5227static bool isUndefOrInRange(ArrayRef<int> Mask, int Low, int Hi) {
5228 return llvm::all_of(
5229 Mask, [Low, Hi](int M) { return isUndefOrInRange(M, Low, Hi); });
5230}
5231
5232/// Return true if Val is undef, zero or if its value falls within the
5233 /// specified range [Low, Hi).
5234static bool isUndefOrZeroOrInRange(int Val, int Low, int Hi) {
5235 return isUndefOrZero(Val) || isInRange(Val, Low, Hi);
5236}
5237
5238/// Return true if every element in Mask is undef, zero or if its value
5239 /// falls within the specified range [Low, Hi).
5240static bool isUndefOrZeroOrInRange(ArrayRef<int> Mask, int Low, int Hi) {
5241 return llvm::all_of(
5242 Mask, [Low, Hi](int M) { return isUndefOrZeroOrInRange(M, Low, Hi); });
5243}
5244
5245/// Return true if every element in Mask, beginning
5246/// from position Pos and ending in Pos + Size, falls within the specified
5247/// sequence (Low, Low + Step, ..., Low + (Size - 1) * Step) or is undef.
5248static bool isSequentialOrUndefInRange(ArrayRef<int> Mask, unsigned Pos,
5249 unsigned Size, int Low, int Step = 1) {
5250 for (unsigned i = Pos, e = Pos + Size; i != e; ++i, Low += Step)
5251 if (!isUndefOrEqual(Mask[i], Low))
5252 return false;
5253 return true;
5254}
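
A standalone illustration of the sequential-or-undef test above, using plain std::vector<int> and the usual -1 sentinel for an undef mask element (a sketch, not the LLVM helper):

  #include <vector>

  static bool seqOrUndef(const std::vector<int> &Mask, unsigned Pos,
                         unsigned Size, int Low, int Step = 1) {
    for (unsigned i = Pos, e = Pos + Size; i != e; ++i, Low += Step)
      if (Mask[i] != -1 && Mask[i] != Low)   // -1 plays the role of undef
        return false;
    return true;
  }
  // seqOrUndef({0, -1, 2, 3}, 0, 4, 0) -> true  (0,1,2,3 with one undef hole)
  // seqOrUndef({0,  2, 1, 3}, 0, 4, 0) -> false (element 1 breaks the run)
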
5255
5256/// Return true if every element in Mask, beginning
5257/// from position Pos and ending in Pos+Size, falls within the specified
5258 /// sequential range [Low, Low+Size), or is undef or is zero.
5259static bool isSequentialOrUndefOrZeroInRange(ArrayRef<int> Mask, unsigned Pos,
5260 unsigned Size, int Low,
5261 int Step = 1) {
5262 for (unsigned i = Pos, e = Pos + Size; i != e; ++i, Low += Step)
5263 if (!isUndefOrZero(Mask[i]) && Mask[i] != Low)
5264 return false;
5265 return true;
5266}
5267
5268/// Return true if every element in Mask, beginning
5269/// from position Pos and ending in Pos+Size is undef or is zero.
5270static bool isUndefOrZeroInRange(ArrayRef<int> Mask, unsigned Pos,
5271 unsigned Size) {
5272 return llvm::all_of(Mask.slice(Pos, Size),
5273 [](int M) { return isUndefOrZero(M); });
5274}
5275
5276/// Helper function to test whether a shuffle mask could be
5277/// simplified by widening the elements being shuffled.
5278///
5279/// Appends the mask for wider elements in WidenedMask if valid. Otherwise
5280/// leaves it in an unspecified state.
5281///
5282/// NOTE: This must handle normal vector shuffle masks and *target* vector
5283/// shuffle masks. The latter have the special property of a '-2' representing
5284/// a zero-ed lane of a vector.
5285static bool canWidenShuffleElements(ArrayRef<int> Mask,
5286 SmallVectorImpl<int> &WidenedMask) {
5287 WidenedMask.assign(Mask.size() / 2, 0);
5288 for (int i = 0, Size = Mask.size(); i < Size; i += 2) {
5289 int M0 = Mask[i];
5290 int M1 = Mask[i + 1];
5291
5292 // If both elements are undef, it's trivial.
5293 if (M0 == SM_SentinelUndef && M1 == SM_SentinelUndef) {
5294 WidenedMask[i / 2] = SM_SentinelUndef;
5295 continue;
5296 }
5297
5298 // Check for an undef mask and a mask value properly aligned to fit with
5299 // a pair of values. If we find such a case, use the non-undef mask's value.
5300 if (M0 == SM_SentinelUndef && M1 >= 0 && (M1 % 2) == 1) {
5301 WidenedMask[i / 2] = M1 / 2;
5302 continue;
5303 }
5304 if (M1 == SM_SentinelUndef && M0 >= 0 && (M0 % 2) == 0) {
5305 WidenedMask[i / 2] = M0 / 2;
5306 continue;
5307 }
5308
5309 // When zeroing, we need to spread the zeroing across both lanes to widen.
5310 if (M0 == SM_SentinelZero || M1 == SM_SentinelZero) {
5311 if ((M0 == SM_SentinelZero || M0 == SM_SentinelUndef) &&
5312 (M1 == SM_SentinelZero || M1 == SM_SentinelUndef)) {
5313 WidenedMask[i / 2] = SM_SentinelZero;
5314 continue;
5315 }
5316 return false;
5317 }
5318
5319 // Finally check if the two mask values are adjacent and aligned with
5320 // a pair.
5321 if (M0 != SM_SentinelUndef && (M0 % 2) == 0 && (M0 + 1) == M1) {
5322 WidenedMask[i / 2] = M0 / 2;
5323 continue;
5324 }
5325
5326 // Otherwise we can't safely widen the elements used in this shuffle.
5327 return false;
5328 }
5329 assert(WidenedMask.size() == Mask.size() / 2 &&
5330 "Incorrect size of mask after widening the elements!");
5331
5332 return true;
5333}
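
A concrete v8-to-v4 widening example (using -1 for an undef element and -2 for a zeroed lane, per the note above): the mask {0,1, 6,7, -1,5, -2,-2} widens to {0, 3, 2, -2}: the first two pairs are adjacent even/odd pairs, the undef in the third pair defers to the odd element 5 (wide lane 2), and the all-zero/undef last pair becomes a single zero lane. A mask starting {1,2, ...} cannot be widened because 1 is not even-aligned.
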
5334
5335static bool canWidenShuffleElements(ArrayRef<int> Mask,
5336 const APInt &Zeroable,
5337 SmallVectorImpl<int> &WidenedMask) {
5338 SmallVector<int, 32> TargetMask(Mask.begin(), Mask.end());
5339 for (int i = 0, Size = TargetMask.size(); i < Size; ++i) {
5340 if (TargetMask[i] == SM_SentinelUndef)
5341 continue;
5342 if (Zeroable[i])
5343 TargetMask[i] = SM_SentinelZero;
5344 }
5345 return canWidenShuffleElements(TargetMask, WidenedMask);
5346}
5347
5348static bool canWidenShuffleElements(ArrayRef<int> Mask) {
5349 SmallVector<int, 32> WidenedMask;
5350 return canWidenShuffleElements(Mask, WidenedMask);
5351}
5352
5353/// Returns true if Elt is a constant zero or a floating point constant +0.0.
5354bool X86::isZeroNode(SDValue Elt) {
5355 return isNullConstant(Elt) || isNullFPConstant(Elt);
5356}
5357
5358// Build a vector of constants.
5359// Use an UNDEF node if MaskElt == -1.
5360// Split 64-bit constants in the 32-bit mode.
5361static SDValue getConstVector(ArrayRef<int> Values, MVT VT, SelectionDAG &DAG,
5362 const SDLoc &dl, bool IsMask = false) {
5363
5364 SmallVector<SDValue, 32> Ops;
5365 bool Split = false;
5366
5367 MVT ConstVecVT = VT;
5368 unsigned NumElts = VT.getVectorNumElements();
5369 bool In64BitMode = DAG.getTargetLoweringInfo().isTypeLegal(MVT::i64);
5370 if (!In64BitMode && VT.getVectorElementType() == MVT::i64) {
5371 ConstVecVT = MVT::getVectorVT(MVT::i32, NumElts * 2);
5372 Split = true;
5373 }
5374
5375 MVT EltVT = ConstVecVT.getVectorElementType();
5376 for (unsigned i = 0; i < NumElts; ++i) {
5377 bool IsUndef = Values[i] < 0 && IsMask;
5378 SDValue OpNode = IsUndef ? DAG.getUNDEF(EltVT) :
5379 DAG.getConstant(Values[i], dl, EltVT);
5380 Ops.push_back(OpNode);
5381 if (Split)
5382 Ops.push_back(IsUndef ? DAG.getUNDEF(EltVT) :
5383 DAG.getConstant(0, dl, EltVT));
5384 }
5385 SDValue ConstsNode = DAG.getBuildVector(ConstVecVT, dl, Ops);
5386 if (Split)
5387 ConstsNode = DAG.getBitcast(VT, ConstsNode);
5388 return ConstsNode;
5389}
5390
5391static SDValue getConstVector(ArrayRef<APInt> Bits, APInt &Undefs,
5392 MVT VT, SelectionDAG &DAG, const SDLoc &dl) {
5393 assert(Bits.size() == Undefs.getBitWidth() &&
5394 "Unequal constant and undef arrays");
5395 SmallVector<SDValue, 32> Ops;
5396 bool Split = false;
5397
5398 MVT ConstVecVT = VT;
5399 unsigned NumElts = VT.getVectorNumElements();
5400 bool In64BitMode = DAG.getTargetLoweringInfo().isTypeLegal(MVT::i64);
5401 if (!In64BitMode && VT.getVectorElementType() == MVT::i64) {
5402 ConstVecVT = MVT::getVectorVT(MVT::i32, NumElts * 2);
5403 Split = true;
5404 }
5405
5406 MVT EltVT = ConstVecVT.getVectorElementType();
5407 for (unsigned i = 0, e = Bits.size(); i != e; ++i) {
5408 if (Undefs[i]) {
5409 Ops.append(Split ? 2 : 1, DAG.getUNDEF(EltVT));
5410 continue;
5411 }
5412 const APInt &V = Bits[i];
5413 assert(V.getBitWidth() == VT.getScalarSizeInBits() && "Unexpected sizes");
5414 if (Split) {
5415 Ops.push_back(DAG.getConstant(V.trunc(32), dl, EltVT));
5416 Ops.push_back(DAG.getConstant(V.lshr(32).trunc(32), dl, EltVT));
5417 } else if (EltVT == MVT::f32) {
5418 APFloat FV(APFloat::IEEEsingle(), V);
5419 Ops.push_back(DAG.getConstantFP(FV, dl, EltVT));
5420 } else if (EltVT == MVT::f64) {
5421 APFloat FV(APFloat::IEEEdouble(), V);
5422 Ops.push_back(DAG.getConstantFP(FV, dl, EltVT));
5423 } else {
5424 Ops.push_back(DAG.getConstant(V, dl, EltVT));
5425 }
5426 }
5427
5428 SDValue ConstsNode = DAG.getBuildVector(ConstVecVT, dl, Ops);
5429 return DAG.getBitcast(VT, ConstsNode);
5430}
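
The Split path above emits each 64-bit lane as two 32-bit lanes, low word first; a tiny standalone sketch of that split (hypothetical helper name):

  #include <cstdint>
  #include <utility>

  // Matches the trunc(32) / lshr(32).trunc(32) pair above.
  static std::pair<uint32_t, uint32_t> splitLane64(uint64_t V) {
    return {static_cast<uint32_t>(V), static_cast<uint32_t>(V >> 32)};
  }
  // splitLane64(0x0000000100000002ULL) -> {0x2, 0x1}
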
5431
5432/// Returns a vector of specified type with all zero elements.
5433static SDValue getZeroVector(MVT VT, const X86Subtarget &Subtarget,
5434 SelectionDAG &DAG, const SDLoc &dl) {
5435 assert((VT.is128BitVector() || VT.is256BitVector() || VT.is512BitVector() ||
5436 VT.getVectorElementType() == MVT::i1) &&
5437 "Unexpected vector type");
5438
5439 // Try to build SSE/AVX zero vectors as <N x i32> bitcasted to their dest
5440 // type. This ensures they get CSE'd. But if the integer type is not
5441 // available, use a floating-point +0.0 instead.
5442 SDValue Vec;
5443 if (!Subtarget.hasSSE2() && VT.is128BitVector()) {
5444 Vec = DAG.getConstantFP(+0.0, dl, MVT::v4f32);
5445 } else if (VT.isFloatingPoint()) {
5446 Vec = DAG.getConstantFP(+0.0, dl, VT);
5447 } else if (VT.getVectorElementType() == MVT::i1) {
5448 assert((Subtarget.hasBWI() || VT.getVectorNumElements() <= 16) &&
5449 "Unexpected vector type");
5450 Vec = DAG.getConstant(0, dl, VT);
5451 } else {
5452 unsigned Num32BitElts = VT.getSizeInBits() / 32;
5453 Vec = DAG.getConstant(0, dl, MVT::getVectorVT(MVT::i32, Num32BitElts));
5454 }
5455 return DAG.getBitcast(VT, Vec);
5456}
5457
5458static SDValue extractSubVector(SDValue Vec, unsigned IdxVal, SelectionDAG &DAG,
5459 const SDLoc &dl, unsigned vectorWidth) {
5460 EVT VT = Vec.getValueType();
5461 EVT ElVT = VT.getVectorElementType();
5462 unsigned Factor = VT.getSizeInBits()/vectorWidth;
5463 EVT ResultVT = EVT::getVectorVT(*DAG.getContext(), ElVT,
5464 VT.getVectorNumElements()/Factor);
5465
5466 // Extract the relevant vectorWidth bits. Generate an EXTRACT_SUBVECTOR
5467 unsigned ElemsPerChunk = vectorWidth / ElVT.getSizeInBits();
5468 assert(isPowerOf2_32(ElemsPerChunk) && "Elements per chunk not power of 2");
5469
5470 // This is the index of the first element of the vectorWidth-bit chunk
5471 // we want. Since ElemsPerChunk is a power of 2 just need to clear bits.
5472 IdxVal &= ~(ElemsPerChunk - 1);
5473
5474 // If the input is a buildvector just emit a smaller one.
5475 if (Vec.getOpcode() == ISD::BUILD_VECTOR)
5476 return DAG.getBuildVector(ResultVT, dl,
5477 Vec->ops().slice(IdxVal, ElemsPerChunk));
5478
5479 SDValue VecIdx = DAG.getIntPtrConstant(IdxVal, dl);
5480 return DAG.getNode(ISD::EXTRACT_SUBVECTOR, dl, ResultVT, Vec, VecIdx);
5481}
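
The index handling above relies on ElemsPerChunk being a power of two; a small sketch of the rounding (hypothetical helper name):

  // Clearing the low bits rounds the index down to a whole chunk, e.g. with
  // 4 x i32 per 128-bit chunk an index of 5 starts the extract at element 4.
  static unsigned chunkAlignedIndex(unsigned IdxVal, unsigned ElemsPerChunk) {
    return IdxVal & ~(ElemsPerChunk - 1);   // chunkAlignedIndex(5, 4) == 4
  }
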
5482
5483/// Generate a DAG to grab 128-bits from a vector > 128 bits. This
5484/// sets things up to match to an AVX VEXTRACTF128 / VEXTRACTI128
5485/// or AVX-512 VEXTRACTF32x4 / VEXTRACTI32x4
5486/// instructions or a simple subregister reference. Idx is an index in the
5487/// 128 bits we want. It need not be aligned to a 128-bit boundary. That makes
5488/// lowering EXTRACT_VECTOR_ELT operations easier.
5489static SDValue extract128BitVector(SDValue Vec, unsigned IdxVal,
5490 SelectionDAG &DAG, const SDLoc &dl) {
5491 assert((Vec.getValueType().is256BitVector() ||
5492 Vec.getValueType().is512BitVector()) && "Unexpected vector size!");
5493 return extractSubVector(Vec, IdxVal, DAG, dl, 128);
5494}
5495
5496/// Generate a DAG to grab 256-bits from a 512-bit vector.
5497static SDValue extract256BitVector(SDValue Vec, unsigned IdxVal,
5498 SelectionDAG &DAG, const SDLoc &dl) {
5499 assert(Vec.getValueType().is512BitVector() && "Unexpected vector size!");
5500 return extractSubVector(Vec, IdxVal, DAG, dl, 256);
5501}
5502
5503static SDValue insertSubVector(SDValue Result, SDValue Vec, unsigned IdxVal,
5504 SelectionDAG &DAG, const SDLoc &dl,
5505 unsigned vectorWidth) {
5506 assert((vectorWidth == 128 || vectorWidth == 256) &&
5507 "Unsupported vector width");
5508 // Inserting an UNDEF subvector leaves Result unchanged.
5509 if (Vec.isUndef())
5510 return Result;
5511 EVT VT = Vec.getValueType();
5512 EVT ElVT = VT.getVectorElementType();
5513 EVT ResultVT = Result.getValueType();
5514
5515 // Insert the relevant vectorWidth bits.
5516 unsigned ElemsPerChunk = vectorWidth/ElVT.getSizeInBits();
5517 assert(isPowerOf2_32(ElemsPerChunk) && "Elements per chunk not power of 2");
5518
5519 // This is the index of the first element of the vectorWidth-bit chunk
5520 // we want. Since ElemsPerChunk is a power of 2 just need to clear bits.
5521 IdxVal &= ~(ElemsPerChunk - 1);
5522
5523 SDValue VecIdx = DAG.getIntPtrConstant(IdxVal, dl);
5524 return DAG.getNode(ISD::INSERT_SUBVECTOR, dl, ResultVT, Result, Vec, VecIdx);
5525}
5526
5527/// Generate a DAG to put 128-bits into a vector > 128 bits. This
5528/// sets things up to match to an AVX VINSERTF128/VINSERTI128 or
5529/// AVX-512 VINSERTF32x4/VINSERTI32x4 instructions or a
5530/// simple superregister reference. Idx is an index in the 128 bits
5531/// we want. It need not be aligned to a 128-bit boundary. That makes
5532/// lowering INSERT_VECTOR_ELT operations easier.
5533static SDValue insert128BitVector(SDValue Result, SDValue Vec, unsigned IdxVal,
5534 SelectionDAG &DAG, const SDLoc &dl) {
5535 assert(Vec.getValueType().is128BitVector() && "Unexpected vector size!");
5536 return insertSubVector(Result, Vec, IdxVal, DAG, dl, 128);
5537}
5538
5539/// Widen a vector to a larger size with the same scalar type, with the new
5540/// elements either zero or undef.
5541static SDValue widenSubVector(MVT VT, SDValue Vec, bool ZeroNewElements,
5542 const X86Subtarget &Subtarget, SelectionDAG &DAG,
5543 const SDLoc &dl) {
5544 assert(Vec.getValueSizeInBits() < VT.getSizeInBits() &&
5545 Vec.getValueType().getScalarType() == VT.getScalarType() &&
5546 "Unsupported vector widening type");
5547 SDValue Res = ZeroNewElements ? getZeroVector(VT, Subtarget, DAG, dl)
5548 : DAG.getUNDEF(VT);
5549 return DAG.getNode(ISD::INSERT_SUBVECTOR, dl, VT, Res, Vec,
5550 DAG.getIntPtrConstant(0, dl));
5551}
5552
5553/// Widen a vector to a larger size with the same scalar type, with the new
5554/// elements either zero or undef.
5555static SDValue widenSubVector(SDValue Vec, bool ZeroNewElements,
5556 const X86Subtarget &Subtarget, SelectionDAG &DAG,
5557 const SDLoc &dl, unsigned WideSizeInBits) {
5558 assert(Vec.getValueSizeInBits() < WideSizeInBits &&
5559 (WideSizeInBits % Vec.getScalarValueSizeInBits()) == 0 &&
5560 "Unsupported vector widening type");
5561 unsigned WideNumElts = WideSizeInBits / Vec.getScalarValueSizeInBits();
5562 MVT SVT = Vec.getSimpleValueType().getScalarType();
5563 MVT VT = MVT::getVectorVT(SVT, WideNumElts);
5564 return widenSubVector(VT, Vec, ZeroNewElements, Subtarget, DAG, dl);
5565}
5566
5567 // Helper function to collect subvector ops that are concatenated together,
5568// either by ISD::CONCAT_VECTORS or a ISD::INSERT_SUBVECTOR series.
5569// The subvectors in Ops are guaranteed to be the same type.
5570static bool collectConcatOps(SDNode *N, SmallVectorImpl<SDValue> &Ops) {
5571 assert(Ops.empty() && "Expected an empty ops vector");
5572
5573 if (N->getOpcode() == ISD::CONCAT_VECTORS) {
5574 Ops.append(N->op_begin(), N->op_end());
5575 return true;
5576 }
5577
5578 if (N->getOpcode() == ISD::INSERT_SUBVECTOR &&
5579 isa<ConstantSDNode>(N->getOperand(2))) {
5580 SDValue Src = N->getOperand(0);
5581 SDValue Sub = N->getOperand(1);
5582 const APInt &Idx = N->getConstantOperandAPInt(2);
5583 EVT VT = Src.getValueType();
5584 EVT SubVT = Sub.getValueType();
5585
5586 // TODO - Handle more general insert_subvector chains.
5587 if (VT.getSizeInBits() == (SubVT.getSizeInBits() * 2) &&
5588 Idx == (VT.getVectorNumElements() / 2) &&
5589 Src.getOpcode() == ISD::INSERT_SUBVECTOR &&
5590 Src.getOperand(1).getValueType() == SubVT &&
5591 isNullConstant(Src.getOperand(2))) {
5592 Ops.push_back(Src.getOperand(1));
5593 Ops.push_back(Sub);
5594 return true;
5595 }
5596 }
5597
5598 return false;
5599}
5600
5601// Helper for splitting operands of an operation to legal target size and
5602 // applying a function on each part.
5603// Useful for operations that are available on SSE2 in 128-bit, on AVX2 in
5604// 256-bit and on AVX512BW in 512-bit. The argument VT is the type used for
5605// deciding if/how to split Ops. Ops elements do *not* have to be of type VT.
5606// The argument Builder is a function that will be applied on each split part:
5607// SDValue Builder(SelectionDAG&G, SDLoc, ArrayRef<SDValue>)
5608template <typename F>
5609SDValue SplitOpsAndApply(SelectionDAG &DAG, const X86Subtarget &Subtarget,
5610 const SDLoc &DL, EVT VT, ArrayRef<SDValue> Ops,
5611 F Builder, bool CheckBWI = true) {
5612 assert(Subtarget.hasSSE2() && "Target assumed to support at least SSE2");
5613 unsigned NumSubs = 1;
5614 if ((CheckBWI && Subtarget.useBWIRegs()) ||
5615 (!CheckBWI && Subtarget.useAVX512Regs())) {
5616 if (VT.getSizeInBits() > 512) {
5617 NumSubs = VT.getSizeInBits() / 512;
5618 assert((VT.getSizeInBits() % 512) == 0 && "Illegal vector size");
5619 }
5620 } else if (Subtarget.hasAVX2()) {
5621 if (VT.getSizeInBits() > 256) {
5622 NumSubs = VT.getSizeInBits() / 256;
5623 assert((VT.getSizeInBits() % 256) == 0 && "Illegal vector size");
5624 }
5625 } else {
5626 if (VT.getSizeInBits() > 128) {
5627 NumSubs = VT.getSizeInBits() / 128;
5628 assert((VT.getSizeInBits() % 128) == 0 && "Illegal vector size");
5629 }
5630 }
5631
5632 if (NumSubs == 1)
5633 return Builder(DAG, DL, Ops);
5634
5635 SmallVector<SDValue, 4> Subs;
5636 for (unsigned i = 0; i != NumSubs; ++i) {
5637 SmallVector<SDValue, 2> SubOps;
5638 for (SDValue Op : Ops) {
5639 EVT OpVT = Op.getValueType();
5640 unsigned NumSubElts = OpVT.getVectorNumElements() / NumSubs;
5641 unsigned SizeSub = OpVT.getSizeInBits() / NumSubs;
5642 SubOps.push_back(extractSubVector(Op, i * NumSubElts, DAG, DL, SizeSub));
5643 }
5644 Subs.push_back(Builder(DAG, DL, SubOps));
5645 }
5646 return DAG.getNode(ISD::CONCAT_VECTORS, DL, VT, Subs);
5647}
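
A standalone sketch of the split-count choice above (hypothetical helper; Use512 stands in for the useBWIRegs/useAVX512Regs check): the widest vector register class the subtarget offers decides how many pieces a wide type is broken into.

  // e.g. a 512-bit op on an AVX2-only target is built as two 256-bit halves.
  static unsigned numSplitParts(unsigned VTBits, bool Use512, bool HasAVX2) {
    unsigned Widest = Use512 ? 512 : (HasAVX2 ? 256 : 128);
    return VTBits > Widest ? VTBits / Widest : 1;
  }
  // numSplitParts(512, /*Use512=*/false, /*HasAVX2=*/true) == 2
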
5648
5649/// Insert i1-subvector to i1-vector.
5650static SDValue insert1BitVector(SDValue Op, SelectionDAG &DAG,
5651 const X86Subtarget &Subtarget) {
5652
5653 SDLoc dl(Op);
5654 SDValue Vec = Op.getOperand(0);
5655 SDValue SubVec = Op.getOperand(1);
5656 SDValue Idx = Op.getOperand(2);
5657
5658 if (!isa<ConstantSDNode>(Idx))
5659 return SDValue();
5660
5661 // Inserting undef is a nop. We can just return the original vector.
5662 if (SubVec.isUndef())
5663 return Vec;
5664
5665 unsigned IdxVal = cast<ConstantSDNode>(Idx)->getZExtValue();
5666 if (IdxVal == 0 && Vec.isUndef()) // the operation is legal
5667 return Op;
5668
5669 MVT OpVT = Op.getSimpleValueType();
5670 unsigned NumElems = OpVT.getVectorNumElements();
5671
5672 SDValue ZeroIdx = DAG.getIntPtrConstant(0, dl);
5673
5674 // Extend to natively supported kshift.
5675 MVT WideOpVT = OpVT;
5676 if ((!Subtarget.hasDQI() && NumElems == 8) || NumElems < 8)
5677 WideOpVT = Subtarget.hasDQI() ? MVT::v8i1 : MVT::v16i1;
5678
5679 // Inserting into the lsbs of a zero vector is legal. ISel will insert shifts
5680 // if necessary.
5681 if (IdxVal == 0 && ISD::isBuildVectorAllZeros(Vec.getNode())) {
5682 // May need to promote to a legal type.
5683 Op = DAG.getNode(ISD::INSERT_SUBVECTOR, dl, WideOpVT,
5684 DAG.getConstant(0, dl, WideOpVT),
5685 SubVec, Idx);
5686 return DAG.getNode(ISD::EXTRACT_SUBVECTOR, dl, OpVT, Op, ZeroIdx);
5687 }
5688
5689 MVT SubVecVT = SubVec.getSimpleValueType();
5690 unsigned SubVecNumElems = SubVecVT.getVectorNumElements();
5691
5692 assert(IdxVal + SubVecNumElems <= NumElems &&
5693 IdxVal % SubVecVT.getSizeInBits() == 0 &&
5694 "Unexpected index value in INSERT_SUBVECTOR");
5695
5696 SDValue Undef = DAG.getUNDEF(WideOpVT);
5697
5698 if (IdxVal == 0) {
5699 // Zero lower bits of the Vec
5700 SDValue ShiftBits = DAG.getTargetConstant(SubVecNumElems, dl, MVT::i8);
5701 Vec = DAG.getNode(ISD::INSERT_SUBVECTOR, dl, WideOpVT, Undef, Vec,
5702 ZeroIdx);
5703 Vec = DAG.getNode(X86ISD::KSHIFTR, dl, WideOpVT, Vec, ShiftBits);
5704 Vec = DAG.getNode(X86ISD::KSHIFTL, dl, WideOpVT, Vec, ShiftBits);
5705 // Merge them together, SubVec should be zero extended.
5706 SubVec = DAG.getNode(ISD::INSERT_SUBVECTOR, dl, WideOpVT,
5707 DAG.getConstant(0, dl, WideOpVT),
5708 SubVec, ZeroIdx);
5709 Op = DAG.getNode(ISD::OR, dl, WideOpVT, Vec, SubVec);
5710 return DAG.getNode(ISD::EXTRACT_SUBVECTOR, dl, OpVT, Op, ZeroIdx);
5711 }
5712
5713 SubVec = DAG.getNode(ISD::INSERT_SUBVECTOR, dl, WideOpVT,
5714 Undef, SubVec, ZeroIdx);
5715
5716 if (Vec.isUndef()) {
5717 assert(IdxVal != 0 && "Unexpected index");
5718 SubVec = DAG.getNode(X86ISD::KSHIFTL, dl, WideOpVT, SubVec,
5719 DAG.getTargetConstant(IdxVal, dl, MVT::i8));
5720 return DAG.getNode(ISD::EXTRACT_SUBVECTOR, dl, OpVT, SubVec, ZeroIdx);
5721 }
5722
5723 if (ISD::isBuildVectorAllZeros(Vec.getNode())) {
5724 assert(IdxVal != 0 && "Unexpected index");
5725 NumElems = WideOpVT.getVectorNumElements();
5726 unsigned ShiftLeft = NumElems - SubVecNumElems;
5727 unsigned ShiftRight = NumElems - SubVecNumElems - IdxVal;
5728 SubVec = DAG.getNode(X86ISD::KSHIFTL, dl, WideOpVT, SubVec,
5729 DAG.getTargetConstant(ShiftLeft, dl, MVT::i8));
5730 if (ShiftRight != 0)
5731 SubVec = DAG.getNode(X86ISD::KSHIFTR, dl, WideOpVT, SubVec,
5732 DAG.getTargetConstant(ShiftRight, dl, MVT::i8));
5733 return DAG.getNode(ISD::EXTRACT_SUBVECTOR, dl, OpVT, SubVec, ZeroIdx);
5734 }
5735
5736 // Simple case when we put subvector in the upper part
5737 if (IdxVal + SubVecNumElems == NumElems) {
5738 SubVec = DAG.getNode(X86ISD::KSHIFTL, dl, WideOpVT, SubVec,
5739 DAG.getTargetConstant(IdxVal, dl, MVT::i8));
5740 if (SubVecNumElems * 2 == NumElems) {
5741 // Special case, use legal zero extending insert_subvector. This allows
5742 // isel to optimize when bits are known zero.
5743 Vec = DAG.getNode(ISD::EXTRACT_SUBVECTOR, dl, SubVecVT, Vec, ZeroIdx);
5744 Vec = DAG.getNode(ISD::INSERT_SUBVECTOR, dl, WideOpVT,
5745 DAG.getConstant(0, dl, WideOpVT),
5746 Vec, ZeroIdx);
5747 } else {
5748 // Otherwise use explicit shifts to zero the bits.
5749 Vec = DAG.getNode(ISD::INSERT_SUBVECTOR, dl, WideOpVT,
5750 Undef, Vec, ZeroIdx);
5751 NumElems = WideOpVT.getVectorNumElements();
5752 SDValue ShiftBits = DAG.getTargetConstant(NumElems - IdxVal, dl, MVT::i8);
5753 Vec = DAG.getNode(X86ISD::KSHIFTL, dl, WideOpVT, Vec, ShiftBits);
5754 Vec = DAG.getNode(X86ISD::KSHIFTR, dl, WideOpVT, Vec, ShiftBits);
5755 }
5756 Op = DAG.getNode(ISD::OR, dl, WideOpVT, Vec, SubVec);
5757 return DAG.getNode(ISD::EXTRACT_SUBVECTOR, dl, OpVT, Op, ZeroIdx);
5758 }
5759
5760 // Inserting into the middle is more complicated.
5761
5762 NumElems = WideOpVT.getVectorNumElements();
5763
5764 // Widen the vector if needed.
5765 Vec = DAG.getNode(ISD::INSERT_SUBVECTOR, dl, WideOpVT, Undef, Vec, ZeroIdx);
5766
5767 // Clear the upper bits of the subvector and move it to its insert position.
5768 unsigned ShiftLeft = NumElems - SubVecNumElems;
5769 SubVec = DAG.getNode(X86ISD::KSHIFTL, dl, WideOpVT, SubVec,
5770 DAG.getTargetConstant(ShiftLeft, dl, MVT::i8));
5771 unsigned ShiftRight = NumElems - SubVecNumElems - IdxVal;
5772 SubVec = DAG.getNode(X86ISD::KSHIFTR, dl, WideOpVT, SubVec,
5773 DAG.getTargetConstant(ShiftRight, dl, MVT::i8));
5774
5775 // Isolate the bits below the insertion point.
5776 unsigned LowShift = NumElems - IdxVal;
5777 SDValue Low = DAG.getNode(X86ISD::KSHIFTL, dl, WideOpVT, Vec,
5778 DAG.getTargetConstant(LowShift, dl, MVT::i8));
5779 Low = DAG.getNode(X86ISD::KSHIFTR, dl, WideOpVT, Low,
5780 DAG.getTargetConstant(LowShift, dl, MVT::i8));
5781
5782 // Isolate the bits after the last inserted bit.
5783 unsigned HighShift = IdxVal + SubVecNumElems;
5784 SDValue High = DAG.getNode(X86ISD::KSHIFTR, dl, WideOpVT, Vec,
5785 DAG.getTargetConstant(HighShift, dl, MVT::i8));
5786 High = DAG.getNode(X86ISD::KSHIFTL, dl, WideOpVT, High,
5787 DAG.getTargetConstant(HighShift, dl, MVT::i8));
5788
5789 // Now OR all 3 pieces together.
5790 Vec = DAG.getNode(ISD::OR, dl, WideOpVT, Low, High);
5791 SubVec = DAG.getNode(ISD::OR, dl, WideOpVT, SubVec, Vec);
5792
5793 // Reduce to original width if needed.
5794 return DAG.getNode(ISD::EXTRACT_SUBVECTOR, dl, OpVT, SubVec, ZeroIdx);
5795}
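
The middle-insertion sequence above is easiest to check with ordinary shifts on a scalar bit mask, since KSHIFTL/KSHIFTR on a v8i1 value behave like shifts on an 8-bit integer (bit i = lane i). A sketch under that assumption, for a 2-element subvector (hypothetical helper, ignoring the widening step):

  #include <cstdint>

  static uint8_t insertMask2At(uint8_t Vec, uint8_t Sub2, unsigned Idx) {
    const unsigned N = 8, SubN = 2;
    // Clear the subvector's upper bits and move it to the insert position.
    uint8_t Sub = (uint8_t)((uint8_t)(Sub2 << (N - SubN)) >> (N - SubN - Idx));
    // Isolate the bits below the insertion point.
    uint8_t Low = (uint8_t)((uint8_t)(Vec << (N - Idx)) >> (N - Idx));
    // Isolate the bits after the last inserted bit.
    uint8_t High = (uint8_t)((uint8_t)(Vec >> (Idx + SubN)) << (Idx + SubN));
    return (uint8_t)(Sub | Low | High);
  }
  // insertMask2At(0xFF, 0x0, 2) == 0xF3  (lanes 2 and 3 replaced by the subvector)
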
5796
5797static SDValue concatSubVectors(SDValue V1, SDValue V2, SelectionDAG &DAG,
5798 const SDLoc &dl) {
5799 assert(V1.getValueType() == V2.getValueType() && "subvector type mismatch");
5800 EVT SubVT = V1.getValueType();
5801 EVT SubSVT = SubVT.getScalarType();
5802 unsigned SubNumElts = SubVT.getVectorNumElements();
5803 unsigned SubVectorWidth = SubVT.getSizeInBits();
5804 EVT VT = EVT::getVectorVT(*DAG.getContext(), SubSVT, 2 * SubNumElts);
5805 SDValue V = insertSubVector(DAG.getUNDEF(VT), V1, 0, DAG, dl, SubVectorWidth);
5806 return insertSubVector(V, V2, SubNumElts, DAG, dl, SubVectorWidth);
5807}
5808
5809/// Returns a vector of specified type with all bits set.
5810/// Always build ones vectors as <4 x i32>, <8 x i32> or <16 x i32>.
5811/// Then bitcast to their original type, ensuring they get CSE'd.
5812static SDValue getOnesVector(EVT VT, SelectionDAG &DAG, const SDLoc &dl) {
5813 assert((VT.is128BitVector() || VT.is256BitVector() || VT.is512BitVector()) &&
5814 "Expected a 128/256/512-bit vector type");
5815
5816 APInt Ones = APInt::getAllOnesValue(32);
5817 unsigned NumElts = VT.getSizeInBits() / 32;
5818 SDValue Vec = DAG.getConstant(Ones, dl, MVT::getVectorVT(MVT::i32, NumElts));
5819 return DAG.getBitcast(VT, Vec);
5820}
5821
5822// Convert *_EXTEND to *_EXTEND_VECTOR_INREG opcode.
5823static unsigned getOpcode_EXTEND_VECTOR_INREG(unsigned Opcode) {
5824 switch (Opcode) {
5825 case ISD::ANY_EXTEND:
5826 case ISD::ANY_EXTEND_VECTOR_INREG:
5827 return ISD::ANY_EXTEND_VECTOR_INREG;
5828 case ISD::ZERO_EXTEND:
5829 case ISD::ZERO_EXTEND_VECTOR_INREG:
5830 return ISD::ZERO_EXTEND_VECTOR_INREG;
5831 case ISD::SIGN_EXTEND:
5832 case ISD::SIGN_EXTEND_VECTOR_INREG:
5833 return ISD::SIGN_EXTEND_VECTOR_INREG;
5834 }
5835 llvm_unreachable("Unknown opcode");
5836}
5837
5838static SDValue getExtendInVec(unsigned Opcode, const SDLoc &DL, EVT VT,
5839 SDValue In, SelectionDAG &DAG) {
5840 EVT InVT = In.getValueType();
5841 assert(VT.isVector() && InVT.isVector() && "Expected vector VTs.");
5842 assert((ISD::ANY_EXTEND == Opcode || ISD::SIGN_EXTEND == Opcode ||
5843 ISD::ZERO_EXTEND == Opcode) &&
5844 "Unknown extension opcode");
5845
5846 // For 256-bit vectors, we only need the lower (128-bit) input half.
5847 // For 512-bit vectors, we only need the lower input half or quarter.
5848 if (InVT.getSizeInBits() > 128) {
5849 assert(VT.getSizeInBits() == InVT.getSizeInBits() &&
5850 "Expected VTs to be the same size!");
5851 unsigned Scale = VT.getScalarSizeInBits() / InVT.getScalarSizeInBits();
5852 In = extractSubVector(In, 0, DAG, DL,
5853 std::max(128U, VT.getSizeInBits() / Scale));
5854 InVT = In.getValueType();
5855 }
5856
5857 if (VT.getVectorNumElements() != InVT.getVectorNumElements())
5858 Opcode = getOpcode_EXTEND_VECTOR_INREG(Opcode);
5859
5860 return DAG.getNode(Opcode, DL, VT, In);
5861}
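// A minimal worked example (illustrative): extending a 256-bit input such as
// zext v32i8 -> v8i32 first extracts the low 128-bit half (Scale = 32/8 = 4,
// so max(128, 256/4) = 128 bits, i.e. v16i8); the element counts now differ
// (16 vs 8), so the opcode is switched to ISD::ZERO_EXTEND_VECTOR_INREG.
// A same-element-count case such as zext v8i16 -> v8i32 keeps the plain
// ISD::ZERO_EXTEND node.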
5862
5863// Match (xor X, -1) -> X.
5864// Match extract_subvector(xor X, -1) -> extract_subvector(X).
5865// Match concat_vectors(xor X, -1, xor Y, -1) -> concat_vectors(X, Y).
5866static SDValue IsNOT(SDValue V, SelectionDAG &DAG) {
5867 V = peekThroughBitcasts(V);
5868 if (V.getOpcode() == ISD::XOR &&
5869 ISD::isBuildVectorAllOnes(V.getOperand(1).getNode()))
5870 return V.getOperand(0);
5871 if (V.getOpcode() == ISD::EXTRACT_SUBVECTOR &&
5872 (isNullConstant(V.getOperand(1)) || V.getOperand(0).hasOneUse())) {
5873 if (SDValue Not = IsNOT(V.getOperand(0), DAG)) {
5874 Not = DAG.getBitcast(V.getOperand(0).getValueType(), Not);
5875 return DAG.getNode(ISD::EXTRACT_SUBVECTOR, SDLoc(Not), V.getValueType(),
5876 Not, V.getOperand(1));
5877 }
5878 }
5879 SmallVector<SDValue, 2> CatOps;
5880 if (collectConcatOps(V.getNode(), CatOps)) {
5881 for (SDValue &CatOp : CatOps) {
5882 SDValue NotCat = IsNOT(CatOp, DAG);
5883 if (!NotCat) return SDValue();
5884 CatOp = DAG.getBitcast(CatOp.getValueType(), NotCat);
5885 }
5886 return DAG.getNode(ISD::CONCAT_VECTORS, SDLoc(V), V.getValueType(), CatOps);
5887 }
5888 return SDValue();
5889}
5890
5891/// Returns a vector_shuffle node for an unpackl operation.
5892static SDValue getUnpackl(SelectionDAG &DAG, const SDLoc &dl, MVT VT,
5893 SDValue V1, SDValue V2) {
5894 SmallVector<int, 8> Mask;
5895 createUnpackShuffleMask(VT, Mask, /* Lo = */ true, /* Unary = */ false);
5896 return DAG.getVectorShuffle(VT, dl, V1, V2, Mask);
5897}
5898
5899/// Returns a vector_shuffle node for an unpackh operation.
5900static SDValue getUnpackh(SelectionDAG &DAG, const SDLoc &dl, MVT VT,
5901 SDValue V1, SDValue V2) {
5902 SmallVector<int, 8> Mask;
5903 createUnpackShuffleMask(VT, Mask, /* Lo = */ false, /* Unary = */ false);
5904 return DAG.getVectorShuffle(VT, dl, V1, V2, Mask);
5905}
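// Illustrative sketch of the masks these helpers build (assuming a single
// 128-bit lane, e.g. MVT::v4i32): getUnpackl produces the interleaving mask
// <0, 4, 1, 5> (low halves of V1/V2) and getUnpackh produces <2, 6, 3, 7>
// (high halves), matching the PUNPCKL/PUNPCKH element ordering.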
5906
5907 /// Return a vector_shuffle of the specified vector and a zero or undef vector.
5908/// This produces a shuffle where the low element of V2 is swizzled into the
5909/// zero/undef vector, landing at element Idx.
5910/// This produces a shuffle mask like 4,1,2,3 (idx=0) or 0,1,2,4 (idx=3).
5911static SDValue getShuffleVectorZeroOrUndef(SDValue V2, int Idx,
5912 bool IsZero,
5913 const X86Subtarget &Subtarget,
5914 SelectionDAG &DAG) {
5915 MVT VT = V2.getSimpleValueType();
5916 SDValue V1 = IsZero
5917 ? getZeroVector(VT, Subtarget, DAG, SDLoc(V2)) : DAG.getUNDEF(VT);
5918 int NumElems = VT.getVectorNumElements();
5919 SmallVector<int, 16> MaskVec(NumElems);
5920 for (int i = 0; i != NumElems; ++i)
5921 // If this is the insertion idx, put the low elt of V2 here.
5922 MaskVec[i] = (i == Idx) ? NumElems : i;
5923 return DAG.getVectorShuffle(VT, SDLoc(V2), V1, V2, MaskVec);
5924}
5925
5926static const Constant *getTargetConstantFromNode(LoadSDNode *Load) {
5927 if (!Load || !ISD::isNormalLoad(Load))
5928 return nullptr;
5929
5930 SDValue Ptr = Load->getBasePtr();
5931 if (Ptr->getOpcode() == X86ISD::Wrapper ||
5932 Ptr->getOpcode() == X86ISD::WrapperRIP)
5933 Ptr = Ptr->getOperand(0);
5934
5935 auto *CNode = dyn_cast<ConstantPoolSDNode>(Ptr);
5936 if (!CNode || CNode->isMachineConstantPoolEntry() || CNode->getOffset() != 0)
5937 return nullptr;
5938
5939 return CNode->getConstVal();
5940}
5941
5942static const Constant *getTargetConstantFromNode(SDValue Op) {
5943 Op = peekThroughBitcasts(Op);
5944 return getTargetConstantFromNode(dyn_cast<LoadSDNode>(Op));
5945}
5946
5947const Constant *
5948X86TargetLowering::getTargetConstantFromLoad(LoadSDNode *LD) const {
5949 assert(LD && "Unexpected null LoadSDNode");
5950 return getTargetConstantFromNode(LD);
5951}
5952
5953// Extract raw constant bits from constant pools.
5954static bool getTargetConstantBitsFromNode(SDValue Op, unsigned EltSizeInBits,
5955 APInt &UndefElts,
5956 SmallVectorImpl<APInt> &EltBits,
5957 bool AllowWholeUndefs = true,
5958 bool AllowPartialUndefs = true) {
5959 assert(EltBits.empty() && "Expected an empty EltBits vector");
5960
5961 Op = peekThroughBitcasts(Op);
5962
5963 EVT VT = Op.getValueType();
5964 unsigned SizeInBits = VT.getSizeInBits();
5965 assert((SizeInBits % EltSizeInBits) == 0 && "Can't split constant!");
5966 unsigned NumElts = SizeInBits / EltSizeInBits;
5967
5968 // Bitcast a source array of element bits to the target size.
5969 auto CastBitData = [&](APInt &UndefSrcElts, ArrayRef<APInt> SrcEltBits) {
5970 unsigned NumSrcElts = UndefSrcElts.getBitWidth();
5971 unsigned SrcEltSizeInBits = SrcEltBits[0].getBitWidth();
5972 assert((NumSrcElts * SrcEltSizeInBits) == SizeInBits &&
5973        "Constant bit sizes don't match");
5974
5975 // Don't split if we don't allow undef bits.
5976 bool AllowUndefs = AllowWholeUndefs || AllowPartialUndefs;
5977 if (UndefSrcElts.getBoolValue() && !AllowUndefs)
5978 return false;
5979
5980 // If we're already the right size, don't bother bitcasting.
5981 if (NumSrcElts == NumElts) {
5982 UndefElts = UndefSrcElts;
5983 EltBits.assign(SrcEltBits.begin(), SrcEltBits.end());
5984 return true;
5985 }
5986
5987 // Extract all the undef/constant element data and pack into single bitsets.
5988 APInt UndefBits(SizeInBits, 0);
5989 APInt MaskBits(SizeInBits, 0);
5990
5991 for (unsigned i = 0; i != NumSrcElts; ++i) {
5992 unsigned BitOffset = i * SrcEltSizeInBits;
5993 if (UndefSrcElts[i])
5994 UndefBits.setBits(BitOffset, BitOffset + SrcEltSizeInBits);
5995 MaskBits.insertBits(SrcEltBits[i], BitOffset);
5996 }
5997
5998 // Split the undef/constant single bitset data into the target elements.
5999 UndefElts = APInt(NumElts, 0);
6000 EltBits.resize(NumElts, APInt(EltSizeInBits, 0));
6001
6002 for (unsigned i = 0; i != NumElts; ++i) {
6003 unsigned BitOffset = i * EltSizeInBits;
6004 APInt UndefEltBits = UndefBits.extractBits(EltSizeInBits, BitOffset);
6005
6006 // Only treat an element as UNDEF if all bits are UNDEF.
6007 if (UndefEltBits.isAllOnesValue()) {
6008 if (!AllowWholeUndefs)
6009 return false;
6010 UndefElts.setBit(i);
6011 continue;
6012 }
6013
6014 // If only some bits are UNDEF then treat them as zero (or bail if not
6015 // supported).
6016 if (UndefEltBits.getBoolValue() && !AllowPartialUndefs)
6017 return false;
6018
6019 EltBits[i] = MaskBits.extractBits(EltSizeInBits, BitOffset);
6020 }
6021 return true;
6022 };
6023
6024 // Collect constant bits and insert into mask/undef bit masks.
6025 auto CollectConstantBits = [](const Constant *Cst, APInt &Mask, APInt &Undefs,
6026 unsigned UndefBitIndex) {
6027 if (!Cst)
6028 return false;
6029 if (isa<UndefValue>(Cst)) {
6030 Undefs.setBit(UndefBitIndex);
6031 return true;
6032 }
6033 if (auto *CInt = dyn_cast<ConstantInt>(Cst)) {
6034 Mask = CInt->getValue();
6035 return true;
6036 }
6037 if (auto *CFP = dyn_cast<ConstantFP>(Cst)) {
6038 Mask = CFP->getValueAPF().bitcastToAPInt();
6039 return true;
6040 }
6041 return false;
6042 };
6043
6044 // Handle UNDEFs.
6045 if (Op.isUndef()) {
6046 APInt UndefSrcElts = APInt::getAllOnesValue(NumElts);
6047 SmallVector<APInt, 64> SrcEltBits(NumElts, APInt(EltSizeInBits, 0));
6048 return CastBitData(UndefSrcElts, SrcEltBits);
6049 }
6050
6051 // Extract scalar constant bits.
6052 if (auto *Cst = dyn_cast<ConstantSDNode>(Op)) {
6053 APInt UndefSrcElts = APInt::getNullValue(1);
6054 SmallVector<APInt, 64> SrcEltBits(1, Cst->getAPIntValue());
6055 return CastBitData(UndefSrcElts, SrcEltBits);
6056 }
6057 if (auto *Cst = dyn_cast<ConstantFPSDNode>(Op)) {
6058 APInt UndefSrcElts = APInt::getNullValue(1);
6059 APInt RawBits = Cst->getValueAPF().bitcastToAPInt();
6060 SmallVector<APInt, 64> SrcEltBits(1, RawBits);
6061 return CastBitData(UndefSrcElts, SrcEltBits);
6062 }
6063
6064 // Extract constant bits from build vector.
6065 if (ISD::isBuildVectorOfConstantSDNodes(Op.getNode())) {
6066 unsigned SrcEltSizeInBits = VT.getScalarSizeInBits();
6067 unsigned NumSrcElts = SizeInBits / SrcEltSizeInBits;
6068
6069 APInt UndefSrcElts(NumSrcElts, 0);
6070 SmallVector<APInt, 64> SrcEltBits(NumSrcElts, APInt(SrcEltSizeInBits, 0));
6071 for (unsigned i = 0, e = Op.getNumOperands(); i != e; ++i) {
6072 const SDValue &Src = Op.getOperand(i);
6073 if (Src.isUndef()) {
6074 UndefSrcElts.setBit(i);
6075 continue;
6076 }
6077 auto *Cst = cast<ConstantSDNode>(Src);
6078 SrcEltBits[i] = Cst->getAPIntValue().zextOrTrunc(SrcEltSizeInBits);
6079 }
6080 return CastBitData(UndefSrcElts, SrcEltBits);
6081 }
6082 if (ISD::isBuildVectorOfConstantFPSDNodes(Op.getNode())) {
6083 unsigned SrcEltSizeInBits = VT.getScalarSizeInBits();
6084 unsigned NumSrcElts = SizeInBits / SrcEltSizeInBits;
6085
6086 APInt UndefSrcElts(NumSrcElts, 0);
6087 SmallVector<APInt, 64> SrcEltBits(NumSrcElts, APInt(SrcEltSizeInBits, 0));
6088 for (unsigned i = 0, e = Op.getNumOperands(); i != e; ++i) {
6089 const SDValue &Src = Op.getOperand(i);
6090 if (Src.isUndef()) {
6091 UndefSrcElts.setBit(i);
6092 continue;
6093 }
6094 auto *Cst = cast<ConstantFPSDNode>(Src);
6095 APInt RawBits = Cst->getValueAPF().bitcastToAPInt();
6096 SrcEltBits[i] = RawBits.zextOrTrunc(SrcEltSizeInBits);
6097 }
6098 return CastBitData(UndefSrcElts, SrcEltBits);
6099 }
6100
6101 // Extract constant bits from constant pool vector.
6102 if (auto *Cst = getTargetConstantFromNode(Op)) {
6103 Type *CstTy = Cst->getType();
6104 unsigned CstSizeInBits = CstTy->getPrimitiveSizeInBits();
6105 if (!CstTy->isVectorTy() || (CstSizeInBits % SizeInBits) != 0)
6106 return false;
6107
6108 unsigned SrcEltSizeInBits = CstTy->getScalarSizeInBits();
6109 unsigned NumSrcElts = SizeInBits / SrcEltSizeInBits;
6110
6111 APInt UndefSrcElts(NumSrcElts, 0);
6112 SmallVector<APInt, 64> SrcEltBits(NumSrcElts, APInt(SrcEltSizeInBits, 0));
6113 for (unsigned i = 0; i != NumSrcElts; ++i)
6114 if (!CollectConstantBits(Cst->getAggregateElement(i), SrcEltBits[i],
6115 UndefSrcElts, i))
6116 return false;
6117
6118 return CastBitData(UndefSrcElts, SrcEltBits);
6119 }
6120
6121 // Extract constant bits from a broadcasted constant pool scalar.
6122 if (Op.getOpcode() == X86ISD::VBROADCAST &&
6123 EltSizeInBits <= VT.getScalarSizeInBits()) {
6124 if (auto *Broadcast = getTargetConstantFromNode(Op.getOperand(0))) {
6125 unsigned SrcEltSizeInBits = Broadcast->getType()->getScalarSizeInBits();
6126 unsigned NumSrcElts = SizeInBits / SrcEltSizeInBits;
6127
6128 APInt UndefSrcElts(NumSrcElts, 0);
6129 SmallVector<APInt, 64> SrcEltBits(1, APInt(SrcEltSizeInBits, 0));
6130 if (CollectConstantBits(Broadcast, SrcEltBits[0], UndefSrcElts, 0)) {
6131 if (UndefSrcElts[0])
6132 UndefSrcElts.setBits(0, NumSrcElts);
6133 SrcEltBits.append(NumSrcElts - 1, SrcEltBits[0]);
6134 return CastBitData(UndefSrcElts, SrcEltBits);
6135 }
6136 }
6137 }
6138
6139 if (Op.getOpcode() == X86ISD::VBROADCAST_LOAD &&
6140 EltSizeInBits <= VT.getScalarSizeInBits()) {
6141 auto *MemIntr = cast<MemIntrinsicSDNode>(Op);
6142 if (MemIntr->getMemoryVT().getScalarSizeInBits() != VT.getScalarSizeInBits())
6143 return false;
6144
6145 SDValue Ptr = MemIntr->getBasePtr();
6146 if (Ptr->getOpcode() == X86ISD::Wrapper ||
6147 Ptr->getOpcode() == X86ISD::WrapperRIP)
6148 Ptr = Ptr->getOperand(0);
6149
6150 auto *CNode = dyn_cast<ConstantPoolSDNode>(Ptr);
6151 if (!CNode || CNode->isMachineConstantPoolEntry() ||
6152 CNode->getOffset() != 0)
6153 return false;
6154
6155 if (const Constant *C = CNode->getConstVal()) {
6156 unsigned SrcEltSizeInBits = C->getType()->getScalarSizeInBits();
6157 unsigned NumSrcElts = SizeInBits / SrcEltSizeInBits;
6158
6159 APInt UndefSrcElts(NumSrcElts, 0);
6160 SmallVector<APInt, 64> SrcEltBits(1, APInt(SrcEltSizeInBits, 0));
6161 if (CollectConstantBits(C, SrcEltBits[0], UndefSrcElts, 0)) {
6162 if (UndefSrcElts[0])
6163 UndefSrcElts.setBits(0, NumSrcElts);
6164 SrcEltBits.append(NumSrcElts - 1, SrcEltBits[0]);
6165 return CastBitData(UndefSrcElts, SrcEltBits);
6166 }
6167 }
6168 }
6169
6170 // Extract constant bits from a subvector broadcast.
6171 if (Op.getOpcode() == X86ISD::SUBV_BROADCAST) {
6172 SmallVector<APInt, 16> SubEltBits;
6173 if (getTargetConstantBitsFromNode(Op.getOperand(0), EltSizeInBits,
6174 UndefElts, SubEltBits, AllowWholeUndefs,
6175 AllowPartialUndefs)) {
6176 UndefElts = APInt::getSplat(NumElts, UndefElts);
6177 while (EltBits.size() < NumElts)
6178 EltBits.append(SubEltBits.begin(), SubEltBits.end());
6179 return true;
6180 }
6181 }
6182
6183 // Extract a rematerialized scalar constant insertion.
6184 if (Op.getOpcode() == X86ISD::VZEXT_MOVL &&
6185 Op.getOperand(0).getOpcode() == ISD::SCALAR_TO_VECTOR &&
6186 isa<ConstantSDNode>(Op.getOperand(0).getOperand(0))) {
6187 unsigned SrcEltSizeInBits = VT.getScalarSizeInBits();
6188 unsigned NumSrcElts = SizeInBits / SrcEltSizeInBits;
6189
6190 APInt UndefSrcElts(NumSrcElts, 0);
6191 SmallVector<APInt, 64> SrcEltBits;
6192 auto *CN = cast<ConstantSDNode>(Op.getOperand(0).getOperand(0));
6193 SrcEltBits.push_back(CN->getAPIntValue().zextOrTrunc(SrcEltSizeInBits));
6194 SrcEltBits.append(NumSrcElts - 1, APInt(SrcEltSizeInBits, 0));
6195 return CastBitData(UndefSrcElts, SrcEltBits);
6196 }
6197
6198 // Insert constant bits from a base and sub vector sources.
6199 if (Op.getOpcode() == ISD::INSERT_SUBVECTOR &&
6200 isa<ConstantSDNode>(Op.getOperand(2))) {
6201 // TODO - support insert_subvector through bitcasts.
6202 if (EltSizeInBits != VT.getScalarSizeInBits())
6203 return false;
6204
6205 APInt UndefSubElts;
6206 SmallVector<APInt, 32> EltSubBits;
6207 if (getTargetConstantBitsFromNode(Op.getOperand(1), EltSizeInBits,
6208 UndefSubElts, EltSubBits,
6209 AllowWholeUndefs, AllowPartialUndefs) &&
6210 getTargetConstantBitsFromNode(Op.getOperand(0), EltSizeInBits,
6211 UndefElts, EltBits, AllowWholeUndefs,
6212 AllowPartialUndefs)) {
6213 unsigned BaseIdx = Op.getConstantOperandVal(2);
6214 UndefElts.insertBits(UndefSubElts, BaseIdx);
6215 for (unsigned i = 0, e = EltSubBits.size(); i != e; ++i)
6216 EltBits[BaseIdx + i] = EltSubBits[i];
6217 return true;
6218 }
6219 }
6220
6221 // Extract constant bits from a subvector's source.
6222 if (Op.getOpcode() == ISD::EXTRACT_SUBVECTOR &&
6223 isa<ConstantSDNode>(Op.getOperand(1))) {
6224 // TODO - support extract_subvector through bitcasts.
6225 if (EltSizeInBits != VT.getScalarSizeInBits())
6226 return false;
6227
6228 if (getTargetConstantBitsFromNode(Op.getOperand(0), EltSizeInBits,
6229 UndefElts, EltBits, AllowWholeUndefs,
6230 AllowPartialUndefs)) {
6231 EVT SrcVT = Op.getOperand(0).getValueType();
6232 unsigned NumSrcElts = SrcVT.getVectorNumElements();
6233 unsigned NumSubElts = VT.getVectorNumElements();
6234 unsigned BaseIdx = Op.getConstantOperandVal(1);
6235 UndefElts = UndefElts.extractBits(NumSubElts, BaseIdx);
6236 if ((BaseIdx + NumSubElts) != NumSrcElts)
6237 EltBits.erase(EltBits.begin() + BaseIdx + NumSubElts, EltBits.end());
6238 if (BaseIdx != 0)
6239 EltBits.erase(EltBits.begin(), EltBits.begin() + BaseIdx);
6240 return true;
6241 }
6242 }
6243
6244 // Extract constant bits from shuffle node sources.
6245 if (auto *SVN = dyn_cast<ShuffleVectorSDNode>(Op)) {
6246 // TODO - support shuffle through bitcasts.
6247 if (EltSizeInBits != VT.getScalarSizeInBits())
6248 return false;
6249
6250 ArrayRef<int> Mask = SVN->getMask();
6251 if ((!AllowWholeUndefs || !AllowPartialUndefs) &&
6252 llvm::any_of(Mask, [](int M) { return M < 0; }))
6253 return false;
6254
6255 APInt UndefElts0, UndefElts1;
6256 SmallVector<APInt, 32> EltBits0, EltBits1;
6257 if (isAnyInRange(Mask, 0, NumElts) &&
6258 !getTargetConstantBitsFromNode(Op.getOperand(0), EltSizeInBits,
6259 UndefElts0, EltBits0, AllowWholeUndefs,
6260 AllowPartialUndefs))
6261 return false;
6262 if (isAnyInRange(Mask, NumElts, 2 * NumElts) &&
6263 !getTargetConstantBitsFromNode(Op.getOperand(1), EltSizeInBits,
6264 UndefElts1, EltBits1, AllowWholeUndefs,
6265 AllowPartialUndefs))
6266 return false;
6267
6268 UndefElts = APInt::getNullValue(NumElts);
6269 for (int i = 0; i != (int)NumElts; ++i) {
6270 int M = Mask[i];
6271 if (M < 0) {
6272 UndefElts.setBit(i);
6273 EltBits.push_back(APInt::getNullValue(EltSizeInBits));
6274 } else if (M < (int)NumElts) {
6275 if (UndefElts0[M])
6276 UndefElts.setBit(i);
6277 EltBits.push_back(EltBits0[M]);
6278 } else {
6279 if (UndefElts1[M - NumElts])
6280 UndefElts.setBit(i);
6281 EltBits.push_back(EltBits1[M - NumElts]);
6282 }
6283 }
6284 return true;
6285 }
6286
6287 return false;
6288}
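// Rough usage sketch (illustrative): querying a v2i64 constant build_vector
// with EltSizeInBits = 8 packs both 64-bit values into one 128-bit APInt and
// re-splits it into sixteen byte-sized EltBits entries (lowest bit offset
// first), while UndefElts carries one bit per output element; a source
// element that is wholly undef marks every byte it covers as undef.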
6289
6290namespace llvm {
6291namespace X86 {
6292bool isConstantSplat(SDValue Op, APInt &SplatVal) {
6293 APInt UndefElts;
6294 SmallVector<APInt, 16> EltBits;
6295 if (getTargetConstantBitsFromNode(Op, Op.getScalarValueSizeInBits(),
6296 UndefElts, EltBits, true, false)) {
6297 int SplatIndex = -1;
6298 for (int i = 0, e = EltBits.size(); i != e; ++i) {
6299 if (UndefElts[i])
6300 continue;
6301 if (0 <= SplatIndex && EltBits[i] != EltBits[SplatIndex]) {
6302 SplatIndex = -1;
6303 break;
6304 }
6305 SplatIndex = i;
6306 }
6307 if (0 <= SplatIndex) {
6308 SplatVal = EltBits[SplatIndex];
6309 return true;
6310 }
6311 }
6312
6313 return false;
6314}
6315} // namespace X86
6316} // namespace llvm
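// For example (illustrative): isConstantSplat on a v4i32 build_vector of
// <7, 7, undef, 7> succeeds with SplatVal = 7; whole-undef elements are
// tolerated (AllowWholeUndefs = true) but partial-undef bits are not
// (AllowPartialUndefs = false), and all defined elements must be identical.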
6317
6318static bool getTargetShuffleMaskIndices(SDValue MaskNode,
6319 unsigned MaskEltSizeInBits,
6320 SmallVectorImpl<uint64_t> &RawMask,
6321 APInt &UndefElts) {
6322 // Extract the raw target constant bits.
6323 SmallVector<APInt, 64> EltBits;
6324 if (!getTargetConstantBitsFromNode(MaskNode, MaskEltSizeInBits, UndefElts,
6325 EltBits, /* AllowWholeUndefs */ true,
6326 /* AllowPartialUndefs */ false))
6327 return false;
6328
6329 // Insert the extracted elements into the mask.
6330 for (APInt Elt : EltBits)
6331 RawMask.push_back(Elt.getZExtValue());
6332
6333 return true;
6334}
6335
6336/// Create a shuffle mask that matches the PACKSS/PACKUS truncation.
6337/// Note: This ignores saturation, so inputs must be checked first.
6338static void createPackShuffleMask(MVT VT, SmallVectorImpl<int> &Mask,
6339 bool Unary) {
6340 assert(Mask.empty() && "Expected an empty shuffle mask vector");
6341 unsigned NumElts = VT.getVectorNumElements();
6342 unsigned NumLanes = VT.getSizeInBits() / 128;
6343 unsigned NumEltsPerLane = 128 / VT.getScalarSizeInBits();
6344 unsigned Offset = Unary ? 0 : NumElts;
6345
6346 for (unsigned Lane = 0; Lane != NumLanes; ++Lane) {
6347 for (unsigned Elt = 0; Elt != NumEltsPerLane; Elt += 2)
6348 Mask.push_back(Elt + (Lane * NumEltsPerLane));
6349 for (unsigned Elt = 0; Elt != NumEltsPerLane; Elt += 2)
6350 Mask.push_back(Elt + (Lane * NumEltsPerLane) + Offset);
6351 }
6352}
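// Example mask (illustrative, single 128-bit lane): for VT = MVT::v16i8 the
// binary form yields <0,2,4,6,8,10,12,14, 16,18,20,22,24,26,28,30>, i.e. the
// even (low) bytes of the first operand followed by those of the second,
// which is how PACKSSWB/PACKUSWB look when both operands are viewed as v16i8.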
6353
6354// Split the demanded elts of a PACKSS/PACKUS node between its operands.
6355static void getPackDemandedElts(EVT VT, const APInt &DemandedElts,
6356 APInt &DemandedLHS, APInt &DemandedRHS) {
6357 int NumLanes = VT.getSizeInBits() / 128;
6358 int NumElts = DemandedElts.getBitWidth();
6359 int NumInnerElts = NumElts / 2;
6360 int NumEltsPerLane = NumElts / NumLanes;
6361 int NumInnerEltsPerLane = NumInnerElts / NumLanes;
6362
6363 DemandedLHS = APInt::getNullValue(NumInnerElts);
6364 DemandedRHS = APInt::getNullValue(NumInnerElts);
6365
6366 // Map DemandedElts to the packed operands.
6367 for (int Lane = 0; Lane != NumLanes; ++Lane) {
6368 for (int Elt = 0; Elt != NumInnerEltsPerLane; ++Elt) {
6369 int OuterIdx = (Lane * NumEltsPerLane) + Elt;
6370 int InnerIdx = (Lane * NumInnerEltsPerLane) + Elt;
6371 if (DemandedElts[OuterIdx])
6372 DemandedLHS.setBit(InnerIdx);
6373 if (DemandedElts[OuterIdx + NumInnerEltsPerLane])
6374 DemandedRHS.setBit(InnerIdx);
6375 }
6376 }
6377}
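// Worked example (illustrative): for a 128-bit pack producing v16i8
// (NumInnerElts = 8, one lane), demanding result byte 3 sets bit 3 of
// DemandedLHS, while demanding result byte 11 sets bit 3 of DemandedRHS;
// the low half of each lane comes from the LHS, the high half from the RHS.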
6378
6379// Split the demanded elts of a HADD/HSUB node between its operands.
6380static void getHorizDemandedElts(EVT VT, const APInt &DemandedElts,
6381 APInt &DemandedLHS, APInt &DemandedRHS) {
6382 int NumLanes = VT.getSizeInBits() / 128;
6383 int NumElts = DemandedElts.getBitWidth();
6384 int NumEltsPerLane = NumElts / NumLanes;
6385 int HalfEltsPerLane = NumEltsPerLane / 2;
6386
6387 DemandedLHS = APInt::getNullValue(NumElts);
6388 DemandedRHS = APInt::getNullValue(NumElts);
6389
6390 // Map DemandedElts to the horizontal operands.
6391 for (int Idx = 0; Idx != NumElts; ++Idx) {
6392 if (!DemandedElts[Idx])
6393 continue;
6394 int LaneIdx = (Idx / NumEltsPerLane) * NumEltsPerLane;
6395 int LocalIdx = Idx % NumEltsPerLane;
6396 if (LocalIdx < HalfEltsPerLane) {
6397 DemandedLHS.setBit(LaneIdx + 2 * LocalIdx + 0);
6398 DemandedLHS.setBit(LaneIdx + 2 * LocalIdx + 1);
6399 } else {
6400 LocalIdx -= HalfEltsPerLane;
6401 DemandedRHS.setBit(LaneIdx + 2 * LocalIdx + 0);
6402 DemandedRHS.setBit(LaneIdx + 2 * LocalIdx + 1);
6403 }
6404 }
6405}
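// Worked example (illustrative): for a v4f32 HADD, result element 1 is
// op0[2] + op0[3], so demanding it sets DemandedLHS bits 2 and 3; result
// element 2 is op1[0] + op1[1], so demanding it sets DemandedRHS bits 0 and 1.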
6406
6407/// Calculates the shuffle mask corresponding to the target-specific opcode.
6408/// If the mask could be calculated, returns it in \p Mask, returns the shuffle
6409/// operands in \p Ops, and returns true.
6410/// Sets \p IsUnary to true if only one source is used. Note that this will set
6411/// IsUnary for shuffles which use a single input multiple times, and in those
6412/// cases it will adjust the mask to only have indices within that single input.
6413/// It is an error to call this with non-empty Mask/Ops vectors.
6414static bool getTargetShuffleMask(SDNode *N, MVT VT, bool AllowSentinelZero,
6415 SmallVectorImpl<SDValue> &Ops,
6416 SmallVectorImpl<int> &Mask, bool &IsUnary) {
6417 unsigned NumElems = VT.getVectorNumElements();
6418 unsigned MaskEltSize = VT.getScalarSizeInBits();
6419 SmallVector<uint64_t, 32> RawMask;
6420 APInt RawUndefs;
6421 SDValue ImmN;
6422
6423 assert(Mask.empty() && "getTargetShuffleMask expects an empty Mask vector");
6424 assert(Ops.empty() && "getTargetShuffleMask expects an empty Ops vector");
6425
6426 IsUnary = false;
6427 bool IsFakeUnary = false;
6428 switch (N->getOpcode()) {
6429 case X86ISD::BLENDI:
6430 assert(N->getOperand(0).getValueType() == VT && "Unexpected value type");
6431 assert(N->getOperand(1).getValueType() == VT && "Unexpected value type");
6432 ImmN = N->getOperand(N->getNumOperands() - 1);
6433 DecodeBLENDMask(NumElems, cast<ConstantSDNode>(ImmN)->getZExtValue(), Mask);
6434 IsUnary = IsFakeUnary = N->getOperand(0) == N->getOperand(1);
6435 break;
6436 case X86ISD::SHUFP:
6437 assert(N->getOperand(0).getValueType() == VT && "Unexpected value type");
6438 assert(N->getOperand(1).getValueType() == VT && "Unexpected value type");
6439 ImmN = N->getOperand(N->getNumOperands() - 1);
6440 DecodeSHUFPMask(NumElems, MaskEltSize,
6441 cast<ConstantSDNode>(ImmN)->getZExtValue(), Mask);
6442 IsUnary = IsFakeUnary = N->getOperand(0) == N->getOperand(1);
6443 break;
6444 case X86ISD::INSERTPS:
6445 assert(N->getOperand(0).getValueType() == VT && "Unexpected value type");
6446 assert(N->getOperand(1).getValueType() == VT && "Unexpected value type");
6447 ImmN = N->getOperand(N->getNumOperands() - 1);
6448 DecodeINSERTPSMask(cast<ConstantSDNode>(ImmN)->getZExtValue(), Mask);
6449 IsUnary = IsFakeUnary = N->getOperand(0) == N->getOperand(1);
6450 break;
6451 case X86ISD::EXTRQI:
6452 assert(N->getOperand(0).getValueType() == VT && "Unexpected value type");
6453 if (isa<ConstantSDNode>(N->getOperand(1)) &&
6454 isa<ConstantSDNode>(N->getOperand(2))) {
6455 int BitLen = N->getConstantOperandVal(1);
6456 int BitIdx = N->getConstantOperandVal(2);
6457 DecodeEXTRQIMask(NumElems, MaskEltSize, BitLen, BitIdx, Mask);
6458 IsUnary = true;
6459 }
6460 break;
6461 case X86ISD::INSERTQI:
6462 assert(N->getOperand(0).getValueType() == VT && "Unexpected value type");
6463 assert(N->getOperand(1).getValueType() == VT && "Unexpected value type");
6464 if (isa<ConstantSDNode>(N->getOperand(2)) &&
6465 isa<ConstantSDNode>(N->getOperand(3))) {
6466 int BitLen = N->getConstantOperandVal(2);
6467 int BitIdx = N->getConstantOperandVal(3);
6468 DecodeINSERTQIMask(NumElems, MaskEltSize, BitLen, BitIdx, Mask);
6469 IsUnary = IsFakeUnary = N->getOperand(0) == N->getOperand(1);
6470 }
6471 break;
6472 case X86ISD::UNPCKH:
6473 assert(N->getOperand(0).getValueType() == VT && "Unexpected value type");
6474 assert(N->getOperand(1).getValueType() == VT && "Unexpected value type");
6475 DecodeUNPCKHMask(NumElems, MaskEltSize, Mask);
6476 IsUnary = IsFakeUnary = N->getOperand(0) == N->getOperand(1);
6477 break;
6478 case X86ISD::UNPCKL:
6479 assert(N->getOperand(0).getValueType() == VT && "Unexpected value type");
6480 assert(N->getOperand(1).getValueType() == VT && "Unexpected value type");
6481 DecodeUNPCKLMask(NumElems, MaskEltSize, Mask);
6482 IsUnary = IsFakeUnary = N->getOperand(0) == N->getOperand(1);
6483 break;
6484 case X86ISD::MOVHLPS:
6485 assert(N->getOperand(0).getValueType() == VT && "Unexpected value type");
6486 assert(N->getOperand(1).getValueType() == VT && "Unexpected value type");
6487 DecodeMOVHLPSMask(NumElems, Mask);
6488 IsUnary = IsFakeUnary = N->getOperand(0) == N->getOperand(1);
6489 break;
6490 case X86ISD::MOVLHPS:
6491 assert(N->getOperand(0).getValueType() == VT && "Unexpected value type");
6492 assert(N->getOperand(1).getValueType() == VT && "Unexpected value type");
6493 DecodeMOVLHPSMask(NumElems, Mask);
6494 IsUnary = IsFakeUnary = N->getOperand(0) == N->getOperand(1);
6495 break;
6496 case X86ISD::PALIGNR:
6497 assert(VT.getScalarType() == MVT::i8 && "Byte vector expected");
6498 assert(N->getOperand(0).getValueType() == VT && "Unexpected value type");
6499 assert(N->getOperand(1).getValueType() == VT && "Unexpected value type");
6500 ImmN = N->getOperand(N->getNumOperands() - 1);
6501 DecodePALIGNRMask(NumElems, cast<ConstantSDNode>(ImmN)->getZExtValue(),
6502 Mask);
6503 IsUnary = IsFakeUnary = N->getOperand(0) == N->getOperand(1);
6504 Ops.push_back(N->getOperand(1));
6505 Ops.push_back(N->getOperand(0));
6506 break;
6507 case X86ISD::VSHLDQ:
6508 assert(VT.getScalarType() == MVT::i8 && "Byte vector expected");
6509 assert(N->getOperand(0).getValueType() == VT && "Unexpected value type");
6510 ImmN = N->getOperand(N->getNumOperands() - 1);
6511 DecodePSLLDQMask(NumElems, cast<ConstantSDNode>(ImmN)->getZExtValue(),
6512 Mask);
6513 IsUnary = true;
6514 break;
6515 case X86ISD::VSRLDQ:
6516 assert(VT.getScalarType() == MVT::i8 && "Byte vector expected");
6517 assert(N->getOperand(0).getValueType() == VT && "Unexpected value type");
6518 ImmN = N->getOperand(N->getNumOperands() - 1);
6519 DecodePSRLDQMask(NumElems, cast<ConstantSDNode>(ImmN)->getZExtValue(),
6520 Mask);
6521 IsUnary = true;
6522 break;
6523 case X86ISD::PSHUFD:
6524 case X86ISD::VPERMILPI:
6525 assert(N->getOperand(0).getValueType() == VT && "Unexpected value type");
6526 ImmN = N->getOperand(N->getNumOperands() - 1);
6527 DecodePSHUFMask(NumElems, MaskEltSize,
6528 cast<ConstantSDNode>(ImmN)->getZExtValue(), Mask);
6529 IsUnary = true;
6530 break;
6531 case X86ISD::PSHUFHW:
6532 assert(N->getOperand(0).getValueType() == VT && "Unexpected value type");
6533 ImmN = N->getOperand(N->getNumOperands() - 1);
6534 DecodePSHUFHWMask(NumElems, cast<ConstantSDNode>(ImmN)->getZExtValue(),
6535 Mask);
6536 IsUnary = true;
6537 break;
6538 case X86ISD::PSHUFLW:
6539 assert(N->getOperand(0).getValueType() == VT && "Unexpected value type");
6540 ImmN = N->getOperand(N->getNumOperands() - 1);
6541 DecodePSHUFLWMask(NumElems, cast<ConstantSDNode>(ImmN)->getZExtValue(),
6542 Mask);
6543 IsUnary = true;
6544 break;
6545 case X86ISD::VZEXT_MOVL:
6546 assert(N->getOperand(0).getValueType() == VT && "Unexpected value type");
6547 DecodeZeroMoveLowMask(NumElems, Mask);
6548 IsUnary = true;
6549 break;
6550 case X86ISD::VBROADCAST: {
6551 SDValue N0 = N->getOperand(0);
6552 // See if we're broadcasting from index 0 of an EXTRACT_SUBVECTOR. If so,
6553 // add the pre-extracted value to the Ops vector.
6554 if (N0.getOpcode() == ISD::EXTRACT_SUBVECTOR &&
6555 N0.getOperand(0).getValueType() == VT &&
6556 N0.getConstantOperandVal(1) == 0)
6557 Ops.push_back(N0.getOperand(0));
6558
6559 // We only decode broadcasts of same-sized vectors, unless the broadcast
6560 // came from an extract from the original width. If we found one, we
6561 // pushed it to the Ops vector above.
6562 if (N0.getValueType() == VT || !Ops.empty()) {
6563 DecodeVectorBroadcast(NumElems, Mask);
6564 IsUnary = true;
6565 break;
6566 }
6567 return false;
6568 }
6569 case X86ISD::VPERMILPV: {
6570 assert(N->getOperand(0).getValueType() == VT && "Unexpected value type");
6571 IsUnary = true;
6572 SDValue MaskNode = N->getOperand(1);
6573 if (getTargetShuffleMaskIndices(MaskNode, MaskEltSize, RawMask,
6574 RawUndefs)) {
6575 DecodeVPERMILPMask(NumElems, MaskEltSize, RawMask, RawUndefs, Mask);
6576 break;
6577 }
6578 return false;
6579 }
6580 case X86ISD::PSHUFB: {
6581 assert(VT.getScalarType() == MVT::i8 && "Byte vector expected");
6582 assert(N->getOperand(0).getValueType() == VT && "Unexpected value type");
6583 assert(N->getOperand(1).getValueType() == VT && "Unexpected value type");
6584 IsUnary = true;
6585 SDValue MaskNode = N->getOperand(1);
6586 if (getTargetShuffleMaskIndices(MaskNode, 8, RawMask, RawUndefs)) {
6587 DecodePSHUFBMask(RawMask, RawUndefs, Mask);
6588 break;
6589 }
6590 return false;
6591 }
6592 case X86ISD::VPERMI:
6593 assert(N->getOperand(0).getValueType() == VT && "Unexpected value type");
6594 ImmN = N->getOperand(N->getNumOperands() - 1);
6595 DecodeVPERMMask(NumElems, cast<ConstantSDNode>(ImmN)->getZExtValue(), Mask);
6596 IsUnary = true;
6597 break;
6598 case X86ISD::MOVSS:
6599 case X86ISD::MOVSD:
6600 assert(N->getOperand(0).getValueType() == VT && "Unexpected value type");
6601 assert(N->getOperand(1).getValueType() == VT && "Unexpected value type");
6602 DecodeScalarMoveMask(NumElems, /* IsLoad */ false, Mask);
6603 break;
6604 case X86ISD::VPERM2X128:
6605 assert(N->getOperand(0).getValueType() == VT && "Unexpected value type");
6606 assert(N->getOperand(1).getValueType() == VT && "Unexpected value type");
6607 ImmN = N->getOperand(N->getNumOperands() - 1);
6608 DecodeVPERM2X128Mask(NumElems, cast<ConstantSDNode>(ImmN)->getZExtValue(),
6609 Mask);
6610 IsUnary = IsFakeUnary = N->getOperand(0) == N->getOperand(1);
6611 break;
6612 case X86ISD::SHUF128:
6613 assert(N->getOperand(0).getValueType() == VT && "Unexpected value type");
6614 assert(N->getOperand(1).getValueType() == VT && "Unexpected value type");
6615 ImmN = N->getOperand(N->getNumOperands() - 1);
6616 decodeVSHUF64x2FamilyMask(NumElems, MaskEltSize,
6617 cast<ConstantSDNode>(ImmN)->getZExtValue(), Mask);
6618 IsUnary = IsFakeUnary = N->getOperand(0) == N->getOperand(1);
6619 break;
6620 case X86ISD::MOVSLDUP:
6621 assert(N->getOperand(0).getValueType() == VT && "Unexpected value type");
6622 DecodeMOVSLDUPMask(NumElems, Mask);
6623 IsUnary = true;
6624 break;
6625 case X86ISD::MOVSHDUP:
6626 assert(N->getOperand(0).getValueType() == VT && "Unexpected value type");
6627 DecodeMOVSHDUPMask(NumElems, Mask);
6628 IsUnary = true;
6629 break;
6630 case X86ISD::MOVDDUP:
6631 assert(N->getOperand(0).getValueType() == VT && "Unexpected value type");
6632 DecodeMOVDDUPMask(NumElems, Mask);
6633 IsUnary = true;
6634 break;
6635 case X86ISD::VPERMIL2: {
6636 assert(N->getOperand(0).getValueType() == VT && "Unexpected value type");
6637 assert(N->getOperand(1).getValueType() == VT && "Unexpected value type");
6638 IsUnary = IsFakeUnary = N->getOperand(0) == N->getOperand(1);
6639 SDValue MaskNode = N->getOperand(2);
6640 SDValue CtrlNode = N->getOperand(3);
6641 if (ConstantSDNode *CtrlOp = dyn_cast<ConstantSDNode>(CtrlNode)) {
6642 unsigned CtrlImm = CtrlOp->getZExtValue();
6643 if (getTargetShuffleMaskIndices(MaskNode, MaskEltSize, RawMask,
6644 RawUndefs)) {
6645 DecodeVPERMIL2PMask(NumElems, MaskEltSize, CtrlImm, RawMask, RawUndefs,
6646 Mask);
6647 break;
6648 }
6649 }
6650 return false;
6651 }
6652 case X86ISD::VPPERM: {
6653 assert(N->getOperand(0).getValueType() == VT && "Unexpected value type");
6654 assert(N->getOperand(1).getValueType() == VT && "Unexpected value type");
6655 IsUnary = IsFakeUnary = N->getOperand(0) == N->getOperand(1);
6656 SDValue MaskNode = N->getOperand(2);
6657 if (getTargetShuffleMaskIndices(MaskNode, 8, RawMask, RawUndefs)) {
6658 DecodeVPPERMMask(RawMask, RawUndefs, Mask);
6659 break;
6660 }
6661 return false;
6662 }
6663 case X86ISD::VPERMV: {
6664 assert(N->getOperand(1).getValueType() == VT && "Unexpected value type");
6665 IsUnary = true;
6666 // Unlike most shuffle nodes, VPERMV's mask operand is operand 0.
6667 Ops.push_back(N->getOperand(1));
6668 SDValue MaskNode = N->getOperand(0);
6669 if (getTargetShuffleMaskIndices(MaskNode, MaskEltSize, RawMask,
6670 RawUndefs)) {
6671 DecodeVPERMVMask(RawMask, RawUndefs, Mask);
6672 break;
6673 }
6674 return false;
6675 }
6676 case X86ISD::VPERMV3: {
6677 assert(N->getOperand(0).getValueType() == VT && "Unexpected value type");
6678 assert(N->getOperand(2).getValueType() == VT && "Unexpected value type");
6679 IsUnary = IsFakeUnary = N->getOperand(0) == N->getOperand(2);
6680 // Unlike most shuffle nodes, VPERMV3's mask operand is the middle one.
6681 Ops.push_back(N->getOperand(0));
6682 Ops.push_back(N->getOperand(2));
6683 SDValue MaskNode = N->getOperand(1);
6684 if (getTargetShuffleMaskIndices(MaskNode, MaskEltSize, RawMask,
6685 RawUndefs)) {
6686 DecodeVPERMV3Mask(RawMask, RawUndefs, Mask);
6687 break;
6688 }
6689 return false;
6690 }
6691 default: llvm_unreachable("unknown target shuffle node");
6692 }
6693
6694 // Empty mask indicates the decode failed.
6695 if (Mask.empty())
6696 return false;
6697
6698 // Check if we're getting a shuffle mask with zero'd elements.
6699 if (!AllowSentinelZero)
6700 if (any_of(Mask, [](int M) { return M == SM_SentinelZero; }))
6701 return false;
6702
6703 // If we have a fake unary shuffle, the shuffle mask is spread across two
6704 // inputs that are actually the same node. Re-map the mask to always point
6705 // into the first input.
6706 if (IsFakeUnary)
6707 for (int &M : Mask)
6708 if (M >= (int)Mask.size())
6709 M -= Mask.size();
6710
6711 // If we didn't already add operands in the opcode-specific code, default to
6712 // adding 1 or 2 operands starting at 0.
6713 if (Ops.empty()) {
6714 Ops.push_back(N->getOperand(0));
6715 if (!IsUnary || IsFakeUnary)
6716 Ops.push_back(N->getOperand(1));
6717 }
6718
6719 return true;
6720}
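// Illustrative example of the fake-unary handling above: an X86ISD::UNPCKL
// v4f32 whose two operands are the same node decodes to <0,4,1,5>, which the
// remap loop folds to <0,0,1,1> so every index stays within the single input
// (both Ops entries still refer to that same node).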
6721
6722/// Decode a target shuffle mask and inputs and see if any values are
6723/// known to be undef or zero from their inputs.
6724/// Returns true if the target shuffle mask was decoded.
6725static bool getTargetShuffleAndZeroables(SDValue N, SmallVectorImpl<int> &Mask,
6726 SmallVectorImpl<SDValue> &Ops,
6727 APInt &KnownUndef, APInt &KnownZero) {
6728 bool IsUnary;
6729 if (!isTargetShuffle(N.getOpcode()))
6730 return false;
6731
6732 MVT VT = N.getSimpleValueType();
6733 if (!getTargetShuffleMask(N.getNode(), VT, true, Ops, Mask, IsUnary))
6734 return false;
6735
6736 int Size = Mask.size();
6737 SDValue V1 = Ops[0];
6738 SDValue V2 = IsUnary ? V1 : Ops[1];
6739 KnownUndef = KnownZero = APInt::getNullValue(Size);
6740
6741 V1 = peekThroughBitcasts(V1);
6742 V2 = peekThroughBitcasts(V2);
6743
6744 assert((VT.getSizeInBits() % Mask.size()) == 0 &&
6745        "Illegal split of shuffle value type");
6746 unsigned EltSizeInBits = VT.getSizeInBits() / Size;
6747
6748 // Extract known constant input data.
6749 APInt UndefSrcElts[2];
6750 SmallVector<APInt, 32> SrcEltBits[2];
6751 bool IsSrcConstant[2] = {
6752 getTargetConstantBitsFromNode(V1, EltSizeInBits, UndefSrcElts[0],
6753 SrcEltBits[0], true, false),
6754 getTargetConstantBitsFromNode(V2, EltSizeInBits, UndefSrcElts[1],
6755 SrcEltBits[1], true, false)};
6756
6757 for (int i = 0; i < Size; ++i) {
6758 int M = Mask[i];
6759
6760 // Already decoded as SM_SentinelZero / SM_SentinelUndef.
6761 if (M < 0) {
6762 assert(isUndefOrZero(M) && "Unknown shuffle sentinel value!");
6763 if (SM_SentinelUndef == M)
6764 KnownUndef.setBit(i);
6765 if (SM_SentinelZero == M)
6766 KnownZero.setBit(i);
6767 continue;
6768 }
6769
6770 // Determine shuffle input and normalize the mask.
6771 unsigned SrcIdx = M / Size;
6772 SDValue V = M < Size ? V1 : V2;
6773 M %= Size;
6774
6775 // We are referencing an UNDEF input.
6776 if (V.isUndef()) {
6777 KnownUndef.setBit(i);
6778 continue;
6779 }
6780
6781 // SCALAR_TO_VECTOR - only the first element is defined, and the rest UNDEF.
6782 // TODO: We currently only set UNDEF for integer types - floats use the same
6783 // registers as vectors and many of the scalar folded loads rely on the
6784 // SCALAR_TO_VECTOR pattern.
6785 if (V.getOpcode() == ISD::SCALAR_TO_VECTOR &&
6786 (Size % V.getValueType().getVectorNumElements()) == 0) {
6787 int Scale = Size / V.getValueType().getVectorNumElements();
6788 int Idx = M / Scale;
6789 if (Idx != 0 && !VT.isFloatingPoint())
6790 KnownUndef.setBit(i);
6791 else if (Idx == 0 && X86::isZeroNode(V.getOperand(0)))
6792 KnownZero.setBit(i);
6793 continue;
6794 }
6795
6796 // Attempt to extract from the source's constant bits.
6797 if (IsSrcConstant[SrcIdx]) {
6798 if (UndefSrcElts[SrcIdx][M])
6799 KnownUndef.setBit(i);
6800 else if (SrcEltBits[SrcIdx][M] == 0)
6801 KnownZero.setBit(i);
6802 }
6803 }
6804
6805 assert(VT.getVectorNumElements() == (unsigned)Size &&
6806        "Different mask size from vector size!");
6807 return true;
6808}
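// Illustrative consequence: if one shuffle input is a constant build_vector,
// any mask element that selects an all-zero constant lane lands in KnownZero
// and any element selecting an undef constant lane lands in KnownUndef,
// without the shuffle itself being rewritten here.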
6809
6810// Forward declaration (for getFauxShuffleMask recursive check).
6811// TODO: Use DemandedElts variant.
6812static bool getTargetShuffleInputs(SDValue Op, SmallVectorImpl<SDValue> &Inputs,
6813 SmallVectorImpl<int> &Mask,
6814 SelectionDAG &DAG, unsigned Depth,
6815 bool ResolveKnownElts);
6816
6817// Attempt to decode ops that could be represented as a shuffle mask.
6818 // The decoded shuffle mask may contain a different number of elements than the
6819// destination value type.
6820static bool getFauxShuffleMask(SDValue N, const APInt &DemandedElts,
6821 SmallVectorImpl<int> &Mask,
6822 SmallVectorImpl<SDValue> &Ops,
6823 SelectionDAG &DAG, unsigned Depth,
6824 bool ResolveKnownElts) {
6825 Mask.clear();
6826 Ops.clear();
6827
6828 MVT VT = N.getSimpleValueType();
6829 unsigned NumElts = VT.getVectorNumElements();
6830 unsigned NumSizeInBits = VT.getSizeInBits();
6831 unsigned NumBitsPerElt = VT.getScalarSizeInBits();
6832 if ((NumBitsPerElt % 8) != 0 || (NumSizeInBits % 8) != 0)
6833 return false;
6834 assert(NumElts == DemandedElts.getBitWidth() && "Unexpected vector size");
6835
6836 unsigned Opcode = N.getOpcode();
6837 switch (Opcode) {
6838 case ISD::VECTOR_SHUFFLE: {
6839 // Don't treat ISD::VECTOR_SHUFFLE as a target shuffle so decode it here.
6840 ArrayRef<int> ShuffleMask = cast<ShuffleVectorSDNode>(N)->getMask();
6841 if (isUndefOrInRange(ShuffleMask, 0, 2 * NumElts)) {
6842 Mask.append(ShuffleMask.begin(), ShuffleMask.end());
6843 Ops.push_back(N.getOperand(0));
6844 Ops.push_back(N.getOperand(1));
6845 return true;
6846 }
6847 return false;
6848 }
6849 case ISD::AND:
6850 case X86ISD::ANDNP: {
6851 // Attempt to decode as a per-byte mask.
6852 APInt UndefElts;
6853 SmallVector<APInt, 32> EltBits;
6854 SDValue N0 = N.getOperand(0);
6855 SDValue N1 = N.getOperand(1);
6856 bool IsAndN = (X86ISD::ANDNP == Opcode);
6857 uint64_t ZeroMask = IsAndN ? 255 : 0;
6858 if (!getTargetConstantBitsFromNode(IsAndN ? N0 : N1, 8, UndefElts, EltBits))
6859 return false;
6860 for (int i = 0, e = (int)EltBits.size(); i != e; ++i) {
6861 if (UndefElts[i]) {
6862 Mask.push_back(SM_SentinelUndef);
6863 continue;
6864 }
6865 const APInt &ByteBits = EltBits[i];
6866 if (ByteBits != 0 && ByteBits != 255)
6867 return false;
6868 Mask.push_back(ByteBits == ZeroMask ? SM_SentinelZero : i);
6869 }
6870 Ops.push_back(IsAndN ? N1 : N0);
6871 return true;
6872 }
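A standalone sketch (not from X86ISelLowering.cpp) of the per-byte decode above, assuming the AND constant has already been split into byte values: 0xFF bytes keep the corresponding source byte, 0x00 bytes become zero sentinels, and anything else aborts the decode. The kSentinelUndef/kSentinelZero constants stand in for the LLVM SM_SentinelUndef/SM_SentinelZero values.

    // Illustrative only: decode AND-with-byte-mask into a shuffle mask.
    #include <cstdint>
    #include <optional>
    #include <vector>

    constexpr int kSentinelUndef = -1; // stands in for SM_SentinelUndef
    constexpr int kSentinelZero  = -2; // stands in for SM_SentinelZero

    // MaskBytes holds one entry per byte of the AND constant; nullopt = undef.
    std::optional<std::vector<int>>
    decodeByteMask(const std::vector<std::optional<uint8_t>> &MaskBytes) {
      std::vector<int> ShuffleMask;
      for (int i = 0, e = (int)MaskBytes.size(); i != e; ++i) {
        if (!MaskBytes[i]) {            // undefined constant byte
          ShuffleMask.push_back(kSentinelUndef);
          continue;
        }
        uint8_t B = *MaskBytes[i];
        if (B != 0x00 && B != 0xFF)     // partial byte masks can't be a shuffle
          return std::nullopt;
        ShuffleMask.push_back(B == 0x00 ? kSentinelZero : i);
      }
      return ShuffleMask;
    }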
6873 case ISD::OR: {
6874 // Inspect each operand at the byte level. We can merge these into a
6875 // blend shuffle mask if for each byte at least one is masked out (zero).
6876 KnownBits Known0 =
6877 DAG.computeKnownBits(N.getOperand(0), DemandedElts, Depth + 1);
6878 KnownBits Known1 =
6879 DAG.computeKnownBits(N.getOperand(1), DemandedElts, Depth + 1);
6880 if (Known0.One.isNullValue() && Known1.One.isNullValue()) {
6881 bool IsByteMask = true;
6882 unsigned NumSizeInBytes = NumSizeInBits / 8;
6883 unsigned NumBytesPerElt = NumBitsPerElt / 8;
6884 APInt ZeroMask = APInt::getNullValue(NumBytesPerElt);
6885 APInt SelectMask = APInt::getNullValue(NumBytesPerElt);
6886 for (unsigned i = 0; i != NumBytesPerElt && IsByteMask; ++i) {
6887 unsigned LHS = Known0.Zero.extractBits(8, i * 8).getZExtValue();
6888 unsigned RHS = Known1.Zero.extractBits(8, i * 8).getZExtValue();
6889 if (LHS == 255 && RHS == 0)
6890 SelectMask.setBit(i);
6891 else if (LHS == 255 && RHS == 255)
6892 ZeroMask.setBit(i);
6893 else if (!(LHS == 0 && RHS == 255))
6894 IsByteMask = false;
6895 }
6896 if (IsByteMask) {
6897 for (unsigned i = 0; i != NumSizeInBytes; i += NumBytesPerElt) {
6898 for (unsigned j = 0; j != NumBytesPerElt; ++j) {
6899 unsigned Ofs = (SelectMask[j] ? NumSizeInBytes : 0);
6900 int Idx = (ZeroMask[j] ? (int)SM_SentinelZero : (i + j + Ofs));
6901 Mask.push_back(Idx);
6902 }
6903 }
6904 Ops.push_back(N.getOperand(0));
6905 Ops.push_back(N.getOperand(1));
6906 return true;
6907 }
6908 }
6909
6910 // Handle OR(SHUFFLE,SHUFFLE) case where one source is zero and the other
6911 // is a valid shuffle index.
6912 SDValue N0 = peekThroughOneUseBitcasts(N.getOperand(0));
6913 SDValue N1 = peekThroughOneUseBitcasts(N.getOperand(1));
6914 if (!N0.getValueType().isVector() || !N1.getValueType().isVector())
6915 return false;
6916 SmallVector<int, 64> SrcMask0, SrcMask1;
6917 SmallVector<SDValue, 2> SrcInputs0, SrcInputs1;
6918 if (!getTargetShuffleInputs(N0, SrcInputs0, SrcMask0, DAG, Depth + 1,
6919 ResolveKnownElts) ||
6920 !getTargetShuffleInputs(N1, SrcInputs1, SrcMask1, DAG, Depth + 1,
6921 ResolveKnownElts))
6922 return false;
6923 size_t MaskSize = std::max(SrcMask0.size(), SrcMask1.size());
6924 SmallVector<int, 64> Mask0, Mask1;
6925 scaleShuffleMask<int>(MaskSize / SrcMask0.size(), SrcMask0, Mask0);
6926 scaleShuffleMask<int>(MaskSize / SrcMask1.size(), SrcMask1, Mask1);
6927 for (size_t i = 0; i != MaskSize; ++i) {
6928 if (Mask0[i] == SM_SentinelUndef && Mask1[i] == SM_SentinelUndef)
6929 Mask.push_back(SM_SentinelUndef);
6930 else if (Mask0[i] == SM_SentinelZero && Mask1[i] == SM_SentinelZero)
6931 Mask.push_back(SM_SentinelZero);
6932 else if (Mask1[i] == SM_SentinelZero)
6933 Mask.push_back(Mask0[i]);
6934 else if (Mask0[i] == SM_SentinelZero)
6935 Mask.push_back(Mask1[i] + (int)(MaskSize * SrcInputs0.size()));
6936 else
6937 return false;
6938 }
6939 Ops.append(SrcInputs0.begin(), SrcInputs0.end());
6940 Ops.append(SrcInputs1.begin(), SrcInputs1.end());
6941 return true;
6942 }
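A minimal sketch of the OR(SHUFFLE,SHUFFLE) mask merge above, assuming both operand masks have already been scaled to the same length: each lane is taken from whichever side is known zero, and the decode gives up if both sides could contribute bits. Names and sentinel values are illustrative, not the LLVM ones.

    // Illustrative only: merge two shuffle masks joined by OR when, per lane,
    // at least one side is known zero.
    #include <cstddef>
    #include <optional>
    #include <vector>

    constexpr int kUndef = -1, kZero = -2;

    std::optional<std::vector<int>>
    mergeOrMasks(const std::vector<int> &M0, const std::vector<int> &M1,
                 size_t NumInputs0) {
      std::vector<int> Out;
      size_t Size = M0.size(); // assumed equal to M1.size()
      for (size_t i = 0; i != Size; ++i) {
        if (M0[i] == kUndef && M1[i] == kUndef)
          Out.push_back(kUndef);
        else if (M0[i] == kZero && M1[i] == kZero)
          Out.push_back(kZero);
        else if (M1[i] == kZero)                    // RHS zero: take LHS element
          Out.push_back(M0[i]);
        else if (M0[i] == kZero)                    // LHS zero: take RHS element,
          Out.push_back(M1[i] + (int)(Size * NumInputs0)); // offset past LHS inputs
        else
          return std::nullopt;                      // both sides may contribute bits
      }
      return Out;
    }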
6943 case ISD::INSERT_SUBVECTOR: {
6944 SDValue Src = N.getOperand(0);
6945 SDValue Sub = N.getOperand(1);
6946 EVT SubVT = Sub.getValueType();
6947 unsigned NumSubElts = SubVT.getVectorNumElements();
6948 if (!isa<ConstantSDNode>(N.getOperand(2)) ||
6949 !N->isOnlyUserOf(Sub.getNode()))
6950 return false;
6951 uint64_t InsertIdx = N.getConstantOperandVal(2);
6952 // Handle INSERT_SUBVECTOR(SRC0, EXTRACT_SUBVECTOR(SRC1)).
6953 if (Sub.getOpcode() == ISD::EXTRACT_SUBVECTOR &&
6954 Sub.getOperand(0).getValueType() == VT &&
6955 isa<ConstantSDNode>(Sub.getOperand(1))) {
6956 uint64_t ExtractIdx = Sub.getConstantOperandVal(1);
6957 for (int i = 0; i != (int)NumElts; ++i)
6958 Mask.push_back(i);
6959 for (int i = 0; i != (int)NumSubElts; ++i)
6960 Mask[InsertIdx + i] = NumElts + ExtractIdx + i;
6961 Ops.push_back(Src);
6962 Ops.push_back(Sub.getOperand(0));
6963 return true;
6964 }
6965 // Handle INSERT_SUBVECTOR(SRC0, SHUFFLE(SRC1)).
6966 SmallVector<int, 64> SubMask;
6967 SmallVector<SDValue, 2> SubInputs;
6968 if (!getTargetShuffleInputs(peekThroughOneUseBitcasts(Sub), SubInputs,
6969 SubMask, DAG, Depth + 1, ResolveKnownElts))
6970 return false;
6971 if (SubMask.size() != NumSubElts) {
6972       assert(((SubMask.size() % NumSubElts) == 0 ||
6973               (NumSubElts % SubMask.size()) == 0) && "Illegal submask scale");
6974 if ((NumSubElts % SubMask.size()) == 0) {
6975 int Scale = NumSubElts / SubMask.size();
6976 SmallVector<int,64> ScaledSubMask;
6977 scaleShuffleMask<int>(Scale, SubMask, ScaledSubMask);
6978 SubMask = ScaledSubMask;
6979 } else {
6980 int Scale = SubMask.size() / NumSubElts;
6981 NumSubElts = SubMask.size();
6982 NumElts *= Scale;
6983 InsertIdx *= Scale;
6984 }
6985 }
6986 Ops.push_back(Src);
6987 for (SDValue &SubInput : SubInputs) {
6988 EVT SubSVT = SubInput.getValueType().getScalarType();
6989 EVT AltVT = EVT::getVectorVT(*DAG.getContext(), SubSVT,
6990 NumSizeInBits / SubSVT.getSizeInBits());
6991 Ops.push_back(DAG.getNode(ISD::INSERT_SUBVECTOR, SDLoc(N), AltVT,
6992 DAG.getUNDEF(AltVT), SubInput,
6993 DAG.getIntPtrConstant(0, SDLoc(N))));
6994 }
6995 for (int i = 0; i != (int)NumElts; ++i)
6996 Mask.push_back(i);
6997 for (int i = 0; i != (int)NumSubElts; ++i) {
6998 int M = SubMask[i];
6999 if (0 <= M) {
7000 int InputIdx = M / NumSubElts;
7001 M = (NumElts * (1 + InputIdx)) + (M % NumSubElts);
7002 }
7003 Mask[i + InsertIdx] = M;
7004 }
7005 return true;
7006 }
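A hypothetical numeric walk-through of the mask built for INSERT_SUBVECTOR(SRC0, EXTRACT_SUBVECTOR(SRC1)): with NumElts = 8, NumSubElts = 2, InsertIdx = 4 and ExtractIdx = 6 (made-up values), the result is the identity over SRC0 with two lanes redirected into SRC1.

    // Illustrative only: the mask construction used for
    // INSERT_SUBVECTOR(SRC0, EXTRACT_SUBVECTOR(SRC1, ExtractIdx), InsertIdx).
    #include <cstdio>
    #include <vector>

    int main() {
      const int NumElts = 8, NumSubElts = 2, InsertIdx = 4, ExtractIdx = 6;
      std::vector<int> Mask;
      for (int i = 0; i != NumElts; ++i)          // identity over SRC0
        Mask.push_back(i);
      for (int i = 0; i != NumSubElts; ++i)       // window into SRC1
        Mask[InsertIdx + i] = NumElts + ExtractIdx + i;
      for (int M : Mask)
        std::printf("%d ", M);                    // prints: 0 1 2 3 14 15 6 7
      std::printf("\n");
    }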
7007 case ISD::SCALAR_TO_VECTOR: {
7008 // Match against a scalar_to_vector of an extract from a vector;
7009 // for PEXTRW/PEXTRB we must handle the implicit zext of the scalar.
7010 SDValue N0 = N.getOperand(0);
7011 SDValue SrcExtract;
7012
7013 if ((N0.getOpcode() == ISD::EXTRACT_VECTOR_ELT &&
7014 N0.getOperand(0).getValueType() == VT) ||
7015 (N0.getOpcode() == X86ISD::PEXTRW &&
7016 N0.getOperand(0).getValueType() == MVT::v8i16) ||
7017 (N0.getOpcode() == X86ISD::PEXTRB &&
7018 N0.getOperand(0).getValueType() == MVT::v16i8)) {
7019 SrcExtract = N0;
7020 }
7021
7022 if (!SrcExtract || !isa<ConstantSDNode>(SrcExtract.getOperand(1)))
7023 return false;
7024
7025 SDValue SrcVec = SrcExtract.getOperand(0);
7026 EVT SrcVT = SrcVec.getValueType();
7027 unsigned NumSrcElts = SrcVT.getVectorNumElements();
7028 unsigned NumZeros = (NumBitsPerElt / SrcVT.getScalarSizeInBits()) - 1;
7029
7030 unsigned SrcIdx = SrcExtract.getConstantOperandVal(1);
7031 if (NumSrcElts <= SrcIdx)
7032 return false;
7033
7034 Ops.push_back(SrcVec);
7035 Mask.push_back(SrcIdx);
7036 Mask.append(NumZeros, SM_SentinelZero);
7037 Mask.append(NumSrcElts - Mask.size(), SM_SentinelUndef);
7038 return true;
7039 }
7040 case X86ISD::PINSRB:
7041 case X86ISD::PINSRW: {
7042 SDValue InVec = N.getOperand(0);
7043 SDValue InScl = N.getOperand(1);
7044 SDValue InIndex = N.getOperand(2);
7045 if (!isa<ConstantSDNode>(InIndex) ||
7046 cast<ConstantSDNode>(InIndex)->getAPIntValue().uge(NumElts))
7047 return false;
7048 uint64_t InIdx = N.getConstantOperandVal(2);
7049
7050 // Attempt to recognise a PINSR*(VEC, 0, Idx) shuffle pattern.
7051 if (X86::isZeroNode(InScl)) {
7052 Ops.push_back(InVec);
7053 for (unsigned i = 0; i != NumElts; ++i)
7054 Mask.push_back(i == InIdx ? SM_SentinelZero : (int)i);
7055 return true;
7056 }
7057
7058 // Attempt to recognise a PINSR*(PEXTR*) shuffle pattern.
7059 // TODO: Expand this to support INSERT_VECTOR_ELT/etc.
7060 unsigned ExOp =
7061 (X86ISD::PINSRB == Opcode ? X86ISD::PEXTRB : X86ISD::PEXTRW);
7062 if (InScl.getOpcode() != ExOp)
7063 return false;
7064
7065 SDValue ExVec = InScl.getOperand(0);
7066 SDValue ExIndex = InScl.getOperand(1);
7067 if (!isa<ConstantSDNode>(ExIndex) ||
7068 cast<ConstantSDNode>(ExIndex)->getAPIntValue().uge(NumElts))
7069 return false;
7070 uint64_t ExIdx = InScl.getConstantOperandVal(1);
7071
7072 Ops.push_back(InVec);
7073 Ops.push_back(ExVec);
7074 for (unsigned i = 0; i != NumElts; ++i)
7075 Mask.push_back(i == InIdx ? NumElts + ExIdx : i);
7076 return true;
7077 }
7078 case X86ISD::PACKSS:
7079 case X86ISD::PACKUS: {
7080 SDValue N0 = N.getOperand(0);
7081 SDValue N1 = N.getOperand(1);
7082     assert(N0.getValueType().getVectorNumElements() == (NumElts / 2) &&
7083            N1.getValueType().getVectorNumElements() == (NumElts / 2) &&
7084            "Unexpected input value type");
7085
7086 APInt EltsLHS, EltsRHS;
7087 getPackDemandedElts(VT, DemandedElts, EltsLHS, EltsRHS);
7088
7089 // If we know input saturation won't happen we can treat this
7090 // as a truncation shuffle.
7091 if (Opcode == X86ISD::PACKSS) {
7092 if ((!N0.isUndef() &&
7093 DAG.ComputeNumSignBits(N0, EltsLHS, Depth + 1) <= NumBitsPerElt) ||
7094 (!N1.isUndef() &&
7095 DAG.ComputeNumSignBits(N1, EltsRHS, Depth + 1) <= NumBitsPerElt))
7096 return false;
7097 } else {
7098 APInt ZeroMask = APInt::getHighBitsSet(2 * NumBitsPerElt, NumBitsPerElt);
7099 if ((!N0.isUndef() &&
7100 !DAG.MaskedValueIsZero(N0, ZeroMask, EltsLHS, Depth + 1)) ||
7101 (!N1.isUndef() &&
7102 !DAG.MaskedValueIsZero(N1, ZeroMask, EltsRHS, Depth + 1)))
7103 return false;
7104 }
7105
7106 bool IsUnary = (N0 == N1);
7107
7108 Ops.push_back(N0);
7109 if (!IsUnary)
7110 Ops.push_back(N1);
7111
7112 createPackShuffleMask(VT, Mask, IsUnary);
7113 return true;
7114 }
7115 case X86ISD::VSHLI:
7116 case X86ISD::VSRLI: {
7117 uint64_t ShiftVal = N.getConstantOperandVal(1);
7118 // Out of range bit shifts are guaranteed to be zero.
7119 if (NumBitsPerElt <= ShiftVal) {
7120 Mask.append(NumElts, SM_SentinelZero);
7121 return true;
7122 }
7123
7124 // We can only decode 'whole byte' bit shifts as shuffles.
7125 if ((ShiftVal % 8) != 0)
7126 break;
7127
7128 uint64_t ByteShift = ShiftVal / 8;
7129 unsigned NumBytes = NumSizeInBits / 8;
7130 unsigned NumBytesPerElt = NumBitsPerElt / 8;
7131 Ops.push_back(N.getOperand(0));
7132
7133 // Clear mask to all zeros and insert the shifted byte indices.
7134 Mask.append(NumBytes, SM_SentinelZero);
7135
7136 if (X86ISD::VSHLI == Opcode) {
7137 for (unsigned i = 0; i != NumBytes; i += NumBytesPerElt)
7138 for (unsigned j = ByteShift; j != NumBytesPerElt; ++j)
7139 Mask[i + j] = i + j - ByteShift;
7140 } else {
7141 for (unsigned i = 0; i != NumBytes; i += NumBytesPerElt)
7142 for (unsigned j = ByteShift; j != NumBytesPerElt; ++j)
7143 Mask[i + j - ByteShift] = i + j;
7144 }
7145 return true;
7146 }
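A self-contained illustration of the whole-byte shift masks above, assuming a 128-bit vector of 32-bit elements (NumBytes = 16, NumBytesPerElt = 4) and a shift of 8 bits (ByteShift = 1): VSHLI moves each surviving byte up within its element and zeroes the low byte, VSRLI does the reverse. The value -2 stands in for SM_SentinelZero.

    // Illustrative only: per-element byte shuffle masks for whole-byte VSHLI/VSRLI.
    #include <cstdio>
    #include <vector>

    int main() {
      const unsigned NumBytes = 16, NumBytesPerElt = 4, ByteShift = 1;
      const int kZero = -2;                         // stands in for SM_SentinelZero
      std::vector<int> Shl(NumBytes, kZero), Srl(NumBytes, kZero);
      for (unsigned i = 0; i != NumBytes; i += NumBytesPerElt)
        for (unsigned j = ByteShift; j != NumBytesPerElt; ++j) {
          Shl[i + j] = (int)(i + j - ByteShift);    // VSHLI: bytes move up
          Srl[i + j - ByteShift] = (int)(i + j);    // VSRLI: bytes move down
        }
      for (int M : Shl) std::printf("%d ", M);      // -2 0 1 2 -2 4 5 6 ...
      std::printf("\n");
      for (int M : Srl) std::printf("%d ", M);      // 1 2 3 -2 5 6 7 -2 ...
      std::printf("\n");
    }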
7147 case X86ISD::VBROADCAST: {
7148 SDValue Src = N.getOperand(0);
7149 MVT SrcVT = Src.getSimpleValueType();
7150 if (!SrcVT.isVector())
7151 return false;
7152
7153 if (NumSizeInBits != SrcVT.getSizeInBits()) {
7154       assert((NumSizeInBits % SrcVT.getSizeInBits()) == 0 &&
7155              "Illegal broadcast type");
7156 SrcVT = MVT::getVectorVT(SrcVT.getScalarType(),
7157 NumSizeInBits / SrcVT.getScalarSizeInBits());
7158 Src = DAG.getNode(ISD::INSERT_SUBVECTOR, SDLoc(N), SrcVT,
7159 DAG.getUNDEF(SrcVT), Src,
7160 DAG.getIntPtrConstant(0, SDLoc(N)));
7161 }
7162
7163 Ops.push_back(Src);
7164 Mask.append(NumElts, 0);
7165 return true;
7166 }
7167 case ISD::ZERO_EXTEND:
7168 case ISD::ANY_EXTEND:
7169 case ISD::ZERO_EXTEND_VECTOR_INREG:
7170 case ISD::ANY_EXTEND_VECTOR_INREG: {
7171 SDValue Src = N.getOperand(0);
7172 EVT SrcVT = Src.getValueType();
7173
7174 // Extended source must be a simple vector.
7175 if (!SrcVT.isSimple() || (SrcVT.getSizeInBits() % 128) != 0 ||
7176 (SrcVT.getScalarSizeInBits() % 8) != 0)
7177 return false;
7178
7179 unsigned NumSrcBitsPerElt = SrcVT.getScalarSizeInBits();
7180 bool IsAnyExtend =
7181 (ISD::ANY_EXTEND == Opcode || ISD::ANY_EXTEND_VECTOR_INREG == Opcode);
7182 DecodeZeroExtendMask(NumSrcBitsPerElt, NumBitsPerElt, NumElts, IsAnyExtend,
7183 Mask);
7184
7185 if (NumSizeInBits != SrcVT.getSizeInBits()) {
7186       assert((NumSizeInBits % SrcVT.getSizeInBits()) == 0 &&
7187              "Illegal zero-extension type");
7188 SrcVT = MVT::getVectorVT(SrcVT.getSimpleVT().getScalarType(),
7189 NumSizeInBits / NumSrcBitsPerElt);
7190 Src = DAG.getNode(ISD::INSERT_SUBVECTOR, SDLoc(N), SrcVT,
7191 DAG.getUNDEF(SrcVT), Src,
7192 DAG.getIntPtrConstant(0, SDLoc(N)));
7193 }
7194
7195 Ops.push_back(Src);
7196 return true;
7197 }
7198 }
7199
7200 return false;
7201}
7202
7203/// Removes unused/repeated shuffle source inputs and adjusts the shuffle mask.
7204static void resolveTargetShuffleInputsAndMask(SmallVectorImpl<SDValue> &Inputs,
7205 SmallVectorImpl<int> &Mask) {
7206 int MaskWidth = Mask.size();
7207 SmallVector<SDValue, 16> UsedInputs;
7208 for (int i = 0, e = Inputs.size(); i < e; ++i) {
7209 int lo = UsedInputs.size() * MaskWidth;
7210 int hi = lo + MaskWidth;
7211
7212 // Strip UNDEF input usage.
7213 if (Inputs[i].isUndef())
7214 for (int &M : Mask)
7215 if ((lo <= M) && (M < hi))
7216 M = SM_SentinelUndef;
7217
7218 // Check for unused inputs.
7219 if (none_of(Mask, [lo, hi](int i) { return (lo <= i) && (i < hi); })) {
7220 for (int &M : Mask)
7221 if (lo <= M)
7222 M -= MaskWidth;
7223 continue;
7224 }
7225
7226 // Check for repeated inputs.
7227 bool IsRepeat = false;
7228 for (int j = 0, ue = UsedInputs.size(); j != ue; ++j) {
7229 if (UsedInputs[j] != Inputs[i])
7230 continue;
7231 for (int &M : Mask)
7232 if (lo <= M)
7233 M = (M < hi) ? ((M - lo) + (j * MaskWidth)) : (M - MaskWidth);
7234 IsRepeat = true;
7235 break;
7236 }
7237 if (IsRepeat)
7238 continue;
7239
7240 UsedInputs.push_back(Inputs[i]);
7241 }
7242 Inputs = UsedInputs;
7243}
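A simplified sketch of the de-duplication above, assuming plain integers stand in for the SDValue inputs and skipping the UNDEF-stripping step: unused and repeated inputs are dropped, and every surviving mask index is rebased onto the reduced input list.

    // Illustrative only: drop unused/repeated shuffle inputs and rebase the mask.
    #include <algorithm>
    #include <vector>

    void resolveInputsAndMask(std::vector<int> &Inputs, std::vector<int> &Mask) {
      const int MaskWidth = (int)Mask.size();
      std::vector<int> Used;
      for (int In : Inputs) {
        int lo = (int)Used.size() * MaskWidth, hi = lo + MaskWidth;
        // Unused input: shift every later reference down one slot and drop it.
        if (std::none_of(Mask.begin(), Mask.end(),
                         [&](int M) { return lo <= M && M < hi; })) {
          for (int &M : Mask)
            if (M >= lo) M -= MaskWidth;
          continue;
        }
        // Repeated input: redirect its references to the earlier copy.
        auto It = std::find(Used.begin(), Used.end(), In);
        if (It != Used.end()) {
          int j = (int)(It - Used.begin());
          for (int &M : Mask)
            if (M >= lo) M = (M < hi) ? (M - lo) + j * MaskWidth : M - MaskWidth;
          continue;
        }
        Used.push_back(In);
      }
      Inputs = Used;
    }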
7244
7245/// Calls getTargetShuffleAndZeroables to resolve a target shuffle mask's inputs
7246/// and then sets the SM_SentinelUndef and SM_SentinelZero values.
7247/// Returns true if the target shuffle mask was decoded.
7248static bool getTargetShuffleInputs(SDValue Op, const APInt &DemandedElts,
7249 SmallVectorImpl<SDValue> &Inputs,
7250 SmallVectorImpl<int> &Mask,
7251 APInt &KnownUndef, APInt &KnownZero,
7252 SelectionDAG &DAG, unsigned Depth,
7253 bool ResolveKnownElts) {
7254 EVT VT = Op.getValueType();
7255 if (!VT.isSimple() || !VT.isVector())
25. Calling 'EVT::isSimple'
27. Returning from 'EVT::isSimple'
28. Calling 'EVT::isVector'
34. Returning from 'EVT::isVector'
35. Taking false branch
7256 return false;
7257
7258 if (getTargetShuffleAndZeroables(Op, Mask, Inputs, KnownUndef, KnownZero)) {
36. Value assigned to 'OpMask.Size'
37. Assuming the condition is true
38. Taking true branch
7259 for (int i = 0, e = Mask.size(); i != e; ++i) {
39. Assuming 'i' is equal to 'e'
40. Loop condition is false. Execution continues on line 7268
7260 int &M = Mask[i];
7261 if (M < 0 || !ResolveKnownElts)
7262 continue;
7263 if (KnownUndef[i])
7264 M = SM_SentinelUndef;
7265 else if (KnownZero[i])
7266 M = SM_SentinelZero;
7267 }
7268 return true;
41. Returning the value 1, which participates in a condition later
7269 }
7270 if (getFauxShuffleMask(Op, DemandedElts, Mask, Inputs, DAG, Depth,
7271 ResolveKnownElts)) {
7272 KnownUndef = KnownZero = APInt::getNullValue(Mask.size());
7273 for (int i = 0, e = Mask.size(); i != e; ++i) {
7274 int M = Mask[i];
7275 if (SM_SentinelUndef == M)
7276 KnownUndef.setBit(i);
7277 if (SM_SentinelZero == M)
7278 KnownZero.setBit(i);
7279 }
7280 return true;
7281 }
7282 return false;
7283}
7284
7285static bool getTargetShuffleInputs(SDValue Op, SmallVectorImpl<SDValue> &Inputs,
7286 SmallVectorImpl<int> &Mask,
7287 SelectionDAG &DAG, unsigned Depth = 0,
7288 bool ResolveKnownElts = true) {
7289 EVT VT = Op.getValueType();
7290 if (!VT.isSimple() || !VT.isVector())
13. Calling 'EVT::isSimple'
15. Returning from 'EVT::isSimple'
16. Calling 'EVT::isVector'
22. Returning from 'EVT::isVector'
23. Taking false branch
7291 return false;
7292
7293 APInt KnownUndef, KnownZero;
7294 unsigned NumElts = Op.getValueType().getVectorNumElements();
7295 APInt DemandedElts = APInt::getAllOnesValue(NumElts);
7296 return getTargetShuffleInputs(Op, DemandedElts, Inputs, Mask, KnownUndef,
24. Calling 'getTargetShuffleInputs'
42. Returning from 'getTargetShuffleInputs'
43. Returning the value 1, which participates in a condition later
7297 KnownZero, DAG, Depth, ResolveKnownElts);
7298}
7299
7300/// Returns the scalar element that will make up the ith
7301/// element of the result of the vector shuffle.
7302static SDValue getShuffleScalarElt(SDNode *N, unsigned Index, SelectionDAG &DAG,
7303 unsigned Depth) {
7304 if (Depth == 6)
7305 return SDValue(); // Limit search depth.
7306
7307 SDValue V = SDValue(N, 0);
7308 EVT VT = V.getValueType();
7309 unsigned Opcode = V.getOpcode();
7310
7311 // Recurse into ISD::VECTOR_SHUFFLE node to find scalars.
7312 if (const ShuffleVectorSDNode *SV = dyn_cast<ShuffleVectorSDNode>(N)) {
7313 int Elt = SV->getMaskElt(Index);
7314
7315 if (Elt < 0)
7316 return DAG.getUNDEF(VT.getVectorElementType());
7317
7318 unsigned NumElems = VT.getVectorNumElements();
7319 SDValue NewV = (Elt < (int)NumElems) ? SV->getOperand(0)
7320 : SV->getOperand(1);
7321 return getShuffleScalarElt(NewV.getNode(), Elt % NumElems, DAG, Depth+1);
7322 }
7323
7324 // Recurse into target specific vector shuffles to find scalars.
7325 if (isTargetShuffle(Opcode)) {
7326 MVT ShufVT = V.getSimpleValueType();
7327 MVT ShufSVT = ShufVT.getVectorElementType();
7328 int NumElems = (int)ShufVT.getVectorNumElements();
7329 SmallVector<int, 16> ShuffleMask;
7330 SmallVector<SDValue, 16> ShuffleOps;
7331 bool IsUnary;
7332
7333 if (!getTargetShuffleMask(N, ShufVT, true, ShuffleOps, ShuffleMask, IsUnary))
7334 return SDValue();
7335
7336 int Elt = ShuffleMask[Index];
7337 if (Elt == SM_SentinelZero)
7338 return ShufSVT.isInteger() ? DAG.getConstant(0, SDLoc(N), ShufSVT)
7339 : DAG.getConstantFP(+0.0, SDLoc(N), ShufSVT);
7340 if (Elt == SM_SentinelUndef)
7341 return DAG.getUNDEF(ShufSVT);
7342
7343     assert(0 <= Elt && Elt < (2*NumElems) && "Shuffle index out of range");
7344 SDValue NewV = (Elt < NumElems) ? ShuffleOps[0] : ShuffleOps[1];
7345 return getShuffleScalarElt(NewV.getNode(), Elt % NumElems, DAG,
7346 Depth+1);
7347 }
7348
7349 // Recurse into insert_subvector base/sub vector to find scalars.
7350 if (Opcode == ISD::INSERT_SUBVECTOR &&
7351 isa<ConstantSDNode>(N->getOperand(2))) {
7352 SDValue Vec = N->getOperand(0);
7353 SDValue Sub = N->getOperand(1);
7354 EVT SubVT = Sub.getValueType();
7355 unsigned NumSubElts = SubVT.getVectorNumElements();
7356 uint64_t SubIdx = N->getConstantOperandVal(2);
7357
7358 if (SubIdx <= Index && Index < (SubIdx + NumSubElts))
7359 return getShuffleScalarElt(Sub.getNode(), Index - SubIdx, DAG, Depth + 1);
7360 return getShuffleScalarElt(Vec.getNode(), Index, DAG, Depth + 1);
7361 }
7362
7363 // Recurse into extract_subvector src vector to find scalars.
7364 if (Opcode == ISD::EXTRACT_SUBVECTOR &&
7365 isa<ConstantSDNode>(N->getOperand(1))) {
7366 SDValue Src = N->getOperand(0);
7367 uint64_t SrcIdx = N->getConstantOperandVal(1);
7368 return getShuffleScalarElt(Src.getNode(), Index + SrcIdx, DAG, Depth + 1);
7369 }
7370
7371 // Actual nodes that may contain scalar elements
7372 if (Opcode == ISD::BITCAST) {
7373 V = V.getOperand(0);
7374 EVT SrcVT = V.getValueType();
7375 unsigned NumElems = VT.getVectorNumElements();
7376
7377 if (!SrcVT.isVector() || SrcVT.getVectorNumElements() != NumElems)
7378 return SDValue();
7379 }
7380
7381 if (V.getOpcode() == ISD::SCALAR_TO_VECTOR)
7382 return (Index == 0) ? V.getOperand(0)
7383 : DAG.getUNDEF(VT.getVectorElementType());
7384
7385 if (V.getOpcode() == ISD::BUILD_VECTOR)
7386 return V.getOperand(Index);
7387
7388 return SDValue();
7389}
7390
7391// Use PINSRB/PINSRW/PINSRD to create a build vector.
7392static SDValue LowerBuildVectorAsInsert(SDValue Op, unsigned NonZeros,
7393 unsigned NumNonZero, unsigned NumZero,
7394 SelectionDAG &DAG,
7395 const X86Subtarget &Subtarget) {
7396 MVT VT = Op.getSimpleValueType();
7397 unsigned NumElts = VT.getVectorNumElements();
7398   assert(((VT == MVT::v8i16 && Subtarget.hasSSE2()) ||
7399           ((VT == MVT::v16i8 || VT == MVT::v4i32) && Subtarget.hasSSE41())) &&
7400          "Illegal vector insertion");
7401
7402 SDLoc dl(Op);
7403 SDValue V;
7404 bool First = true;
7405
7406 for (unsigned i = 0; i < NumElts; ++i) {
7407 bool IsNonZero = (NonZeros & (1 << i)) != 0;
7408 if (!IsNonZero)
7409 continue;
7410
7411 // If the build vector contains zeros or our first insertion is not the
7412 // first index, then insert into a zero vector to break any register
7413 // dependency; else use SCALAR_TO_VECTOR.
7414 if (First) {
7415 First = false;
7416 if (NumZero || 0 != i)
7417 V = getZeroVector(VT, Subtarget, DAG, dl);
7418 else {
7419       assert(0 == i && "Expected insertion into zero-index");
7420 V = DAG.getAnyExtOrTrunc(Op.getOperand(i), dl, MVT::i32);
7421 V = DAG.getNode(ISD::SCALAR_TO_VECTOR, dl, MVT::v4i32, V);
7422 V = DAG.getBitcast(VT, V);
7423 continue;
7424 }
7425 }
7426 V = DAG.getNode(ISD::INSERT_VECTOR_ELT, dl, VT, V, Op.getOperand(i),
7427 DAG.getIntPtrConstant(i, dl));
7428 }
7429
7430 return V;
7431}
7432
7433/// Custom lower build_vector of v16i8.
7434static SDValue LowerBuildVectorv16i8(SDValue Op, unsigned NonZeros,
7435 unsigned NumNonZero, unsigned NumZero,
7436 SelectionDAG &DAG,
7437 const X86Subtarget &Subtarget) {
7438 if (NumNonZero > 8 && !Subtarget.hasSSE41())
7439 return SDValue();
7440
7441 // SSE4.1 - use PINSRB to insert each byte directly.
7442 if (Subtarget.hasSSE41())
7443 return LowerBuildVectorAsInsert(Op, NonZeros, NumNonZero, NumZero, DAG,
7444 Subtarget);
7445
7446 SDLoc dl(Op);
7447 SDValue V;
7448
7449 // Pre-SSE4.1 - merge byte pairs and insert with PINSRW.
7450 for (unsigned i = 0; i < 16; i += 2) {
7451 bool ThisIsNonZero = (NonZeros & (1 << i)) != 0;
7452 bool NextIsNonZero = (NonZeros & (1 << (i + 1))) != 0;
7453 if (!ThisIsNonZero && !NextIsNonZero)
7454 continue;
7455
7456 // FIXME: Investigate combining the first 4 bytes as a i32 instead.
7457 SDValue Elt;
7458 if (ThisIsNonZero) {
7459 if (NumZero || NextIsNonZero)
7460 Elt = DAG.getZExtOrTrunc(Op.getOperand(i), dl, MVT::i32);
7461 else
7462 Elt = DAG.getAnyExtOrTrunc(Op.getOperand(i), dl, MVT::i32);
7463 }
7464
7465 if (NextIsNonZero) {
7466 SDValue NextElt = Op.getOperand(i + 1);
7467 if (i == 0 && NumZero)
7468 NextElt = DAG.getZExtOrTrunc(NextElt, dl, MVT::i32);
7469 else
7470 NextElt = DAG.getAnyExtOrTrunc(NextElt, dl, MVT::i32);
7471 NextElt = DAG.getNode(ISD::SHL, dl, MVT::i32, NextElt,
7472 DAG.getConstant(8, dl, MVT::i8));
7473 if (ThisIsNonZero)
7474 Elt = DAG.getNode(ISD::OR, dl, MVT::i32, NextElt, Elt);
7475 else
7476 Elt = NextElt;
7477 }
7478
7479 // If our first insertion is not the first index, then insert into a zero
7480 // vector to break any register dependency; else use SCALAR_TO_VECTOR.
7481 if (!V) {
7482 if (i != 0)
7483 V = getZeroVector(MVT::v8i16, Subtarget, DAG, dl);
7484 else {
7485 V = DAG.getNode(ISD::SCALAR_TO_VECTOR, dl, MVT::v4i32, Elt);
7486 V = DAG.getBitcast(MVT::v8i16, V);
7487 continue;
7488 }
7489 }
7490 Elt = DAG.getNode(ISD::TRUNCATE, dl, MVT::i16, Elt);
7491 V = DAG.getNode(ISD::INSERT_VECTOR_ELT, dl, MVT::v8i16, V, Elt,
7492 DAG.getIntPtrConstant(i / 2, dl));
7493 }
7494
7495 return DAG.getBitcast(MVT::v16i8, V);
7496}
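A plain-integer sketch of the pre-SSE4.1 byte-pair merge above: the odd byte is shifted into bits 15:8 and OR'd with the even byte before a single 16-bit PINSRW-style insert. The helper name is made up for the illustration.

    // Illustrative only: combine two adjacent v16i8 lanes into one v8i16 lane.
    #include <cstdint>
    #include <cstdio>

    uint16_t mergeBytePair(uint8_t Lo, uint8_t Hi) {
      // Matches the SHL-by-8 / OR sequence used before the PINSRW insert.
      return (uint16_t)((uint32_t)Hi << 8 | Lo);
    }

    int main() {
      std::printf("0x%04x\n", mergeBytePair(0x34, 0x12)); // prints 0x1234
    }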
7497
7498/// Custom lower build_vector of v8i16.
7499static SDValue LowerBuildVectorv8i16(SDValue Op, unsigned NonZeros,
7500 unsigned NumNonZero, unsigned NumZero,
7501 SelectionDAG &DAG,
7502 const X86Subtarget &Subtarget) {
7503 if (NumNonZero > 4 && !Subtarget.hasSSE41())
7504 return SDValue();
7505
7506 // Use PINSRW to insert each byte directly.
7507 return LowerBuildVectorAsInsert(Op, NonZeros, NumNonZero, NumZero, DAG,
7508 Subtarget);
7509}
7510
7511/// Custom lower build_vector of v4i32 or v4f32.
7512static SDValue LowerBuildVectorv4x32(SDValue Op, SelectionDAG &DAG,
7513 const X86Subtarget &Subtarget) {
7514 // If this is a splat of a pair of elements, use MOVDDUP (unless the target
7515 // has XOP; in that case defer lowering to potentially use VPERMIL2PS).
7516 // Because we're creating a less complicated build vector here, we may enable
7517 // further folding of the MOVDDUP via shuffle transforms.
7518 if (Subtarget.hasSSE3() && !Subtarget.hasXOP() &&
7519 Op.getOperand(0) == Op.getOperand(2) &&
7520 Op.getOperand(1) == Op.getOperand(3) &&
7521 Op.getOperand(0) != Op.getOperand(1)) {
7522 SDLoc DL(Op);
7523 MVT VT = Op.getSimpleValueType();
7524 MVT EltVT = VT.getVectorElementType();
7525 // Create a new build vector with the first 2 elements followed by undef
7526 // padding, bitcast to v2f64, duplicate, and bitcast back.
7527 SDValue Ops[4] = { Op.getOperand(0), Op.getOperand(1),
7528 DAG.getUNDEF(EltVT), DAG.getUNDEF(EltVT) };
7529 SDValue NewBV = DAG.getBitcast(MVT::v2f64, DAG.getBuildVector(VT, DL, Ops));
7530 SDValue Dup = DAG.getNode(X86ISD::MOVDDUP, DL, MVT::v2f64, NewBV);
7531 return DAG.getBitcast(VT, Dup);
7532 }
7533
7534 // Find all zeroable elements.
7535 std::bitset<4> Zeroable, Undefs;
7536 for (int i = 0; i < 4; ++i) {
7537 SDValue Elt = Op.getOperand(i);
7538 Undefs[i] = Elt.isUndef();
7539 Zeroable[i] = (Elt.isUndef() || X86::isZeroNode(Elt));
7540 }
7541   assert(Zeroable.size() - Zeroable.count() > 1 &&
7542          "We expect at least two non-zero elements!");
7543
7544 // We only know how to deal with build_vector nodes where elements are either
7545 // zeroable or extract_vector_elt with constant index.
7546 SDValue FirstNonZero;
7547 unsigned FirstNonZeroIdx;
7548 for (unsigned i = 0; i < 4; ++i) {
7549 if (Zeroable[i])
7550 continue;
7551 SDValue Elt = Op.getOperand(i);
7552 if (Elt.getOpcode() != ISD::EXTRACT_VECTOR_ELT ||
7553 !isa<ConstantSDNode>(Elt.getOperand(1)))
7554 return SDValue();
7555 // Make sure that this node is extracting from a 128-bit vector.
7556 MVT VT = Elt.getOperand(0).getSimpleValueType();
7557 if (!VT.is128BitVector())
7558 return SDValue();
7559 if (!FirstNonZero.getNode()) {
7560 FirstNonZero = Elt;
7561 FirstNonZeroIdx = i;
7562 }
7563 }
7564
7565   assert(FirstNonZero.getNode() && "Unexpected build vector of all zeros!");
7566 SDValue V1 = FirstNonZero.getOperand(0);
7567 MVT VT = V1.getSimpleValueType();
7568
7569 // See if this build_vector can be lowered as a blend with zero.
7570 SDValue Elt;
7571 unsigned EltMaskIdx, EltIdx;
7572 int Mask[4];
7573 for (EltIdx = 0; EltIdx < 4; ++EltIdx) {
7574 if (Zeroable[EltIdx]) {
7575 // The zero vector will be on the right hand side.
7576 Mask[EltIdx] = EltIdx+4;
7577 continue;
7578 }
7579
7580 Elt = Op->getOperand(EltIdx);
7581 // By construction, Elt is a EXTRACT_VECTOR_ELT with constant index.
7582 EltMaskIdx = Elt.getConstantOperandVal(1);
7583 if (Elt.getOperand(0) != V1 || EltMaskIdx != EltIdx)
7584 break;
7585 Mask[EltIdx] = EltIdx;
7586 }
7587
7588 if (EltIdx == 4) {
7589 // Let the shuffle legalizer deal with blend operations.
7590 SDValue VZeroOrUndef = (Zeroable == Undefs)
7591 ? DAG.getUNDEF(VT)
7592 : getZeroVector(VT, Subtarget, DAG, SDLoc(Op));
7593 if (V1.getSimpleValueType() != VT)
7594 V1 = DAG.getBitcast(VT, V1);
7595 return DAG.getVectorShuffle(VT, SDLoc(V1), V1, VZeroOrUndef, Mask);
7596 }
7597
7598 // See if we can lower this build_vector to a INSERTPS.
7599 if (!Subtarget.hasSSE41())
7600 return SDValue();
7601
7602 SDValue V2 = Elt.getOperand(0);
7603 if (Elt == FirstNonZero && EltIdx == FirstNonZeroIdx)
7604 V1 = SDValue();
7605
7606 bool CanFold = true;
7607 for (unsigned i = EltIdx + 1; i < 4 && CanFold; ++i) {
7608 if (Zeroable[i])
7609 continue;
7610
7611 SDValue Current = Op->getOperand(i);
7612 SDValue SrcVector = Current->getOperand(0);
7613 if (!V1.getNode())
7614 V1 = SrcVector;
7615 CanFold = (SrcVector == V1) && (Current.getConstantOperandAPInt(1) == i);
7616 }
7617
7618 if (!CanFold)
7619 return SDValue();
7620
7621   assert(V1.getNode() && "Expected at least two non-zero elements!");
7622 if (V1.getSimpleValueType() != MVT::v4f32)
7623 V1 = DAG.getBitcast(MVT::v4f32, V1);
7624 if (V2.getSimpleValueType() != MVT::v4f32)
7625 V2 = DAG.getBitcast(MVT::v4f32, V2);
7626
7627 // Ok, we can emit an INSERTPS instruction.
7628 unsigned ZMask = Zeroable.to_ulong();
7629
7630 unsigned InsertPSMask = EltMaskIdx << 6 | EltIdx << 4 | ZMask;
7631   assert((InsertPSMask & ~0xFFu) == 0 && "Invalid mask!");
7632 SDLoc DL(Op);
7633 SDValue Result = DAG.getNode(X86ISD::INSERTPS, DL, MVT::v4f32, V1, V2,
7634 DAG.getIntPtrConstant(InsertPSMask, DL, true));
7635 return DAG.getBitcast(VT, Result);
7636}
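A small sketch of how the INSERTPS control byte above is packed, using hypothetical lane numbers: bits 7:6 select the source lane (EltMaskIdx), bits 5:4 the destination lane (EltIdx), and bits 3:0 the zero mask.

    // Illustrative only: pack the INSERTPS control byte (count_s, count_d, zmask).
    #include <cassert>
    #include <cstdio>

    unsigned makeInsertPSMask(unsigned SrcLane, unsigned DstLane, unsigned ZMask) {
      unsigned Imm = (SrcLane << 6) | (DstLane << 4) | ZMask;
      assert((Imm & ~0xFFu) == 0 && "Invalid mask!");
      return Imm;
    }

    int main() {
      std::printf("0x%02x\n", makeInsertPSMask(2, 1, 0b1000)); // prints 0x98
    }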
7637
7638/// Return a vector logical shift node.
7639static SDValue getVShift(bool isLeft, EVT VT, SDValue SrcOp, unsigned NumBits,
7640 SelectionDAG &DAG, const TargetLowering &TLI,
7641 const SDLoc &dl) {
7642   assert(VT.is128BitVector() && "Unknown type for VShift");
7643 MVT ShVT = MVT::v16i8;
7644 unsigned Opc = isLeft ? X86ISD::VSHLDQ : X86ISD::VSRLDQ;
7645 SrcOp = DAG.getBitcast(ShVT, SrcOp);
7646   assert(NumBits % 8 == 0 && "Only support byte sized shifts");
7647 SDValue ShiftVal = DAG.getTargetConstant(NumBits / 8, dl, MVT::i8);
7648 return DAG.getBitcast(VT, DAG.getNode(Opc, dl, ShVT, SrcOp, ShiftVal));
7649}
7650
7651static SDValue LowerAsSplatVectorLoad(SDValue SrcOp, MVT VT, const SDLoc &dl,
7652 SelectionDAG &DAG) {
7653
7654 // Check if the scalar load can be widened into a vector load. And if
7655 // the address is "base + cst" see if the cst can be "absorbed" into
7656 // the shuffle mask.
7657 if (LoadSDNode *LD = dyn_cast<LoadSDNode>(SrcOp)) {
7658 SDValue Ptr = LD->getBasePtr();
7659 if (!ISD::isNormalLoad(LD) || !LD->isSimple())
7660 return SDValue();
7661 EVT PVT = LD->getValueType(0);
7662 if (PVT != MVT::i32 && PVT != MVT::f32)
7663 return SDValue();
7664
7665 int FI = -1;
7666 int64_t Offset = 0;
7667 if (FrameIndexSDNode *FINode = dyn_cast<FrameIndexSDNode>(Ptr)) {
7668 FI = FINode->getIndex();
7669 Offset = 0;
7670 } else if (DAG.isBaseWithConstantOffset(Ptr) &&
7671 isa<FrameIndexSDNode>(Ptr.getOperand(0))) {
7672 FI = cast<FrameIndexSDNode>(Ptr.getOperand(0))->getIndex();
7673 Offset = Ptr.getConstantOperandVal(1);
7674 Ptr = Ptr.getOperand(0);
7675 } else {
7676 return SDValue();
7677 }
7678
7679 // FIXME: 256-bit vector instructions don't require a strict alignment,
7680 // improve this code to support it better.
7681 unsigned RequiredAlign = VT.getSizeInBits()/8;
7682 SDValue Chain = LD->getChain();
7683 // Make sure the stack object alignment is at least 16 or 32.
7684 MachineFrameInfo &MFI = DAG.getMachineFunction().getFrameInfo();
7685 if (DAG.InferPtrAlignment(Ptr) < RequiredAlign) {
7686 if (MFI.isFixedObjectIndex(FI)) {
7687 // Can't change the alignment. FIXME: It's possible to compute
7688 // the exact stack offset and reference FI + adjust offset instead.
7689 // If someone *really* cares about this, that's the way to implement it.
7690 return SDValue();
7691 } else {
7692 MFI.setObjectAlignment(FI, RequiredAlign);
7693 }
7694 }
7695
7696 // (Offset % 16 or 32) must be a multiple of 4. The address is then
7697 // Ptr + (Offset & ~15).
7698 if (Offset < 0)
7699 return SDValue();
7700 if ((Offset % RequiredAlign) & 3)
7701 return SDValue();
7702 int64_t StartOffset = Offset & ~int64_t(RequiredAlign - 1);
7703 if (StartOffset) {
7704 SDLoc DL(Ptr);
7705 Ptr = DAG.getNode(ISD::ADD, DL, Ptr.getValueType(), Ptr,
7706 DAG.getConstant(StartOffset, DL, Ptr.getValueType()));
7707 }
7708
7709 int EltNo = (Offset - StartOffset) >> 2;
7710 unsigned NumElems = VT.getVectorNumElements();
7711
7712 EVT NVT = EVT::getVectorVT(*DAG.getContext(), PVT, NumElems);
7713 SDValue V1 = DAG.getLoad(NVT, dl, Chain, Ptr,
7714 LD->getPointerInfo().getWithOffset(StartOffset));
7715
7716 SmallVector<int, 8> Mask(NumElems, EltNo);
7717
7718 return DAG.getVectorShuffle(NVT, dl, V1, DAG.getUNDEF(NVT), Mask);
7719 }
7720
7721 return SDValue();
7722}
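A numeric illustration of the offset absorption above, with made-up values: for a 128-bit vector (RequiredAlign = 16) and a constant offset of 20 bytes, the widened load starts at Offset & ~15 = 16 and the splat shuffle selects element (20 - 16) / 4 = 1.

    // Illustrative only: absorb "base + cst" into the splat shuffle index.
    #include <cstdint>
    #include <cstdio>

    int main() {
      const int64_t Offset = 20;            // hypothetical constant offset
      const unsigned RequiredAlign = 16;    // 128-bit vector
      if ((Offset % RequiredAlign) & 3)     // must be a multiple of 4
        return 1;
      int64_t StartOffset = Offset & ~int64_t(RequiredAlign - 1);
      int EltNo = (int)((Offset - StartOffset) >> 2);
      std::printf("StartOffset=%lld EltNo=%d\n", (long long)StartOffset, EltNo);
      // Prints: StartOffset=16 EltNo=1
    }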
7723
7724// Recurse to find a LoadSDNode source and the accumulated ByteOffset.
7725static bool findEltLoadSrc(SDValue Elt, LoadSDNode *&Ld, int64_t &ByteOffset) {
7726 if (ISD::isNON_EXTLoad(Elt.getNode())) {
7727 auto *BaseLd = cast<LoadSDNode>(Elt);
7728 if (!BaseLd->isSimple())
7729 return false;
7730 Ld = BaseLd;
7731 ByteOffset = 0;
7732 return true;
7733 }
7734
7735 switch (Elt.getOpcode()) {
7736 case ISD::BITCAST:
7737 case ISD::TRUNCATE:
7738 case ISD::SCALAR_TO_VECTOR:
7739 return findEltLoadSrc(Elt.getOperand(0), Ld, ByteOffset);
7740 case ISD::SRL:
7741 if (isa<ConstantSDNode>(Elt.getOperand(1))) {
7742 uint64_t Idx = Elt.getConstantOperandVal(1);
7743 if ((Idx % 8) == 0 && findEltLoadSrc(Elt.getOperand(0), Ld, ByteOffset)) {
7744 ByteOffset += Idx / 8;
7745 return true;
7746 }
7747 }
7748 break;
7749 case ISD::EXTRACT_VECTOR_ELT:
7750 if (isa<ConstantSDNode>(Elt.getOperand(1))) {
7751 SDValue Src = Elt.getOperand(0);
7752 unsigned SrcSizeInBits = Src.getScalarValueSizeInBits();
7753 unsigned DstSizeInBits = Elt.getScalarValueSizeInBits();
7754 if (DstSizeInBits == SrcSizeInBits && (SrcSizeInBits % 8) == 0 &&
7755 findEltLoadSrc(Src, Ld, ByteOffset)) {
7756 uint64_t Idx = Elt.getConstantOperandVal(1);
7757 ByteOffset += Idx * (SrcSizeInBits / 8);
7758 return true;
7759 }
7760 }
7761 break;
7762 }
7763
7764 return false;
7765}
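Two worked examples (hypothetical nodes) of the byte offsets this helper accumulates: peeling (trunc (srl (load i64 %p), 32)) yields an offset of 32 / 8 = 4 bytes into %p, and extracting lane 3 of a loaded v4i32 yields 3 * 4 = 12 bytes.

    // Illustrative only: byte offsets recovered while peeling wrapper nodes.
    #include <cstdint>
    #include <cstdio>

    int main() {
      // (trunc (srl (load i64 %p), 32)): the element lives at %p + 32/8.
      int64_t SrlOffset = 32 / 8;                   // 4 bytes
      // (extract_vector_elt (load v4i32 %q), 3): lane 3 lives at %q + 3*4.
      int64_t ExtractOffset = 3 * (32 / 8);         // 12 bytes
      std::printf("%lld %lld\n", (long long)SrlOffset, (long long)ExtractOffset);
    }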
7766
7767/// Given the initializing elements 'Elts' of a vector of type 'VT', see if the
7768/// elements can be replaced by a single large load which has the same value as
7769/// a build_vector or insert_subvector whose loaded operands are 'Elts'.
7770///
7771/// Example: <load i32 *a, load i32 *a+4, zero, undef> -> zextload a
7772static SDValue EltsFromConsecutiveLoads(EVT VT, ArrayRef<SDValue> Elts,
7773 const SDLoc &DL, SelectionDAG &DAG,
7774 const X86Subtarget &Subtarget,
7775 bool isAfterLegalize) {
7776 if ((VT.getScalarSizeInBits() % 8) != 0)
7777 return SDValue();
7778
7779 unsigned NumElems = Elts.size();
7780
7781 int LastLoadedElt = -1;
7782 APInt LoadMask = APInt::getNullValue(NumElems);
7783 APInt ZeroMask = APInt::getNullValue(NumElems);
7784 APInt UndefMask = APInt::getNullValue(NumElems);
7785
7786 SmallVector<LoadSDNode*, 8> Loads(NumElems, nullptr);
7787 SmallVector<int64_t, 8> ByteOffsets(NumElems, 0);
7788
7789 // For each element in the initializer, see if we've found a load, zero or an
7790 // undef.
7791 for (unsigned i = 0; i < NumElems; ++i) {
7792 SDValue Elt = peekThroughBitcasts(Elts[i]);
7793 if (!Elt.getNode())
7794 return SDValue();
7795 if (Elt.isUndef()) {
7796 UndefMask.setBit(i);
7797 continue;
7798 }
7799 if (X86::isZeroNode(Elt) || ISD::isBuildVectorAllZeros(Elt.getNode())) {
7800 ZeroMask.setBit(i);
7801 continue;
7802 }
7803
7804 // Each loaded element must be the correct fractional portion of the
7805 // requested vector load.
7806 unsigned EltSizeInBits = Elt.getValueSizeInBits();
7807 if ((NumElems * EltSizeInBits) != VT.getSizeInBits())
7808 return SDValue();
7809
7810 if (!findEltLoadSrc(Elt, Loads[i], ByteOffsets[i]) || ByteOffsets[i] < 0)
7811 return SDValue();
7812 unsigned LoadSizeInBits = Loads[i]->getValueSizeInBits(0);
7813 if (((ByteOffsets[i] * 8) + EltSizeInBits) > LoadSizeInBits)
7814 return SDValue();
7815
7816 LoadMask.setBit(i);
7817 LastLoadedElt = i;
7818 }
7819   assert((ZeroMask.countPopulation() + UndefMask.countPopulation() +
7820           LoadMask.countPopulation()) == NumElems &&
7821          "Incomplete element masks");
7822
7823 // Handle Special Cases - all undef or undef/zero.
7824 if (UndefMask.countPopulation() == NumElems)
7825 return DAG.getUNDEF(VT);
7826
7827 // FIXME: Should we return this as a BUILD_VECTOR instead?
7828 if ((ZeroMask.countPopulation() + UndefMask.countPopulation()) == NumElems)
7829 return VT.isInteger() ? DAG.getConstant(0, DL, VT)
7830 : DAG.getConstantFP(0.0, DL, VT);
7831
7832 const TargetLowering &TLI = DAG.getTargetLoweringInfo();
7833 int FirstLoadedElt = LoadMask.countTrailingZeros();
7834 SDValue EltBase = peekThroughBitcasts(Elts[FirstLoadedElt]);
7835 EVT EltBaseVT = EltBase.getValueType();
7836   assert(EltBaseVT.getSizeInBits() == EltBaseVT.getStoreSizeInBits() &&
7837          "Register/Memory size mismatch");
7838 LoadSDNode *LDBase = Loads[FirstLoadedElt];
7839   assert(LDBase && "Did not find base load for merging consecutive loads");
7840 unsigned BaseSizeInBits = EltBaseVT.getStoreSizeInBits();
7841 unsigned BaseSizeInBytes = BaseSizeInBits / 8;
7842 int LoadSizeInBits = (1 + LastLoadedElt - FirstLoadedElt) * BaseSizeInBits;
7843   assert((BaseSizeInBits % 8) == 0 && "Sub-byte element loads detected");
7844
7845 // TODO: Support offsetting the base load.
7846 if (ByteOffsets[FirstLoadedElt] != 0)
7847 return SDValue();
7848
7849 // Check to see if the element's load is consecutive to the base load
7850 // or offset from a previous (already checked) load.
7851 auto CheckConsecutiveLoad = [&](LoadSDNode *Base, int EltIdx) {
7852 LoadSDNode *Ld = Loads[EltIdx];
7853 int64_t ByteOffset = ByteOffsets[EltIdx];
7854 if (ByteOffset && (ByteOffset % BaseSizeInBytes) == 0) {
7855 int64_t BaseIdx = EltIdx - (ByteOffset / BaseSizeInBytes);
7856 return (0 <= BaseIdx && BaseIdx < (int)NumElems && LoadMask[BaseIdx] &&
7857 Loads[BaseIdx] == Ld && ByteOffsets[BaseIdx] == 0);
7858 }
7859 return DAG.areNonVolatileConsecutiveLoads(Ld, Base, BaseSizeInBytes,
7860 EltIdx - FirstLoadedElt);
7861 };
7862
7863 // Consecutive loads can contain UNDEFs but not ZERO elements.
7864 // Consecutive loads with UNDEFs and ZERO elements require an
7865 // additional shuffle stage to clear the ZERO elements.
7866 bool IsConsecutiveLoad = true;
7867 bool IsConsecutiveLoadWithZeros = true;
7868 for (int i = FirstLoadedElt + 1; i <= LastLoadedElt; ++i) {
7869 if (LoadMask[i]) {
7870 if (!CheckConsecutiveLoad(LDBase, i)) {
7871 IsConsecutiveLoad = false;
7872 IsConsecutiveLoadWithZeros = false;
7873 break;
7874 }
7875 } else if (ZeroMask[i]) {
7876 IsConsecutiveLoad = false;
7877 }
7878 }
7879
7880 auto CreateLoad = [&DAG, &DL, &Loads](EVT VT, LoadSDNode *LDBase) {
7881 auto MMOFlags = LDBase->getMemOperand()->getFlags();
7882     assert(LDBase->isSimple() &&
7883            "Cannot merge volatile or atomic loads.");
7884 SDValue NewLd =
7885 DAG.getLoad(VT, DL, LDBase->getChain(), LDBase->getBasePtr(),
7886 LDBase->getPointerInfo(), LDBase->getAlignment(), MMOFlags);
7887 for (auto *LD : Loads)
7888 if (LD)
7889 DAG.makeEquivalentMemoryOrdering(LD, NewLd);
7890 return NewLd;
7891 };
7892
7893 // Check if the base load is entirely dereferenceable.
7894 bool IsDereferenceable = LDBase->getPointerInfo().isDereferenceable(
7895 VT.getSizeInBits() / 8, *DAG.getContext(), DAG.getDataLayout());
7896
7897 // LOAD - all consecutive load/undefs (must start/end with a load or be
7898 // entirely dereferenceable). If we have found an entire vector of loads and
7899 // undefs, then return a large load of the entire vector width starting at the
7900 // base pointer. If the vector contains zeros, then attempt to shuffle those
7901 // elements.
7902 if (FirstLoadedElt == 0 &&
7903 (LastLoadedElt == (int)(NumElems - 1) || IsDereferenceable) &&
7904 (IsConsecutiveLoad || IsConsecutiveLoadWithZeros)) {
7905 if (isAfterLegalize && !TLI.isOperationLegal(ISD::LOAD, VT))
7906 return SDValue();
7907
7908 // Don't create 256-bit non-temporal aligned loads without AVX2 as these
7909 // will lower to regular temporal loads and use the cache.
7910 if (LDBase->isNonTemporal() && LDBase->getAlignment() >= 32 &&
7911 VT.is256BitVector() && !Subtarget.hasInt256())
7912 return SDValue();
7913
7914 if (NumElems == 1)
7915 return DAG.getBitcast(VT, Elts[FirstLoadedElt]);
7916
7917 if (!ZeroMask)
7918 return CreateLoad(VT, LDBase);
7919
7920 // IsConsecutiveLoadWithZeros - we need to create a shuffle of the loaded
7921 // vector and a zero vector to clear out the zero elements.
7922 if (!isAfterLegalize && VT.isVector()) {
7923 unsigned NumMaskElts = VT.getVectorNumElements();
7924 if ((NumMaskElts % NumElems) == 0) {
7925 unsigned Scale = NumMaskElts / NumElems;
7926 SmallVector<int, 4> ClearMask(NumMaskElts, -1);
7927 for (unsigned i = 0; i < NumElems; ++i) {
7928 if (UndefMask[i])
7929 continue;
7930 int Offset = ZeroMask[i] ? NumMaskElts : 0;
7931 for (unsigned j = 0; j != Scale; ++j)
7932 ClearMask[(i * Scale) + j] = (i * Scale) + j + Offset;
7933 }
7934 SDValue V = CreateLoad(VT, LDBase);
7935 SDValue Z = VT.isInteger() ? DAG.getConstant(0, DL, VT)
7936 : DAG.getConstantFP(0.0, DL, VT);
7937 return DAG.getVectorShuffle(VT, DL, V, Z, ClearMask);
7938 }
7939 }
7940 }
7941
7942 // If the upper half of a ymm/zmm load is undef then just load the lower half.
7943 if (VT.is256BitVector() || VT.is512BitVector()) {
7944 unsigned HalfNumElems = NumElems / 2;
7945 if (UndefMask.extractBits(HalfNumElems, HalfNumElems).isAllOnesValue()) {
7946 EVT HalfVT =
7947 EVT::getVectorVT(*DAG.getContext(), VT.getScalarType(), HalfNumElems);
7948 SDValue HalfLD =
7949 EltsFromConsecutiveLoads(HalfVT, Elts.drop_back(HalfNumElems), DL,
7950 DAG, Subtarget, isAfterLegalize);
7951 if (HalfLD)
7952 return DAG.getNode(ISD::INSERT_SUBVECTOR, DL, VT, DAG.getUNDEF(VT),
7953 HalfLD, DAG.getIntPtrConstant(0, DL));
7954 }
7955 }
7956
7957 // VZEXT_LOAD - consecutive 32/64-bit load/undefs followed by zeros/undefs.
7958 if (IsConsecutiveLoad && FirstLoadedElt == 0 &&
7959 (LoadSizeInBits == 32 || LoadSizeInBits == 64) &&
7960 ((VT.is128BitVector() || VT.is256BitVector() || VT.is512BitVector()))) {
7961 MVT VecSVT = VT.isFloatingPoint() ? MVT::getFloatingPointVT(LoadSizeInBits)
7962 : MVT::getIntegerVT(LoadSizeInBits);
7963 MVT VecVT = MVT::getVectorVT(VecSVT, VT.getSizeInBits() / LoadSizeInBits);
7964 if (TLI.isTypeLegal(VecVT)) {
7965 SDVTList Tys = DAG.getVTList(VecVT, MVT::Other);
7966 SDValue Ops[] = { LDBase->getChain(), LDBase->getBasePtr() };
7967 SDValue ResNode =
7968 DAG.getMemIntrinsicNode(X86ISD::VZEXT_LOAD, DL, Tys, Ops, VecSVT,
7969 LDBase->getPointerInfo(),
7970 LDBase->getAlignment(),
7971 MachineMemOperand::MOLoad);
7972 for (auto *LD : Loads)
7973 if (LD)
7974 DAG.makeEquivalentMemoryOrdering(LD, ResNode);
7975 return DAG.getBitcast(VT, ResNode);
7976 }
7977 }
7978
7979 // BROADCAST - match the smallest possible repetition pattern, load that
7980 // scalar/subvector element and then broadcast to the entire vector.
7981 if (ZeroMask.isNullValue() && isPowerOf2_32(NumElems) && Subtarget.hasAVX() &&
7982 (VT.is128BitVector() || VT.is256BitVector() || VT.is512BitVector())) {
7983 for (unsigned SubElems = 1; SubElems < NumElems; SubElems *= 2) {
7984 unsigned RepeatSize = SubElems * BaseSizeInBits;
7985 unsigned ScalarSize = std::min(RepeatSize, 64u);
7986 if (!Subtarget.hasAVX2() && ScalarSize < 32)
7987 continue;
7988
7989 bool Match = true;
7990 SmallVector<SDValue, 8> RepeatedLoads(SubElems, DAG.getUNDEF(EltBaseVT));
7991 for (unsigned i = 0; i != NumElems && Match; ++i) {
7992 if (!LoadMask[i])
7993 continue;
7994 SDValue Elt = peekThroughBitcasts(Elts[i]);
7995 if (RepeatedLoads[i % SubElems].isUndef())
7996 RepeatedLoads[i % SubElems] = Elt;
7997 else
7998 Match &= (RepeatedLoads[i % SubElems] == Elt);
7999 }
8000
8001 // We must have loads at both ends of the repetition.
8002 Match &= !RepeatedLoads.front().isUndef();
8003 Match &= !RepeatedLoads.back().isUndef();
8004 if (!Match)
8005 continue;
8006
8007 EVT RepeatVT =
8008 VT.isInteger() && (RepeatSize != 64 || TLI.isTypeLegal(MVT::i64))
8009 ? EVT::getIntegerVT(*DAG.getContext(), ScalarSize)
8010 : EVT::getFloatingPointVT(ScalarSize);
8011 if (RepeatSize > ScalarSize)
8012 RepeatVT = EVT::getVectorVT(*DAG.getContext(), RepeatVT,
8013 RepeatSize / ScalarSize);
8014 EVT BroadcastVT =
8015 EVT::getVectorVT(*DAG.getContext(), RepeatVT.getScalarType(),
8016 VT.getSizeInBits() / ScalarSize);
8017 if (TLI.isTypeLegal(BroadcastVT)) {
8018 if (SDValue RepeatLoad = EltsFromConsecutiveLoads(
8019 RepeatVT, RepeatedLoads, DL, DAG, Subtarget, isAfterLegalize)) {
8020 unsigned Opcode = RepeatSize > ScalarSize ? X86ISD::SUBV_BROADCAST
8021 : X86ISD::VBROADCAST;
8022 SDValue Broadcast = DAG.getNode(Opcode, DL, BroadcastVT, RepeatLoad);
8023 return DAG.getBitcast(VT, Broadcast);
8024 }
8025 }
8026 }
8027 }
8028
8029 return SDValue();
8030}
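A numeric sketch of the zero-clearing shuffle built in the IsConsecutiveLoadWithZeros path above, with hypothetical masks: four elements, element 2 known zero and Scale = 1, so the clear mask keeps lanes 0, 1 and 3 from the wide load and takes lane 2 from the zero vector (operand 1 of the shuffle).

    // Illustrative only: ClearMask construction for a load with zero elements.
    #include <cstdio>
    #include <vector>

    int main() {
      const unsigned NumElems = 4, NumMaskElts = 4, Scale = NumMaskElts / NumElems;
      bool ZeroMask[NumElems]  = {false, false, true, false};
      bool UndefMask[NumElems] = {false, false, false, false};
      std::vector<int> ClearMask(NumMaskElts, -1);
      for (unsigned i = 0; i < NumElems; ++i) {
        if (UndefMask[i])
          continue;
        int Offset = ZeroMask[i] ? (int)NumMaskElts : 0; // zero vector is operand 1
        for (unsigned j = 0; j != Scale; ++j)
          ClearMask[i * Scale + j] = (int)(i * Scale + j) + Offset;
      }
      for (int M : ClearMask) std::printf("%d ", M);     // prints: 0 1 6 3
      std::printf("\n");
    }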
8031
8032// Combine vector ops (shuffles etc.) that are equal to build_vector load1,
8033// load2, load3, load4, <0, 1, 2, 3> into a vector load if the load addresses
8034// are consecutive, non-overlapping, and in the right order.
8035static SDValue combineToConsecutiveLoads(EVT VT, SDNode *N, const SDLoc &DL,
8036 SelectionDAG &DAG,
8037 const X86Subtarget &Subtarget,
8038 bool isAfterLegalize) {
8039 SmallVector<SDValue, 64> Elts;
8040 for (unsigned i = 0, e = VT.getVectorNumElements(); i != e; ++i) {
8041 if (SDValue Elt = getShuffleScalarElt(N, i, DAG, 0)) {
8042 Elts.push_back(Elt);
8043 continue;
8044 }
8045 return SDValue();
8046 }
8047   assert(Elts.size() == VT.getVectorNumElements());
8048 return EltsFromConsecutiveLoads(VT, Elts, DL, DAG, Subtarget,
8049 isAfterLegalize);
8050}
8051
8052static Constant *getConstantVector(MVT VT, const APInt &SplatValue,
8053 unsigned SplatBitSize, LLVMContext &C) {
8054 unsigned ScalarSize = VT.getScalarSizeInBits();
8055 unsigned NumElm = SplatBitSize / ScalarSize;
8056
8057 SmallVector<Constant *, 32> ConstantVec;
8058 for (unsigned i = 0; i < NumElm; i++) {
8059 APInt Val = SplatValue.extractBits(ScalarSize, ScalarSize * i);
8060 Constant *Const;
8061 if (VT.isFloatingPoint()) {
8062 if (ScalarSize == 32) {
8063 Const = ConstantFP::get(C, APFloat(APFloat::IEEEsingle(), Val));
8064 } else {
8065        assert(ScalarSize == 64 && "Unsupported floating point scalar size");
8066 Const = ConstantFP::get(C, APFloat(APFloat::IEEEdouble(), Val));
8067 }
8068 } else
8069 Const = Constant::getIntegerValue(Type::getIntNTy(C, ScalarSize), Val);
8070 ConstantVec.push_back(Const);
8071 }
8072 return ConstantVector::get(ArrayRef<Constant *>(ConstantVec));
8073}
8074
8075static bool isFoldableUseOfShuffle(SDNode *N) {
8076 for (auto *U : N->uses()) {
8077 unsigned Opc = U->getOpcode();
8078 // VPERMV/VPERMV3 shuffles can never fold their index operands.
8079 if (Opc == X86ISD::VPERMV && U->getOperand(0).getNode() == N)
8080 return false;
8081 if (Opc == X86ISD::VPERMV3 && U->getOperand(1).getNode() == N)
8082 return false;
8083 if (isTargetShuffle(Opc))
8084 return true;
8085 if (Opc == ISD::BITCAST) // Ignore bitcasts
8086 return isFoldableUseOfShuffle(U);
8087 if (N->hasOneUse())
8088 return true;
8089 }
8090 return false;
8091}
8092
8093// Check if the current node of a build vector is a zero-extended vector.
8094// If so, return the value extended.
8095// For example: (0,0,0,a,0,0,0,a,0,0,0,a,0,0,0,a) returns a.
8096// NumElt - return the number of zero-extended identical values.
8097// EltType - return the type of the value including the zero extend.
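// For the example above with 8-bit build_vector elements, the value 'a'
// repeats every 4 elements, so (illustratively) Delta = 4, EltType = i32 and
// NumElt = 4 for a 16-element vector.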
8098static SDValue isSplatZeroExtended(const BuildVectorSDNode *Op,
8099 unsigned &NumElt, MVT &EltType) {
8100 SDValue ExtValue = Op->getOperand(0);
8101 unsigned NumElts = Op->getNumOperands();
8102 unsigned Delta = NumElts;
8103
8104 for (unsigned i = 1; i < NumElts; i++) {
8105 if (Op->getOperand(i) == ExtValue) {
8106 Delta = i;
8107 break;
8108 }
8109 if (!(Op->getOperand(i).isUndef() || isNullConstant(Op->getOperand(i))))
8110 return SDValue();
8111 }
8112 if (!isPowerOf2_32(Delta) || Delta == 1)
8113 return SDValue();
8114
8115 for (unsigned i = Delta; i < NumElts; i++) {
8116 if (i % Delta == 0) {
8117 if (Op->getOperand(i) != ExtValue)
8118 return SDValue();
8119 } else if (!(isNullConstant(Op->getOperand(i)) ||
8120 Op->getOperand(i).isUndef()))
8121 return SDValue();
8122 }
8123 unsigned EltSize = Op->getSimpleValueType(0).getScalarSizeInBits();
8124 unsigned ExtVTSize = EltSize * Delta;
8125 EltType = MVT::getIntegerVT(ExtVTSize);
8126 NumElt = NumElts / Delta;
8127 return ExtValue;
8128}
8129
8130/// Attempt to use the vbroadcast instruction to generate a splat value
8131/// from a splat BUILD_VECTOR which uses:
8132/// a. A single scalar load, or a constant.
8133/// b. Repeated pattern of constants (e.g. <0,1,0,1> or <0,1,2,3,0,1,2,3>).
8134///
8135/// The VBROADCAST node is returned when a pattern is found,
8136/// or SDValue() otherwise.
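/// As an illustrative sketch of case (a): a v8f32 BUILD_VECTOR whose operands
/// are all the same scalar f32 load can be lowered to
///   (v8f32 (X86ISD::VBROADCAST (load addr)))
/// which selects to vbroadcastss on AVX targets (addr is a placeholder).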
8137static SDValue lowerBuildVectorAsBroadcast(BuildVectorSDNode *BVOp,
8138 const X86Subtarget &Subtarget,
8139 SelectionDAG &DAG) {
8140 // VBROADCAST requires AVX.
8141 // TODO: Splats could be generated for non-AVX CPUs using SSE
8142 // instructions, but there's less potential gain for only 128-bit vectors.
8143 if (!Subtarget.hasAVX())
8144 return SDValue();
8145
8146 MVT VT = BVOp->getSimpleValueType(0);
8147 SDLoc dl(BVOp);
8148
8149  assert((VT.is128BitVector() || VT.is256BitVector() || VT.is512BitVector()) &&
8150         "Unsupported vector type for broadcast.");
8151
8152 BitVector UndefElements;
8153 SDValue Ld = BVOp->getSplatValue(&UndefElements);
8154
8155 // Attempt to use VBROADCASTM
8156  // From this pattern:
8157 // a. t0 = (zext_i64 (bitcast_i8 v2i1 X))
8158 // b. t1 = (build_vector t0 t0)
8159 //
8160 // Create (VBROADCASTM v2i1 X)
8161 if (Subtarget.hasCDI() && (VT.is512BitVector() || Subtarget.hasVLX())) {
8162 MVT EltType = VT.getScalarType();
8163 unsigned NumElts = VT.getVectorNumElements();
8164 SDValue BOperand;
8165 SDValue ZeroExtended = isSplatZeroExtended(BVOp, NumElts, EltType);
8166 if ((ZeroExtended && ZeroExtended.getOpcode() == ISD::BITCAST) ||
8167 (Ld && Ld.getOpcode() == ISD::ZERO_EXTEND &&
8168 Ld.getOperand(0).getOpcode() == ISD::BITCAST)) {
8169 if (ZeroExtended)
8170 BOperand = ZeroExtended.getOperand(0);
8171 else
8172 BOperand = Ld.getOperand(0).getOperand(0);
8173 MVT MaskVT = BOperand.getSimpleValueType();
8174 if ((EltType == MVT::i64 && MaskVT == MVT::v8i1) || // for broadcastmb2q
8175 (EltType == MVT::i32 && MaskVT == MVT::v16i1)) { // for broadcastmw2d
8176 SDValue Brdcst =
8177 DAG.getNode(X86ISD::VBROADCASTM, dl,
8178 MVT::getVectorVT(EltType, NumElts), BOperand);
8179 return DAG.getBitcast(VT, Brdcst);
8180 }
8181 }
8182 }
8183
8184 unsigned NumElts = VT.getVectorNumElements();
8185 unsigned NumUndefElts = UndefElements.count();
8186 if (!Ld || (NumElts - NumUndefElts) <= 1) {
8187 APInt SplatValue, Undef;
8188 unsigned SplatBitSize;
8189 bool HasUndef;
8190 // Check if this is a repeated constant pattern suitable for broadcasting.
8191 if (BVOp->isConstantSplat(SplatValue, Undef, SplatBitSize, HasUndef) &&
8192 SplatBitSize > VT.getScalarSizeInBits() &&
8193 SplatBitSize < VT.getSizeInBits()) {
8194 // Avoid replacing with broadcast when it's a use of a shuffle
8195 // instruction to preserve the present custom lowering of shuffles.
8196 if (isFoldableUseOfShuffle(BVOp))
8197 return SDValue();
8198 // replace BUILD_VECTOR with broadcast of the repeated constants.
8199 const TargetLowering &TLI = DAG.getTargetLoweringInfo();
8200 LLVMContext *Ctx = DAG.getContext();
8201 MVT PVT = TLI.getPointerTy(DAG.getDataLayout());
8202 if (Subtarget.hasAVX()) {
8203 if (SplatBitSize <= 64 && Subtarget.hasAVX2() &&
8204 !(SplatBitSize == 64 && Subtarget.is32Bit())) {
8205 // Splatted value can fit in one INTEGER constant in constant pool.
8206 // Load the constant and broadcast it.
8207 MVT CVT = MVT::getIntegerVT(SplatBitSize);
8208 Type *ScalarTy = Type::getIntNTy(*Ctx, SplatBitSize);
8209 Constant *C = Constant::getIntegerValue(ScalarTy, SplatValue);
8210 SDValue CP = DAG.getConstantPool(C, PVT);
8211 unsigned Repeat = VT.getSizeInBits() / SplatBitSize;
8212
8213 unsigned Alignment = cast<ConstantPoolSDNode>(CP)->getAlignment();
8214 Ld = DAG.getLoad(
8215 CVT, dl, DAG.getEntryNode(), CP,
8216 MachinePointerInfo::getConstantPool(DAG.getMachineFunction()),
8217 Alignment);
8218 SDValue Brdcst = DAG.getNode(X86ISD::VBROADCAST, dl,
8219 MVT::getVectorVT(CVT, Repeat), Ld);
8220 return DAG.getBitcast(VT, Brdcst);
8221 } else if (SplatBitSize == 32 || SplatBitSize == 64) {
8222 // Splatted value can fit in one FLOAT constant in constant pool.
8223 // Load the constant and broadcast it.
8224          // AVX has support for 32- and 64-bit broadcasts of floats only.
8225          // There is no 64-bit integer broadcast on a 32-bit subtarget.
8226 MVT CVT = MVT::getFloatingPointVT(SplatBitSize);
8227 // Lower the splat via APFloat directly, to avoid any conversion.
8228 Constant *C =
8229 SplatBitSize == 32
8230 ? ConstantFP::get(*Ctx,
8231 APFloat(APFloat::IEEEsingle(), SplatValue))
8232 : ConstantFP::get(*Ctx,
8233 APFloat(APFloat::IEEEdouble(), SplatValue));
8234 SDValue CP = DAG.getConstantPool(C, PVT);
8235 unsigned Repeat = VT.getSizeInBits() / SplatBitSize;
8236
8237 unsigned Alignment = cast<ConstantPoolSDNode>(CP)->getAlignment();
8238 Ld = DAG.getLoad(
8239 CVT, dl, DAG.getEntryNode(), CP,
8240 MachinePointerInfo::getConstantPool(DAG.getMachineFunction()),
8241 Alignment);
8242 SDValue Brdcst = DAG.getNode(X86ISD::VBROADCAST, dl,
8243 MVT::getVectorVT(CVT, Repeat), Ld);
8244 return DAG.getBitcast(VT, Brdcst);
8245 } else if (SplatBitSize > 64) {
8246 // Load the vector of constants and broadcast it.
8247 MVT CVT = VT.getScalarType();
8248 Constant *VecC = getConstantVector(VT, SplatValue, SplatBitSize,
8249 *Ctx);
8250 SDValue VCP = DAG.getConstantPool(VecC, PVT);
8251 unsigned NumElm = SplatBitSize / VT.getScalarSizeInBits();
8252 unsigned Alignment = cast<ConstantPoolSDNode>(VCP)->getAlignment();
8253 Ld = DAG.getLoad(
8254 MVT::getVectorVT(CVT, NumElm), dl, DAG.getEntryNode(), VCP,
8255 MachinePointerInfo::getConstantPool(DAG.getMachineFunction()),
8256 Alignment);
8257 SDValue Brdcst = DAG.getNode(X86ISD::SUBV_BROADCAST, dl, VT, Ld);
8258 return DAG.getBitcast(VT, Brdcst);
8259 }
8260 }
8261 }
8262
8263 // If we are moving a scalar into a vector (Ld must be set and all elements
8264 // but 1 are undef) and that operation is not obviously supported by
8265 // vmovd/vmovq/vmovss/vmovsd, then keep trying to form a broadcast.
8266 // That's better than general shuffling and may eliminate a load to GPR and
8267 // move from scalar to vector register.
8268 if (!Ld || NumElts - NumUndefElts != 1)
8269 return SDValue();
8270 unsigned ScalarSize = Ld.getValueSizeInBits();
8271 if (!(UndefElements[0] || (ScalarSize != 32 && ScalarSize != 64)))
8272 return SDValue();
8273 }
8274
8275 bool ConstSplatVal =
8276 (Ld.getOpcode() == ISD::Constant || Ld.getOpcode() == ISD::ConstantFP);
8277
8278 // Make sure that all of the users of a non-constant load are from the
8279 // BUILD_VECTOR node.
8280 if (!ConstSplatVal && !BVOp->isOnlyUserOf(Ld.getNode()))
8281 return SDValue();
8282
8283 unsigned ScalarSize = Ld.getValueSizeInBits();
8284 bool IsGE256 = (VT.getSizeInBits() >= 256);
8285
8286 // When optimizing for size, generate up to 5 extra bytes for a broadcast
8287 // instruction to save 8 or more bytes of constant pool data.
8288 // TODO: If multiple splats are generated to load the same constant,
8289 // it may be detrimental to overall size. There needs to be a way to detect
8290 // that condition to know if this is truly a size win.
8291 bool OptForSize = DAG.getMachineFunction().getFunction().hasOptSize();
8292
8293 // Handle broadcasting a single constant scalar from the constant pool
8294 // into a vector.
8295 // On Sandybridge (no AVX2), it is still better to load a constant vector
8296 // from the constant pool and not to broadcast it from a scalar.
8297 // But override that restriction when optimizing for size.
8298 // TODO: Check if splatting is recommended for other AVX-capable CPUs.
8299 if (ConstSplatVal && (Subtarget.hasAVX2() || OptForSize)) {
8300 EVT CVT = Ld.getValueType();
8301    assert(!CVT.isVector() && "Must not broadcast a vector type");
8302
8303 // Splat f32, i32, v4f64, v4i64 in all cases with AVX2.
8304 // For size optimization, also splat v2f64 and v2i64, and for size opt
8305 // with AVX2, also splat i8 and i16.
8306 // With pattern matching, the VBROADCAST node may become a VMOVDDUP.
8307 if (ScalarSize == 32 || (IsGE256 && ScalarSize == 64) ||
8308 (OptForSize && (ScalarSize == 64 || Subtarget.hasAVX2()))) {
8309 const Constant *C = nullptr;
8310 if (ConstantSDNode *CI = dyn_cast<ConstantSDNode>(Ld))
8311 C = CI->getConstantIntValue();
8312 else if (ConstantFPSDNode *CF = dyn_cast<ConstantFPSDNode>(Ld))
8313 C = CF->getConstantFPValue();
8314
8315      assert(C && "Invalid constant type");
8316
8317 const TargetLowering &TLI = DAG.getTargetLoweringInfo();
8318 SDValue CP =
8319 DAG.getConstantPool(C, TLI.getPointerTy(DAG.getDataLayout()));
8320 unsigned Alignment = cast<ConstantPoolSDNode>(CP)->getAlignment();
8321 Ld = DAG.getLoad(
8322 CVT, dl, DAG.getEntryNode(), CP,
8323 MachinePointerInfo::getConstantPool(DAG.getMachineFunction()),
8324 Alignment);
8325
8326 return DAG.getNode(X86ISD::VBROADCAST, dl, VT, Ld);
8327 }
8328 }
8329
8330 bool IsLoad = ISD::isNormalLoad(Ld.getNode());
8331
8332 // Handle AVX2 in-register broadcasts.
8333 if (!IsLoad && Subtarget.hasInt256() &&
8334 (ScalarSize == 32 || (IsGE256 && ScalarSize == 64)))
8335 return DAG.getNode(X86ISD::VBROADCAST, dl, VT, Ld);
8336
8337 // The scalar source must be a normal load.
8338 if (!IsLoad)
8339 return SDValue();
8340
8341 if (ScalarSize == 32 || (IsGE256 && ScalarSize == 64) ||
8342 (Subtarget.hasVLX() && ScalarSize == 64))
8343 return DAG.getNode(X86ISD::VBROADCAST, dl, VT, Ld);
8344
8345  // The integer check is needed for the 64-bit into 128-bit case so it doesn't
8346  // match double, since there is no vbroadcastsd xmm instruction.
8347 if (Subtarget.hasInt256() && Ld.getValueType().isInteger()) {
8348 if (ScalarSize == 8 || ScalarSize == 16 || ScalarSize == 64)
8349 return DAG.getNode(X86ISD::VBROADCAST, dl, VT, Ld);
8350 }
8351
8352 // Unsupported broadcast.
8353 return SDValue();
8354}
8355
8356/// For an EXTRACT_VECTOR_ELT with a constant index return the real
8357/// underlying vector and index.
8358///
8359/// Modifies \p ExtractedFromVec to the real vector and returns the real
8360/// index.
8361static int getUnderlyingExtractedFromVec(SDValue &ExtractedFromVec,
8362 SDValue ExtIdx) {
8363 int Idx = cast<ConstantSDNode>(ExtIdx)->getZExtValue();
8364 if (!isa<ShuffleVectorSDNode>(ExtractedFromVec))
8365 return Idx;
8366
8367 // For 256-bit vectors, LowerEXTRACT_VECTOR_ELT_SSE4 may have already
8368 // lowered this:
8369 // (extract_vector_elt (v8f32 %1), Constant<6>)
8370 // to:
8371 // (extract_vector_elt (vector_shuffle<2,u,u,u>
8372 // (extract_subvector (v8f32 %0), Constant<4>),
8373 // undef)
8374 // Constant<0>)
8375 // In this case the vector is the extract_subvector expression and the index
8376 // is 2, as specified by the shuffle.
8377 ShuffleVectorSDNode *SVOp = cast<ShuffleVectorSDNode>(ExtractedFromVec);
8378 SDValue ShuffleVec = SVOp->getOperand(0);
8379 MVT ShuffleVecVT = ShuffleVec.getSimpleValueType();
8380  assert(ShuffleVecVT.getVectorElementType() ==
8381         ExtractedFromVec.getSimpleValueType().getVectorElementType());
8382
8383 int ShuffleIdx = SVOp->getMaskElt(Idx);
8384 if (isUndefOrInRange(ShuffleIdx, 0, ShuffleVecVT.getVectorNumElements())) {
8385 ExtractedFromVec = ShuffleVec;
8386 return ShuffleIdx;
8387 }
8388 return Idx;
8389}
8390
8391static SDValue buildFromShuffleMostly(SDValue Op, SelectionDAG &DAG) {
8392 MVT VT = Op.getSimpleValueType();
8393
8394 // Skip if insert_vec_elt is not supported.
8395 const TargetLowering &TLI = DAG.getTargetLoweringInfo();
8396 if (!TLI.isOperationLegalOrCustom(ISD::INSERT_VECTOR_ELT, VT))
8397 return SDValue();
8398
8399 SDLoc DL(Op);
8400 unsigned NumElems = Op.getNumOperands();
8401
8402 SDValue VecIn1;
8403 SDValue VecIn2;
8404 SmallVector<unsigned, 4> InsertIndices;
8405 SmallVector<int, 8> Mask(NumElems, -1);
8406
8407 for (unsigned i = 0; i != NumElems; ++i) {
8408 unsigned Opc = Op.getOperand(i).getOpcode();
8409
8410 if (Opc == ISD::UNDEF)
8411 continue;
8412
8413 if (Opc != ISD::EXTRACT_VECTOR_ELT) {
8414      // Quit if more than 1 element needs inserting.
8415 if (InsertIndices.size() > 1)
8416 return SDValue();
8417
8418 InsertIndices.push_back(i);
8419 continue;
8420 }
8421
8422 SDValue ExtractedFromVec = Op.getOperand(i).getOperand(0);
8423 SDValue ExtIdx = Op.getOperand(i).getOperand(1);
8424
8425 // Quit if non-constant index.
8426 if (!isa<ConstantSDNode>(ExtIdx))
8427 return SDValue();
8428 int Idx = getUnderlyingExtractedFromVec(ExtractedFromVec, ExtIdx);
8429
8430 // Quit if extracted from vector of different type.
8431 if (ExtractedFromVec.getValueType() != VT)
8432 return SDValue();
8433
8434 if (!VecIn1.getNode())
8435 VecIn1 = ExtractedFromVec;
8436 else if (VecIn1 != ExtractedFromVec) {
8437 if (!VecIn2.getNode())
8438 VecIn2 = ExtractedFromVec;
8439 else if (VecIn2 != ExtractedFromVec)
8440 // Quit if more than 2 vectors to shuffle
8441 return SDValue();
8442 }
8443
8444 if (ExtractedFromVec == VecIn1)
8445 Mask[i] = Idx;
8446 else if (ExtractedFromVec == VecIn2)
8447 Mask[i] = Idx + NumElems;
8448 }
8449
8450 if (!VecIn1.getNode())
8451 return SDValue();
8452
8453 VecIn2 = VecIn2.getNode() ? VecIn2 : DAG.getUNDEF(VT);
8454 SDValue NV = DAG.getVectorShuffle(VT, DL, VecIn1, VecIn2, Mask);
8455
8456 for (unsigned Idx : InsertIndices)
8457 NV = DAG.getNode(ISD::INSERT_VECTOR_ELT, DL, VT, NV, Op.getOperand(Idx),
8458 DAG.getIntPtrConstant(Idx, DL));
8459
8460 return NV;
8461}
8462
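// ConvertI1VectorToInteger (below) packs a constant vXi1 build_vector into an
// integer whose bit 'idx' holds vector element 'idx'. For example
// (illustrative only), the v8i1 constant <1,0,1,1,0,0,0,1> becomes the i8
// value 0x8D.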
8463static SDValue ConvertI1VectorToInteger(SDValue Op, SelectionDAG &DAG) {
8464  assert(ISD::isBuildVectorOfConstantSDNodes(Op.getNode()) &&
8465         Op.getScalarValueSizeInBits() == 1 &&
8466         "Can not convert non-constant vector");
8467 uint64_t Immediate = 0;
8468 for (unsigned idx = 0, e = Op.getNumOperands(); idx < e; ++idx) {
8469 SDValue In = Op.getOperand(idx);
8470 if (!In.isUndef())
8471 Immediate |= (cast<ConstantSDNode>(In)->getZExtValue() & 0x1) << idx;
8472 }
8473 SDLoc dl(Op);
8474 MVT VT = MVT::getIntegerVT(std::max((int)Op.getValueSizeInBits(), 8));
8475 return DAG.getConstant(Immediate, dl, VT);
8476}
8477// Lower BUILD_VECTOR operation for v8i1 and v16i1 types.
8478static SDValue LowerBUILD_VECTORvXi1(SDValue Op, SelectionDAG &DAG,
8479 const X86Subtarget &Subtarget) {
8480
8481 MVT VT = Op.getSimpleValueType();
8482  assert((VT.getVectorElementType() == MVT::i1) &&
8483         "Unexpected type in LowerBUILD_VECTORvXi1!");
8484
8485 SDLoc dl(Op);
8486 if (ISD::isBuildVectorAllZeros(Op.getNode()) ||
8487 ISD::isBuildVectorAllOnes(Op.getNode()))
8488 return Op;
8489
8490 uint64_t Immediate = 0;
8491 SmallVector<unsigned, 16> NonConstIdx;
8492 bool IsSplat = true;
8493 bool HasConstElts = false;
8494 int SplatIdx = -1;
8495 for (unsigned idx = 0, e = Op.getNumOperands(); idx < e; ++idx) {
8496 SDValue In = Op.getOperand(idx);
8497 if (In.isUndef())
8498 continue;
8499 if (!isa<ConstantSDNode>(In))
8500 NonConstIdx.push_back(idx);
8501 else {
8502 Immediate |= (cast<ConstantSDNode>(In)->getZExtValue() & 0x1) << idx;
8503 HasConstElts = true;
8504 }
8505 if (SplatIdx < 0)
8506 SplatIdx = idx;
8507 else if (In != Op.getOperand(SplatIdx))
8508 IsSplat = false;
8509 }
8510
8511  // For a splat, use "(select i1 splat_elt, all-ones, all-zeroes)".
8512 if (IsSplat) {
8513 // The build_vector allows the scalar element to be larger than the vector
8514 // element type. We need to mask it to use as a condition unless we know
8515 // the upper bits are zero.
8516 // FIXME: Use computeKnownBits instead of checking specific opcode?
8517 SDValue Cond = Op.getOperand(SplatIdx);
8518    assert(Cond.getValueType() == MVT::i8 && "Unexpected VT!");
8519 if (Cond.getOpcode() != ISD::SETCC)
8520 Cond = DAG.getNode(ISD::AND, dl, MVT::i8, Cond,
8521 DAG.getConstant(1, dl, MVT::i8));
8522 return DAG.getSelect(dl, VT, Cond,
8523 DAG.getConstant(1, dl, VT),
8524 DAG.getConstant(0, dl, VT));
8525 }
8526
8527  // Insert the non-constant elements one by one.
8528 SDValue DstVec;
8529 if (HasConstElts) {
8530 if (VT == MVT::v64i1 && !Subtarget.is64Bit()) {
8531 SDValue ImmL = DAG.getConstant(Lo_32(Immediate), dl, MVT::i32);
8532 SDValue ImmH = DAG.getConstant(Hi_32(Immediate), dl, MVT::i32);
8533 ImmL = DAG.getBitcast(MVT::v32i1, ImmL);
8534 ImmH = DAG.getBitcast(MVT::v32i1, ImmH);
8535 DstVec = DAG.getNode(ISD::CONCAT_VECTORS, dl, MVT::v64i1, ImmL, ImmH);
8536 } else {
8537 MVT ImmVT = MVT::getIntegerVT(std::max(VT.getSizeInBits(), 8U));
8538 SDValue Imm = DAG.getConstant(Immediate, dl, ImmVT);
8539 MVT VecVT = VT.getSizeInBits() >= 8 ? VT : MVT::v8i1;
8540 DstVec = DAG.getBitcast(VecVT, Imm);
8541 DstVec = DAG.getNode(ISD::EXTRACT_SUBVECTOR, dl, VT, DstVec,
8542 DAG.getIntPtrConstant(0, dl));
8543 }
8544 } else
8545 DstVec = DAG.getUNDEF(VT);
8546
8547 for (unsigned i = 0, e = NonConstIdx.size(); i != e; ++i) {
8548 unsigned InsertIdx = NonConstIdx[i];
8549 DstVec = DAG.getNode(ISD::INSERT_VECTOR_ELT, dl, VT, DstVec,
8550 Op.getOperand(InsertIdx),
8551 DAG.getIntPtrConstant(InsertIdx, dl));
8552 }
8553 return DstVec;
8554}
8555
8556/// This is a helper function of LowerToHorizontalOp().
8557/// This function checks that the build_vector \p N in input implements a
8558/// 128-bit partial horizontal operation on a 256-bit vector, but that operation
8559/// may not match the layout of an x86 256-bit horizontal instruction.
8560/// In other words, if this returns true, then some extraction/insertion will
8561/// be required to produce a valid horizontal instruction.
8562///
8563/// Parameter \p Opcode defines the kind of horizontal operation to match.
8564/// For example, if \p Opcode is equal to ISD::ADD, then this function
8565/// checks if \p N implements a horizontal arithmetic add; if instead \p Opcode
8566/// is equal to ISD::SUB, then this function checks if this is a horizontal
8567/// arithmetic sub.
8568///
8569/// This function only analyzes elements of \p N whose indices are
8570/// in range [BaseIdx, LastIdx).
8571///
8572/// TODO: This function was originally used to match both real and fake partial
8573/// horizontal operations, but the index-matching logic is incorrect for that.
8574/// See the corrected implementation in isHopBuildVector(). Can we reduce this
8575/// code because it is only used for partial h-op matching now?
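/// As an illustrative sketch (A and B are hypothetical v8i32 vectors): with
/// \p Opcode == ISD::ADD, \p BaseIdx == 0 and \p LastIdx == 4, the first four
/// build_vector operands
///   (add (extractelt A, 0), (extractelt A, 1)),
///   (add (extractelt A, 2), (extractelt A, 3)),
///   (add (extractelt B, 0), (extractelt B, 1)),
///   (add (extractelt B, 2), (extractelt B, 3))
/// match, and \p V0 is set to A and \p V1 to B.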
8576static bool isHorizontalBinOpPart(const BuildVectorSDNode *N, unsigned Opcode,
8577 SelectionDAG &DAG,
8578 unsigned BaseIdx, unsigned LastIdx,
8579 SDValue &V0, SDValue &V1) {
8580 EVT VT = N->getValueType(0);
8581  assert(VT.is256BitVector() && "Only use for matching partial 256-bit h-ops");
8582  assert(BaseIdx * 2 <= LastIdx && "Invalid Indices in input!");
8583  assert(VT.isVector() && VT.getVectorNumElements() >= LastIdx &&
8584         "Invalid Vector in input!");
8585
8586 bool IsCommutable = (Opcode == ISD::ADD || Opcode == ISD::FADD);
8587 bool CanFold = true;
8588 unsigned ExpectedVExtractIdx = BaseIdx;
8589 unsigned NumElts = LastIdx - BaseIdx;
8590 V0 = DAG.getUNDEF(VT);
8591 V1 = DAG.getUNDEF(VT);
8592
8593 // Check if N implements a horizontal binop.
8594 for (unsigned i = 0, e = NumElts; i != e && CanFold; ++i) {
8595 SDValue Op = N->getOperand(i + BaseIdx);
8596
8597 // Skip UNDEFs.
8598 if (Op->isUndef()) {
8599 // Update the expected vector extract index.
8600 if (i * 2 == NumElts)
8601 ExpectedVExtractIdx = BaseIdx;
8602 ExpectedVExtractIdx += 2;
8603 continue;
8604 }
8605
8606 CanFold = Op->getOpcode() == Opcode && Op->hasOneUse();
8607
8608 if (!CanFold)
8609 break;
8610
8611 SDValue Op0 = Op.getOperand(0);
8612 SDValue Op1 = Op.getOperand(1);
8613
8614 // Try to match the following pattern:
8615 // (BINOP (extract_vector_elt A, I), (extract_vector_elt A, I+1))
8616 CanFold = (Op0.getOpcode() == ISD::EXTRACT_VECTOR_ELT &&
8617 Op1.getOpcode() == ISD::EXTRACT_VECTOR_ELT &&
8618 Op0.getOperand(0) == Op1.getOperand(0) &&
8619 isa<ConstantSDNode>(Op0.getOperand(1)) &&
8620 isa<ConstantSDNode>(Op1.getOperand(1)));
8621 if (!CanFold)
8622 break;
8623
8624 unsigned I0 = cast<ConstantSDNode>(Op0.getOperand(1))->getZExtValue();
8625 unsigned I1 = cast<ConstantSDNode>(Op1.getOperand(1))->getZExtValue();
8626
8627 if (i * 2 < NumElts) {
8628 if (V0.isUndef()) {
8629 V0 = Op0.getOperand(0);
8630 if (V0.getValueType() != VT)
8631 return false;
8632 }
8633 } else {
8634 if (V1.isUndef()) {
8635 V1 = Op0.getOperand(0);
8636 if (V1.getValueType() != VT)
8637 return false;
8638 }
8639 if (i * 2 == NumElts)
8640 ExpectedVExtractIdx = BaseIdx;
8641 }
8642
8643 SDValue Expected = (i * 2 < NumElts) ? V0 : V1;
8644 if (I0 == ExpectedVExtractIdx)
8645 CanFold = I1 == I0 + 1 && Op0.getOperand(0) == Expected;
8646 else if (IsCommutable && I1 == ExpectedVExtractIdx) {
8647 // Try to match the following dag sequence:
8648 // (BINOP (extract_vector_elt A, I+1), (extract_vector_elt A, I))
8649 CanFold = I0 == I1 + 1 && Op1.getOperand(0) == Expected;
8650 } else
8651 CanFold = false;
8652
8653 ExpectedVExtractIdx += 2;
8654 }
8655
8656 return CanFold;
8657}
8658
8659/// Emit a sequence of two 128-bit horizontal add/sub followed by
8660/// a concat_vector.
8661///
8662/// This is a helper function of LowerToHorizontalOp().
8663/// This function expects two 256-bit vectors called V0 and V1.
8664/// At first, each vector is split into two separate 128-bit vectors.
8665/// Then, the resulting 128-bit vectors are used to implement two
8666/// horizontal binary operations.
8667///
8668/// The kind of horizontal binary operation is defined by \p X86Opcode.
8669///
8670/// \p Mode specifies how the 128-bit parts of V0 and V1 are passed in input to
8671/// the two new horizontal binop.
8672/// When Mode is set, the first horizontal binop dag node takes as input
8673/// the lower 128 bits of V0 and the upper 128 bits of V0. The second
8674/// horizontal binop dag node takes as input the lower 128 bits of V1
8675/// and the upper 128 bits of V1.
8676/// Example:
8677/// HADD V0_LO, V0_HI
8678/// HADD V1_LO, V1_HI
8679///
8680/// Otherwise, the first horizontal binop dag node takes as input the lower
8681/// 128 bits of V0 and the lower 128 bits of V1, and the second horizontal binop
8682/// dag node takes the upper 128 bits of V0 and the upper 128 bits of V1.
8683/// Example:
8684/// HADD V0_LO, V1_LO
8685/// HADD V0_HI, V1_HI
8686///
8687/// If \p isUndefLO is set, then the algorithm propagates UNDEF to the lower
8688/// 128 bits of the result. If \p isUndefHI is set, then UNDEF is propagated to
8689/// the upper 128 bits of the result.
8690static SDValue ExpandHorizontalBinOp(const SDValue &V0, const SDValue &V1,
8691 const SDLoc &DL, SelectionDAG &DAG,
8692 unsigned X86Opcode, bool Mode,
8693 bool isUndefLO, bool isUndefHI) {
8694 MVT VT = V0.getSimpleValueType();
8695  assert(VT.is256BitVector() && VT == V1.getSimpleValueType() &&
8696         "Invalid nodes in input!");
8697
8698 unsigned NumElts = VT.getVectorNumElements();
8699 SDValue V0_LO = extract128BitVector(V0, 0, DAG, DL);
8700 SDValue V0_HI = extract128BitVector(V0, NumElts/2, DAG, DL);
8701 SDValue V1_LO = extract128BitVector(V1, 0, DAG, DL);
8702 SDValue V1_HI = extract128BitVector(V1, NumElts/2, DAG, DL);
8703 MVT NewVT = V0_LO.getSimpleValueType();
8704
8705 SDValue LO = DAG.getUNDEF(NewVT);
8706 SDValue HI = DAG.getUNDEF(NewVT);
8707
8708 if (Mode) {
8709 // Don't emit a horizontal binop if the result is expected to be UNDEF.
8710 if (!isUndefLO && !V0->isUndef())
8711 LO = DAG.getNode(X86Opcode, DL, NewVT, V0_LO, V0_HI);
8712 if (!isUndefHI && !V1->isUndef())
8713 HI = DAG.getNode(X86Opcode, DL, NewVT, V1_LO, V1_HI);
8714 } else {
8715 // Don't emit a horizontal binop if the result is expected to be UNDEF.
8716 if (!isUndefLO && (!V0_LO->isUndef() || !V1_LO->isUndef()))
8717 LO = DAG.getNode(X86Opcode, DL, NewVT, V0_LO, V1_LO);
8718
8719 if (!isUndefHI && (!V0_HI->isUndef() || !V1_HI->isUndef()))
8720 HI = DAG.getNode(X86Opcode, DL, NewVT, V0_HI, V1_HI);
8721 }
8722
8723 return DAG.getNode(ISD::CONCAT_VECTORS, DL, VT, LO, HI);
8724}
8725
8726/// Returns true iff \p BV builds a vector with the result equivalent to
8727/// the result of an ADDSUB/SUBADD operation.
8728/// If true is returned then the operands of ADDSUB = Opnd0 +- Opnd1
8729/// (SUBADD = Opnd0 -+ Opnd1) operation are written to the parameters
8730/// \p Opnd0 and \p Opnd1.
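/// As an illustrative sketch (A and B are hypothetical v4f32 vectors): a
/// build_vector of
///   (fsub (extractelt A, 0), (extractelt B, 0)),
///   (fadd (extractelt A, 1), (extractelt B, 1)),
///   (fsub (extractelt A, 2), (extractelt B, 2)),
///   (fadd (extractelt A, 3), (extractelt B, 3))
/// is recognized as ADDSUB with \p Opnd0 == A, \p Opnd1 == B and
/// \p IsSubAdd == false.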
8731static bool isAddSubOrSubAdd(const BuildVectorSDNode *BV,
8732 const X86Subtarget &Subtarget, SelectionDAG &DAG,
8733 SDValue &Opnd0, SDValue &Opnd1,
8734 unsigned &NumExtracts,
8735 bool &IsSubAdd) {
8736
8737 MVT VT = BV->getSimpleValueType(0);
8738 if (!Subtarget.hasSSE3() || !VT.isFloatingPoint())
8739 return false;
8740
8741 unsigned NumElts = VT.getVectorNumElements();
8742 SDValue InVec0 = DAG.getUNDEF(VT);
8743 SDValue InVec1 = DAG.getUNDEF(VT);
8744
8745 NumExtracts = 0;
8746
8747 // Odd-numbered elements in the input build vector are obtained from
8748 // adding/subtracting two integer/float elements.
8749 // Even-numbered elements in the input build vector are obtained from
8750 // subtracting/adding two integer/float elements.
8751 unsigned Opc[2] = {0, 0};
8752 for (unsigned i = 0, e = NumElts; i != e; ++i) {
8753 SDValue Op = BV->getOperand(i);
8754
8755 // Skip 'undef' values.
8756 unsigned Opcode = Op.getOpcode();
8757 if (Opcode == ISD::UNDEF)
8758 continue;
8759
8760 // Early exit if we found an unexpected opcode.
8761 if (Opcode != ISD::FADD && Opcode != ISD::FSUB)
8762 return false;
8763
8764 SDValue Op0 = Op.getOperand(0);
8765 SDValue Op1 = Op.getOperand(1);
8766
8767 // Try to match the following pattern:
8768 // (BINOP (extract_vector_elt A, i), (extract_vector_elt B, i))
8769 // Early exit if we cannot match that sequence.
8770 if (Op0.getOpcode() != ISD::EXTRACT_VECTOR_ELT ||
8771 Op1.getOpcode() != ISD::EXTRACT_VECTOR_ELT ||
8772 !isa<ConstantSDNode>(Op0.getOperand(1)) ||
8773 !isa<ConstantSDNode>(Op1.getOperand(1)) ||
8774 Op0.getOperand(1) != Op1.getOperand(1))
8775 return false;
8776
8777 unsigned I0 = cast<ConstantSDNode>(Op0.getOperand(1))->getZExtValue();
8778 if (I0 != i)
8779 return false;
8780
8781    // We found a valid add/sub node; make sure it's the same opcode as the
8782    // previous elements for this parity.
8783 if (Opc[i % 2] != 0 && Opc[i % 2] != Opcode)
8784 return false;
8785 Opc[i % 2] = Opcode;
8786
8787 // Update InVec0 and InVec1.
8788 if (InVec0.isUndef()) {
8789 InVec0 = Op0.getOperand(0);
8790 if (InVec0.getSimpleValueType() != VT)
8791 return false;
8792 }
8793 if (InVec1.isUndef()) {
8794 InVec1 = Op1.getOperand(0);
8795 if (InVec1.getSimpleValueType() != VT)
8796 return false;
8797 }
8798
8799 // Make sure that operands in input to each add/sub node always
8800    // come from the same pair of vectors.
8801 if (InVec0 != Op0.getOperand(0)) {
8802 if (Opcode == ISD::FSUB)
8803 return false;
8804
8805 // FADD is commutable. Try to commute the operands
8806 // and then test again.
8807 std::swap(Op0, Op1);
8808 if (InVec0 != Op0.getOperand(0))
8809 return false;
8810 }
8811
8812 if (InVec1 != Op1.getOperand(0))
8813 return false;
8814
8815 // Increment the number of extractions done.
8816 ++NumExtracts;
8817 }
8818
8819 // Ensure we have found an opcode for both parities and that they are
8820 // different. Don't try to fold this build_vector into an ADDSUB/SUBADD if the
8821 // inputs are undef.
8822 if (!Opc[0] || !Opc[1] || Opc[0] == Opc[1] ||
8823 InVec0.isUndef() || InVec1.isUndef())
8824 return false;
8825
8826 IsSubAdd = Opc[0] == ISD::FADD;
8827
8828 Opnd0 = InVec0;
8829 Opnd1 = InVec1;
8830 return true;
8831}
8832
8833/// Returns true if it is possible to fold MUL and an idiom that has already been
8834/// recognized as ADDSUB/SUBADD(\p Opnd0, \p Opnd1) into
8835/// FMADDSUB/FMSUBADD(x, y, \p Opnd1). If (and only if) true is returned, the
8836/// operands of FMADDSUB/FMSUBADD are written to parameters \p Opnd0, \p Opnd1, \p Opnd2.
8837///
8838/// Prior to calling this function it should be known that there is some
8839/// SDNode that potentially can be replaced with an X86ISD::ADDSUB operation
8840/// using \p Opnd0 and \p Opnd1 as operands. Also, this method is called
8841/// before replacement of such SDNode with ADDSUB operation. Thus the number
8842/// of \p Opnd0 uses is expected to be equal to 2.
8843/// For example, this function may be called for the following IR:
8844/// %AB = fmul fast <2 x double> %A, %B
8845/// %Sub = fsub fast <2 x double> %AB, %C
8846/// %Add = fadd fast <2 x double> %AB, %C
8847/// %Addsub = shufflevector <2 x double> %Sub, <2 x double> %Add,
8848/// <2 x i32> <i32 0, i32 3>
8849/// There is a def for %Addsub here, which potentially can be replaced by
8850/// X86ISD::ADDSUB operation:
8851/// %Addsub = X86ISD::ADDSUB %AB, %C
8852/// and such ADDSUB can further be replaced with FMADDSUB:
8853/// %Addsub = FMADDSUB %A, %B, %C.
8854///
8855/// The main reason why this method is called before the replacement of the
8856/// recognized ADDSUB idiom with ADDSUB operation is that such replacement
8857/// is illegal sometimes. E.g. 512-bit ADDSUB is not available, while 512-bit
8858/// FMADDSUB is.
8859static bool isFMAddSubOrFMSubAdd(const X86Subtarget &Subtarget,
8860 SelectionDAG &DAG,
8861 SDValue &Opnd0, SDValue &Opnd1, SDValue &Opnd2,
8862 unsigned ExpectedUses) {
8863 if (Opnd0.getOpcode() != ISD::FMUL ||
8864 !Opnd0->hasNUsesOfValue(ExpectedUses, 0) || !Subtarget.hasAnyFMA())
8865 return false;
8866
8867 // FIXME: These checks must match the similar ones in
8868 // DAGCombiner::visitFADDForFMACombine. It would be good to have one
8869 // function that would answer if it is Ok to fuse MUL + ADD to FMADD
8870 // or MUL + ADDSUB to FMADDSUB.
8871 const TargetOptions &Options = DAG.getTarget().Options;
8872 bool AllowFusion =
8873 (Options.AllowFPOpFusion == FPOpFusion::Fast || Options.UnsafeFPMath);
8874 if (!AllowFusion)
8875 return false;
8876
8877 Opnd2 = Opnd1;
8878 Opnd1 = Opnd0.getOperand(1);
8879 Opnd0 = Opnd0.getOperand(0);
8880
8881 return true;
8882}
8883
8884/// Try to fold a build_vector that performs an 'addsub', 'fmaddsub' or
8885/// 'fsubadd' operation into an X86ISD::ADDSUB, X86ISD::FMADDSUB or
8886/// X86ISD::FMSUBADD node accordingly.
8887static SDValue lowerToAddSubOrFMAddSub(const BuildVectorSDNode *BV,
8888 const X86Subtarget &Subtarget,
8889 SelectionDAG &DAG) {
8890 SDValue Opnd0, Opnd1;
8891 unsigned NumExtracts;
8892 bool IsSubAdd;
8893 if (!isAddSubOrSubAdd(BV, Subtarget, DAG, Opnd0, Opnd1, NumExtracts,
8894 IsSubAdd))
8895 return SDValue();
8896
8897 MVT VT = BV->getSimpleValueType(0);
8898 SDLoc DL(BV);
8899
8900 // Try to generate X86ISD::FMADDSUB node here.
8901 SDValue Opnd2;
8902 if (isFMAddSubOrFMSubAdd(Subtarget, DAG, Opnd0, Opnd1, Opnd2, NumExtracts)) {
8903 unsigned Opc = IsSubAdd ? X86ISD::FMSUBADD : X86ISD::FMADDSUB;
8904 return DAG.getNode(Opc, DL, VT, Opnd0, Opnd1, Opnd2);
8905 }
8906
8907 // We only support ADDSUB.
8908 if (IsSubAdd)
8909 return SDValue();
8910
8911 // Do not generate X86ISD::ADDSUB node for 512-bit types even though
8912 // the ADDSUB idiom has been successfully recognized. There are no known
8913 // X86 targets with 512-bit ADDSUB instructions!
8914 // 512-bit ADDSUB idiom recognition was needed only as part of FMADDSUB idiom
8915 // recognition.
8916 if (VT.is512BitVector())
8917 return SDValue();
8918
8919 return DAG.getNode(X86ISD::ADDSUB, DL, VT, Opnd0, Opnd1);
8920}
8921
8922static bool isHopBuildVector(const BuildVectorSDNode *BV, SelectionDAG &DAG,
8923 unsigned &HOpcode, SDValue &V0, SDValue &V1) {
8924 // Initialize outputs to known values.
8925 MVT VT = BV->getSimpleValueType(0);
8926 HOpcode = ISD::DELETED_NODE;
8927 V0 = DAG.getUNDEF(VT);
8928 V1 = DAG.getUNDEF(VT);
8929
8930 // x86 256-bit horizontal ops are defined in a non-obvious way. Each 128-bit
8931 // half of the result is calculated independently from the 128-bit halves of
8932 // the inputs, so that makes the index-checking logic below more complicated.
8933 unsigned NumElts = VT.getVectorNumElements();
8934 unsigned GenericOpcode = ISD::DELETED_NODE;
8935 unsigned Num128BitChunks = VT.is256BitVector() ? 2 : 1;
8936 unsigned NumEltsIn128Bits = NumElts / Num128BitChunks;
8937 unsigned NumEltsIn64Bits = NumEltsIn128Bits / 2;
8938 for (unsigned i = 0; i != Num128BitChunks; ++i) {
8939 for (unsigned j = 0; j != NumEltsIn128Bits; ++j) {
8940 // Ignore undef elements.
8941 SDValue Op = BV->getOperand(i * NumEltsIn128Bits + j);
8942 if (Op.isUndef())
8943 continue;
8944
8945 // If there's an opcode mismatch, we're done.
8946 if (HOpcode != ISD::DELETED_NODE && Op.getOpcode() != GenericOpcode)
8947 return false;
8948
8949 // Initialize horizontal opcode.
8950 if (HOpcode == ISD::DELETED_NODE) {
8951 GenericOpcode = Op.getOpcode();
8952 switch (GenericOpcode) {
8953 case ISD::ADD: HOpcode = X86ISD::HADD; break;
8954 case ISD::SUB: HOpcode = X86ISD::HSUB; break;
8955 case ISD::FADD: HOpcode = X86ISD::FHADD; break;
8956 case ISD::FSUB: HOpcode = X86ISD::FHSUB; break;
8957 default: return false;
8958 }
8959 }
8960
8961 SDValue Op0 = Op.getOperand(0);
8962 SDValue Op1 = Op.getOperand(1);
8963 if (Op0.getOpcode() != ISD::EXTRACT_VECTOR_ELT ||
8964 Op1.getOpcode() != ISD::EXTRACT_VECTOR_ELT ||
8965 Op0.getOperand(0) != Op1.getOperand(0) ||
8966 !isa<ConstantSDNode>(Op0.getOperand(1)) ||
8967 !isa<ConstantSDNode>(Op1.getOperand(1)) || !Op.hasOneUse())
8968 return false;
8969
8970 // The source vector is chosen based on which 64-bit half of the
8971 // destination vector is being calculated.
8972 if (j < NumEltsIn64Bits) {
8973 if (V0.isUndef())
8974 V0 = Op0.getOperand(0);
8975 } else {
8976 if (V1.isUndef())
8977 V1 = Op0.getOperand(0);
8978 }
8979
8980 SDValue SourceVec = (j < NumEltsIn64Bits) ? V0 : V1;
8981 if (SourceVec != Op0.getOperand(0))
8982 return false;
8983
8984 // op (extract_vector_elt A, I), (extract_vector_elt A, I+1)
8985 unsigned ExtIndex0 = Op0.getConstantOperandVal(1);
8986 unsigned ExtIndex1 = Op1.getConstantOperandVal(1);
8987 unsigned ExpectedIndex = i * NumEltsIn128Bits +
8988 (j % NumEltsIn64Bits) * 2;
8989 if (ExpectedIndex == ExtIndex0 && ExtIndex1 == ExtIndex0 + 1)
8990 continue;
8991
8992 // If this is not a commutative op, this does not match.
8993 if (GenericOpcode != ISD::ADD && GenericOpcode != ISD::FADD)
8994 return false;
8995
8996 // Addition is commutative, so try swapping the extract indexes.
8997 // op (extract_vector_elt A, I+1), (extract_vector_elt A, I)
8998 if (ExpectedIndex == ExtIndex1 && ExtIndex0 == ExtIndex1 + 1)
8999 continue;
9000
9001 // Extract indexes do not match horizontal requirement.
9002 return false;
9003 }
9004 }
9005 // We matched. Opcode and operands are returned by reference as arguments.
9006 return true;
9007}
9008
9009static SDValue getHopForBuildVector(const BuildVectorSDNode *BV,
9010 SelectionDAG &DAG, unsigned HOpcode,
9011 SDValue V0, SDValue V1) {
9012 // If either input vector is not the same size as the build vector,
9013 // extract/insert the low bits to the correct size.
9014 // This is free (examples: zmm --> xmm, xmm --> ymm).
9015 MVT VT = BV->getSimpleValueType(0);
9016 unsigned Width = VT.getSizeInBits();
9017 if (V0.getValueSizeInBits() > Width)
9018 V0 = extractSubVector(V0, 0, DAG, SDLoc(BV), Width);
9019 else if (V0.getValueSizeInBits() < Width)
9020 V0 = insertSubVector(DAG.getUNDEF(VT), V0, 0, DAG, SDLoc(BV), Width);
9021
9022 if (V1.getValueSizeInBits() > Width)
9023 V1 = extractSubVector(V1, 0, DAG, SDLoc(BV), Width);
9024 else if (V1.getValueSizeInBits() < Width)
9025 V1 = insertSubVector(DAG.getUNDEF(VT), V1, 0, DAG, SDLoc(BV), Width);
9026
9027 unsigned NumElts = VT.getVectorNumElements();
9028 APInt DemandedElts = APInt::getAllOnesValue(NumElts);
9029 for (unsigned i = 0; i != NumElts; ++i)
9030 if (BV->getOperand(i).isUndef())
9031 DemandedElts.clearBit(i);
9032
9033  // If we don't need the upper xmm, then perform as an xmm hop.
9034 unsigned HalfNumElts = NumElts / 2;
9035 if (VT.is256BitVector() && DemandedElts.lshr(HalfNumElts) == 0) {
9036 MVT HalfVT = VT.getHalfNumVectorElementsVT();
9037 V0 = extractSubVector(V0, 0, DAG, SDLoc(BV), 128);
9038 V1 = extractSubVector(V1, 0, DAG, SDLoc(BV), 128);
9039 SDValue Half = DAG.getNode(HOpcode, SDLoc(BV), HalfVT, V0, V1);
9040 return insertSubVector(DAG.getUNDEF(VT), Half, 0, DAG, SDLoc(BV), 256);
9041 }
9042
9043 return DAG.getNode(HOpcode, SDLoc(BV), VT, V0, V1);
9044}
9045
9046/// Lower BUILD_VECTOR to a horizontal add/sub operation if possible.
9047static SDValue LowerToHorizontalOp(const BuildVectorSDNode *BV,
9048 const X86Subtarget &Subtarget,
9049 SelectionDAG &DAG) {
9050 // We need at least 2 non-undef elements to make this worthwhile by default.
9051 unsigned NumNonUndefs =
9052 count_if(BV->op_values(), [](SDValue V) { return !V.isUndef(); });
9053 if (NumNonUndefs < 2)
9054 return SDValue();
9055
9056 // There are 4 sets of horizontal math operations distinguished by type:
9057 // int/FP at 128-bit/256-bit. Each type was introduced with a different
9058 // subtarget feature. Try to match those "native" patterns first.
9059 MVT VT = BV->getSimpleValueType(0);
9060 if (((VT == MVT::v4f32 || VT == MVT::v2f64) && Subtarget.hasSSE3()) ||
9061 ((VT == MVT::v8i16 || VT == MVT::v4i32) && Subtarget.hasSSSE3()) ||
9062 ((VT == MVT::v8f32 || VT == MVT::v4f64) && Subtarget.hasAVX()) ||
9063 ((VT == MVT::v16i16 || VT == MVT::v8i32) && Subtarget.hasAVX2())) {
9064 unsigned HOpcode;
9065 SDValue V0, V1;
9066 if (isHopBuildVector(BV, DAG, HOpcode, V0, V1))
9067 return getHopForBuildVector(BV, DAG, HOpcode, V0, V1);
9068 }
9069
9070 // Try harder to match 256-bit ops by using extract/concat.
9071 if (!Subtarget.hasAVX() || !VT.is256BitVector())
9072 return SDValue();
9073
9074  // Count the number of UNDEF operands in the input build_vector.
9075 unsigned NumElts = VT.getVectorNumElements();
9076 unsigned Half = NumElts / 2;
9077 unsigned NumUndefsLO = 0;
9078 unsigned NumUndefsHI = 0;
9079 for (unsigned i = 0, e = Half; i != e; ++i)
9080 if (BV->getOperand(i)->isUndef())
9081 NumUndefsLO++;
9082
9083 for (unsigned i = Half, e = NumElts; i != e; ++i)
9084 if (BV->getOperand(i)->isUndef())
9085 NumUndefsHI++;
9086
9087 SDLoc DL(BV);
9088 SDValue InVec0, InVec1;
9089 if (VT == MVT::v8i32 || VT == MVT::v16i16) {
9090 SDValue InVec2, InVec3;
9091 unsigned X86Opcode;
9092 bool CanFold = true;
9093
9094 if (isHorizontalBinOpPart(BV, ISD::ADD, DAG, 0, Half, InVec0, InVec1) &&
9095 isHorizontalBinOpPart(BV, ISD::ADD, DAG, Half, NumElts, InVec2,
9096 InVec3) &&
9097 ((InVec0.isUndef() || InVec2.isUndef()) || InVec0 == InVec2) &&
9098 ((InVec1.isUndef() || InVec3.isUndef()) || InVec1 == InVec3))
9099 X86Opcode = X86ISD::HADD;
9100 else if (isHorizontalBinOpPart(BV, ISD::SUB, DAG, 0, Half, InVec0,
9101 InVec1) &&
9102 isHorizontalBinOpPart(BV, ISD::SUB, DAG, Half, NumElts, InVec2,
9103 InVec3) &&
9104 ((InVec0.isUndef() || InVec2.isUndef()) || InVec0 == InVec2) &&
9105 ((InVec1.isUndef() || InVec3.isUndef()) || InVec1 == InVec3))
9106 X86Opcode = X86ISD::HSUB;
9107 else
9108 CanFold = false;
9109
9110 if (CanFold) {
9111 // Do not try to expand this build_vector into a pair of horizontal
9112 // add/sub if we can emit a pair of scalar add/sub.
9113 if (NumUndefsLO + 1 == Half || NumUndefsHI + 1 == Half)
9114 return SDValue();
9115
9116 // Convert this build_vector into a pair of horizontal binops followed by
9117 // a concat vector. We must adjust the outputs from the partial horizontal
9118 // matching calls above to account for undefined vector halves.
9119 SDValue V0 = InVec0.isUndef() ? InVec2 : InVec0;
9120 SDValue V1 = InVec1.isUndef() ? InVec3 : InVec1;
9121      assert((!V0.isUndef() || !V1.isUndef()) && "Horizontal-op of undefs?");
9122 bool isUndefLO = NumUndefsLO == Half;
9123 bool isUndefHI = NumUndefsHI == Half;
9124 return ExpandHorizontalBinOp(V0, V1, DL, DAG, X86Opcode, false, isUndefLO,
9125 isUndefHI);
9126 }
9127 }
9128
9129 if (VT == MVT::v8f32 || VT == MVT::v4f64 || VT == MVT::v8i32 ||
9130 VT == MVT::v16i16) {
9131 unsigned X86Opcode;
9132 if (isHorizontalBinOpPart(BV, ISD::ADD, DAG, 0, NumElts, InVec0, InVec1))
9133 X86Opcode = X86ISD::HADD;
9134 else if (isHorizontalBinOpPart(BV, ISD::SUB, DAG, 0, NumElts, InVec0,
9135 InVec1))
9136 X86Opcode = X86ISD::HSUB;
9137 else if (isHorizontalBinOpPart(BV, ISD::FADD, DAG, 0, NumElts, InVec0,
9138 InVec1))
9139 X86Opcode = X86ISD::FHADD;
9140 else if (isHorizontalBinOpPart(BV, ISD::FSUB, DAG, 0, NumElts, InVec0,
9141 InVec1))
9142 X86Opcode = X86ISD::FHSUB;
9143 else
9144 return SDValue();
9145
9146 // Don't try to expand this build_vector into a pair of horizontal add/sub
9147 // if we can simply emit a pair of scalar add/sub.
9148 if (NumUndefsLO + 1 == Half || NumUndefsHI + 1 == Half)
9149 return SDValue();
9150
9151 // Convert this build_vector into two horizontal add/sub followed by
9152 // a concat vector.
9153 bool isUndefLO = NumUndefsLO == Half;
9154 bool isUndefHI = NumUndefsHI == Half;
9155 return ExpandHorizontalBinOp(InVec0, InVec1, DL, DAG, X86Opcode, true,
9156 isUndefLO, isUndefHI);
9157 }
9158
9159 return SDValue();
9160}
9161
9162/// If a BUILD_VECTOR's source elements all apply the same bit operation and
9163/// one of their operands is constant, lower to a pair of BUILD_VECTORs and
9164/// just apply the bit operation to the vectors.
9165/// NOTE: It's not in our interest to start making a general-purpose vectorizer
9166/// from this, but enough scalar bit operations are created by the later
9167/// legalization + scalarization stages to need basic support.
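/// As an illustrative sketch (x0..x3 are hypothetical scalars): a v4i32
/// build_vector of
///   (shl x0, 5), (shl x1, 5), (shl x2, 5), (shl x3, 5)
/// becomes (shl (build_vector x0, x1, x2, x3), (build_vector 5, 5, 5, 5)).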
9168static SDValue lowerBuildVectorToBitOp(BuildVectorSDNode *Op,
9169 SelectionDAG &DAG) {
9170 SDLoc DL(Op);
9171 MVT VT = Op->getSimpleValueType(0);
9172 unsigned NumElems = VT.getVectorNumElements();
9173 const TargetLowering &TLI = DAG.getTargetLoweringInfo();
9174
9175 // Check that all elements have the same opcode.
9176 // TODO: Should we allow UNDEFS and if so how many?
9177 unsigned Opcode = Op->getOperand(0).getOpcode();
9178 for (unsigned i = 1; i < NumElems; ++i)
9179 if (Opcode != Op->getOperand(i).getOpcode())
9180 return SDValue();
9181
9182 // TODO: We may be able to add support for other Ops (ADD/SUB + shifts).
9183 bool IsShift = false;
9184 switch (Opcode) {
9185 default:
9186 return SDValue();
9187 case ISD::SHL:
9188 case ISD::SRL:
9189 case ISD::SRA:
9190 IsShift = true;
9191 break;
9192 case ISD::AND:
9193 case ISD::XOR:
9194 case ISD::OR:
9195 // Don't do this if the buildvector is a splat - we'd replace one
9196 // constant with an entire vector.
9197 if (Op->getSplatValue())
9198 return SDValue();
9199 if (!TLI.isOperationLegalOrPromote(Opcode, VT))
9200 return SDValue();
9201 break;
9202 }
9203
9204 SmallVector<SDValue, 4> LHSElts, RHSElts;
9205 for (SDValue Elt : Op->ops()) {
9206 SDValue LHS = Elt.getOperand(0);
9207 SDValue RHS = Elt.getOperand(1);
9208
9209 // We expect the canonicalized RHS operand to be the constant.
9210 if (!isa<ConstantSDNode>(RHS))
9211 return SDValue();
9212
9213 // Extend shift amounts.
9214 if (RHS.getValueSizeInBits() != VT.getScalarSizeInBits()) {
9215 if (!IsShift)
9216 return SDValue();
9217 RHS = DAG.getZExtOrTrunc(RHS, DL, VT.getScalarType());
9218 }
9219
9220 LHSElts.push_back(LHS);
9221 RHSElts.push_back(RHS);
9222 }
9223
9224 // Limit to shifts by uniform immediates.
9225 // TODO: Only accept vXi8/vXi64 special cases?
9226 // TODO: Permit non-uniform XOP/AVX2/MULLO cases?
9227 if (IsShift && any_of(RHSElts, [&](SDValue V) { return RHSElts[0] != V; }))
9228 return SDValue();
9229
9230 SDValue LHS = DAG.getBuildVector(VT, DL, LHSElts);
9231 SDValue RHS = DAG.getBuildVector(VT, DL, RHSElts);
9232 return DAG.getNode(Opcode, DL, VT, LHS, RHS);
9233}
9234
9235/// Create a vector constant without a load. SSE/AVX provide the bare minimum
9236/// functionality to do this, so it's all zeros, all ones, or some derivation
9237/// that is cheap to calculate.
9238static SDValue materializeVectorConstant(SDValue Op, SelectionDAG &DAG,
9239 const X86Subtarget &Subtarget) {
9240 SDLoc DL(Op);
9241 MVT VT = Op.getSimpleValueType();
9242
9243 // Vectors containing all zeros can be matched by pxor and xorps.
9244 if (ISD::isBuildVectorAllZeros(Op.getNode()))
9245 return Op;
9246
9247 // Vectors containing all ones can be matched by pcmpeqd on 128-bit width
9248 // vectors or broken into v4i32 operations on 256-bit vectors. AVX2 can use
9249 // vpcmpeqd on 256-bit vectors.
9250 if (Subtarget.hasSSE2() && ISD::isBuildVectorAllOnes(Op.getNode())) {
9251 if (VT == MVT::v4i32 || VT == MVT::v8i32 || VT == MVT::v16i32)
9252 return Op;
9253
9254 return getOnesVector(VT, DAG, DL);
9255 }
9256
9257 return SDValue();
9258}
9259
9260/// Look for opportunities to create a VPERMV/VPERMILPV/PSHUFB variable permute
9261/// from a vector of source values and a vector of extraction indices.
9262/// The vectors might be manipulated to match the type of the permute op.
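/// As an illustrative sketch: for VT == MVT::v4i32 on an AVX target, the code
/// below picks Opcode == X86ISD::VPERMILPV with ShuffleVT == MVT::v4f32, i.e.
/// the variable permute is performed as a vpermilps on the bitcast source.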
9263static SDValue createVariablePermute(MVT VT, SDValue SrcVec, SDValue IndicesVec,
9264 SDLoc &DL, SelectionDAG &DAG,
9265 const X86Subtarget &Subtarget) {
9266 MVT ShuffleVT = VT;
9267 EVT IndicesVT = EVT(VT).changeVectorElementTypeToInteger();
9268 unsigned NumElts = VT.getVectorNumElements();
9269 unsigned SizeInBits = VT.getSizeInBits();
9270
9271 // Adjust IndicesVec to match VT size.
9272  assert(IndicesVec.getValueType().getVectorNumElements() >= NumElts &&
9273         "Illegal variable permute mask size");
9274 if (IndicesVec.getValueType().getVectorNumElements() > NumElts)
9275 IndicesVec = extractSubVector(IndicesVec, 0, DAG, SDLoc(IndicesVec),
9276 NumElts * VT.getScalarSizeInBits());
9277 IndicesVec = DAG.getZExtOrTrunc(IndicesVec, SDLoc(IndicesVec), IndicesVT);
9278
9279  // Handle a SrcVec whose type doesn't match VT.
9280 if (SrcVec.getValueSizeInBits() != SizeInBits) {
9281 if ((SrcVec.getValueSizeInBits() % SizeInBits) == 0) {
9282 // Handle larger SrcVec by treating it as a larger permute.
9283 unsigned Scale = SrcVec.getValueSizeInBits() / SizeInBits;
9284 VT = MVT::getVectorVT(VT.getScalarType(), Scale * NumElts);
9285 IndicesVT = EVT(VT).changeVectorElementTypeToInteger();
9286 IndicesVec = widenSubVector(IndicesVT.getSimpleVT(), IndicesVec, false,
9287 Subtarget, DAG, SDLoc(IndicesVec));
9288 return extractSubVector(
9289 createVariablePermute(VT, SrcVec, IndicesVec, DL, DAG, Subtarget), 0,
9290 DAG, DL, SizeInBits);
9291 } else if (SrcVec.getValueSizeInBits() < SizeInBits) {
9292 // Widen smaller SrcVec to match VT.
9293 SrcVec = widenSubVector(VT, SrcVec, false, Subtarget, DAG, SDLoc(SrcVec));
9294 } else
9295 return SDValue();
9296 }
9297
9298 auto ScaleIndices = [&DAG](SDValue Idx, uint64_t Scale) {
9299    assert(isPowerOf2_64(Scale) && "Illegal variable permute shuffle scale");
9300 EVT SrcVT = Idx.getValueType();
9301 unsigned NumDstBits = SrcVT.getScalarSizeInBits() / Scale;
9302 uint64_t IndexScale = 0;
9303 uint64_t IndexOffset = 0;
9304
9305 // If we're scaling a smaller permute op, then we need to repeat the
9306 // indices, scaling and offsetting them as well.
9307 // e.g. v4i32 -> v16i8 (Scale = 4)
9308 // IndexScale = v4i32 Splat(4 << 24 | 4 << 16 | 4 << 8 | 4)
9309 // IndexOffset = v4i32 Splat(3 << 24 | 2 << 16 | 1 << 8 | 0)
9310 for (uint64_t i = 0; i != Scale; ++i) {
9311 IndexScale |= Scale << (i * NumDstBits);
9312 IndexOffset |= i << (i * NumDstBits);
9313 }
9314
9315 Idx = DAG.getNode(ISD::MUL, SDLoc(Idx), SrcVT, Idx,
9316 DAG.getConstant(IndexScale, SDLoc(Idx), SrcVT));
9317 Idx = DAG.getNode(ISD::ADD, SDLoc(Idx), SrcVT, Idx,
9318 DAG.getConstant(IndexOffset, SDLoc(Idx), SrcVT));
9319 return Idx;
9320 };
9321
9322 unsigned Opcode = 0;
9323 switch (VT.SimpleTy) {
9324 default:
9325 break;
9326 case MVT::v16i8:
9327 if (Subtarget.hasSSSE3())
9328 Opcode = X86ISD::PSHUFB;
9329 break;
9330 case MVT::v8i16:
9331 if (Subtarget.hasVLX() && Subtarget.hasBWI())
9332 Opcode = X86ISD::VPERMV;
9333 else if (Subtarget.hasSSSE3()) {
9334 Opcode = X86ISD::PSHUFB;
9335 ShuffleVT = MVT::v16i8;
9336 }
9337 break;
9338 case MVT::v4f32:
9339 case MVT::v4i32:
9340 if (Subtarget.hasAVX()) {
9341 Opcode = X86ISD::VPERMILPV;
9342 ShuffleVT = MVT::v4f32;
9343 } else if (Subtarget.hasSSSE3()) {
9344 Opcode = X86ISD::PSHUFB;
9345 ShuffleVT = MVT::v16i8;
9346 }
9347 break;
9348 case MVT::v2f64:
9349 case MVT::v2i64:
9350 if (Subtarget.hasAVX()) {
9351 // VPERMILPD selects using bit#1 of the index vector, so scale IndicesVec.
9352 IndicesVec = DAG.getNode(ISD::ADD, DL, IndicesVT, IndicesVec, IndicesVec);
9353 Opcode = X86ISD::VPERMILPV;
9354 ShuffleVT = MVT::v2f64;
9355 } else if (Subtarget.hasSSE41()) {
9356 // SSE41 can compare v2i64 - select between indices 0 and 1.
9357 return DAG.getSelectCC(
9358 DL, IndicesVec,
9359 getZeroVector(IndicesVT.getSimpleVT(), Subtarget, DAG, DL),
9360 DAG.getVectorShuffle(VT, DL, SrcVec, SrcVec, {0, 0}),
9361 DAG.getVectorShuffle(VT, DL, SrcVec, SrcVec, {1, 1}),
9362 ISD::CondCode::SETEQ);
9363 }
9364 break;
9365 case MVT::v32i8:
9366 if (Subtarget.hasVLX() && Subtarget.hasVBMI())
9367 Opcode = X86ISD::VPERMV;
9368 else if (Subtarget.hasXOP()) {
9369 SDValue LoSrc = extract128BitVector(SrcVec, 0, DAG, DL);
9370 SDValue HiSrc = extract128BitVector(SrcVec, 16, DAG, DL);
9371 SDValue LoIdx = extract128BitVector(IndicesVec, 0, DAG, DL);
9372 SDValue HiIdx = extract128BitVector(IndicesVec, 16, DAG, DL);
9373 return DAG.getNode(
9374 ISD::CONCAT_VECTORS, DL, VT,
9375 DAG.getNode(X86ISD::VPPERM, DL, MVT::v16i8, LoSrc, HiSrc, LoIdx),
9376 DAG.getNode(X86ISD::VPPERM, DL, MVT::v16i8, LoSrc, HiSrc, HiIdx));
9377 } else if (Subtarget.hasAVX()) {
9378 SDValue Lo = extract128BitVector(SrcVec, 0, DAG, DL);
9379 SDValue Hi = extract128BitVector(SrcVec, 16, DAG, DL);
9380 SDValue LoLo = DAG.getNode(ISD::CONCAT_VECTORS, DL, VT, Lo, Lo);
9381 SDValue HiHi = DAG.getNode(ISD::CONCAT_VECTORS, DL, VT, Hi, Hi);
9382 auto PSHUFBBuilder = [](SelectionDAG &DAG, const SDLoc &DL,
9383 ArrayRef<SDValue> Ops) {
9384 // Permute Lo and Hi and then select based on index range.
9385 // This works as PSHUFB uses bits[3:0] to permute elements and we don't
9386 // care about bit[7] as it's just an index vector.
9387 SDValue Idx = Ops[2];
9388 EVT VT = Idx.getValueType();
9389 return DAG.getSelectCC(DL, Idx, DAG.getConstant(15, DL, VT),
9390 DAG.getNode(X86ISD::PSHUFB, DL, VT, Ops[1], Idx),
9391 DAG.getNode(X86ISD::PSHUFB, DL, VT, Ops[0], Idx),
9392 ISD::CondCode::SETGT);
9393 };
9394 SDValue Ops[] = {LoLo, HiHi, IndicesVec};
9395 return SplitOpsAndApply(DAG, Subtarget, DL, MVT::v32i8, Ops,
9396 PSHUFBBuilder);
9397 }
9398 break;
9399 case MVT::v16i16:
9400 if (Subtarget.hasVLX() && Subtarget.hasBWI())
9401 Opcode = X86ISD::VPERMV;
9402 else if (Subtarget.hasAVX()) {
9403 // Scale to v32i8 and perform as v32i8.
9404 IndicesVec = ScaleIndices(IndicesVec, 2);
9405 return DAG.getBitcast(
9406 VT, createVariablePermute(
9407 MVT::v32i8, DAG.getBitcast(MVT::v32i8, SrcVec),
9408 DAG.getBitcast(MVT::v32i8, IndicesVec), DL, DAG, Subtarget));
9409 }
9410 break;
9411 case MVT::v8f32:
9412 case MVT::v8i32:
9413 if (Subtarget.hasAVX2())
9414 Opcode = X86ISD::VPERMV;
9415 else if (Subtarget.hasAVX()) {
9416 SrcVec = DAG.getBitcast(MVT::v8f32, SrcVec);
9417 SDValue LoLo = DAG.getVectorShuffle(MVT::v8f32, DL, SrcVec, SrcVec,
9418 {0, 1, 2, 3, 0, 1, 2, 3});
9419 SDValue HiHi = DAG.getVectorShuffle(MVT::v8f32, DL, SrcVec, SrcVec,
9420 {4, 5, 6, 7, 4, 5, 6, 7});
9421 if (Subtarget.hasXOP())
9422 return DAG.getBitcast(
9423 VT, DAG.getNode(X86ISD::VPERMIL2, DL, MVT::v8f32, LoLo, HiHi,
9424 IndicesVec, DAG.getTargetConstant(0, DL, MVT::i8)));
9425 // Permute Lo and Hi and then select based on index range.
9426 // This works as VPERMILPS only uses index bits[0:1] to permute elements.
9427 SDValue Res = DAG.getSelectCC(
9428 DL, IndicesVec, DAG.getConstant(3, DL, MVT::v8i32),
9429 DAG.getNode(X86ISD::VPERMILPV, DL, MVT::v8f32, HiHi, IndicesVec),
9430 DAG.getNode(X86ISD::VPERMILPV, DL, MVT::v8f32, LoLo, IndicesVec),
9431 ISD::CondCode::SETGT);
9432 return DAG.getBitcast(VT, Res);
9433 }
9434 break;
9435 case MVT::v4i64:
9436 case MVT::v4f64:
9437 if (Subtarget.hasAVX512()) {
9438 if (!Subtarget.hasVLX()) {
9439 MVT WidenSrcVT = MVT::getVectorVT(VT.getScalarType(), 8);
9440 SrcVec = widenSubVector(WidenSrcVT, SrcVec, false, Subtarget, DAG,
9441 SDLoc(SrcVec));
9442 IndicesVec = widenSubVector(MVT::v8i64, IndicesVec, false, Subtarget,
9443 DAG, SDLoc(IndicesVec));
9444 SDValue Res = createVariablePermute(WidenSrcVT, SrcVec, IndicesVec, DL,
9445 DAG, Subtarget);
9446 return extract256BitVector(Res, 0, DAG, DL);
9447 }
9448 Opcode = X86ISD::VPERMV;
9449 } else if (Subtarget.hasAVX()) {
9450 SrcVec = DAG.getBitcast(MVT::v4f64, SrcVec);
9451 SDValue LoLo =
9452 DAG.getVectorShuffle(MVT::v4f64, DL, SrcVec, SrcVec, {0, 1, 0, 1});
9453 SDValue HiHi =
9454 DAG.getVectorShuffle(MVT::v4f64, DL, SrcVec, SrcVec, {2, 3, 2, 3});
9455 // VPERMIL2PD selects with bit#1 of the index vector, so scale IndicesVec.
9456 IndicesVec = DAG.getNode(ISD::ADD, DL, IndicesVT, IndicesVec, IndicesVec);
9457 if (Subtarget.hasXOP())
9458 return DAG.getBitcast(
9459 VT, DAG.getNode(X86ISD::VPERMIL2, DL, MVT::v4f64, LoLo, HiHi,
9460 IndicesVec, DAG.getTargetConstant(0, DL, MVT::i8)));
9461 // Permute Lo and Hi and then select based on index range.
9462 // This works as VPERMILPD only uses index bit[1] to permute elements.
9463 SDValue Res = DAG.getSelectCC(
9464 DL, IndicesVec, DAG.getConstant(2, DL, MVT::v4i64),
9465 DAG.getNode(X86ISD::VPERMILPV, DL, MVT::v4f64, HiHi, IndicesVec),
9466 DAG.getNode(X86ISD::VPERMILPV, DL, MVT::v4f64, LoLo, IndicesVec),
9467 ISD::CondCode::SETGT);
9468 return DAG.getBitcast(VT, Res);
9469 }
9470 break;
9471 case MVT::v64i8:
9472 if (Subtarget.hasVBMI())
9473 Opcode = X86ISD::VPERMV;
9474 break;
9475 case MVT::v32i16:
9476 if (Subtarget.hasBWI())
9477 Opcode = X86ISD::VPERMV;
9478 break;
9479 case MVT::v16f32:
9480 case MVT::v16i32:
9481 case MVT::v8f64:
9482 case MVT::v8i64:
9483 if (Subtarget.hasAVX512())
9484 Opcode = X86ISD::VPERMV;
9485 break;
9486 }
9487 if (!Opcode)
9488 return SDValue();
9489
9490   assert((VT.getSizeInBits() == ShuffleVT.getSizeInBits()) &&
9491          (VT.getScalarSizeInBits() % ShuffleVT.getScalarSizeInBits()) == 0 &&
9492          "Illegal variable permute shuffle type");
9493
9494 uint64_t Scale = VT.getScalarSizeInBits() / ShuffleVT.getScalarSizeInBits();
9495 if (Scale > 1)
9496 IndicesVec = ScaleIndices(IndicesVec, Scale);
9497
9498 EVT ShuffleIdxVT = EVT(ShuffleVT).changeVectorElementTypeToInteger();
9499 IndicesVec = DAG.getBitcast(ShuffleIdxVT, IndicesVec);
9500
9501 SrcVec = DAG.getBitcast(ShuffleVT, SrcVec);
9502 SDValue Res = Opcode == X86ISD::VPERMV
9503 ? DAG.getNode(Opcode, DL, ShuffleVT, IndicesVec, SrcVec)
9504 : DAG.getNode(Opcode, DL, ShuffleVT, SrcVec, IndicesVec);
9505 return DAG.getBitcast(VT, Res);
9506}
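Editorial aside, not part of the LLVM sources: a minimal scalar sketch of the index-scaling trick used by the ScaleIndices lambda above. For a v4i32 -> v16i8 scaling (Scale = 4, 8 bits per scaled index), the splatted multiply/add constants expand each 32-bit index i into the packed byte indices 4*i+0 .. 4*i+3.

#include <cstdint>
#include <cstdio>

int main() {
  const uint64_t Scale = 4;      // sub-elements per original element (v4i32 -> v16i8)
  const unsigned NumDstBits = 8; // bits per scaled index
  uint64_t IndexScale = 0, IndexOffset = 0;
  for (uint64_t i = 0; i != Scale; ++i) {
    IndexScale |= Scale << (i * NumDstBits); // builds 0x04040404
    IndexOffset |= i << (i * NumDstBits);    // builds 0x03020100
  }
  // Scaling the 32-bit permute index 2 yields the packed bytes {8, 9, 10, 11},
  // i.e. the byte positions of element 2 once the vector is viewed as v16i8.
  uint32_t Idx = 2;
  uint32_t Packed = Idx * (uint32_t)IndexScale + (uint32_t)IndexOffset;
  printf("IndexScale=%#llx IndexOffset=%#llx Packed=%#010x\n",
         (unsigned long long)IndexScale, (unsigned long long)IndexOffset,
         (unsigned)Packed);
  return 0;
}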
9507
9508// Tries to lower a BUILD_VECTOR composed of extract-extract chains that can be
9509// reasoned to be a permutation of a vector by indices in a non-constant vector.
9510// (build_vector (extract_elt V, (extract_elt I, 0)),
9511// (extract_elt V, (extract_elt I, 1)),
9512// ...
9513// ->
9514// (vpermv I, V)
9515//
9516// TODO: Handle undefs
9517// TODO: Utilize pshufb and zero mask blending to support more efficient
9518// construction of vectors with constant-0 elements.
9519static SDValue
9520LowerBUILD_VECTORAsVariablePermute(SDValue V, SelectionDAG &DAG,
9521 const X86Subtarget &Subtarget) {
9522 SDValue SrcVec, IndicesVec;
9523 // Check for a match of the permute source vector and permute index elements.
9524 // This is done by checking that the i-th build_vector operand is of the form:
9525 // (extract_elt SrcVec, (extract_elt IndicesVec, i)).
9526 for (unsigned Idx = 0, E = V.getNumOperands(); Idx != E; ++Idx) {
9527 SDValue Op = V.getOperand(Idx);
9528 if (Op.getOpcode() != ISD::EXTRACT_VECTOR_ELT)
9529 return SDValue();
9530
9531 // If this is the first extract encountered in V, set the source vector,
9532 // otherwise verify the extract is from the previously defined source
9533 // vector.
9534 if (!SrcVec)
9535 SrcVec = Op.getOperand(0);
9536 else if (SrcVec != Op.getOperand(0))
9537 return SDValue();
9538 SDValue ExtractedIndex = Op->getOperand(1);
9539 // Peek through extends.
9540 if (ExtractedIndex.getOpcode() == ISD::ZERO_EXTEND ||
9541 ExtractedIndex.getOpcode() == ISD::SIGN_EXTEND)
9542 ExtractedIndex = ExtractedIndex.getOperand(0);
9543 if (ExtractedIndex.getOpcode() != ISD::EXTRACT_VECTOR_ELT)
9544 return SDValue();
9545
9546 // If this is the first extract from the index vector candidate, set the
9547 // indices vector, otherwise verify the extract is from the previously
9548 // defined indices vector.
9549 if (!IndicesVec)
9550 IndicesVec = ExtractedIndex.getOperand(0);
9551 else if (IndicesVec != ExtractedIndex.getOperand(0))
9552 return SDValue();
9553
9554 auto *PermIdx = dyn_cast<ConstantSDNode>(ExtractedIndex.getOperand(1));
9555 if (!PermIdx || PermIdx->getAPIntValue() != Idx)
9556 return SDValue();
9557 }
9558
9559 SDLoc DL(V);
9560 MVT VT = V.getSimpleValueType();
9561 return createVariablePermute(VT, SrcVec, IndicesVec, DL, DAG, Subtarget);
9562}
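For orientation, an editorial scalar-level sketch (not from the LLVM sources) of the semantics LowerBUILD_VECTORAsVariablePermute recognizes: a build_vector whose i-th element is Src[Indices[i]] is a single variable permute, so it can be lowered to one VPERMV/PSHUFB-style node instead of a chain of per-element extracts.

#include <array>
#include <cstdio>

// Scalar model of "vpermv I, V": Result[i] = Src[Indices[i]].
template <typename T, std::size_t N>
std::array<T, N> variablePermute(const std::array<T, N> &Src,
                                 const std::array<unsigned, N> &Indices) {
  std::array<T, N> Result{};
  for (std::size_t i = 0; i != N; ++i)
    Result[i] = Src[Indices[i] % N]; // % N keeps the sketch in range; each real
                                     // instruction defines its own out-of-range
                                     // behaviour
  return Result;
}

int main() {
  std::array<int, 4> Src = {10, 20, 30, 40};
  std::array<unsigned, 4> Idx = {3, 0, 2, 1};
  std::array<int, 4> R = variablePermute(Src, Idx);
  printf("%d %d %d %d\n", R[0], R[1], R[2], R[3]); // 40 10 30 20
  return 0;
}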
9563
9564SDValue
9565X86TargetLowering::LowerBUILD_VECTOR(SDValue Op, SelectionDAG &DAG) const {
9566 SDLoc dl(Op);
9567
9568 MVT VT = Op.getSimpleValueType();
9569 MVT EltVT = VT.getVectorElementType();
9570 unsigned NumElems = Op.getNumOperands();
9571
9572 // Generate vectors for predicate vectors.
9573 if (VT.getVectorElementType() == MVT::i1 && Subtarget.hasAVX512())
9574 return LowerBUILD_VECTORvXi1(Op, DAG, Subtarget);
9575
9576 if (SDValue VectorConstant = materializeVectorConstant(Op, DAG, Subtarget))
9577 return VectorConstant;
9578
9579 BuildVectorSDNode *BV = cast<BuildVectorSDNode>(Op.getNode());
9580 if (SDValue AddSub = lowerToAddSubOrFMAddSub(BV, Subtarget, DAG))
9581 return AddSub;
9582 if (SDValue HorizontalOp = LowerToHorizontalOp(BV, Subtarget, DAG))
9583 return HorizontalOp;
9584 if (SDValue Broadcast = lowerBuildVectorAsBroadcast(BV, Subtarget, DAG))
9585 return Broadcast;
9586 if (SDValue BitOp = lowerBuildVectorToBitOp(BV, DAG))
9587 return BitOp;
9588
9589 unsigned EVTBits = EltVT.getSizeInBits();
9590
9591 unsigned NumZero = 0;
9592 unsigned NumNonZero = 0;
9593 uint64_t NonZeros = 0;
9594 bool IsAllConstants = true;
9595 SmallSet<SDValue, 8> Values;
9596 unsigned NumConstants = NumElems;
9597 for (unsigned i = 0; i < NumElems; ++i) {
9598 SDValue Elt = Op.getOperand(i);
9599 if (Elt.isUndef())
9600 continue;
9601 Values.insert(Elt);
9602 if (!isa<ConstantSDNode>(Elt) && !isa<ConstantFPSDNode>(Elt)) {
9603 IsAllConstants = false;
9604 NumConstants--;
9605 }
9606 if (X86::isZeroNode(Elt))
9607 NumZero++;
9608 else {
9609       assert(i < sizeof(NonZeros) * 8); // Make sure the shift is within range.
9610 NonZeros |= ((uint64_t)1 << i);
9611 NumNonZero++;
9612 }
9613 }
9614
9615 // All undef vector. Return an UNDEF. All zero vectors were handled above.
9616 if (NumNonZero == 0)
9617 return DAG.getUNDEF(VT);
9618
9619 // If we are inserting one variable into a vector of non-zero constants, try
9620 // to avoid loading each constant element as a scalar. Load the constants as a
9621 // vector and then insert the variable scalar element. If insertion is not
9622 // supported, fall back to a shuffle to get the scalar blended with the
9623 // constants. Insertion into a zero vector is handled as a special-case
9624 // somewhere below here.
9625 if (NumConstants == NumElems - 1 && NumNonZero != 1 &&
9626 (isOperationLegalOrCustom(ISD::INSERT_VECTOR_ELT, VT) ||
9627 isOperationLegalOrCustom(ISD::VECTOR_SHUFFLE, VT))) {
9628 // Create an all-constant vector. The variable element in the old
9629 // build vector is replaced by undef in the constant vector. Save the
9630 // variable scalar element and its index for use in the insertelement.
9631 LLVMContext &Context = *DAG.getContext();
9632 Type *EltType = Op.getValueType().getScalarType().getTypeForEVT(Context);
9633 SmallVector<Constant *, 16> ConstVecOps(NumElems, UndefValue::get(EltType));
9634 SDValue VarElt;
9635 SDValue InsIndex;
9636 for (unsigned i = 0; i != NumElems; ++i) {
9637 SDValue Elt = Op.getOperand(i);
9638 if (auto *C = dyn_cast<ConstantSDNode>(Elt))
9639 ConstVecOps[i] = ConstantInt::get(Context, C->getAPIntValue());
9640 else if (auto *C = dyn_cast<ConstantFPSDNode>(Elt))
9641 ConstVecOps[i] = ConstantFP::get(Context, C->getValueAPF());
9642 else if (!Elt.isUndef()) {
9643       assert(!VarElt.getNode() && !InsIndex.getNode() &&
9644              "Expected one variable element in this vector");
9645 VarElt = Elt;
9646 InsIndex = DAG.getConstant(i, dl, getVectorIdxTy(DAG.getDataLayout()));
9647 }
9648 }
9649 Constant *CV = ConstantVector::get(ConstVecOps);
9650 SDValue DAGConstVec = DAG.getConstantPool(CV, VT);
9651
9652 // The constants we just created may not be legal (eg, floating point). We
9653 // must lower the vector right here because we can not guarantee that we'll
9654 // legalize it before loading it. This is also why we could not just create
9655 // a new build vector here. If the build vector contains illegal constants,
9656 // it could get split back up into a series of insert elements.
9657 // TODO: Improve this by using shorter loads with broadcast/VZEXT_LOAD.
9658 SDValue LegalDAGConstVec = LowerConstantPool(DAGConstVec, DAG);
9659 MachineFunction &MF = DAG.getMachineFunction();
9660 MachinePointerInfo MPI = MachinePointerInfo::getConstantPool(MF);
9661 SDValue Ld = DAG.getLoad(VT, dl, DAG.getEntryNode(), LegalDAGConstVec, MPI);
9662 unsigned InsertC = cast<ConstantSDNode>(InsIndex)->getZExtValue();
9663 unsigned NumEltsInLow128Bits = 128 / VT.getScalarSizeInBits();
9664 if (InsertC < NumEltsInLow128Bits)
9665 return DAG.getNode(ISD::INSERT_VECTOR_ELT, dl, VT, Ld, VarElt, InsIndex);
9666
9667 // There's no good way to insert into the high elements of a >128-bit
9668 // vector, so use shuffles to avoid an extract/insert sequence.
9669     assert(VT.getSizeInBits() > 128 && "Invalid insertion index?");
9670     assert(Subtarget.hasAVX() && "Must have AVX with >16-byte vector");
9671 SmallVector<int, 8> ShuffleMask;
9672 unsigned NumElts = VT.getVectorNumElements();
9673 for (unsigned i = 0; i != NumElts; ++i)
9674 ShuffleMask.push_back(i == InsertC ? NumElts : i);
9675 SDValue S2V = DAG.getNode(ISD::SCALAR_TO_VECTOR, dl, VT, VarElt);
9676 return DAG.getVectorShuffle(VT, dl, Ld, S2V, ShuffleMask);
9677 }
9678
9679   // Special case for a single non-zero, non-undef element.
9680 if (NumNonZero == 1) {
9681 unsigned Idx = countTrailingZeros(NonZeros);
9682 SDValue Item = Op.getOperand(Idx);
9683
9684 // If we have a constant or non-constant insertion into the low element of
9685 // a vector, we can do this with SCALAR_TO_VECTOR + shuffle of zero into
9686 // the rest of the elements. This will be matched as movd/movq/movss/movsd
9687 // depending on what the source datatype is.
9688 if (Idx == 0) {
9689 if (NumZero == 0)
9690 return DAG.getNode(ISD::SCALAR_TO_VECTOR, dl, VT, Item);
9691
9692 if (EltVT == MVT::i32 || EltVT == MVT::f32 || EltVT == MVT::f64 ||
9693 (EltVT == MVT::i64 && Subtarget.is64Bit())) {
9694         assert((VT.is128BitVector() || VT.is256BitVector() ||
9695                 VT.is512BitVector()) &&
9696                "Expected an SSE value type!");
9697 Item = DAG.getNode(ISD::SCALAR_TO_VECTOR, dl, VT, Item);
9698 // Turn it into a MOVL (i.e. movss, movsd, or movd) to a zero vector.
9699 return getShuffleVectorZeroOrUndef(Item, 0, true, Subtarget, DAG);
9700 }
9701
9702 // We can't directly insert an i8 or i16 into a vector, so zero extend
9703 // it to i32 first.
9704 if (EltVT == MVT::i16 || EltVT == MVT::i8) {
9705 Item = DAG.getNode(ISD::ZERO_EXTEND, dl, MVT::i32, Item);
9706 MVT ShufVT = MVT::getVectorVT(MVT::i32, VT.getSizeInBits()/32);
9707 Item = DAG.getNode(ISD::SCALAR_TO_VECTOR, dl, ShufVT, Item);
9708 Item = getShuffleVectorZeroOrUndef(Item, 0, true, Subtarget, DAG);
9709 return DAG.getBitcast(VT, Item);
9710 }
9711 }
9712
9713 // Is it a vector logical left shift?
9714 if (NumElems == 2 && Idx == 1 &&
9715 X86::isZeroNode(Op.getOperand(0)) &&
9716 !X86::isZeroNode(Op.getOperand(1))) {
9717 unsigned NumBits = VT.getSizeInBits();
9718 return getVShift(true, VT,
9719 DAG.getNode(ISD::SCALAR_TO_VECTOR, dl,
9720 VT, Op.getOperand(1)),
9721 NumBits/2, DAG, *this, dl);
9722 }
9723
9724 if (IsAllConstants) // Otherwise, it's better to do a constpool load.
9725 return SDValue();
9726
9727 // Otherwise, if this is a vector with i32 or f32 elements, and the element
9728 // is a non-constant being inserted into an element other than the low one,
9729 // we can't use a constant pool load. Instead, use SCALAR_TO_VECTOR (aka
9730 // movd/movss) to move this into the low element, then shuffle it into
9731 // place.
9732 if (EVTBits == 32) {
9733 Item = DAG.getNode(ISD::SCALAR_TO_VECTOR, dl, VT, Item);
9734 return getShuffleVectorZeroOrUndef(Item, Idx, NumZero > 0, Subtarget, DAG);
9735 }
9736 }
9737
9738 // Splat is obviously ok. Let legalizer expand it to a shuffle.
9739 if (Values.size() == 1) {
9740 if (EVTBits == 32) {
9741 // Instead of a shuffle like this:
9742 // shuffle (scalar_to_vector (load (ptr + 4))), undef, <0, 0, 0, 0>
9743 // Check if it's possible to issue this instead.
9744       // shuffle (vload ptr), undef, <1, 1, 1, 1>
9745 unsigned Idx = countTrailingZeros(NonZeros);
9746 SDValue Item = Op.getOperand(Idx);
9747 if (Op.getNode()->isOnlyUserOf(Item.getNode()))
9748 return LowerAsSplatVectorLoad(Item, VT, dl, DAG);
9749 }
9750 return SDValue();
9751 }
9752
9753 // A vector full of immediates; various special cases are already
9754 // handled, so this is best done with a single constant-pool load.
9755 if (IsAllConstants)
9756 return SDValue();
9757
9758 if (SDValue V = LowerBUILD_VECTORAsVariablePermute(Op, DAG, Subtarget))
9759 return V;
9760
9761 // See if we can use a vector load to get all of the elements.
9762 {
9763 SmallVector<SDValue, 64> Ops(Op->op_begin(), Op->op_begin() + NumElems);
9764 if (SDValue LD =
9765 EltsFromConsecutiveLoads(VT, Ops, dl, DAG, Subtarget, false))
9766 return LD;
9767 }
9768
9769 // If this is a splat of pairs of 32-bit elements, we can use a narrower
9770 // build_vector and broadcast it.
9771 // TODO: We could probably generalize this more.
9772 if (Subtarget.hasAVX2() && EVTBits == 32 && Values.size() == 2) {
9773 SDValue Ops[4] = { Op.getOperand(0), Op.getOperand(1),
9774 DAG.getUNDEF(EltVT), DAG.getUNDEF(EltVT) };
9775 auto CanSplat = [](SDValue Op, unsigned NumElems, ArrayRef<SDValue> Ops) {
9776 // Make sure all the even/odd operands match.
9777 for (unsigned i = 2; i != NumElems; ++i)
9778 if (Ops[i % 2] != Op.getOperand(i))
9779 return false;
9780 return true;
9781 };
9782 if (CanSplat(Op, NumElems, Ops)) {
9783 MVT WideEltVT = VT.isFloatingPoint() ? MVT::f64 : MVT::i64;
9784 MVT NarrowVT = MVT::getVectorVT(EltVT, 4);
9785 // Create a new build vector and cast to v2i64/v2f64.
9786 SDValue NewBV = DAG.getBitcast(MVT::getVectorVT(WideEltVT, 2),
9787 DAG.getBuildVector(NarrowVT, dl, Ops));
9788 // Broadcast from v2i64/v2f64 and cast to final VT.
9789 MVT BcastVT = MVT::getVectorVT(WideEltVT, NumElems/2);
9790 return DAG.getBitcast(VT, DAG.getNode(X86ISD::VBROADCAST, dl, BcastVT,
9791 NewBV));
9792 }
9793 }
9794
9795 // For AVX-length vectors, build the individual 128-bit pieces and use
9796 // shuffles to put them in place.
9797 if (VT.getSizeInBits() > 128) {
9798 MVT HVT = MVT::getVectorVT(EltVT, NumElems/2);
9799
9800 // Build both the lower and upper subvector.
9801 SDValue Lower =
9802 DAG.getBuildVector(HVT, dl, Op->ops().slice(0, NumElems / 2));
9803 SDValue Upper = DAG.getBuildVector(
9804 HVT, dl, Op->ops().slice(NumElems / 2, NumElems /2));
9805
9806 // Recreate the wider vector with the lower and upper part.
9807 return concatSubVectors(Lower, Upper, DAG, dl);
9808 }
9809
9810 // Let legalizer expand 2-wide build_vectors.
9811 if (EVTBits == 64) {
9812 if (NumNonZero == 1) {
9813 // One half is zero or undef.
9814 unsigned Idx = countTrailingZeros(NonZeros);
9815 SDValue V2 = DAG.getNode(ISD::SCALAR_TO_VECTOR, dl, VT,
9816 Op.getOperand(Idx));
9817 return getShuffleVectorZeroOrUndef(V2, Idx, true, Subtarget, DAG);
9818 }
9819 return SDValue();
9820 }
9821
9822 // If element VT is < 32 bits, convert it to inserts into a zero vector.
9823 if (EVTBits == 8 && NumElems == 16)
9824 if (SDValue V = LowerBuildVectorv16i8(Op, NonZeros, NumNonZero, NumZero,
9825 DAG, Subtarget))
9826 return V;
9827
9828 if (EVTBits == 16 && NumElems == 8)
9829 if (SDValue V = LowerBuildVectorv8i16(Op, NonZeros, NumNonZero, NumZero,
9830 DAG, Subtarget))
9831 return V;
9832
9833 // If element VT is == 32 bits and has 4 elems, try to generate an INSERTPS
9834 if (EVTBits == 32 && NumElems == 4)
9835 if (SDValue V = LowerBuildVectorv4x32(Op, DAG, Subtarget))
9836 return V;
9837
9838 // If element VT is == 32 bits, turn it into a number of shuffles.
9839 if (NumElems == 4 && NumZero > 0) {
9840 SmallVector<SDValue, 8> Ops(NumElems);
9841 for (unsigned i = 0; i < 4; ++i) {
9842 bool isZero = !(NonZeros & (1ULL << i));
9843 if (isZero)
9844 Ops[i] = getZeroVector(VT, Subtarget, DAG, dl);
9845 else
9846 Ops[i] = DAG.getNode(ISD::SCALAR_TO_VECTOR, dl, VT, Op.getOperand(i));
9847 }
9848
9849 for (unsigned i = 0; i < 2; ++i) {
9850 switch ((NonZeros >> (i*2)) & 0x3) {
9851       default: llvm_unreachable("Unexpected NonZero count");
9852 case 0:
9853 Ops[i] = Ops[i*2]; // Must be a zero vector.
9854 break;
9855 case 1:
9856 Ops[i] = getMOVL(DAG, dl, VT, Ops[i*2+1], Ops[i*2]);
9857 break;
9858 case 2:
9859 Ops[i] = getMOVL(DAG, dl, VT, Ops[i*2], Ops[i*2+1]);
9860 break;
9861 case 3:
9862 Ops[i] = getUnpackl(DAG, dl, VT, Ops[i*2], Ops[i*2+1]);
9863 break;
9864 }
9865 }
9866
9867 bool Reverse1 = (NonZeros & 0x3) == 2;
9868 bool Reverse2 = ((NonZeros & (0x3 << 2)) >> 2) == 2;
9869 int MaskVec[] = {
9870 Reverse1 ? 1 : 0,
9871 Reverse1 ? 0 : 1,
9872 static_cast<int>(Reverse2 ? NumElems+1 : NumElems),
9873 static_cast<int>(Reverse2 ? NumElems : NumElems+1)
9874 };
9875 return DAG.getVectorShuffle(VT, dl, Ops[0], Ops[1], MaskVec);
9876 }
9877
9878   assert(Values.size() > 1 && "Expected non-undef and non-splat vector");
9879
9880 // Check for a build vector from mostly shuffle plus few inserting.
9881 if (SDValue Sh = buildFromShuffleMostly(Op, DAG))
9882 return Sh;
9883
9884 // For SSE 4.1, use insertps to put the high elements into the low element.
9885 if (Subtarget.hasSSE41()) {
9886 SDValue Result;
9887 if (!Op.getOperand(0).isUndef())
9888 Result = DAG.getNode(ISD::SCALAR_TO_VECTOR, dl, VT, Op.getOperand(0));
9889 else
9890 Result = DAG.getUNDEF(VT);
9891
9892 for (unsigned i = 1; i < NumElems; ++i) {
9893 if (Op.getOperand(i).isUndef()) continue;
9894 Result = DAG.getNode(ISD::INSERT_VECTOR_ELT, dl, VT, Result,
9895 Op.getOperand(i), DAG.getIntPtrConstant(i, dl));
9896 }
9897 return Result;
9898 }
9899
9900 // Otherwise, expand into a number of unpckl*, start by extending each of
9901 // our (non-undef) elements to the full vector width with the element in the
9902 // bottom slot of the vector (which generates no code for SSE).
9903 SmallVector<SDValue, 8> Ops(NumElems);
9904 for (unsigned i = 0; i < NumElems; ++i) {
9905 if (!Op.getOperand(i).isUndef())
9906 Ops[i] = DAG.getNode(ISD::SCALAR_TO_VECTOR, dl, VT, Op.getOperand(i));
9907 else
9908 Ops[i] = DAG.getUNDEF(VT);
9909 }
9910
9911 // Next, we iteratively mix elements, e.g. for v4f32:
9912 // Step 1: unpcklps 0, 1 ==> X: <?, ?, 1, 0>
9913 // : unpcklps 2, 3 ==> Y: <?, ?, 3, 2>
9914 // Step 2: unpcklpd X, Y ==> <3, 2, 1, 0>
9915 for (unsigned Scale = 1; Scale < NumElems; Scale *= 2) {
9916 // Generate scaled UNPCKL shuffle mask.
9917 SmallVector<int, 16> Mask;
9918 for(unsigned i = 0; i != Scale; ++i)
9919 Mask.push_back(i);
9920 for (unsigned i = 0; i != Scale; ++i)
9921 Mask.push_back(NumElems+i);
9922 Mask.append(NumElems - Mask.size(), SM_SentinelUndef);
9923
9924 for (unsigned i = 0, e = NumElems / (2 * Scale); i != e; ++i)
9925 Ops[i] = DAG.getVectorShuffle(VT, dl, Ops[2*i], Ops[(2*i)+1], Mask);
9926 }
9927 return Ops[0];
9928}
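The final unpckl expansion above mixes elements pairwise until everything lands in one vector. An editorial sketch (not from the LLVM sources) models that ladder in scalar form, using -1 for undefined lanes and a generalized "unpack the low Scale elements" step that mirrors the scaled UNPCKL mask built in the loop.

#include <array>
#include <cstdio>

using Vec4 = std::array<int, 4>; // -1 models an undefined lane

// Generalized unpack-low: take the low Scale lanes of A, then the low Scale of B.
static Vec4 unpackLo(const Vec4 &A, const Vec4 &B, int Scale) {
  Vec4 R = {-1, -1, -1, -1};
  for (int i = 0; i != Scale; ++i) {
    R[i] = A[i];
    R[Scale + i] = B[i];
  }
  return R;
}

int main() {
  // SCALAR_TO_VECTOR of four operands: each value starts in lane 0.
  std::array<Vec4, 4> Ops = {Vec4{10, -1, -1, -1}, Vec4{11, -1, -1, -1},
                             Vec4{12, -1, -1, -1}, Vec4{13, -1, -1, -1}};
  for (int Scale = 1; Scale < 4; Scale *= 2)
    for (int i = 0, e = 4 / (2 * Scale); i != e; ++i)
      Ops[i] = unpackLo(Ops[2 * i], Ops[2 * i + 1], Scale);
  printf("%d %d %d %d\n", Ops[0][0], Ops[0][1], Ops[0][2], Ops[0][3]);
  // Prints "10 11 12 13": all operands gathered into a single vector.
  return 0;
}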
9929
9930// 256-bit AVX can use the vinsertf128 instruction
9931// to create 256-bit vectors from two other 128-bit ones.
9932// TODO: Detect subvector broadcast here instead of DAG combine?
9933static SDValue LowerAVXCONCAT_VECTORS(SDValue Op, SelectionDAG &DAG,
9934 const X86Subtarget &Subtarget) {
9935 SDLoc dl(Op);
9936 MVT ResVT = Op.getSimpleValueType();
9937
9938   assert((ResVT.is256BitVector() ||
9939           ResVT.is512BitVector()) && "Value type must be 256-/512-bit wide");
9940
9941 unsigned NumOperands = Op.getNumOperands();
9942 unsigned NumZero = 0;
9943 unsigned NumNonZero = 0;
9944 unsigned NonZeros = 0;
9945 for (unsigned i = 0; i != NumOperands; ++i) {
9946 SDValue SubVec = Op.getOperand(i);
9947 if (SubVec.isUndef())
9948 continue;
9949 if (ISD::isBuildVectorAllZeros(SubVec.getNode()))
9950 ++NumZero;
9951 else {
9952       assert(i < sizeof(NonZeros) * CHAR_BIT); // Ensure the shift is in range.
9953 NonZeros |= 1 << i;
9954 ++NumNonZero;
9955 }
9956 }
9957
9958 // If we have more than 2 non-zeros, build each half separately.
9959 if (NumNonZero > 2) {
9960 MVT HalfVT = ResVT.getHalfNumVectorElementsVT();
9961 ArrayRef<SDUse> Ops = Op->ops();
9962 SDValue Lo = DAG.getNode(ISD::CONCAT_VECTORS, dl, HalfVT,
9963 Ops.slice(0, NumOperands/2));
9964 SDValue Hi = DAG.getNode(ISD::CONCAT_VECTORS, dl, HalfVT,
9965 Ops.slice(NumOperands/2));
9966 return DAG.getNode(ISD::CONCAT_VECTORS, dl, ResVT, Lo, Hi);
9967 }
9968
9969 // Otherwise, build it up through insert_subvectors.
9970 SDValue Vec = NumZero ? getZeroVector(ResVT, Subtarget, DAG, dl)
9971 : DAG.getUNDEF(ResVT);
9972
9973 MVT SubVT = Op.getOperand(0).getSimpleValueType();
9974 unsigned NumSubElems = SubVT.getVectorNumElements();
9975 for (unsigned i = 0; i != NumOperands; ++i) {
9976 if ((NonZeros & (1 << i)) == 0)
9977 continue;
9978
9979 Vec = DAG.getNode(ISD::INSERT_SUBVECTOR, dl, ResVT, Vec,
9980 Op.getOperand(i),
9981 DAG.getIntPtrConstant(i * NumSubElems, dl));
9982 }
9983
9984 return Vec;
9985}
9986
9987// Returns true if the given node is a type promotion (by concatenating i1
9988// zeros) of the result of a node that already zeros all upper bits of
9989// k-register.
9990// TODO: Merge this with LowerAVXCONCAT_VECTORS?
9991static SDValue LowerCONCAT_VECTORSvXi1(SDValue Op,
9992 const X86Subtarget &Subtarget,
9993 SelectionDAG & DAG) {
9994 SDLoc dl(Op);
9995 MVT ResVT = Op.getSimpleValueType();
9996 unsigned NumOperands = Op.getNumOperands();
9997
9998   assert(NumOperands > 1 && isPowerOf2_32(NumOperands) &&
9999          "Unexpected number of operands in CONCAT_VECTORS");
10000
10001 uint64_t Zeros = 0;
10002 uint64_t NonZeros = 0;
10003 for (unsigned i = 0; i != NumOperands; ++i) {
10004 SDValue SubVec = Op.getOperand(i);
10005 if (SubVec.isUndef())
10006 continue;
10007     assert(i < sizeof(NonZeros) * CHAR_BIT); // Ensure the shift is in range.
10008 if (ISD::isBuildVectorAllZeros(SubVec.getNode()))
10009 Zeros |= (uint64_t)1 << i;
10010 else
10011 NonZeros |= (uint64_t)1 << i;
10012 }
10013
10014 unsigned NumElems = ResVT.getVectorNumElements();
10015
10016   // If we are inserting a non-zero vector and there are zeros in the LSBs and
10017   // undefs in the MSBs, we need to emit a KSHIFTL. The generic lowering to
10018 // insert_subvector will give us two kshifts.
10019 if (isPowerOf2_64(NonZeros) && Zeros != 0 && NonZeros > Zeros &&
10020 Log2_64(NonZeros) != NumOperands - 1) {
10021 MVT ShiftVT = ResVT;
10022 if ((!Subtarget.hasDQI() && NumElems == 8) || NumElems < 8)
10023 ShiftVT = Subtarget.hasDQI() ? MVT::v8i1 : MVT::v16i1;
10024 unsigned Idx = Log2_64(NonZeros);
10025 SDValue SubVec = Op.getOperand(Idx);
10026 unsigned SubVecNumElts = SubVec.getSimpleValueType().getVectorNumElements();
10027 SubVec = DAG.getNode(ISD::INSERT_SUBVECTOR, dl, ShiftVT,
10028 DAG.getUNDEF(ShiftVT), SubVec,
10029 DAG.getIntPtrConstant(0, dl));
10030 Op = DAG.getNode(X86ISD::KSHIFTL, dl, ShiftVT, SubVec,
10031 DAG.getTargetConstant(Idx * SubVecNumElts, dl, MVT::i8));
10032 return DAG.getNode(ISD::EXTRACT_SUBVECTOR, dl, ResVT, Op,
10033 DAG.getIntPtrConstant(0, dl));
10034 }
10035
10036 // If there are zero or one non-zeros we can handle this very simply.
10037 if (NonZeros == 0 || isPowerOf2_64(NonZeros)) {
10038 SDValue Vec = Zeros ? DAG.getConstant(0, dl, ResVT) : DAG.getUNDEF(ResVT);
10039 if (!NonZeros)
10040 return Vec;
10041 unsigned Idx = Log2_64(NonZeros);
10042 SDValue SubVec = Op.getOperand(Idx);
10043 unsigned SubVecNumElts = SubVec.getSimpleValueType().getVectorNumElements();
10044 return DAG.getNode(ISD::INSERT_SUBVECTOR, dl, ResVT, Vec, SubVec,
10045 DAG.getIntPtrConstant(Idx * SubVecNumElts, dl));
10046 }
10047
10048 if (NumOperands > 2) {
10049 MVT HalfVT = ResVT.getHalfNumVectorElementsVT();
10050 ArrayRef<SDUse> Ops = Op->ops();
10051 SDValue Lo = DAG.getNode(ISD::CONCAT_VECTORS, dl, HalfVT,
10052 Ops.slice(0, NumOperands/2));
10053 SDValue Hi = DAG.getNode(ISD::CONCAT_VECTORS, dl, HalfVT,
10054 Ops.slice(NumOperands/2));
10055 return DAG.getNode(ISD::CONCAT_VECTORS, dl, ResVT, Lo, Hi);
10056 }
10057
10058   assert(countPopulation(NonZeros) == 2 && "Simple cases not handled?");
10059
10060 if (ResVT.getVectorNumElements() >= 16)
10061 return Op; // The operation is legal with KUNPCK
10062
10063 SDValue Vec = DAG.getNode(ISD::INSERT_SUBVECTOR, dl, ResVT,
10064 DAG.getUNDEF(ResVT), Op.getOperand(0),
10065 DAG.getIntPtrConstant(0, dl));
10066 return DAG.getNode(ISD::INSERT_SUBVECTOR, dl, ResVT, Vec, Op.getOperand(1),
10067 DAG.getIntPtrConstant(NumElems/2, dl));
10068}
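A small editorial sketch (not from the LLVM sources) of why the KSHIFTL path above is enough when only one subvector is non-zero: concatenating i1 subvectors is just bit packing, so placing the subvector at slot Idx amounts to a left shift by Idx * SubVecNumElts.

#include <cstdint>
#include <cstdio>

int main() {
  uint16_t SubVec = 0xB;    // a v4i1 value: elements <1,1,0,1>, LSB = element 0
  unsigned SubVecNumElts = 4;
  unsigned Idx = 2;         // place it in the third v4i1 slot of a v16i1 result
  uint16_t Result = (uint16_t)(SubVec << (Idx * SubVecNumElts));
  printf("%#06x\n", (unsigned)Result); // 0x0b00: bits 8..11 hold the subvector
  return 0;
}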
10069
10070static SDValue LowerCONCAT_VECTORS(SDValue Op,
10071 const X86Subtarget &Subtarget,
10072 SelectionDAG &DAG) {
10073 MVT VT = Op.getSimpleValueType();
10074 if (VT.getVectorElementType() == MVT::i1)
10075 return LowerCONCAT_VECTORSvXi1(Op, Subtarget, DAG);
10076
10077   assert((VT.is256BitVector() && Op.getNumOperands() == 2) ||
10078          (VT.is512BitVector() && (Op.getNumOperands() == 2 ||
10079                                   Op.getNumOperands() == 4)));
10080
10081 // AVX can use the vinsertf128 instruction to create 256-bit vectors
10082 // from two other 128-bit ones.
10083
10084 // 512-bit vector may contain 2 256-bit vectors or 4 128-bit vectors
10085 return LowerAVXCONCAT_VECTORS(Op, DAG, Subtarget);
10086}
10087
10088//===----------------------------------------------------------------------===//
10089// Vector shuffle lowering
10090//
10091// This is an experimental code path for lowering vector shuffles on x86. It is
10092// designed to handle arbitrary vector shuffles and blends, gracefully
10093// degrading performance as necessary. It works hard to recognize idiomatic
10094// shuffles and lower them to optimal instruction patterns without leaving
10095// a framework that allows reasonably efficient handling of all vector shuffle
10096// patterns.
10097//===----------------------------------------------------------------------===//
10098
10099/// Tiny helper function to identify a no-op mask.
10100///
10101/// This is a somewhat boring predicate function. It checks whether the mask
10102/// array input, which is assumed to be a single-input shuffle mask of the kind
10103/// used by the X86 shuffle instructions (not a fully general
10104/// ShuffleVectorSDNode mask) requires any shuffles to occur. Both undef and an
10105/// in-place shuffle are 'no-op's.
10106static bool isNoopShuffleMask(ArrayRef<int> Mask) {
10107 for (int i = 0, Size = Mask.size(); i < Size; ++i) {
10108     assert(Mask[i] >= -1 && "Out of bound mask element!");
10109 if (Mask[i] >= 0 && Mask[i] != i)
10110 return false;
10111 }
10112 return true;
10113}
10114
10115/// Test whether there are elements crossing 128-bit lanes in this
10116/// shuffle mask.
10117///
10118/// X86 divides up its shuffles into in-lane and cross-lane shuffle operations
10119/// and we routinely test for these.
10120static bool is128BitLaneCrossingShuffleMask(MVT VT, ArrayRef<int> Mask) {
10121 int LaneSize = 128 / VT.getScalarSizeInBits();
10122 int Size = Mask.size();
10123 for (int i = 0; i < Size; ++i)
10124 if (Mask[i] >= 0 && (Mask[i] % Size) / LaneSize != i / LaneSize)
10125 return true;
10126 return false;
10127}
10128
10129/// Test whether a shuffle mask is equivalent within each sub-lane.
10130///
10131/// This checks a shuffle mask to see if it is performing the same
10132/// lane-relative shuffle in each sub-lane. This trivially implies
10133/// that it is also not lane-crossing. It may however involve a blend from the
10134/// same lane of a second vector.
10135///
10136/// The specific repeated shuffle mask is populated in \p RepeatedMask, as it is
10137/// non-trivial to compute in the face of undef lanes. The representation is
10138/// suitable for use with existing 128-bit shuffles as entries from the second
10139/// vector have been remapped to [LaneSize, 2*LaneSize).
10140static bool isRepeatedShuffleMask(unsigned LaneSizeInBits, MVT VT,
10141 ArrayRef<int> Mask,
10142 SmallVectorImpl<int> &RepeatedMask) {
10143 auto LaneSize = LaneSizeInBits / VT.getScalarSizeInBits();
10144 RepeatedMask.assign(LaneSize, -1);
10145 int Size = Mask.size();
10146 for (int i = 0; i < Size; ++i) {
10147     assert(Mask[i] == SM_SentinelUndef || Mask[i] >= 0);
10148 if (Mask[i] < 0)
10149 continue;
10150 if ((Mask[i] % Size) / LaneSize != i / LaneSize)
10151 // This entry crosses lanes, so there is no way to model this shuffle.
10152 return false;
10153
10154 // Ok, handle the in-lane shuffles by detecting if and when they repeat.
10155 // Adjust second vector indices to start at LaneSize instead of Size.
10156 int LocalM = Mask[i] < Size ? Mask[i] % LaneSize
10157 : Mask[i] % LaneSize + LaneSize;
10158 if (RepeatedMask[i % LaneSize] < 0)
10159 // This is the first non-undef entry in this slot of a 128-bit lane.
10160 RepeatedMask[i % LaneSize] = LocalM;
10161 else if (RepeatedMask[i % LaneSize] != LocalM)
10162 // Found a mismatch with the repeated mask.
10163 return false;
10164 }
10165 return true;
10166}
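An editorial example (not from the LLVM sources) of what isRepeatedShuffleMask accepts: for v8f32 (two 128-bit lanes of four elements), the two-input mask <0,8,1,9, 4,12,5,13> performs the same lane-relative shuffle in both lanes, so the repeated mask is <0,4,1,5>, with second-input entries remapped into [LaneSize, 2*LaneSize).

#include <cstdio>
#include <vector>

// Simplified restatement of the repeated-mask check for one lane size.
static bool repeatedMask(int LaneSize, const std::vector<int> &Mask,
                         std::vector<int> &Repeated) {
  Repeated.assign(LaneSize, -1);
  const int Size = (int)Mask.size();
  for (int i = 0; i != Size; ++i) {
    if (Mask[i] < 0)
      continue; // undef lane: compatible with anything
    if ((Mask[i] % Size) / LaneSize != i / LaneSize)
      return false; // element crosses a lane
    int LocalM = Mask[i] < Size ? Mask[i] % LaneSize
                                : Mask[i] % LaneSize + LaneSize;
    if (Repeated[i % LaneSize] < 0)
      Repeated[i % LaneSize] = LocalM;
    else if (Repeated[i % LaneSize] != LocalM)
      return false; // the lanes disagree
  }
  return true;
}

int main() {
  std::vector<int> Rep;
  bool OK = repeatedMask(4, {0, 8, 1, 9, 4, 12, 5, 13}, Rep);
  printf("%d: <%d,%d,%d,%d>\n", OK, Rep[0], Rep[1], Rep[2], Rep[3]);
  // Prints "1: <0,4,1,5>".
  return 0;
}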
10167
10168/// Test whether a shuffle mask is equivalent within each 128-bit lane.
10169static bool
10170is128BitLaneRepeatedShuffleMask(MVT VT, ArrayRef<int> Mask,
10171 SmallVectorImpl<int> &RepeatedMask) {
10172 return isRepeatedShuffleMask(128, VT, Mask, RepeatedMask);
10173}
10174
10175static bool
10176is128BitLaneRepeatedShuffleMask(MVT VT, ArrayRef<int> Mask) {
10177 SmallVector<int, 32> RepeatedMask;
10178 return isRepeatedShuffleMask(128, VT, Mask, RepeatedMask);
10179}
10180
10181/// Test whether a shuffle mask is equivalent within each 256-bit lane.
10182static bool
10183is256BitLaneRepeatedShuffleMask(MVT VT, ArrayRef<int> Mask,
10184 SmallVectorImpl<int> &RepeatedMask) {
10185 return isRepeatedShuffleMask(256, VT, Mask, RepeatedMask);
10186}
10187
10188/// Test whether a target shuffle mask is equivalent within each sub-lane.
10189/// Unlike isRepeatedShuffleMask we must respect SM_SentinelZero.
10190static bool isRepeatedTargetShuffleMask(unsigned LaneSizeInBits, MVT VT,
10191 ArrayRef<int> Mask,
10192 SmallVectorImpl<int> &RepeatedMask) {
10193 int LaneSize = LaneSizeInBits / VT.getScalarSizeInBits();
10194 RepeatedMask.assign(LaneSize, SM_SentinelUndef);
10195 int Size = Mask.size();
10196 for (int i = 0; i < Size; ++i) {
10197     assert(isUndefOrZero(Mask[i]) || (Mask[i] >= 0));
10198 if (Mask[i] == SM_SentinelUndef)
10199 continue;
10200 if (Mask[i] == SM_SentinelZero) {
10201 if (!isUndefOrZero(RepeatedMask[i % LaneSize]))
10202 return false;
10203 RepeatedMask[i % LaneSize] = SM_SentinelZero;
10204 continue;
10205 }
10206 if ((Mask[i] % Size) / LaneSize != i / LaneSize)
10207 // This entry crosses lanes, so there is no way to model this shuffle.
10208 return false;
10209
10210 // Ok, handle the in-lane shuffles by detecting if and when they repeat.
10211 // Adjust second vector indices to start at LaneSize instead of Size.
10212 int LocalM =
10213 Mask[i] < Size ? Mask[i] % LaneSize : Mask[i] % LaneSize + LaneSize;
10214 if (RepeatedMask[i % LaneSize] == SM_SentinelUndef)
10215 // This is the first non-undef entry in this slot of a 128-bit lane.
10216 RepeatedMask[i % LaneSize] = LocalM;
10217 else if (RepeatedMask[i % LaneSize] != LocalM)
10218 // Found a mismatch with the repeated mask.
10219 return false;
10220 }
10221 return true;
10222}
10223
10224/// Checks whether a shuffle mask is equivalent to an explicit list of
10225/// arguments.
10226///
10227/// This is a fast way to test a shuffle mask against a fixed pattern:
10228///
10229/// if (isShuffleEquivalent(Mask, 3, 2, {1, 0})) { ... }
10230///
10231/// It returns true if the mask is exactly as wide as the argument list, and
10232/// each element of the mask is either -1 (signifying undef) or the value given
10233/// in the argument.
10234static bool isShuffleEquivalent(SDValue V1, SDValue V2, ArrayRef<int> Mask,
10235 ArrayRef<int> ExpectedMask) {
10236 if (Mask.size() != ExpectedMask.size())
10237 return false;
10238
10239 int Size = Mask.size();
10240
10241 // If the values are build vectors, we can look through them to find
10242 // equivalent inputs that make the shuffles equivalent.
10243 auto *BV1 = dyn_cast<BuildVectorSDNode>(V1);
10244 auto *BV2 = dyn_cast<BuildVectorSDNode>(V2);
10245
10246 for (int i = 0; i < Size; ++i) {
10247     assert(Mask[i] >= -1 && "Out of bound mask element!");
10248 if (Mask[i] >= 0 && Mask[i] != ExpectedMask[i]) {
10249 auto *MaskBV = Mask[i] < Size ? BV1 : BV2;
10250 auto *ExpectedBV = ExpectedMask[i] < Size ? BV1 : BV2;
10251 if (!MaskBV || !ExpectedBV ||
10252 MaskBV->getOperand(Mask[i] % Size) !=
10253 ExpectedBV->getOperand(ExpectedMask[i] % Size))
10254 return false;
10255 }
10256 }
10257
10258 return true;
10259}
10260
10261/// Checks whether a target shuffle mask is equivalent to an explicit pattern.
10262///
10263/// The masks must be exactly the same width.
10264///
10265/// If an element in Mask matches SM_SentinelUndef (-1) then the corresponding
10266/// value in ExpectedMask is always accepted. Otherwise the indices must match.
10267///
10268/// SM_SentinelZero is accepted as a valid negative index but must match in
10269/// both.
10270static bool isTargetShuffleEquivalent(ArrayRef<int> Mask,
10271 ArrayRef<int> ExpectedMask,
10272 SDValue V1 = SDValue(),
10273 SDValue V2 = SDValue()) {
10274 int Size = Mask.size();
10275 if (Size != (int)ExpectedMask.size())
10276 return false;
10277   assert(isUndefOrZeroOrInRange(ExpectedMask, 0, 2 * Size) &&
10278          "Illegal target shuffle mask");
10279
10280 // Check for out-of-range target shuffle mask indices.
10281 if (!isUndefOrZeroOrInRange(Mask, 0, 2 * Size))
10282 return false;
10283
10284 // If the values are build vectors, we can look through them to find
10285 // equivalent inputs that make the shuffles equivalent.
10286 auto *BV1 = dyn_cast_or_null<BuildVectorSDNode>(V1);
10287 auto *BV2 = dyn_cast_or_null<BuildVectorSDNode>(V2);
10288 BV1 = ((BV1 && Size != (int)BV1->getNumOperands()) ? nullptr : BV1);
10289 BV2 = ((BV2 && Size != (int)BV2->getNumOperands()) ? nullptr : BV2);
10290
10291 for (int i = 0; i < Size; ++i) {
10292 if (Mask[i] == SM_SentinelUndef || Mask[i] == ExpectedMask[i])
10293 continue;
10294 if (0 <= Mask[i] && 0 <= ExpectedMask[i]) {
10295 auto *MaskBV = Mask[i] < Size ? BV1 : BV2;
10296 auto *ExpectedBV = ExpectedMask[i] < Size ? BV1 : BV2;
10297 if (MaskBV && ExpectedBV &&
10298 MaskBV->getOperand(Mask[i] % Size) ==
10299 ExpectedBV->getOperand(ExpectedMask[i] % Size))
10300 continue;
10301 }
10302 // TODO - handle SM_Sentinel equivalences.
10303 return false;
10304 }
10305 return true;
10306}
10307
10308// Attempt to create a shuffle mask from a VSELECT condition mask.
10309static bool createShuffleMaskFromVSELECT(SmallVectorImpl<int> &Mask,
10310 SDValue Cond) {
10311 if (!ISD::isBuildVectorOfConstantSDNodes(Cond.getNode()))
10312 return false;
10313
10314 unsigned Size = Cond.getValueType().getVectorNumElements();
10315 Mask.resize(Size, SM_SentinelUndef);
10316
10317 for (int i = 0; i != (int)Size; ++i) {
10318 SDValue CondElt = Cond.getOperand(i);
10319 Mask[i] = i;
10320 // Arbitrarily choose from the 2nd operand if the select condition element
10321 // is undef.
10322 // TODO: Can we do better by matching patterns such as even/odd?
10323 if (CondElt.isUndef() || isNullConstant(CondElt))
10324 Mask[i] += Size;
10325 }
10326
10327 return true;
10328}
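A short editorial example (not from the LLVM sources) of the VSELECT-to-shuffle conversion above: a true condition lane keeps operand 1 (mask value i), while a false or undef lane takes operand 2 (mask value i + Size).

#include <cstdio>
#include <vector>

int main() {
  // Per-lane condition: 1 = true, 0 = false (an undef lane would also be
  // treated as "take the second operand" here).
  std::vector<int> Cond = {1, 0, 1, 0};
  const int Size = (int)Cond.size();
  std::vector<int> Mask(Size);
  for (int i = 0; i != Size; ++i)
    Mask[i] = Cond[i] ? i : i + Size;
  printf("<%d,%d,%d,%d>\n", Mask[0], Mask[1], Mask[2], Mask[3]); // <0,5,2,7>
  return 0;
}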
10329
10330// Check if the shuffle mask is suitable for the AVX vpunpcklwd or vpunpckhwd
10331// instructions.
10332static bool isUnpackWdShuffleMask(ArrayRef<int> Mask, MVT VT) {
10333 if (VT != MVT::v8i32 && VT != MVT::v8f32)
10334 return false;
10335
10336 SmallVector<int, 8> Unpcklwd;
10337 createUnpackShuffleMask(MVT::v8i16, Unpcklwd, /* Lo = */ true,
10338 /* Unary = */ false);
10339 SmallVector<int, 8> Unpckhwd;
10340 createUnpackShuffleMask(MVT::v8i16, Unpckhwd, /* Lo = */ false,
10341 /* Unary = */ false);
10342 bool IsUnpackwdMask = (isTargetShuffleEquivalent(Mask, Unpcklwd) ||
10343 isTargetShuffleEquivalent(Mask, Unpckhwd));
10344 return IsUnpackwdMask;
10345}
10346
10347static bool is128BitUnpackShuffleMask(ArrayRef<int> Mask) {
10348 // Create 128-bit vector type based on mask size.
10349 MVT EltVT = MVT::getIntegerVT(128 / Mask.size());
10350 MVT VT = MVT::getVectorVT(EltVT, Mask.size());
10351
10352 // We can't assume a canonical shuffle mask, so try the commuted version too.
10353 SmallVector<int, 4> CommutedMask(Mask.begin(), Mask.end());
10354 ShuffleVectorSDNode::commuteMask(CommutedMask);
10355
10356 // Match any of unary/binary or low/high.
10357 for (unsigned i = 0; i != 4; ++i) {
10358 SmallVector<int, 16> UnpackMask;
10359 createUnpackShuffleMask(VT, UnpackMask, (i >> 1) % 2, i % 2);
10360 if (isTargetShuffleEquivalent(Mask, UnpackMask) ||
10361 isTargetShuffleEquivalent(CommutedMask, UnpackMask))
10362 return true;
10363 }
10364 return false;
10365}
10366
10367/// Return true if a shuffle mask chooses elements identically in its top and
10368/// bottom halves. For example, any splat mask has the same top and bottom
10369/// halves. If an element is undefined in only one half of the mask, the halves
10370/// are not considered identical.
10371static bool hasIdenticalHalvesShuffleMask(ArrayRef<int> Mask) {
10372   assert(Mask.size() % 2 == 0 && "Expecting even number of elements in mask");
10373 unsigned HalfSize = Mask.size() / 2;
10374 for (unsigned i = 0; i != HalfSize; ++i) {
10375 if (Mask[i] != Mask[i + HalfSize])
10376 return false;
10377 }
10378 return true;
10379}
10380
10381/// Get a 4-lane 8-bit shuffle immediate for a mask.
10382///
10383/// This helper function produces an 8-bit shuffle immediate corresponding to
10384/// the ubiquitous shuffle encoding scheme used in x86 instructions for
10385/// shuffling 4 lanes. It can be used with most of the PSHUF instructions for
10386/// example.
10387///
10388/// NB: We rely heavily on "undef" masks preserving the input lane.
10389static unsigned getV4X86ShuffleImm(ArrayRef<int> Mask) {
10390   assert(Mask.size() == 4 && "Only 4-lane shuffle masks");
10391   assert(Mask[0] >= -1 && Mask[0] < 4 && "Out of bound mask element!");
10392   assert(Mask[1] >= -1 && Mask[1] < 4 && "Out of bound mask element!");
10393   assert(Mask[2] >= -1 && Mask[2] < 4 && "Out of bound mask element!");
10394   assert(Mask[3] >= -1 && Mask[3] < 4 && "Out of bound mask element!");
10395
10396 unsigned Imm = 0;
10397 Imm |= (Mask[0] < 0 ? 0 : Mask[0]) << 0;
10398 Imm |= (Mask[1] < 0 ? 1 : Mask[1]) << 2;
10399 Imm |= (Mask[2] < 0 ? 2 : Mask[2]) << 4;
10400 Imm |= (Mask[3] < 0 ? 3 : Mask[3]) << 6;
10401 return Imm;
10402}
10403
10404static SDValue getV4X86ShuffleImm8ForMask(ArrayRef<int> Mask, const SDLoc &DL,
10405 SelectionDAG &DAG) {
10406 return DAG.getTargetConstant(getV4X86ShuffleImm(Mask), DL, MVT::i8);
10407}
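As an editorial worked example (not from the LLVM sources), the 2-bits-per-lane immediate built by getV4X86ShuffleImm for the mask <3,1,2,0> is 0b00'10'01'11 = 0x27; undef (-1) lanes default to keeping their own position, matching the helper above.

#include <array>
#include <cassert>
#include <cstdio>

static unsigned v4ShuffleImm(const std::array<int, 4> &Mask) {
  unsigned Imm = 0;
  for (int i = 0; i != 4; ++i) {
    assert(Mask[i] >= -1 && Mask[i] < 4 && "Out of bound mask element!");
    Imm |= (unsigned)(Mask[i] < 0 ? i : Mask[i]) << (2 * i); // undef keeps lane i
  }
  return Imm;
}

int main() {
  printf("%#x\n", v4ShuffleImm({3, 1, 2, 0}));   // 0x27
  printf("%#x\n", v4ShuffleImm({-1, -1, 2, 3})); // 0xe4, the identity encoding
  return 0;
}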
10408
10409/// Compute whether each element of a shuffle is zeroable.
10410///
10411/// A "zeroable" vector shuffle element is one which can be lowered to zero.
10412/// Either it is an undef element in the shuffle mask, the element of the input
10413/// referenced is undef, or the element of the input referenced is known to be
10414/// zero. Many x86 shuffles can zero lanes cheaply and we often want to handle
10415/// as many lanes with this technique as possible to simplify the remaining
10416/// shuffle.
10417static APInt computeZeroableShuffleElements(ArrayRef<int> Mask,
10418 SDValue V1, SDValue V2) {
10419 APInt Zeroable(Mask.size(), 0);
10420 V1 = peekThroughBitcasts(V1);
10421 V2 = peekThroughBitcasts(V2);
10422
10423 bool V1IsZero = ISD::isBuildVectorAllZeros(V1.getNode());
10424 bool V2IsZero = ISD::isBuildVectorAllZeros(V2.getNode());
10425
10426 int VectorSizeInBits = V1.getValueSizeInBits();
10427 int ScalarSizeInBits = VectorSizeInBits / Mask.size();
10428   assert(!(VectorSizeInBits % ScalarSizeInBits) && "Illegal shuffle mask size");
10429
10430 for (int i = 0, Size = Mask.size(); i < Size; ++i) {
10431 int M = Mask[i];
10432 // Handle the easy cases.
10433 if (M < 0 || (M >= 0 && M < Size && V1IsZero) || (M >= Size && V2IsZero)) {
10434 Zeroable.setBit(i);
10435 continue;
10436 }
10437
10438 // Determine shuffle input and normalize the mask.
10439 SDValue V = M < Size ? V1 : V2;
10440 M %= Size;
10441
10442 // Currently we can only search BUILD_VECTOR for UNDEF/ZERO elements.
10443 if (V.getOpcode() != ISD::BUILD_VECTOR)
10444 continue;
10445
10446    // If the BUILD_VECTOR has fewer elements than the mask, then the bitcasted
10447    // portion of the (larger) source element must be UNDEF/ZERO.
10448 if ((Size % V.getNumOperands()) == 0) {
10449 int Scale = Size / V->getNumOperands();
10450 SDValue Op = V.getOperand(M / Scale);
10451 if (Op.isUndef() || X86::isZeroNode(Op))
10452 Zeroable.setBit(i);
10453 else if (ConstantSDNode *Cst = dyn_cast<ConstantSDNode>(Op)) {
10454 APInt Val = Cst->getAPIntValue();
10455 Val.lshrInPlace((M % Scale) * ScalarSizeInBits);
10456 Val = Val.getLoBits(ScalarSizeInBits);
10457 if (Val == 0)
10458 Zeroable.setBit(i);
10459 } else if (ConstantFPSDNode *Cst = dyn_cast<ConstantFPSDNode>(Op)) {
10460 APInt Val = Cst->getValueAPF().bitcastToAPInt();
10461 Val.lshrInPlace((M % Scale) * ScalarSizeInBits);
10462 Val = Val.getLoBits(ScalarSizeInBits);
10463 if (Val == 0)
10464 Zeroable.setBit(i);
10465 }
10466 continue;
10467 }
10468
10469    // If the BUILD_VECTOR has more elements than the mask, then all of the
10470    // (smaller) source elements must be UNDEF or ZERO.
10471 if ((V.getNumOperands() % Size) == 0) {
10472 int Scale = V->getNumOperands() / Size;
10473 bool AllZeroable = true;
10474 for (int j = 0; j < Scale; ++j) {
10475 SDValue Op = V.getOperand((M * Scale) + j);
10476 AllZeroable &= (Op.isUndef() || X86::isZeroNode(Op));
10477 }
10478 if (AllZeroable)
10479 Zeroable.setBit(i);
10480 continue;
10481 }
10482 }
10483
10484 return Zeroable;
10485}
10486
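A minimal sketch (plain C++, outside LLVM) of the "easy cases" handled at the top of the loop above: an element is zeroable if its mask entry is undef or it reads from an input known to be all zeros. The BUILD_VECTOR constant-folding paths are deliberately omitted and the names are illustrative only.

#include <cstdint>
#include <cstdio>
#include <vector>

static uint64_t computeZeroableBits(const std::vector<int> &Mask,
                                    bool V1IsZero, bool V2IsZero) {
  uint64_t Zeroable = 0;
  int Size = (int)Mask.size();
  for (int i = 0; i < Size; ++i) {
    int M = Mask[i];
    if (M < 0 || (M < Size && V1IsZero) || (M >= Size && V2IsZero))
      Zeroable |= 1ull << i;  // undef lane, or lane read from an all-zero input
  }
  return Zeroable;
}

int main() {
  // v4i32 shuffle <0, 5, -1, 7> with V2 == zeroinitializer:
  // lanes 1, 2 and 3 are zeroable -> bitmask 0b1110 (0xE).
  std::printf("0x%llX\n",
              (unsigned long long)computeZeroableBits({0, 5, -1, 7},
                                                      /*V1IsZero=*/false,
                                                      /*V2IsZero=*/true));
}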
10487// The shuffle result is as follows:
10488// 0*a[0]0*a[1]...0*a[n], n >= 0, where the a[] elements are in ascending order.
10489// Each element of Zeroable corresponds to a particular element of Mask, as
10490// described in the computeZeroableShuffleElements function.
10491//
10492// The function looks for a sub-mask whose non-zero elements are in increasing
10493// order. If such a sub-mask exists, the function returns true.
10494static bool isNonZeroElementsInOrder(const APInt &Zeroable,
10495 ArrayRef<int> Mask, const EVT &VectorType,
10496 bool &IsZeroSideLeft) {
10497 int NextElement = -1;
10498 // Check if the Mask's nonzero elements are in increasing order.
10499 for (int i = 0, e = Mask.size(); i < e; i++) {
10500    // Check that the mask's zero elements are built from only zeros.
10501    assert(Mask[i] >= -1 && "Out of bound mask element!");
10502 if (Mask[i] < 0)
10503 return false;
10504 if (Zeroable[i])
10505 continue;
10506    // Find the lowest non-zero element.
10507 if (NextElement < 0) {
10508 NextElement = Mask[i] != 0 ? VectorType.getVectorNumElements() : 0;
10509 IsZeroSideLeft = NextElement != 0;
10510 }
10511    // Exit if the mask's non-zero elements are not in increasing order.
10512 if (NextElement != Mask[i])
10513 return false;
10514 NextElement++;
10515 }
10516 return true;
10517}
10518
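A standalone sketch (plain C++, outside LLVM) of the ordering check above: ignoring zeroable lanes, the remaining mask entries must count up consecutively, starting at 0 (expanding V1) or at NumElts (expanding V2). Undef entries make the match fail, as in the function. Names are illustrative only.

#include <cstdint>
#include <cstdio>
#include <vector>

static bool nonZeroElementsInOrder(uint64_t Zeroable,
                                   const std::vector<int> &Mask, int NumElts,
                                   bool &IsZeroSideLeft) {
  int Next = -1;
  for (int i = 0, e = (int)Mask.size(); i < e; ++i) {
    if (Mask[i] < 0)
      return false;              // undef lanes are rejected, as above
    if (Zeroable & (1ull << i))
      continue;                  // zeroable lanes may hold anything
    if (Next < 0) {              // the first non-zero lane picks the source
      Next = Mask[i] != 0 ? NumElts : 0;
      IsZeroSideLeft = Next != 0;
    }
    if (Mask[i] != Next)         // later lanes must count up by one
      return false;
    ++Next;
  }
  return true;
}

int main() {
  // v4i32 mask <0, 4, 1, 5> with V2 == zero: lanes 1 and 3 are zeroable and
  // the remaining entries are 0, 1 in order, so this is an expand of V1.
  bool ZeroLeft = false;
  std::printf("%d\n", nonZeroElementsInOrder(0b1010, {0, 4, 1, 5}, 4, ZeroLeft));
}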
10519/// Try to lower a shuffle with a single PSHUFB of V1 or V2.
10520static SDValue lowerShuffleWithPSHUFB(const SDLoc &DL, MVT VT,
10521 ArrayRef<int> Mask, SDValue V1,
10522 SDValue V2, const APInt &Zeroable,
10523 const X86Subtarget &Subtarget,
10524 SelectionDAG &DAG) {
10525 int Size = Mask.size();
10526 int LaneSize = 128 / VT.getScalarSizeInBits();
10527 const int NumBytes = VT.getSizeInBits() / 8;
10528 const int NumEltBytes = VT.getScalarSizeInBits() / 8;
10529
10530  assert((Subtarget.hasSSSE3() && VT.is128BitVector()) ||
10531         (Subtarget.hasAVX2() && VT.is256BitVector()) ||
10532         (Subtarget.hasBWI() && VT.is512BitVector()));
10533
10534 SmallVector<SDValue, 64> PSHUFBMask(NumBytes);
10535 // Sign bit set in i8 mask means zero element.
10536 SDValue ZeroMask = DAG.getConstant(0x80, DL, MVT::i8);
10537
10538 SDValue V;
10539 for (int i = 0; i < NumBytes; ++i) {
10540 int M = Mask[i / NumEltBytes];
10541 if (M < 0) {
10542 PSHUFBMask[i] = DAG.getUNDEF(MVT::i8);
10543 continue;
10544 }
10545 if (Zeroable[i / NumEltBytes]) {
10546 PSHUFBMask[i] = ZeroMask;
10547 continue;
10548 }
10549
10550 // We can only use a single input of V1 or V2.
10551 SDValue SrcV = (M >= Size ? V2 : V1);
10552 if (V && V != SrcV)
10553 return SDValue();
10554 V = SrcV;
10555 M %= Size;
10556
10557 // PSHUFB can't cross lanes, ensure this doesn't happen.
10558 if ((M / LaneSize) != ((i / NumEltBytes) / LaneSize))
10559 return SDValue();
10560
10561 M = M % LaneSize;
10562 M = M * NumEltBytes + (i % NumEltBytes);
10563 PSHUFBMask[i] = DAG.getConstant(M, DL, MVT::i8);
10564 }
10565  assert(V && "Failed to find a source input");
10566
10567 MVT I8VT = MVT::getVectorVT(MVT::i8, NumBytes);
10568 return DAG.getBitcast(
10569 VT, DAG.getNode(X86ISD::PSHUFB, DL, I8VT, DAG.getBitcast(I8VT, V),
10570 DAG.getBuildVector(I8VT, DL, PSHUFBMask)));
10571}
10572
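A byte-level sketch (plain C++, outside LLVM) of how the PSHUFB control bytes are formed for a single-input shuffle: each element index expands to its constituent byte indices, and a byte with the sign bit set (0x80) produces zero. The undef/zeroable distinction and the cross-lane check above are folded into one "zero this byte" case here for brevity; names are illustrative only.

#include <cstdint>
#include <cstdio>
#include <vector>

static std::vector<uint8_t> buildPshufbMask(const std::vector<int> &Mask,
                                            int EltBytes) {
  std::vector<uint8_t> Bytes;
  for (int M : Mask)
    for (int b = 0; b < EltBytes; ++b)
      Bytes.push_back(M < 0 ? 0x80 : uint8_t(M * EltBytes + b));
  return Bytes;
}

int main() {
  // v8i16 shuffle <3, 2, 1, 0, -1, -1, -1, -1> (reverse low half, zero high):
  // bytes 6,7, 4,5, 2,3, 0,1 followed by eight 0x80 zeroing bytes.
  for (uint8_t B : buildPshufbMask({3, 2, 1, 0, -1, -1, -1, -1}, 2))
    std::printf("%02X ", B);
  std::printf("\n");
}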
10573static SDValue getMaskNode(SDValue Mask, MVT MaskVT,
10574 const X86Subtarget &Subtarget, SelectionDAG &DAG,
10575 const SDLoc &dl);
10576
10577// X86 has a dedicated shuffle that can be lowered to VEXPAND.
10578static SDValue lowerShuffleToEXPAND(const SDLoc &DL, MVT VT,
10579 const APInt &Zeroable,
10580 ArrayRef<int> Mask, SDValue &V1,
10581 SDValue &V2, SelectionDAG &DAG,
10582 const X86Subtarget &Subtarget) {
10583 bool IsLeftZeroSide = true;
10584 if (!isNonZeroElementsInOrder(Zeroable, Mask, V1.getValueType(),
10585 IsLeftZeroSide))
10586 return SDValue();
10587 unsigned VEXPANDMask = (~Zeroable).getZExtValue();
10588 MVT IntegerType =
10589 MVT::getIntegerVT(std::max((int)VT.getVectorNumElements(), 8));
10590 SDValue MaskNode = DAG.getConstant(VEXPANDMask, DL, IntegerType);
10591 unsigned NumElts = VT.getVectorNumElements();
10592  assert((NumElts == 4 || NumElts == 8 || NumElts == 16) &&
10593         "Unexpected number of vector elements");
10594 SDValue VMask = getMaskNode(MaskNode, MVT::getVectorVT(MVT::i1, NumElts),
10595 Subtarget, DAG, DL);
10596 SDValue ZeroVector = getZeroVector(VT, Subtarget, DAG, DL);
10597 SDValue ExpandedVector = IsLeftZeroSide ? V2 : V1;
10598 return DAG.getNode(X86ISD::EXPAND, DL, VT, ExpandedVector, ZeroVector, VMask);
10599}
10600
10601static bool matchVectorShuffleWithUNPCK(MVT VT, SDValue &V1, SDValue &V2,
10602 unsigned &UnpackOpcode, bool IsUnary,
10603 ArrayRef<int> TargetMask,
10604 const SDLoc &DL, SelectionDAG &DAG,
10605 const X86Subtarget &Subtarget) {
10606 int NumElts = VT.getVectorNumElements();
10607
10608 bool Undef1 = true, Undef2 = true, Zero1 = true, Zero2 = true;
10609 for (int i = 0; i != NumElts; i += 2) {
10610 int M1 = TargetMask[i + 0];
10611 int M2 = TargetMask[i + 1];
10612 Undef1 &= (SM_SentinelUndef == M1);
10613 Undef2 &= (SM_SentinelUndef == M2);
10614 Zero1 &= isUndefOrZero(M1);
10615 Zero2 &= isUndefOrZero(M2);
10616 }
10617  assert(!((Undef1 || Zero1) && (Undef2 || Zero2)) &&
10618         "Zeroable shuffle detected");
10619
10620 // Attempt to match the target mask against the unpack lo/hi mask patterns.
10621 SmallVector<int, 64> Unpckl, Unpckh;
10622 createUnpackShuffleMask(VT, Unpckl, /* Lo = */ true, IsUnary);
10623 if (isTargetShuffleEquivalent(TargetMask, Unpckl)) {
10624 UnpackOpcode = X86ISD::UNPCKL;
10625 V2 = (Undef2 ? DAG.getUNDEF(VT) : (IsUnary ? V1 : V2));
10626 V1 = (Undef1 ? DAG.getUNDEF(VT) : V1);
10627 return true;
10628 }
10629
10630 createUnpackShuffleMask(VT, Unpckh, /* Lo = */ false, IsUnary);
10631 if (isTargetShuffleEquivalent(TargetMask, Unpckh)) {
10632 UnpackOpcode = X86ISD::UNPCKH;
10633 V2 = (Undef2 ? DAG.getUNDEF(VT) : (IsUnary ? V1 : V2));
10634 V1 = (Undef1 ? DAG.getUNDEF(VT) : V1);
10635 return true;
10636 }
10637
10638  // If a unary shuffle, attempt to match as an unpack lo/hi with zero.
10639 if (IsUnary && (Zero1 || Zero2)) {
10640 // Don't bother if we can blend instead.
10641 if ((Subtarget.hasSSE41() || VT == MVT::v2i64 || VT == MVT::v2f64) &&
10642 isSequentialOrUndefOrZeroInRange(TargetMask, 0, NumElts, 0))
10643 return false;
10644
10645 bool MatchLo = true, MatchHi = true;
10646 for (int i = 0; (i != NumElts) && (MatchLo || MatchHi); ++i) {
10647 int M = TargetMask[i];
10648
10649 // Ignore if the input is known to be zero or the index is undef.
10650 if ((((i & 1) == 0) && Zero1) || (((i & 1) == 1) && Zero2) ||
10651 (M == SM_SentinelUndef))
10652 continue;
10653
10654 MatchLo &= (M == Unpckl[i]);
10655 MatchHi &= (M == Unpckh[i]);
10656 }
10657
10658 if (MatchLo || MatchHi) {
10659 UnpackOpcode = MatchLo ? X86ISD::UNPCKL : X86ISD::UNPCKH;
10660 V2 = Zero2 ? getZeroVector(VT, Subtarget, DAG, DL) : V1;
10661 V1 = Zero1 ? getZeroVector(VT, Subtarget, DAG, DL) : V1;
10662 return true;
10663 }
10664 }
10665
10666 // If a binary shuffle, commute and try again.
10667 if (!IsUnary) {
10668 ShuffleVectorSDNode::commuteMask(Unpckl);
10669 if (isTargetShuffleEquivalent(TargetMask, Unpckl)) {
10670 UnpackOpcode = X86ISD::UNPCKL;
10671 std::swap(V1, V2);
10672 return true;
10673 }
10674
10675 ShuffleVectorSDNode::commuteMask(Unpckh);
10676 if (isTargetShuffleEquivalent(TargetMask, Unpckh)) {
10677 UnpackOpcode = X86ISD::UNPCKH;
10678 std::swap(V1, V2);
10679 return true;
10680 }
10681 }
10682
10683 return false;
10684}
10685
10686// X86 has dedicated unpack instructions that can handle specific blend
10687// operations: UNPCKH and UNPCKL.
10688static SDValue lowerShuffleWithUNPCK(const SDLoc &DL, MVT VT,
10689 ArrayRef<int> Mask, SDValue V1, SDValue V2,
10690 SelectionDAG &DAG) {
10691 SmallVector<int, 8> Unpckl;
10692 createUnpackShuffleMask(VT, Unpckl, /* Lo = */ true, /* Unary = */ false);
10693 if (isShuffleEquivalent(V1, V2, Mask, Unpckl))
10694 return DAG.getNode(X86ISD::UNPCKL, DL, VT, V1, V2);
10695
10696 SmallVector<int, 8> Unpckh;
10697 createUnpackShuffleMask(VT, Unpckh, /* Lo = */ false, /* Unary = */ false);
10698 if (isShuffleEquivalent(V1, V2, Mask, Unpckh))
10699 return DAG.getNode(X86ISD::UNPCKH, DL, VT, V1, V2);
10700
10701 // Commute and try again.
10702 ShuffleVectorSDNode::commuteMask(Unpckl);
10703 if (isShuffleEquivalent(V1, V2, Mask, Unpckl))
10704 return DAG.getNode(X86ISD::UNPCKL, DL, VT, V2, V1);
10705
10706 ShuffleVectorSDNode::commuteMask(Unpckh);
10707 if (isShuffleEquivalent(V1, V2, Mask, Unpckh))
10708 return DAG.getNode(X86ISD::UNPCKH, DL, VT, V2, V1);
10709
10710 return SDValue();
10711}
10712
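A standalone sketch (plain C++, outside LLVM) of the reference masks this lowering compares against, equivalent to createUnpackShuffleMask for a single 128-bit lane; wider vectors repeat the same pattern per lane. The helper name is illustrative only.

#include <cstdio>
#include <vector>

static std::vector<int> unpackMask(int NumElts, bool Lo, bool Unary) {
  std::vector<int> Mask;
  int Base = Lo ? 0 : NumElts / 2;                    // low or high half
  for (int i = 0; i < NumElts / 2; ++i) {
    Mask.push_back(Base + i);                         // element from V1
    Mask.push_back(Base + i + (Unary ? 0 : NumElts)); // element from V2 (or V1)
  }
  return Mask;
}

int main() {
  // Binary UNPCKL for v8i16: <0, 8, 1, 9, 2, 10, 3, 11>.
  for (int M : unpackMask(8, /*Lo=*/true, /*Unary=*/false))
    std::printf("%d ", M);
  std::printf("\n");
}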
10713static bool matchVectorShuffleAsVPMOV(ArrayRef<int> Mask, bool SwappedOps,
10714 int Delta) {
10715 int Size = (int)Mask.size();
10716 int Split = Size / Delta;
10717 int TruncatedVectorStart = SwappedOps ? Size : 0;
10718
10719 // Match for mask starting with e.g.: <8, 10, 12, 14,... or <0, 2, 4, 6,...
10720 if (!isSequentialOrUndefInRange(Mask, 0, Split, TruncatedVectorStart, Delta))
10721 return false;
10722
10723 // The rest of the mask should not refer to the truncated vector's elements.
10724 if (isAnyInRange(Mask.slice(Split, Size - Split), TruncatedVectorStart,
10725 TruncatedVectorStart + Size))
10726 return false;
10727
10728 return true;
10729}
10730
10731// Try to lower trunc+vector_shuffle to a vpmovdb or a vpmovdw instruction.
10732//
10733// An example is the following:
10734//
10735// t0: ch = EntryToken
10736// t2: v4i64,ch = CopyFromReg t0, Register:v4i64 %0
10737// t25: v4i32 = truncate t2
10738// t41: v8i16 = bitcast t25
10739// t21: v8i16 = BUILD_VECTOR undef:i16, undef:i16, undef:i16, undef:i16,
10740// Constant:i16<0>, Constant:i16<0>, Constant:i16<0>, Constant:i16<0>
10741// t51: v8i16 = vector_shuffle<0,2,4,6,12,13,14,15> t41, t21
10742// t18: v2i64 = bitcast t51
10743//
10744// Without avx512vl, this is lowered to:
10745//
10746// vpmovqd %zmm0, %ymm0
10747// vpshufb {{.*#+}} xmm0 =
10748// xmm0[0,1,4,5,8,9,12,13],zero,zero,zero,zero,zero,zero,zero,zero
10749//
10750// But when avx512vl is available, one can just use a single vpmovdw
10751// instruction.
10752static SDValue lowerShuffleWithVPMOV(const SDLoc &DL, ArrayRef<int> Mask,
10753 MVT VT, SDValue V1, SDValue V2,
10754 SelectionDAG &DAG,
10755 const X86Subtarget &Subtarget) {
10756 if (VT != MVT::v16i8 && VT != MVT::v8i16)
10757 return SDValue();
10758
10759 if (Mask.size() != VT.getVectorNumElements())
10760 return SDValue();
10761
10762 bool SwappedOps = false;
10763
10764 if (!ISD::isBuildVectorAllZeros(V2.getNode())) {
10765 if (!ISD::isBuildVectorAllZeros(V1.getNode()))
10766 return SDValue();
10767
10768 std::swap(V1, V2);
10769 SwappedOps = true;
10770 }
10771
10772 // Look for:
10773 //
10774 // bitcast (truncate <8 x i32> %vec to <8 x i16>) to <16 x i8>
10775 // bitcast (truncate <4 x i64> %vec to <4 x i32>) to <8 x i16>
10776 //
10777 // and similar ones.
10778 if (V1.getOpcode() != ISD::BITCAST)
10779 return SDValue();
10780 if (V1.getOperand(0).getOpcode() != ISD::TRUNCATE)
10781 return SDValue();
10782
10783 SDValue Src = V1.getOperand(0).getOperand(0);
10784 MVT SrcVT = Src.getSimpleValueType();
10785
10786 // The vptrunc** instructions truncating 128 bit and 256 bit vectors
10787 // are only available with avx512vl.
10788 if (!SrcVT.is512BitVector() && !Subtarget.hasVLX())
10789 return SDValue();
10790
10791  // Down-converting word to byte is only available with avx512bw. The case with
10792  // a 256-bit output doesn't contain a shuffle and is therefore not handled here.
10793 if (SrcVT.getVectorElementType() == MVT::i16 && VT == MVT::v16i8 &&
10794 !Subtarget.hasBWI())
10795 return SDValue();
10796
10797 // The first half/quarter of the mask should refer to every second/fourth
10798 // element of the vector truncated and bitcasted.
10799 if (!matchVectorShuffleAsVPMOV(Mask, SwappedOps, 2) &&
10800 !matchVectorShuffleAsVPMOV(Mask, SwappedOps, 4))
10801 return SDValue();
10802
10803 return DAG.getNode(X86ISD::VTRUNC, DL, VT, Src);
10804}
10805
10806// X86 has dedicated pack instructions that can handle specific truncation
10807// operations: PACKSS and PACKUS.
10808static bool matchVectorShuffleWithPACK(MVT VT, MVT &SrcVT, SDValue &V1,
10809 SDValue &V2, unsigned &PackOpcode,
10810 ArrayRef<int> TargetMask,
10811 SelectionDAG &DAG,
10812 const X86Subtarget &Subtarget) {
10813 unsigned NumElts = VT.getVectorNumElements();
10814 unsigned BitSize = VT.getScalarSizeInBits();
10815 MVT PackSVT = MVT::getIntegerVT(BitSize * 2);
10816 MVT PackVT = MVT::getVectorVT(PackSVT, NumElts / 2);
10817
10818 auto MatchPACK = [&](SDValue N1, SDValue N2) {
10819 SDValue VV1 = DAG.getBitcast(PackVT, N1);
10820 SDValue VV2 = DAG.getBitcast(PackVT, N2);
10821 if (Subtarget.hasSSE41() || PackSVT == MVT::i16) {
10822 APInt ZeroMask = APInt::getHighBitsSet(BitSize * 2, BitSize);
10823 if ((N1.isUndef() || DAG.MaskedValueIsZero(VV1, ZeroMask)) &&
10824 (N2.isUndef() || DAG.MaskedValueIsZero(VV2, ZeroMask))) {
10825 V1 = VV1;
10826 V2 = VV2;
10827 SrcVT = PackVT;
10828 PackOpcode = X86ISD::PACKUS;
10829 return true;
10830 }
10831 }
10832 if ((N1.isUndef() || DAG.ComputeNumSignBits(VV1) > BitSize) &&
10833 (N2.isUndef() || DAG.ComputeNumSignBits(VV2) > BitSize)) {
10834 V1 = VV1;
10835 V2 = VV2;
10836 SrcVT = PackVT;
10837 PackOpcode = X86ISD::PACKSS;
10838 return true;
10839 }
10840 return false;
10841 };
10842
10843 // Try binary shuffle.
10844 SmallVector<int, 32> BinaryMask;
10845 createPackShuffleMask(VT, BinaryMask, false);
10846 if (isTargetShuffleEquivalent(TargetMask, BinaryMask, V1, V2))
10847 if (MatchPACK(V1, V2))
10848 return true;
10849
10850 // Try unary shuffle.
10851 SmallVector<int, 32> UnaryMask;
10852 createPackShuffleMask(VT, UnaryMask, true);
10853 if (isTargetShuffleEquivalent(TargetMask, UnaryMask, V1))
10854 if (MatchPACK(V1, V1))
10855 return true;
10856
10857 return false;
10858}
10859
10860static SDValue lowerShuffleWithPACK(const SDLoc &DL, MVT VT, ArrayRef<int> Mask,
10861 SDValue V1, SDValue V2, SelectionDAG &DAG,
10862 const X86Subtarget &Subtarget) {
10863 MVT PackVT;
10864 unsigned PackOpcode;
10865 if (matchVectorShuffleWithPACK(VT, PackVT, V1, V2, PackOpcode, Mask, DAG,
10866 Subtarget))
10867 return DAG.getNode(PackOpcode, DL, VT, DAG.getBitcast(PackVT, V1),
10868 DAG.getBitcast(PackVT, V2));
10869
10870 return SDValue();
10871}
10872
10873/// Try to emit a bitmask instruction for a shuffle.
10874///
10875/// This handles cases where we can model a blend exactly as a bitmask due to
10876/// one of the inputs being zeroable.
10877static SDValue lowerShuffleAsBitMask(const SDLoc &DL, MVT VT, SDValue V1,
10878 SDValue V2, ArrayRef<int> Mask,
10879 const APInt &Zeroable,
10880 const X86Subtarget &Subtarget,
10881 SelectionDAG &DAG) {
10882 MVT MaskVT = VT;
10883 MVT EltVT = VT.getVectorElementType();
10884 SDValue Zero, AllOnes;
10885 // Use f64 if i64 isn't legal.
10886 if (EltVT == MVT::i64 && !Subtarget.is64Bit()) {
10887 EltVT = MVT::f64;
10888 MaskVT = MVT::getVectorVT(EltVT, Mask.size());
10889 }
10890
10891 MVT LogicVT = VT;
10892 if (EltVT == MVT::f32 || EltVT == MVT::f64) {
10893 Zero = DAG.getConstantFP(0.0, DL, EltVT);
10894 AllOnes = DAG.getConstantFP(
10895 APFloat::getAllOnesValue(EltVT.getSizeInBits(), true), DL, EltVT);
10896 LogicVT =
10897 MVT::getVectorVT(EltVT == MVT::f64 ? MVT::i64 : MVT::i32, Mask.size());
10898 } else {
10899 Zero = DAG.getConstant(0, DL, EltVT);
10900 AllOnes = DAG.getAllOnesConstant(DL, EltVT);
10901 }
10902
10903 SmallVector<SDValue, 16> VMaskOps(Mask.size(), Zero);
10904 SDValue V;
10905 for (int i = 0, Size = Mask.size(); i < Size; ++i) {
10906 if (Zeroable[i])
10907 continue;
10908 if (Mask[i] % Size != i)
10909 return SDValue(); // Not a blend.
10910 if (!V)
10911 V = Mask[i] < Size ? V1 : V2;
10912 else if (V != (Mask[i] < Size ? V1 : V2))
10913 return SDValue(); // Can only let one input through the mask.
10914
10915 VMaskOps[i] = AllOnes;
10916 }
10917 if (!V)
10918 return SDValue(); // No non-zeroable elements!
10919
10920 SDValue VMask = DAG.getBuildVector(MaskVT, DL, VMaskOps);
10921 VMask = DAG.getBitcast(LogicVT, VMask);
10922 V = DAG.getBitcast(LogicVT, V);
10923 SDValue And = DAG.getNode(ISD::AND, DL, LogicVT, V, VMask);
10924 return DAG.getBitcast(VT, And);
10925}
10926
10927/// Try to emit a blend instruction for a shuffle using bit math.
10928///
10929/// This is used as a fallback approach when first class blend instructions are
10930/// unavailable. Currently it is only suitable for integer vectors, but could
10931/// be generalized for floating point vectors if desirable.
10932static SDValue lowerShuffleAsBitBlend(const SDLoc &DL, MVT VT, SDValue V1,
10933 SDValue V2, ArrayRef<int> Mask,
10934 SelectionDAG &DAG) {
10935  assert(VT.isInteger() && "Only supports integer vector types!");
10936 MVT EltVT = VT.getVectorElementType();
10937 SDValue Zero = DAG.getConstant(0, DL, EltVT);
10938 SDValue AllOnes = DAG.getAllOnesConstant(DL, EltVT);
10939 SmallVector<SDValue, 16> MaskOps;
10940 for (int i = 0, Size = Mask.size(); i < Size; ++i) {
10941 if (Mask[i] >= 0 && Mask[i] != i && Mask[i] != i + Size)
10942 return SDValue(); // Shuffled input!
10943 MaskOps.push_back(Mask[i] < Size ? AllOnes : Zero);
10944 }
10945
10946 SDValue V1Mask = DAG.getBuildVector(VT, DL, MaskOps);
10947 V1 = DAG.getNode(ISD::AND, DL, VT, V1, V1Mask);
10948 V2 = DAG.getNode(X86ISD::ANDNP, DL, VT, V1Mask, V2);
10949 return DAG.getNode(ISD::OR, DL, VT, V1, V2);
10950}
10951
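A scalar sketch (plain C++, outside LLVM) of the bit math above: with a per-lane mask that is all-ones where the result takes V1 and zero where it takes V2, the blend is (V1 & M) | (~M & V2), i.e. the AND/ANDNP/OR sequence emitted by the function. Scalars stand in for vector lanes and the names are illustrative only.

#include <cstdint>
#include <cstdio>

static uint16_t bitBlendLane(uint16_t A, uint16_t B, bool TakeA) {
  uint16_t M = TakeA ? 0xFFFF : 0x0000; // build-vector of 0 / -1 per lane
  return (A & M) | (uint16_t)(~M & B);  // AND + ANDN + OR
}

int main() {
  // Blend mask <0, 9, 2, 11>: even lanes from V1, odd lanes from V2.
  uint16_t V1[4] = {0x1111, 0x2222, 0x3333, 0x4444};
  uint16_t V2[4] = {0xAAAA, 0xBBBB, 0xCCCC, 0xDDDD};
  for (int i = 0; i < 4; ++i)
    std::printf("%04X ", bitBlendLane(V1[i], V2[i], (i % 2) == 0));
  std::printf("\n"); // prints 1111 BBBB 3333 DDDD
}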
10952static SDValue getVectorMaskingNode(SDValue Op, SDValue Mask,
10953 SDValue PreservedSrc,
10954 const X86Subtarget &Subtarget,
10955 SelectionDAG &DAG);
10956
10957static bool matchVectorShuffleAsBlend(SDValue V1, SDValue V2,
10958 MutableArrayRef<int> Mask,
10959 const APInt &Zeroable, bool &ForceV1Zero,
10960 bool &ForceV2Zero, uint64_t &BlendMask) {
10961 bool V1IsZeroOrUndef =
10962 V1.isUndef() || ISD::isBuildVectorAllZeros(V1.getNode());
10963 bool V2IsZeroOrUndef =
10964 V2.isUndef() || ISD::isBuildVectorAllZeros(V2.getNode());
10965
10966 BlendMask = 0;
10967 ForceV1Zero = false, ForceV2Zero = false;
10968  assert(Mask.size() <= 64 && "Shuffle mask too big for blend mask");
10969
10970 // Attempt to generate the binary blend mask. If an input is zero then
10971 // we can use any lane.
10972 for (int i = 0, Size = Mask.size(); i < Size; ++i) {
10973 int M = Mask[i];
10974 if (M == SM_SentinelUndef)
10975 continue;
10976 if (M == i)
10977 continue;
10978 if (M == i + Size) {
10979 BlendMask |= 1ull << i;
10980 continue;
10981 }
10982 if (Zeroable[i]) {
10983 if (V1IsZeroOrUndef) {
10984 ForceV1Zero = true;
10985 Mask[i] = i;
10986 continue;
10987 }
10988 if (V2IsZeroOrUndef) {
10989 ForceV2Zero = true;
10990 BlendMask |= 1ull << i;
10991 Mask[i] = i + Size;
10992 continue;
10993 }
10994 }
10995 return false;
10996 }
10997 return true;
10998}
10999
11000static uint64_t scaleVectorShuffleBlendMask(uint64_t BlendMask, int Size,
11001 int Scale) {
11002 uint64_t ScaledMask = 0;
11003 for (int i = 0; i != Size; ++i)
11004 if (BlendMask & (1ull << i))
11005 ScaledMask |= ((1ull << Scale) - 1) << (i * Scale);
11006 return ScaledMask;
11007}
11008
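A quick standalone check (plain C++, outside LLVM) of the scaling above: each selected element bit is widened into Scale consecutive bits, which is how an element-level blend mask becomes the byte-level mask used by the byte-blend paths below.

#include <cstdint>
#include <cstdio>

static uint64_t scaleBlendMask(uint64_t BlendMask, int Size, int Scale) {
  uint64_t Scaled = 0;
  for (int i = 0; i != Size; ++i)
    if (BlendMask & (1ull << i))
      Scaled |= ((1ull << Scale) - 1) << (i * Scale);
  return Scaled;
}

int main() {
  // v4i32 blend mask 0b0101 scaled to bytes (Scale = 4): prints 0xF0F.
  std::printf("0x%llX\n", (unsigned long long)scaleBlendMask(0b0101, 4, 4));
}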
11009/// Try to emit a blend instruction for a shuffle.
11010///
11011/// This doesn't do any checks for the availability of instructions for blending
11012/// these values. It relies on the availability of the X86ISD::BLENDI pattern to
11013/// be matched in the backend with the type given. What it does check for is
11014/// that the shuffle mask is a blend, or convertible into a blend with zero.
11015static SDValue lowerShuffleAsBlend(const SDLoc &DL, MVT VT, SDValue V1,
11016 SDValue V2, ArrayRef<int> Original,
11017 const APInt &Zeroable,
11018 const X86Subtarget &Subtarget,
11019 SelectionDAG &DAG) {
11020 uint64_t BlendMask = 0;
11021 bool ForceV1Zero = false, ForceV2Zero = false;
11022 SmallVector<int, 64> Mask(Original.begin(), Original.end());
11023 if (!matchVectorShuffleAsBlend(V1, V2, Mask, Zeroable, ForceV1Zero, ForceV2Zero,
11024 BlendMask))
11025 return SDValue();
11026
11027 // Create a REAL zero vector - ISD::isBuildVectorAllZeros allows UNDEFs.
11028 if (ForceV1Zero)
11029 V1 = getZeroVector(VT, Subtarget, DAG, DL);
11030 if (ForceV2Zero)
11031 V2 = getZeroVector(VT, Subtarget, DAG, DL);
11032
11033 switch (VT.SimpleTy) {
11034 case MVT::v4i64:
11035 case MVT::v8i32:
11036    assert(Subtarget.hasAVX2() && "256-bit integer blends require AVX2!");
11037    LLVM_FALLTHROUGH;
11038 case MVT::v4f64:
11039 case MVT::v8f32:
11040    assert(Subtarget.hasAVX() && "256-bit float blends require AVX!");
11041    LLVM_FALLTHROUGH;
11042 case MVT::v2f64:
11043 case MVT::v2i64:
11044 case MVT::v4f32:
11045 case MVT::v4i32:
11046 case MVT::v8i16:
11047    assert(Subtarget.hasSSE41() && "128-bit blends require SSE41!");
11048 return DAG.getNode(X86ISD::BLENDI, DL, VT, V1, V2,
11049 DAG.getTargetConstant(BlendMask, DL, MVT::i8));
11050 case MVT::v16i16: {
11051    assert(Subtarget.hasAVX2() && "v16i16 blends require AVX2!");
11052 SmallVector<int, 8> RepeatedMask;
11053 if (is128BitLaneRepeatedShuffleMask(MVT::v16i16, Mask, RepeatedMask)) {
11054 // We can lower these with PBLENDW which is mirrored across 128-bit lanes.
11055      assert(RepeatedMask.size() == 8 && "Repeated mask size doesn't match!");
11056 BlendMask = 0;
11057 for (int i = 0; i < 8; ++i)
11058 if (RepeatedMask[i] >= 8)
11059 BlendMask |= 1ull << i;
11060 return DAG.getNode(X86ISD::BLENDI, DL, MVT::v16i16, V1, V2,
11061 DAG.getTargetConstant(BlendMask, DL, MVT::i8));
11062 }
11063 // Use PBLENDW for lower/upper lanes and then blend lanes.
11064 // TODO - we should allow 2 PBLENDW here and leave shuffle combine to
11065 // merge to VSELECT where useful.
11066 uint64_t LoMask = BlendMask & 0xFF;
11067 uint64_t HiMask = (BlendMask >> 8) & 0xFF;
11068 if (LoMask == 0 || LoMask == 255 || HiMask == 0 || HiMask == 255) {
11069 SDValue Lo = DAG.getNode(X86ISD::BLENDI, DL, MVT::v16i16, V1, V2,
11070 DAG.getTargetConstant(LoMask, DL, MVT::i8));
11071 SDValue Hi = DAG.getNode(X86ISD::BLENDI, DL, MVT::v16i16, V1, V2,
11072 DAG.getTargetConstant(HiMask, DL, MVT::i8));
11073 return DAG.getVectorShuffle(
11074 MVT::v16i16, DL, Lo, Hi,
11075 {0, 1, 2, 3, 4, 5, 6, 7, 24, 25, 26, 27, 28, 29, 30, 31});
11076 }
11077    LLVM_FALLTHROUGH;
11078 }
11079 case MVT::v32i8:
11080    assert(Subtarget.hasAVX2() && "256-bit byte-blends require AVX2!");
11081    LLVM_FALLTHROUGH;
11082 case MVT::v16i8: {
11083    assert(Subtarget.hasSSE41() && "128-bit byte-blends require SSE41!");
11084
11085 // Attempt to lower to a bitmask if we can. VPAND is faster than VPBLENDVB.
11086 if (SDValue Masked = lowerShuffleAsBitMask(DL, VT, V1, V2, Mask, Zeroable,
11087 Subtarget, DAG))
11088 return Masked;
11089
11090 if (Subtarget.hasBWI() && Subtarget.hasVLX()) {
11091 MVT IntegerType =
11092 MVT::getIntegerVT(std::max((int)VT.getVectorNumElements(), 8));
11093 SDValue MaskNode = DAG.getConstant(BlendMask, DL, IntegerType);
11094 return getVectorMaskingNode(V2, MaskNode, V1, Subtarget, DAG);
11095 }
11096
11097 // Scale the blend by the number of bytes per element.
11098 int Scale = VT.getScalarSizeInBits() / 8;
11099
11100 // This form of blend is always done on bytes. Compute the byte vector
11101 // type.
11102 MVT BlendVT = MVT::getVectorVT(MVT::i8, VT.getSizeInBits() / 8);
11103
11104 // x86 allows load folding with blendvb from the 2nd source operand. But
11105 // we are still using LLVM select here (see comment below), so that's V1.
11106 // If V2 can be load-folded and V1 cannot be load-folded, then commute to
11107 // allow that load-folding possibility.
11108 if (!ISD::isNormalLoad(V1.getNode()) && ISD::isNormalLoad(V2.getNode())) {
11109 ShuffleVectorSDNode::commuteMask(Mask);
11110 std::swap(V1, V2);
11111 }
11112
11113 // Compute the VSELECT mask. Note that VSELECT is really confusing in the
11114 // mix of LLVM's code generator and the x86 backend. We tell the code
11115 // generator that boolean values in the elements of an x86 vector register
11116 // are -1 for true and 0 for false. We then use the LLVM semantics of 'true'
11117 // mapping a select to operand #1, and 'false' mapping to operand #2. The
11118 // reality in x86 is that vector masks (pre-AVX-512) use only the high bit
11119 // of the element (the remaining are ignored) and 0 in that high bit would
11120 // mean operand #1 while 1 in the high bit would mean operand #2. So while
11121 // the LLVM model for boolean values in vector elements gets the relevant
11122 // bit set, it is set backwards and over constrained relative to x86's
11123 // actual model.
11124 SmallVector<SDValue, 32> VSELECTMask;
11125 for (int i = 0, Size = Mask.size(); i < Size; ++i)
11126 for (int j = 0; j < Scale; ++j)
11127 VSELECTMask.push_back(
11128 Mask[i] < 0 ? DAG.getUNDEF(MVT::i8)
11129 : DAG.getConstant(Mask[i] < Size ? -1 : 0, DL,
11130 MVT::i8));
11131
11132 V1 = DAG.getBitcast(BlendVT, V1);
11133 V2 = DAG.getBitcast(BlendVT, V2);
11134 return DAG.getBitcast(
11135 VT,
11136 DAG.getSelect(DL, BlendVT, DAG.getBuildVector(BlendVT, DL, VSELECTMask),
11137 V1, V2));
11138 }
11139 case MVT::v16f32:
11140 case MVT::v8f64:
11141 case MVT::v8i64:
11142 case MVT::v16i32:
11143 case MVT::v32i16:
11144 case MVT::v64i8: {
11145 // Attempt to lower to a bitmask if we can. Only if not optimizing for size.
11146 bool OptForSize = DAG.getMachineFunction().getFunction().hasOptSize();
11147 if (!OptForSize) {
11148 if (SDValue Masked = lowerShuffleAsBitMask(DL, VT, V1, V2, Mask, Zeroable,
11149 Subtarget, DAG))
11150 return Masked;
11151 }
11152
11153 // Otherwise load an immediate into a GPR, cast to k-register, and use a
11154 // masked move.
11155 MVT IntegerType =
11156 MVT::getIntegerVT(std::max((int)VT.getVectorNumElements(), 8));
11157 SDValue MaskNode = DAG.getConstant(BlendMask, DL, IntegerType);
11158 return getVectorMaskingNode(V2, MaskNode, V1, Subtarget, DAG);
11159 }
11160 default:
11161    llvm_unreachable("Not a supported integer vector type!");
11162 }
11163}
11164
11165/// Try to lower as a blend of elements from two inputs followed by
11166/// a single-input permutation.
11167///
11168/// This matches the pattern where we can blend elements from two inputs and
11169/// then reduce the shuffle to a single-input permutation.
11170static SDValue lowerShuffleAsBlendAndPermute(const SDLoc &DL, MVT VT,
11171 SDValue V1, SDValue V2,
11172 ArrayRef<int> Mask,
11173 SelectionDAG &DAG,
11174 bool ImmBlends = false) {
11175 // We build up the blend mask while checking whether a blend is a viable way
11176 // to reduce the shuffle.
11177 SmallVector<int, 32> BlendMask(Mask.size(), -1);
11178 SmallVector<int, 32> PermuteMask(Mask.size(), -1);
11179
11180 for (int i = 0, Size = Mask.size(); i < Size; ++i) {
11181 if (Mask[i] < 0)
11182 continue;
11183
11184    assert(Mask[i] < Size * 2 && "Shuffle input is out of bounds.");
11185
11186 if (BlendMask[Mask[i] % Size] < 0)
11187 BlendMask[Mask[i] % Size] = Mask[i];
11188 else if (BlendMask[Mask[i] % Size] != Mask[i])
11189 return SDValue(); // Can't blend in the needed input!
11190
11191 PermuteMask[i] = Mask[i] % Size;
11192 }
11193
11194 // If only immediate blends, then bail if the blend mask can't be widened to
11195 // i16.
11196 unsigned EltSize = VT.getScalarSizeInBits();
11197 if (ImmBlends && EltSize == 8 && !canWidenShuffleElements(BlendMask))
11198 return SDValue();
11199
11200 SDValue V = DAG.getVectorShuffle(VT, DL, V1, V2, BlendMask);
11201 return DAG.getVectorShuffle(VT, DL, V, DAG.getUNDEF(VT), PermuteMask);
11202}
11203
11204/// Try to lower as an unpack of elements from two inputs followed by
11205/// a single-input permutation.
11206///
11207/// This matches the pattern where we can unpack elements from two inputs and
11208/// then reduce the shuffle to a single-input (wider) permutation.
11209static SDValue lowerShuffleAsUNPCKAndPermute(const SDLoc &DL, MVT VT,
11210 SDValue V1, SDValue V2,
11211 ArrayRef<int> Mask,
11212 SelectionDAG &DAG) {
11213 int NumElts = Mask.size();
11214 int NumLanes = VT.getSizeInBits() / 128;
11215 int NumLaneElts = NumElts / NumLanes;
11216 int NumHalfLaneElts = NumLaneElts / 2;
11217
11218 bool MatchLo = true, MatchHi = true;
11219 SDValue Ops[2] = {DAG.getUNDEF(VT), DAG.getUNDEF(VT)};
11220
11221 // Determine UNPCKL/UNPCKH type and operand order.
11222 for (int Lane = 0; Lane != NumElts; Lane += NumLaneElts) {
11223 for (int Elt = 0; Elt != NumLaneElts; ++Elt) {
11224 int M = Mask[Lane + Elt];
11225 if (M < 0)
11226 continue;
11227
11228 SDValue &Op = Ops[Elt & 1];
11229 if (M < NumElts && (Op.isUndef() || Op == V1))
11230 Op = V1;
11231 else if (NumElts <= M && (Op.isUndef() || Op == V2))
11232 Op = V2;
11233 else
11234 return SDValue();
11235
11236 int Lo = Lane, Mid = Lane + NumHalfLaneElts, Hi = Lane + NumLaneElts;
11237 MatchLo &= isUndefOrInRange(M, Lo, Mid) ||
11238 isUndefOrInRange(M, NumElts + Lo, NumElts + Mid);
11239 MatchHi &= isUndefOrInRange(M, Mid, Hi) ||
11240 isUndefOrInRange(M, NumElts + Mid, NumElts + Hi);
11241 if (!MatchLo && !MatchHi)
11242 return SDValue();
11243 }
11244 }
11245  assert((MatchLo ^ MatchHi) && "Failed to match UNPCKLO/UNPCKHI");
11246
11247 // Now check that each pair of elts come from the same unpack pair
11248 // and set the permute mask based on each pair.
11249 // TODO - Investigate cases where we permute individual elements.
11250 SmallVector<int, 32> PermuteMask(NumElts, -1);
11251 for (int Lane = 0; Lane != NumElts; Lane += NumLaneElts) {
11252 for (int Elt = 0; Elt != NumLaneElts; Elt += 2) {
11253 int M0 = Mask[Lane + Elt + 0];
11254 int M1 = Mask[Lane + Elt + 1];
11255 if (0 <= M0 && 0 <= M1 &&
11256 (M0 % NumHalfLaneElts) != (M1 % NumHalfLaneElts))
11257 return SDValue();
11258 if (0 <= M0)
11259 PermuteMask[Lane + Elt + 0] = Lane + (2 * (M0 % NumHalfLaneElts));
11260 if (0 <= M1)
11261 PermuteMask[Lane + Elt + 1] = Lane + (2 * (M1 % NumHalfLaneElts)) + 1;
11262 }
11263 }
11264
11265 unsigned UnpckOp = MatchLo ? X86ISD::UNPCKL : X86ISD::UNPCKH;
11266 SDValue Unpck = DAG.getNode(UnpckOp, DL, VT, Ops);
11267 return DAG.getVectorShuffle(VT, DL, Unpck, DAG.getUNDEF(VT), PermuteMask);
11268}
11269
11270/// Helper to form a PALIGNR-based rotate+permute, merging 2 inputs and then
11271/// permuting the elements of the result in place.
11272static SDValue lowerShuffleAsByteRotateAndPermute(
11273 const SDLoc &DL, MVT VT, SDValue V1, SDValue V2, ArrayRef<int> Mask,
11274 const X86Subtarget &Subtarget, SelectionDAG &DAG) {
11275 if ((VT.is128BitVector() && !Subtarget.hasSSSE3()) ||
11276 (VT.is256BitVector() && !Subtarget.hasAVX2()) ||
11277 (VT.is512BitVector() && !Subtarget.hasBWI()))
11278 return SDValue();
11279
11280 // We don't currently support lane crossing permutes.
11281 if (is128BitLaneCrossingShuffleMask(VT, Mask))
11282 return SDValue();
11283
11284 int Scale = VT.getScalarSizeInBits() / 8;
11285 int NumLanes = VT.getSizeInBits() / 128;
11286 int NumElts = VT.getVectorNumElements();
11287 int NumEltsPerLane = NumElts / NumLanes;
11288
11289 // Determine range of mask elts.
11290 bool Blend1 = true;
11291 bool Blend2 = true;
11292  std::pair<int, int> Range1 = std::make_pair(INT_MAX, INT_MIN);
11293  std::pair<int, int> Range2 = std::make_pair(INT_MAX, INT_MIN);
11294 for (int Lane = 0; Lane != NumElts; Lane += NumEltsPerLane) {
11295 for (int Elt = 0; Elt != NumEltsPerLane; ++Elt) {
11296 int M = Mask[Lane + Elt];
11297 if (M < 0)
11298 continue;
11299 if (M < NumElts) {
11300 Blend1 &= (M == (Lane + Elt));
11301        assert(Lane <= M && M < (Lane + NumEltsPerLane) && "Out of range mask");
11302 M = M % NumEltsPerLane;
11303 Range1.first = std::min(Range1.first, M);
11304 Range1.second = std::max(Range1.second, M);
11305 } else {
11306 M -= NumElts;
11307 Blend2 &= (M == (Lane + Elt));
11308        assert(Lane <= M && M < (Lane + NumEltsPerLane) && "Out of range mask");
11309 M = M % NumEltsPerLane;
11310 Range2.first = std::min(Range2.first, M);
11311 Range2.second = std::max(Range2.second, M);
11312 }
11313 }
11314 }
11315
11316 // Bail if we don't need both elements.
11317 // TODO - it might be worth doing this for unary shuffles if the permute
11318 // can be widened.
11319 if (!(0 <= Range1.first && Range1.second < NumEltsPerLane) ||
11320 !(0 <= Range2.first && Range2.second < NumEltsPerLane))
11321 return SDValue();
11322
11323 if (VT.getSizeInBits() > 128 && (Blend1 || Blend2))
11324 return SDValue();
11325
11326 // Rotate the 2 ops so we can access both ranges, then permute the result.
11327 auto RotateAndPermute = [&](SDValue Lo, SDValue Hi, int RotAmt, int Ofs) {
11328 MVT ByteVT = MVT::getVectorVT(MVT::i8, VT.getSizeInBits() / 8);
11329 SDValue Rotate = DAG.getBitcast(
11330 VT, DAG.getNode(X86ISD::PALIGNR, DL, ByteVT, DAG.getBitcast(ByteVT, Hi),
11331 DAG.getBitcast(ByteVT, Lo),
11332 DAG.getTargetConstant(Scale * RotAmt, DL, MVT::i8)));
11333 SmallVector<int, 64> PermMask(NumElts, SM_SentinelUndef);
11334 for (int Lane = 0; Lane != NumElts; Lane += NumEltsPerLane) {
11335 for (int Elt = 0; Elt != NumEltsPerLane; ++Elt) {
11336 int M = Mask[Lane + Elt];
11337 if (M < 0)
11338 continue;
11339 if (M < NumElts)
11340 PermMask[Lane + Elt] = Lane + ((M + Ofs - RotAmt) % NumEltsPerLane);
11341 else
11342 PermMask[Lane + Elt] = Lane + ((M - Ofs - RotAmt) % NumEltsPerLane);
11343 }
11344 }
11345 return DAG.getVectorShuffle(VT, DL, Rotate, DAG.getUNDEF(VT), PermMask);
11346 };
11347
11348 // Check if the ranges are small enough to rotate from either direction.
11349 if (Range2.second < Range1.first)
11350 return RotateAndPermute(V1, V2, Range1.first, 0);
11351 if (Range1.second < Range2.first)
11352 return RotateAndPermute(V2, V1, Range2.first, NumElts);
11353 return SDValue();
11354}
11355
11356/// Generic routine to decompose a shuffle and blend into independent
11357/// blends and permutes.
11358///
11359/// This matches the extremely common pattern for handling combined
11360/// shuffle+blend operations on newer X86 ISAs where we have very fast blend
11361/// operations. It will try to pick the best arrangement of shuffles and
11362/// blends.
11363static SDValue lowerShuffleAsDecomposedShuffleBlend(
11364 const SDLoc &DL, MVT VT, SDValue V1, SDValue V2, ArrayRef<int> Mask,
11365 const X86Subtarget &Subtarget, SelectionDAG &DAG) {
11366 // Shuffle the input elements into the desired positions in V1 and V2 and
11367 // blend them together.
11368 SmallVector<int, 32> V1Mask(Mask.size(), -1);
11369 SmallVector<int, 32> V2Mask(Mask.size(), -1);
11370 SmallVector<int, 32> BlendMask(Mask.size(), -1);
11371 for (int i = 0, Size = Mask.size(); i < Size; ++i)
11372 if (Mask[i] >= 0 && Mask[i] < Size) {
11373 V1Mask[i] = Mask[i];
11374 BlendMask[i] = i;
11375 } else if (Mask[i] >= Size) {
11376 V2Mask[i] = Mask[i] - Size;
11377 BlendMask[i] = i + Size;
11378 }
11379
11380 // Try to lower with the simpler initial blend/unpack/rotate strategies unless
11381 // one of the input shuffles would be a no-op. We prefer to shuffle inputs as
11382 // the shuffle may be able to fold with a load or other benefit. However, when
11383 // we'll have to do 2x as many shuffles in order to achieve this, a 2-input
11384 // pre-shuffle first is a better strategy.
11385 if (!isNoopShuffleMask(V1Mask) && !isNoopShuffleMask(V2Mask)) {
11386 // Only prefer immediate blends to unpack/rotate.
11387 if (SDValue BlendPerm = lowerShuffleAsBlendAndPermute(DL, VT, V1, V2, Mask,
11388 DAG, true))
11389 return BlendPerm;
11390 if (SDValue UnpackPerm = lowerShuffleAsUNPCKAndPermute(DL, VT, V1, V2, Mask,
11391 DAG))
11392 return UnpackPerm;
11393 if (SDValue RotatePerm = lowerShuffleAsByteRotateAndPermute(
11394 DL, VT, V1, V2, Mask, Subtarget, DAG))
11395 return RotatePerm;
11396 // Unpack/rotate failed - try again with variable blends.
11397 if (SDValue BlendPerm = lowerShuffleAsBlendAndPermute(DL, VT, V1, V2, Mask,
11398 DAG))
11399 return BlendPerm;
11400 }
11401
11402 V1 = DAG.getVectorShuffle(VT, DL, V1, DAG.getUNDEF(VT), V1Mask);
11403 V2 = DAG.getVectorShuffle(VT, DL, V2, DAG.getUNDEF(VT), V2Mask);
11404 return DAG.getVectorShuffle(VT, DL, V1, V2, BlendMask);
11405}
11406
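A minimal sketch (plain C++, outside LLVM) of the decomposition performed above: the two-input mask is split into one single-source shuffle per input plus a blend mask that takes lane i from whichever pre-shuffled input supplied it. The mask values are illustrative only.

#include <cstdio>
#include <vector>

int main() {
  std::vector<int> Mask = {2, 6, 1, 4}; // two-input v4i32 shuffle
  int Size = (int)Mask.size();
  std::vector<int> V1Mask(Size, -1), V2Mask(Size, -1), BlendMask(Size, -1);
  for (int i = 0; i < Size; ++i) {
    if (Mask[i] < 0)
      continue;
    if (Mask[i] < Size) {
      V1Mask[i] = Mask[i];        // move the V1 element into lane i
      BlendMask[i] = i;           // then take lane i from shuffled V1
    } else {
      V2Mask[i] = Mask[i] - Size; // move the V2 element into lane i
      BlendMask[i] = i + Size;    // then take lane i from shuffled V2
    }
  }
  // Here V1Mask = <2,-1,1,-1>, V2Mask = <-1,2,-1,0>, BlendMask = <0,5,2,7>.
  for (int i = 0; i < Size; ++i)
    std::printf("%d/%d/%d ", V1Mask[i], V2Mask[i], BlendMask[i]);
  std::printf("\n");
}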
11407/// Try to lower a vector shuffle as a rotation.
11408///
11409/// This is used to support PALIGNR for SSSE3 or VALIGND/Q for AVX512.
11410static int matchShuffleAsRotate(SDValue &V1, SDValue &V2, ArrayRef<int> Mask) {
11411 int NumElts = Mask.size();
11412
11413 // We need to detect various ways of spelling a rotation:
11414 // [11, 12, 13, 14, 15, 0, 1, 2]
11415 // [-1, 12, 13, 14, -1, -1, 1, -1]
11416 // [-1, -1, -1, -1, -1, -1, 1, 2]
11417 // [ 3, 4, 5, 6, 7, 8, 9, 10]
11418 // [-1, 4, 5, 6, -1, -1, 9, -1]
11419 // [-1, 4, 5, 6, -1, -1, -1, -1]
11420 int Rotation = 0;
11421 SDValue Lo, Hi;
11422 for (int i = 0; i < NumElts; ++i) {
11423 int M = Mask[i];
11424    assert((M == SM_SentinelUndef || (0 <= M && M < (2*NumElts))) &&
11425           "Unexpected mask index.");
11426 if (M < 0)
11427 continue;
11428
11429 // Determine where a rotated vector would have started.
11430 int StartIdx = i - (M % NumElts);
11431 if (StartIdx == 0)
11432 // The identity rotation isn't interesting, stop.
11433 return -1;
11434
11435 // If we found the tail of a vector the rotation must be the missing
11436 // front. If we found the head of a vector, it must be how much of the
11437 // head.
11438 int CandidateRotation = StartIdx < 0 ? -StartIdx : NumElts - StartIdx;
11439
11440 if (Rotation == 0)
11441 Rotation = CandidateRotation;
11442 else if (Rotation != CandidateRotation)
11443 // The rotations don't match, so we can't match this mask.
11444 return -1;
11445
11446 // Compute which value this mask is pointing at.
11447 SDValue MaskV = M < NumElts ? V1 : V2;
11448
11449 // Compute which of the two target values this index should be assigned
11450 // to. This reflects whether the high elements are remaining or the low
11451 // elements are remaining.
11452 SDValue &TargetV = StartIdx < 0 ? Hi : Lo;
11453
11454 // Either set up this value if we've not encountered it before, or check
11455 // that it remains consistent.
11456 if (!TargetV)
11457 TargetV = MaskV;
11458 else if (TargetV != MaskV)
11459 // This may be a rotation, but it pulls from the inputs in some
11460 // unsupported interleaving.
11461 return -1;
11462 }
11463
11464 // Check that we successfully analyzed the mask, and normalize the results.
11465  assert(Rotation != 0 && "Failed to locate a viable rotation!");
11466  assert((Lo || Hi) && "Failed to find a rotated input vector!");
11467 if (!Lo)
11468 Lo = Hi;
11469 else if (!Hi)
11470 Hi = Lo;
11471
11472 V1 = Lo;
11473 V2 = Hi;
11474
11475 return Rotation;
11476}
11477
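A compressed standalone sketch (plain C++, outside LLVM) of the rotation detection above, ignoring the bookkeeping about which input supplies which half: every defined mask entry must imply the same rotation amount. matchShuffleAsByteRotate below then scales the element rotation by the byte width, e.g. a rotate by 3 of a v8i16 lane becomes a PALIGNR immediate of 6 bytes. Names are illustrative only.

#include <cstdio>
#include <vector>

static int matchRotation(const std::vector<int> &Mask) {
  int NumElts = (int)Mask.size(), Rotation = 0;
  for (int i = 0; i < NumElts; ++i) {
    int M = Mask[i];
    if (M < 0)
      continue;                            // undef matches any rotation
    int StartIdx = i - (M % NumElts);      // where a rotated vector began
    if (StartIdx == 0)
      return -1;                           // identity rotation: not useful
    int Candidate = StartIdx < 0 ? -StartIdx : NumElts - StartIdx;
    if (Rotation == 0)
      Rotation = Candidate;
    else if (Rotation != Candidate)
      return -1;                           // inconsistent rotation amounts
  }
  return Rotation;
}

int main() {
  // v8i16 mask <11, 12, 13, 14, 15, 0, 1, 2> is a rotate by 3 elements.
  std::printf("%d\n", matchRotation({11, 12, 13, 14, 15, 0, 1, 2}));
}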
11478/// Try to lower a vector shuffle as a byte rotation.
11479///
11480/// SSSE3 has a generic PALIGNR instruction in x86 that will do an arbitrary
11481/// byte-rotation of the concatenation of two vectors; pre-SSSE3 can use
11482/// a PSRLDQ/PSLLDQ/POR pattern to get a similar effect. This routine will
11483/// try to generically lower a vector shuffle through such a pattern. It
11484/// does not check for the profitability of lowering either as PALIGNR or
11485/// PSRLDQ/PSLLDQ/POR, only whether the mask is valid to lower in that form.
11486/// This matches shuffle vectors that look like:
11487///
11488/// v8i16 [11, 12, 13, 14, 15, 0, 1, 2]
11489///
11490/// Essentially it concatenates V1 and V2, shifts right by some number of
11491/// elements, and takes the low elements as the result. Note that while this is
11492/// specified as a *right shift* because x86 is little-endian, it is a *left
11493/// rotate* of the vector lanes.
11494static int matchShuffleAsByteRotate(MVT VT, SDValue &V1, SDValue &V2,
11495 ArrayRef<int> Mask) {
11496 // Don't accept any shuffles with zero elements.
11497 if (any_of(Mask, [](int M) { return M == SM_SentinelZero; }))
11498 return -1;
11499
11500 // PALIGNR works on 128-bit lanes.
11501 SmallVector<int, 16> RepeatedMask;
11502 if (!is128BitLaneRepeatedShuffleMask(VT, Mask, RepeatedMask))
11503 return -1;
11504
11505 int Rotation = matchShuffleAsRotate(V1, V2, RepeatedMask);
11506 if (Rotation <= 0)
11507 return -1;
11508
11509 // PALIGNR rotates bytes, so we need to scale the
11510 // rotation based on how many bytes are in the vector lane.
11511 int NumElts = RepeatedMask.size();
11512 int Scale = 16 / NumElts;
11513 return Rotation * Scale;
11514}
11515
11516static SDValue lowerShuffleAsByteRotate(const SDLoc &DL, MVT VT, SDValue V1,
11517 SDValue V2, ArrayRef<int> Mask,
11518 const X86Subtarget &Subtarget,
11519 SelectionDAG &DAG) {
11520  assert(!isNoopShuffleMask(Mask) && "We shouldn't lower no-op shuffles!");
11521
11522 SDValue Lo = V1, Hi = V2;
11523 int ByteRotation = matchShuffleAsByteRotate(VT, Lo, Hi, Mask);
11524 if (ByteRotation <= 0)
11525 return SDValue();
11526
11527 // Cast the inputs to i8 vector of correct length to match PALIGNR or
11528 // PSLLDQ/PSRLDQ.
11529 MVT ByteVT = MVT::getVectorVT(MVT::i8, VT.getSizeInBits() / 8);
11530 Lo = DAG.getBitcast(ByteVT, Lo);
11531 Hi = DAG.getBitcast(ByteVT, Hi);
11532
11533 // SSSE3 targets can use the palignr instruction.
11534 if (Subtarget.hasSSSE3()) {
11535    assert((!VT.is512BitVector() || Subtarget.hasBWI()) &&
11536           "512-bit PALIGNR requires BWI instructions");
11537 return DAG.getBitcast(
11538 VT, DAG.getNode(X86ISD::PALIGNR, DL, ByteVT, Lo, Hi,
11539 DAG.getTargetConstant(ByteRotation, DL, MVT::i8)));
11540 }
11541
11542  assert(VT.is128BitVector() &&
11543         "Rotate-based lowering only supports 128-bit lowering!");
11544  assert(Mask.size() <= 16 &&
11545         "Can shuffle at most 16 bytes in a 128-bit vector!");
11546  assert(ByteVT == MVT::v16i8 &&
11547         "SSE2 rotate lowering only needed for v16i8!");
11548
11549 // Default SSE2 implementation
11550 int LoByteShift = 16 - ByteRotation;
11551 int HiByteShift = ByteRotation;
11552
11553 SDValue LoShift =
11554 DAG.getNode(X86ISD::VSHLDQ, DL, MVT::v16i8, Lo,
11555 DAG.getTargetConstant(LoByteShift, DL, MVT::i8));
11556 SDValue HiShift =
11557 DAG.getNode(X86ISD::VSRLDQ, DL, MVT::v16i8, Hi,
11558 DAG.getTargetConstant(HiByteShift, DL, MVT::i8));
11559 return DAG.getBitcast(VT,
11560 DAG.getNode(ISD::OR, DL, MVT::v16i8, LoShift, HiShift));
11561}
11562
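A byte-array sketch (plain C++, outside LLVM) of the pre-SSSE3 fallback emitted above: the result of the VSHLDQ/VSRLDQ/OR sequence, modelled on 16-byte arrays with PSLLDQ/PSRLDQ's little-endian byte-shift semantics. Names are illustrative only.

#include <array>
#include <cstdint>
#include <cstdio>

using V16 = std::array<uint8_t, 16>;

// Emulate OR(VSHLDQ(Lo, 16 - N), VSRLDQ(Hi, N)) for a byte rotation N.
static V16 rotateEmul(const V16 &Lo, const V16 &Hi, int N) {
  V16 R{};
  for (int i = 0; i < 16; ++i) {
    uint8_t HiPart = (i + N < 16) ? Hi[i + N] : 0;          // VSRLDQ(Hi, N)
    uint8_t LoPart = (i >= 16 - N) ? Lo[i - (16 - N)] : 0;  // VSHLDQ(Lo, 16-N)
    R[i] = LoPart | HiPart;
  }
  return R;
}

int main() {
  V16 Lo, Hi;
  for (int i = 0; i < 16; ++i) {
    Lo[i] = (uint8_t)i;          // 0..15
    Hi[i] = (uint8_t)(100 + i);  // 100..115
  }
  // Rotation by 6 bytes: result is Hi[6..15] followed by Lo[0..5].
  for (uint8_t B : rotateEmul(Lo, Hi, 6))
    std::printf("%d ", B);
  std::printf("\n");
}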
11563/// Try to lower a vector shuffle as a dword/qword rotation.
11564///
11565/// AVX512 has VALIGND/VALIGNQ instructions that will do an arbitrary
11566/// rotation of the concatenation of two vectors; this routine will
11567/// try to generically lower a vector shuffle through such a pattern.
11568///
11569/// Essentially it concatenates V1 and V2, shifts right by some number of
11570/// elements, and takes the low elements as the result. Note that while this is
11571/// specified as a *right shift* because x86 is little-endian, it is a *left
11572/// rotate* of the vector lanes.
11573static SDValue lowerShuffleAsRotate(const SDLoc &DL, MVT VT, SDValue V1,
11574 SDValue V2, ArrayRef<int> Mask,
11575 const X86Subtarget &Subtarget,
11576 SelectionDAG &DAG) {
11577  assert((VT.getScalarType() == MVT::i32 || VT.getScalarType() == MVT::i64) &&
11578         "Only 32-bit and 64-bit elements are supported!");
11579
11580 // 128/256-bit vectors are only supported with VLX.
11581 assert((Subtarget.hasVLX() || (!VT.is128BitVector() && !VT.is256BitVector()))
11582 && "VLX required for 128/256-bit vectors");
11583
11584 SDValue Lo = V1, Hi = V2;
11585 int Rotation = matchShuffleAsRotate(Lo, Hi, Mask);
11586 if (Rotation <= 0)
11587 return SDValue();
11588
11589 return DAG.getNode(X86ISD::VALIGN, DL, VT, Lo, Hi,
11590 DAG.getTargetConstant(Rotation, DL, MVT::i8));
11591}
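As a rough model of what this routine matches (the in-tree matchShuffleAsRotate additionally handles operand swapping and undef-driven source selection), the mask must select one contiguous, wrapping window of the concatenation [V1 | V2]. A simplified standalone check, assuming mask entries 0..N-1 refer to V1, N..2N-1 to V2 and -1 means undef:

#include <cassert>
#include <vector>

// Returns the rotation amount R (in elements) such that result element i is
// element (i + R) mod 2N of the concatenation [V1 | V2], or -1 if none fits.
static int matchAsRotate(const std::vector<int> &Mask) {
  int N = (int)Mask.size();
  for (int R = 1; R < 2 * N; ++R) {
    bool Match = true;
    for (int i = 0; i < N && Match; ++i)
      if (Mask[i] >= 0 && Mask[i] != (i + R) % (2 * N))
        Match = false;
    if (Match)
      return R;
  }
  return -1;
}

int main() {
  // v4i32 example: element 3 of V1 followed by elements 0..2 of V2, i.e. a
  // VALIGND with a rotation of 3.
  assert(matchAsRotate({3, 4, 5, 6}) == 3);
  // Undef elements do not constrain the rotation.
  assert(matchAsRotate({3, -1, 5, 6}) == 3);
  // Not a single contiguous window of the concatenation.
  assert(matchAsRotate({3, 4, 6, 5}) == -1);
  return 0;
}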
11592
11593/// Try to lower a vector shuffle as a byte shift sequence.
11594static SDValue lowerVectorShuffleAsByteShiftMask(
11595 const SDLoc &DL, MVT VT, SDValue V1, SDValue V2, ArrayRef<int> Mask,
11596 const APInt &Zeroable, const X86Subtarget &Subtarget, SelectionDAG &DAG) {
11597 assert(!isNoopShuffleMask(Mask) && "We shouldn't lower no-op shuffles!");
11598 assert(VT.is128BitVector() && "Only 128-bit vectors supported");
11599
11600 // We need a shuffle that has zeros at one/both ends and a sequential
11601 // shuffle from one source within.
11602 unsigned ZeroLo = Zeroable.countTrailingOnes();
11603 unsigned ZeroHi = Zeroable.countLeadingOnes();
11604 if (!ZeroLo && !ZeroHi)
11605 return SDValue();
11606
11607 unsigned NumElts = Mask.size();
11608 unsigned Len = NumElts - (ZeroLo + ZeroHi);
11609 if (!isSequentialOrUndefInRange(Mask, ZeroLo, Len, Mask[ZeroLo]))
11610 return SDValue();
11611
11612 unsigned Scale = VT.getScalarSizeInBits() / 8;
11613 ArrayRef<int> StubMask = Mask.slice(ZeroLo, Len);
11614 if (!isUndefOrInRange(StubMask, 0, NumElts) &&
11615 !isUndefOrInRange(StubMask, NumElts, 2 * NumElts))
11616 return SDValue();
11617
11618 SDValue Res = Mask[ZeroLo] < (int)NumElts ? V1 : V2;
11619 Res = DAG.getBitcast(MVT::v16i8, Res);
11620
11621 // Use VSHLDQ/VSRLDQ ops to zero the ends of a vector and leave an
11622 // inner sequential set of elements, possibly offset:
11623 // 01234567 --> zzzzzz01 --> 1zzzzzzz
11624 // 01234567 --> 4567zzzz --> zzzzz456
11625 // 01234567 --> z0123456 --> 3456zzzz --> zz3456zz
11626 if (ZeroLo == 0) {
11627 unsigned Shift = (NumElts - 1) - (Mask[ZeroLo + Len - 1] % NumElts);
11628 Res = DAG.getNode(X86ISD::VSHLDQ, DL, MVT::v16i8, Res,
11629 DAG.getTargetConstant(Scale * Shift, DL, MVT::i8));
11630 Res = DAG.getNode(X86ISD::VSRLDQ, DL, MVT::v16i8, Res,
11631 DAG.getTargetConstant(Scale * ZeroHi, DL, MVT::i8));
11632 } else if (ZeroHi == 0) {
11633 unsigned Shift = Mask[ZeroLo] % NumElts;
11634 Res = DAG.getNode(X86ISD::VSRLDQ, DL, MVT::v16i8, Res,
11635 DAG.getTargetConstant(Scale * Shift, DL, MVT::i8));
11636 Res = DAG.getNode(X86ISD::VSHLDQ, DL, MVT::v16i8, Res,
11637 DAG.getTargetConstant(Scale * ZeroLo, DL, MVT::i8));
11638 } else if (!Subtarget.hasSSSE3()) {
11639 // If we don't have PSHUFB then it's worth avoiding an AND constant mask
11640 // by performing 3 byte shifts. Shuffle combining can kick in above that.
11641 // TODO: There may be some cases where VSH{LR}DQ+PAND is still better.
11642 unsigned Shift = (NumElts - 1) - (Mask[ZeroLo + Len - 1] % NumElts);
11643 Res = DAG.getNode(X86ISD::VSHLDQ, DL, MVT::v16i8, Res,
11644 DAG.getTargetConstant(Scale * Shift, DL, MVT::i8));
11645 Shift += Mask[ZeroLo] % NumElts;
11646 Res = DAG.getNode(X86ISD::VSRLDQ, DL, MVT::v16i8, Res,
11647 DAG.getTargetConstant(Scale * Shift, DL, MVT::i8));
11648 Res = DAG.getNode(X86ISD::VSHLDQ, DL, MVT::v16i8, Res,
11649 DAG.getTargetConstant(Scale * ZeroLo, DL, MVT::i8));
11650 } else
11651 return SDValue();
11652
11653 return DAG.getBitcast(VT, Res);
11654}
11655
11656/// Try to lower a vector shuffle as a bit shift (shifts in zeros).
11657///
11658/// Attempts to match a shuffle mask against the PSLL(W/D/Q/DQ) and
11659/// PSRL(W/D/Q/DQ) SSE2 and AVX2 logical bit-shift instructions. The function
11660/// matches elements from one of the input vectors shuffled to the left or
11661/// right with zeroable elements 'shifted in'. It handles both the strictly
11662/// bit-wise element shifts and the byte shift across an entire 128-bit double
11663/// quad word lane.
11664///
11665/// PSHL : (little-endian) left bit shift.
11666/// [ zz, 0, zz, 2 ]
11667/// [ -1, 4, zz, -1 ]
11668/// PSRL : (little-endian) right bit shift.
11669/// [ 1, zz, 3, zz]
11670/// [ -1, -1, 7, zz]
11671/// PSLLDQ : (little-endian) left byte shift
11672/// [ zz, 0, 1, 2, 3, 4, 5, 6]
11673/// [ zz, zz, -1, -1, 2, 3, 4, -1]
11674/// [ zz, zz, zz, zz, zz, zz, -1, 1]
11675/// PSRLDQ : (little-endian) right byte shift
11676/// [ 5, 6, 7, zz, zz, zz, zz, zz]
11677/// [ -1, 5, 6, 7, zz, zz, zz, zz]
11678/// [ 1, 2, -1, -1, -1, -1, zz, zz]
11679static int matchShuffleAsShift(MVT &ShiftVT, unsigned &Opcode,
11680 unsigned ScalarSizeInBits, ArrayRef<int> Mask,
11681 int MaskOffset, const APInt &Zeroable,
11682 const X86Subtarget &Subtarget) {
11683 int Size = Mask.size();
11684 unsigned SizeInBits = Size * ScalarSizeInBits;
11685
11686 auto CheckZeros = [&](int Shift, int Scale, bool Left) {
11687 for (int i = 0; i < Size; i += Scale)
11688 for (int j = 0; j < Shift; ++j)
11689 if (!Zeroable[i + j + (Left ? 0 : (Scale - Shift))])
11690 return false;
11691
11692 return true;
11693 };
11694
11695 auto MatchShift = [&](int Shift, int Scale, bool Left) {
11696 for (int i = 0; i != Size; i += Scale) {
11697 unsigned Pos = Left ? i + Shift : i;
11698 unsigned Low = Left ? i : i + Shift;
11699 unsigned Len = Scale - Shift;
11700 if (!isSequentialOrUndefInRange(Mask, Pos, Len, Low + MaskOffset))
11701 return -1;
11702 }
11703
11704 int ShiftEltBits = ScalarSizeInBits * Scale;
11705 bool ByteShift = ShiftEltBits > 64;
11706 Opcode = Left ? (ByteShift ? X86ISD::VSHLDQ : X86ISD::VSHLI)
11707 : (ByteShift ? X86ISD::VSRLDQ : X86ISD::VSRLI);
11708 int ShiftAmt = Shift * ScalarSizeInBits / (ByteShift ? 8 : 1);
11709
11710 // Normalize the scale for byte shifts to still produce an i64 element
11711 // type.
11712 Scale = ByteShift ? Scale / 2 : Scale;
11713
11714 // We need to round trip through the appropriate type for the shift.
11715 MVT ShiftSVT = MVT::getIntegerVT(ScalarSizeInBits * Scale);
11716 ShiftVT = ByteShift ? MVT::getVectorVT(MVT::i8, SizeInBits / 8)
11717 : MVT::getVectorVT(ShiftSVT, Size / Scale);
11718 return (int)ShiftAmt;
11719 };
11720
11721 // SSE/AVX supports logical shifts up to 64-bit integers - so we can just
11722 // keep doubling the size of the integer elements up to that. We can
11723 // then shift the elements of the integer vector by whole multiples of
11724 // their width within the elements of the larger integer vector. Test each
11725 // multiple to see if we can find a match with the moved element indices
11726 // and that the shifted in elements are all zeroable.
11727 unsigned MaxWidth = ((SizeInBits == 512) && !Subtarget.hasBWI() ? 64 : 128);
11728 for (int Scale = 2; Scale * ScalarSizeInBits <= MaxWidth; Scale *= 2)
11729 for (int Shift = 1; Shift != Scale; ++Shift)
11730 for (bool Left : {true, false})
11731 if (CheckZeros(Shift, Scale, Left)) {
11732 int ShiftAmt = MatchShift(Shift, Scale, Left);
11733 if (0 < ShiftAmt)
11734 return ShiftAmt;
11735 }
11736
11737 // no match
11738 return -1;
11739}
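The CheckZeros/MatchShift lambdas above encode the mask patterns listed in the comment block: within every group of Scale elements, a left shift leaves the low Shift slots zeroable and the remaining slots sequential from the source. A small standalone restatement of that test; the names below are illustrative and not from this file.

#include <cassert>
#include <vector>

// For a left shift by 'Shift' element slots within groups of 'Scale'
// elements, the low 'Shift' slots of every group must be zeroable and the
// rest must read sequentially from the source (-1 means undef).
static bool matchesLeftShift(const std::vector<int> &Mask,
                             const std::vector<bool> &Zeroable, int Shift,
                             int Scale) {
  int Size = (int)Mask.size();
  for (int i = 0; i < Size; i += Scale) {
    for (int j = 0; j < Shift; ++j)
      if (!Zeroable[i + j])
        return false;
    for (int j = Shift; j < Scale; ++j) {
      int M = Mask[i + j];
      if (M >= 0 && M != i + (j - Shift))
        return false;
    }
  }
  return true;
}

int main() {
  // The "[ zz, 0, zz, 2 ]" example from the comment block: a v4i32 mask that
  // is a left shift by one 32-bit element within 64-bit groups (PSLLQ by 32).
  std::vector<int> Mask = {-1, 0, -1, 2};
  std::vector<bool> Zeroable = {true, false, true, false};
  assert(matchesLeftShift(Mask, Zeroable, /*Shift=*/1, /*Scale=*/2));

  // "[ 1, zz, 3, zz ]" is the right-shift form, so it must not match here.
  std::vector<int> RMask = {1, -1, 3, -1};
  std::vector<bool> RZeroable = {false, true, false, true};
  assert(!matchesLeftShift(RMask, RZeroable, /*Shift=*/1, /*Scale=*/2));
  return 0;
}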
11740
11741static SDValue lowerShuffleAsShift(const SDLoc &DL, MVT VT, SDValue V1,
11742 SDValue V2, ArrayRef<int> Mask,
11743 const APInt &Zeroable,
11744 const X86Subtarget &Subtarget,
11745 SelectionDAG &DAG) {
11746 int Size = Mask.size();
11747 assert(Size == (int)VT.getVectorNumElements() && "Unexpected mask size");
11748
11749 MVT ShiftVT;
11750 SDValue V = V1;
11751 unsigned Opcode;
11752
11753 // Try to match shuffle against V1 shift.
11754 int ShiftAmt = matchShuffleAsShift(ShiftVT, Opcode, VT.getScalarSizeInBits(),
11755 Mask, 0, Zeroable, Subtarget);
11756
11757 // If V1 failed, try to match shuffle against V2 shift.
11758 if (ShiftAmt < 0) {
11759 ShiftAmt = matchShuffleAsShift(ShiftVT, Opcode, VT.getScalarSizeInBits(),
11760 Mask, Size, Zeroable, Subtarget);
11761 V = V2;
11762 }
11763
11764 if (ShiftAmt < 0)
11765 return SDValue();
11766
11767 assert(DAG.getTargetLoweringInfo().isTypeLegal(ShiftVT) &&
11768 "Illegal integer vector type");
11769 V = DAG.getBitcast(ShiftVT, V);
11770 V = DAG.getNode(Opcode, DL, ShiftVT, V,
11771 DAG.getTargetConstant(ShiftAmt, DL, MVT::i8));
11772 return DAG.getBitcast(VT, V);
11773}
11774
11775// EXTRQ: Extract Len elements from lower half of source, starting at Idx.
11776// Remainder of lower half result is zero and upper half is all undef.
11777static bool matchShuffleAsEXTRQ(MVT VT, SDValue &V1, SDValue &V2,
11778 ArrayRef<int> Mask, uint64_t &BitLen,
11779 uint64_t &BitIdx, const APInt &Zeroable) {
11780 int Size = Mask.size();
11781 int HalfSize = Size / 2;
11782 assert(Size == (int)VT.getVectorNumElements() && "Unexpected mask size");
11783 assert(!Zeroable.isAllOnesValue() && "Fully zeroable shuffle mask");
11784
11785 // Upper half must be undefined.
11786 if (!isUndefUpperHalf(Mask))
11787 return false;
11788
11789 // Determine the extraction length from the part of the
11790 // lower half that isn't zeroable.
11791 int Len = HalfSize;
11792 for (; Len > 0; --Len)
11793 if (!Zeroable[Len - 1])
11794 break;
11795 assert(Len > 0 && "Zeroable shuffle mask");
11796
11797 // Attempt to match first Len sequential elements from the lower half.
11798 SDValue Src;
11799 int Idx = -1;
11800 for (int i = 0; i != Len; ++i) {
11801 int M = Mask[i];
11802 if (M == SM_SentinelUndef)
11803 continue;
11804 SDValue &V = (M < Size ? V1 : V2);
11805 M = M % Size;
11806
11807 // The extracted elements must start at a valid index and all mask
11808 // elements must be in the lower half.
11809 if (i > M || M >= HalfSize)
11810 return false;
11811
11812 if (Idx < 0 || (Src == V && Idx == (M - i))) {
11813 Src = V;
11814 Idx = M - i;
11815 continue;
11816 }
11817 return false;
11818 }
11819
11820 if (!Src || Idx < 0)
11821 return false;
11822
11823 assert((Idx + Len) <= HalfSize && "Illegal extraction mask");
11824 BitLen = (Len * VT.getScalarSizeInBits()) & 0x3f;
11825 BitIdx = (Idx * VT.getScalarSizeInBits()) & 0x3f;
11826 V1 = Src;
11827 return true;
11828}
11829
11830// INSERTQ: Extract lowest Len elements from lower half of second source and
11831// insert over first source, starting at Idx.
11832// { A[0], .., A[Idx-1], B[0], .., B[Len-1], A[Idx+Len], .., UNDEF, ... }
11833static bool matchShuffleAsINSERTQ(MVT VT, SDValue &V1, SDValue &V2,
11834 ArrayRef<int> Mask, uint64_t &BitLen,
11835 uint64_t &BitIdx) {
11836 int Size = Mask.size();
11837 int HalfSize = Size / 2;
11838 assert(Size == (int)VT.getVectorNumElements() && "Unexpected mask size");
11839
11840 // Upper half must be undefined.
11841 if (!isUndefUpperHalf(Mask))
11842 return false;
11843
11844 for (int Idx = 0; Idx != HalfSize; ++Idx) {
11845 SDValue Base;
11846
11847 // Attempt to match first source from mask before insertion point.
11848 if (isUndefInRange(Mask, 0, Idx)) {
11849 /* EMPTY */
11850 } else if (isSequentialOrUndefInRange(Mask, 0, Idx, 0)) {
11851 Base = V1;
11852 } else if (isSequentialOrUndefInRange(Mask, 0, Idx, Size)) {
11853 Base = V2;
11854 } else {
11855 continue;
11856 }
11857
11858 // Extend the extraction length looking to match both the insertion of
11859 // the second source and the remaining elements of the first.
11860 for (int Hi = Idx + 1; Hi <= HalfSize; ++Hi) {
11861 SDValue Insert;
11862 int Len = Hi - Idx;
11863
11864 // Match insertion.
11865 if (isSequentialOrUndefInRange(Mask, Idx, Len, 0)) {
11866 Insert = V1;
11867 } else if (isSequentialOrUndefInRange(Mask, Idx, Len, Size)) {
11868 Insert = V2;
11869 } else {
11870 continue;
11871 }
11872
11873 // Match the remaining elements of the lower half.
11874 if (isUndefInRange(Mask, Hi, HalfSize - Hi)) {
11875 /* EMPTY */
11876 } else if ((!Base || (Base == V1)) &&
11877 isSequentialOrUndefInRange(Mask, Hi, HalfSize - Hi, Hi)) {
11878 Base = V1;
11879 } else if ((!Base || (Base == V2)) &&
11880 isSequentialOrUndefInRange(Mask, Hi, HalfSize - Hi,
11881 Size + Hi)) {
11882 Base = V2;
11883 } else {
11884 continue;
11885 }
11886
11887 BitLen = (Len * VT.getScalarSizeInBits()) & 0x3f;
11888 BitIdx = (Idx * VT.getScalarSizeInBits()) & 0x3f;
11889 V1 = Base;
11890 V2 = Insert;
11891 return true;
11892 }
11893 }
11894
11895 return false;
11896}
11897
11898/// Try to lower a vector shuffle using SSE4a EXTRQ/INSERTQ.
11899static SDValue lowerShuffleWithSSE4A(const SDLoc &DL, MVT VT, SDValue V1,
11900 SDValue V2, ArrayRef<int> Mask,
11901 const APInt &Zeroable, SelectionDAG &DAG) {
11902 uint64_t BitLen, BitIdx;
11903 if (matchShuffleAsEXTRQ(VT, V1, V2, Mask, BitLen, BitIdx, Zeroable))
11904 return DAG.getNode(X86ISD::EXTRQI, DL, VT, V1,
11905 DAG.getTargetConstant(BitLen, DL, MVT::i8),
11906 DAG.getTargetConstant(BitIdx, DL, MVT::i8));
11907
11908 if (matchShuffleAsINSERTQ(VT, V1, V2, Mask, BitLen, BitIdx))
11909 return DAG.getNode(X86ISD::INSERTQI, DL, VT, V1 ? V1 : DAG.getUNDEF(VT),
11910 V2 ? V2 : DAG.getUNDEF(VT),
11911 DAG.getTargetConstant(BitLen, DL, MVT::i8),
11912 DAG.getTargetConstant(BitIdx, DL, MVT::i8));
11913
11914 return SDValue();
11915}
11916
11917/// Lower a vector shuffle as a zero or any extension.
11918///
11919/// Given a specific number of elements, element bit width, and extension
11920/// stride, produce either a zero or any extension based on the available
11921/// features of the subtarget. The extended elements are consecutive and
11922/// can start from an offset element index in the input; to
11923/// avoid excess shuffling, the offset must either be in the bottom lane
11924/// or at the start of a higher lane. All extended elements must be from
11925/// the same lane.
11926static SDValue lowerShuffleAsSpecificZeroOrAnyExtend(
11927 const SDLoc &DL, MVT VT, int Scale, int Offset, bool AnyExt, SDValue InputV,
11928 ArrayRef<int> Mask, const X86Subtarget &Subtarget, SelectionDAG &DAG) {
11929 assert(Scale > 1 && "Need a scale to extend.");
11930 int EltBits = VT.getScalarSizeInBits();
11931 int NumElements = VT.getVectorNumElements();
11932 int NumEltsPerLane = 128 / EltBits;
11933 int OffsetLane = Offset / NumEltsPerLane;
11934 assert((EltBits == 8 || EltBits == 16 || EltBits == 32) &&
11935 "Only 8, 16, and 32 bit elements can be extended.");
11936 assert(Scale * EltBits <= 64 && "Cannot zero extend past 64 bits.");
11937 assert(0 <= Offset && "Extension offset must be positive.");
11938 assert((Offset < NumEltsPerLane || Offset % NumEltsPerLane == 0) &&
11939 "Extension offset must be in the first lane or start an upper lane.");
11940
11941 // Check that an index is in same lane as the base offset.
11942 auto SafeOffset = [&](int Idx) {
11943 return OffsetLane == (Idx / NumEltsPerLane);
11944 };
11945
11946 // Shift along an input so that the offset base moves to the first element.
11947 auto ShuffleOffset = [&](SDValue V) {
11948 if (!Offset)
11949 return V;
11950
11951 SmallVector<int, 8> ShMask((unsigned)NumElements, -1);
11952 for (int i = 0; i * Scale < NumElements; ++i) {
11953 int SrcIdx = i + Offset;
11954 ShMask[i] = SafeOffset(SrcIdx) ? SrcIdx : -1;
11955 }
11956 return DAG.getVectorShuffle(VT, DL, V, DAG.getUNDEF(VT), ShMask);
11957 };
11958
11959 // Found a valid a/zext mask! Try various lowering strategies based on the
11960 // input type and available ISA extensions.
11961 if (Subtarget.hasSSE41()) {
11962 // Not worth offsetting 128-bit vectors if scale == 2; a pattern using
11963 // PUNPCK will catch this in a later shuffle match.
11964 if (Offset && Scale == 2 && VT.is128BitVector())
11965 return SDValue();
11966 MVT ExtVT = MVT::getVectorVT(MVT::getIntegerVT(EltBits * Scale),
11967 NumElements / Scale);
11968 InputV = ShuffleOffset(InputV);
11969 InputV = getExtendInVec(AnyExt ? ISD::ANY_EXTEND : ISD::ZERO_EXTEND, DL,
11970 ExtVT, InputV, DAG);
11971 return DAG.getBitcast(VT, InputV);
11972 }
11973
11974 assert(VT.is128BitVector() && "Only 128-bit vectors can be extended.");
11975
11976 // For any extends we can cheat for larger element sizes and use shuffle
11977 // instructions that can fold with a load and/or copy.
11978 if (AnyExt && EltBits == 32) {
11979 int PSHUFDMask[4] = {Offset, -1, SafeOffset(Offset + 1) ? Offset + 1 : -1,
11980 -1};
11981 return DAG.getBitcast(
11982 VT, DAG.getNode(X86ISD::PSHUFD, DL, MVT::v4i32,
11983 DAG.getBitcast(MVT::v4i32, InputV),
11984 getV4X86ShuffleImm8ForMask(PSHUFDMask, DL, DAG)));
11985 }
11986 if (AnyExt && EltBits == 16 && Scale > 2) {
11987 int PSHUFDMask[4] = {Offset / 2, -1,
11988 SafeOffset(Offset + 1) ? (Offset + 1) / 2 : -1, -1};
11989 InputV = DAG.getNode(X86ISD::PSHUFD, DL, MVT::v4i32,
11990 DAG.getBitcast(MVT::v4i32, InputV),
11991 getV4X86ShuffleImm8ForMask(PSHUFDMask, DL, DAG));
11992 int PSHUFWMask[4] = {1, -1, -1, -1};
11993 unsigned OddEvenOp = (Offset & 1) ? X86ISD::PSHUFLW : X86ISD::PSHUFHW;
11994 return DAG.getBitcast(
11995 VT, DAG.getNode(OddEvenOp, DL, MVT::v8i16,
11996 DAG.getBitcast(MVT::v8i16, InputV),
11997 getV4X86ShuffleImm8ForMask(PSHUFWMask, DL, DAG)));
11998 }
11999
12000 // The SSE4A EXTRQ instruction can efficiently extend the first 2 lanes
12001 // to 64-bits.
12002 if ((Scale * EltBits) == 64 && EltBits < 32 && Subtarget.hasSSE4A()) {
12003 assert(NumElements == (int)Mask.size() && "Unexpected shuffle mask size!");
12004 assert(VT.is128BitVector() && "Unexpected vector width!");
12005
12006 int LoIdx = Offset * EltBits;
12007 SDValue Lo = DAG.getBitcast(
12008 MVT::v2i64, DAG.getNode(X86ISD::EXTRQI, DL, VT, InputV,
12009 DAG.getTargetConstant(EltBits, DL, MVT::i8),
12010 DAG.getTargetConstant(LoIdx, DL, MVT::i8)));
12011
12012 if (isUndefUpperHalf(Mask) || !SafeOffset(Offset + 1))
12013 return DAG.getBitcast(VT, Lo);
12014
12015 int HiIdx = (Offset + 1) * EltBits;
12016 SDValue Hi = DAG.getBitcast(
12017 MVT::v2i64, DAG.getNode(X86ISD::EXTRQI, DL, VT, InputV,
12018 DAG.getTargetConstant(EltBits, DL, MVT::i8),
12019 DAG.getTargetConstant(HiIdx, DL, MVT::i8)));
12020 return DAG.getBitcast(VT,
12021 DAG.getNode(X86ISD::UNPCKL, DL, MVT::v2i64, Lo, Hi));
12022 }
12023
12024 // If this would require more than 2 unpack instructions to expand, use
12025 // pshufb when available. We can only use more than 2 unpack instructions
12026 // when zero extending i8 elements which also makes it easier to use pshufb.
12027 if (Scale > 4 && EltBits == 8 && Subtarget.hasSSSE3()) {
12028 assert(NumElements == 16 && "Unexpected byte vector width!");
12029 SDValue PSHUFBMask[16];
12030 for (int i = 0; i < 16; ++i) {
12031 int Idx = Offset + (i / Scale);
12032 if ((i % Scale == 0 && SafeOffset(Idx))) {
12033 PSHUFBMask[i] = DAG.getConstant(Idx, DL, MVT::i8);
12034 continue;
12035 }
12036 PSHUFBMask[i] =
12037 AnyExt ? DAG.getUNDEF(MVT::i8) : DAG.getConstant(0x80, DL, MVT::i8);
12038 }
12039 InputV = DAG.getBitcast(MVT::v16i8, InputV);
12040 return DAG.getBitcast(
12041 VT, DAG.getNode(X86ISD::PSHUFB, DL, MVT::v16i8, InputV,
12042 DAG.getBuildVector(MVT::v16i8, DL, PSHUFBMask)));
12043 }
12044
12045 // If we are extending from an offset, ensure we start on a boundary that
12046 // we can unpack from.
12047 int AlignToUnpack = Offset % (NumElements / Scale);
12048 if (AlignToUnpack) {
12049 SmallVector<int, 8> ShMask((unsigned)NumElements, -1);
12050 for (int i = AlignToUnpack; i < NumElements; ++i)
12051 ShMask[i - AlignToUnpack] = i;
12052 InputV = DAG.getVectorShuffle(VT, DL, InputV, DAG.getUNDEF(VT), ShMask);
12053 Offset -= AlignToUnpack;
12054 }
12055
12056 // Otherwise emit a sequence of unpacks.
12057 do {
12058 unsigned UnpackLoHi = X86ISD::UNPCKL;
12059 if (Offset >= (NumElements / 2)) {
12060 UnpackLoHi = X86ISD::UNPCKH;
12061 Offset -= (NumElements / 2);
12062 }
12063
12064 MVT InputVT = MVT::getVectorVT(MVT::getIntegerVT(EltBits), NumElements);
12065 SDValue Ext = AnyExt ? DAG.getUNDEF(InputVT)
12066 : getZeroVector(InputVT, Subtarget, DAG, DL);
12067 InputV = DAG.getBitcast(InputVT, InputV);
12068 InputV = DAG.getNode(UnpackLoHi, DL, InputVT, InputV, Ext);
12069 Scale /= 2;
12070 EltBits *= 2;
12071 NumElements /= 2;
12072 } while (Scale > 1);
12073 return DAG.getBitcast(VT, InputV);
12074}
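The unpack loop above zero-extends by interleaving the input with a zero vector and halving Scale each iteration. A standalone sketch of a single such step on plain arrays, assuming a little-endian host as on x86 (illustrative only, not part of this file):

#include <array>
#include <cassert>
#include <cstdint>
#include <cstring>

int main() {
  std::array<uint16_t, 8> In = {1000, 2000, 3000, 4000, 5, 6, 7, 8};
  std::array<uint16_t, 8> Zero{};

  // PUNPCKLWD: interleave the low halves of the two inputs.
  std::array<uint16_t, 8> Unpacked;
  for (int i = 0; i < 4; ++i) {
    Unpacked[2 * i] = In[i];       // even slots: the source element
    Unpacked[2 * i + 1] = Zero[i]; // odd slots: zero
  }

  // Reinterpreted as v4i32 (little-endian), this is exactly a ZERO_EXTEND of
  // the low four elements: low 16 bits hold the value, high 16 bits are zero.
  std::array<uint32_t, 4> AsI32;
  std::memcpy(AsI32.data(), Unpacked.data(), sizeof(AsI32));
  for (int i = 0; i < 4; ++i)
    assert(AsI32[i] == In[i]);
  return 0;
}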
12075
12076/// Try to lower a vector shuffle as a zero extension on any microarch.
12077///
12078/// This routine will try to do everything in its power to cleverly lower
12079/// a shuffle which happens to match the pattern of a zero extend. It doesn't
12080/// check for the profitability of this lowering, it tries to aggressively
12081/// match this pattern. It will use all of the micro-architectural details it
12082/// can to emit an efficient lowering. It handles both blends with all-zero
12083/// inputs to explicitly zero-extend and undef-lanes (sometimes undef due to
12084/// masking out later).
12085///
12086/// The reason we have dedicated lowering for zext-style shuffles is that they
12087/// are both incredibly common and often quite performance sensitive.
12088static SDValue lowerShuffleAsZeroOrAnyExtend(
12089 const SDLoc &DL, MVT VT, SDValue V1, SDValue V2, ArrayRef<int> Mask,
12090 const APInt &Zeroable, const X86Subtarget &Subtarget,
12091 SelectionDAG &DAG) {
12092 int Bits = VT.getSizeInBits();
12093 int NumLanes = Bits / 128;
12094 int NumElements = VT.getVectorNumElements();
12095 int NumEltsPerLane = NumElements / NumLanes;
12096 assert(VT.getScalarSizeInBits() <= 32 &&
12097 "Exceeds 32-bit integer zero extension limit");
12098 assert((int)Mask.size() == NumElements && "Unexpected shuffle mask size");
12099
12100 // Define a helper function to check a particular ext-scale and lower to it if
12101 // valid.
12102 auto Lower = [&](int Scale) -> SDValue {
12103 SDValue InputV;
12104 bool AnyExt = true;
12105 int Offset = 0;
12106 int Matches = 0;
12107 for (int i = 0; i < NumElements; ++i) {
12108 int M = Mask[i];
12109 if (M < 0)
12110 continue; // Valid anywhere but doesn't tell us anything.
12111 if (i % Scale != 0) {
12112 // Each of the extended elements need to be zeroable.
12113 if (!Zeroable[i])
12114 return SDValue();
12115
12116 // We no longer are in the anyext case.
12117 AnyExt = false;
12118 continue;
12119 }
12120
12121 // Each of the base elements needs to be consecutive indices into the
12122 // same input vector.
12123 SDValue V = M < NumElements ? V1 : V2;
12124 M = M % NumElements;
12125 if (!InputV) {
12126 InputV = V;
12127 Offset = M - (i / Scale);
12128 } else if (InputV != V)
12129 return SDValue(); // Flip-flopping inputs.
12130
12131 // Offset must start in the lowest 128-bit lane or at the start of an
12132 // upper lane.
12133 // FIXME: Is it ever worth allowing a negative base offset?
12134 if (!((0 <= Offset && Offset < NumEltsPerLane) ||
12135 (Offset % NumEltsPerLane) == 0))
12136 return SDValue();
12137
12138 // If we are offsetting, all referenced entries must come from the same
12139 // lane.
12140 if (Offset && (Offset / NumEltsPerLane) != (M / NumEltsPerLane))
12141 return SDValue();
12142
12143 if ((M % NumElements) != (Offset + (i / Scale)))
12144 return SDValue(); // Non-consecutive strided elements.
12145 Matches++;
12146 }
12147
12148 // If we fail to find an input, we have a zero-shuffle which should always
12149 // have already been handled.
12150 // FIXME: Maybe handle this here in case during blending we end up with one?
12151 if (!InputV)
12152 return SDValue();
12153
12154 // If we are offsetting, don't extend if we only match a single input, we
12155 // can always do better by using a basic PSHUF or PUNPCK.
12156 if (Offset != 0 && Matches < 2)
12157 return SDValue();
12158
12159 return lowerShuffleAsSpecificZeroOrAnyExtend(DL, VT, Scale, Offset, AnyExt,
12160 InputV, Mask, Subtarget, DAG);
12161 };
12162
12163 // The widest scale possible for extending is to a 64-bit integer.
12164 assert(Bits % 64 == 0 &&
12165 "The number of bits in a vector must be divisible by 64 on x86!");
12166 int NumExtElements = Bits / 64;
12167
12168 // Each iteration, try extending the elements half as much, but into twice as
12169 // many elements.
12170 for (; NumExtElements < NumElements; NumExtElements *= 2) {
12171 assert(NumElements % NumExtElements == 0 &&
12172 "The input vector size must be divisible by the extended size.");
12173 if (SDValue V = Lower(NumElements / NumExtElements))
12174 return V;
12175 }
12176
12177 // General extends failed, but 128-bit vectors may be able to use MOVQ.
12178 if (Bits != 128)
12179 return SDValue();
12180
12181 // Returns one of the source operands if the shuffle can be reduced to a
12182 // MOVQ, copying the lower 64-bits and zero-extending to the upper 64-bits.
12183 auto CanZExtLowHalf = [&]() {
12184 for (int i = NumElements / 2; i != NumElements; ++i)
12185 if (!Zeroable[i])
12186 return SDValue();
12187 if (isSequentialOrUndefInRange(Mask, 0, NumElements / 2, 0))
12188 return V1;
12189 if (isSequentialOrUndefInRange(Mask, 0, NumElements / 2, NumElements))
12190 return V2;
12191 return SDValue();
12192 };
12193
12194 if (SDValue V = CanZExtLowHalf()) {
12195 V = DAG.getBitcast(MVT::v2i64, V);
12196 V = DAG.getNode(X86ISD::VZEXT_MOVL, DL, MVT::v2i64, V);
12197 return DAG.getBitcast(VT, V);
12198 }
12199
12200 // No viable ext lowering found.
12201 return SDValue();
12202}
12203
12204/// Try to get a scalar value for a specific element of a vector.
12205///
12206/// Looks through BUILD_VECTOR and SCALAR_TO_VECTOR nodes to find a scalar.
12207static SDValue getScalarValueForVectorElement(SDValue V, int Idx,
12208 SelectionDAG &DAG) {
12209 MVT VT = V.getSimpleValueType();
12210 MVT EltVT = VT.getVectorElementType();
12211 V = peekThroughBitcasts(V);
12212
12213 // If the bitcasts shift the element size, we can't extract an equivalent
12214 // element from it.
12215 MVT NewVT = V.getSimpleValueType();
12216 if (!NewVT.isVector() || NewVT.getScalarSizeInBits() != VT.getScalarSizeInBits())
12217 return SDValue();
12218
12219 if (V.getOpcode() == ISD::BUILD_VECTOR ||
12220 (Idx == 0 && V.getOpcode() == ISD::SCALAR_TO_VECTOR)) {
12221 // Ensure the scalar operand is the same size as the destination.
12222 // FIXME: Add support for scalar truncation where possible.
12223 SDValue S = V.getOperand(Idx);
12224 if (EltVT.getSizeInBits() == S.getSimpleValueType().getSizeInBits())
12225 return DAG.getBitcast(EltVT, S);
12226 }
12227
12228 return SDValue();
12229}
12230
12231/// Helper to test for a load that can be folded with x86 shuffles.
12232///
12233/// This is particularly important because the set of instructions varies
12234/// significantly based on whether the operand is a load or not.
12235static bool isShuffleFoldableLoad(SDValue V) {
12236 V = peekThroughBitcasts(V);
12237 return ISD::isNON_EXTLoad(V.getNode());
12238}
12239
12240/// Try to lower insertion of a single element into a zero vector.
12241///
12242/// This is a common pattern for which we have especially efficient lowerings
12243/// across all subtarget feature sets.
12244static SDValue lowerShuffleAsElementInsertion(
12245 const SDLoc &DL, MVT VT, SDValue V1, SDValue V2, ArrayRef<int> Mask,
12246 const APInt &Zeroable, const X86Subtarget &Subtarget,
12247 SelectionDAG &DAG) {
12248 MVT ExtVT = VT;
12249 MVT EltVT = VT.getVectorElementType();
12250
12251 int V2Index =
12252 find_if(Mask, [&Mask](int M) { return M >= (int)Mask.size(); }) -
12253 Mask.begin();
12254 bool IsV1Zeroable = true;
12255 for (int i = 0, Size = Mask.size(); i < Size; ++i)
12256 if (i != V2Index && !Zeroable[i]) {
12257 IsV1Zeroable = false;
12258 break;
12259 }
12260
12261 // Check for a single input from a SCALAR_TO_VECTOR node.
12262 // FIXME: All of this should be canonicalized into INSERT_VECTOR_ELT and
12263 // all the smarts here sunk into that routine. However, the current
12264 // lowering of BUILD_VECTOR makes that nearly impossible until the old
12265 // vector shuffle lowering is dead.
12266 SDValue V2S = getScalarValueForVectorElement(V2, Mask[V2Index] - Mask.size(),
12267 DAG);
12268 if (V2S && DAG.getTargetLoweringInfo().isTypeLegal(V2S.getValueType())) {
12269 // We need to zext the scalar if it is smaller than an i32.
12270 V2S = DAG.getBitcast(EltVT, V2S);
12271 if (EltVT == MVT::i8 || EltVT == MVT::i16) {
12272 // Using zext to expand a narrow element won't work for non-zero
12273 // insertions.
12274 if (!IsV1Zeroable)
12275 return SDValue();
12276
12277 // Zero-extend directly to i32.
12278 ExtVT = MVT::getVectorVT(MVT::i32, ExtVT.getSizeInBits() / 32);
12279 V2S = DAG.getNode(ISD::ZERO_EXTEND, DL, MVT::i32, V2S);
12280 }
12281 V2 = DAG.getNode(ISD::SCALAR_TO_VECTOR, DL, ExtVT, V2S);
12282 } else if (Mask[V2Index] != (int)Mask.size() || EltVT == MVT::i8 ||
12283 EltVT == MVT::i16) {
12284 // Either not inserting from the low element of the input or the input
12285 // element size is too small to use VZEXT_MOVL to clear the high bits.
12286 return SDValue();
12287 }
12288
12289 if (!IsV1Zeroable) {
12290 // If V1 can't be treated as a zero vector we have fewer options to lower
12291 // this. We can't support integer vectors or non-zero targets cheaply, and
12292 // the V1 elements can't be permuted in any way.
12293 assert(VT == ExtVT && "Cannot change extended type when non-zeroable!");
12294 if (!VT.isFloatingPoint() || V2Index != 0)
12295 return SDValue();
12296 SmallVector<int, 8> V1Mask(Mask.begin(), Mask.end());
12297 V1Mask[V2Index] = -1;
12298 if (!isNoopShuffleMask(V1Mask))
12299 return SDValue();
12300 if (!VT.is128BitVector())
12301 return SDValue();
12302
12303 // Otherwise, use MOVSD or MOVSS.
12304 assert((EltVT == MVT::f32 || EltVT == MVT::f64) &&
12305 "Only two types of floating point element types to handle!");
12306 return DAG.getNode(EltVT == MVT::f32 ? X86ISD::MOVSS : X86ISD::MOVSD, DL,
12307 ExtVT, V1, V2);
12308 }
12309
12310 // This lowering only works for the low element with floating point vectors.
12311 if (VT.isFloatingPoint() && V2Index != 0)
12312 return SDValue();
12313
12314 V2 = DAG.getNode(X86ISD::VZEXT_MOVL, DL, ExtVT, V2);
12315 if (ExtVT != VT)
12316 V2 = DAG.getBitcast(VT, V2);
12317
12318 if (V2Index != 0) {
12319 // If we have 4 or fewer lanes we can cheaply shuffle the element into
12320 // the desired position. Otherwise it is more efficient to do a vector
12321 // shift left. We know that we can do a vector shift left because all
12322 // the inputs are zero.
12323 if (VT.isFloatingPoint() || VT.getVectorNumElements() <= 4) {
12324 SmallVector<int, 4> V2Shuffle(Mask.size(), 1);
12325 V2Shuffle[V2Index] = 0;
12326 V2 = DAG.getVectorShuffle(VT, DL, V2, DAG.getUNDEF(VT), V2Shuffle);
12327 } else {
12328 V2 = DAG.getBitcast(MVT::v16i8, V2);
12329 V2 = DAG.getNode(X86ISD::VSHLDQ, DL, MVT::v16i8, V2,
12330 DAG.getTargetConstant(
12331 V2Index * EltVT.getSizeInBits() / 8, DL, MVT::i8));
12332 V2 = DAG.getBitcast(VT, V2);
12333 }
12334 }
12335 return V2;
12336}
12337
12338/// Try to lower broadcast of a single - truncated - integer element,
12339/// coming from a scalar_to_vector/build_vector node \p V0 with larger elements.
12340///
12341/// This assumes we have AVX2.
12342static SDValue lowerShuffleAsTruncBroadcast(const SDLoc &DL, MVT VT, SDValue V0,
12343 int BroadcastIdx,
12344 const X86Subtarget &Subtarget,
12345 SelectionDAG &DAG) {
12346 assert(Subtarget.hasAVX2() &&
12347 "We can only lower integer broadcasts with AVX2!");
12348
12349 EVT EltVT = VT.getVectorElementType();
12350 EVT V0VT = V0.getValueType();
12351
12352 assert(VT.isInteger() && "Unexpected non-integer trunc broadcast!");
12353 assert(V0VT.isVector() && "Unexpected non-vector vector-sized value!");
12354
12355 EVT V0EltVT = V0VT.getVectorElementType();
12356 if (!V0EltVT.isInteger())
12357 return SDValue();
12358
12359 const unsigned EltSize = EltVT.getSizeInBits();
12360 const unsigned V0EltSize = V0EltVT.getSizeInBits();
12361
12362 // This is only a truncation if the original element type is larger.
12363 if (V0EltSize <= EltSize)
12364 return SDValue();
12365
12366 assert(((V0EltSize % EltSize) == 0) &&
12367 "Scalar type sizes must all be powers of 2 on x86!");
12368
12369 const unsigned V0Opc = V0.getOpcode();
12370 const unsigned Scale = V0EltSize / EltSize;
12371 const unsigned V0BroadcastIdx = BroadcastIdx / Scale;
12372
12373 if ((V0Opc != ISD::SCALAR_TO_VECTOR || V0BroadcastIdx != 0) &&
12374 V0Opc != ISD::BUILD_VECTOR)
12375 return SDValue();
12376
12377 SDValue Scalar = V0.getOperand(V0BroadcastIdx);
12378
12379 // If we're extracting non-least-significant bits, shift so we can truncate.
12380 // Hopefully, we can fold away the trunc/srl/load into the broadcast.
12381 // Even if we can't (and !isShuffleFoldableLoad(Scalar)), prefer
12382 // vpbroadcast+vmovd+shr to vpshufb(m)+vmovd.
12383 if (const int OffsetIdx = BroadcastIdx % Scale)
12384 Scalar = DAG.getNode(ISD::SRL, DL, Scalar.getValueType(), Scalar,
12385 DAG.getConstant(OffsetIdx * EltSize, DL, MVT::i8));
12386
12387 return DAG.getNode(X86ISD::VBROADCAST, DL, VT,
12388 DAG.getNode(ISD::TRUNCATE, DL, EltVT, Scalar));
12389}
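A scalar model of the index arithmetic in this truncated-broadcast path (illustrative only, not part of this file): with Scale = V0EltSize / EltSize, the broadcast reads wide operand BroadcastIdx / Scale and, when BroadcastIdx % Scale is non-zero, shifts right by that offset times EltSize before truncating.

#include <cassert>
#include <cstdint>

int main() {
  const unsigned EltSize = 16, V0EltSize = 64;
  const unsigned Scale = V0EltSize / EltSize;           // 4
  const unsigned BroadcastIdx = 5;                      // 16-bit element #5
  const unsigned V0BroadcastIdx = BroadcastIdx / Scale; // 64-bit operand #1
  const unsigned OffsetIdx = BroadcastIdx % Scale;      // 16-bit slot #1 in it

  // Two 64-bit "build_vector" operands, viewed as little-endian 16-bit slots.
  uint64_t V0[2] = {0x4444333322221111ULL, 0x8888777766665555ULL};

  uint64_t Scalar = V0[V0BroadcastIdx];
  Scalar >>= OffsetIdx * EltSize;        // the SRL emitted above
  uint16_t Broadcast = (uint16_t)Scalar; // the TRUNCATE feeding VBROADCAST

  assert(V0BroadcastIdx == 1 && OffsetIdx == 1);
  assert(Broadcast == 0x6666); // 16-bit element #5 of the concatenation
  return 0;
}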
12390
12391/// Test whether this can be lowered with a single SHUFPS instruction.
12392///
12393/// This is used to disable more specialized lowerings when the shufps lowering
12394/// will happen to be efficient.
12395static bool isSingleSHUFPSMask(ArrayRef<int> Mask) {
12396 // This routine only handles 128-bit shufps.
12397 assert(Mask.size() == 4 && "Unsupported mask size!");
12398 assert(Mask[0] >= -1 && Mask[0] < 8 && "Out of bound mask element!");
12399 assert(Mask[1] >= -1 && Mask[1] < 8 && "Out of bound mask element!");
12400 assert(Mask[2] >= -1 && Mask[2] < 8 && "Out of bound mask element!");
12401 assert(Mask[3] >= -1 && Mask[3] < 8 && "Out of bound mask element!");
12402
12403 // To lower with a single SHUFPS we need to have the low half and high half
12404 // each requiring a single input.
12405 if (Mask[0] >= 0 && Mask[1] >= 0 && (Mask[0] < 4) != (Mask[1] < 4))
12406 return false;
12407 if (Mask[2] >= 0 && Mask[3] >= 0 && (Mask[2] < 4) != (Mask[3] < 4))
12408 return false;
12409
12410 return true;
12411}
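A standalone restatement of the test above (not part of this file): SHUFPS builds its low two result lanes from one operand and its high two lanes from the other, so within each half of the 4-element mask every defined entry must reference a single input (0-3 = first input, 4-7 = second, -1 = undef).

#include <array>
#include <cassert>

static bool isSingleShufps(const std::array<int, 4> &M) {
  if (M[0] >= 0 && M[1] >= 0 && (M[0] < 4) != (M[1] < 4))
    return false;
  if (M[2] >= 0 && M[3] >= 0 && (M[2] < 4) != (M[3] < 4))
    return false;
  return true;
}

int main() {
  assert(isSingleShufps({0, 3, 6, 5}));  // low half from V1, high half from V2
  assert(isSingleShufps({-1, 2, 4, 4})); // undef entries never disqualify
  assert(!isSingleShufps({0, 4, 1, 5})); // each half mixes both inputs
  return 0;
}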
12412
12413/// If we are extracting two 128-bit halves of a vector and shuffling the
12414/// result, match that to a 256-bit AVX2 vperm* instruction to avoid a
12415/// multi-shuffle lowering.
12416static SDValue lowerShuffleOfExtractsAsVperm(const SDLoc &DL, SDValue N0,
12417 SDValue N1, ArrayRef<int> Mask,
12418 SelectionDAG &DAG) {
12419 EVT VT = N0.getValueType();
12420 assert((VT.is128BitVector() &&
12421 (VT.getScalarSizeInBits() == 32 || VT.getScalarSizeInBits() == 64)) &&
12422 "VPERM* family of shuffles requires 32-bit or 64-bit elements");
12423
12424 // Check that both sources are extracts of the same source vector.
12425 if (!N0.hasOneUse() || !N1.hasOneUse() ||
12426 N0.getOpcode() != ISD::EXTRACT_SUBVECTOR ||
12427 N1.getOpcode() != ISD::EXTRACT_SUBVECTOR ||
12428 N0.getOperand(0) != N1.getOperand(0))
12429 return SDValue();
12430
12431 SDValue WideVec = N0.getOperand(0);
12432 EVT WideVT = WideVec.getValueType();
12433 if (!WideVT.is256BitVector() || !isa<ConstantSDNode>(N0.getOperand(1)) ||
12434 !isa<ConstantSDNode>(N1.getOperand(1)))
12435 return SDValue();
12436
12437 // Match extracts of each half of the wide source vector. Commute the shuffle
12438 // if the extract of the low half is N1.
12439 unsigned NumElts = VT.getVectorNumElements();
12440 SmallVector<int, 4> NewMask(Mask.begin(), Mask.end());
12441 const APInt &ExtIndex0 = N0.getConstantOperandAPInt(1);
12442 const APInt &ExtIndex1 = N1.getConstantOperandAPInt(1);
12443 if (ExtIndex1 == 0 && ExtIndex0 == NumElts)
12444 ShuffleVectorSDNode::commuteMask(NewMask);
12445 else if (ExtIndex0 != 0 || ExtIndex1 != NumElts)
12446 return SDValue();
12447
12448 // Final bailout: if the mask is simple, we are better off using an extract
12449 // and a simple narrow shuffle. Prefer extract+unpack(h/l)ps to vpermps
12450 // because that avoids a constant load from memory.
12451 if (NumElts == 4 &&
12452 (isSingleSHUFPSMask(NewMask) || is128BitUnpackShuffleMask(NewMask)))
12453 return SDValue();
12454
12455 // Extend the shuffle mask with undef elements.
12456 NewMask.append(NumElts, -1);
12457
12458 // shuf (extract X, 0), (extract X, 4), M --> extract (shuf X, undef, M'), 0
12459 SDValue Shuf = DAG.getVectorShuffle(WideVT, DL, WideVec, DAG.getUNDEF(WideVT),
12460 NewMask);
12461 // This is free: ymm -> xmm.
12462 return DAG.getNode(ISD::EXTRACT_SUBVECTOR, DL, VT, Shuf,
12463 DAG.getIntPtrConstant(0, DL));
12464}
12465
12466/// Try to lower broadcast of a single element.
12467///
12468/// For convenience, this code also bundles all of the subtarget feature set
12469/// filtering. While a little annoying to re-dispatch on type here, there isn't
12470/// a convenient way to factor it out.
12471static SDValue lowerShuffleAsBroadcast(const SDLoc &DL, MVT VT, SDValue V1,
12472 SDValue V2, ArrayRef<int> Mask,
12473 const X86Subtarget &Subtarget,
12474 SelectionDAG &DAG) {
12475 if (!((Subtarget.hasSSE3() && VT == MVT::v2f64) ||
12476 (Subtarget.hasAVX() && VT.isFloatingPoint()) ||
12477 (Subtarget.hasAVX2() && VT.isInteger())))
12478 return SDValue();
12479
12480 // With MOVDDUP (v2f64) we can broadcast from a register or a load, otherwise
12481 // we can only broadcast from a register with AVX2.
12482 unsigned NumElts = Mask.size();
12483 unsigned NumEltBits = VT.getScalarSizeInBits();
12484 unsigned Opcode = (VT == MVT::v2f64 && !Subtarget.hasAVX2())
12485 ? X86ISD::MOVDDUP
12486 : X86ISD::VBROADCAST;
12487 bool BroadcastFromReg = (Opcode == X86ISD::MOVDDUP) || Subtarget.hasAVX2();
12488
12489 // Check that the mask is a broadcast.
12490 int BroadcastIdx = -1;
12491 for (int i = 0; i != (int)NumElts; ++i) {
12492 SmallVector<int, 8> BroadcastMask(NumElts, i);
12493 if (isShuffleEquivalent(V1, V2, Mask, BroadcastMask)) {
12494 BroadcastIdx = i;
12495 break;
12496 }
12497 }
12498
12499 if (BroadcastIdx < 0)
12500 return SDValue();
12501 assert(BroadcastIdx < (int)Mask.size() && "We only expect to be called with "
12502 "a sorted mask where the broadcast "
12503 "comes from V1.");
12504
12505 // Go up the chain of (vector) values to find a scalar load that we can
12506 // combine with the broadcast.
12507 int BitOffset = BroadcastIdx * NumEltBits;
12508 SDValue V = V1;
12509 for (;;) {
12510 switch (V.getOpcode()) {
12511 case ISD::BITCAST: {
12512 V = V.getOperand(0);
12513 continue;
12514 }
12515 case ISD::CONCAT_VECTORS: {
12516 int OpBitWidth = V.getOperand(0).getValueSizeInBits();
12517 int OpIdx = BitOffset / OpBitWidth;
12518 V = V.getOperand(OpIdx);
12519 BitOffset %= OpBitWidth;
12520 continue;
12521 }
12522 case ISD::INSERT_SUBVECTOR: {
12523 SDValue VOuter = V.getOperand(0), VInner = V.getOperand(1);
12524 auto ConstantIdx = dyn_cast<ConstantSDNode>(V.getOperand(2));
12525 if (!ConstantIdx)
12526 break;
12527
12528 int EltBitWidth = VOuter.getScalarValueSizeInBits();
12529 int Idx = (int)ConstantIdx->getZExtValue();
12530 int NumSubElts = (int)VInner.getSimpleValueType().getVectorNumElements();
12531 int BeginOffset = Idx * EltBitWidth;
12532 int EndOffset = BeginOffset + NumSubElts * EltBitWidth;
12533 if (BeginOffset <= BitOffset && BitOffset < EndOffset) {
12534 BitOffset -= BeginOffset;
12535 V = VInner;
12536 } else {
12537 V = VOuter;
12538 }
12539 continue;
12540 }
12541 }
12542 break;
12543 }
12544 assert((BitOffset % NumEltBits) == 0 && "Illegal bit-offset");
12545 BroadcastIdx = BitOffset / NumEltBits;
12546
12547 // Do we need to bitcast the source to retrieve the original broadcast index?
12548 bool BitCastSrc = V.getScalarValueSizeInBits() != NumEltBits;
12549
12550 // Check if this is a broadcast of a scalar. We special case lowering
12551 // for scalars so that we can more effectively fold with loads.
12552 // If the original value has a larger element type than the shuffle, the
12553 // broadcast element is in essence truncated. Make that explicit to ease
12554 // folding.
12555 if (BitCastSrc && VT.isInteger())
12556 if (SDValue TruncBroadcast = lowerShuffleAsTruncBroadcast(
12557 DL, VT, V, BroadcastIdx, Subtarget, DAG))
12558 return TruncBroadcast;
12559
12560 MVT BroadcastVT = VT;
12561
12562 // Also check the simpler case, where we can directly reuse the scalar.
12563 if (!BitCastSrc &&
12564 ((V.getOpcode() == ISD::BUILD_VECTOR && V.hasOneUse()) ||
12565 (V.getOpcode() == ISD::SCALAR_TO_VECTOR && BroadcastIdx == 0))) {
12566 V = V.getOperand(BroadcastIdx);
12567
12568 // If we can't broadcast from a register, check that the input is a load.
12569 if (!BroadcastFromReg && !isShuffleFoldableLoad(V))
12570 return SDValue();
12571 } else if (MayFoldLoad(V) && cast<LoadSDNode>(V)->isSimple()) {
12572 // 32-bit targets need to load i64 as a f64 and then bitcast the result.
12573 if (!Subtarget.is64Bit() && VT.getScalarType() == MVT::i64) {
12574 BroadcastVT = MVT::getVectorVT(MVT::f64, VT.getVectorNumElements());
12575 Opcode = (BroadcastVT.is128BitVector() && !Subtarget.hasAVX2())
12576 ? X86ISD::MOVDDUP
12577 : Opcode;
12578 }
12579
12580 // If we are broadcasting a load that is only used by the shuffle
12581 // then we can reduce the vector load to the broadcasted scalar load.
12582 LoadSDNode *Ld = cast<LoadSDNode>(V);
12583 SDValue BaseAddr = Ld->getOperand(1);
12584 EVT SVT = BroadcastVT.getScalarType();
12585 unsigned Offset = BroadcastIdx * SVT.getStoreSize();
12586 assert((int)(Offset * 8) == BitOffset && "Unexpected bit-offset");
12587 SDValue NewAddr = DAG.getMemBasePlusOffset(BaseAddr, Offset, DL);
12588 V = DAG.getLoad(SVT, DL, Ld->getChain(), NewAddr,
12589 DAG.getMachineFunction().getMachineMemOperand(
12590 Ld->getMemOperand(), Offset, SVT.getStoreSize()));
12591 DAG.makeEquivalentMemoryOrdering(Ld, V);
12592 } else if (!BroadcastFromReg) {
12593 // We can't broadcast from a vector register.
12594 return SDValue();
12595 } else if (BitOffset != 0) {
12596 // We can only broadcast from the zero-element of a vector register,
12597 // but it can be advantageous to broadcast from the zero-element of a
12598 // subvector.
12599 if (!VT.is256BitVector() && !VT.is512BitVector())
12600 return SDValue();
12601
12602 // VPERMQ/VPERMPD can perform the cross-lane shuffle directly.
12603 if (VT == MVT::v4f64 || VT == MVT::v4i64)
12604 return SDValue();
12605
12606 // Only broadcast the zero-element of a 128-bit subvector.
12607 if ((BitOffset % 128) != 0)
12608 return SDValue();
12609
12610 assert((BitOffset % V.getScalarValueSizeInBits()) == 0 &&
12611        "Unexpected bit-offset");
12612 assert((V.getValueSizeInBits() == 256 || V.getValueSizeInBits() == 512) &&
12613        "Unexpected vector size");
12614 unsigned ExtractIdx = BitOffset / V.getScalarValueSizeInBits();
12615 V = extract128BitVector(V, ExtractIdx, DAG, DL);
12616 }
12617
12618 if (Opcode == X86ISD::MOVDDUP && !V.getValueType().isVector())
12619 V = DAG.getNode(ISD::SCALAR_TO_VECTOR, DL, MVT::v2f64,
12620 DAG.getBitcast(MVT::f64, V));
12621
12622 // Bitcast back to the same scalar type as BroadcastVT.
12623 if (V.getValueType().getScalarType() != BroadcastVT.getScalarType()) {
12624 assert(NumEltBits == BroadcastVT.getScalarSizeInBits() &&
12625        "Unexpected vector element size");
12626 MVT ExtVT;
12627 if (V.getValueType().isVector()) {
12628 unsigned NumSrcElts = V.getValueSizeInBits() / NumEltBits;
12629 ExtVT = MVT::getVectorVT(BroadcastVT.getScalarType(), NumSrcElts);
12630 } else {
12631 ExtVT = BroadcastVT.getScalarType();
12632 }
12633 V = DAG.getBitcast(ExtVT, V);
12634 }
12635
12636 // 32-bit targets need to load i64 as a f64 and then bitcast the result.
12637 if (!Subtarget.is64Bit() && V.getValueType() == MVT::i64) {
12638 V = DAG.getBitcast(MVT::f64, V);
12639 unsigned NumBroadcastElts = BroadcastVT.getVectorNumElements();
12640 BroadcastVT = MVT::getVectorVT(MVT::f64, NumBroadcastElts);
12641 }
12642
12643 // We only support broadcasting from 128-bit vectors to minimize the
12644 // number of patterns we need to deal with in isel. So extract down to
12645 // 128-bits, removing as many bitcasts as possible.
12646 if (V.getValueSizeInBits() > 128) {
12647 MVT ExtVT = V.getSimpleValueType().getScalarType();
12648 ExtVT = MVT::getVectorVT(ExtVT, 128 / ExtVT.getScalarSizeInBits());
12649 V = extract128BitVector(peekThroughBitcasts(V), 0, DAG, DL);
12650 V = DAG.getBitcast(ExtVT, V);
12651 }
12652
12653 return DAG.getBitcast(VT, DAG.getNode(Opcode, DL, BroadcastVT, V));
12654}
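
The broadcast lowering above tracks the broadcast position purely as a bit offset while it walks up through bitcasts, CONCAT_VECTORS and INSERT_SUBVECTOR nodes. The following is a small standalone sketch of that bookkeeping using plain integers instead of SDValues; the values and names are illustrative only, not taken from the listing.

    #include <cassert>
    #include <cstdio>

    int main() {
      // Broadcasting element 5 of a v8i32 shuffle: 5 * 32 bits into the value.
      int NumEltBits = 32;
      int BroadcastIdx = 5;
      int BitOffset = BroadcastIdx * NumEltBits; // 160

      // Stepping through a CONCAT_VECTORS of two 128-bit operands selects the
      // second operand and rebases the offset inside it.
      int OpBitWidth = 128;
      int OpIdx = BitOffset / OpBitWidth; // operand 1
      BitOffset %= OpBitWidth;            // 32 bits into that operand

      assert((BitOffset % NumEltBits) == 0 && "Illegal bit-offset");
      printf("operand %d, element %d\n", OpIdx, BitOffset / NumEltBits); // 1, 1
      return 0;
    }
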
12655
12656// Check for whether we can use INSERTPS to perform the shuffle. We only use
12657// INSERTPS when the V1 elements are already in the correct locations
12658// because otherwise we can just always use two SHUFPS instructions which
12659// are much smaller to encode than a SHUFPS and an INSERTPS. We can also
12660// perform INSERTPS if a single V1 element is out of place and all V2
12661// elements are zeroable.
12662static bool matchShuffleAsInsertPS(SDValue &V1, SDValue &V2,
12663 unsigned &InsertPSMask,
12664 const APInt &Zeroable,
12665 ArrayRef<int> Mask, SelectionDAG &DAG) {
12666 assert(V1.getSimpleValueType().is128BitVector() && "Bad operand type!");
12667 assert(V2.getSimpleValueType().is128BitVector() && "Bad operand type!");
12668 assert(Mask.size() == 4 && "Unexpected mask size for v4 shuffle!");
12669
12670 // Attempt to match INSERTPS with one element from VA or VB being
12671 // inserted into VA (or undef). If successful, V1, V2 and InsertPSMask
12672 // are updated.
12673 auto matchAsInsertPS = [&](SDValue VA, SDValue VB,
12674 ArrayRef<int> CandidateMask) {
12675 unsigned ZMask = 0;
12676 int VADstIndex = -1;
12677 int VBDstIndex = -1;
12678 bool VAUsedInPlace = false;
12679
12680 for (int i = 0; i < 4; ++i) {
12681 // Synthesize a zero mask from the zeroable elements (includes undefs).
12682 if (Zeroable[i]) {
12683 ZMask |= 1 << i;
12684 continue;
12685 }
12686
12687 // Flag if we use any VA inputs in place.
12688 if (i == CandidateMask[i]) {
12689 VAUsedInPlace = true;
12690 continue;
12691 }
12692
12693 // We can only insert a single non-zeroable element.
12694 if (VADstIndex >= 0 || VBDstIndex >= 0)
12695 return false;
12696
12697 if (CandidateMask[i] < 4) {
12698 // VA input out of place for insertion.
12699 VADstIndex = i;
12700 } else {
12701 // VB input for insertion.
12702 VBDstIndex = i;
12703 }
12704 }
12705
12706 // Don't bother if we have no (non-zeroable) element for insertion.
12707 if (VADstIndex < 0 && VBDstIndex < 0)
12708 return false;
12709
12710 // Determine element insertion src/dst indices. The src index is from the
12711 // start of the inserted vector, not the start of the concatenated vector.
12712 unsigned VBSrcIndex = 0;
12713 if (VADstIndex >= 0) {
12714 // If we have a VA input out of place, we use VA as the V2 element
12715 // insertion and don't use the original V2 at all.
12716 VBSrcIndex = CandidateMask[VADstIndex];
12717 VBDstIndex = VADstIndex;
12718 VB = VA;
12719 } else {
12720 VBSrcIndex = CandidateMask[VBDstIndex] - 4;
12721 }
12722
12723 // If no V1 inputs are used in place, then the result is created only from
12724 // the zero mask and the V2 insertion - so remove V1 dependency.
12725 if (!VAUsedInPlace)
12726 VA = DAG.getUNDEF(MVT::v4f32);
12727
12728 // Update V1, V2 and InsertPSMask accordingly.
12729 V1 = VA;
12730 V2 = VB;
12731
12732 // Insert the V2 element into the desired position.
12733 InsertPSMask = VBSrcIndex << 6 | VBDstIndex << 4 | ZMask;
12734 assert((InsertPSMask & ~0xFFu) == 0 && "Invalid mask!");
12735 return true;
12736 };
12737
12738 if (matchAsInsertPS(V1, V2, Mask))
12739 return true;
12740
12741 // Commute and try again.
12742 SmallVector<int, 4> CommutedMask(Mask.begin(), Mask.end());
12743 ShuffleVectorSDNode::commuteMask(CommutedMask);
12744 if (matchAsInsertPS(V2, V1, CommutedMask))
12745 return true;
12746
12747 return false;
12748}
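
The immediate packed at the end of matchAsInsertPS follows the INSERTPS imm8 layout: bits [7:6] select the source element of V2, bits [5:4] select the destination lane, and bits [3:0] are the zero mask. A minimal sketch of that packing, with an illustrative helper name and example values:

    #include <cassert>
    #include <cstdio>

    static unsigned makeInsertPSImm(unsigned SrcIdx, unsigned DstIdx,
                                    unsigned ZMask) {
      unsigned Imm = (SrcIdx << 6) | (DstIdx << 4) | ZMask;
      assert((Imm & ~0xFFu) == 0 && "Invalid mask!");
      return Imm;
    }

    int main() {
      // Insert element 2 of V2 into lane 1 of the result and zero lane 3.
      printf("0x%02X\n", makeInsertPSImm(2, 1, 1u << 3)); // prints 0x98
      return 0;
    }
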
12749
12750static SDValue lowerShuffleAsInsertPS(const SDLoc &DL, SDValue V1, SDValue V2,
12751 ArrayRef<int> Mask, const APInt &Zeroable,
12752 SelectionDAG &DAG) {
12753 assert(V1.getSimpleValueType() == MVT::v4f32 && "Bad operand type!");
12754 assert(V2.getSimpleValueType() == MVT::v4f32 && "Bad operand type!");
12755
12756 // Attempt to match the insertps pattern.
12757 unsigned InsertPSMask;
12758 if (!matchShuffleAsInsertPS(V1, V2, InsertPSMask, Zeroable, Mask, DAG))
12759 return SDValue();
12760
12761 // Insert the V2 element into the desired position.
12762 return DAG.getNode(X86ISD::INSERTPS, DL, MVT::v4f32, V1, V2,
12763 DAG.getTargetConstant(InsertPSMask, DL, MVT::i8));
12764}
12765
12766/// Try to lower a shuffle as a permute of the inputs followed by an
12767/// UNPCK instruction.
12768///
12769/// This specifically targets cases where we end up with alternating between
12770/// the two inputs, and so can permute them into something that feeds a single
12771/// UNPCK instruction. Note that this routine only targets integer vectors
12772/// because for floating point vectors we have a generalized SHUFPS lowering
12773/// strategy that handles everything that doesn't *exactly* match an unpack,
12774/// making this clever lowering unnecessary.
12775static SDValue lowerShuffleAsPermuteAndUnpack(
12776 const SDLoc &DL, MVT VT, SDValue V1, SDValue V2, ArrayRef<int> Mask,
12777 const X86Subtarget &Subtarget, SelectionDAG &DAG) {
12778 assert(!VT.isFloatingPoint() &&
12779        "This routine only supports integer vectors.");
12780 assert(VT.is128BitVector() &&
12781        "This routine only works on 128-bit vectors.");
12782 assert(!V2.isUndef() &&
12783        "This routine should only be used when blending two inputs.");
12784 assert(Mask.size() >= 2 && "Single element masks are invalid.");
12785
12786 int Size = Mask.size();
12787
12788 int NumLoInputs =
12789 count_if(Mask, [Size](int M) { return M >= 0 && M % Size < Size / 2; });
12790 int NumHiInputs =
12791 count_if(Mask, [Size](int M) { return M % Size >= Size / 2; });
12792
12793 bool UnpackLo = NumLoInputs >= NumHiInputs;
12794
12795 auto TryUnpack = [&](int ScalarSize, int Scale) {
12796 SmallVector<int, 16> V1Mask((unsigned)Size, -1);
12797 SmallVector<int, 16> V2Mask((unsigned)Size, -1);
12798
12799 for (int i = 0; i < Size; ++i) {
12800 if (Mask[i] < 0)
12801 continue;
12802
12803 // Each element of the unpack contains Scale elements from this mask.
12804 int UnpackIdx = i / Scale;
12805
12806 // We only handle the case where V1 feeds the first slots of the unpack.
12807 // We rely on canonicalization to ensure this is the case.
12808 if ((UnpackIdx % 2 == 0) != (Mask[i] < Size))
12809 return SDValue();
12810
12811 // Setup the mask for this input. The indexing is tricky as we have to
12812 // handle the unpack stride.
12813 SmallVectorImpl<int> &VMask = (UnpackIdx % 2 == 0) ? V1Mask : V2Mask;
12814 VMask[(UnpackIdx / 2) * Scale + i % Scale + (UnpackLo ? 0 : Size / 2)] =
12815 Mask[i] % Size;
12816 }
12817
12818 // If we will have to shuffle both inputs to use the unpack, check whether
12819 // we can just unpack first and shuffle the result. If so, skip this unpack.
12820 if ((NumLoInputs == 0 || NumHiInputs == 0) && !isNoopShuffleMask(V1Mask) &&
12821 !isNoopShuffleMask(V2Mask))
12822 return SDValue();
12823
12824 // Shuffle the inputs into place.
12825 V1 = DAG.getVectorShuffle(VT, DL, V1, DAG.getUNDEF(VT), V1Mask);
12826 V2 = DAG.getVectorShuffle(VT, DL, V2, DAG.getUNDEF(VT), V2Mask);
12827
12828 // Cast the inputs to the type we will use to unpack them.
12829 MVT UnpackVT = MVT::getVectorVT(MVT::getIntegerVT(ScalarSize), Size / Scale);
12830 V1 = DAG.getBitcast(UnpackVT, V1);
12831 V2 = DAG.getBitcast(UnpackVT, V2);
12832
12833 // Unpack the inputs and cast the result back to the desired type.
12834 return DAG.getBitcast(
12835 VT, DAG.getNode(UnpackLo ? X86ISD::UNPCKL : X86ISD::UNPCKH, DL,
12836 UnpackVT, V1, V2));
12837 };
12838
12839 // We try each unpack from the largest to the smallest to try and find one
12840 // that fits this mask.
12841 int OrigScalarSize = VT.getScalarSizeInBits();
12842 for (int ScalarSize = 64; ScalarSize >= OrigScalarSize; ScalarSize /= 2)
12843 if (SDValue Unpack = TryUnpack(ScalarSize, ScalarSize / OrigScalarSize))
12844 return Unpack;
12845
12846 // If we're shuffling with a zero vector then we're better off not doing
12847 // VECTOR_SHUFFLE(UNPCK()) as we lose track of those zero elements.
12848 if (ISD::isBuildVectorAllZeros(V1.getNode()) ||
12849 ISD::isBuildVectorAllZeros(V2.getNode()))
12850 return SDValue();
12851
12852 // If none of the unpack-rooted lowerings worked (or were profitable) try an
12853 // initial unpack.
12854 if (NumLoInputs == 0 || NumHiInputs == 0) {
12855 assert((NumLoInputs > 0 || NumHiInputs > 0) &&
12856        "We have to have *some* inputs!");
12857 int HalfOffset = NumLoInputs == 0 ? Size / 2 : 0;
12858
12859 // FIXME: We could consider the total complexity of the permute of each
12860 // possible unpacking. Or at the least we should consider how many
12861 // half-crossings are created.
12862 // FIXME: We could consider commuting the unpacks.
12863
12864 SmallVector<int, 32> PermMask((unsigned)Size, -1);
12865 for (int i = 0; i < Size; ++i) {
12866 if (Mask[i] < 0)
12867 continue;
12868
12869 assert(Mask[i] % Size >= HalfOffset && "Found input from wrong half!");
12870
12871 PermMask[i] =
12872 2 * ((Mask[i] % Size) - HalfOffset) + (Mask[i] < Size ? 0 : 1);
12873 }
12874 return DAG.getVectorShuffle(
12875 VT, DL, DAG.getNode(NumLoInputs == 0 ? X86ISD::UNPCKH : X86ISD::UNPCKL,
12876 DL, VT, V1, V2),
12877 DAG.getUNDEF(VT), PermMask);
12878 }
12879
12880 return SDValue();
12881}
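
The permute-and-unpack routine above decides between UNPCKL and UNPCKH by counting how many mask elements reference the low versus the high half of a source. A standalone sketch of that counting, using std::count_if directly on an illustrative v4i32 mask:

    #include <algorithm>
    #include <cstdio>
    #include <vector>

    int main() {
      std::vector<int> Mask = {0, 6, 1, 7}; // mixes low V1 and high V2 elements
      int Size = (int)Mask.size();

      int NumLoInputs = (int)std::count_if(Mask.begin(), Mask.end(), [Size](int M) {
        return M >= 0 && M % Size < Size / 2;
      });
      int NumHiInputs = (int)std::count_if(Mask.begin(), Mask.end(), [Size](int M) {
        return M % Size >= Size / 2;
      });

      // Two inputs land in the low half ({0, 1}) and two in the high ({6, 7}),
      // so the tie goes to UNPCKL.
      bool UnpackLo = NumLoInputs >= NumHiInputs;
      printf("lo=%d hi=%d -> %s\n", NumLoInputs, NumHiInputs,
             UnpackLo ? "UNPCKL" : "UNPCKH");
      return 0;
    }
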
12882
12883/// Handle lowering of 2-lane 64-bit floating point shuffles.
12884///
12885/// This is the basis function for the 2-lane 64-bit shuffles as we have full
12886/// support for floating point shuffles but not integer shuffles. These
12887/// instructions will incur a domain crossing penalty on some chips though so
12888/// it is better to avoid lowering through this for integer vectors where
12889/// possible.
12890static SDValue lowerV2F64Shuffle(const SDLoc &DL, ArrayRef<int> Mask,
12891 const APInt &Zeroable, SDValue V1, SDValue V2,
12892 const X86Subtarget &Subtarget,
12893 SelectionDAG &DAG) {
12894 assert(V1.getSimpleValueType() == MVT::v2f64 && "Bad operand type!");
12895 assert(V2.getSimpleValueType() == MVT::v2f64 && "Bad operand type!");
12896 assert(Mask.size() == 2 && "Unexpected mask size for v2 shuffle!");
12897
12898 if (V2.isUndef()) {
12899 // Check for being able to broadcast a single element.
12900 if (SDValue Broadcast = lowerShuffleAsBroadcast(DL, MVT::v2f64, V1, V2,
12901 Mask, Subtarget, DAG))
12902 return Broadcast;
12903
12904 // Straight shuffle of a single input vector. Simulate this by using the
12905 // single input as both of the "inputs" to this instruction.
12906 unsigned SHUFPDMask = (Mask[0] == 1) | ((Mask[1] == 1) << 1);
12907
12908 if (Subtarget.hasAVX()) {
12909 // If we have AVX, we can use VPERMILPS which will allow folding a load
12910 // into the shuffle.
12911 return DAG.getNode(X86ISD::VPERMILPI, DL, MVT::v2f64, V1,
12912 DAG.getTargetConstant(SHUFPDMask, DL, MVT::i8));
12913 }
12914
12915 return DAG.getNode(
12916 X86ISD::SHUFP, DL, MVT::v2f64,
12917 Mask[0] == SM_SentinelUndef ? DAG.getUNDEF(MVT::v2f64) : V1,
12918 Mask[1] == SM_SentinelUndef ? DAG.getUNDEF(MVT::v2f64) : V1,
12919 DAG.getTargetConstant(SHUFPDMask, DL, MVT::i8));
12920 }
12921 assert(Mask[0] >= 0 && "No undef lanes in multi-input v2 shuffles!");
12922 assert(Mask[1] >= 0 && "No undef lanes in multi-input v2 shuffles!");
12923 assert(Mask[0] < 2 && "We sort V1 to be the first input.");
12924 assert(Mask[1] >= 2 && "We sort V2 to be the second input.");
12925
12926 if (Subtarget.hasAVX2())
12927 if (SDValue Extract = lowerShuffleOfExtractsAsVperm(DL, V1, V2, Mask, DAG))
12928 return Extract;
12929
12930 // When loading a scalar and then shuffling it into a vector we can often do
12931 // the insertion cheaply.
12932 if (SDValue Insertion = lowerShuffleAsElementInsertion(
12933 DL, MVT::v2f64, V1, V2, Mask, Zeroable, Subtarget, DAG))
12934 return Insertion;
12935 // Try inverting the insertion since for v2 masks it is easy to do and we
12936 // can't reliably sort the mask one way or the other.
12937 int InverseMask[2] = {Mask[0] < 0 ? -1 : (Mask[0] ^ 2),
12938 Mask[1] < 0 ? -1 : (Mask[1] ^ 2)};
12939 if (SDValue Insertion = lowerShuffleAsElementInsertion(
12940 DL, MVT::v2f64, V2, V1, InverseMask, Zeroable, Subtarget, DAG))
12941 return Insertion;
12942
12943 // Try to use one of the special instruction patterns to handle two common
12944 // blend patterns if a zero-blend above didn't work.
12945 if (isShuffleEquivalent(V1, V2, Mask, {0, 3}) ||
12946 isShuffleEquivalent(V1, V2, Mask, {1, 3}))
12947 if (SDValue V1S = getScalarValueForVectorElement(V1, Mask[0], DAG))
12948 // We can either use a special instruction to load over the low double or
12949 // to move just the low double.
12950 return DAG.getNode(
12951 X86ISD::MOVSD, DL, MVT::v2f64, V2,
12952 DAG.getNode(ISD::SCALAR_TO_VECTOR, DL, MVT::v2f64, V1S));
12953
12954 if (Subtarget.hasSSE41())
12955 if (SDValue Blend = lowerShuffleAsBlend(DL, MVT::v2f64, V1, V2, Mask,
12956 Zeroable, Subtarget, DAG))
12957 return Blend;
12958
12959 // Use dedicated unpack instructions for masks that match their pattern.
12960 if (SDValue V = lowerShuffleWithUNPCK(DL, MVT::v2f64, Mask, V1, V2, DAG))
12961 return V;
12962
12963 unsigned SHUFPDMask = (Mask[0] == 1) | (((Mask[1] - 2) == 1) << 1);
12964 return DAG.getNode(X86ISD::SHUFP, DL, MVT::v2f64, V1, V2,
12965 DAG.getTargetConstant(SHUFPDMask, DL, MVT::i8));
12966}
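
The two-input SHUFPD immediate built at the end of the v2f64 path uses one bit per result lane: bit 0 picks the element of V1 placed in lane 0, bit 1 picks the element of V2 placed in lane 1. A minimal sketch of that arithmetic with illustrative mask values:

    #include <cstdio>

    int main() {
      int Mask[2] = {1, 3}; // lane 0 <- V1[1], lane 1 <- V2[1]
      unsigned SHUFPDMask = (Mask[0] == 1) | (((Mask[1] - 2) == 1) << 1);
      printf("SHUFPD imm = %u\n", SHUFPDMask); // prints 3
      return 0;
    }
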
12967
12968/// Handle lowering of 2-lane 64-bit integer shuffles.
12969///
12970/// Tries to lower a 2-lane 64-bit shuffle using shuffle operations provided by
12971/// the integer unit to minimize domain crossing penalties. However, for blends
12972/// it falls back to the floating point shuffle operation with appropriate bit
12973/// casting.
12974static SDValue lowerV2I64Shuffle(const SDLoc &DL, ArrayRef<int> Mask,
12975 const APInt &Zeroable, SDValue V1, SDValue V2,
12976 const X86Subtarget &Subtarget,
12977 SelectionDAG &DAG) {
12978 assert(V1.getSimpleValueType() == MVT::v2i64 && "Bad operand type!");
12979 assert(V2.getSimpleValueType() == MVT::v2i64 && "Bad operand type!");
12980 assert(Mask.size() == 2 && "Unexpected mask size for v2 shuffle!");
12981
12982 if (V2.isUndef()) {
12983 // Check for being able to broadcast a single element.
12984 if (SDValue Broadcast = lowerShuffleAsBroadcast(DL, MVT::v2i64, V1, V2,
12985 Mask, Subtarget, DAG))
12986 return Broadcast;
12987
12988 // Straight shuffle of a single input vector. For everything from SSE2
12989 // onward this has a single fast instruction with no scary immediates.
12990 // We have to map the mask as it is actually a v4i32 shuffle instruction.
12991 V1 = DAG.getBitcast(MVT::v4i32, V1);
12992 int WidenedMask[4] = {
12993 std::max(Mask[0], 0) * 2, std::max(Mask[0], 0) * 2 + 1,
12994 std::max(Mask[1], 0) * 2, std::max(Mask[1], 0) * 2 + 1};
12995 return DAG.getBitcast(
12996 MVT::v2i64,
12997 DAG.getNode(X86ISD::PSHUFD, DL, MVT::v4i32, V1,
12998 getV4X86ShuffleImm8ForMask(WidenedMask, DL, DAG)));
12999 }
13000 assert(Mask[0] != -1 && "No undef lanes in multi-input v2 shuffles!");
13001 assert(Mask[1] != -1 && "No undef lanes in multi-input v2 shuffles!");
13002 assert(Mask[0] < 2 && "We sort V1 to be the first input.");
13003 assert(Mask[1] >= 2 && "We sort V2 to be the second input.");
13004
13005 if (Subtarget.hasAVX2())
13006 if (SDValue Extract = lowerShuffleOfExtractsAsVperm(DL, V1, V2, Mask, DAG))
13007 return Extract;
13008
13009 // Try to use shift instructions.
13010 if (SDValue Shift = lowerShuffleAsShift(DL, MVT::v2i64, V1, V2, Mask,
13011 Zeroable, Subtarget, DAG))
13012 return Shift;
13013
13014 // When loading a scalar and then shuffling it into a vector we can often do
13015 // the insertion cheaply.
13016 if (SDValue Insertion = lowerShuffleAsElementInsertion(
13017 DL, MVT::v2i64, V1, V2, Mask, Zeroable, Subtarget, DAG))
13018 return Insertion;
13019 // Try inverting the insertion since for v2 masks it is easy to do and we
13020 // can't reliably sort the mask one way or the other.
13021 int InverseMask[2] = {Mask[0] ^ 2, Mask[1] ^ 2};
13022 if (SDValue Insertion = lowerShuffleAsElementInsertion(
13023 DL, MVT::v2i64, V2, V1, InverseMask, Zeroable, Subtarget, DAG))
13024 return Insertion;
13025
13026 // We have different paths for blend lowering, but they all must use the
13027 // *exact* same predicate.
13028 bool IsBlendSupported = Subtarget.hasSSE41();
13029 if (IsBlendSupported)
13030 if (SDValue Blend = lowerShuffleAsBlend(DL, MVT::v2i64, V1, V2, Mask,
13031 Zeroable, Subtarget, DAG))
13032 return Blend;
13033
13034 // Use dedicated unpack instructions for masks that match their pattern.
13035 if (SDValue V = lowerShuffleWithUNPCK(DL, MVT::v2i64, Mask, V1, V2, DAG))
13036 return V;
13037
13038 // Try to use byte rotation instructions.
13039 // It's more profitable for pre-SSSE3 to use shuffles/unpacks.
13040 if (Subtarget.hasSSSE3()) {
13041 if (Subtarget.hasVLX())
13042 if (SDValue Rotate = lowerShuffleAsRotate(DL, MVT::v2i64, V1, V2, Mask,
13043 Subtarget, DAG))
13044 return Rotate;
13045
13046 if (SDValue Rotate = lowerShuffleAsByteRotate(DL, MVT::v2i64, V1, V2, Mask,
13047 Subtarget, DAG))
13048 return Rotate;
13049 }
13050
13051 // If we have direct support for blends, we should lower by decomposing into
13052 // a permute. That will be faster than the domain cross.
13053 if (IsBlendSupported)
13054 return lowerShuffleAsDecomposedShuffleBlend(DL, MVT::v2i64, V1, V2, Mask,
13055 Subtarget, DAG);
13056
13057 // We implement this with SHUFPD which is pretty lame because it will likely
13058 // incur 2 cycles of stall for integer vectors on Nehalem and older chips.
13059 // However, all the alternatives are still more cycles and newer chips don't
13060 // have this problem. It would be really nice if x86 had better shuffles here.
13061 V1 = DAG.getBitcast(MVT::v2f64, V1);
13062 V2 = DAG.getBitcast(MVT::v2f64, V2);
13063 return DAG.getBitcast(MVT::v2i64,
13064 DAG.getVectorShuffle(MVT::v2f64, DL, V1, V2, Mask));
13065}
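
The single-input v2i64 path above maps its 2-element mask onto a v4i32 PSHUFD by widening each 64-bit lane index into the pair of 32-bit lanes it covers. A standalone sketch of that widening with illustrative mask values:

    #include <algorithm>
    #include <cstdio>

    int main() {
      int Mask[2] = {1, 0}; // swap the two 64-bit lanes
      int WidenedMask[4] = {
          std::max(Mask[0], 0) * 2, std::max(Mask[0], 0) * 2 + 1,
          std::max(Mask[1], 0) * 2, std::max(Mask[1], 0) * 2 + 1};
      printf("%d %d %d %d\n", WidenedMask[0], WidenedMask[1], WidenedMask[2],
             WidenedMask[3]); // 2 3 0 1
      return 0;
    }
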
13066
13067/// Lower a vector shuffle using the SHUFPS instruction.
13068///
13069/// This is a helper routine dedicated to lowering vector shuffles using SHUFPS.
13070/// It makes no assumptions about whether this is the *best* lowering, it simply
13071/// uses it.
13072static SDValue lowerShuffleWithSHUFPS(const SDLoc &DL, MVT VT,
13073 ArrayRef<int> Mask, SDValue V1,
13074 SDValue V2, SelectionDAG &DAG) {
13075 SDValue LowV = V1, HighV = V2;
13076 int NewMask[4] = {Mask[0], Mask[1], Mask[2], Mask[3]};
13077
13078 int NumV2Elements = count_if(Mask, [](int M) { return M >= 4; });
13079
13080 if (NumV2Elements == 1) {
13081 int V2Index = find_if(Mask, [](int M) { return M >= 4; }) - Mask.begin();
13082
13083 // Compute the index adjacent to V2Index and in the same half by toggling
13084 // the low bit.
13085 int V2AdjIndex = V2Index ^ 1;
13086
13087 if (Mask[V2AdjIndex] < 0) {
13088 // Handles all the cases where we have a single V2 element and an undef.
13089 // This will only ever happen in the high lanes because we commute the
13090 // vector otherwise.
13091 if (V2Index < 2)
13092 std::swap(LowV, HighV);
13093 NewMask[V2Index] -= 4;
13094 } else {
13095 // Handle the case where the V2 element ends up adjacent to a V1 element.
13096 // To make this work, blend them together as the first step.
13097 int V1Index = V2AdjIndex;
13098 int BlendMask[4] = {Mask[V2Index] - 4, 0, Mask[V1Index], 0};
13099 V2 = DAG.getNode(X86ISD::SHUFP, DL, VT, V2, V1,
13100 getV4X86ShuffleImm8ForMask(BlendMask, DL, DAG));
13101
13102 // Now proceed to reconstruct the final blend as we have the necessary
13103 // high or low half formed.
13104 if (V2Index < 2) {
13105 LowV = V2;
13106 HighV = V1;
13107 } else {
13108 HighV = V2;
13109 }
13110 NewMask[V1Index] = 2; // We put the V1 element in V2[2].
13111 NewMask[V2Index] = 0; // We shifted the V2 element into V2[0].
13112 }
13113 } else if (NumV2Elements == 2) {
13114 if (Mask[0] < 4 && Mask[1] < 4) {
13115 // Handle the easy case where we have V1 in the low lanes and V2 in the
13116 // high lanes.
13117 NewMask[2] -= 4;
13118 NewMask[3] -= 4;
13119 } else if (Mask[2] < 4 && Mask[3] < 4) {
13120 // We also handle the reversed case because this utility may get called
13121 // when we detect a SHUFPS pattern but can't easily commute the shuffle to
13122 // arrange things in the right direction.
13123 NewMask[0] -= 4;
13124 NewMask[1] -= 4;
13125 HighV = V1;
13126 LowV = V2;
13127 } else {
13128 // We have a mixture of V1 and V2 in both low and high lanes. Rather than
13129 // trying to place elements directly, just blend them and set up the final
13130 // shuffle to place them.
13131
13132 // The first two blend mask elements are for V1, the second two are for
13133 // V2.
13134 int BlendMask[4] = {Mask[0] < 4 ? Mask[0] : Mask[1],
13135 Mask[2] < 4 ? Mask[2] : Mask[3],
13136 (Mask[0] >= 4 ? Mask[0] : Mask[1]) - 4,
13137 (Mask[2] >= 4 ? Mask[2] : Mask[3]) - 4};
13138 V1 = DAG.getNode(X86ISD::SHUFP, DL, VT, V1, V2,
13139 getV4X86ShuffleImm8ForMask(BlendMask, DL, DAG));
13140
13141 // Now we do a normal shuffle of V1 by giving V1 as both operands to
13142 // a blend.
13143 LowV = HighV = V1;
13144 NewMask[0] = Mask[0] < 4 ? 0 : 2;
13145 NewMask[1] = Mask[0] < 4 ? 2 : 0;
13146 NewMask[2] = Mask[2] < 4 ? 1 : 3;
13147 NewMask[3] = Mask[2] < 4 ? 3 : 1;
13148 }
13149 }
13150 return DAG.getNode(X86ISD::SHUFP, DL, VT, LowV, HighV,
13151 getV4X86ShuffleImm8ForMask(NewMask, DL, DAG));
13152}
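
Every SHUFPS/PSHUFD emitted above ultimately encodes its 4-lane mask into an 8-bit immediate with two bits per destination lane, lane 0 in the low bits. The helper below is a simplified sketch of that packing (the real getV4X86ShuffleImm8ForMask handles undefs more carefully; here they are simply mapped to lane 0), with an illustrative name and example mask:

    #include <cassert>
    #include <cstdio>

    static unsigned packV4ShuffleImm8(const int (&Mask)[4]) {
      unsigned Imm = 0;
      for (int i = 0; i != 4; ++i) {
        int M = Mask[i] < 0 ? 0 : Mask[i]; // treat undef (-1) as lane 0
        assert(M < 4 && "Each index selects one of four source lanes");
        Imm |= (unsigned)M << (2 * i);
      }
      return Imm;
    }

    int main() {
      int Mask[4] = {3, 1, 2, 0};
      printf("imm8 = 0x%02X\n", packV4ShuffleImm8(Mask)); // prints 0x27
      return 0;
    }
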
13153
13154/// Lower 4-lane 32-bit floating point shuffles.
13155///
13156/// Uses instructions exclusively from the floating point unit to minimize
13157/// domain crossing penalties, as these are sufficient to implement all v4f32
13158/// shuffles.
13159static SDValue lowerV4F32Shuffle(const SDLoc &DL, ArrayRef<int> Mask,
13160 const APInt &Zeroable, SDValue V1, SDValue V2,
13161 const X86Subtarget &Subtarget,
13162 SelectionDAG &DAG) {
13163 assert(V1.getSimpleValueType() == MVT::v4f32 && "Bad operand type!");
13164 assert(V2.getSimpleValueType() == MVT::v4f32 && "Bad operand type!");
13165 assert(Mask.size() == 4 && "Unexpected mask size for v4 shuffle!");
13166
13167 int NumV2Elements = count_if(Mask, [](int M) { return M >= 4; });
13168
13169 if (NumV2Elements == 0) {
13170 // Check for being able to broadcast a single element.
13171 if (SDValue Broadcast = lowerShuffleAsBroadcast(DL, MVT::v4f32, V1, V2,
13172 Mask, Subtarget, DAG))
13173 return Broadcast;
13174
13175 // Use even/odd duplicate instructions for masks that match their pattern.
13176 if (Subtarget.hasSSE3()) {
13177 if (isShuffleEquivalent(V1, V2, Mask, {0, 0, 2, 2}))
13178 return DAG.getNode(X86ISD::MOVSLDUP, DL, MVT::v4f32, V1);
13179 if (isShuffleEquivalent(V1, V2, Mask, {1, 1, 3, 3}))
13180 return DAG.getNode(X86ISD::MOVSHDUP, DL, MVT::v4f32, V1);
13181 }
13182
13183 if (Subtarget.hasAVX()) {
13184 // If we have AVX, we can use VPERMILPS which will allow folding a load
13185 // into the shuffle.
13186 return DAG.getNode(X86ISD::VPERMILPI, DL, MVT::v4f32, V1,
13187 getV4X86ShuffleImm8ForMask(Mask, DL, DAG));
13188 }
13189
13190 // Use MOVLHPS/MOVHLPS to simulate unary shuffles. These are only valid
13191 // in SSE1 because otherwise they are widened to v2f64 and never get here.
13192 if (!Subtarget.hasSSE2()) {
13193 if (isShuffleEquivalent(V1, V2, Mask, {0, 1, 0, 1}))
13194 return DAG.getNode(X86ISD::MOVLHPS, DL, MVT::v4f32, V1, V1);
13195 if (isShuffleEquivalent(V1, V2, Mask, {2, 3, 2, 3}))
13196 return DAG.getNode(X86ISD::MOVHLPS, DL, MVT::v4f32, V1, V1);
13197 }
13198
13199 // Otherwise, use a straight shuffle of a single input vector. We pass the
13200 // input vector to both operands to simulate this with a SHUFPS.
13201 return DAG.getNode(X86ISD::SHUFP, DL, MVT::v4f32, V1, V1,
13202 getV4X86ShuffleImm8ForMask(Mask, DL, DAG));
13203 }
13204
13205 if (Subtarget.hasAVX2())
13206 if (SDValue Extract = lowerShuffleOfExtractsAsVperm(DL, V1, V2, Mask, DAG))
13207 return Extract;
13208
13209 // There are special ways we can lower some single-element blends. However, we
13210 // have custom ways we can lower more complex single-element blends below that
13211 // we defer to if both this and BLENDPS fail to match, so restrict this to
13212 // when the V2 input is targeting element 0 of the mask -- that is the fast
13213 // case here.
13214 if (NumV2Elements == 1 && Mask[0] >= 4)
13215 if (SDValue V = lowerShuffleAsElementInsertion(
13216 DL, MVT::v4f32, V1, V2, Mask, Zeroable, Subtarget, DAG))
13217 return V;
13218
13219 if (Subtarget.hasSSE41()) {
13220 if (SDValue Blend = lowerShuffleAsBlend(DL, MVT::v4f32, V1, V2, Mask,
13221 Zeroable, Subtarget, DAG))
13222 return Blend;
13223
13224 // Use INSERTPS if we can complete the shuffle efficiently.
13225 if (SDValue V = lowerShuffleAsInsertPS(DL, V1, V2, Mask, Zeroable, DAG))
13226 return V;
13227
13228 if (!isSingleSHUFPSMask(Mask))
13229 if (SDValue BlendPerm = lowerShuffleAsBlendAndPermute(DL, MVT::v4f32, V1,
13230 V2, Mask, DAG))
13231 return BlendPerm;
13232 }
13233
13234 // Use low/high mov instructions. These are only valid in SSE1 because
13235 // otherwise they are widened to v2f64 and never get here.
13236 if (!Subtarget.hasSSE2()) {
13237 if (isShuffleEquivalent(V1, V2, Mask, {0, 1, 4, 5}))
13238 return DAG.getNode(X86ISD::MOVLHPS, DL, MVT::v4f32, V1, V2);
13239 if (isShuffleEquivalent(V1, V2, Mask, {2, 3, 6, 7}))
13240 return DAG.getNode(X86ISD::MOVHLPS, DL, MVT::v4f32, V2, V1);
13241 }
13242
13243 // Use dedicated unpack instructions for masks that match their pattern.
13244 if (SDValue V = lowerShuffleWithUNPCK(DL, MVT::v4f32, Mask, V1, V2, DAG))
13245 return V;
13246
13247 // Otherwise fall back to a SHUFPS lowering strategy.
13248 return lowerShuffleWithSHUFPS(DL, MVT::v4f32, Mask, V1, V2, DAG);
13249}
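
The MOVSLDUP/MOVSHDUP and MOVLHPS/MOVHLPS checks in the v4f32 path above rely on undef-tolerant mask matching: a mask element matches the expected pattern if it is undef (-1) or equal to the expected index. The real isShuffleEquivalent also reasons about which operand each index refers to; the sketch below shows only that undef-tolerant core, with illustrative names and values:

    #include <cstdio>
    #include <vector>

    static bool masksEquivalent(const std::vector<int> &Mask,
                                const std::vector<int> &Expected) {
      if (Mask.size() != Expected.size())
        return false;
      for (size_t i = 0; i != Mask.size(); ++i)
        if (Mask[i] >= 0 && Mask[i] != Expected[i])
          return false;
      return true;
    }

    int main() {
      // {0, -1, 2, 2} still matches the even-duplicate pattern {0, 0, 2, 2}.
      printf("%d\n", masksEquivalent({0, -1, 2, 2}, {0, 0, 2, 2})); // prints 1
      return 0;
    }
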
13250
13251/// Lower 4-lane i32 vector shuffles.
13252///
13253/// We try to handle these with integer-domain shuffles where we can, but for
13254/// blends we use the floating point domain blend instructions.
13255static SDValue lowerV4I32Shuffle(const SDLoc &DL, ArrayRef<int> Mask,
13256 const APInt &Zeroable, SDValue V1, SDValue V2,
13257 const X86Subtarget &Subtarget,
13258 SelectionDAG &DAG) {
13259 assert(V1.getSimpleValueType() == MVT::v4i32 && "Bad operand type!");
13260 assert(V2.getSimpleValueType() == MVT::v4i32 && "Bad operand type!");
13261 assert(Mask.size() == 4 && "Unexpected mask size for v4 shuffle!");
13262
13263 // Whenever we can lower this as a zext, that instruction is strictly faster
13264 // than any alternative. It also allows us to fold memory operands into the
13265 // shuffle in many cases.
13266 if (SDValue ZExt = lowerShuffleAsZeroOrAnyExtend(DL, MVT::v4i32, V1, V2, Mask,
13267 Zeroable, Subtarget, DAG))
13268 return ZExt;
13269
13270 int NumV2Elements = count_if(Mask, [](int M) { return M >= 4; });
13271
13272 if (NumV2Elements == 0) {
13273 // Try to use broadcast unless the mask only has one non-undef element.
13274 if (count_if(Mask, [](int M) { return M >= 0 && M < 4; }) > 1) {
13275 if (SDValue Broadcast = lowerShuffleAsBroadcast(DL, MVT::v4i32, V1, V2,
13276 Mask, Subtarget, DAG))
13277 return Broadcast;
13278 }
13279
13280 // Straight shuffle of a single input vector. For everything from SSE2
13281 // onward this has a single fast instruction with no scary immediates.
13282 // We coerce the shuffle pattern to be compatible with UNPCK instructions
13283 // but we aren't actually going to use the UNPCK instruction because doing
13284 // so prevents folding a load into this instruction or making a copy.
13285 const int UnpackLoMask[] = {0, 0, 1, 1};
13286 const int UnpackHiMask[] = {2, 2, 3, 3};
13287 if (isShuffleEquivalent(V1, V2, Mask, {0, 0, 1, 1}))
13288 Mask = UnpackLoMask;
13289 else if (isShuffleEquivalent(V1, V2, Mask, {2, 2, 3, 3}))
13290 Mask = UnpackHiMask;
13291
13292 return DAG.getNode(X86ISD::PSHUFD, DL, MVT::v4i32, V1,
13293 getV4X86ShuffleImm8ForMask(Mask, DL, DAG));
13294 }
13295
13296 if (Subtarget.hasAVX2())
13297 if (SDValue Extract = lowerShuffleOfExtractsAsVperm(DL, V1, V2, Mask, DAG))
13298 return Extract;
13299
13300 // Try to use shift instructions.
13301 if (SDValue Shift = lowerShuffleAsShift(DL, MVT::v4i32, V1, V2, Mask,
13302 Zeroable, Subtarget, DAG))
13303 return Shift;
13304
13305 // There are special ways we can lower some single-element blends.
13306 if (NumV2Elements == 1)
13307 if (SDValue V = lowerShuffleAsElementInsertion(
13308 DL, MVT::v4i32, V1, V2, Mask, Zeroable, Subtarget, DAG))
13309 return V;
13310
13311 // We have different paths for blend lowering, but they all must use the
13312 // *exact* same predicate.
13313 bool IsBlendSupported = Subtarget.hasSSE41();
13314 if (IsBlendSupported)
13315 if (SDValue Blend = lowerShuffleAsBlend(DL, MVT::v4i32, V1, V2, Mask,
13316 Zeroable, Subtarget, DAG))
13317 return Blend;
13318
13319 if (SDValue Masked = lowerShuffleAsBitMask(DL, MVT::v4i32, V1, V2, Mask,
13320 Zeroable, Subtarget, DAG))
13321 return Masked;
13322
13323 // Use dedicated unpack instructions for masks that match their pattern.
13324 if (SDValue V = lowerShuffleWithUNPCK(DL, MVT::v4i32, Mask, V1, V2, DAG))
13325 return V;
13326
13327 // Try to use byte rotation instructions.
13328 // It's more profitable for pre-SSSE3 to use shuffles/unpacks.
13329 if (Subtarget.hasSSSE3()) {
13330 if (Subtarget.hasVLX())
13331 if (SDValue Rotate = lowerShuffleAsRotate(DL, MVT::v4i32, V1, V2, Mask,
13332 Subtarget, DAG))
13333 return Rotate;
13334
13335 if (SDValue Rotate = lowerShuffleAsByteRotate(DL, MVT::v4i32, V1, V2, Mask,
13336 Subtarget, DAG))
13337 return Rotate;
13338 }
13339
13340 // Assume that a single SHUFPS is faster than an alternative sequence of
13341 // multiple instructions (even if the CPU has a domain penalty).
13342 // If some CPU is harmed by the domain switch, we can fix it in a later pass.
13343 if (!isSingleSHUFPSMask(Mask)) {
13344 // If we have direct support for blends, we should lower by decomposing into
13345 // a permute. That will be faster than the domain cross.
13346 if (IsBlendSupported)
13347 return lowerShuffleAsDecomposedShuffleBlend(DL, MVT::v4i32, V1, V2, Mask,
13348 Subtarget, DAG);
13349
13350 // Try to lower by permuting the inputs into an unpack instruction.
13351 if (SDValue Unpack = lowerShuffleAsPermuteAndUnpack(DL, MVT::v4i32, V1, V2,
13352 Mask, Subtarget, DAG))
13353 return Unpack;
13354 }
13355
13356 // We implement this with SHUFPS because it can blend from two vectors.
13357 // Because we're going to eventually use SHUFPS, we use SHUFPS even to build
13358 // up the inputs, bypassing domain shift penalties that we would incur if we
13359 // directly used PSHUFD on Nehalem and older. For newer chips, this isn't
13360 // relevant.
13361 SDValue CastV1 = DAG.getBitcast(MVT::v4f32, V1);
13362 SDValue CastV2 = DAG.getBitcast(MVT::v4f32, V2);
13363 SDValue ShufPS = DAG.getVectorShuffle(MVT::v4f32, DL, CastV1, CastV2, Mask);
13364 return DAG.getBitcast(MVT::v4i32, ShufPS);
13365}
13366
13367/// Lowering of single-input v8i16 shuffles is the cornerstone of SSE2
13368/// shuffle lowering, and the most complex part.
13369///
13370/// The lowering strategy is to try to form pairs of input lanes which are
13371/// targeted at the same half of the final vector, and then use a dword shuffle
13372/// to place them onto the right half, and finally unpack the paired lanes into
13373/// their final position.
13374///
13375/// The exact breakdown of how to form these dword pairs and align them on the
13376/// correct sides is really tricky. See the comments within the function for
13377/// more of the details.
13378///
13379/// This code also handles repeated 128-bit lanes of v8i16 shuffles, but each
13380/// lane must shuffle the *exact* same way. In fact, you must pass a v8 Mask to
13381/// this routine for it to work correctly. To shuffle a 256-bit or 512-bit i16
13382/// vector, form the analogous 128-bit 8-element Mask.
13383static SDValue lowerV8I16GeneralSingleInputShuffle(
13384 const SDLoc &DL, MVT VT, SDValue V, MutableArrayRef<int> Mask,
13385 const X86Subtarget &Subtarget, SelectionDAG &DAG) {
13386 assert(VT.getVectorElementType() == MVT::i16 && "Bad input type!");
13387 MVT PSHUFDVT = MVT::getVectorVT(MVT::i32, VT.getVectorNumElements() / 2);
13388
13389 assert(Mask.size() == 8 && "Shuffle mask length doesn't match!");
13390 MutableArrayRef<int> LoMask = Mask.slice(0, 4);
13391 MutableArrayRef<int> HiMask = Mask.slice(4, 4);
13392
13393 // Attempt to directly match PSHUFLW or PSHUFHW.
13394 if (isUndefOrInRange(LoMask, 0, 4) &&
13395 isSequentialOrUndefInRange(HiMask, 0, 4, 4)) {
13396 return DAG.getNode(X86ISD::PSHUFLW, DL, VT, V,
13397 getV4X86ShuffleImm8ForMask(LoMask, DL, DAG));
13398 }
13399 if (isUndefOrInRange(HiMask, 4, 8) &&
13400 isSequentialOrUndefInRange(LoMask, 0, 4, 0)) {
13401 for (int i = 0; i != 4; ++i)
13402 HiMask[i] = (HiMask[i] < 0 ? HiMask[i] : (HiMask[i] - 4));
13403 return DAG.getNode(X86ISD::PSHUFHW, DL, VT, V,
13404 getV4X86ShuffleImm8ForMask(HiMask, DL, DAG));
13405 }
13406
13407 SmallVector<int, 4> LoInputs;
13408 copy_if(LoMask, std::back_inserter(LoInputs), [](int M) { return M >= 0; });
13409 array_pod_sort(LoInputs.begin(), LoInputs.end());
13410 LoInputs.erase(std::unique(LoInputs.begin(), LoInputs.end()), LoInputs.end());
13411 SmallVector<int, 4> HiInputs;
13412 copy_if(HiMask, std::back_inserter(HiInputs), [](int M) { return M >= 0; });
13413 array_pod_sort(HiInputs.begin(), HiInputs.end());
13414 HiInputs.erase(std::unique(HiInputs.begin(), HiInputs.end()), HiInputs.end());
13415 int NumLToL = llvm::lower_bound(LoInputs, 4) - LoInputs.begin();
13416 int NumHToL = LoInputs.size() - NumLToL;
13417 int NumLToH = llvm::lower_bound(HiInputs, 4) - HiInputs.begin();
13418 int NumHToH = HiInputs.size() - NumLToH;
13419 MutableArrayRef<int> LToLInputs(LoInputs.data(), NumLToL);
13420 MutableArrayRef<int> LToHInputs(HiInputs.data(), NumLToH);
13421 MutableArrayRef<int> HToLInputs(LoInputs.data() + NumLToL, NumHToL);
13422 MutableArrayRef<int> HToHInputs(HiInputs.data() + NumLToH, NumHToH);
13423
13424 // If we are shuffling values from one half - check how many different DWORD
13425 // pairs we need to create. If only 1 or 2 then we can perform this as a
13426 // PSHUFLW/PSHUFHW + PSHUFD instead of the PSHUFD+PSHUFLW+PSHUFHW chain below.
13427 auto ShuffleDWordPairs = [&](ArrayRef<int> PSHUFHalfMask,
13428 ArrayRef<int> PSHUFDMask, unsigned ShufWOp) {
13429 V = DAG.getNode(ShufWOp, DL, VT, V,
13430 getV4X86ShuffleImm8ForMask(PSHUFHalfMask, DL, DAG));
13431 V = DAG.getBitcast(PSHUFDVT, V);
13432 V = DAG.getNode(X86ISD::PSHUFD, DL, PSHUFDVT, V,
13433 getV4X86ShuffleImm8ForMask(PSHUFDMask, DL, DAG));
13434 return DAG.getBitcast(VT, V);
13435 };
13436
13437 if ((NumHToL + NumHToH) == 0 || (NumLToL + NumLToH) == 0) {
13438 int PSHUFDMask[4] = { -1, -1, -1, -1 };
13439 SmallVector<std::pair<int, int>, 4> DWordPairs;
13440 int DOffset = ((NumHToL + NumHToH) == 0 ? 0 : 2);
13441
13442 // Collect the different DWORD pairs.
13443 for (int DWord = 0; DWord != 4; ++DWord) {
13444 int M0 = Mask[2 * DWord + 0];
13445 int M1 = Mask[2 * DWord + 1];
13446 M0 = (M0 >= 0 ? M0 % 4 : M0);
13447 M1 = (M1 >= 0 ? M1 % 4 : M1);
13448 if (M0 < 0 && M1 < 0)
13449 continue;
13450
13451 bool Match = false;
13452 for (int j = 0, e = DWordPairs.size(); j < e; ++j) {
13453 auto &DWordPair = DWordPairs[j];
13454 if ((M0 < 0 || isUndefOrEqual(DWordPair.first, M0)) &&
13455 (M1 < 0 || isUndefOrEqual(DWordPair.second, M1))) {
13456 DWordPair.first = (M0 >= 0 ? M0 : DWordPair.first);
13457 DWordPair.second = (M1 >= 0 ? M1 : DWordPair.second);
13458 PSHUFDMask[DWord] = DOffset + j;
13459 Match = true;
13460 break;
13461 }
13462 }
13463 if (!Match) {
13464 PSHUFDMask[DWord] = DOffset + DWordPairs.size();
13465 DWordPairs.push_back(std::make_pair(M0, M1));
13466 }
13467 }
13468
13469 if (DWordPairs.size() <= 2) {
13470 DWordPairs.resize(2, std::make_pair(-1, -1));
13471 int PSHUFHalfMask[4] = {DWordPairs[0].first, DWordPairs[0].second,
13472 DWordPairs[1].first, DWordPairs[1].second};
13473 if ((NumHToL + NumHToH) == 0)
13474 return ShuffleDWordPairs(PSHUFHalfMask, PSHUFDMask, X86ISD::PSHUFLW);
13475 if ((NumLToL + NumLToH) == 0)
13476 return ShuffleDWordPairs(PSHUFHalfMask, PSHUFDMask, X86ISD::PSHUFHW);
13477 }
13478 }
13479
13480 // Simplify the 1-into-3 and 3-into-1 cases with a single pshufd. For all
13481 // such inputs we can swap two of the dwords across the half mark and end up
13482 // with <=2 inputs to each half in each half. Once there, we can fall through
13483 // to the generic code below. For example:
13484 //
13485 // Input: [a, b, c, d, e, f, g, h] -PSHUFD[0,2,1,3]-> [a, b, e, f, c, d, g, h]
13486 // Mask: [0, 1, 2, 7, 4, 5, 6, 3] -----------------> [0, 1, 4, 7, 2, 3, 6, 5]
13487 //
13488 // However in some very rare cases we have a 1-into-3 or 3-into-1 on one half
13489 // and an existing 2-into-2 on the other half. In this case we may have to
13490 // pre-shuffle the 2-into-2 half to avoid turning it into a 3-into-1 or
13491 // 1-into-3 which could cause us to cycle endlessly fixing each side in turn.
13492 // Fortunately, we don't have to handle anything but a 2-into-2 pattern
13493 // because any other situation (including a 3-into-1 or 1-into-3 in the other
13494 // half than the one we target for fixing) will be fixed when we re-enter this
13495 // path. We will also combine away any sequence of PSHUFD instructions that
13496 // result into a single instruction. Here is an example of the tricky case:
13497 //
13498 // Input: [a, b, c, d, e, f, g, h] -PSHUFD[0,2,1,3]-> [a, b, e, f, c, d, g, h]
13499 // Mask: [3, 7, 1, 0, 2, 7, 3, 5] -THIS-IS-BAD!!!!-> [5, 7, 1, 0, 4, 7, 5, 3]
13500 //
13501 // This now has a 1-into-3 in the high half! Instead, we do two shuffles:
13502 //
13503 // Input: [a, b, c, d, e, f, g, h] PSHUFHW[0,2,1,3]-> [a, b, c, d, e, g, f, h]
13504 // Mask: [3, 7, 1, 0, 2, 7, 3, 5] -----------------> [3, 7, 1, 0, 2, 7, 3, 6]
13505 //
13506 // Input: [a, b, c, d, e, g, f, h] -PSHUFD[0,2,1,3]-> [a, b, e, g, c, d, f, h]
13507 // Mask: [3, 7, 1, 0, 2, 7, 3, 6] -----------------> [5, 7, 1, 0, 4, 7, 5, 6]
13508 //
13509 // The result is fine to be handled by the generic logic.
13510 auto balanceSides = [&](ArrayRef<int> AToAInputs, ArrayRef<int> BToAInputs,
13511 ArrayRef<int> BToBInputs, ArrayRef<int> AToBInputs,
13512 int AOffset, int BOffset) {
13513 assert((AToAInputs.size() == 3 || AToAInputs.size() == 1) &&
13514        "Must call this with A having 3 or 1 inputs from the A half.");
13515 assert((BToAInputs.size() == 1 || BToAInputs.size() == 3) &&
13516        "Must call this with B having 1 or 3 inputs from the B half.");
13517 assert(AToAInputs.size() + BToAInputs.size() == 4 &&
13518        "Must call this with either 3:1 or 1:3 inputs (summing to 4).");
13519
13520 bool ThreeAInputs = AToAInputs.size() == 3;
13521
13522 // Compute the index of dword with only one word among the three inputs in
13523 // a half by taking the sum of the half with three inputs and subtracting
13524 // the sum of the actual three inputs. The difference is the remaining
13525 // slot.
13526 int ADWord = 0, BDWord = 0;
13527 int &TripleDWord = ThreeAInputs ? ADWord : BDWord;
13528 int &OneInputDWord = ThreeAInputs ? BDWord : ADWord;
13529 int TripleInputOffset = ThreeAInputs ? AOffset : BOffset;
13530 ArrayRef<int> TripleInputs = ThreeAInputs ? AToAInputs : BToAInputs;
13531 int OneInput = ThreeAInputs ? BToAInputs[0] : AToAInputs[0];
13532 int TripleInputSum = 0 + 1 + 2 + 3 + (4 * TripleInputOffset);
13533 int TripleNonInputIdx =
13534 TripleInputSum - std::accumulate(TripleInputs.begin(), TripleInputs.end(), 0);
13535 TripleDWord = TripleNonInputIdx / 2;
13536
13537 // We use xor with one to compute the adjacent DWord to whichever one the
13538 // OneInput is in.
13539 OneInputDWord = (OneInput / 2) ^ 1;
13540
13541 // Check for one tricky case: We're fixing a 3<-1 or a 1<-3 shuffle for AToA
13542 // and BToA inputs. If there is also such a problem with the BToB and AToB
13543 // inputs, we don't try to fix it necessarily -- we'll recurse and see it in
13544 // the next pass. However, if we have a 2<-2 in the BToB and AToB inputs, it
13545 // is essential that we don't *create* a 3<-1 as then we might oscillate.
13546 if (BToBInputs.size() == 2 && AToBInputs.size() == 2) {
13547 // Compute how many inputs will be flipped by swapping these DWords. We
13548 // need to balance this to ensure we don't form a 3-1 shuffle in the
13549 // other half.
13550
13551 int NumFlippedAToBInputs =
13552 std::count(AToBInputs.begin(), AToBInputs.end(), 2 * ADWord) +
13553 std::count(AToBInputs.begin(), AToBInputs.end(), 2 * ADWord + 1);
13554 int NumFlippedBToBInputs =
13555 std::count(BToBInputs.begin(), BToBInputs.end(), 2 * BDWord) +
13556 std::count(BToBInputs.begin(), BToBInputs.end(), 2 * BDWord + 1);
13557 if ((NumFlippedAToBInputs == 1 &&
13558 (NumFlippedBToBInputs == 0 || NumFlippedBToBInputs == 2)) ||
13559 (NumFlippedBToBInputs == 1 &&
13560 (NumFlippedAToBInputs == 0 || NumFlippedAToBInputs == 2))) {
13561 // We choose whether to fix the A half or B half based on whether that
13562 // half has zero flipped inputs. At zero, we may not be able to fix it
13563 // with that half. We also bias towards fixing the B half because that
13564 // will more commonly be the high half, and we have to bias one way.
13565 auto FixFlippedInputs = [&V, &DL, &Mask, &DAG](int PinnedIdx, int DWord,
13566 ArrayRef<int> Inputs) {
13567 int FixIdx = PinnedIdx ^ 1; // The adjacent slot to the pinned slot.
13568 bool IsFixIdxInput = is_contained(Inputs, PinnedIdx ^ 1);
13569 // Determine whether the free index is in the flipped dword or the
13570 // unflipped dword based on where the pinned index is. We use this bit
13571 // in an xor to conditionally select the adjacent dword.
13572 int FixFreeIdx = 2 * (DWord ^ (PinnedIdx / 2 == DWord));
13573 bool IsFixFreeIdxInput = is_contained(Inputs, FixFreeIdx);
13574 if (IsFixIdxInput == IsFixFreeIdxInput)
13575 FixFreeIdx += 1;
13576 IsFixFreeIdxInput = is_contained(Inputs, FixFreeIdx);
13577 assert(IsFixIdxInput != IsFixFreeIdxInput &&
13578 "We need to be changing the number of flipped inputs!");
13579 int PSHUFHalfMask[] = {0, 1, 2, 3};
13580 std::swap(PSHUFHalfMask[FixFreeIdx % 4], PSHUFHalfMask[FixIdx % 4]);
13581 V = DAG.getNode(
13582 FixIdx < 4 ? X86ISD::PSHUFLW : X86ISD::PSHUFHW, DL,
13583 MVT::getVectorVT(MVT::i16, V.getValueSizeInBits() / 16), V,
13584 getV4X86ShuffleImm8ForMask(PSHUFHalfMask, DL, DAG));
13585
13586 for (int &M : Mask)
13587 if (M >= 0 && M == FixIdx)
13588 M = FixFreeIdx;
13589 else if (M >= 0 && M == FixFreeIdx)
13590 M = FixIdx;
13591 };
13592 if (NumFlippedBToBInputs != 0) {
13593 int BPinnedIdx =
13594 BToAInputs.size() == 3 ? TripleNonInputIdx : OneInput;
13595 FixFlippedInputs(BPinnedIdx, BDWord, BToBInputs);
13596 } else {
13597 assert(NumFlippedAToBInputs != 0 && "Impossible given predicates!");
13598 int APinnedIdx = ThreeAInputs ? TripleNonInputIdx : OneInput;
13599 FixFlippedInputs(APinnedIdx, ADWord, AToBInputs);
13600 }
13601 }
13602 }
13603
13604 int PSHUFDMask[] = {0, 1, 2, 3};
13605 PSHUFDMask[ADWord] = BDWord;
13606 PSHUFDMask[BDWord] = ADWord;
13607 V = DAG.getBitcast(
13608 VT,
13609 DAG.getNode(X86ISD::PSHUFD, DL, PSHUFDVT, DAG.getBitcast(PSHUFDVT, V),
13610 getV4X86ShuffleImm8ForMask(PSHUFDMask, DL, DAG)));
13611
13612 // Adjust the mask to match the new locations of A and B.
13613 for (int &M : Mask)
13614 if (M >= 0 && M/2 == ADWord)
13615 M = 2 * BDWord + M % 2;
13616 else if (M >= 0 && M/2 == BDWord)
13617 M = 2 * ADWord + M % 2;
13618
13619 // Recurse back into this routine to re-compute state now that this isn't
13620 // a 3 and 1 problem.
13621 return lowerV8I16GeneralSingleInputShuffle(DL, VT, V, Mask, Subtarget, DAG);
13622 };
13623 if ((NumLToL == 3 && NumHToL == 1) || (NumLToL == 1 && NumHToL == 3))
13624 return balanceSides(LToLInputs, HToLInputs, HToHInputs, LToHInputs, 0, 4);
13625 if ((NumHToH == 3 && NumLToH == 1) || (NumHToH == 1 && NumLToH == 3))
13626 return balanceSides(HToHInputs, LToHInputs, LToLInputs, HToLInputs, 4, 0);
13627
13628 // At this point there are at most two inputs to the low and high halves from
13629 // each half. That means the inputs can always be grouped into dwords and
13630 // those dwords can then be moved to the correct half with a dword shuffle.
13631 // We use at most one low and one high word shuffle to collect these paired
13632 // inputs into dwords, and finally a dword shuffle to place them.
13633 int PSHUFLMask[4] = {-1, -1, -1, -1};
13634 int PSHUFHMask[4] = {-1, -1, -1, -1};
13635 int PSHUFDMask[4] = {-1, -1, -1, -1};
13636
13637 // First fix the masks for all the inputs that are staying in their
13638 // original halves. This will then dictate the targets of the cross-half
13639 // shuffles.
13640 auto fixInPlaceInputs =
13641 [&PSHUFDMask](ArrayRef<int> InPlaceInputs, ArrayRef<int> IncomingInputs,
13642 MutableArrayRef<int> SourceHalfMask,
13643 MutableArrayRef<int> HalfMask, int HalfOffset) {
13644 if (InPlaceInputs.empty())
13645 return;
13646 if (InPlaceInputs.size() == 1) {
13647 SourceHalfMask[InPlaceInputs[0] - HalfOffset] =
13648 InPlaceInputs[0] - HalfOffset;
13649 PSHUFDMask[InPlaceInputs[0] / 2] = InPlaceInputs[0] / 2;
13650 return;
13651 }
13652 if (IncomingInputs.empty()) {
13653 // Just fix all of the in place inputs.
13654 for (int Input : InPlaceInputs) {
13655 SourceHalfMask[Input - HalfOffset] = Input - HalfOffset;
13656 PSHUFDMask[Input / 2] = Input / 2;
13657 }
13658 return;
13659 }
13660
13661 assert(InPlaceInputs.size() == 2 && "Cannot handle 3 or 4 inputs!");
13662 SourceHalfMask[InPlaceInputs[0] - HalfOffset] =
13663 InPlaceInputs[0] - HalfOffset;
13664 // Put the second input next to the first so that they are packed into
13665 // a dword. We find the adjacent index by toggling the low bit.
13666 int AdjIndex = InPlaceInputs[0] ^ 1;
13667 SourceHalfMask[AdjIndex - HalfOffset] = InPlaceInputs[1] - HalfOffset;
13668 std::replace(HalfMask.begin(), HalfMask.end(), InPlaceInputs[1], AdjIndex);
13669 PSHUFDMask[AdjIndex / 2] = AdjIndex / 2;
13670 };
13671 fixInPlaceInputs(LToLInputs, HToLInputs, PSHUFLMask, LoMask, 0);
13672 fixInPlaceInputs(HToHInputs, LToHInputs, PSHUFHMask, HiMask, 4);
13673
13674 // Now gather the cross-half inputs and place them into a free dword of
13675 // their target half.
13676 // FIXME: This operation could almost certainly be simplified dramatically to
13677 // look more like the 3-1 fixing operation.
13678 auto moveInputsToRightHalf = [&PSHUFDMask](
13679 MutableArrayRef<int> IncomingInputs, ArrayRef<int> ExistingInputs,
13680 MutableArrayRef<int> SourceHalfMask, MutableArrayRef<int> HalfMask,
13681 MutableArrayRef<int> FinalSourceHalfMask, int SourceOffset,
13682 int DestOffset) {
13683 auto isWordClobbered = [](ArrayRef<int> SourceHalfMask, int Word) {
13684 return SourceHalfMask[Word] >= 0 && SourceHalfMask[Word] != Word;
13685 };
13686 auto isDWordClobbered = [&isWordClobbered](ArrayRef<int> SourceHalfMask,
13687 int Word) {
13688 int LowWord = Word & ~1;
13689 int HighWord = Word | 1;
13690 return isWordClobbered(SourceHalfMask, LowWord) ||
13691 isWordClobbered(SourceHalfMask, HighWord);
13692 };
13693
13694 if (IncomingInputs.empty())
13695 return;
13696
13697 if (ExistingInputs.empty()) {
13698 // Map any dwords with inputs from them into the right half.
13699 for (int Input : IncomingInputs) {
13700 // If the source half mask maps over the inputs, turn those into
13701 // swaps and use the swapped lane.
13702 if (isWordClobbered(SourceHalfMask, Input - SourceOffset)) {
13703 if (SourceHalfMask[SourceHalfMask[Input - SourceOffset]] < 0) {
13704 SourceHalfMask[SourceHalfMask[Input - SourceOffset]] =
13705 Input - SourceOffset;
13706 // We have to swap the uses in our half mask in one sweep.
13707 for (int &M : HalfMask)
13708 if (M == SourceHalfMask[Input - SourceOffset] + SourceOffset)
13709 M = Input;
13710 else if (M == Input)
13711 M = SourceHalfMask[Input - SourceOffset] + SourceOffset;
13712 } else {
13713 assert(SourceHalfMask[SourceHalfMask[Input - SourceOffset]] ==
13714 Input - SourceOffset &&
13715 "Previous placement doesn't match!");
13716 }
13717 // Note that this correctly re-maps both when we do a swap and when
13718 // we observe the other side of the swap above. We rely on that to
13719 // avoid swapping the members of the input list directly.
13720 Input = SourceHalfMask[Input - SourceOffset] + SourceOffset;
13721 }
13722
13723 // Map the input's dword into the correct half.
13724 if (PSHUFDMask[(Input - SourceOffset + DestOffset) / 2] < 0)
13725 PSHUFDMask[(Input - SourceOffset + DestOffset) / 2] = Input / 2;
13726 else
13727 assert(PSHUFDMask[(Input - SourceOffset + DestOffset) / 2] ==
13728 Input / 2 &&
13729 "Previous placement doesn't match!");
13730 }
13731
13732 // And just directly shift any other-half mask elements to be same-half
13733 // as we will have mirrored the dword containing the element into the
13734 // same position within that half.
13735 for (int &M : HalfMask)
13736 if (M >= SourceOffset && M < SourceOffset + 4) {
13737 M = M - SourceOffset + DestOffset;
13738 assert(M >= 0 && "This should never wrap below zero!");
13739 }
13740 return;
13741 }
13742
13743 // Ensure we have the input in a viable dword of its current half. This
13744 // is particularly tricky because the original position may be clobbered
13745 // by inputs being moved and *staying* in that half.
13746 if (IncomingInputs.size() == 1) {
13747 if (isWordClobbered(SourceHalfMask, IncomingInputs[0] - SourceOffset)) {
13748 int InputFixed = find(SourceHalfMask, -1) - std::begin(SourceHalfMask) +
13749 SourceOffset;
13750 SourceHalfMask[InputFixed - SourceOffset] =
13751 IncomingInputs[0] - SourceOffset;
13752 std::replace(HalfMask.begin(), HalfMask.end(), IncomingInputs[0],
13753 InputFixed);
13754 IncomingInputs[0] = InputFixed;
13755 }
13756 } else if (IncomingInputs.size() == 2) {
13757 if (IncomingInputs[0] / 2 != IncomingInputs[1] / 2 ||
13758 isDWordClobbered(SourceHalfMask, IncomingInputs[0] - SourceOffset)) {
13759 // We have two non-adjacent or clobbered inputs we need to extract from
13760 // the source half. To do this, we need to map them into some adjacent
13761 // dword slot in the source mask.
13762 int InputsFixed[2] = {IncomingInputs[0] - SourceOffset,
13763 IncomingInputs[1] - SourceOffset};
13764
13765 // If there is a free slot in the source half mask adjacent to one of
13766 // the inputs, place the other input in it. We use (Index XOR 1) to
13767 // compute an adjacent index.
13768 if (!isWordClobbered(SourceHalfMask, InputsFixed[0]) &&
13769 SourceHalfMask[InputsFixed[0] ^ 1] < 0) {
13770 SourceHalfMask[InputsFixed[0]] = InputsFixed[0];
13771 SourceHalfMask[InputsFixed[0] ^ 1] = InputsFixed[1];
13772 InputsFixed[1] = InputsFixed[0] ^ 1;
13773 } else if (!isWordClobbered(SourceHalfMask, InputsFixed[1]) &&
13774 SourceHalfMask[InputsFixed[1] ^ 1] < 0) {
13775 SourceHalfMask[InputsFixed[1]] = InputsFixed[1];
13776 SourceHalfMask[InputsFixed[1] ^ 1] = InputsFixed[0];
13777 InputsFixed[0] = InputsFixed[1] ^ 1;
13778 } else if (SourceHalfMask[2 * ((InputsFixed[0] / 2) ^ 1)] < 0 &&
13779 SourceHalfMask[2 * ((InputsFixed[0] / 2) ^ 1) + 1] < 0) {
13780 // The two inputs are in the same DWord but it is clobbered and the
13781 // adjacent DWord isn't used at all. Move both inputs to the free
13782 // slot.
13783 SourceHalfMask[2 * ((InputsFixed[0] / 2) ^ 1)] = InputsFixed[0];
13784 SourceHalfMask[2 * ((InputsFixed[0] / 2) ^ 1) + 1] = InputsFixed[1];
13785 InputsFixed[0] = 2 * ((InputsFixed[0] / 2) ^ 1);
13786 InputsFixed[1] = 2 * ((InputsFixed[0] / 2) ^ 1) + 1;
13787 } else {
13788 // The only way we hit this point is if there is no clobbering
13789 // (because there are no off-half inputs to this half) and there is no
13790 // free slot adjacent to one of the inputs. In this case, we have to
13791 // swap an input with a non-input.
13792 for (int i = 0; i < 4; ++i)
13793 assert((SourceHalfMask[i] < 0 || SourceHalfMask[i] == i) &&
13794 "We can't handle any clobbers here!");
13795 assert(InputsFixed[1] != (InputsFixed[0] ^ 1) &&
13796 "Cannot have adjacent inputs here!");
13797
13798 SourceHalfMask[InputsFixed[0] ^ 1] = InputsFixed[1];
13799 SourceHalfMask[InputsFixed[1]] = InputsFixed[0] ^ 1;
13800
13801 // We also have to update the final source mask in this case because
13802 // it may need to undo the above swap.
13803 for (int &M : FinalSourceHalfMask)
13804 if (M == (InputsFixed[0] ^ 1) + SourceOffset)
13805 M = InputsFixed[1] + SourceOffset;
13806 else if (M == InputsFixed[1] + SourceOffset)
13807 M = (InputsFixed[0] ^ 1) + SourceOffset;
13808
13809 InputsFixed[1] = InputsFixed[0] ^ 1;
13810 }
13811
13812 // Point everything at the fixed inputs.
13813 for (int &M : HalfMask)
13814 if (M == IncomingInputs[0])
13815 M = InputsFixed[0] + SourceOffset;
13816 else if (M == IncomingInputs[1])
13817 M = InputsFixed[1] + SourceOffset;
13818
13819 IncomingInputs[0] = InputsFixed[0] + SourceOffset;
13820 IncomingInputs[1] = InputsFixed[1] + SourceOffset;
13821 }
13822 } else {
13823 llvm_unreachable("Unhandled input size!")::llvm::llvm_unreachable_internal("Unhandled input size!", "/build/llvm-toolchain-snapshot-10~svn374877/lib/Target/X86/X86ISelLowering.cpp"
, 13823)
;
13824 }
13825
13826 // Now hoist the DWord down to the right half.
13827 int FreeDWord = (PSHUFDMask[DestOffset / 2] < 0 ? 0 : 1) + DestOffset / 2;
13828 assert(PSHUFDMask[FreeDWord] < 0 && "DWord not free");
13829 PSHUFDMask[FreeDWord] = IncomingInputs[0] / 2;
13830 for (int &M : HalfMask)
13831 for (int Input : IncomingInputs)
13832 if (M == Input)
13833 M = FreeDWord * 2 + Input % 2;
13834 };
13835 moveInputsToRightHalf(HToLInputs, LToLInputs, PSHUFHMask, LoMask, HiMask,
13836 /*SourceOffset*/ 4, /*DestOffset*/ 0);
13837 moveInputsToRightHalf(LToHInputs, HToHInputs, PSHUFLMask, HiMask, LoMask,
13838 /*SourceOffset*/ 0, /*DestOffset*/ 4);
13839
13840 // Now enact all the shuffles we've computed to move the inputs into their
13841 // target half.
13842 if (!isNoopShuffleMask(PSHUFLMask))
13843 V = DAG.getNode(X86ISD::PSHUFLW, DL, VT, V,
13844 getV4X86ShuffleImm8ForMask(PSHUFLMask, DL, DAG));
13845 if (!isNoopShuffleMask(PSHUFHMask))
13846 V = DAG.getNode(X86ISD::PSHUFHW, DL, VT, V,
13847 getV4X86ShuffleImm8ForMask(PSHUFHMask, DL, DAG));
13848 if (!isNoopShuffleMask(PSHUFDMask))
13849 V = DAG.getBitcast(
13850 VT,
13851 DAG.getNode(X86ISD::PSHUFD, DL, PSHUFDVT, DAG.getBitcast(PSHUFDVT, V),
13852 getV4X86ShuffleImm8ForMask(PSHUFDMask, DL, DAG)));
13853
13854 // At this point, each half should contain all its inputs, and we can then
13855 // just shuffle them into their final position.
13856 assert(count_if(LoMask, [](int M) { return M >= 4; }) == 0 &&
13857 "Failed to lift all the high half inputs to the low mask!");
13858 assert(count_if(HiMask, [](int M) { return M >= 0 && M < 4; }) == 0 &&
13859 "Failed to lift all the low half inputs to the high mask!");
13860
13861 // Do a half shuffle for the low mask.
13862 if (!isNoopShuffleMask(LoMask))
13863 V = DAG.getNode(X86ISD::PSHUFLW, DL, VT, V,
13864 getV4X86ShuffleImm8ForMask(LoMask, DL, DAG));
13865
13866 // Do a half shuffle with the high mask after shifting its values down.
13867 for (int &M : HiMask)
13868 if (M >= 0)
13869 M -= 4;
13870 if (!isNoopShuffleMask(HiMask))
13871 V = DAG.getNode(X86ISD::PSHUFHW, DL, VT, V,
13872 getV4X86ShuffleImm8ForMask(HiMask, DL, DAG));
13873
13874 return V;
13875}
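// Illustrative sketch (editorial addition, not part of X86ISelLowering.cpp):
// the routine above is driven by how many mask elements stay within their
// half versus cross it. Assuming a plain int[8] mask with -1 for undef lanes,
// the four bucket counts it reasons about can be computed like this.
static void countHalfCrossings(const int Mask[8], int &NumLToL, int &NumHToL,
                               int &NumLToH, int &NumHToH) {
  NumLToL = NumHToL = NumLToH = NumHToH = 0;
  for (int i = 0; i < 8; ++i) {
    int M = Mask[i];
    if (M < 0)
      continue;               // Undef lanes constrain neither half.
    bool FromHigh = M >= 4;   // Source word lives in the high half.
    bool ToHigh = i >= 4;     // Destination lane lives in the high half.
    if (FromHigh)
      (ToHigh ? NumHToH : NumHToL) += 1;
    else
      (ToHigh ? NumLToH : NumLToL) += 1;
  }
}
// For example, the mask {0, 1, 2, 7, 4, 5, 6, 3} from the comment above yields
// NumLToL = 3, NumHToL = 1, NumLToH = 1, NumHToH = 3, which is exactly the
// 3:1/1:3 shape that balanceSides repairs.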
13876
13877/// Helper to form a PSHUFB-based shuffle+blend, opportunistically avoiding the
13878/// blend if only one input is used.
13879static SDValue lowerShuffleAsBlendOfPSHUFBs(
13880 const SDLoc &DL, MVT VT, SDValue V1, SDValue V2, ArrayRef<int> Mask,
13881 const APInt &Zeroable, SelectionDAG &DAG, bool &V1InUse, bool &V2InUse) {
13882 assert(!is128BitLaneCrossingShuffleMask(VT, Mask) &&
13883 "Lane crossing shuffle masks not supported");
13884
13885 int NumBytes = VT.getSizeInBits() / 8;
13886 int Size = Mask.size();
13887 int Scale = NumBytes / Size;
13888
13889 SmallVector<SDValue, 64> V1Mask(NumBytes, DAG.getUNDEF(MVT::i8));
13890 SmallVector<SDValue, 64> V2Mask(NumBytes, DAG.getUNDEF(MVT::i8));
13891 V1InUse = false;
13892 V2InUse = false;
13893
13894 for (int i = 0; i < NumBytes; ++i) {
13895 int M = Mask[i / Scale];
13896 if (M < 0)
13897 continue;
13898
13899 const int ZeroMask = 0x80;
13900 int V1Idx = M < Size ? M * Scale + i % Scale : ZeroMask;
13901 int V2Idx = M < Size ? ZeroMask : (M - Size) * Scale + i % Scale;
13902 if (Zeroable[i / Scale])
13903 V1Idx = V2Idx = ZeroMask;
13904
13905 V1Mask[i] = DAG.getConstant(V1Idx, DL, MVT::i8);
13906 V2Mask[i] = DAG.getConstant(V2Idx, DL, MVT::i8);
13907 V1InUse |= (ZeroMask != V1Idx);
13908 V2InUse |= (ZeroMask != V2Idx);
13909 }
13910
13911 MVT ShufVT = MVT::getVectorVT(MVT::i8, NumBytes);
13912 if (V1InUse)
13913 V1 = DAG.getNode(X86ISD::PSHUFB, DL, ShufVT, DAG.getBitcast(ShufVT, V1),
13914 DAG.getBuildVector(ShufVT, DL, V1Mask));
13915 if (V2InUse)
13916 V2 = DAG.getNode(X86ISD::PSHUFB, DL, ShufVT, DAG.getBitcast(ShufVT, V2),
13917 DAG.getBuildVector(ShufVT, DL, V2Mask));
13918
13919 // If we need shuffled inputs from both, blend the two.
13920 SDValue V;
13921 if (V1InUse && V2InUse)
13922 V = DAG.getNode(ISD::OR, DL, ShufVT, V1, V2);
13923 else
13924 V = V1InUse ? V1 : V2;
13925
13926 // Cast the result back to the correct type.
13927 return DAG.getBitcast(VT, V);
13928}
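// Illustrative sketch (editorial addition, not part of X86ISelLowering.cpp):
// how the per-byte PSHUFB selectors above follow from an element-level mask.
// For a two-input shuffle of Size elements spread over NumBytes bytes, each
// byte either indexes into V1, indexes into V2, or is zeroed through the 0x80
// sentinel (PSHUFB zeroes a lane whenever bit 7 of its selector is set). The
// real code leaves undef lanes undef; this sketch simply zeroes them.
static void buildPshufbByteMasks(const int *Mask, int Size, int NumBytes,
                                 int *V1Bytes, int *V2Bytes) {
  const int ZeroMask = 0x80;
  int Scale = NumBytes / Size; // Bytes per mask element.
  for (int i = 0; i < NumBytes; ++i) {
    int M = Mask[i / Scale];
    if (M < 0) {
      V1Bytes[i] = V2Bytes[i] = ZeroMask;
      continue;
    }
    // Elements 0..Size-1 come from V1, elements Size..2*Size-1 from V2.
    V1Bytes[i] = M < Size ? M * Scale + i % Scale : ZeroMask;
    V2Bytes[i] = M < Size ? ZeroMask : (M - Size) * Scale + i % Scale;
  }
}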
13929
13930/// Generic lowering of 8-lane i16 shuffles.
13931///
13932/// This handles both single-input shuffles and combined shuffle/blends with
13933/// two inputs. The single input shuffles are immediately delegated to
13934/// a dedicated lowering routine.
13935///
13936/// The blends are lowered in one of three fundamental ways. If there are few
13937/// enough inputs, it delegates to a basic UNPCK-based strategy. If the shuffle
13938/// of the input is significantly cheaper when lowered as an interleaving of
13939/// the two inputs, try to interleave them. Otherwise, blend the low and high
13940/// halves of the inputs separately (making them have relatively few inputs)
13941/// and then concatenate them.
13942static SDValue lowerV8I16Shuffle(const SDLoc &DL, ArrayRef<int> Mask,
13943 const APInt &Zeroable, SDValue V1, SDValue V2,
13944 const X86Subtarget &Subtarget,
13945 SelectionDAG &DAG) {
13946 assert(V1.getSimpleValueType() == MVT::v8i16 && "Bad operand type!");
13947 assert(V2.getSimpleValueType() == MVT::v8i16 && "Bad operand type!");
13948 assert(Mask.size() == 8 && "Unexpected mask size for v8 shuffle!");
13949
13950 // Whenever we can lower this as a zext, that instruction is strictly faster
13951 // than any alternative.
13952 if (SDValue ZExt = lowerShuffleAsZeroOrAnyExtend(DL, MVT::v8i16, V1, V2, Mask,
13953 Zeroable, Subtarget, DAG))
13954 return ZExt;
13955
13956 int NumV2Inputs = count_if(Mask, [](int M) { return M >= 8; });
13957
13958 if (NumV2Inputs == 0) {
13959 // Try to use shift instructions.
13960 if (SDValue Shift = lowerShuffleAsShift(DL, MVT::v8i16, V1, V1, Mask,
13961 Zeroable, Subtarget, DAG))
13962 return Shift;
13963
13964 // Check for being able to broadcast a single element.
13965 if (SDValue Broadcast = lowerShuffleAsBroadcast(DL, MVT::v8i16, V1, V2,
13966 Mask, Subtarget, DAG))
13967 return Broadcast;
13968
13969 // Use dedicated unpack instructions for masks that match their pattern.
13970 if (SDValue V = lowerShuffleWithUNPCK(DL, MVT::v8i16, Mask, V1, V2, DAG))
13971 return V;
13972
13973 // Use dedicated pack instructions for masks that match their pattern.
13974 if (SDValue V = lowerShuffleWithPACK(DL, MVT::v8i16, Mask, V1, V2, DAG,
13975 Subtarget))
13976 return V;
13977
13978 // Try to use byte rotation instructions.
13979 if (SDValue Rotate = lowerShuffleAsByteRotate(DL, MVT::v8i16, V1, V1, Mask,
13980 Subtarget, DAG))
13981 return Rotate;
13982
13983 // Make a copy of the mask so it can be modified.
13984 SmallVector<int, 8> MutableMask(Mask.begin(), Mask.end());
13985 return lowerV8I16GeneralSingleInputShuffle(DL, MVT::v8i16, V1, MutableMask,
13986 Subtarget, DAG);
13987 }
13988
13989 assert(llvm::any_of(Mask, [](int M) { return M >= 0 && M < 8; }) &&
13990 "All single-input shuffles should be canonicalized to be V1-input "
13991 "shuffles.");
13992
13993 // Try to use shift instructions.
13994 if (SDValue Shift = lowerShuffleAsShift(DL, MVT::v8i16, V1, V2, Mask,
13995 Zeroable, Subtarget, DAG))
13996 return Shift;
13997
13998 // See if we can use SSE4A Extraction / Insertion.
13999 if (Subtarget.hasSSE4A())
14000 if (SDValue V = lowerShuffleWithSSE4A(DL, MVT::v8i16, V1, V2, Mask,
14001 Zeroable, DAG))
14002 return V;
14003
14004 // There are special ways we can lower some single-element blends.
14005 if (NumV2Inputs == 1)
14006 if (SDValue V = lowerShuffleAsElementInsertion(
14007 DL, MVT::v8i16, V1, V2, Mask, Zeroable, Subtarget, DAG))
14008 return V;
14009
14010 // We have different paths for blend lowering, but they all must use the
14011 // *exact* same predicate.
14012 bool IsBlendSupported = Subtarget.hasSSE41();
14013 if (IsBlendSupported)
14014 if (SDValue Blend = lowerShuffleAsBlend(DL, MVT::v8i16, V1, V2, Mask,
14015 Zeroable, Subtarget, DAG))
14016 return Blend;
14017
14018 if (SDValue Masked = lowerShuffleAsBitMask(DL, MVT::v8i16, V1, V2, Mask,
14019 Zeroable, Subtarget, DAG))
14020 return Masked;
14021
14022 // Use dedicated unpack instructions for masks that match their pattern.
14023 if (SDValue V = lowerShuffleWithUNPCK(DL, MVT::v8i16, Mask, V1, V2, DAG))
14024 return V;
14025
14026 // Use dedicated pack instructions for masks that match their pattern.
14027 if (SDValue V = lowerShuffleWithPACK(DL, MVT::v8i16, Mask, V1, V2, DAG,
14028 Subtarget))
14029 return V;
14030
14031 // Try to use byte rotation instructions.
14032 if (SDValue Rotate = lowerShuffleAsByteRotate(DL, MVT::v8i16, V1, V2, Mask,
14033 Subtarget, DAG))
14034 return Rotate;
14035
14036 if (SDValue BitBlend =
14037 lowerShuffleAsBitBlend(DL, MVT::v8i16, V1, V2, Mask, DAG))
14038 return BitBlend;
14039
14040 // Try to use byte shift instructions to mask.
14041 if (SDValue V = lowerVectorShuffleAsByteShiftMask(
14042 DL, MVT::v8i16, V1, V2, Mask, Zeroable, Subtarget, DAG))
14043 return V;
14044
14045 // Try to lower by permuting the inputs into an unpack instruction.
14046 if (SDValue Unpack = lowerShuffleAsPermuteAndUnpack(DL, MVT::v8i16, V1, V2,
14047 Mask, Subtarget, DAG))
14048 return Unpack;
14049
14050 // If we can't directly blend but can use PSHUFB, that will be better as it
14051 // can both shuffle and set up the inefficient blend.
14052 if (!IsBlendSupported && Subtarget.hasSSSE3()) {
14053 bool V1InUse, V2InUse;
14054 return lowerShuffleAsBlendOfPSHUFBs(DL, MVT::v8i16, V1, V2, Mask,
14055 Zeroable, DAG, V1InUse, V2InUse);
14056 }
14057
14058 // We can always bit-blend if we have to, so the fallback strategy is to
14059 // decompose into single-input permutes and blends.
14060 return lowerShuffleAsDecomposedShuffleBlend(DL, MVT::v8i16, V1, V2,
14061 Mask, Subtarget, DAG);
14062}
14063
14064/// Check whether a compaction lowering can be done by dropping even
14065/// elements and compute how many times even elements must be dropped.
14066///
14067 /// This handles shuffles which take every (2^N)th element, i.e. keep elements
14068 /// at a power-of-two stride. Example shuffle masks:
14069///
14070/// N = 1: 0, 2, 4, 6, 8, 10, 12, 14, 0, 2, 4, 6, 8, 10, 12, 14
14071/// N = 1: 0, 2, 4, 6, 8, 10, 12, 14, 16, 18, 20, 22, 24, 26, 28, 30
14072/// N = 2: 0, 4, 8, 12, 0, 4, 8, 12, 0, 4, 8, 12, 0, 4, 8, 12
14073/// N = 2: 0, 4, 8, 12, 16, 20, 24, 28, 0, 4, 8, 12, 16, 20, 24, 28
14074/// N = 3: 0, 8, 0, 8, 0, 8, 0, 8, 0, 8, 0, 8, 0, 8, 0, 8
14075/// N = 3: 0, 8, 16, 24, 0, 8, 16, 24, 0, 8, 16, 24, 0, 8, 16, 24
14076///
14077/// Any of these lanes can of course be undef.
14078///
14079/// This routine only supports N <= 3.
14080/// FIXME: Evaluate whether either AVX or AVX-512 have any opportunities here
14081/// for larger N.
14082///
14083/// \returns N above, or the number of times even elements must be dropped if
14084/// there is such a number. Otherwise returns zero.
14085static int canLowerByDroppingEvenElements(ArrayRef<int> Mask,
14086 bool IsSingleInput) {
14087 // The modulus for the shuffle vector entries is based on whether this is
14088 // a single input or not.
14089 int ShuffleModulus = Mask.size() * (IsSingleInput ? 1 : 2);
14090 assert(isPowerOf2_32((uint32_t)ShuffleModulus) &&
14091 "We should only be called with masks with a power-of-2 size!");
14092
14093 uint64_t ModMask = (uint64_t)ShuffleModulus - 1;
14094
14095 // We track whether the input is viable for all power-of-2 strides 2^1, 2^2,
14096 // and 2^3 simultaneously. This is because we may have ambiguity with
14097 // partially undef inputs.
14098 bool ViableForN[3] = {true, true, true};
14099
14100 for (int i = 0, e = Mask.size(); i < e; ++i) {
14101 // Ignore undef lanes, we'll optimistically collapse them to the pattern we
14102 // want.
14103 if (Mask[i] < 0)
14104 continue;
14105
14106 bool IsAnyViable = false;
14107 for (unsigned j = 0; j != array_lengthof(ViableForN); ++j)
14108 if (ViableForN[j]) {
14109 uint64_t N = j + 1;
14110
14111 // The shuffle mask must be equal to (i * 2^N) % M.
14112 if ((uint64_t)Mask[i] == (((uint64_t)i << N) & ModMask))
14113 IsAnyViable = true;
14114 else
14115 ViableForN[j] = false;
14116 }
14117 // Early exit if we exhaust the possible powers of two.
14118 if (!IsAnyViable)
14119 break;
14120 }
14121
14122 for (unsigned j = 0; j != array_lengthof(ViableForN); ++j)
14123 if (ViableForN[j])
14124 return j + 1;
14125
14126 // Return 0 as there is no viable power of two.
14127 return 0;
14128}
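// Illustrative sketch (editorial addition, not part of X86ISelLowering.cpp):
// the per-stride predicate tested above, written out for a single candidate N.
// A mask like {0, 2, 4, 6, 8, 10, 12, 14, 0, 2, 4, 6, 8, 10, 12, 14} on a
// single input is viable for N = 1 (keep every 2nd element); the N = 2 and
// N = 3 examples in the doc comment pass in the same way.
static bool isViableEvenDrop(const int *Mask, int Size, int N,
                             bool IsSingleInput) {
  // Entries wrap at Size (single input) or 2 * Size (two inputs), mirroring
  // ShuffleModulus above.
  unsigned ModMask = (unsigned)(Size * (IsSingleInput ? 1 : 2)) - 1;
  for (int i = 0; i < Size; ++i) {
    if (Mask[i] < 0)
      continue; // Undef lanes are compatible with any stride.
    if ((unsigned)Mask[i] != (((unsigned)i << N) & ModMask))
      return false;
  }
  return true;
}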
14129
14130static SDValue lowerShuffleWithPERMV(const SDLoc &DL, MVT VT,
14131 ArrayRef<int> Mask, SDValue V1,
14132 SDValue V2, SelectionDAG &DAG) {
14133 MVT MaskEltVT = MVT::getIntegerVT(VT.getScalarSizeInBits());
14134 MVT MaskVecVT = MVT::getVectorVT(MaskEltVT, VT.getVectorNumElements());
14135
14136 SDValue MaskNode = getConstVector(Mask, MaskVecVT, DAG, DL, true);
14137 if (V2.isUndef())
14138 return DAG.getNode(X86ISD::VPERMV, DL, VT, MaskNode, V1);
14139
14140 return DAG.getNode(X86ISD::VPERMV3, DL, VT, V1, MaskNode, V2);
14141}
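// Illustrative sketch (editorial addition, not part of X86ISelLowering.cpp):
// the lane-level semantics being targeted. VPERMV gathers from one source
// through a variable index vector; VPERMV3 indexes into the concatenation of
// two sources. A plain scalar model over int lanes, assuming in-range indices:
static void permvModel(const int *Indices, const int *Src1, const int *Src2,
                       int *Dst, int NumElts) {
  for (int i = 0; i < NumElts; ++i) {
    int Idx = Indices[i];
    // Indices below NumElts select from Src1, the rest from Src2 (the
    // two-source VPERMV3 case). Passing Src2 == Src1 models plain VPERMV.
    Dst[i] = Idx < NumElts ? Src1[Idx] : Src2[Idx - NumElts];
  }
}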
14142
14143/// Generic lowering of v16i8 shuffles.
14144///
14145/// This is a hybrid strategy to lower v16i8 vectors. It first attempts to
14146/// detect any complexity reducing interleaving. If that doesn't help, it uses
14147/// UNPCK to spread the i8 elements across two i16-element vectors, and uses
14148/// the existing lowering for v8i16 blends on each half, finally PACK-ing them
14149/// back together.
14150static SDValue lowerV16I8Shuffle(const SDLoc &DL, ArrayRef<int> Mask,
14151 const APInt &Zeroable, SDValue V1, SDValue V2,
14152 const X86Subtarget &Subtarget,
14153 SelectionDAG &DAG) {
14154 assert(V1.getSimpleValueType() == MVT::v16i8 && "Bad operand type!");
14155 assert(V2.getSimpleValueType() == MVT::v16i8 && "Bad operand type!");
14156 assert(Mask.size() == 16 && "Unexpected mask size for v16 shuffle!");
14157
14158 // Try to use shift instructions.
14159 if (SDValue Shift = lowerShuffleAsShift(DL, MVT::v16i8, V1, V2, Mask,
14160 Zeroable, Subtarget, DAG))
14161 return Shift;
14162
14163 // Try to use byte rotation instructions.
14164 if (SDValue Rotate = lowerShuffleAsByteRotate(DL, MVT::v16i8, V1, V2, Mask,
14165 Subtarget, DAG))
14166 return Rotate;
14167
14168 // Use dedicated pack instructions for masks that match their pattern.
14169 if (SDValue V = lowerShuffleWithPACK(DL, MVT::v16i8, Mask, V1, V2, DAG,
14170 Subtarget))
14171 return V;
14172
14173 // Try to use a zext lowering.
14174 if (SDValue ZExt = lowerShuffleAsZeroOrAnyExtend(DL, MVT::v16i8, V1, V2, Mask,
14175 Zeroable, Subtarget, DAG))
14176 return ZExt;
14177
14178 // See if we can use SSE4A Extraction / Insertion.
14179 if (Subtarget.hasSSE4A())
14180 if (SDValue V = lowerShuffleWithSSE4A(DL, MVT::v16i8, V1, V2, Mask,
14181 Zeroable, DAG))
14182 return V;
14183
14184 int NumV2Elements = count_if(Mask, [](int M) { return M >= 16; });
14185
14186 // For single-input shuffles, there are some nicer lowering tricks we can use.
14187 if (NumV2Elements == 0) {
14188 // Check for being able to broadcast a single element.
14189 if (SDValue Broadcast = lowerShuffleAsBroadcast(DL, MVT::v16i8, V1, V2,
14190 Mask, Subtarget, DAG))
14191 return Broadcast;
14192
14193 if (SDValue V = lowerShuffleWithUNPCK(DL, MVT::v16i8, Mask, V1, V2, DAG))
14194 return V;
14195
14196 // Check whether we can widen this to an i16 shuffle by duplicating bytes.
14197 // Notably, this handles splat and partial-splat shuffles more efficiently.
14198 // However, it only makes sense if the pre-duplication shuffle simplifies
14199 // things significantly. Currently, this means we need to be able to
14200 // express the pre-duplication shuffle as an i16 shuffle.
14201 //
14202 // FIXME: We should check for other patterns which can be widened into an
14203 // i16 shuffle as well.
14204 auto canWidenViaDuplication = [](ArrayRef<int> Mask) {
14205 for (int i = 0; i < 16; i += 2)
14206 if (Mask[i] >= 0 && Mask[i + 1] >= 0 && Mask[i] != Mask[i + 1])
14207 return false;
14208
14209 return true;
14210 };
14211 auto tryToWidenViaDuplication = [&]() -> SDValue {
14212 if (!canWidenViaDuplication(Mask))
14213 return SDValue();
14214 SmallVector<int, 4> LoInputs;
14215 copy_if(Mask, std::back_inserter(LoInputs),
14216 [](int M) { return M >= 0 && M < 8; });
14217 array_pod_sort(LoInputs.begin(), LoInputs.end());
14218 LoInputs.erase(std::unique(LoInputs.begin(), LoInputs.end()),
14219 LoInputs.end());
14220 SmallVector<int, 4> HiInputs;
14221 copy_if(Mask, std::back_inserter(HiInputs), [](int M) { return M >= 8; });
14222 array_pod_sort(HiInputs.begin(), HiInputs.end());
14223 HiInputs.erase(std::unique(HiInputs.begin(), HiInputs.end()),
14224 HiInputs.end());
14225
14226 bool TargetLo = LoInputs.size() >= HiInputs.size();
14227 ArrayRef<int> InPlaceInputs = TargetLo ? LoInputs : HiInputs;
14228 ArrayRef<int> MovingInputs = TargetLo ? HiInputs : LoInputs;
14229
14230 int PreDupI16Shuffle[] = {-1, -1, -1, -1, -1, -1, -1, -1};
14231 SmallDenseMap<int, int, 8> LaneMap;
14232 for (int I : InPlaceInputs) {
14233 PreDupI16Shuffle[I/2] = I/2;
14234 LaneMap[I] = I;
14235 }
14236 int j = TargetLo ? 0 : 4, je = j + 4;
14237 for (int i = 0, ie = MovingInputs.size(); i < ie; ++i) {
14238 // Check if j is already a shuffle of this input. This happens when
14239 // there are two adjacent bytes after we move the low one.
14240 if (PreDupI16Shuffle[j] != MovingInputs[i] / 2) {
14241 // If we haven't yet mapped the input, search for a slot into which
14242 // we can map it.
14243 while (j < je && PreDupI16Shuffle[j] >= 0)
14244 ++j;
14245
14246 if (j == je)
14247 // We can't place the inputs into a single half with a simple i16 shuffle, so bail.
14248 return SDValue();
14249
14250 // Map this input with the i16 shuffle.
14251 PreDupI16Shuffle[j] = MovingInputs[i] / 2;
14252 }
14253
14254 // Update the lane map based on the mapping we ended up with.
14255 LaneMap[MovingInputs[i]] = 2 * j + MovingInputs[i] % 2;
14256 }
14257 V1 = DAG.getBitcast(
14258 MVT::v16i8,
14259 DAG.getVectorShuffle(MVT::v8i16, DL, DAG.getBitcast(MVT::v8i16, V1),
14260 DAG.getUNDEF(MVT::v8i16), PreDupI16Shuffle));
14261
14262 // Unpack the bytes to form the i16s that will be shuffled into place.
14263 V1 = DAG.getNode(TargetLo ? X86ISD::UNPCKL : X86ISD::UNPCKH, DL,
14264 MVT::v16i8, V1, V1);
14265
14266 int PostDupI16Shuffle[8] = {-1, -1, -1, -1, -1, -1, -1, -1};
14267 for (int i = 0; i < 16; ++i)
14268 if (Mask[i] >= 0) {
14269 int MappedMask = LaneMap[Mask[i]] - (TargetLo ? 0 : 8);
14270 assert(MappedMask < 8 && "Invalid v8 shuffle mask!");
14271 if (PostDupI16Shuffle[i / 2] < 0)
14272 PostDupI16Shuffle[i / 2] = MappedMask;
14273 else
14274 assert(PostDupI16Shuffle[i / 2] == MappedMask &&
14275 "Conflicting entries in the original shuffle!");
14276 }
14277 return DAG.getBitcast(
14278 MVT::v16i8,
14279 DAG.getVectorShuffle(MVT::v8i16, DL, DAG.getBitcast(MVT::v8i16, V1),
14280 DAG.getUNDEF(MVT::v8i16), PostDupI16Shuffle));
14281 };
14282 if (SDValue V = tryToWidenViaDuplication())
14283 return V;
14284 }
14285
14286 if (SDValue Masked = lowerShuffleAsBitMask(DL, MVT::v16i8, V1, V2, Mask,
14287 Zeroable, Subtarget, DAG))
14288 return Masked;
14289
14290 // Use dedicated unpack instructions for masks that match their pattern.
14291 if (SDValue V = lowerShuffleWithUNPCK(DL, MVT::v16i8, Mask, V1, V2, DAG))
14292 return V;
14293
14294 // Try to use byte shift instructions to mask.
14295 if (SDValue V = lowerVectorShuffleAsByteShiftMask(
14296 DL, MVT::v16i8, V1, V2, Mask, Zeroable, Subtarget, DAG))
14297 return V;
14298
14299 // Check for SSSE3 which lets us lower all v16i8 shuffles much more directly
14300 // with PSHUFB. It is important to do this before we attempt to generate any
14301 // blends but after all of the single-input lowerings. If the single input
14302 // lowerings can find an instruction sequence that is faster than a PSHUFB, we
14303 // want to preserve that and we can DAG combine any longer sequences into
14304 // a PSHUFB in the end. But once we start blending from multiple inputs,
14305 // the complexity of DAG combining bad patterns back into PSHUFB is too high,
14306 // and there are *very* few patterns that would actually be faster than the
14307 // PSHUFB approach because of its ability to zero lanes.
14308 //
14309 // FIXME: The only exceptions to the above are blends which are exact
14310 // interleavings with direct instructions supporting them. We currently don't
14311 // handle those well here.
14312 if (Subtarget.hasSSSE3()) {
14313 bool V1InUse = false;
14314 bool V2InUse = false;
14315
14316 SDValue PSHUFB = lowerShuffleAsBlendOfPSHUFBs(
14317 DL, MVT::v16i8, V1, V2, Mask, Zeroable, DAG, V1InUse, V2InUse);
14318
14319 // If both V1 and V2 are in use and we can use a direct blend or an unpack,
14320 // do so. This avoids using them to handle blends-with-zero which is
14321 // important as a single pshufb is significantly faster for that.
14322 if (V1InUse && V2InUse) {
14323 if (Subtarget.hasSSE41())
14324 if (SDValue Blend = lowerShuffleAsBlend(DL, MVT::v16i8, V1, V2, Mask,
14325 Zeroable, Subtarget, DAG))
14326 return Blend;
14327
14328 // We can use an unpack to do the blending rather than an or in some
14329 // cases. Even though the or may be (very minorly) more efficient, we
14330 // prefer this lowering because there are common cases where part of
14331 // the complexity of the shuffles goes away when we do the final blend as
14332 // an unpack.
14333 // FIXME: It might be worth trying to detect if the unpack-feeding
14334 // shuffles will both be pshufb, in which case we shouldn't bother with
14335 // this.
14336 if (SDValue Unpack = lowerShuffleAsPermuteAndUnpack(
14337 DL, MVT::v16i8, V1, V2, Mask, Subtarget, DAG))
14338 return Unpack;
14339
14340 // If we have VBMI we can use one VPERM instead of multiple PSHUFBs.
14341 if (Subtarget.hasVBMI() && Subtarget.hasVLX())
14342 return lowerShuffleWithPERMV(DL, MVT::v16i8, Mask, V1, V2, DAG);
14343
14344 // Use PALIGNR+Permute if possible - permute might become PSHUFB but the
14345 // PALIGNR will be cheaper than the second PSHUFB+OR.
14346 if (SDValue V = lowerShuffleAsByteRotateAndPermute(
14347 DL, MVT::v16i8, V1, V2, Mask, Subtarget, DAG))
14348 return V;
14349 }
14350
14351 return PSHUFB;
14352 }
14353
14354 // There are special ways we can lower some single-element blends.
14355 if (NumV2Elements == 1)
14356 if (SDValue V = lowerShuffleAsElementInsertion(
14357 DL, MVT::v16i8, V1, V2, Mask, Zeroable, Subtarget, DAG))
14358 return V;
14359
14360 if (SDValue Blend = lowerShuffleAsBitBlend(DL, MVT::v16i8, V1, V2, Mask, DAG))
14361 return Blend;
14362
14363 // Check whether a compaction lowering can be done. This handles shuffles
14364 // which take every Nth element for some even N. See the helper function for
14365 // details.
14366 //
14367 // We special case these as they can be particularly efficiently handled with
14368 // the PACKUSWB instruction on x86 and they show up in common patterns of
14369 // rearranging bytes to truncate wide elements.
14370 bool IsSingleInput = V2.isUndef();
14371 if (int NumEvenDrops = canLowerByDroppingEvenElements(Mask, IsSingleInput)) {
14372 // NumEvenDrops is the log2 of the stride between the elements we keep.
14373 // Another way of thinking about it is that we need to drop the even elements
14374 // this many times to compact the input down to the requested result.
14375
14376 // First we need to zero all the dropped bytes.
14377 assert(NumEvenDrops <= 3 &&
14378 "No support for dropping even elements more than 3 times.");
14379 SmallVector<SDValue, 16> ByteClearOps(16, DAG.getConstant(0, DL, MVT::i8));
14380 for (unsigned i = 0; i != 16; i += 1 << NumEvenDrops)
14381 ByteClearOps[i] = DAG.getConstant(0xFF, DL, MVT::i8);
14382 SDValue ByteClearMask = DAG.getBuildVector(MVT::v16i8, DL, ByteClearOps);
14383 V1 = DAG.getNode(ISD::AND, DL, MVT::v16i8, V1, ByteClearMask);
14384 if (!IsSingleInput)
14385 V2 = DAG.getNode(ISD::AND, DL, MVT::v16i8, V2, ByteClearMask);
14386
14387 // Now pack things back together.
14388 V1 = DAG.getBitcast(MVT::v8i16, V1);
14389 V2 = IsSingleInput ? V1 : DAG.getBitcast(MVT::v8i16, V2);
14390 SDValue Result = DAG.getNode(X86ISD::PACKUS, DL, MVT::v16i8, V1, V2);
14391 for (int i = 1; i < NumEvenDrops; ++i) {
14392 Result = DAG.getBitcast(MVT::v8i16, Result);
14393 Result = DAG.getNode(X86ISD::PACKUS, DL, MVT::v16i8, Result, Result);
14394 }
14395
14396 return Result;
14397 }
14398
14399 // Handle multi-input cases by blending single-input shuffles.
14400 if (NumV2Elements > 0)
14401 return lowerShuffleAsDecomposedShuffleBlend(DL, MVT::v16i8, V1, V2, Mask,
14402 Subtarget, DAG);
14403
14404 // The fallback path for single-input shuffles widens this into two v8i16
14405 // vectors with unpacks, shuffles those, and then pulls them back together
14406 // with a pack.
14407 SDValue V = V1;
14408
14409 std::array<int, 8> LoBlendMask = {{-1, -1, -1, -1, -1, -1, -1, -1}};
14410 std::array<int, 8> HiBlendMask = {{-1, -1, -1, -1, -1, -1, -1, -1}};
14411 for (int i = 0; i < 16; ++i)
14412 if (Mask[i] >= 0)
14413 (i < 8 ? LoBlendMask[i] : HiBlendMask[i % 8]) = Mask[i];
14414
14415 SDValue VLoHalf, VHiHalf;
14416 // Check if any of the odd lanes in the v16i8 are used. If not, we can mask
14417 // them out and avoid using UNPCK{L,H} to extract the elements of V as
14418 // i16s.
14419 if (none_of(LoBlendMask, [](int M) { return M >= 0 && M % 2 == 1; }) &&
14420 none_of(HiBlendMask, [](int M) { return M >= 0 && M % 2 == 1; })) {
14421 // Use a mask to drop the high bytes.
14422 VLoHalf = DAG.getBitcast(MVT::v8i16, V);
14423 VLoHalf = DAG.getNode(ISD::AND, DL, MVT::v8i16, VLoHalf,
14424 DAG.getConstant(0x00FF, DL, MVT::v8i16));
14425
14426 // This will be a single vector shuffle instead of a blend so nuke VHiHalf.
14427 VHiHalf = DAG.getUNDEF(MVT::v8i16);
14428
14429 // Squash the masks to point directly into VLoHalf.
14430 for (int &M : LoBlendMask)
14431 if (M >= 0)
14432 M /= 2;
14433 for (int &M : HiBlendMask)
14434 if (M >= 0)
14435 M /= 2;
14436 } else {
14437 // Otherwise just unpack the low half of V into VLoHalf and the high half into
14438 // VHiHalf so that we can blend them as i16s.
14439 SDValue Zero = getZeroVector(MVT::v16i8, Subtarget, DAG, DL);
14440
14441 VLoHalf = DAG.getBitcast(
14442 MVT::v8i16, DAG.getNode(X86ISD::UNPCKL, DL, MVT::v16i8, V, Zero));
14443 VHiHalf = DAG.getBitcast(
14444 MVT::v8i16, DAG.getNode(X86ISD::UNPCKH, DL, MVT::v16i8, V, Zero));
14445 }
14446
14447 SDValue LoV = DAG.getVectorShuffle(MVT::v8i16, DL, VLoHalf, VHiHalf, LoBlendMask);
14448 SDValue HiV = DAG.getVectorShuffle(MVT::v8i16, DL, VLoHalf, VHiHalf, HiBlendMask);
14449
14450 return DAG.getNode(X86ISD::PACKUS, DL, MVT::v16i8, LoV, HiV);
14451}
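// Illustrative sketch (editorial addition, not part of X86ISelLowering.cpp):
// the byte-clear mask built by the compaction path above. For a given
// NumEvenDrops, only every (1 << NumEvenDrops)-th byte is kept (0xFF) and the
// rest are zeroed, so the repeated PACKUS steps can squeeze the surviving
// bytes together without saturation noise from the discarded ones.
static void buildByteClearMask(int NumEvenDrops, unsigned char Out[16]) {
  for (int i = 0; i < 16; ++i)
    Out[i] = 0x00;                 // Default: drop the byte.
  for (int i = 0; i < 16; i += 1 << NumEvenDrops)
    Out[i] = 0xFF;                 // Keep every (1 << NumEvenDrops)-th byte.
}
// For NumEvenDrops = 1 this is the pattern FF 00 FF 00 ... used with a single
// PACKUS; larger values keep fewer bytes and add extra PACKUS rounds.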
14452
14453/// Dispatching routine to lower various 128-bit x86 vector shuffles.
14454///
14455/// This routine breaks down the specific type of 128-bit shuffle and
14456/// dispatches to the lowering routines accordingly.
14457static SDValue lower128BitShuffle(const SDLoc &DL, ArrayRef<int> Mask,
14458 MVT VT, SDValue V1, SDValue V2,
14459 const APInt &Zeroable,
14460 const X86Subtarget &Subtarget,
14461 SelectionDAG &DAG) {
14462 switch (VT.SimpleTy) {
14463 case MVT::v2i64:
14464 return lowerV2I64Shuffle(DL, Mask, Zeroable, V1, V2, Subtarget, DAG);
14465 case MVT::v2f64:
14466 return lowerV2F64Shuffle(DL, Mask, Zeroable, V1, V2, Subtarget, DAG);
14467 case MVT::v4i32:
14468 return lowerV4I32Shuffle(DL, Mask, Zeroable, V1, V2, Subtarget, DAG);
14469 case MVT::v4f32:
14470 return lowerV4F32Shuffle(DL, Mask, Zeroable, V1, V2, Subtarget, DAG);
14471 case MVT::v8i16:
14472 return lowerV8I16Shuffle(DL, Mask, Zeroable, V1, V2, Subtarget, DAG);
14473 case MVT::v16i8:
14474 return lowerV16I8Shuffle(DL, Mask, Zeroable, V1, V2, Subtarget, DAG);
14475
14476 default:
14477 llvm_unreachable("Unimplemented!")::llvm::llvm_unreachable_internal("Unimplemented!", "/build/llvm-toolchain-snapshot-10~svn374877/lib/Target/X86/X86ISelLowering.cpp"
, 14477)
;
14478 }
14479}
14480
14481/// Generic routine to split vector shuffle into half-sized shuffles.
14482///
14483/// This routine just extracts two subvectors, shuffles them independently, and
14484/// then concatenates them back together. This should work effectively with all
14485/// AVX vector shuffle types.
14486static SDValue splitAndLowerShuffle(const SDLoc &DL, MVT VT, SDValue V1,
14487 SDValue V2, ArrayRef<int> Mask,
14488 SelectionDAG &DAG) {
14489   assert(VT.getSizeInBits() >= 256 &&
14490          "Only for 256-bit or wider vector shuffles!");
14491   assert(V1.getSimpleValueType() == VT && "Bad operand type!");
14492   assert(V2.getSimpleValueType() == VT && "Bad operand type!");
14493
14494 ArrayRef<int> LoMask = Mask.slice(0, Mask.size() / 2);
14495 ArrayRef<int> HiMask = Mask.slice(Mask.size() / 2);
14496
14497 int NumElements = VT.getVectorNumElements();
14498 int SplitNumElements = NumElements / 2;
14499 MVT ScalarVT = VT.getVectorElementType();
14500 MVT SplitVT = MVT::getVectorVT(ScalarVT, NumElements / 2);
14501
14502 // Rather than splitting build-vectors, just build two narrower build
14503 // vectors. This helps shuffling with splats and zeros.
14504 auto SplitVector = [&](SDValue V) {
14505 V = peekThroughBitcasts(V);
14506
14507 MVT OrigVT = V.getSimpleValueType();
14508 int OrigNumElements = OrigVT.getVectorNumElements();
14509 int OrigSplitNumElements = OrigNumElements / 2;
14510 MVT OrigScalarVT = OrigVT.getVectorElementType();
14511 MVT OrigSplitVT = MVT::getVectorVT(OrigScalarVT, OrigNumElements / 2);
14512
14513 SDValue LoV, HiV;
14514
14515 auto *BV = dyn_cast<BuildVectorSDNode>(V);
14516 if (!BV) {
14517 LoV = DAG.getNode(ISD::EXTRACT_SUBVECTOR, DL, OrigSplitVT, V,
14518 DAG.getIntPtrConstant(0, DL));
14519 HiV = DAG.getNode(ISD::EXTRACT_SUBVECTOR, DL, OrigSplitVT, V,
14520 DAG.getIntPtrConstant(OrigSplitNumElements, DL));
14521 } else {
14522
14523 SmallVector<SDValue, 16> LoOps, HiOps;
14524 for (int i = 0; i < OrigSplitNumElements; ++i) {
14525 LoOps.push_back(BV->getOperand(i));
14526 HiOps.push_back(BV->getOperand(i + OrigSplitNumElements));
14527 }
14528 LoV = DAG.getBuildVector(OrigSplitVT, DL, LoOps);
14529 HiV = DAG.getBuildVector(OrigSplitVT, DL, HiOps);
14530 }
14531 return std::make_pair(DAG.getBitcast(SplitVT, LoV),
14532 DAG.getBitcast(SplitVT, HiV));
14533 };
14534
14535 SDValue LoV1, HiV1, LoV2, HiV2;
14536 std::tie(LoV1, HiV1) = SplitVector(V1);
14537 std::tie(LoV2, HiV2) = SplitVector(V2);
14538
14539 // Now create two 4-way blends of these half-width vectors.
14540 auto HalfBlend = [&](ArrayRef<int> HalfMask) {
14541 bool UseLoV1 = false, UseHiV1 = false, UseLoV2 = false, UseHiV2 = false;
14542 SmallVector<int, 32> V1BlendMask((unsigned)SplitNumElements, -1);
14543 SmallVector<int, 32> V2BlendMask((unsigned)SplitNumElements, -1);
14544 SmallVector<int, 32> BlendMask((unsigned)SplitNumElements, -1);
14545 for (int i = 0; i < SplitNumElements; ++i) {
14546 int M = HalfMask[i];
14547 if (M >= NumElements) {
14548 if (M >= NumElements + SplitNumElements)
14549 UseHiV2 = true;
14550 else
14551 UseLoV2 = true;
14552 V2BlendMask[i] = M - NumElements;
14553 BlendMask[i] = SplitNumElements + i;
14554 } else if (M >= 0) {
14555 if (M >= SplitNumElements)
14556 UseHiV1 = true;
14557 else
14558 UseLoV1 = true;
14559 V1BlendMask[i] = M;
14560 BlendMask[i] = i;
14561 }
14562 }
14563
14564 // Because the lowering happens after all combining takes place, we need to
14565 // manually combine these blend masks as much as possible so that we create
14566 // a minimal number of high-level vector shuffle nodes.
14567
14568 // First try just blending the halves of V1 or V2.
14569 if (!UseLoV1 && !UseHiV1 && !UseLoV2 && !UseHiV2)
14570 return DAG.getUNDEF(SplitVT);
14571 if (!UseLoV2 && !UseHiV2)
14572 return DAG.getVectorShuffle(SplitVT, DL, LoV1, HiV1, V1BlendMask);
14573 if (!UseLoV1 && !UseHiV1)
14574 return DAG.getVectorShuffle(SplitVT, DL, LoV2, HiV2, V2BlendMask);
14575
14576 SDValue V1Blend, V2Blend;
14577 if (UseLoV1 && UseHiV1) {
14578 V1Blend =
14579 DAG.getVectorShuffle(SplitVT, DL, LoV1, HiV1, V1BlendMask);
14580 } else {
14581 // We only use half of V1 so map the usage down into the final blend mask.
14582 V1Blend = UseLoV1 ? LoV1 : HiV1;
14583 for (int i = 0; i < SplitNumElements; ++i)
14584 if (BlendMask[i] >= 0 && BlendMask[i] < SplitNumElements)
14585 BlendMask[i] = V1BlendMask[i] - (UseLoV1 ? 0 : SplitNumElements);
14586 }
14587 if (UseLoV2 && UseHiV2) {
14588 V2Blend =
14589 DAG.getVectorShuffle(SplitVT, DL, LoV2, HiV2, V2BlendMask);
14590 } else {
14591 // We only use half of V2 so map the usage down into the final blend mask.
14592 V2Blend = UseLoV2 ? LoV2 : HiV2;
14593 for (int i = 0; i < SplitNumElements; ++i)
14594 if (BlendMask[i] >= SplitNumElements)
14595 BlendMask[i] = V2BlendMask[i] + (UseLoV2 ? SplitNumElements : 0);
14596 }
14597 return DAG.getVectorShuffle(SplitVT, DL, V1Blend, V2Blend, BlendMask);
14598 };
14599 SDValue Lo = HalfBlend(LoMask);
14600 SDValue Hi = HalfBlend(HiMask);
14601 return DAG.getNode(ISD::CONCAT_VECTORS, DL, VT, Lo, Hi);
14602}
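
// --- Editor's illustrative sketch (not part of X86ISelLowering.cpp) -------
// Compile-ready sketch of the per-half bookkeeping done by the HalfBlend
// lambda in splitAndLowerShuffle above. Indices 0..NumElements-1 name V1 and
// NumElements..2*NumElements-1 name V2; the plan records which halves of
// each operand are used and rebases the indices for the narrow shuffles.
// Struct and function names are the editor's own.
#include <vector>

struct HalfBlendPlan {
  std::vector<int> V1BlendMask, V2BlendMask, BlendMask;
  bool UseLoV1 = false, UseHiV1 = false, UseLoV2 = false, UseHiV2 = false;
};

static HalfBlendPlan planHalfBlend(const std::vector<int> &HalfMask,
                                   int NumElements) {
  int SplitNumElements = NumElements / 2;
  HalfBlendPlan P;
  P.V1BlendMask.assign(SplitNumElements, -1);
  P.V2BlendMask.assign(SplitNumElements, -1);
  P.BlendMask.assign(SplitNumElements, -1);
  for (int i = 0; i < SplitNumElements; ++i) {
    int M = HalfMask[i];
    if (M >= NumElements) {
      // From V2: note which half is used and rebase the index so it
      // addresses the {LoV2, HiV2} pair in the narrow shuffle.
      if (M >= NumElements + SplitNumElements)
        P.UseHiV2 = true;
      else
        P.UseLoV2 = true;
      P.V2BlendMask[i] = M - NumElements;
      P.BlendMask[i] = SplitNumElements + i;
    } else if (M >= 0) {
      // From V1: same bookkeeping against the {LoV1, HiV1} pair.
      if (M >= SplitNumElements)
        P.UseHiV1 = true;
      else
        P.UseLoV1 = true;
      P.V1BlendMask[i] = M;
      P.BlendMask[i] = i;
    }
  }
  return P;
}
// ---------------------------------------------------------------------------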
14603
14604/// Either split a vector in halves or decompose the shuffles and the
14605/// blend.
14606///
14607/// This is provided as a good fallback for many lowerings of non-single-input
14608/// shuffles with more than one 128-bit lane. In those cases, we want to select
14609/// between splitting the shuffle into 128-bit components and stitching those
14610/// back together vs. extracting the single-input shuffles and blending those
14611/// results.
14612static SDValue lowerShuffleAsSplitOrBlend(const SDLoc &DL, MVT VT, SDValue V1,
14613 SDValue V2, ArrayRef<int> Mask,
14614 const X86Subtarget &Subtarget,
14615 SelectionDAG &DAG) {
14616   assert(!V2.isUndef() && "This routine must not be used to lower single-input "
14617          "shuffles as it could then recurse on itself.");
14618 int Size = Mask.size();
14619
14620 // If this can be modeled as a broadcast of two elements followed by a blend,
14621 // prefer that lowering. This is especially important because broadcasts can
14622 // often fold with memory operands.
14623 auto DoBothBroadcast = [&] {
14624 int V1BroadcastIdx = -1, V2BroadcastIdx = -1;
14625 for (int M : Mask)
14626 if (M >= Size) {
14627 if (V2BroadcastIdx < 0)
14628 V2BroadcastIdx = M - Size;
14629 else if (M - Size != V2BroadcastIdx)
14630 return false;
14631 } else if (M >= 0) {
14632 if (V1BroadcastIdx < 0)
14633 V1BroadcastIdx = M;
14634 else if (M != V1BroadcastIdx)
14635 return false;
14636 }
14637 return true;
14638 };
14639 if (DoBothBroadcast())
14640 return lowerShuffleAsDecomposedShuffleBlend(DL, VT, V1, V2, Mask,
14641 Subtarget, DAG);
14642
14643 // If the inputs all stem from a single 128-bit lane of each input, then we
14644 // split them rather than blending because the split will decompose to
14645 // unusually few instructions.
14646 int LaneCount = VT.getSizeInBits() / 128;
14647 int LaneSize = Size / LaneCount;
14648 SmallBitVector LaneInputs[2];
14649 LaneInputs[0].resize(LaneCount, false);
14650 LaneInputs[1].resize(LaneCount, false);
14651 for (int i = 0; i < Size; ++i)
14652 if (Mask[i] >= 0)
14653 LaneInputs[Mask[i] / Size][(Mask[i] % Size) / LaneSize] = true;
14654 if (LaneInputs[0].count() <= 1 && LaneInputs[1].count() <= 1)
14655 return splitAndLowerShuffle(DL, VT, V1, V2, Mask, DAG);
14656
14657 // Otherwise, just fall back to decomposed shuffles and a blend. This requires
14658 // that the decomposed single-input shuffles don't end up here.
14659 return lowerShuffleAsDecomposedShuffleBlend(DL, VT, V1, V2, Mask, Subtarget,
14660 DAG);
14661}
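
// --- Editor's illustrative sketch (not part of X86ISelLowering.cpp) -------
// Hedged restatement of the split-vs-blend heuristic above: count how many
// 128-bit lanes of each operand the mask actually reads; if each operand
// contributes at most one lane, splitting is preferred. Helper name is the
// editor's own.
#include <vector>

static bool preferSplitOverBlend(const std::vector<int> &Mask, int LaneCount) {
  int Size = (int)Mask.size();
  int LaneSize = Size / LaneCount;
  unsigned LaneInputs[2] = {0, 0}; // Bit L set => lane L of that operand used.
  for (int M : Mask)
    if (M >= 0)
      LaneInputs[M / Size] |= 1u << ((M % Size) / LaneSize);
  auto CountBits = [](unsigned V) {
    int N = 0;
    for (; V; V &= V - 1)
      ++N;
    return N;
  };
  return CountBits(LaneInputs[0]) <= 1 && CountBits(LaneInputs[1]) <= 1;
}
// ---------------------------------------------------------------------------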
14662
14663/// Lower a vector shuffle crossing multiple 128-bit lanes as
14664/// a lane permutation followed by a per-lane permutation.
14665///
14666/// This is mainly for cases where we can have non-repeating permutes
14667/// in each lane.
14668///
14669/// TODO: This is very similar to lowerShuffleAsLanePermuteAndRepeatedMask,
14670/// we should investigate merging them.
14671static SDValue lowerShuffleAsLanePermuteAndPermute(
14672 const SDLoc &DL, MVT VT, SDValue V1, SDValue V2, ArrayRef<int> Mask,
14673 SelectionDAG &DAG, const X86Subtarget &Subtarget) {
14674 int NumElts = VT.getVectorNumElements();
14675 int NumLanes = VT.getSizeInBits() / 128;
14676 int NumEltsPerLane = NumElts / NumLanes;
14677
14678 SmallVector<int, 4> SrcLaneMask(NumLanes, SM_SentinelUndef);
14679 SmallVector<int, 16> PermMask(NumElts, SM_SentinelUndef);
14680
14681 for (int i = 0; i != NumElts; ++i) {
14682 int M = Mask[i];
14683 if (M < 0)
14684 continue;
14685
14686 // Ensure that each lane comes from a single source lane.
14687 int SrcLane = M / NumEltsPerLane;
14688 int DstLane = i / NumEltsPerLane;
14689 if (!isUndefOrEqual(SrcLaneMask[DstLane], SrcLane))
14690 return SDValue();
14691 SrcLaneMask[DstLane] = SrcLane;
14692
14693 PermMask[i] = (DstLane * NumEltsPerLane) + (M % NumEltsPerLane);
14694 }
14695
14696 // Make sure we set all elements of the lane mask, to avoid undef propagation.
14697 SmallVector<int, 16> LaneMask(NumElts, SM_SentinelUndef);
14698 for (int DstLane = 0; DstLane != NumLanes; ++DstLane) {
14699 int SrcLane = SrcLaneMask[DstLane];
14700 if (0 <= SrcLane)
14701 for (int j = 0; j != NumEltsPerLane; ++j) {
14702 LaneMask[(DstLane * NumEltsPerLane) + j] =
14703 (SrcLane * NumEltsPerLane) + j;
14704 }
14705 }
14706
14707 // If we're only shuffling a single lowest lane and the rest are identity
14708 // then don't bother.
14709 // TODO - isShuffleMaskInputInPlace could be extended to something like this.
14710 int NumIdentityLanes = 0;
14711 bool OnlyShuffleLowestLane = true;
14712 for (int i = 0; i != NumLanes; ++i) {
14713 if (isSequentialOrUndefInRange(PermMask, i * NumEltsPerLane, NumEltsPerLane,
14714 i * NumEltsPerLane))
14715 NumIdentityLanes++;
14716 else if (SrcLaneMask[i] != 0 && SrcLaneMask[i] != NumLanes)
14717 OnlyShuffleLowestLane = false;
14718 }
14719 if (OnlyShuffleLowestLane && NumIdentityLanes == (NumLanes - 1))
14720 return SDValue();
14721
14722 SDValue LanePermute = DAG.getVectorShuffle(VT, DL, V1, V2, LaneMask);
14723 return DAG.getVectorShuffle(VT, DL, LanePermute, DAG.getUNDEF(VT), PermMask);
14724}
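
// --- Editor's illustrative sketch (not part of X86ISelLowering.cpp) -------
// Sketch of the two-step decomposition above: derive, per destination lane,
// a single source lane plus an in-lane permute mask. Returns false when a
// destination lane would need elements from more than one source lane,
// which is the same bail-out the real lowering takes. Names are the
// editor's own.
#include <vector>

static bool decomposeLanePermuteAndPermute(const std::vector<int> &Mask,
                                           int NumEltsPerLane,
                                           std::vector<int> &SrcLaneMask,
                                           std::vector<int> &PermMask) {
  int NumElts = (int)Mask.size();
  int NumLanes = NumElts / NumEltsPerLane;
  SrcLaneMask.assign(NumLanes, -1);
  PermMask.assign(NumElts, -1);
  for (int i = 0; i != NumElts; ++i) {
    int M = Mask[i];
    if (M < 0)
      continue;
    int SrcLane = M / NumEltsPerLane;
    int DstLane = i / NumEltsPerLane;
    if (SrcLaneMask[DstLane] >= 0 && SrcLaneMask[DstLane] != SrcLane)
      return false; // This lane needs two source lanes: not handled here.
    SrcLaneMask[DstLane] = SrcLane;
    PermMask[i] = DstLane * NumEltsPerLane + M % NumEltsPerLane;
  }
  return true;
}
// ---------------------------------------------------------------------------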
14725
14726/// Lower a vector shuffle crossing multiple 128-bit lanes by shuffling one
14727/// source with a lane permutation.
14728///
14729/// This lowering strategy results in four instructions in the worst case for a
14730 /// single-input cross-lane shuffle, which is better than any other fully general
14731/// cross-lane shuffle strategy I'm aware of. Special cases for each particular
14732/// shuffle pattern should be handled prior to trying this lowering.
14733static SDValue lowerShuffleAsLanePermuteAndShuffle(
14734 const SDLoc &DL, MVT VT, SDValue V1, SDValue V2, ArrayRef<int> Mask,
14735 SelectionDAG &DAG, const X86Subtarget &Subtarget) {
14736 // FIXME: This should probably be generalized for 512-bit vectors as well.
14737   assert(VT.is256BitVector() && "Only for 256-bit vector shuffles!");
14738 int Size = Mask.size();
14739 int LaneSize = Size / 2;
14740
14741 // If there are only inputs from one 128-bit lane, splitting will in fact be
14742 // less expensive. The flags track whether the given lane contains an element
14743 // that crosses to another lane.
14744 if (!Subtarget.hasAVX2()) {
14745 bool LaneCrossing[2] = {false, false};
14746 for (int i = 0; i < Size; ++i)
14747 if (Mask[i] >= 0 && (Mask[i] % Size) / LaneSize != i / LaneSize)
14748 LaneCrossing[(Mask[i] % Size) / LaneSize] = true;
14749 if (!LaneCrossing[0] || !LaneCrossing[1])
14750 return splitAndLowerShuffle(DL, VT, V1, V2, Mask, DAG);
14751 } else {
14752 bool LaneUsed[2] = {false, false};
14753 for (int i = 0; i < Size; ++i)
14754 if (Mask[i] >= 0)
14755 LaneUsed[(Mask[i] / LaneSize)] = true;
14756 if (!LaneUsed[0] || !LaneUsed[1])
14757 return splitAndLowerShuffle(DL, VT, V1, V2, Mask, DAG);
14758 }
14759
14760 // TODO - we could support shuffling V2 in the Flipped input.
14761   assert(V2.isUndef() &&
14762          "This last part of this routine only works on single input shuffles");
14763
14764 SmallVector<int, 32> InLaneMask(Mask.begin(), Mask.end());
14765 for (int i = 0; i < Size; ++i) {
14766 int &M = InLaneMask[i];
14767 if (M < 0)
14768 continue;
14769 if (((M % Size) / LaneSize) != (i / LaneSize))
14770 M = (M % LaneSize) + ((i / LaneSize) * LaneSize) + Size;
14771 }
14772   assert(!is128BitLaneCrossingShuffleMask(VT, InLaneMask) &&
14773          "In-lane shuffle mask expected");
14774
14775 // Flip the lanes, and shuffle the results which should now be in-lane.
14776 MVT PVT = VT.isFloatingPoint() ? MVT::v4f64 : MVT::v4i64;
14777 SDValue Flipped = DAG.getBitcast(PVT, V1);
14778 Flipped =
14779 DAG.getVectorShuffle(PVT, DL, Flipped, DAG.getUNDEF(PVT), {2, 3, 0, 1});
14780 Flipped = DAG.getBitcast(VT, Flipped);
14781 return DAG.getVectorShuffle(VT, DL, V1, Flipped, InLaneMask);
14782}
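
// --- Editor's illustrative sketch (not part of X86ISelLowering.cpp) -------
// Sketch of the final step above for a single-input 256-bit shuffle: any
// element that lives in the "wrong" 128-bit lane is redirected, by adding
// Size, to the lane-flipped copy of the input, so the combined shuffle of
// {V1, Flipped} stays lane-local. Helper name is the editor's own.
#include <vector>

static std::vector<int> buildInLaneMaskSketch(const std::vector<int> &Mask) {
  int Size = (int)Mask.size();
  int LaneSize = Size / 2;
  std::vector<int> InLaneMask(Mask);
  for (int i = 0; i < Size; ++i) {
    int &M = InLaneMask[i];
    if (M < 0)
      continue;
    if ((M % Size) / LaneSize != i / LaneSize)
      M = M % LaneSize + (i / LaneSize) * LaneSize + Size;
  }
  return InLaneMask;
}
// ---------------------------------------------------------------------------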
14783
14784/// Handle lowering 2-lane 128-bit shuffles.
14785static SDValue lowerV2X128Shuffle(const SDLoc &DL, MVT VT, SDValue V1,
14786 SDValue V2, ArrayRef<int> Mask,
14787 const APInt &Zeroable,
14788 const X86Subtarget &Subtarget,
14789 SelectionDAG &DAG) {
14790 // With AVX2, use VPERMQ/VPERMPD for unary shuffles to allow memory folding.
14791 if (Subtarget.hasAVX2() && V2.isUndef())
14792 return SDValue();
14793
14794 SmallVector<int, 4> WidenedMask;
14795 if (!canWidenShuffleElements(Mask, Zeroable, WidenedMask))
14796 return SDValue();
14797
14798 bool IsLowZero = (Zeroable & 0x3) == 0x3;
14799 bool IsHighZero = (Zeroable & 0xc) == 0xc;
14800
14801 // Try to use an insert into a zero vector.
14802 if (WidenedMask[0] == 0 && IsHighZero) {
14803 MVT SubVT = MVT::getVectorVT(VT.getVectorElementType(), 2);
14804 SDValue LoV = DAG.getNode(ISD::EXTRACT_SUBVECTOR, DL, SubVT, V1,
14805 DAG.getIntPtrConstant(0, DL));
14806 return DAG.getNode(ISD::INSERT_SUBVECTOR, DL, VT,
14807 getZeroVector(VT, Subtarget, DAG, DL), LoV,
14808 DAG.getIntPtrConstant(0, DL));
14809 }
14810
14811 // TODO: If minimizing size and one of the inputs is a zero vector and the
14812   // zero vector has only one use, we could use a VPERM2X128 to save the
14813 // instruction bytes needed to explicitly generate the zero vector.
14814
14815 // Blends are faster and handle all the non-lane-crossing cases.
14816 if (SDValue Blend = lowerShuffleAsBlend(DL, VT, V1, V2, Mask, Zeroable,
14817 Subtarget, DAG))
14818 return Blend;
14819
14820 // If either input operand is a zero vector, use VPERM2X128 because its mask
14821 // allows us to replace the zero input with an implicit zero.
14822 if (!IsLowZero && !IsHighZero) {
14823 // Check for patterns which can be matched with a single insert of a 128-bit
14824 // subvector.
14825 bool OnlyUsesV1 = isShuffleEquivalent(V1, V2, Mask, {0, 1, 0, 1});
14826 if (OnlyUsesV1 || isShuffleEquivalent(V1, V2, Mask, {0, 1, 4, 5})) {
14827
14828 // With AVX1, use vperm2f128 (below) to allow load folding. Otherwise,
14829 // this will likely become vinsertf128 which can't fold a 256-bit memop.
14830 if (!isa<LoadSDNode>(peekThroughBitcasts(V1))) {
14831 MVT SubVT = MVT::getVectorVT(VT.getVectorElementType(), 2);
14832 SDValue SubVec = DAG.getNode(ISD::EXTRACT_SUBVECTOR, DL, SubVT,
14833 OnlyUsesV1 ? V1 : V2,
14834 DAG.getIntPtrConstant(0, DL));
14835 return DAG.getNode(ISD::INSERT_SUBVECTOR, DL, VT, V1, SubVec,
14836 DAG.getIntPtrConstant(2, DL));
14837 }
14838 }
14839
14840 // Try to use SHUF128 if possible.
14841 if (Subtarget.hasVLX()) {
14842 if (WidenedMask[0] < 2 && WidenedMask[1] >= 2) {
14843 unsigned PermMask = ((WidenedMask[0] % 2) << 0) |
14844 ((WidenedMask[1] % 2) << 1);
14845 return DAG.getNode(X86ISD::SHUF128, DL, VT, V1, V2,
14846 DAG.getTargetConstant(PermMask, DL, MVT::i8));
14847 }
14848 }
14849 }
14850
14851 // Otherwise form a 128-bit permutation. After accounting for undefs,
14852 // convert the 64-bit shuffle mask selection values into 128-bit
14853 // selection bits by dividing the indexes by 2 and shifting into positions
14854 // defined by a vperm2*128 instruction's immediate control byte.
14855
14856 // The immediate permute control byte looks like this:
14857 // [1:0] - select 128 bits from sources for low half of destination
14858 // [2] - ignore
14859 // [3] - zero low half of destination
14860 // [5:4] - select 128 bits from sources for high half of destination
14861 // [6] - ignore
14862 // [7] - zero high half of destination
14863
14864   assert((WidenedMask[0] >= 0 || IsLowZero) &&
14865          (WidenedMask[1] >= 0 || IsHighZero) && "Undef half?");
14866
14867 unsigned PermMask = 0;
14868 PermMask |= IsLowZero ? 0x08 : (WidenedMask[0] << 0);
14869 PermMask |= IsHighZero ? 0x80 : (WidenedMask[1] << 4);
14870
14871 // Check the immediate mask and replace unused sources with undef.
14872 if ((PermMask & 0x0a) != 0x00 && (PermMask & 0xa0) != 0x00)
14873 V1 = DAG.getUNDEF(VT);
14874 if ((PermMask & 0x0a) != 0x02 && (PermMask & 0xa0) != 0x20)
14875 V2 = DAG.getUNDEF(VT);
14876
14877 return DAG.getNode(X86ISD::VPERM2X128, DL, VT, V1, V2,
14878 DAG.getTargetConstant(PermMask, DL, MVT::i8));
14879}
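
// --- Editor's illustrative sketch (not part of X86ISelLowering.cpp) -------
// Compile-ready sketch of the VPERM2X128 immediate construction described in
// the comment block above. For WidenedMask = {1, 2} with nothing zeroable
// this yields 0x21: the low half of the result is the high half of V1 and
// the high half is the low half of V2. Function names are the editor's own.
#include <cassert>

static unsigned vperm2x128Imm(int WidenedMask0, int WidenedMask1,
                              bool IsLowZero, bool IsHighZero) {
  unsigned PermMask = 0;
  PermMask |= IsLowZero ? 0x08 : unsigned(WidenedMask0) << 0;
  PermMask |= IsHighZero ? 0x80 : unsigned(WidenedMask1) << 4;
  return PermMask;
}

static void vperm2x128ImmExample() {
  assert(vperm2x128Imm(1, 2, false, false) == 0x21);
  assert(vperm2x128Imm(0, 0, false, true) == 0x80);
}
// ---------------------------------------------------------------------------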
14880
14881/// Lower a vector shuffle by first fixing the 128-bit lanes and then
14882/// shuffling each lane.
14883///
14884/// This attempts to create a repeated lane shuffle where each lane uses one
14885/// or two of the lanes of the inputs. The lanes of the input vectors are
14886/// shuffled in one or two independent shuffles to get the lanes into the
14887/// position needed by the final shuffle.
14888static SDValue lowerShuffleAsLanePermuteAndRepeatedMask(
14889 const SDLoc &DL, MVT VT, SDValue V1, SDValue V2, ArrayRef<int> Mask,
14890 const X86Subtarget &Subtarget, SelectionDAG &DAG) {
14891   assert(!V2.isUndef() && "This is only useful with multiple inputs.");
14892
14893 if (is128BitLaneRepeatedShuffleMask(VT, Mask))
14894 return SDValue();
14895
14896 int NumElts = Mask.size();
14897 int NumLanes = VT.getSizeInBits() / 128;
14898 int NumLaneElts = 128 / VT.getScalarSizeInBits();
14899 SmallVector<int, 16> RepeatMask(NumLaneElts, -1);
14900 SmallVector<std::array<int, 2>, 2> LaneSrcs(NumLanes, {{-1, -1}});
14901
14902 // First pass will try to fill in the RepeatMask from lanes that need two
14903 // sources.
14904 for (int Lane = 0; Lane != NumLanes; ++Lane) {
14905 int Srcs[2] = {-1, -1};
14906 SmallVector<int, 16> InLaneMask(NumLaneElts, -1);
14907 for (int i = 0; i != NumLaneElts; ++i) {
14908 int M = Mask[(Lane * NumLaneElts) + i];
14909 if (M < 0)
14910 continue;
14911 // Determine which of the possible input lanes (NumLanes from each source)
14912 // this element comes from. Assign that as one of the sources for this
14913 // lane. We can assign up to 2 sources for this lane. If we run out
14914       // of sources we can't do anything.
14915 int LaneSrc = M / NumLaneElts;
14916 int Src;
14917 if (Srcs[0] < 0 || Srcs[0] == LaneSrc)
14918 Src = 0;
14919 else if (Srcs[1] < 0 || Srcs[1] == LaneSrc)
14920 Src = 1;
14921 else
14922 return SDValue();
14923
14924 Srcs[Src] = LaneSrc;
14925 InLaneMask[i] = (M % NumLaneElts) + Src * NumElts;
14926 }
14927
14928 // If this lane has two sources, see if it fits with the repeat mask so far.
14929 if (Srcs[1] < 0)
14930 continue;
14931
14932 LaneSrcs[Lane][0] = Srcs[0];
14933 LaneSrcs[Lane][1] = Srcs[1];
14934
14935 auto MatchMasks = [](ArrayRef<int> M1, ArrayRef<int> M2) {
14936       assert(M1.size() == M2.size() && "Unexpected mask size");
14937 for (int i = 0, e = M1.size(); i != e; ++i)
14938 if (M1[i] >= 0 && M2[i] >= 0 && M1[i] != M2[i])
14939 return false;
14940 return true;
14941 };
14942
14943 auto MergeMasks = [](ArrayRef<int> Mask, MutableArrayRef<int> MergedMask) {
14944       assert(Mask.size() == MergedMask.size() && "Unexpected mask size");
14945 for (int i = 0, e = MergedMask.size(); i != e; ++i) {
14946 int M = Mask[i];
14947 if (M < 0)
14948 continue;
14949         assert((MergedMask[i] < 0 || MergedMask[i] == M) &&
14950                "Unexpected mask element");
14951 MergedMask[i] = M;
14952 }
14953 };
14954
14955 if (MatchMasks(InLaneMask, RepeatMask)) {
14956 // Merge this lane mask into the final repeat mask.
14957 MergeMasks(InLaneMask, RepeatMask);
14958 continue;
14959 }
14960
14961 // Didn't find a match. Swap the operands and try again.
14962 std::swap(LaneSrcs[Lane][0], LaneSrcs[Lane][1]);
14963 ShuffleVectorSDNode::commuteMask(InLaneMask);
14964
14965 if (MatchMasks(InLaneMask, RepeatMask)) {
14966 // Merge this lane mask into the final repeat mask.
14967 MergeMasks(InLaneMask, RepeatMask);
14968 continue;
14969 }
14970
14971 // Couldn't find a match with the operands in either order.
14972 return SDValue();
14973 }
14974
14975 // Now handle any lanes with only one source.
14976 for (int Lane = 0; Lane != NumLanes; ++Lane) {
14977 // If this lane has already been processed, skip it.
14978 if (LaneSrcs[Lane][0] >= 0)
14979 continue;
14980
14981 for (int i = 0; i != NumLaneElts; ++i) {
14982 int M = Mask[(Lane * NumLaneElts) + i];
14983 if (M < 0)
14984 continue;
14985
14986       // If RepeatMask isn't defined yet, we can define it ourselves.
14987 if (RepeatMask[i] < 0)
14988 RepeatMask[i] = M % NumLaneElts;
14989
14990 if (RepeatMask[i] < NumElts) {
14991 if (RepeatMask[i] != M % NumLaneElts)
14992 return SDValue();
14993 LaneSrcs[Lane][0] = M / NumLaneElts;
14994 } else {
14995 if (RepeatMask[i] != ((M % NumLaneElts) + NumElts))
14996 return SDValue();
14997 LaneSrcs[Lane][1] = M / NumLaneElts;
14998 }
14999 }
15000
15001 if (LaneSrcs[Lane][0] < 0 && LaneSrcs[Lane][1] < 0)
15002 return SDValue();
15003 }
15004
15005 SmallVector<int, 16> NewMask(NumElts, -1);
15006 for (int Lane = 0; Lane != NumLanes; ++Lane) {
15007 int Src = LaneSrcs[Lane][0];
15008 for (int i = 0; i != NumLaneElts; ++i) {
15009 int M = -1;
15010 if (Src >= 0)
15011 M = Src * NumLaneElts + i;
15012 NewMask[Lane * NumLaneElts + i] = M;
15013 }
15014 }
15015 SDValue NewV1 = DAG.getVectorShuffle(VT, DL, V1, V2, NewMask);
15016 // Ensure we didn't get back the shuffle we started with.
15017 // FIXME: This is a hack to make up for some splat handling code in
15018 // getVectorShuffle.
15019 if (isa<ShuffleVectorSDNode>(NewV1) &&
15020 cast<ShuffleVectorSDNode>(NewV1)->getMask() == Mask)
15021 return SDValue();
15022
15023 for (int Lane = 0; Lane != NumLanes; ++Lane) {
15024 int Src = LaneSrcs[Lane][1];
15025 for (int i = 0; i != NumLaneElts; ++i) {
15026 int M = -1;
15027 if (Src >= 0)
15028 M = Src * NumLaneElts + i;
15029 NewMask[Lane * NumLaneElts + i] = M;
15030 }
15031 }
15032 SDValue NewV2 = DAG.getVectorShuffle(VT, DL, V1, V2, NewMask);
15033 // Ensure we didn't get back the shuffle we started with.
15034 // FIXME: This is a hack to make up for some splat handling code in
15035 // getVectorShuffle.
15036 if (isa<ShuffleVectorSDNode>(NewV2) &&
15037 cast<ShuffleVectorSDNode>(NewV2)->getMask() == Mask)
15038 return SDValue();
15039
15040 for (int i = 0; i != NumElts; ++i) {
15041 NewMask[i] = RepeatMask[i % NumLaneElts];
15042 if (NewMask[i] < 0)
15043 continue;
15044
15045 NewMask[i] += (i / NumLaneElts) * NumLaneElts;
15046 }
15047 return DAG.getVectorShuffle(VT, DL, NewV1, NewV2, NewMask);
15048}
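
// --- Editor's illustrative sketch (not part of X86ISelLowering.cpp) -------
// Sketch of the MatchMasks/MergeMasks idea used above: a lane's local mask
// is compatible with the repeat mask if the two never disagree on a defined
// element, and merging simply fills in the undefined slots. Helper name is
// the editor's own.
#include <vector>

static bool mergeIfCompatible(const std::vector<int> &LaneMask,
                              std::vector<int> &RepeatMask) {
  for (size_t i = 0; i != RepeatMask.size(); ++i)
    if (LaneMask[i] >= 0 && RepeatMask[i] >= 0 && LaneMask[i] != RepeatMask[i])
      return false;
  for (size_t i = 0; i != RepeatMask.size(); ++i)
    if (LaneMask[i] >= 0)
      RepeatMask[i] = LaneMask[i];
  return true;
}
// ---------------------------------------------------------------------------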
15049
15050/// If the input shuffle mask results in a vector that is undefined in all upper
15051/// or lower half elements and that mask accesses only 2 halves of the
15052/// shuffle's operands, return true. A mask of half the width with mask indexes
15053/// adjusted to access the extracted halves of the original shuffle operands is
15054/// returned in HalfMask. HalfIdx1 and HalfIdx2 return whether the upper or
15055/// lower half of each input operand is accessed.
15056static bool
15057getHalfShuffleMask(ArrayRef<int> Mask, MutableArrayRef<int> HalfMask,
15058 int &HalfIdx1, int &HalfIdx2) {
15059   assert((Mask.size() == HalfMask.size() * 2) &&
15060          "Expected input mask to be twice as long as output");
15061
15062 // Exactly one half of the result must be undef to allow narrowing.
15063 bool UndefLower = isUndefLowerHalf(Mask);
15064 bool UndefUpper = isUndefUpperHalf(Mask);
15065 if (UndefLower == UndefUpper)
15066 return false;
15067
15068 unsigned HalfNumElts = HalfMask.size();
15069 unsigned MaskIndexOffset = UndefLower ? HalfNumElts : 0;
15070 HalfIdx1 = -1;
15071 HalfIdx2 = -1;
15072 for (unsigned i = 0; i != HalfNumElts; ++i) {
15073 int M = Mask[i + MaskIndexOffset];
15074 if (M < 0) {
15075 HalfMask[i] = M;
15076 continue;
15077 }
15078
15079 // Determine which of the 4 half vectors this element is from.
15080 // i.e. 0 = Lower V1, 1 = Upper V1, 2 = Lower V2, 3 = Upper V2.
15081 int HalfIdx = M / HalfNumElts;
15082
15083 // Determine the element index into its half vector source.
15084 int HalfElt = M % HalfNumElts;
15085
15086 // We can shuffle with up to 2 half vectors, set the new 'half'
15087 // shuffle mask accordingly.
15088 if (HalfIdx1 < 0 || HalfIdx1 == HalfIdx) {
15089 HalfMask[i] = HalfElt;
15090 HalfIdx1 = HalfIdx;
15091 continue;
15092 }
15093 if (HalfIdx2 < 0 || HalfIdx2 == HalfIdx) {
15094 HalfMask[i] = HalfElt + HalfNumElts;
15095 HalfIdx2 = HalfIdx;
15096 continue;
15097 }
15098
15099 // Too many half vectors referenced.
15100 return false;
15101 }
15102
15103 return true;
15104}
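
// --- Editor's illustrative sketch (not part of X86ISelLowering.cpp) -------
// Worked example of getHalfShuffleMask above, simplified to take only the
// defined (non-undef) half of the full mask. Half indices 0..3 name
// {lower V1, upper V1, lower V2, upper V2}. For the v8 mask
// <2, 11, 3, 10, u, u, u, u> the active half is {2, 11, 3, 10} and the
// narrowed mask comes out as <2, 7, 3, 6> with HalfIdx1 = 0 and
// HalfIdx2 = 2. Function name is the editor's own.
#include <vector>

static bool halfShuffleMaskSketch(const std::vector<int> &ActiveHalf,
                                  std::vector<int> &HalfMask, int &HalfIdx1,
                                  int &HalfIdx2) {
  int HalfNumElts = (int)ActiveHalf.size();
  HalfMask.assign(HalfNumElts, -1);
  HalfIdx1 = HalfIdx2 = -1;
  for (int i = 0; i != HalfNumElts; ++i) {
    int M = ActiveHalf[i];
    if (M < 0)
      continue;
    int HalfIdx = M / HalfNumElts, HalfElt = M % HalfNumElts;
    if (HalfIdx1 < 0 || HalfIdx1 == HalfIdx) {
      HalfMask[i] = HalfElt;
      HalfIdx1 = HalfIdx;
    } else if (HalfIdx2 < 0 || HalfIdx2 == HalfIdx) {
      HalfMask[i] = HalfElt + HalfNumElts;
      HalfIdx2 = HalfIdx;
    } else {
      return false; // More than two half-vectors referenced.
    }
  }
  return true;
}
// ---------------------------------------------------------------------------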
15105
15106/// Given the output values from getHalfShuffleMask(), create a half width
15107/// shuffle of extracted vectors followed by an insert back to full width.
15108static SDValue getShuffleHalfVectors(const SDLoc &DL, SDValue V1, SDValue V2,
15109 ArrayRef<int> HalfMask, int HalfIdx1,
15110 int HalfIdx2, bool UndefLower,
15111 SelectionDAG &DAG, bool UseConcat = false) {
15112   assert(V1.getValueType() == V2.getValueType() && "Different sized vectors?");
15113   assert(V1.getValueType().isSimple() && "Expecting only simple types");
15114
15115 MVT VT = V1.getSimpleValueType();
15116 MVT HalfVT = VT.getHalfNumVectorElementsVT();
15117 unsigned HalfNumElts = HalfVT.getVectorNumElements();
15118
15119 auto getHalfVector = [&](int HalfIdx) {
15120 if (HalfIdx < 0)
15121 return DAG.getUNDEF(HalfVT);
15122 SDValue V = (HalfIdx < 2 ? V1 : V2);
15123 HalfIdx = (HalfIdx % 2) * HalfNumElts;
15124 return DAG.getNode(ISD::EXTRACT_SUBVECTOR, DL, HalfVT, V,
15125 DAG.getIntPtrConstant(HalfIdx, DL));
15126 };
15127
15128 // ins undef, (shuf (ext V1, HalfIdx1), (ext V2, HalfIdx2), HalfMask), Offset
15129 SDValue Half1 = getHalfVector(HalfIdx1);
15130 SDValue Half2 = getHalfVector(HalfIdx2);
15131 SDValue V = DAG.getVectorShuffle(HalfVT, DL, Half1, Half2, HalfMask);
15132 if (UseConcat) {
15133 SDValue Op0 = V;
15134 SDValue Op1 = DAG.getUNDEF(HalfVT);
15135 if (UndefLower)
15136 std::swap(Op0, Op1);
15137 return DAG.getNode(ISD::CONCAT_VECTORS, DL, VT, Op0, Op1);
15138 }
15139
15140 unsigned Offset = UndefLower ? HalfNumElts : 0;
15141 return DAG.getNode(ISD::INSERT_SUBVECTOR, DL, VT, DAG.getUNDEF(VT), V,
15142 DAG.getIntPtrConstant(Offset, DL));
15143}
15144
15145/// Lower shuffles where an entire half of a 256 or 512-bit vector is UNDEF.
15146/// This allows for fast cases such as subvector extraction/insertion
15147/// or shuffling smaller vector types which can lower more efficiently.
15148static SDValue lowerShuffleWithUndefHalf(const SDLoc &DL, MVT VT, SDValue V1,
15149 SDValue V2, ArrayRef<int> Mask,
15150 const X86Subtarget &Subtarget,
15151 SelectionDAG &DAG) {
15152   assert((VT.is256BitVector() || VT.is512BitVector()) &&
15153          "Expected 256-bit or 512-bit vector");
15154
15155 bool UndefLower = isUndefLowerHalf(Mask);
15156 if (!UndefLower && !isUndefUpperHalf(Mask))
15157 return SDValue();
15158
15159   assert((!UndefLower || !isUndefUpperHalf(Mask)) &&
15160          "Completely undef shuffle mask should have been simplified already");
15161
15162 // Upper half is undef and lower half is whole upper subvector.
15163 // e.g. vector_shuffle <4, 5, 6, 7, u, u, u, u> or <2, 3, u, u>
15164 MVT HalfVT = VT.getHalfNumVectorElementsVT();
15165 unsigned HalfNumElts = HalfVT.getVectorNumElements();
15166 if (!UndefLower &&
15167 isSequentialOrUndefInRange(Mask, 0, HalfNumElts, HalfNumElts)) {
15168 SDValue Hi = DAG.getNode(ISD::EXTRACT_SUBVECTOR, DL, HalfVT, V1,
15169 DAG.getIntPtrConstant(HalfNumElts, DL));
15170 return DAG.getNode(ISD::INSERT_SUBVECTOR, DL, VT, DAG.getUNDEF(VT), Hi,
15171 DAG.getIntPtrConstant(0, DL));
15172 }
15173
15174 // Lower half is undef and upper half is whole lower subvector.
15175 // e.g. vector_shuffle <u, u, u, u, 0, 1, 2, 3> or <u, u, 0, 1>
15176 if (UndefLower &&
15177 isSequentialOrUndefInRange(Mask, HalfNumElts, HalfNumElts, 0)) {
15178 SDValue Hi = DAG.getNode(ISD::EXTRACT_SUBVECTOR, DL, HalfVT, V1,
15179 DAG.getIntPtrConstant(0, DL));
15180 return DAG.getNode(ISD::INSERT_SUBVECTOR, DL, VT, DAG.getUNDEF(VT), Hi,
15181 DAG.getIntPtrConstant(HalfNumElts, DL));
15182 }
15183
15184 int HalfIdx1, HalfIdx2;
15185 SmallVector<int, 8> HalfMask(HalfNumElts);
15186 if (!getHalfShuffleMask(Mask, HalfMask, HalfIdx1, HalfIdx2))
15187 return SDValue();
15188
15189   assert(HalfMask.size() == HalfNumElts && "Unexpected shuffle mask length");
15190
15191 // Only shuffle the halves of the inputs when useful.
15192 unsigned NumLowerHalves =
15193 (HalfIdx1 == 0 || HalfIdx1 == 2) + (HalfIdx2 == 0 || HalfIdx2 == 2);
15194 unsigned NumUpperHalves =
15195 (HalfIdx1 == 1 || HalfIdx1 == 3) + (HalfIdx2 == 1 || HalfIdx2 == 3);
15196   assert(NumLowerHalves + NumUpperHalves <= 2 && "Only 1 or 2 halves allowed");
15197
15198 // Determine the larger pattern of undef/halves, then decide if it's worth
15199 // splitting the shuffle based on subtarget capabilities and types.
15200 unsigned EltWidth = VT.getVectorElementType().getSizeInBits();
15201 if (!UndefLower) {
15202 // XXXXuuuu: no insert is needed.
15203 // Always extract lowers when setting lower - these are all free subreg ops.
15204 if (NumUpperHalves == 0)
15205 return getShuffleHalfVectors(DL, V1, V2, HalfMask, HalfIdx1, HalfIdx2,
15206 UndefLower, DAG);
15207
15208 if (NumUpperHalves == 1) {
15209 // AVX2 has efficient 32/64-bit element cross-lane shuffles.
15210 if (Subtarget.hasAVX2()) {
15211 // extract128 + vunpckhps/vshufps, is better than vblend + vpermps.
15212 if (EltWidth == 32 && NumLowerHalves && HalfVT.is128BitVector() &&
15213 !is128BitUnpackShuffleMask(HalfMask) &&
15214 (!isSingleSHUFPSMask(HalfMask) ||
15215 Subtarget.hasFastVariableShuffle()))
15216 return SDValue();
15217 // If this is a unary shuffle (assume that the 2nd operand is
15218 // canonicalized to undef), then we can use vpermpd. Otherwise, we
15219 // are better off extracting the upper half of 1 operand and using a
15220 // narrow shuffle.
15221 if (EltWidth == 64 && V2.isUndef())
15222 return SDValue();
15223 }
15224 // AVX512 has efficient cross-lane shuffles for all legal 512-bit types.
15225 if (Subtarget.hasAVX512() && VT.is512BitVector())
15226 return SDValue();
15227 // Extract + narrow shuffle is better than the wide alternative.
15228 return getShuffleHalfVectors(DL, V1, V2, HalfMask, HalfIdx1, HalfIdx2,
15229 UndefLower, DAG);
15230 }
15231
15232 // Don't extract both uppers, instead shuffle and then extract.
15233     assert(NumUpperHalves == 2 && "Half vector count went wrong");
15234 return SDValue();
15235 }
15236
15237 // UndefLower - uuuuXXXX: an insert to high half is required if we split this.
15238 if (NumUpperHalves == 0) {
15239 // AVX2 has efficient 64-bit element cross-lane shuffles.
15240 // TODO: Refine to account for unary shuffle, splat, and other masks?
15241 if (Subtarget.hasAVX2() && EltWidth == 64)
15242 return SDValue();
15243 // AVX512 has efficient cross-lane shuffles for all legal 512-bit types.
15244 if (Subtarget.hasAVX512() && VT.is512BitVector())
15245 return SDValue();
15246 // Narrow shuffle + insert is better than the wide alternative.
15247 return getShuffleHalfVectors(DL, V1, V2, HalfMask, HalfIdx1, HalfIdx2,
15248 UndefLower, DAG);
15249 }
15250
15251 // NumUpperHalves != 0: don't bother with extract, shuffle, and then insert.
15252 return SDValue();
15253}
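
// --- Editor's illustrative sketch (not part of X86ISelLowering.cpp) -------
// Sketch of the half counting that drives the narrow-vs-wide decision above:
// half indices 0/2 are the (free-to-extract) lower halves of V1/V2 and 1/3
// their upper halves, e.g. HalfIdx1 = 0, HalfIdx2 = 3 gives one lower and
// one upper half. Helper name is the editor's own.
static void countReferencedHalves(int HalfIdx1, int HalfIdx2,
                                  unsigned &NumLowerHalves,
                                  unsigned &NumUpperHalves) {
  NumLowerHalves =
      (HalfIdx1 == 0 || HalfIdx1 == 2) + (HalfIdx2 == 0 || HalfIdx2 == 2);
  NumUpperHalves =
      (HalfIdx1 == 1 || HalfIdx1 == 3) + (HalfIdx2 == 1 || HalfIdx2 == 3);
}
// ---------------------------------------------------------------------------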
15254
15255/// Test whether the specified input (0 or 1) is in-place blended by the
15256/// given mask.
15257///
15258/// This returns true if the elements from a particular input are already in the
15259/// slot required by the given mask and require no permutation.
15260static bool isShuffleMaskInputInPlace(int Input, ArrayRef<int> Mask) {
15261   assert((Input == 0 || Input == 1) && "Only two inputs to shuffles.");
15262 int Size = Mask.size();
15263 for (int i = 0; i < Size; ++i)
15264 if (Mask[i] >= 0 && Mask[i] / Size == Input && Mask[i] % Size != i)
15265 return false;
15266
15267 return true;
15268}
15269
15270/// Handle case where shuffle sources are coming from the same 128-bit lane and
15271/// every lane can be represented as the same repeating mask - allowing us to
15272/// shuffle the sources with the repeating shuffle and then permute the result
15273/// to the destination lanes.
15274static SDValue lowerShuffleAsRepeatedMaskAndLanePermute(
15275 const SDLoc &DL, MVT VT, SDValue V1, SDValue V2, ArrayRef<int> Mask,
15276 const X86Subtarget &Subtarget, SelectionDAG &DAG) {
15277 int NumElts = VT.getVectorNumElements();
15278 int NumLanes = VT.getSizeInBits() / 128;
15279 int NumLaneElts = NumElts / NumLanes;
15280
15281 // On AVX2 we may be able to just shuffle the lowest elements and then
15282 // broadcast the result.
15283 if (Subtarget.hasAVX2()) {
15284 for (unsigned BroadcastSize : {16, 32, 64}) {
15285 if (BroadcastSize <= VT.getScalarSizeInBits())
15286 continue;
15287 int NumBroadcastElts = BroadcastSize / VT.getScalarSizeInBits();
15288
15289 // Attempt to match a repeating pattern every NumBroadcastElts,
15290       // accounting for UNDEFs, but only referencing the lowest 128-bit
15291 // lane of the inputs.
15292 auto FindRepeatingBroadcastMask = [&](SmallVectorImpl<int> &RepeatMask) {
15293 for (int i = 0; i != NumElts; i += NumBroadcastElts)
15294 for (int j = 0; j != NumBroadcastElts; ++j) {
15295 int M = Mask[i + j];
15296 if (M < 0)
15297 continue;
15298 int &R = RepeatMask[j];
15299 if (0 != ((M % NumElts) / NumLaneElts))
15300 return false;
15301 if (0 <= R && R != M)
15302 return false;
15303 R = M;
15304 }
15305 return true;
15306 };
15307
15308 SmallVector<int, 8> RepeatMask((unsigned)NumElts, -1);
15309 if (!FindRepeatingBroadcastMask(RepeatMask))
15310 continue;
15311
15312 // Shuffle the (lowest) repeated elements in place for broadcast.
15313 SDValue RepeatShuf = DAG.getVectorShuffle(VT, DL, V1, V2, RepeatMask);
15314
15315 // Shuffle the actual broadcast.
15316 SmallVector<int, 8> BroadcastMask((unsigned)NumElts, -1);
15317 for (int i = 0; i != NumElts; i += NumBroadcastElts)
15318 for (int j = 0; j != NumBroadcastElts; ++j)
15319 BroadcastMask[i + j] = j;
15320 return DAG.getVectorShuffle(VT, DL, RepeatShuf, DAG.getUNDEF(VT),
15321 BroadcastMask);
15322 }
15323 }
15324
15325 // Bail if the shuffle mask doesn't cross 128-bit lanes.
15326 if (!is128BitLaneCrossingShuffleMask(VT, Mask))
15327 return SDValue();
15328
15329 // Bail if we already have a repeated lane shuffle mask.
15330 SmallVector<int, 8> RepeatedShuffleMask;
15331 if (is128BitLaneRepeatedShuffleMask(VT, Mask, RepeatedShuffleMask))
15332 return SDValue();
15333
15334 // On AVX2 targets we can permute 256-bit vectors as 64-bit sub-lanes
15335 // (with PERMQ/PERMPD), otherwise we can only permute whole 128-bit lanes.
15336 int SubLaneScale = Subtarget.hasAVX2() && VT.is256BitVector() ? 2 : 1;
15337 int NumSubLanes = NumLanes * SubLaneScale;
15338 int NumSubLaneElts = NumLaneElts / SubLaneScale;
15339
15340 // Check that all the sources are coming from the same lane and see if we can
15341 // form a repeating shuffle mask (local to each sub-lane). At the same time,
15342 // determine the source sub-lane for each destination sub-lane.
15343 int TopSrcSubLane = -1;
15344 SmallVector<int, 8> Dst2SrcSubLanes((unsigned)NumSubLanes, -1);
15345 SmallVector<int, 8> RepeatedSubLaneMasks[2] = {
15346 SmallVector<int, 8>((unsigned)NumSubLaneElts, SM_SentinelUndef),
15347 SmallVector<int, 8>((unsigned)NumSubLaneElts, SM_SentinelUndef)};
15348
15349 for (int DstSubLane = 0; DstSubLane != NumSubLanes; ++DstSubLane) {
15350 // Extract the sub-lane mask, check that it all comes from the same lane
15351 // and normalize the mask entries to come from the first lane.
15352 int SrcLane = -1;
15353 SmallVector<int, 8> SubLaneMask((unsigned)NumSubLaneElts, -1);
15354 for (int Elt = 0; Elt != NumSubLaneElts; ++Elt) {
15355 int M = Mask[(DstSubLane * NumSubLaneElts) + Elt];
15356 if (M < 0)
15357 continue;
15358 int Lane = (M % NumElts) / NumLaneElts;
15359 if ((0 <= SrcLane) && (SrcLane != Lane))
15360 return SDValue();
15361 SrcLane = Lane;
15362 int LocalM = (M % NumLaneElts) + (M < NumElts ? 0 : NumElts);
15363 SubLaneMask[Elt] = LocalM;
15364 }
15365
15366 // Whole sub-lane is UNDEF.
15367 if (SrcLane < 0)
15368 continue;
15369
15370 // Attempt to match against the candidate repeated sub-lane masks.
15371 for (int SubLane = 0; SubLane != SubLaneScale; ++SubLane) {
15372 auto MatchMasks = [NumSubLaneElts](ArrayRef<int> M1, ArrayRef<int> M2) {
15373 for (int i = 0; i != NumSubLaneElts; ++i) {
15374 if (M1[i] < 0 || M2[i] < 0)
15375 continue;
15376 if (M1[i] != M2[i])
15377 return false;
15378 }
15379 return true;
15380 };
15381
15382 auto &RepeatedSubLaneMask = RepeatedSubLaneMasks[SubLane];
15383 if (!MatchMasks(SubLaneMask, RepeatedSubLaneMask))
15384 continue;
15385
15386 // Merge the sub-lane mask into the matching repeated sub-lane mask.
15387 for (int i = 0; i != NumSubLaneElts; ++i) {
15388 int M = SubLaneMask[i];
15389 if (M < 0)
15390 continue;
15391         assert((RepeatedSubLaneMask[i] < 0 || RepeatedSubLaneMask[i] == M) &&
15392                "Unexpected mask element");
15393 RepeatedSubLaneMask[i] = M;
15394 }
15395
15396       // Track the topmost source sub-lane - by setting the remaining to UNDEF
15397 // we can greatly simplify shuffle matching.
15398 int SrcSubLane = (SrcLane * SubLaneScale) + SubLane;
15399 TopSrcSubLane = std::max(TopSrcSubLane, SrcSubLane);
15400 Dst2SrcSubLanes[DstSubLane] = SrcSubLane;
15401 break;
15402 }
15403
15404 // Bail if we failed to find a matching repeated sub-lane mask.
15405 if (Dst2SrcSubLanes[DstSubLane] < 0)
15406 return SDValue();
15407 }
15408   assert(0 <= TopSrcSubLane && TopSrcSubLane < NumSubLanes &&
15409          "Unexpected source lane");
15410
15411 // Create a repeating shuffle mask for the entire vector.
15412 SmallVector<int, 8> RepeatedMask((unsigned)NumElts, -1);
15413 for (int SubLane = 0; SubLane <= TopSrcSubLane; ++SubLane) {
15414 int Lane = SubLane / SubLaneScale;
15415 auto &RepeatedSubLaneMask = RepeatedSubLaneMasks[SubLane % SubLaneScale];
15416 for (int Elt = 0; Elt != NumSubLaneElts; ++Elt) {
15417 int M = RepeatedSubLaneMask[Elt];
15418 if (M < 0)
15419 continue;
15420 int Idx = (SubLane * NumSubLaneElts) + Elt;
15421 RepeatedMask[Idx] = M + (Lane * NumLaneElts);
15422 }
15423 }
15424 SDValue RepeatedShuffle = DAG.getVectorShuffle(VT, DL, V1, V2, RepeatedMask);
15425
15426 // Shuffle each source sub-lane to its destination.
15427 SmallVector<int, 8> SubLaneMask((unsigned)NumElts, -1);
15428 for (int i = 0; i != NumElts; i += NumSubLaneElts) {
15429 int SrcSubLane = Dst2SrcSubLanes[i / NumSubLaneElts];
15430 if (SrcSubLane < 0)
15431 continue;
15432 for (int j = 0; j != NumSubLaneElts; ++j)
15433 SubLaneMask[i + j] = j + (SrcSubLane * NumSubLaneElts);
15434 }
15435
15436 return DAG.getVectorShuffle(VT, DL, RepeatedShuffle, DAG.getUNDEF(VT),
15437 SubLaneMask);
15438}
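
// --- Editor's illustrative sketch (not part of X86ISelLowering.cpp) -------
// Sketch of the last step above: once each destination sub-lane knows its
// source sub-lane, the final permute mask just copies that whole source
// sub-lane into the destination slot. Helper name is the editor's own.
#include <vector>

static std::vector<int>
buildSubLanePermute(const std::vector<int> &Dst2SrcSubLanes,
                    int NumSubLaneElts) {
  int NumElts = (int)Dst2SrcSubLanes.size() * NumSubLaneElts;
  std::vector<int> SubLaneMask(NumElts, -1);
  for (int i = 0; i != NumElts; i += NumSubLaneElts) {
    int SrcSubLane = Dst2SrcSubLanes[i / NumSubLaneElts];
    if (SrcSubLane < 0)
      continue;
    for (int j = 0; j != NumSubLaneElts; ++j)
      SubLaneMask[i + j] = j + SrcSubLane * NumSubLaneElts;
  }
  return SubLaneMask;
}
// ---------------------------------------------------------------------------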
15439
15440static bool matchShuffleWithSHUFPD(MVT VT, SDValue &V1, SDValue &V2,
15441 bool &ForceV1Zero, bool &ForceV2Zero,
15442 unsigned &ShuffleImm, ArrayRef<int> Mask,
15443 const APInt &Zeroable) {
15444 int NumElts = VT.getVectorNumElements();
15445   assert(VT.getScalarSizeInBits() == 64 &&
15446          (NumElts == 2 || NumElts == 4 || NumElts == 8) &&
15447          "Unexpected data type for VSHUFPD");
15448   assert(isUndefOrZeroOrInRange(Mask, 0, 2 * NumElts) &&
15449          "Illegal shuffle mask");
15450
15451 bool ZeroLane[2] = { true, true };
15452 for (int i = 0; i < NumElts; ++i)
15453 ZeroLane[i & 1] &= Zeroable[i];
15454
15455 // Mask for V8F64: 0/1, 8/9, 2/3, 10/11, 4/5, ..
15456   // Mask for V4F64: 0/1, 4/5, 2/3, 6/7..
15457 ShuffleImm = 0;
15458 bool ShufpdMask = true;
15459 bool CommutableMask = true;
15460 for (int i = 0; i < NumElts; ++i) {
15461 if (Mask[i] == SM_SentinelUndef || ZeroLane[i & 1])
15462 continue;
15463 if (Mask[i] < 0)
15464 return false;
15465 int Val = (i & 6) + NumElts * (i & 1);
15466 int CommutVal = (i & 0xe) + NumElts * ((i & 1) ^ 1);
15467 if (Mask[i] < Val || Mask[i] > Val + 1)
15468 ShufpdMask = false;
15469 if (Mask[i] < CommutVal || Mask[i] > CommutVal + 1)
15470 CommutableMask = false;
15471 ShuffleImm |= (Mask[i] % 2) << i;
15472 }
15473
15474 if (!ShufpdMask && !CommutableMask)
15475 return false;
15476
15477 if (!ShufpdMask && CommutableMask)
15478 std::swap(V1, V2);
15479
15480 ForceV1Zero = ZeroLane[0];
15481 ForceV2Zero = ZeroLane[1];
15482 return true;
15483}
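
// --- Editor's illustrative sketch (not part of X86ISelLowering.cpp) -------
// Compile-ready sketch of the SHUFPD immediate computation above for the
// plain (non-commuted, non-zeroable) case: destination element i must come
// from the 128-bit lane containing element i, taken from V1 for even i and
// V2 for odd i, and bit i of the immediate selects the low or high double
// within that lane. The v4f64 mask <0, 5, 2, 7> yields immediate 0xA. The
// real matcher additionally handles zeroable lanes and commuted operands.
// Function names are the editor's own.
#include <cassert>
#include <vector>

static bool matchShufpdImmSketch(const std::vector<int> &Mask, unsigned &Imm) {
  int NumElts = (int)Mask.size();
  Imm = 0;
  for (int i = 0; i < NumElts; ++i) {
    if (Mask[i] < 0)
      continue;
    int Val = (i & 6) + NumElts * (i & 1);
    if (Mask[i] < Val || Mask[i] > Val + 1)
      return false;
    Imm |= unsigned(Mask[i] % 2) << i;
  }
  return true;
}

static void shufpdImmExample() {
  unsigned Imm = 0;
  bool Matched = matchShufpdImmSketch({0, 5, 2, 7}, Imm);
  assert(Matched && Imm == 0xA);
  (void)Matched;
}
// ---------------------------------------------------------------------------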
15484
15485static SDValue lowerShuffleWithSHUFPD(const SDLoc &DL, MVT VT, SDValue V1,
15486 SDValue V2, ArrayRef<int> Mask,
15487 const APInt &Zeroable,
15488 const X86Subtarget &Subtarget,
15489 SelectionDAG &DAG) {
15490   assert((VT == MVT::v2f64 || VT == MVT::v4f64 || VT == MVT::v8f64) &&
15491          "Unexpected data type for VSHUFPD");
15492
15493 unsigned Immediate = 0;
15494 bool ForceV1Zero = false, ForceV2Zero = false;
15495 if (!matchShuffleWithSHUFPD(VT, V1, V2, ForceV1Zero, ForceV2Zero, Immediate,
15496 Mask, Zeroable))
15497 return SDValue();
15498
15499 // Create a REAL zero vector - ISD::isBuildVectorAllZeros allows UNDEFs.
15500 if (ForceV1Zero)
15501 V1 = getZeroVector(VT, Subtarget, DAG, DL);
15502 if (ForceV2Zero)
15503 V2 = getZeroVector(VT, Subtarget, DAG, DL);
15504
15505 return DAG.getNode(X86ISD::SHUFP, DL, VT, V1, V2,
15506 DAG.getTargetConstant(Immediate, DL, MVT::i8));
15507}
15508
15509 // Look for {0, 8, 16, 24, 32, 40, 48, 56} in the first 8 elements, followed
15510// by zeroable elements in the remaining 24 elements. Turn this into two
15511// vmovqb instructions shuffled together.
15512static SDValue lowerShuffleAsVTRUNCAndUnpack(const SDLoc &DL, MVT VT,
15513 SDValue V1, SDValue V2,
15514 ArrayRef<int> Mask,
15515 const APInt &Zeroable,
15516 SelectionDAG &DAG) {
15517   assert(VT == MVT::v32i8 && "Unexpected type!");
15518
15519 // The first 8 indices should be every 8th element.
15520 if (!isSequentialOrUndefInRange(Mask, 0, 8, 0, 8))
15521 return SDValue();
15522
15523 // Remaining elements need to be zeroable.
15524 if (Zeroable.countLeadingOnes() < (Mask.size() - 8))
15525 return SDValue();
15526
15527 V1 = DAG.getBitcast(MVT::v4i64, V1);
15528 V2 = DAG.getBitcast(MVT::v4i64, V2);
15529
15530 V1 = DAG.getNode(X86ISD::VTRUNC, DL, MVT::v16i8, V1);
15531 V2 = DAG.getNode(X86ISD::VTRUNC, DL, MVT::v16i8, V2);
15532
15533 // The VTRUNCs will put 0s in the upper 12 bytes. Use them to put zeroes in
15534 // the upper bits of the result using an unpckldq.
15535 SDValue Unpack = DAG.getVectorShuffle(MVT::v16i8, DL, V1, V2,
15536 { 0, 1, 2, 3, 16, 17, 18, 19,
15537 4, 5, 6, 7, 20, 21, 22, 23 });
15538 // Insert the unpckldq into a zero vector to widen to v32i8.
15539 return DAG.getNode(ISD::INSERT_SUBVECTOR, DL, MVT::v32i8,
15540 DAG.getConstant(0, DL, MVT::v32i8), Unpack,
15541 DAG.getIntPtrConstant(0, DL));
15542}
15543
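As a rough scalar model of the net byte movement produced by lowerShuffleAsVTRUNCAndUnpack above (plain C++ over byte arrays, not LLVM code, assuming the usual little-endian element layout): the first four result bytes are every 8th byte of V1, the next four are every 8th byte of V2, and the remaining 24 bytes are zero.

// Scalar model of the VTRUNC + unpckldq lowering above: result bytes 0..3
// take V1 bytes {0,8,16,24}, bytes 4..7 take V2 bytes {0,8,16,24}, and the
// rest of the 32-byte result is zero.  Illustration only.
#include <array>
#include <cstdint>
#include <cstdio>

using V32i8 = std::array<uint8_t, 32>;

static V32i8 vtruncAndUnpackModel(const V32i8 &V1, const V32i8 &V2) {
  V32i8 Result{};                    // value-initialized to all zeros
  for (int i = 0; i < 4; ++i) {
    Result[i]     = V1[i * 8];       // every 8th byte of V1
    Result[i + 4] = V2[i * 8];       // every 8th byte of V2
  }
  return Result;
}

int main() {
  V32i8 A{}, B{};
  for (int i = 0; i < 32; ++i) { A[i] = uint8_t(i); B[i] = uint8_t(100 + i); }
  for (uint8_t Byte : vtruncAndUnpackModel(A, B))
    std::printf("%d ", int(Byte));   // 0 8 16 24 100 108 116 124 0 0 ...
  std::printf("\n");
  return 0;
}
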
15544
15545/// Handle lowering of 4-lane 64-bit floating point shuffles.
15546///
15547/// Also ends up handling lowering of 4-lane 64-bit integer shuffles when AVX2
15548/// isn't available.
15549static SDValue lowerV4F64Shuffle(const SDLoc &DL, ArrayRef<int> Mask,
15550 const APInt &Zeroable, SDValue V1, SDValue V2,
15551 const X86Subtarget &Subtarget,
15552 SelectionDAG &DAG) {
15553 assert(V1.getSimpleValueType() == MVT::v4f64 && "Bad operand type!");
15554 assert(V2.getSimpleValueType() == MVT::v4f64 && "Bad operand type!");
15555 assert(Mask.size() == 4 && "Unexpected mask size for v4 shuffle!");
15556
15557 if (SDValue V = lowerV2X128Shuffle(DL, MVT::v4f64, V1, V2, Mask, Zeroable,
15558 Subtarget, DAG))
15559 return V;
15560
15561 if (V2.isUndef()) {
15562 // Check for being able to broadcast a single element.
15563 if (SDValue Broadcast = lowerShuffleAsBroadcast(DL, MVT::v4f64, V1, V2,
15564 Mask, Subtarget, DAG))
15565 return Broadcast;
15566
15567 // Use low duplicate instructions for masks that match their pattern.
15568 if (isShuffleEquivalent(V1, V2, Mask, {0, 0, 2, 2}))
15569 return DAG.getNode(X86ISD::MOVDDUP, DL, MVT::v4f64, V1);
15570
15571 if (!is128BitLaneCrossingShuffleMask(MVT::v4f64, Mask)) {
15572 // Non-half-crossing single input shuffles can be lowered with an
15573 // interleaved permutation.
15574 unsigned VPERMILPMask = (Mask[0] == 1) | ((Mask[1] == 1) << 1) |
15575 ((Mask[2] == 3) << 2) | ((Mask[3] == 3) << 3);
15576 return DAG.getNode(X86ISD::VPERMILPI, DL, MVT::v4f64, V1,
15577 DAG.getTargetConstant(VPERMILPMask, DL, MVT::i8));
15578 }
15579
15580 // With AVX2 we have direct support for this permutation.
15581 if (Subtarget.hasAVX2())
15582 return DAG.getNode(X86ISD::VPERMI, DL, MVT::v4f64, V1,
15583 getV4X86ShuffleImm8ForMask(Mask, DL, DAG));
15584
15585 // Try to create an in-lane repeating shuffle mask and then shuffle the
15586 // results into the target lanes.
15587 if (SDValue V = lowerShuffleAsRepeatedMaskAndLanePermute(
15588 DL, MVT::v4f64, V1, V2, Mask, Subtarget, DAG))
15589 return V;
15590
15591 // Try to permute the lanes and then use a per-lane permute.
15592 if (SDValue V = lowerShuffleAsLanePermuteAndPermute(DL, MVT::v4f64, V1, V2,
15593 Mask, DAG, Subtarget))
15594 return V;
15595
15596 // Otherwise, fall back.
15597 return lowerShuffleAsLanePermuteAndShuffle(DL, MVT::v4f64, V1, V2, Mask,
15598 DAG, Subtarget);
15599 }
15600
15601 // Use dedicated unpack instructions for masks that match their pattern.
15602 if (SDValue V = lowerShuffleWithUNPCK(DL, MVT::v4f64, Mask, V1, V2, DAG))
15603 return V;
15604
15605 if (SDValue Blend = lowerShuffleAsBlend(DL, MVT::v4f64, V1, V2, Mask,
15606 Zeroable, Subtarget, DAG))
15607 return Blend;
15608
15609 // Check if the blend happens to exactly fit that of SHUFPD.
15610 if (SDValue Op = lowerShuffleWithSHUFPD(DL, MVT::v4f64, V1, V2, Mask,
15611 Zeroable, Subtarget, DAG))
15612 return Op;
15613
15614 // If we have one input in place, then we can permute the other input and
15615 // blend the result.
15616 if (isShuffleMaskInputInPlace(0, Mask) || isShuffleMaskInputInPlace(1, Mask))
15617 return lowerShuffleAsDecomposedShuffleBlend(DL, MVT::v4f64, V1, V2, Mask,
15618 Subtarget, DAG);
15619
15620 // Try to create an in-lane repeating shuffle mask and then shuffle the
15621 // results into the target lanes.
15622 if (SDValue V = lowerShuffleAsRepeatedMaskAndLanePermute(
15623 DL, MVT::v4f64, V1, V2, Mask, Subtarget, DAG))
15624 return V;
15625
15626 // Try to simplify this by merging 128-bit lanes to enable a lane-based
15627 // shuffle. However, if we have AVX2 and either input is already in place,
15628 // we will be able to shuffle the other input even across lanes in a single
15629 // instruction, so skip this pattern.
15630 if (!(Subtarget.hasAVX2() && (isShuffleMaskInputInPlace(0, Mask) ||
15631 isShuffleMaskInputInPlace(1, Mask))))
15632 if (SDValue V = lowerShuffleAsLanePermuteAndRepeatedMask(
15633 DL, MVT::v4f64, V1, V2, Mask, Subtarget, DAG))
15634 return V;
15635
15636 // If we have VLX support, we can use VEXPAND.
15637 if (Subtarget.hasVLX())
15638 if (SDValue V = lowerShuffleToEXPAND(DL, MVT::v4f64, Zeroable, Mask, V1, V2,
15639 DAG, Subtarget))
15640 return V;
15641
15642 // If we have AVX2 then we always want to lower with a blend because at v4 we
15643 // can fully permute the elements.
15644 if (Subtarget.hasAVX2())
15645 return lowerShuffleAsDecomposedShuffleBlend(DL, MVT::v4f64, V1, V2, Mask,
15646 Subtarget, DAG);
15647
15648 // Otherwise fall back on generic lowering.
15649 return lowerShuffleAsSplitOrBlend(DL, MVT::v4f64, V1, V2, Mask,
15650 Subtarget, DAG);
15651}
15652
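The VPERMILPD immediate built in the single-input, non-lane-crossing branch of lowerV4F64Shuffle above can be read as: bit i is set exactly when result element i takes the odd double of its 128-bit lane. A small standalone sketch under that reading (plain C++; vpermilpdImmV4 is not an LLVM helper):

// Sketch of the v4f64 VPERMILPD immediate for a single-input shuffle whose
// mask does not cross 128-bit lanes: bit i is set when result element i takes
// the odd double of its lane, matching the comparisons in the code above.
#include <cstdio>

static unsigned vpermilpdImmV4(const int Mask[4]) {
  unsigned Imm = 0;
  for (int i = 0; i < 4; ++i) {
    int OddInLane = 2 * (i / 2) + 1; // 1 for lane 0, 3 for lane 1
    if (Mask[i] == OddInLane)
      Imm |= 1u << i;
  }
  return Imm;
}

int main() {
  int Mask[4] = {1, 0, 3, 3};        // swap lane 0, broadcast high of lane 1
  std::printf("imm = 0x%X\n", vpermilpdImmV4(Mask)); // bits 0,2,3 -> 0xD
  return 0;
}
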
15653/// Handle lowering of 4-lane 64-bit integer shuffles.
15654///
15655/// This routine is only called when we have AVX2 and thus a reasonable
15656/// instruction set for v4i64 shuffling.
15657static SDValue lowerV4I64Shuffle(const SDLoc &DL, ArrayRef<int> Mask,
15658 const APInt &Zeroable, SDValue V1, SDValue V2,
15659 const X86Subtarget &Subtarget,
15660 SelectionDAG &DAG) {
15661 assert(V1.getSimpleValueType() == MVT::v4i64 && "Bad operand type!");
15662 assert(V2.getSimpleValueType() == MVT::v4i64 && "Bad operand type!");
15663 assert(Mask.size() == 4 && "Unexpected mask size for v4 shuffle!");
15664 assert(Subtarget.hasAVX2() && "We can only lower v4i64 with AVX2!");
15665
15666 if (SDValue V = lowerV2X128Shuffle(DL, MVT::v4i64, V1, V2, Mask, Zeroable,
15667 Subtarget, DAG))
15668 return V;
15669
15670 if (SDValue Blend = lowerShuffleAsBlend(DL, MVT::v4i64, V1, V2, Mask,
15671 Zeroable, Subtarget, DAG))
15672 return Blend;
15673
15674 // Check for being able to broadcast a single element.
15675 if (SDValue Broadcast = lowerShuffleAsBroadcast(DL, MVT::v4i64, V1, V2, Mask,
15676 Subtarget, DAG))
15677 return Broadcast;
15678
15679 if (V2.isUndef()) {
15680 // When the shuffle is mirrored between the 128-bit lanes of the unit, we
15681 // can use lower latency instructions that will operate on both lanes.
15682 SmallVector<int, 2> RepeatedMask;
15683 if (is128BitLaneRepeatedShuffleMask(MVT::v4i64, Mask, RepeatedMask)) {
15684 SmallVector<int, 4> PSHUFDMask;
15685 scaleShuffleMask<int>(2, RepeatedMask, PSHUFDMask);
15686 return DAG.getBitcast(
15687 MVT::v4i64,
15688 DAG.getNode(X86ISD::PSHUFD, DL, MVT::v8i32,
15689 DAG.getBitcast(MVT::v8i32, V1),
15690 getV4X86ShuffleImm8ForMask(PSHUFDMask, DL, DAG)));
15691 }
15692
15693 // AVX2 provides a direct instruction for permuting a single input across
15694 // lanes.
15695 return DAG.getNode(X86ISD::VPERMI, DL, MVT::v4i64, V1,
15696 getV4X86ShuffleImm8ForMask(Mask, DL, DAG));
15697 }
15698
15699 // Try to use shift instructions.
15700 if (SDValue Shift = lowerShuffleAsShift(DL, MVT::v4i64, V1, V2, Mask,
15701 Zeroable, Subtarget, DAG))
15702 return Shift;
15703
15704 // If we have VLX support, we can use VALIGN or VEXPAND.
15705 if (Subtarget.hasVLX()) {
15706 if (SDValue Rotate = lowerShuffleAsRotate(DL, MVT::v4i64, V1, V2, Mask,
15707 Subtarget, DAG))
15708 return Rotate;
15709
15710 if (SDValue V = lowerShuffleToEXPAND(DL, MVT::v4i64, Zeroable, Mask, V1, V2,
15711 DAG, Subtarget))
15712 return V;
15713 }
15714
15715 // Try to use PALIGNR.
15716 if (SDValue Rotate = lowerShuffleAsByteRotate(DL, MVT::v4i64, V1, V2, Mask,
15717 Subtarget, DAG))
15718 return Rotate;
15719
15720 // Use dedicated unpack instructions for masks that match their pattern.
15721 if (SDValue V = lowerShuffleWithUNPCK(DL, MVT::v4i64, Mask, V1, V2, DAG))
15722 return V;
15723
15724 // If we have one input in place, then we can permute the other input and
15725 // blend the result.
15726 if (isShuffleMaskInputInPlace(0, Mask) || isShuffleMaskInputInPlace(1, Mask))
15727 return lowerShuffleAsDecomposedShuffleBlend(DL, MVT::v4i64, V1, V2, Mask,
15728 Subtarget, DAG);
15729
15730 // Try to create an in-lane repeating shuffle mask and then shuffle the
15731 // results into the target lanes.
15732 if (SDValue V = lowerShuffleAsRepeatedMaskAndLanePermute(
15733 DL, MVT::v4i64, V1, V2, Mask, Subtarget, DAG))
15734 return V;
15735
15736 // Try to simplify this by merging 128-bit lanes to enable a lane-based
15737 // shuffle. However, if we have AVX2 and either input is already in place,
15738 // we will be able to shuffle the other input even across lanes in a single
15739 // instruction, so skip this pattern.
15740 if (!isShuffleMaskInputInPlace(0, Mask) &&
15741 !isShuffleMaskInputInPlace(1, Mask))
15742 if (SDValue Result = lowerShuffleAsLanePermuteAndRepeatedMask(
15743 DL, MVT::v4i64, V1, V2, Mask, Subtarget, DAG))
15744 return Result;
15745
15746 // Otherwise fall back on generic blend lowering.
15747 return lowerShuffleAsDecomposedShuffleBlend(DL, MVT::v4i64, V1, V2, Mask,
15748 Subtarget, DAG);
15749}
15750
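The PSHUFD path above widens the 2-element repeated i64 mask into a 4-element i32 mask with scaleShuffleMask. A minimal standalone model of that index arithmetic (plain C++; the real scaleShuffleMask helper is templated and takes an output vector, so treat this purely as a sketch):

// Model of scaling a shuffle mask by a factor: each wide element index M
// expands into Scale consecutive narrow indices M*Scale .. M*Scale+Scale-1,
// and undef (-1) expands to undef.
#include <cstdio>
#include <vector>

static std::vector<int> scaleMask(int Scale, const std::vector<int> &Mask) {
  std::vector<int> Scaled;
  Scaled.reserve(Mask.size() * Scale);
  for (int M : Mask)
    for (int s = 0; s < Scale; ++s)
      Scaled.push_back(M < 0 ? -1 : M * Scale + s);
  return Scaled;
}

int main() {
  // Repeated v2i64 mask {1, 0} becomes the v4i32 PSHUFD mask {2, 3, 0, 1}.
  for (int M : scaleMask(2, {1, 0}))
    std::printf("%d ", M);
  std::printf("\n");
  return 0;
}
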
15751/// Handle lowering of 8-lane 32-bit floating point shuffles.
15752///
15753/// Also ends up handling lowering of 8-lane 32-bit integer shuffles when AVX2
15754/// isn't available.
15755static SDValue lowerV8F32Shuffle(const SDLoc &DL, ArrayRef<int> Mask,
15756 const APInt &Zeroable, SDValue V1, SDValue V2,
15757 const X86Subtarget &Subtarget,
15758 SelectionDAG &DAG) {
15759 assert(V1.getSimpleValueType() == MVT::v8f32 && "Bad operand type!");
15760 assert(V2.getSimpleValueType() == MVT::v8f32 && "Bad operand type!");
15761 assert(Mask.size() == 8 && "Unexpected mask size for v8 shuffle!");
15762
15763 if (SDValue Blend = lowerShuffleAsBlend(DL, MVT::v8f32, V1, V2, Mask,
15764 Zeroable, Subtarget, DAG))
15765 return Blend;
15766
15767 // Check for being able to broadcast a single element.
15768 if (SDValue Broadcast = lowerShuffleAsBroadcast(DL, MVT::v8f32, V1, V2, Mask,
15769 Subtarget, DAG))
15770 return Broadcast;
15771
15772 // If the shuffle mask is repeated in each 128-bit lane, we have many more
15773 // options to efficiently lower the shuffle.
15774 SmallVector<int, 4> RepeatedMask;
15775 if (is128BitLaneRepeatedShuffleMask(MVT::v8f32, Mask, RepeatedMask)) {
15776 assert(RepeatedMask.size() == 4 &&
15777        "Repeated masks must be half the mask width!");
15778
15779 // Use even/odd duplicate instructions for masks that match their pattern.
15780 if (isShuffleEquivalent(V1, V2, RepeatedMask, {0, 0, 2, 2}))
15781 return DAG.getNode(X86ISD::MOVSLDUP, DL, MVT::v8f32, V1);
15782 if (isShuffleEquivalent(V1, V2, RepeatedMask, {1, 1, 3, 3}))
15783 return DAG.getNode(X86ISD::MOVSHDUP, DL, MVT::v8f32, V1);
15784
15785 if (V2.isUndef())
15786 return DAG.getNode(X86ISD::VPERMILPI, DL, MVT::v8f32, V1,
15787 getV4X86ShuffleImm8ForMask(RepeatedMask, DL, DAG));
15788
15789 // Use dedicated unpack instructions for masks that match their pattern.
15790 if (SDValue V = lowerShuffleWithUNPCK(DL, MVT::v8f32, Mask, V1, V2, DAG))
15791 return V;
15792
15793 // Otherwise, fall back to a SHUFPS sequence. Here it is important that we
15794 // have already handled any direct blends.
15795 return lowerShuffleWithSHUFPS(DL, MVT::v8f32, RepeatedMask, V1, V2, DAG);
15796 }
15797
15798 // Try to create an in-lane repeating shuffle mask and then shuffle the
15799 // results into the target lanes.
15800 if (SDValue V = lowerShuffleAsRepeatedMaskAndLanePermute(
15801 DL, MVT::v8f32, V1, V2, Mask, Subtarget, DAG))
15802 return V;
15803
15804 // If we have a single input shuffle with different shuffle patterns in the
15805 // two 128-bit lanes, use a variable mask with VPERMILPS.
15806 if (V2.isUndef()) {
15807 SDValue VPermMask = getConstVector(Mask, MVT::v8i32, DAG, DL, true);
15808 if (!is128BitLaneCrossingShuffleMask(MVT::v8f32, Mask))
15809 return DAG.getNode(X86ISD::VPERMILPV, DL, MVT::v8f32, V1, VPermMask);
15810
15811 if (Subtarget.hasAVX2())
15812 return DAG.getNode(X86ISD::VPERMV, DL, MVT::v8f32, VPermMask, V1);
15813
15814 // Otherwise, fall back.
15815 return lowerShuffleAsLanePermuteAndShuffle(DL, MVT::v8f32, V1, V2, Mask,
15816 DAG, Subtarget);
15817 }
15818
15819 // Try to simplify this by merging 128-bit lanes to enable a lane-based
15820 // shuffle.
15821 if (SDValue Result = lowerShuffleAsLanePermuteAndRepeatedMask(
15822 DL, MVT::v8f32, V1, V2, Mask, Subtarget, DAG))
15823 return Result;
15824
15825 // If we have VLX support, we can use VEXPAND.
15826 if (Subtarget.hasVLX())
15827 if (SDValue V = lowerShuffleToEXPAND(DL, MVT::v8f32, Zeroable, Mask, V1, V2,
15828 DAG, Subtarget))
15829 return V;
15830
15831 // For non-AVX512, if the mask is of 16-bit elements within a lane then try to
15832 // split, since after the split we get more efficient code using vpunpcklwd and
15833 // vpunpckhwd instrs than vblend.
15834 if (!Subtarget.hasAVX512() && isUnpackWdShuffleMask(Mask, MVT::v8f32))
15835 if (SDValue V = lowerShuffleAsSplitOrBlend(DL, MVT::v8f32, V1, V2, Mask,
15836 Subtarget, DAG))
15837 return V;
15838
15839 // If we have AVX2 then we always want to lower with a blend because at v8 we
15840 // can fully permute the elements.
15841 if (Subtarget.hasAVX2())
15842 return lowerShuffleAsDecomposedShuffleBlend(DL, MVT::v8f32, V1, V2, Mask,
15843 Subtarget, DAG);
15844
15845 // Otherwise fall back on generic lowering.
15846 return lowerShuffleAsSplitOrBlend(DL, MVT::v8f32, V1, V2, Mask,
15847 Subtarget, DAG);
15848}
15849
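Many of these routines branch on is128BitLaneRepeatedShuffleMask. A rough standalone model of the property it checks (plain C++; simplified in that indices referring to the second source are treated as mismatches and -1 acts as a wildcard, so this only approximates the LLVM helper):

// Rough model of "the mask repeats in every 128-bit lane": every lane must
// use the same within-lane pattern, with -1 acting as a wildcard.
#include <cstdio>
#include <vector>

static bool isLaneRepeated(const std::vector<int> &Mask, int EltsPerLane,
                           std::vector<int> &Repeated) {
  Repeated.assign(EltsPerLane, -1);
  for (int i = 0, e = int(Mask.size()); i < e; ++i) {
    if (Mask[i] < 0)
      continue;                                // undef matches anything
    int LaneBase = (i / EltsPerLane) * EltsPerLane;
    int InLane = Mask[i] - LaneBase;           // offset inside this lane
    if (InLane < 0 || InLane >= EltsPerLane)
      return false;                            // crosses a lane boundary
    int Slot = i % EltsPerLane;
    if (Repeated[Slot] >= 0 && Repeated[Slot] != InLane)
      return false;                            // lanes disagree
    Repeated[Slot] = InLane;
  }
  return true;
}

int main() {
  std::vector<int> Rep;
  // v8f32 mask {1,0,3,2, 5,4,7,6} repeats {1,0,3,2} in both 128-bit lanes.
  std::vector<int> Mask = {1, 0, 3, 2, 5, 4, 7, 6};
  std::printf("%d\n", int(isLaneRepeated(Mask, 4, Rep)));  // prints 1
  return 0;
}
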
15850/// Handle lowering of 8-lane 32-bit integer shuffles.
15851///
15852/// This routine is only called when we have AVX2 and thus a reasonable
15853/// instruction set for v8i32 shuffling.
15854static SDValue lowerV8I32Shuffle(const SDLoc &DL, ArrayRef<int> Mask,
15855 const APInt &Zeroable, SDValue V1, SDValue V2,
15856 const X86Subtarget &Subtarget,
15857 SelectionDAG &DAG) {
15858 assert(V1.getSimpleValueType() == MVT::v8i32 && "Bad operand type!");
15859 assert(V2.getSimpleValueType() == MVT::v8i32 && "Bad operand type!");
15860 assert(Mask.size() == 8 && "Unexpected mask size for v8 shuffle!");
15861 assert(Subtarget.hasAVX2() && "We can only lower v8i32 with AVX2!");
15862
15863 // Whenever we can lower this as a zext, that instruction is strictly faster
15864 // than any alternative. It also allows us to fold memory operands into the
15865 // shuffle in many cases.
15866 if (SDValue ZExt = lowerShuffleAsZeroOrAnyExtend(DL, MVT::v8i32, V1, V2, Mask,
15867 Zeroable, Subtarget, DAG))
15868 return ZExt;
15869
15870 // For non-AVX512, if the mask is of 16-bit elements within a lane then try to
15871 // split, since after the split we get more efficient code than vblend by using
15872 // vpunpcklwd and vpunpckhwd instrs.
15873 if (isUnpackWdShuffleMask(Mask, MVT::v8i32) && !V2.isUndef() &&
15874 !Subtarget.hasAVX512())
15875 if (SDValue V = lowerShuffleAsSplitOrBlend(DL, MVT::v8i32, V1, V2, Mask,
15876 Subtarget, DAG))
15877 return V;
15878
15879 if (SDValue Blend = lowerShuffleAsBlend(DL, MVT::v8i32, V1, V2, Mask,
15880 Zeroable, Subtarget, DAG))
15881 return Blend;
15882
15883 // Check for being able to broadcast a single element.
15884 if (SDValue Broadcast = lowerShuffleAsBroadcast(DL, MVT::v8i32, V1, V2, Mask,
15885 Subtarget, DAG))
15886 return Broadcast;
15887
15888 // If the shuffle mask is repeated in each 128-bit lane we can use more
15889 // efficient instructions that mirror the shuffles across the two 128-bit
15890 // lanes.
15891 SmallVector<int, 4> RepeatedMask;
15892 bool Is128BitLaneRepeatedShuffle =
15893 is128BitLaneRepeatedShuffleMask(MVT::v8i32, Mask, RepeatedMask);
15894 if (Is128BitLaneRepeatedShuffle) {
15895 assert(RepeatedMask.size() == 4 && "Unexpected repeated mask size!");
15896 if (V2.isUndef())
15897 return DAG.getNode(X86ISD::PSHUFD, DL, MVT::v8i32, V1,
15898 getV4X86ShuffleImm8ForMask(RepeatedMask, DL, DAG));
15899
15900 // Use dedicated unpack instructions for masks that match their pattern.
15901 if (SDValue V = lowerShuffleWithUNPCK(DL, MVT::v8i32, Mask, V1, V2, DAG))
15902 return V;
15903 }
15904
15905 // Try to use shift instructions.
15906 if (SDValue Shift = lowerShuffleAsShift(DL, MVT::v8i32, V1, V2, Mask,
15907 Zeroable, Subtarget, DAG))
15908 return Shift;
15909
15910 // If we have VLX support, we can use VALIGN or EXPAND.
15911 if (Subtarget.hasVLX()) {
15912 if (SDValue Rotate = lowerShuffleAsRotate(DL, MVT::v8i32, V1, V2, Mask,
15913 Subtarget, DAG))
15914 return Rotate;
15915
15916 if (SDValue V = lowerShuffleToEXPAND(DL, MVT::v8i32, Zeroable, Mask, V1, V2,
15917 DAG, Subtarget))
15918 return V;
15919 }
15920
15921 // Try to use byte rotation instructions.
15922 if (SDValue Rotate = lowerShuffleAsByteRotate(DL, MVT::v8i32, V1, V2, Mask,
15923 Subtarget, DAG))
15924 return Rotate;
15925
15926 // Try to create an in-lane repeating shuffle mask and then shuffle the
15927 // results into the target lanes.
15928 if (SDValue V = lowerShuffleAsRepeatedMaskAndLanePermute(
15929 DL, MVT::v8i32, V1, V2, Mask, Subtarget, DAG))
15930 return V;
15931
15932 // If the shuffle patterns aren't repeated but it is a single input, directly
15933 // generate a cross-lane VPERMD instruction.
15934 if (V2.isUndef()) {
15935 SDValue VPermMask = getConstVector(Mask, MVT::v8i32, DAG, DL, true);
15936 return DAG.getNode(X86ISD::VPERMV, DL, MVT::v8i32, VPermMask, V1);
15937 }
15938
15939 // Assume that a single SHUFPS is faster than an alternative sequence of
15940 // multiple instructions (even if the CPU has a domain penalty).
15941 // If some CPU is harmed by the domain switch, we can fix it in a later pass.
15942 if (Is128BitLaneRepeatedShuffle && isSingleSHUFPSMask(RepeatedMask)) {
15943 SDValue CastV1 = DAG.getBitcast(MVT::v8f32, V1);
15944 SDValue CastV2 = DAG.getBitcast(MVT::v8f32, V2);
15945 SDValue ShufPS = lowerShuffleWithSHUFPS(DL, MVT::v8f32, RepeatedMask,
15946 CastV1, CastV2, DAG);
15947 return DAG.getBitcast(MVT::v8i32, ShufPS);
15948 }
15949
15950 // Try to simplify this by merging 128-bit lanes to enable a lane-based
15951 // shuffle.
15952 if (SDValue Result = lowerShuffleAsLanePermuteAndRepeatedMask(
15953 DL, MVT::v8i32, V1, V2, Mask, Subtarget, DAG))
15954 return Result;
15955
15956 // Otherwise fall back on generic blend lowering.
15957 return lowerShuffleAsDecomposedShuffleBlend(DL, MVT::v8i32, V1, V2, Mask,
15958 Subtarget, DAG);
15959}
15960
15961/// Handle lowering of 16-lane 16-bit integer shuffles.
15962///
15963/// This routine is only called when we have AVX2 and thus a reasonable
15964/// instruction set for v16i16 shuffling.
15965static SDValue lowerV16I16Shuffle(const SDLoc &DL, ArrayRef<int> Mask,
15966 const APInt &Zeroable, SDValue V1, SDValue V2,
15967 const X86Subtarget &Subtarget,
15968 SelectionDAG &DAG) {
15969 assert(V1.getSimpleValueType() == MVT::v16i16 && "Bad operand type!");
15970 assert(V2.getSimpleValueType() == MVT::v16i16 && "Bad operand type!");
15971 assert(Mask.size() == 16 && "Unexpected mask size for v16 shuffle!");
15972 assert(Subtarget.hasAVX2() && "We can only lower v16i16 with AVX2!");
15973
15974 // Whenever we can lower this as a zext, that instruction is strictly faster
15975 // than any alternative. It also allows us to fold memory operands into the
15976 // shuffle in many cases.
15977 if (SDValue ZExt = lowerShuffleAsZeroOrAnyExtend(
15978 DL, MVT::v16i16, V1, V2, Mask, Zeroable, Subtarget, DAG))
15979 return ZExt;
15980
15981 // Check for being able to broadcast a single element.
15982 if (SDValue Broadcast = lowerShuffleAsBroadcast(DL, MVT::v16i16, V1, V2, Mask,
15983 Subtarget, DAG))
15984 return Broadcast;
15985
15986 if (SDValue Blend = lowerShuffleAsBlend(DL, MVT::v16i16, V1, V2, Mask,
15987 Zeroable, Subtarget, DAG))
15988 return Blend;
15989
15990 // Use dedicated unpack instructions for masks that match their pattern.
15991 if (SDValue V = lowerShuffleWithUNPCK(DL, MVT::v16i16, Mask, V1, V2, DAG))
15992 return V;
15993
15994 // Use dedicated pack instructions for masks that match their pattern.
15995 if (SDValue V = lowerShuffleWithPACK(DL, MVT::v16i16, Mask, V1, V2, DAG,
15996 Subtarget))
15997 return V;
15998
15999 // Try to use shift instructions.
16000 if (SDValue Shift = lowerShuffleAsShift(DL, MVT::v16i16, V1, V2, Mask,
16001 Zeroable, Subtarget, DAG))
16002 return Shift;
16003
16004 // Try to use byte rotation instructions.
16005 if (SDValue Rotate = lowerShuffleAsByteRotate(DL, MVT::v16i16, V1, V2, Mask,
16006 Subtarget, DAG))
16007 return Rotate;
16008
16009 // Try to create an in-lane repeating shuffle mask and then shuffle the
16010 // results into the target lanes.
16011 if (SDValue V = lowerShuffleAsRepeatedMaskAndLanePermute(
16012 DL, MVT::v16i16, V1, V2, Mask, Subtarget, DAG))
16013 return V;
16014
16015 if (V2.isUndef()) {
16016 // There are no generalized cross-lane shuffle operations available on i16
16017 // element types.
16018 if (is128BitLaneCrossingShuffleMask(MVT::v16i16, Mask)) {
16019 if (SDValue V = lowerShuffleAsLanePermuteAndPermute(
16020 DL, MVT::v16i16, V1, V2, Mask, DAG, Subtarget))
16021 return V;
16022
16023 return lowerShuffleAsLanePermuteAndShuffle(DL, MVT::v16i16, V1, V2, Mask,
16024 DAG, Subtarget);
16025 }
16026
16027 SmallVector<int, 8> RepeatedMask;
16028 if (is128BitLaneRepeatedShuffleMask(MVT::v16i16, Mask, RepeatedMask)) {
16029 // As this is a single-input shuffle, the repeated mask should be
16030 // a strictly valid v8i16 mask that we can pass through to the v8i16
16031 // lowering to handle even the v16 case.
16032 return lowerV8I16GeneralSingleInputShuffle(
16033 DL, MVT::v16i16, V1, RepeatedMask, Subtarget, DAG);
16034 }
16035 }
16036
16037 if (SDValue PSHUFB = lowerShuffleWithPSHUFB(DL, MVT::v16i16, Mask, V1, V2,
16038 Zeroable, Subtarget, DAG))
16039 return PSHUFB;
16040
16041 // AVX512BWVL can lower to VPERMW.
16042 if (Subtarget.hasBWI() && Subtarget.hasVLX())
16043 return lowerShuffleWithPERMV(DL, MVT::v16i16, Mask, V1, V2, DAG);
16044
16045 // Try to simplify this by merging 128-bit lanes to enable a lane-based
16046 // shuffle.
16047 if (SDValue Result = lowerShuffleAsLanePermuteAndRepeatedMask(
16048 DL, MVT::v16i16, V1, V2, Mask, Subtarget, DAG))
16049 return Result;
16050
16051 // Try to permute the lanes and then use a per-lane permute.
16052 if (SDValue V = lowerShuffleAsLanePermuteAndPermute(
16053 DL, MVT::v16i16, V1, V2, Mask, DAG, Subtarget))
16054 return V;
16055
16056 // Otherwise fall back on generic lowering.
16057 return lowerShuffleAsSplitOrBlend(DL, MVT::v16i16, V1, V2, Mask,
16058 Subtarget, DAG);
16059}
16060
16061/// Handle lowering of 32-lane 8-bit integer shuffles.
16062///
16063/// This routine is only called when we have AVX2 and thus a reasonable
16064/// instruction set for v32i8 shuffling.
16065static SDValue lowerV32I8Shuffle(const SDLoc &DL, ArrayRef<int> Mask,
16066 const APInt &Zeroable, SDValue V1, SDValue V2,
16067 const X86Subtarget &Subtarget,
16068 SelectionDAG &DAG) {
16069 assert(V1.getSimpleValueType() == MVT::v32i8 && "Bad operand type!");
16070 assert(V2.getSimpleValueType() == MVT::v32i8 && "Bad operand type!");
16071 assert(Mask.size() == 32 && "Unexpected mask size for v32 shuffle!");
16072 assert(Subtarget.hasAVX2() && "We can only lower v32i8 with AVX2!");
16073
16074 // Whenever we can lower this as a zext, that instruction is strictly faster
16075 // than any alternative. It also allows us to fold memory operands into the
16076 // shuffle in many cases.
16077 if (SDValue ZExt = lowerShuffleAsZeroOrAnyExtend(DL, MVT::v32i8, V1, V2, Mask,
16078 Zeroable, Subtarget, DAG))
16079 return ZExt;
16080
16081 // Check for being able to broadcast a single element.
16082 if (SDValue Broadcast = lowerShuffleAsBroadcast(DL, MVT::v32i8, V1, V2, Mask,
16083 Subtarget, DAG))
16084 return Broadcast;
16085
16086 if (SDValue Blend = lowerShuffleAsBlend(DL, MVT::v32i8, V1, V2, Mask,
16087 Zeroable, Subtarget, DAG))
16088 return Blend;
16089
16090 // Use dedicated unpack instructions for masks that match their pattern.
16091 if (SDValue V = lowerShuffleWithUNPCK(DL, MVT::v32i8, Mask, V1, V2, DAG))
16092 return V;
16093
16094 // Use dedicated pack instructions for masks that match their pattern.
16095 if (SDValue V = lowerShuffleWithPACK(DL, MVT::v32i8, Mask, V1, V2, DAG,
16096 Subtarget))
16097 return V;
16098
16099 // Try to use shift instructions.
16100 if (SDValue Shift = lowerShuffleAsShift(DL, MVT::v32i8, V1, V2, Mask,
16101 Zeroable, Subtarget, DAG))
16102 return Shift;
16103
16104 // Try to use byte rotation instructions.
16105 if (SDValue Rotate = lowerShuffleAsByteRotate(DL, MVT::v32i8, V1, V2, Mask,
16106 Subtarget, DAG))
16107 return Rotate;
16108
16109 // Try to create an in-lane repeating shuffle mask and then shuffle the
16110 // results into the target lanes.
16111 if (SDValue V = lowerShuffleAsRepeatedMaskAndLanePermute(
16112 DL, MVT::v32i8, V1, V2, Mask, Subtarget, DAG))
16113 return V;
16114
16115 // There are no generalized cross-lane shuffle operations available on i8
16116 // element types.
16117 if (V2.isUndef() && is128BitLaneCrossingShuffleMask(MVT::v32i8, Mask)) {
16118 if (SDValue V = lowerShuffleAsLanePermuteAndPermute(
16119 DL, MVT::v32i8, V1, V2, Mask, DAG, Subtarget))
16120 return V;
16121
16122 return lowerShuffleAsLanePermuteAndShuffle(DL, MVT::v32i8, V1, V2, Mask,
16123 DAG, Subtarget);
16124 }
16125
16126 if (SDValue PSHUFB = lowerShuffleWithPSHUFB(DL, MVT::v32i8, Mask, V1, V2,
16127 Zeroable, Subtarget, DAG))
16128 return PSHUFB;
16129
16130 // AVX512VBMIVL can lower to VPERMB.
16131 if (Subtarget.hasVBMI() && Subtarget.hasVLX())
16132 return lowerShuffleWithPERMV(DL, MVT::v32i8, Mask, V1, V2, DAG);
16133
16134 // Try to simplify this by merging 128-bit lanes to enable a lane-based
16135 // shuffle.
16136 if (SDValue Result = lowerShuffleAsLanePermuteAndRepeatedMask(
16137 DL, MVT::v32i8, V1, V2, Mask, Subtarget, DAG))
16138 return Result;
16139
16140 // Try to permute the lanes and then use a per-lane permute.
16141 if (SDValue V = lowerShuffleAsLanePermuteAndPermute(
16142 DL, MVT::v32i8, V1, V2, Mask, DAG, Subtarget))
16143 return V;
16144
16145 // Look for {0, 8, 16, 24, 32, 40, 48, 56} in the first 8 elements, followed
16146 // by zeroable elements in the remaining 24 elements. Turn this into two
16147 // vmovqb instructions shuffled together.
16148 if (Subtarget.hasVLX())
16149 if (SDValue V = lowerShuffleAsVTRUNCAndUnpack(DL, MVT::v32i8, V1, V2,
16150 Mask, Zeroable, DAG))
16151 return V;
16152
16153 // Otherwise fall back on generic lowering.
16154 return lowerShuffleAsSplitOrBlend(DL, MVT::v32i8, V1, V2, Mask,
16155 Subtarget, DAG);
16156}
16157
16158/// High-level routine to lower various 256-bit x86 vector shuffles.
16159///
16160/// This routine either breaks down the specific type of a 256-bit x86 vector
16161/// shuffle or splits it into two 128-bit shuffles and fuses the results back
16162/// together based on the available instructions.
16163static SDValue lower256BitShuffle(const SDLoc &DL, ArrayRef<int> Mask, MVT VT,
16164 SDValue V1, SDValue V2, const APInt &Zeroable,
16165 const X86Subtarget &Subtarget,
16166 SelectionDAG &DAG) {
16167 // If we have a single input to the zero element, insert that into V1 if we
16168 // can do so cheaply.
16169 int NumElts = VT.getVectorNumElements();
16170 int NumV2Elements = count_if(Mask, [NumElts](int M) { return M >= NumElts; });
16171
16172 if (NumV2Elements == 1 && Mask[0] >= NumElts)
16173 if (SDValue Insertion = lowerShuffleAsElementInsertion(
16174 DL, VT, V1, V2, Mask, Zeroable, Subtarget, DAG))
16175 return Insertion;
16176
16177 // Handle special cases where the lower or upper half is UNDEF.
16178 if (SDValue V =
16179 lowerShuffleWithUndefHalf(DL, VT, V1, V2, Mask, Subtarget, DAG))
16180 return V;
16181
16182 // There is a really nice hard cut-over between AVX1 and AVX2 that means we
16183 // can check for those subtargets here and avoid much of the subtarget
16184 // querying in the per-vector-type lowering routines. With AVX1 we have
16185 // essentially *zero* ability to manipulate a 256-bit vector with integer
16186 // types. Since we'll use floating point types there eventually, just
16187 // immediately cast everything to a float and operate entirely in that domain.
16188 if (VT.isInteger() && !Subtarget.hasAVX2()) {
16189 int ElementBits = VT.getScalarSizeInBits();
16190 if (ElementBits < 32) {
16191 // No floating point type available; if we can't use the bit operations
16192 // for masking/blending then decompose into 128-bit vectors.
16193 if (SDValue V = lowerShuffleAsBitMask(DL, VT, V1, V2, Mask, Zeroable,
16194 Subtarget, DAG))
16195 return V;
16196 if (SDValue V = lowerShuffleAsBitBlend(DL, VT, V1, V2, Mask, DAG))
16197 return V;
16198 return splitAndLowerShuffle(DL, VT, V1, V2, Mask, DAG);
16199 }
16200
16201 MVT FpVT = MVT::getVectorVT(MVT::getFloatingPointVT(ElementBits),
16202 VT.getVectorNumElements());
16203 V1 = DAG.getBitcast(FpVT, V1);
16204 V2 = DAG.getBitcast(FpVT, V2);
16205 return DAG.getBitcast(VT, DAG.getVectorShuffle(FpVT, DL, V1, V2, Mask));
16206 }
16207
16208 switch (VT.SimpleTy) {
16209 case MVT::v4f64:
16210 return lowerV4F64Shuffle(DL, Mask, Zeroable, V1, V2, Subtarget, DAG);
16211 case MVT::v4i64:
16212 return lowerV4I64Shuffle(DL, Mask, Zeroable, V1, V2, Subtarget, DAG);
16213 case MVT::v8f32:
16214 return lowerV8F32Shuffle(DL, Mask, Zeroable, V1, V2, Subtarget, DAG);
16215 case MVT::v8i32:
16216 return lowerV8I32Shuffle(DL, Mask, Zeroable, V1, V2, Subtarget, DAG);
16217 case MVT::v16i16:
16218 return lowerV16I16Shuffle(DL, Mask, Zeroable, V1, V2, Subtarget, DAG);
16219 case MVT::v32i8:
16220 return lowerV32I8Shuffle(DL, Mask, Zeroable, V1, V2, Subtarget, DAG);
16221
16222 default:
16223 llvm_unreachable("Not a valid 256-bit x86 vector type!");
16224 }
16225}
16226
16227/// Try to lower a vector shuffle as a 128-bit shuffles.
16228static SDValue lowerV4X128Shuffle(const SDLoc &DL, MVT VT, ArrayRef<int> Mask,
16229 const APInt &Zeroable, SDValue V1, SDValue V2,
16230 const X86Subtarget &Subtarget,
16231 SelectionDAG &DAG) {
16232 assert(VT.getScalarSizeInBits() == 64 &&
16233        "Unexpected element type size for 128bit shuffle.");
16234
16235 // Handling a 256-bit vector requires VLX, and the function
16236 // lowerV2X128VectorShuffle() is most probably the better solution for it.
16237 assert(VT.is512BitVector() && "Unexpected vector size for 512bit shuffle.");
16238
16239 // TODO - use Zeroable like we do for lowerV2X128VectorShuffle?
16240 SmallVector<int, 4> WidenedMask;
16241 if (!canWidenShuffleElements(Mask, WidenedMask))
16242 return SDValue();
16243
16244 // Try to use an insert into a zero vector.
16245 if (WidenedMask[0] == 0 && (Zeroable & 0xf0) == 0xf0 &&
16246 (WidenedMask[1] == 1 || (Zeroable & 0x0c) == 0x0c)) {
16247 unsigned NumElts = ((Zeroable & 0x0c) == 0x0c) ? 2 : 4;
16248 MVT SubVT = MVT::getVectorVT(VT.getVectorElementType(), NumElts);
16249 SDValue LoV = DAG.getNode(ISD::EXTRACT_SUBVECTOR, DL, SubVT, V1,
16250 DAG.getIntPtrConstant(0, DL));
16251 return DAG.getNode(ISD::INSERT_SUBVECTOR, DL, VT,
16252 getZeroVector(VT, Subtarget, DAG, DL), LoV,
16253 DAG.getIntPtrConstant(0, DL));
16254 }
16255
16256 // Check for patterns which can be matched with a single insert of a 256-bit
16257 // subvector.
16258 bool OnlyUsesV1 = isShuffleEquivalent(V1, V2, Mask,
16259 {0, 1, 2, 3, 0, 1, 2, 3});
16260 if (OnlyUsesV1 || isShuffleEquivalent(V1, V2, Mask,
16261 {0, 1, 2, 3, 8, 9, 10, 11})) {
16262 MVT SubVT = MVT::getVectorVT(VT.getVectorElementType(), 4);
16263 SDValue SubVec = DAG.getNode(ISD::EXTRACT_SUBVECTOR, DL, SubVT,
16264 OnlyUsesV1 ? V1 : V2,
16265 DAG.getIntPtrConstant(0, DL));
16266 return DAG.getNode(ISD::INSERT_SUBVECTOR, DL, VT, V1, SubVec,
16267 DAG.getIntPtrConstant(4, DL));
16268 }
16269
16270 assert(WidenedMask.size() == 4);
16271
16272 // See if this is an insertion of the lower 128-bits of V2 into V1.
16273 bool IsInsert = true;
16274 int V2Index = -1;
16275 for (int i = 0; i < 4; ++i) {
16276 assert(WidenedMask[i] >= -1);
16277 if (WidenedMask[i] < 0)
16278 continue;
16279
16280 // Make sure all V1 subvectors are in place.
16281 if (WidenedMask[i] < 4) {
16282 if (WidenedMask[i] != i) {
16283 IsInsert = false;
16284 break;
16285 }
16286 } else {
16287 // Make sure we only have a single V2 index and it's the lowest 128-bits.
16288 if (V2Index >= 0 || WidenedMask[i] != 4) {
16289 IsInsert = false;
16290 break;
16291 }
16292 V2Index = i;
16293 }
16294 }
16295 if (IsInsert && V2Index >= 0) {
16296 MVT SubVT = MVT::getVectorVT(VT.getVectorElementType(), 2);
16297 SDValue Subvec = DAG.getNode(ISD::EXTRACT_SUBVECTOR, DL, SubVT, V2,
16298 DAG.getIntPtrConstant(0, DL));
16299 return insert128BitVector(V1, Subvec, V2Index * 2, DAG, DL);
16300 }
16301
16302 // Try to lower to vshuf64x2/vshuf32x4.
16303 SDValue Ops[2] = {DAG.getUNDEF(VT), DAG.getUNDEF(VT)};
16304 unsigned PermMask = 0;
16305 // Ensure elements came from the same Op.
16306 for (int i = 0; i < 4; ++i) {
16307 assert(WidenedMask[i] >= -1);
16308 if (WidenedMask[i] < 0)
16309 continue;
16310
16311 SDValue Op = WidenedMask[i] >= 4 ? V2 : V1;
16312 unsigned OpIndex = i / 2;
16313 if (Ops[OpIndex].isUndef())
16314 Ops[OpIndex] = Op;
16315 else if (Ops[OpIndex] != Op)
16316 return SDValue();
16317
16318 // Convert the 128-bit shuffle mask selection values into 128-bit selection
16319 // bits defined by a vshuf64x2 instruction's immediate control byte.
16320 PermMask |= (WidenedMask[i] % 4) << (i * 2);
16321 }
16322
16323 return DAG.getNode(X86ISD::SHUF128, DL, VT, Ops[0], Ops[1],
16324 DAG.getTargetConstant(PermMask, DL, MVT::i8));
16325}
16326
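The vshuf64x2-style control byte built at the end of lowerV4X128Shuffle above packs two bits per 128-bit result chunk, after checking that both chunks of each destination 256-bit half draw from the same operand. A standalone sketch of that encoding (plain C++; buildShuf128Imm is a made-up name and undef chunks are simply left encoded as zero):

// Sketch of building the vshuf64x2 immediate from a widened 4-element mask:
// two bits per 128-bit result chunk select one of the four source chunks, and
// both chunks of each destination 256-bit half must come from the same input.
#include <cstdio>

static bool buildShuf128Imm(const int WidenedMask[4], unsigned &PermMask) {
  PermMask = 0;
  int HalfSrc[2] = {-1, -1};         // which input (0 = V1, 1 = V2) feeds each half
  for (int i = 0; i < 4; ++i) {
    if (WidenedMask[i] < 0)
      continue;                      // undef chunk: any encoding works
    int Src = WidenedMask[i] >= 4 ? 1 : 0;
    int Half = i / 2;
    if (HalfSrc[Half] >= 0 && HalfSrc[Half] != Src)
      return false;                  // this half would need both inputs
    HalfSrc[Half] = Src;
    PermMask |= (WidenedMask[i] % 4) << (i * 2);
  }
  return true;
}

int main() {
  int Mask[4] = {2, 3, 4, 5};        // high half of V1, then low half of V2
  unsigned Imm;
  if (buildShuf128Imm(Mask, Imm))
    std::printf("imm = 0x%X\n", Imm); // chunk selects 2,3,0,1 -> 0x4E
  return 0;
}
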
16327/// Handle lowering of 8-lane 64-bit floating point shuffles.
16328static SDValue lowerV8F64Shuffle(const SDLoc &DL, ArrayRef<int> Mask,
16329 const APInt &Zeroable, SDValue V1, SDValue V2,
16330 const X86Subtarget &Subtarget,
16331 SelectionDAG &DAG) {
16332 assert(V1.getSimpleValueType() == MVT::v8f64 && "Bad operand type!");
16333 assert(V2.getSimpleValueType() == MVT::v8f64 && "Bad operand type!");
16334 assert(Mask.size() == 8 && "Unexpected mask size for v8 shuffle!");
16335
16336 if (V2.isUndef()) {
16337 // Use low duplicate instructions for masks that match their pattern.
16338 if (isShuffleEquivalent(V1, V2, Mask, {0, 0, 2, 2, 4, 4, 6, 6}))
16339 return DAG.getNode(X86ISD::MOVDDUP, DL, MVT::v8f64, V1);
16340
16341 if (!is128BitLaneCrossingShuffleMask(MVT::v8f64, Mask)) {
16342 // Non-half-crossing single input shuffles can be lowered with an
16343 // interleaved permutation.
16344 unsigned VPERMILPMask = (Mask[0] == 1) | ((Mask[1] == 1) << 1) |
16345 ((Mask[2] == 3) << 2) | ((Mask[3] == 3) << 3) |
16346 ((Mask[4] == 5) << 4) | ((Mask[5] == 5) << 5) |
16347 ((Mask[6] == 7) << 6) | ((Mask[7] == 7) << 7);
16348 return DAG.getNode(X86ISD::VPERMILPI, DL, MVT::v8f64, V1,
16349 DAG.getTargetConstant(VPERMILPMask, DL, MVT::i8));
16350 }
16351
16352 SmallVector<int, 4> RepeatedMask;
16353 if (is256BitLaneRepeatedShuffleMask(MVT::v8f64, Mask, RepeatedMask))
16354 return DAG.getNode(X86ISD::VPERMI, DL, MVT::v8f64, V1,
16355 getV4X86ShuffleImm8ForMask(RepeatedMask, DL, DAG));
16356 }
16357
16358 if (SDValue Shuf128 = lowerV4X128Shuffle(DL, MVT::v8f64, Mask, Zeroable, V1,
16359 V2, Subtarget, DAG))
16360 return Shuf128;
16361
16362 if (SDValue Unpck = lowerShuffleWithUNPCK(DL, MVT::v8f64, Mask, V1, V2, DAG))
16363 return Unpck;
16364
16365 // Check if the blend happens to exactly fit that of SHUFPD.
16366 if (SDValue Op = lowerShuffleWithSHUFPD(DL, MVT::v8f64, V1, V2, Mask,
16367 Zeroable, Subtarget, DAG))
16368 return Op;
16369
16370 if (SDValue V = lowerShuffleToEXPAND(DL, MVT::v8f64, Zeroable, Mask, V1, V2,
16371 DAG, Subtarget))
16372 return V;
16373
16374 if (SDValue Blend = lowerShuffleAsBlend(DL, MVT::v8f64, V1, V2, Mask,
16375 Zeroable, Subtarget, DAG))
16376 return Blend;
16377
16378 return lowerShuffleWithPERMV(DL, MVT::v8f64, Mask, V1, V2, DAG);
16379}
16380
16381/// Handle lowering of 16-lane 32-bit floating point shuffles.
16382static SDValue lowerV16F32Shuffle(const SDLoc &DL, ArrayRef<int> Mask,
16383 const APInt &Zeroable, SDValue V1, SDValue V2,
16384 const X86Subtarget &Subtarget,
16385 SelectionDAG &DAG) {
16386 assert(V1.getSimpleValueType() == MVT::v16f32 && "Bad operand type!");
16387 assert(V2.getSimpleValueType() == MVT::v16f32 && "Bad operand type!");
16388 assert(Mask.size() == 16 && "Unexpected mask size for v16 shuffle!");
16389
16390 // If the shuffle mask is repeated in each 128-bit lane, we have many more
16391 // options to efficiently lower the shuffle.
16392 SmallVector<int, 4> RepeatedMask;
16393 if (is128BitLaneRepeatedShuffleMask(MVT::v16f32, Mask, RepeatedMask)) {
16394 assert(RepeatedMask.size() == 4 && "Unexpected repeated mask size!");
16395
16396 // Use even/odd duplicate instructions for masks that match their pattern.
16397 if (isShuffleEquivalent(V1, V2, RepeatedMask, {0, 0, 2, 2}))
16398 return DAG.getNode(X86ISD::MOVSLDUP, DL, MVT::v16f32, V1);
16399 if (isShuffleEquivalent(V1, V2, RepeatedMask, {1, 1, 3, 3}))
16400 return DAG.getNode(X86ISD::MOVSHDUP, DL, MVT::v16f32, V1);
16401
16402 if (V2.isUndef())
16403 return DAG.getNode(X86ISD::VPERMILPI, DL, MVT::v16f32, V1,
16404 getV4X86ShuffleImm8ForMask(RepeatedMask, DL, DAG));
16405
16406 // Use dedicated unpack instructions for masks that match their pattern.
16407 if (SDValue V = lowerShuffleWithUNPCK(DL, MVT::v16f32, Mask, V1, V2, DAG))
16408 return V;
16409
16410 if (SDValue Blend = lowerShuffleAsBlend(DL, MVT::v16f32, V1, V2, Mask,
16411 Zeroable, Subtarget, DAG))
16412 return Blend;
16413
16414 // Otherwise, fall back to a SHUFPS sequence.
16415 return lowerShuffleWithSHUFPS(DL, MVT::v16f32, RepeatedMask, V1, V2, DAG);
16416 }
16417
16418 // If we have a single input shuffle with different shuffle patterns in the
16419 // 128-bit lanes and that doesn't cross lanes, use a variable mask VPERMILPS.
16420 if (V2.isUndef() &&
16421 !is128BitLaneCrossingShuffleMask(MVT::v16f32, Mask)) {
16422 SDValue VPermMask = getConstVector(Mask, MVT::v16i32, DAG, DL, true);
16423 return DAG.getNode(X86ISD::VPERMILPV, DL, MVT::v16f32, V1, VPermMask);
16424 }
16425
16426 // If we have AVX512F support, we can use VEXPAND.
16427 if (SDValue V = lowerShuffleToEXPAND(DL, MVT::v16f32, Zeroable, Mask,
16428 V1, V2, DAG, Subtarget))
16429 return V;
16430
16431 return lowerShuffleWithPERMV(DL, MVT::v16f32, Mask, V1, V2, DAG);
16432}
16433
16434/// Handle lowering of 8-lane 64-bit integer shuffles.
16435static SDValue lowerV8I64Shuffle(const SDLoc &DL, ArrayRef<int> Mask,
16436 const APInt &Zeroable, SDValue V1, SDValue V2,
16437 const X86Subtarget &Subtarget,
16438 SelectionDAG &DAG) {
16439 assert(V1.getSimpleValueType() == MVT::v8i64 && "Bad operand type!");
16440 assert(V2.getSimpleValueType() == MVT::v8i64 && "Bad operand type!");
16441 assert(Mask.size() == 8 && "Unexpected mask size for v8 shuffle!");
16442
16443 if (V2.isUndef()) {
16444 // When the shuffle is mirrored between the 128-bit lanes of the unit, we
16445 // can use lower latency instructions that will operate on all four
16446 // 128-bit lanes.
16447 SmallVector<int, 2> Repeated128Mask;
16448 if (is128BitLaneRepeatedShuffleMask(MVT::v8i64, Mask, Repeated128Mask)) {
16449 SmallVector<int, 4> PSHUFDMask;
16450 scaleShuffleMask<int>(2, Repeated128Mask, PSHUFDMask);
16451 return DAG.getBitcast(
16452 MVT::v8i64,
16453 DAG.getNode(X86ISD::PSHUFD, DL, MVT::v16i32,
16454 DAG.getBitcast(MVT::v16i32, V1),
16455 getV4X86ShuffleImm8ForMask(PSHUFDMask, DL, DAG)));
16456 }
16457
16458 SmallVector<int, 4> Repeated256Mask;
16459 if (is256BitLaneRepeatedShuffleMask(MVT::v8i64, Mask, Repeated256Mask))
16460 return DAG.getNode(X86ISD::VPERMI, DL, MVT::v8i64, V1,
16461 getV4X86ShuffleImm8ForMask(Repeated256Mask, DL, DAG));
16462 }
16463
16464 if (SDValue Shuf128 = lowerV4X128Shuffle(DL, MVT::v8i64, Mask, Zeroable, V1,
16465 V2, Subtarget, DAG))
16466 return Shuf128;
16467
16468 // Try to use shift instructions.
16469 if (SDValue Shift = lowerShuffleAsShift(DL, MVT::v8i64, V1, V2, Mask,
16470 Zeroable, Subtarget, DAG))
16471 return Shift;
16472
16473 // Try to use VALIGN.
16474 if (SDValue Rotate = lowerShuffleAsRotate(DL, MVT::v8i64, V1, V2, Mask,
16475 Subtarget, DAG))
16476 return Rotate;
16477
16478 // Try to use PALIGNR.
16479 if (SDValue Rotate = lowerShuffleAsByteRotate(DL, MVT::v8i64, V1, V2, Mask,
16480 Subtarget, DAG))
16481 return Rotate;
16482
16483 if (SDValue Unpck = lowerShuffleWithUNPCK(DL, MVT::v8i64, Mask, V1, V2, DAG))
16484 return Unpck;
16485 // If we have AVX512F support, we can use VEXPAND.
16486 if (SDValue V = lowerShuffleToEXPAND(DL, MVT::v8i64, Zeroable, Mask, V1, V2,
16487 DAG, Subtarget))
16488 return V;
16489
16490 if (SDValue Blend = lowerShuffleAsBlend(DL, MVT::v8i64, V1, V2, Mask,
16491 Zeroable, Subtarget, DAG))
16492 return Blend;
16493
16494 return lowerShuffleWithPERMV(DL, MVT::v8i64, Mask, V1, V2, DAG);
16495}
16496
16497/// Handle lowering of 16-lane 32-bit integer shuffles.
16498static SDValue lowerV16I32Shuffle(const SDLoc &DL, ArrayRef<int> Mask,
16499 const APInt &Zeroable, SDValue V1, SDValue V2,
16500 const X86Subtarget &Subtarget,
16501 SelectionDAG &DAG) {
16502 assert(V1.getSimpleValueType() == MVT::v16i32 && "Bad operand type!");
16503 assert(V2.getSimpleValueType() == MVT::v16i32 && "Bad operand type!");
16504 assert(Mask.size() == 16 && "Unexpected mask size for v16 shuffle!");
16505
16506 // Whenever we can lower this as a zext, that instruction is strictly faster
16507 // than any alternative. It also allows us to fold memory operands into the
16508 // shuffle in many cases.
16509 if (SDValue ZExt = lowerShuffleAsZeroOrAnyExtend(
16510 DL, MVT::v16i32, V1, V2, Mask, Zeroable, Subtarget, DAG))
16511 return ZExt;
16512
16513 // If the shuffle mask is repeated in each 128-bit lane we can use more
16514 // efficient instructions that mirror the shuffles across the four 128-bit
16515 // lanes.
16516 SmallVector<int, 4> RepeatedMask;
16517 bool Is128BitLaneRepeatedShuffle =
16518 is128BitLaneRepeatedShuffleMask(MVT::v16i32, Mask, RepeatedMask);
16519 if (Is128BitLaneRepeatedShuffle) {
16520 assert(RepeatedMask.size() == 4 && "Unexpected repeated mask size!");
16521 if (V2.isUndef())
16522 return DAG.getNode(X86ISD::PSHUFD, DL, MVT::v16i32, V1,
16523 getV4X86ShuffleImm8ForMask(RepeatedMask, DL, DAG));
16524
16525 // Use dedicated unpack instructions for masks that match their pattern.
16526 if (SDValue V = lowerShuffleWithUNPCK(DL, MVT::v16i32, Mask, V1, V2, DAG))
16527 return V;
16528 }
16529
16530 // Try to use shift instructions.
16531 if (SDValue Shift = lowerShuffleAsShift(DL, MVT::v16i32, V1, V2, Mask,
16532 Zeroable, Subtarget, DAG))
16533 return Shift;
16534
16535 // Try to use VALIGN.
16536 if (SDValue Rotate = lowerShuffleAsRotate(DL, MVT::v16i32, V1, V2, Mask,
16537 Subtarget, DAG))
16538 return Rotate;
16539
16540 // Try to use byte rotation instructions.
16541 if (Subtarget.hasBWI())
16542 if (SDValue Rotate = lowerShuffleAsByteRotate(DL, MVT::v16i32, V1, V2, Mask,
16543 Subtarget, DAG))
16544 return Rotate;
16545
16546 // Assume that a single SHUFPS is faster than using a permv shuffle.
16547 // If some CPU is harmed by the domain switch, we can fix it in a later pass.
16548 if (Is128BitLaneRepeatedShuffle && isSingleSHUFPSMask(RepeatedMask)) {
16549 SDValue CastV1 = DAG.getBitcast(MVT::v16f32, V1);
16550 SDValue CastV2 = DAG.getBitcast(MVT::v16f32, V2);
16551 SDValue ShufPS = lowerShuffleWithSHUFPS(DL, MVT::v16f32, RepeatedMask,
16552 CastV1, CastV2, DAG);
16553 return DAG.getBitcast(MVT::v16i32, ShufPS);
16554 }
16555 // If we have AVX512F support, we can use VEXPAND.
16556 if (SDValue V = lowerShuffleToEXPAND(DL, MVT::v16i32, Zeroable, Mask, V1, V2,
16557 DAG, Subtarget))
16558 return V;
16559
16560 if (SDValue Blend = lowerShuffleAsBlend(DL, MVT::v16i32, V1, V2, Mask,
16561 Zeroable, Subtarget, DAG))
16562 return Blend;
16563 return lowerShuffleWithPERMV(DL, MVT::v16i32, Mask, V1, V2, DAG);
16564}
16565
16566/// Handle lowering of 32-lane 16-bit integer shuffles.
16567static SDValue lowerV32I16Shuffle(const SDLoc &DL, ArrayRef<int> Mask,
16568 const APInt &Zeroable, SDValue V1, SDValue V2,
16569 const X86Subtarget &Subtarget,
16570 SelectionDAG &DAG) {
16571 assert(V1.getSimpleValueType() == MVT::v32i16 && "Bad operand type!");
16572 assert(V2.getSimpleValueType() == MVT::v32i16 && "Bad operand type!");
16573 assert(Mask.size() == 32 && "Unexpected mask size for v32 shuffle!");
16574 assert(Subtarget.hasBWI() && "We can only lower v32i16 with AVX-512-BWI!");
16575
16576 // Whenever we can lower this as a zext, that instruction is strictly faster
16577 // than any alternative. It also allows us to fold memory operands into the
16578 // shuffle in many cases.
16579 if (SDValue ZExt = lowerShuffleAsZeroOrAnyExtend(
16580 DL, MVT::v32i16, V1, V2, Mask, Zeroable, Subtarget, DAG))
16581 return ZExt;
16582
16583 // Use dedicated unpack instructions for masks that match their pattern.
16584 if (SDValue V = lowerShuffleWithUNPCK(DL, MVT::v32i16, Mask, V1, V2, DAG))
16585 return V;
16586
16587 // Try to use shift instructions.
16588 if (SDValue Shift = lowerShuffleAsShift(DL, MVT::v32i16, V1, V2, Mask,
16589 Zeroable, Subtarget, DAG))
16590 return Shift;
16591
16592 // Try to use byte rotation instructions.
16593 if (SDValue Rotate = lowerShuffleAsByteRotate(DL, MVT::v32i16, V1, V2, Mask,
16594 Subtarget, DAG))
16595 return Rotate;
16596
16597 if (V2.isUndef()) {
16598 SmallVector<int, 8> RepeatedMask;
16599 if (is128BitLaneRepeatedShuffleMask(MVT::v32i16, Mask, RepeatedMask)) {
16600 // As this is a single-input shuffle, the repeated mask should be
16601 // a strictly valid v8i16 mask that we can pass through to the v8i16
16602 // lowering to handle even the v32 case.
16603 return lowerV8I16GeneralSingleInputShuffle(
16604 DL, MVT::v32i16, V1, RepeatedMask, Subtarget, DAG);
16605 }
16606 }
16607
16608 if (SDValue Blend = lowerShuffleAsBlend(DL, MVT::v32i16, V1, V2, Mask,
16609 Zeroable, Subtarget, DAG))
16610 return Blend;
16611
16612 if (SDValue PSHUFB = lowerShuffleWithPSHUFB(DL, MVT::v32i16, Mask, V1, V2,
16613 Zeroable, Subtarget, DAG))
16614 return PSHUFB;
16615
16616 return lowerShuffleWithPERMV(DL, MVT::v32i16, Mask, V1, V2, DAG);
16617}
16618
16619/// Handle lowering of 64-lane 8-bit integer shuffles.
16620static SDValue lowerV64I8Shuffle(const SDLoc &DL, ArrayRef<int> Mask,
16621 const APInt &Zeroable, SDValue V1, SDValue V2,
16622 const X86Subtarget &Subtarget,
16623 SelectionDAG &DAG) {
16624 assert(V1.getSimpleValueType() == MVT::v64i8 && "Bad operand type!");
16625 assert(V2.getSimpleValueType() == MVT::v64i8 && "Bad operand type!");
16626 assert(Mask.size() == 64 && "Unexpected mask size for v64 shuffle!");
16627 assert(Subtarget.hasBWI() && "We can only lower v64i8 with AVX-512-BWI!");
16628
16629 // Whenever we can lower this as a zext, that instruction is strictly faster
16630 // than any alternative. It also allows us to fold memory operands into the
16631 // shuffle in many cases.
16632 if (SDValue ZExt = lowerShuffleAsZeroOrAnyExtend(
16633 DL, MVT::v64i8, V1, V2, Mask, Zeroable, Subtarget, DAG))
16634 return ZExt;
16635
16636 // Use dedicated unpack instructions for masks that match their pattern.
16637 if (SDValue V = lowerShuffleWithUNPCK(DL, MVT::v64i8, Mask, V1, V2, DAG))
16638 return V;
16639
16640 // Use dedicated pack instructions for masks that match their pattern.
16641 if (SDValue V = lowerShuffleWithPACK(DL, MVT::v64i8, Mask, V1, V2, DAG,
16642 Subtarget))
16643 return V;
16644
16645 // Try to use shift instructions.
16646 if (SDValue Shift = lowerShuffleAsShift(DL, MVT::v64i8, V1, V2, Mask,
16647 Zeroable, Subtarget, DAG))
16648 return Shift;
16649
16650 // Try to use byte rotation instructions.
16651 if (SDValue Rotate = lowerShuffleAsByteRotate(DL, MVT::v64i8, V1, V2, Mask,
16652 Subtarget, DAG))
16653 return Rotate;
16654
16655 if (SDValue PSHUFB = lowerShuffleWithPSHUFB(DL, MVT::v64i8, Mask, V1, V2,
16656 Zeroable, Subtarget, DAG))
16657 return PSHUFB;
16658
16659 // VBMI can use VPERMV/VPERMV3 byte shuffles.
16660 if (Subtarget.hasVBMI())
16661 return lowerShuffleWithPERMV(DL, MVT::v64i8, Mask, V1, V2, DAG);
16662
16663 // Try to create an in-lane repeating shuffle mask and then shuffle the
16664 // results into the target lanes.
16665 if (SDValue V = lowerShuffleAsRepeatedMaskAndLanePermute(
16666 DL, MVT::v64i8, V1, V2, Mask, Subtarget, DAG))
16667 return V;
16668
16669 if (SDValue Blend = lowerShuffleAsBlend(DL, MVT::v64i8, V1, V2, Mask,
16670 Zeroable, Subtarget, DAG))
16671 return Blend;
16672
16673 // Try to simplify this by merging 128-bit lanes to enable a lane-based
16674 // shuffle.
16675 if (!V2.isUndef())
16676 if (SDValue Result = lowerShuffleAsLanePermuteAndRepeatedMask(
16677 DL, MVT::v64i8, V1, V2, Mask, Subtarget, DAG))
16678 return Result;
16679
16680 // FIXME: Implement direct support for this type!
16681 return splitAndLowerShuffle(DL, MVT::v64i8, V1, V2, Mask, DAG);
16682}
16683
16684/// High-level routine to lower various 512-bit x86 vector shuffles.
16685///
16686/// This routine either breaks down the specific type of a 512-bit x86 vector
16687/// shuffle or splits it into two 256-bit shuffles and fuses the results back
16688/// together based on the available instructions.
16689static SDValue lower512BitShuffle(const SDLoc &DL, ArrayRef<int> Mask,
16690 MVT VT, SDValue V1, SDValue V2,
16691 const APInt &Zeroable,
16692 const X86Subtarget &Subtarget,
16693 SelectionDAG &DAG) {
16694 assert(Subtarget.hasAVX512() &&
16695 "Cannot lower 512-bit vectors w/ basic ISA!");
16696
16697 // If we have a single input to the zero element, insert that into V1 if we
16698 // can do so cheaply.
16699 int NumElts = Mask.size();
16700 int NumV2Elements = count_if(Mask, [NumElts](int M) { return M >= NumElts; });
16701
16702 if (NumV2Elements == 1 && Mask[0] >= NumElts)
16703 if (SDValue Insertion = lowerShuffleAsElementInsertion(
16704 DL, VT, V1, V2, Mask, Zeroable, Subtarget, DAG))
16705 return Insertion;
16706
16707 // Handle special cases where the lower or upper half is UNDEF.
16708 if (SDValue V =
16709 lowerShuffleWithUndefHalf(DL, VT, V1, V2, Mask, Subtarget, DAG))
16710 return V;
16711
16712 // Check for being able to broadcast a single element.
16713 if (SDValue Broadcast = lowerShuffleAsBroadcast(DL, VT, V1, V2, Mask,
16714 Subtarget, DAG))
16715 return Broadcast;
16716
16717 // Dispatch to each element type for lowering. If we don't have support for
16718 // specific element type shuffles at 512 bits, immediately split them and
16719 // lower them. Each lowering routine of a given type is allowed to assume that
16720 // the requisite ISA extensions for that element type are available.
16721 switch (VT.SimpleTy) {
16722 case MVT::v8f64:
16723 return lowerV8F64Shuffle(DL, Mask, Zeroable, V1, V2, Subtarget, DAG);
16724 case MVT::v16f32:
16725 return lowerV16F32Shuffle(DL, Mask, Zeroable, V1, V2, Subtarget, DAG);
16726 case MVT::v8i64:
16727 return lowerV8I64Shuffle(DL, Mask, Zeroable, V1, V2, Subtarget, DAG);
16728 case MVT::v16i32:
16729 return lowerV16I32Shuffle(DL, Mask, Zeroable, V1, V2, Subtarget, DAG);
16730 case MVT::v32i16:
16731 return lowerV32I16Shuffle(DL, Mask, Zeroable, V1, V2, Subtarget, DAG);
16732 case MVT::v64i8:
16733 return lowerV64I8Shuffle(DL, Mask, Zeroable, V1, V2, Subtarget, DAG);
16734
16735 default:
16736 llvm_unreachable("Not a valid 512-bit x86 vector type!");
16737 }
16738}
16739
16740static SDValue lower1BitShuffleAsKSHIFTR(const SDLoc &DL, ArrayRef<int> Mask,
16741 MVT VT, SDValue V1, SDValue V2,
16742 const X86Subtarget &Subtarget,
16743 SelectionDAG &DAG) {
16744 // Shuffle should be unary.
16745 if (!V2.isUndef())
16746 return SDValue();
16747
16748 int ShiftAmt = -1;
16749 int NumElts = Mask.size();
16750 for (int i = 0; i != NumElts; ++i) {
16751 int M = Mask[i];
16752 assert((M == SM_SentinelUndef || (0 <= M && M < NumElts)) &&
16753 "Unexpected mask index.");
16754 if (M < 0)
16755 continue;
16756
16757 // The first non-undef element determines our shift amount.
16758 if (ShiftAmt < 0) {
16759 ShiftAmt = M - i;
16760 // Need to be shifting right.
16761 if (ShiftAmt <= 0)
16762 return SDValue();
16763 }
16764 // All non-undef elements must shift by the same amount.
16765 if (ShiftAmt != M - i)
16766 return SDValue();
16767 }
16768 assert(ShiftAmt >= 0 && "All undef?");
16769
16770 // Great, we found a shift right.
16771 MVT WideVT = VT;
16772 if ((!Subtarget.hasDQI() && NumElts == 8) || NumElts < 8)
16773 WideVT = Subtarget.hasDQI() ? MVT::v8i1 : MVT::v16i1;
16774 SDValue Res = DAG.getNode(ISD::INSERT_SUBVECTOR, DL, WideVT,
16775 DAG.getUNDEF(WideVT), V1,
16776 DAG.getIntPtrConstant(0, DL));
16777 Res = DAG.getNode(X86ISD::KSHIFTR, DL, WideVT, Res,
16778 DAG.getTargetConstant(ShiftAmt, DL, MVT::i8));
16779 return DAG.getNode(ISD::EXTRACT_SUBVECTOR, DL, VT, Res,
16780 DAG.getIntPtrConstant(0, DL));
16781}
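The loop above recognizes a mask where every defined element reads from a fixed distance to its right, which is exactly a KSHIFTR. A minimal sketch of that detection on a plain vector, with an illustrative helper name, is shown below.

// Illustrative sketch, not part of X86ISelLowering.cpp: the same "uniform
// right shift" detection as the loop above. Returns the KSHIFTR amount, or
// -1 if the mask is not a right shift (including the all-undef case).
#include <cstdio>
#include <vector>

static int matchRightShift(const std::vector<int> &Mask) {
  int ShiftAmt = -1;
  int NumElts = (int)Mask.size();
  for (int i = 0; i != NumElts; ++i) {
    int M = Mask[i];
    if (M < 0)
      continue;                       // undef element, no constraint
    if (ShiftAmt < 0) {
      ShiftAmt = M - i;               // first defined element fixes the shift
      if (ShiftAmt <= 0)
        return -1;                    // must be shifting right
    }
    if (ShiftAmt != M - i)
      return -1;                      // all defined elements must agree
  }
  return ShiftAmt;
}

int main() {
  // <2,3,4,5,6,7,-1,-1> on a v8i1: every element moves down by 2 -> KSHIFTR 2.
  std::printf("%d\n", matchRightShift({2, 3, 4, 5, 6, 7, -1, -1})); // 2
  // <1,0,3,2,...> is a swap within pairs, not a shift -> -1.
  std::printf("%d\n", matchRightShift({1, 0, 3, 2, 5, 4, 7, 6}));   // -1
  return 0;
}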
16782
16783// Determine if this shuffle can be implemented with a KSHIFT instruction.
16784// Returns the shift amount if possible or -1 if not. This is a simplified
16785// version of matchShuffleAsShift.
16786static int match1BitShuffleAsKSHIFT(unsigned &Opcode, ArrayRef<int> Mask,
16787 int MaskOffset, const APInt &Zeroable) {
16788 int Size = Mask.size();
16789
16790 auto CheckZeros = [&](int Shift, bool Left) {
16791 for (int j = 0; j < Shift; ++j)
16792 if (!Zeroable[j + (Left ? 0 : (Size - Shift))])
16793 return false;
16794
16795 return true;
16796 };
16797
16798 auto MatchShift = [&](int Shift, bool Left) {
16799 unsigned Pos = Left ? Shift : 0;
16800 unsigned Low = Left ? 0 : Shift;
16801 unsigned Len = Size - Shift;
16802 return isSequentialOrUndefInRange(Mask, Pos, Len, Low + MaskOffset);
16803 };
16804
16805 for (int Shift = 1; Shift != Size; ++Shift)
16806 for (bool Left : {true, false})
16807 if (CheckZeros(Shift, Left) && MatchShift(Shift, Left)) {
16808 Opcode = Left ? X86ISD::KSHIFTL : X86ISD::KSHIFTR;
16809 return Shift;
16810 }
16811
16812 return -1;
16813}
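match1BitShuffleAsKSHIFT pairs two tests: the lanes that the shift would fill must be zeroable, and the remaining lanes must form a sequential run. The sketch below restates that pairing on plain vectors for a single input (MaskOffset of 0); names are illustrative, not the LLVM helpers.

// Illustrative sketch, not part of X86ISelLowering.cpp: mirrors the
// CheckZeros/MatchShift pairing above. Zeroable[i] == true means element i
// of the result is known to be zero.
#include <cstdio>
#include <vector>

static int matchKShift(const std::vector<int> &Mask,
                       const std::vector<bool> &Zeroable, bool &IsLeft) {
  int Size = (int)Mask.size();
  auto checkZeros = [&](int Shift, bool Left) {
    for (int j = 0; j < Shift; ++j)
      if (!Zeroable[j + (Left ? 0 : (Size - Shift))])
        return false;                 // shifted-in lanes must be zeroable
    return true;
  };
  auto matchShift = [&](int Shift, bool Left) {
    int Pos = Left ? Shift : 0;       // where the sequential run starts
    int Low = Left ? 0 : Shift;       // which source element it starts at
    for (int j = 0; j < Size - Shift; ++j)
      if (Mask[Pos + j] >= 0 && Mask[Pos + j] != Low + j)
        return false;
    return true;
  };
  for (int Shift = 1; Shift != Size; ++Shift)
    for (bool Left : {true, false})
      if (checkZeros(Shift, Left) && matchShift(Shift, Left)) {
        IsLeft = Left;
        return Shift;
      }
  return -1;
}

int main() {
  // Result <zero,zero,0,1,2,3,4,5>: the first two lanes only need to be zero
  // (recorded in Zeroable), the rest is the input shifted up -> KSHIFTL by 2.
  bool Left;
  int S = matchKShift({-1, -1, 0, 1, 2, 3, 4, 5},
                      {true, true, false, false, false, false, false, false},
                      Left);
  std::printf("shift=%d left=%d\n", S, (int)Left); // shift=2 left=1
  return 0;
}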
16814
16815
16816// Lower vXi1 vector shuffles.
16817 // There is no dedicated instruction on AVX-512 that shuffles the masks.
16818 // The only way to shuffle bits is to sign-extend the mask vector to a SIMD
16819 // vector, shuffle it, and then truncate it back.
16820static SDValue lower1BitShuffle(const SDLoc &DL, ArrayRef<int> Mask,
16821 MVT VT, SDValue V1, SDValue V2,
16822 const APInt &Zeroable,
16823 const X86Subtarget &Subtarget,
16824 SelectionDAG &DAG) {
16825 assert(Subtarget.hasAVX512() &&
16826 "Cannot lower 512-bit vectors w/o basic ISA!");
16827
16828 int NumElts = Mask.size();
16829
16830 // Try to recognize shuffles that are just padding a subvector with zeros.
16831 int SubvecElts = 0;
16832 int Src = -1;
16833 for (int i = 0; i != NumElts; ++i) {
16834 if (Mask[i] >= 0) {
16835 // Grab the source from the first valid mask element. All subsequent
16836 // elements need to use this same source.
16837 if (Src < 0)
16838 Src = Mask[i] / NumElts;
16839 if (Src != (Mask[i] / NumElts) || (Mask[i] % NumElts) != i)
16840 break;
16841 }
16842
16843 ++SubvecElts;
16844 }
16845 assert(SubvecElts != NumElts && "Identity shuffle?");
16846
16847 // Clip to a power of 2.
16848 SubvecElts = PowerOf2Floor(SubvecElts);
16849
16850 // Make sure the number of zeroable bits in the top at least covers the bits
16851 // not covered by the subvector.
16852 if ((int)Zeroable.countLeadingOnes() >= (NumElts - SubvecElts)) {
16853 assert(Src >= 0 && "Expected a source!");
16854 MVT ExtractVT = MVT::getVectorVT(MVT::i1, SubvecElts);
16855 SDValue Extract = DAG.getNode(ISD::EXTRACT_SUBVECTOR, DL, ExtractVT,
16856 Src == 0 ? V1 : V2,
16857 DAG.getIntPtrConstant(0, DL));
16858 return DAG.getNode(ISD::INSERT_SUBVECTOR, DL, VT,
16859 DAG.getConstant(0, DL, VT),
16860 Extract, DAG.getIntPtrConstant(0, DL));
16861 }
16862
16863 // Try a simple shift right with undef elements. Later we'll try with zeros.
16864 if (SDValue Shift = lower1BitShuffleAsKSHIFTR(DL, Mask, VT, V1, V2, Subtarget,
16865 DAG))
16866 return Shift;
16867
16868 // Try to match KSHIFTs.
16869 unsigned Offset = 0;
16870 for (SDValue V : { V1, V2 }) {
16871 unsigned Opcode;
16872 int ShiftAmt = match1BitShuffleAsKSHIFT(Opcode, Mask, Offset, Zeroable);
16873 if (ShiftAmt >= 0) {
16874 MVT WideVT = VT;
16875 if ((!Subtarget.hasDQI() && NumElts == 8) || NumElts < 8)
16876 WideVT = Subtarget.hasDQI() ? MVT::v8i1 : MVT::v16i1;
16877 SDValue Res = DAG.getNode(ISD::INSERT_SUBVECTOR, DL, WideVT,
16878 DAG.getUNDEF(WideVT), V,
16879 DAG.getIntPtrConstant(0, DL));
16880 // Widened right shifts need two shifts to ensure we shift in zeroes.
16881 if (Opcode == X86ISD::KSHIFTR && WideVT != VT) {
16882 int WideElts = WideVT.getVectorNumElements();
16883 // Shift left to put the original vector in the MSBs of the new size.
16884 Res = DAG.getNode(X86ISD::KSHIFTL, DL, WideVT, Res,
16885 DAG.getTargetConstant(WideElts - NumElts, DL, MVT::i8));
16886 // Increase the shift amount to account for the left shift.
16887 ShiftAmt += WideElts - NumElts;
16888 }
16889
16890 Res = DAG.getNode(Opcode, DL, WideVT, Res,
16891 DAG.getTargetConstant(ShiftAmt, DL, MVT::i8));
16892 return DAG.getNode(ISD::EXTRACT_SUBVECTOR, DL, VT, Res,
16893 DAG.getIntPtrConstant(0, DL));
16894 }
16895 Offset += NumElts; // Increment for next iteration.
16896 }
16897
16898
16899
16900 MVT ExtVT;
16901 switch (VT.SimpleTy) {
16902 default:
16903 llvm_unreachable("Expected a vector of i1 elements");
16904 case MVT::v2i1:
16905 ExtVT = MVT::v2i64;
16906 break;
16907 case MVT::v4i1:
16908 ExtVT = MVT::v4i32;
16909 break;
16910 case MVT::v8i1:
16911 // Take a 512-bit type, which gives more shuffle options on KNL. If we have
16912 // VLX, use a 256-bit shuffle instead.
16913 ExtVT = Subtarget.hasVLX() ? MVT::v8i32 : MVT::v8i64;
16914 break;
16915 case MVT::v16i1:
16916 // Take 512-bit type, unless we are avoiding 512-bit types and have the
16917 // 256-bit operation available.
16918 ExtVT = Subtarget.canExtendTo512DQ() ? MVT::v16i32 : MVT::v16i16;
16919 break;
16920 case MVT::v32i1:
16921 // Take 512-bit type, unless we are avoiding 512-bit types and have the
16922 // 256-bit operation available.
16923 assert(Subtarget.hasBWI() && "Expected AVX512BW support");
16924 ExtVT = Subtarget.canExtendTo512BW() ? MVT::v32i16 : MVT::v32i8;
16925 break;
16926 case MVT::v64i1:
16927 ExtVT = MVT::v64i8;
16928 break;
16929 }
16930
16931 V1 = DAG.getNode(ISD::SIGN_EXTEND, DL, ExtVT, V1);
16932 V2 = DAG.getNode(ISD::SIGN_EXTEND, DL, ExtVT, V2);
16933
16934 SDValue Shuffle = DAG.getVectorShuffle(ExtVT, DL, V1, V2, Mask);
16935 // Since i1 was sign-extended, we can use X86ISD::CVT2MASK.
16936 int NumElems = VT.getVectorNumElements();
16937 if ((Subtarget.hasBWI() && (NumElems >= 32)) ||
16938 (Subtarget.hasDQI() && (NumElems < 32)))
16939 return DAG.getSetCC(DL, VT, DAG.getConstant(0, DL, ExtVT),
16940 Shuffle, ISD::SETGT);
16941
16942 return DAG.getNode(ISD::TRUNCATE, DL, VT, Shuffle);
16943}
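The recognition at the top of lower1BitShuffle (identity prefix, power-of-2 clip, zeroable upper elements) reduces many vXi1 shuffles to an extract-plus-zero-pad. The sketch below restates that arithmetic on plain vectors for a single source operand; the helper name is illustrative only.

// Illustrative sketch, not part of X86ISelLowering.cpp: the "subvector padded
// with zeros" recognition above. Zeroable[i] means element i is known zero.
// Returns the width of the i1 subvector to extract, or -1 on no match.
#include <cstdio>
#include <vector>

static int matchZeroPaddedSubvector(const std::vector<int> &Mask,
                                    const std::vector<bool> &Zeroable) {
  int NumElts = (int)Mask.size();
  int SubvecElts = 0;
  for (int i = 0; i != NumElts; ++i) {
    if (Mask[i] >= 0 && Mask[i] != i)
      break;                          // prefix must be an identity copy
    ++SubvecElts;
  }
  // Clip to a power of 2, the widths an i1 EXTRACT_SUBVECTOR can take.
  while (SubvecElts & (SubvecElts - 1))
    SubvecElts &= SubvecElts - 1;
  // Every element above the kept prefix must be zeroable.
  for (int i = SubvecElts; i != NumElts; ++i)
    if (!Zeroable[i])
      return -1;
  return SubvecElts;
}

int main() {
  // A v8i1 result that copies elements 0..3 and whose upper four elements are
  // all known zero: keep a v4i1 subvector and pad the rest with zeros.
  int W = matchZeroPaddedSubvector({0, 1, 2, 3, 6, -1, -1, -1},
                                   {false, false, false, false,
                                    true, true, true, true});
  std::printf("subvector width: %d\n", W); // 4
  return 0;
}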
16944
16945/// Helper function that returns true if the shuffle mask should be
16946/// commuted to improve canonicalization.
16947static bool canonicalizeShuffleMaskWithCommute(ArrayRef<int> Mask) {
16948 int NumElements = Mask.size();
16949
16950 int NumV1Elements = 0, NumV2Elements = 0;
16951 for (int M : Mask)
16952 if (M < 0)
16953 continue;
16954 else if (M < NumElements)
16955 ++NumV1Elements;
16956 else
16957 ++NumV2Elements;
16958
16959 // Commute the shuffle as needed such that more elements come from V1 than
16960 // V2. This allows us to match the shuffle pattern strictly on how many
16961 // elements come from V1 without handling the symmetric cases.
16962 if (NumV2Elements > NumV1Elements)
16963 return true;
16964
16965 assert(NumV1Elements > 0 && "No V1 indices");
16966
16967 if (NumV2Elements == 0)
16968 return false;
16969
16970 // When the number of V1 and V2 elements are the same, try to minimize the
16971 // number of uses of V2 in the low half of the vector. When that is tied,
16972 // ensure that the sum of indices for V1 is equal to or lower than the sum
16973 // indices for V2. When those are equal, try to ensure that the number of odd
16974 // indices for V1 is lower than the number of odd indices for V2.
16975 if (NumV1Elements == NumV2Elements) {
16976 int LowV1Elements = 0, LowV2Elements = 0;
16977 for (int M : Mask.slice(0, NumElements / 2))
16978 if (M >= NumElements)
16979 ++LowV2Elements;
16980 else if (M >= 0)
16981 ++LowV1Elements;
16982 if (LowV2Elements > LowV1Elements)
16983 return true;
16984 if (LowV2Elements == LowV1Elements) {
16985 int SumV1Indices = 0, SumV2Indices = 0;
16986 for (int i = 0, Size = Mask.size(); i < Size; ++i)
16987 if (Mask[i] >= NumElements)
16988 SumV2Indices += i;
16989 else if (Mask[i] >= 0)
16990 SumV1Indices += i;
16991 if (SumV2Indices < SumV1Indices)
16992 return true;
16993 if (SumV2Indices == SumV1Indices) {
16994 int NumV1OddIndices = 0, NumV2OddIndices = 0;
16995 for (int i = 0, Size = Mask.size(); i < Size; ++i)
16996 if (Mask[i] >= NumElements)
16997 NumV2OddIndices += i % 2;
16998 else if (Mask[i] >= 0)
16999 NumV1OddIndices += i % 2;
17000 if (NumV2OddIndices < NumV1OddIndices)
17001 return true;
17002 }
17003 }
17004 }
17005
17006 return false;
17007}
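The first and most common criterion above is simply "more elements should come from V1 than from V2". A minimal sketch of that check, with the tie-breakers deliberately omitted and an illustrative helper name, is shown below.

// Illustrative sketch, not part of X86ISelLowering.cpp: the primary commute
// criterion above, without the low-half / index-sum / odd-index tie-breakers.
#include <cstdio>
#include <vector>

static bool shouldCommute(const std::vector<int> &Mask) {
  int NumElements = (int)Mask.size();
  int NumV1 = 0, NumV2 = 0;
  for (int M : Mask) {
    if (M < 0)
      continue;                       // undef elements do not vote
    if (M < NumElements)
      ++NumV1;
    else
      ++NumV2;
  }
  return NumV2 > NumV1;
}

int main() {
  // v4i32 mask <4,5,6,3>: three elements come from V2 and one from V1, so the
  // shuffle is commuted (and the mask would be rewritten to <0,1,2,7>).
  std::printf("%d\n", shouldCommute({4, 5, 6, 3})); // 1
  return 0;
}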
17008
17009/// Top-level lowering for x86 vector shuffles.
17010///
17011/// This handles decomposition, canonicalization, and lowering of all x86
17012/// vector shuffles. Most of the specific lowering strategies are encapsulated
17013/// above in helper routines. The canonicalization attempts to widen shuffles
17014/// to involve fewer lanes of wider elements, consolidate symmetric patterns
17015/// s.t. only one of the two inputs needs to be tested, etc.
17016static SDValue lowerVectorShuffle(SDValue Op, const X86Subtarget &Subtarget,
17017 SelectionDAG &DAG) {
17018 ShuffleVectorSDNode *SVOp = cast<ShuffleVectorSDNode>(Op);
17019 ArrayRef<int> OrigMask = SVOp->getMask();
17020 SDValue V1 = Op.getOperand(0);
17021 SDValue V2 = Op.getOperand(1);
17022 MVT VT = Op.getSimpleValueType();
17023 int NumElements = VT.getVectorNumElements();
17024 SDLoc DL(Op);
17025 bool Is1BitVector = (VT.getVectorElementType() == MVT::i1);
17026
17027 assert((VT.getSizeInBits() != 64 || Is1BitVector) &&
17028 "Can't lower MMX shuffles");
17029
17030 bool V1IsUndef = V1.isUndef();
17031 bool V2IsUndef = V2.isUndef();
17032 if (V1IsUndef && V2IsUndef)
17033 return DAG.getUNDEF(VT);
17034
17035 // When we create a shuffle node we put the UNDEF node in the second operand,
17036 // but in some cases the first operand may be transformed to UNDEF.
17037 // In this case we should just commute the node.
17038 if (V1IsUndef)
17039 return DAG.getCommutedVectorShuffle(*SVOp);
17040
17041 // Check for non-undef masks pointing at an undef vector and make the masks
17042 // undef as well. This makes it easier to match the shuffle based solely on
17043 // the mask.
17044 if (V2IsUndef &&
17045 any_of(OrigMask, [NumElements](int M) { return M >= NumElements; })) {
17046 SmallVector<int, 8> NewMask(OrigMask.begin(), OrigMask.end());
17047 for (int &M : NewMask)
17048 if (M >= NumElements)
17049 M = -1;
17050 return DAG.getVectorShuffle(VT, DL, V1, V2, NewMask);
17051 }
17052
17053 // Check for illegal shuffle mask element index values.
17054 int MaskUpperLimit = OrigMask.size() * (V2IsUndef ? 1 : 2);
17055 (void)MaskUpperLimit;
17056 assert(llvm::all_of(OrigMask,
17057 [&](int M) { return -1 <= M && M < MaskUpperLimit; }) &&
17058 "Out of bounds shuffle index");
17059
17060 // We actually see shuffles that are entirely re-arrangements of a set of
17061 // zero inputs. This mostly happens while decomposing complex shuffles into
17062 // simple ones. Directly lower these as a buildvector of zeros.
17063 APInt Zeroable = computeZeroableShuffleElements(OrigMask, V1, V2);
17064 if (Zeroable.isAllOnesValue())
17065 return getZeroVector(VT, Subtarget, DAG, DL);
17066
17067 bool V2IsZero = !V2IsUndef && ISD::isBuildVectorAllZeros(V2.getNode());
17068
17069 // Create an alternative mask with info about zeroable elements.
17070 // Here we do not set undef elements as zeroable.
17071 SmallVector<int, 64> ZeroableMask(OrigMask.begin(), OrigMask.end());
17072 if (V2IsZero) {
17073 assert(!Zeroable.isNullValue() && "V2's non-undef elements are used?!");
17074 for (int i = 0; i != NumElements; ++i)
17075 if (OrigMask[i] != SM_SentinelUndef && Zeroable[i])
17076 ZeroableMask[i] = SM_SentinelZero;
17077 }
17078
17079 // Try to collapse shuffles into using a vector type with fewer elements but
17080 // wider element types. We cap this to not form integers or floating point
17081 // elements wider than 64 bits, but it might be interesting to form i128
17082 // integers to handle flipping the low and high halves of AVX 256-bit vectors.
17083 SmallVector<int, 16> WidenedMask;
17084 if (VT.getScalarSizeInBits() < 64 && !Is1BitVector &&
17085 canWidenShuffleElements(ZeroableMask, WidenedMask)) {
17086 // Shuffle mask widening should not interfere with a broadcast opportunity
17087 // by obfuscating the operands with bitcasts.
17088 // TODO: Avoid lowering directly from this top-level function: make this
17089 // a query (canLowerAsBroadcast) and defer lowering to the type-based calls.
17090 if (SDValue Broadcast = lowerShuffleAsBroadcast(DL, VT, V1, V2, OrigMask,
17091 Subtarget, DAG))
17092 return Broadcast;
17093
17094 MVT NewEltVT = VT.isFloatingPoint()
17095 ? MVT::getFloatingPointVT(VT.getScalarSizeInBits() * 2)
17096 : MVT::getIntegerVT(VT.getScalarSizeInBits() * 2);
17097 int NewNumElts = NumElements / 2;
17098 MVT NewVT = MVT::getVectorVT(NewEltVT, NewNumElts);
17099 // Make sure that the new vector type is legal. For example, v2f64 isn't
17100 // legal on SSE1.
17101 if (DAG.getTargetLoweringInfo().isTypeLegal(NewVT)) {
17102 if (V2IsZero) {
17103 // Modify the new Mask to take all zeros from the all-zero vector.
17104 // Choose indices that are blend-friendly.
17105 bool UsedZeroVector = false;
17106 assert(find(WidenedMask, SM_SentinelZero) != WidenedMask.end() &&
17107 "V2's non-undef elements are used?!");
17108 for (int i = 0; i != NewNumElts; ++i)
17109 if (WidenedMask[i] == SM_SentinelZero) {
17110 WidenedMask[i] = i + NewNumElts;
17111 UsedZeroVector = true;
17112 }
17113 // Ensure all elements of V2 are zero - isBuildVectorAllZeros permits
17114 // some elements to be undef.
17115 if (UsedZeroVector)
17116 V2 = getZeroVector(NewVT, Subtarget, DAG, DL);
17117 }
17118 V1 = DAG.getBitcast(NewVT, V1);
17119 V2 = DAG.getBitcast(NewVT, V2);
17120 return DAG.getBitcast(
17121 VT, DAG.getVectorShuffle(NewVT, DL, V1, V2, WidenedMask));
17122 }
17123 }
17124
17125 // Commute the shuffle if it will improve canonicalization.
17126 SmallVector<int, 64> Mask(OrigMask.begin(), OrigMask.end());
17127 if (canonicalizeShuffleMaskWithCommute(Mask)) {
17128 ShuffleVectorSDNode::commuteMask(Mask);
17129 std::swap(V1, V2);
17130 }
17131
17132 if (SDValue V = lowerShuffleWithVPMOV(DL, Mask, VT, V1, V2, DAG, Subtarget))
17133 return V;
17134
17135 // For each vector width, delegate to a specialized lowering routine.
17136 if (VT.is128BitVector())
17137 return lower128BitShuffle(DL, Mask, VT, V1, V2, Zeroable, Subtarget, DAG);
17138
17139 if (VT.is256BitVector())
17140 return lower256BitShuffle(DL, Mask, VT, V1, V2, Zeroable, Subtarget, DAG);
17141
17142 if (VT.is512BitVector())
17143 return lower512BitShuffle(DL, Mask, VT, V1, V2, Zeroable, Subtarget, DAG);
17144
17145 if (Is1BitVector)
17146 return lower1BitShuffle(DL, Mask, VT, V1, V2, Zeroable, Subtarget, DAG);
17147
17148 llvm_unreachable("Unimplemented!");
17149}
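The widening step above collapses adjacent mask pairs into one element of twice the width (for example v8i32 into v4i64). The sketch below shows just that pairing rule on plain vectors, leaving out the zeroable-sentinel handling the real canWidenShuffleElements also performs; names are illustrative only.

// Illustrative sketch, not part of X86ISelLowering.cpp: widen a shuffle mask
// by collapsing aligned even/odd pairs into a single wider-element index.
#include <cstdio>
#include <vector>

static bool widenMask(const std::vector<int> &Mask, std::vector<int> &Wide) {
  Wide.clear();
  for (size_t i = 0; i + 1 < Mask.size(); i += 2) {
    int Lo = Mask[i], Hi = Mask[i + 1];
    if (Lo < 0 && Hi < 0)
      Wide.push_back(-1);             // both halves undef -> undef wide elt
    else if (Lo >= 0 && Lo % 2 == 0 && (Hi < 0 || Hi == Lo + 1))
      Wide.push_back(Lo / 2);         // an aligned even/odd pair
    else if (Hi >= 0 && Hi % 2 == 1 && Lo < 0)
      Wide.push_back(Hi / 2);         // low half undef, high half pins it
    else
      return false;                   // pair straddles wide elements
  }
  return true;
}

int main() {
  // v8i32 <8,9,10,11,0,1,2,3> (take the low 128 bits from V2, the high 128
  // bits from V1) widens to the v4i64 mask <4,5,0,1>.
  std::vector<int> Wide;
  if (widenMask({8, 9, 10, 11, 0, 1, 2, 3}, Wide))
    for (int M : Wide)
      std::printf("%d ", M);          // prints: 4 5 0 1
  std::printf("\n");
  return 0;
}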
17150
17151/// Try to lower a VSELECT instruction to a vector shuffle.
17152static SDValue lowerVSELECTtoVectorShuffle(SDValue Op,
17153 const X86Subtarget &Subtarget,
17154 SelectionDAG &DAG) {
17155 SDValue Cond = Op.getOperand(0);
17156 SDValue LHS = Op.getOperand(1);
17157 SDValue RHS = Op.getOperand(2);
17158 MVT VT = Op.getSimpleValueType();
17159
17160 // Only non-legal VSELECTs reach this lowering; convert those into generic
17161 // shuffles and re-use the shuffle lowering path for blends.
17162 SmallVector<int, 32> Mask;
17163 if (createShuffleMaskFromVSELECT(Mask, Cond))
17164 return DAG.getVectorShuffle(VT, SDLoc(Op), LHS, RHS, Mask);
17165
17166 return SDValue();
17167}
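For a constant condition, each true lane selects from LHS and each false lane from RHS, so the whole vselect becomes a blend-style shuffle mask. The sketch below mirrors that mapping (the helper name is illustrative, not the createShuffleMaskFromVSELECT API, which also deals with undef and non-splat constants).

// Illustrative sketch, not part of X86ISelLowering.cpp: a true lane keeps
// element i of LHS; a false lane takes element i of RHS, which lives at
// index i + NumElts in shuffle-mask numbering.
#include <cstdio>
#include <vector>

static std::vector<int> maskFromCondition(const std::vector<bool> &Cond) {
  int NumElts = (int)Cond.size();
  std::vector<int> Mask(NumElts);
  for (int i = 0; i != NumElts; ++i)
    Mask[i] = Cond[i] ? i : i + NumElts;
  return Mask;
}

int main() {
  // vselect <4 x i1> <1,0,0,1>, LHS, RHS  ==>  shuffle mask <0,5,6,3>
  for (int M : maskFromCondition({true, false, false, true}))
    std::printf("%d ", M);            // prints: 0 5 6 3
  std::printf("\n");
  return 0;
}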
17168
17169SDValue X86TargetLowering::LowerVSELECT(SDValue Op, SelectionDAG &DAG) const {
17170 SDValue Cond = Op.getOperand(0);
17171 SDValue LHS = Op.getOperand(1);
17172 SDValue RHS = Op.getOperand(2);
17173
17174 // A vselect where all conditions and data are constants can be optimized into
17175 // a single vector load by SelectionDAGLegalize::ExpandBUILD_VECTOR().
17176 if (ISD::isBuildVectorOfConstantSDNodes(Cond.getNode()) &&
17177 ISD::isBuildVectorOfConstantSDNodes(LHS.getNode()) &&
17178 ISD::isBuildVectorOfConstantSDNodes(RHS.getNode()))
17179 return SDValue();
17180
17181 // Try to lower this to a blend-style vector shuffle. This can handle all
17182 // constant condition cases.
17183 if (SDValue BlendOp = lowerVSELECTtoVectorShuffle(Op, Subtarget, DAG))
17184 return BlendOp;
17185
17186 // If this VSELECT has a vector of i1 as a mask, it will be directly matched
17187 // with patterns on the mask registers on AVX-512.
17188 MVT CondVT = Cond.getSimpleValueType();
17189 unsigned CondEltSize = Cond.getScalarValueSizeInBits();
17190 if (CondEltSize == 1)
17191 return Op;
17192
17193 // Variable blends are only legal from SSE4.1 onward.
17194 if (!Subtarget.hasSSE41())
17195 return SDValue();
17196
17197 SDLoc dl(Op);
17198 MVT VT = Op.getSimpleValueType();
17199 unsigned EltSize = VT.getScalarSizeInBits();
17200 unsigned NumElts = VT.getVectorNumElements();
17201
17202 // If the VSELECT is on a 512-bit type, we have to convert a non-i1 condition
17203 // into an i1 condition so that we can use the mask-based 512-bit blend
17204 // instructions.
17205 if (VT.getSizeInBits() == 512) {
17206 // Build a mask by testing the condition against zero.
17207 MVT MaskVT = MVT::getVectorVT(MVT::i1, NumElts);
17208 SDValue Mask = DAG.getSetCC(dl, MaskVT, Cond,
17209 DAG.getConstant(0, dl, CondVT),
17210 ISD::SETNE);
17211 // Now return a new VSELECT using the mask.
17212 return DAG.getSelect(dl, VT, Mask, LHS, RHS);
17213 }
17214
17215 // SEXT/TRUNC cases where the mask doesn't match the destination size.
17216 if (CondEltSize != EltSize) {
17217 // If we don't have a sign splat, rely on the expansion.
17218 if (CondEltSize != DAG.ComputeNumSignBits(Cond))
17219 return SDValue();
17220
17221 MVT NewCondSVT = MVT::getIntegerVT(EltSize);
17222 MVT NewCondVT = MVT::getVectorVT(NewCondSVT, NumElts);
17223 Cond = DAG.getSExtOrTrunc(Cond, dl, NewCondVT);
17224 return DAG.getNode(ISD::VSELECT, dl, VT, Cond, LHS, RHS);
17225 }
17226
17227 // Only some types will be legal on some subtargets. If we can emit a legal
17228 // VSELECT-matching blend, return Op; but if we need to expand, return
17229 // a null value.
17230 switch (VT.SimpleTy) {
17231 default:
17232 // Most of the vector types have blends past SSE4.1.
17233 return Op;
17234
17235 case MVT::v32i8:
17236 // The byte blends for AVX vectors were introduced only in AVX2.
17237 if (Subtarget.hasAVX2())
17238 return Op;
17239
17240 return SDValue();
17241
17242 case MVT::v8i16:
17243 case MVT::v16i16: {
17244 // Bitcast everything to the vXi8 type and use a vXi8 vselect.
17245 MVT CastVT = MVT::getVectorVT(MVT::i8, NumElts * 2);
17246 Cond = DAG.getBitcast(CastVT, Cond);
17247 LHS = DAG.getBitcast(CastVT, LHS);
17248 RHS = DAG.getBitcast(CastVT, RHS);
17249 SDValue Select = DAG.getNode(ISD::VSELECT, dl, CastVT, Cond, LHS, RHS);
17250 return DAG.getBitcast(VT, Select);
17251 }
17252 }
17253}
17254
17255static SDValue LowerEXTRACT_VECTOR_ELT_SSE4(SDValue Op, SelectionDAG &DAG) {
17256 MVT VT = Op.getSimpleValueType();
17257 SDLoc dl(Op);
17258
17259 if (!Op.getOperand(0).getSimpleValueType().is128BitVector())
17260 return SDValue();
17261
17262 if (VT.getSizeInBits() == 8) {
17263 SDValue Extract = DAG.getNode(X86ISD::PEXTRB, dl, MVT::i32,
17264 Op.getOperand(0), Op.getOperand(1));
17265 return DAG.getNode(ISD::TRUNCATE, dl, VT, Extract);
17266 }
17267
17268 if (VT == MVT::f32) {
17269 // EXTRACTPS outputs to a GPR32 register which will require a movd to copy
17270 // the result back to FR32 register. It's only worth matching if the
17271 // result has a single use which is a store or a bitcast to i32. And in
17272 // the case of a store, it's not worth it if the index is a constant 0,
17273 // because a MOVSSmr can be used instead, which is smaller and faster.
17274 if (!Op.hasOneUse())
17275 return SDValue();
17276 SDNode *User = *Op.getNode()->use_begin();
17277 if ((User->getOpcode() != ISD::STORE ||
17278 isNullConstant(Op.getOperand(1))) &&
17279 (User->getOpcode() != ISD::BITCAST ||
17280 User->getValueType(0) != MVT::i32))
17281 return SDValue();
17282 SDValue Extract = DAG.getNode(ISD::EXTRACT_VECTOR_ELT, dl, MVT::i32,
17283 DAG.getBitcast(MVT::v4i32, Op.getOperand(0)),
17284 Op.getOperand(1));
17285 return DAG.getBitcast(MVT::f32, Extract);
17286 }
17287
17288 if (VT == MVT::i32 || VT == MVT::i64) {
17289 // ExtractPS/pextrq works with constant index.
17290 if (isa<ConstantSDNode>(Op.getOperand(1)))
17291 return Op;
17292 }
17293
17294 return SDValue();
17295}
17296
17297/// Extract one bit from mask vector, like v16i1 or v8i1.
17298/// AVX-512 feature.
17299static SDValue ExtractBitFromMaskVector(SDValue Op, SelectionDAG &DAG,
17300 const X86Subtarget &Subtarget) {
17301 SDValue Vec = Op.getOperand(0);
17302 SDLoc dl(Vec);
17303 MVT VecVT = Vec.getSimpleValueType();
17304 SDValue Idx = Op.getOperand(1);
17305 MVT EltVT = Op.getSimpleValueType();
17306
17307 assert((VecVT.getVectorNumElements() <= 16 || Subtarget.hasBWI()) &&
17308 "Unexpected vector type in ExtractBitFromMaskVector");
17309
17310 // A variable index can't be handled in mask registers,
17311 // so extend the vector to VR512/128.
17312 if (!isa<ConstantSDNode>(Idx)) {
17313 unsigned NumElts = VecVT.getVectorNumElements();
17314 // Extending v8i1/v16i1 to 512-bit gets better performance on KNL
17315 // than extending to 128/256-bit.
17316 MVT ExtEltVT = (NumElts <= 8) ? MVT::getIntegerVT(128 / NumElts) : MVT::i8;
17317 MVT ExtVecVT = MVT::getVectorVT(ExtEltVT, NumElts);
17318 SDValue Ext = DAG.getNode(ISD::SIGN_EXTEND, dl, ExtVecVT, Vec);
17319 SDValue Elt = DAG.getNode(ISD::EXTRACT_VECTOR_ELT, dl, ExtEltVT, Ext, Idx);
17320 return DAG.getNode(ISD::TRUNCATE, dl, EltVT, Elt);
17321 }
17322
17323 unsigned IdxVal = cast<ConstantSDNode>(Idx)->getZExtValue();
17324 if (IdxVal == 0) // the operation is legal
17325 return Op;
17326
17327 // Extend to natively supported kshift.
17328 unsigned NumElems = VecVT.getVectorNumElements();
17329 MVT WideVecVT = VecVT;
17330 if ((!Subtarget.hasDQI() && NumElems == 8) || NumElems < 8) {
17331 WideVecVT = Subtarget.hasDQI() ? MVT::v8i1 : MVT::v16i1;
17332 Vec = DAG.getNode(ISD::INSERT_SUBVECTOR, dl, WideVecVT,
17333 DAG.getUNDEF(WideVecVT), Vec,
17334 DAG.getIntPtrConstant(0, dl));
17335 }
17336
17337 // Use kshiftr instruction to move to the lower element.
17338 Vec = DAG.getNode(X86ISD::KSHIFTR, dl, WideVecVT, Vec,
17339 DAG.getTargetConstant(IdxVal, dl, MVT::i8));
17340
17341 return DAG.getNode(ISD::EXTRACT_VECTOR_ELT, dl, Op.getValueType(), Vec,
17342 DAG.getIntPtrConstant(0, dl));
17343}
17344
17345SDValue
17346X86TargetLowering::LowerEXTRACT_VECTOR_ELT(SDValue Op,
17347 SelectionDAG &DAG) const {
17348 SDLoc dl(Op);
17349 SDValue Vec = Op.getOperand(0);
17350 MVT VecVT = Vec.getSimpleValueType();
17351 SDValue Idx = Op.getOperand(1);
17352
17353 if (VecVT.getVectorElementType() == MVT::i1)
17354 return ExtractBitFromMaskVector(Op, DAG, Subtarget);
17355
17356 if (!isa<ConstantSDNode>(Idx)) {
17357 // It's more profitable to go through memory (1 cycle throughput)
17358 // than to use a VMOVD + VPERMV/PSHUFB sequence (2/3 cycles throughput).
17359 // The IACA tool was used to get the performance estimate
17360 // (https://software.intel.com/en-us/articles/intel-architecture-code-analyzer)
17361 //
17362 // example : extractelement <16 x i8> %a, i32 %i
17363 //
17364 // Block Throughput: 3.00 Cycles
17365 // Throughput Bottleneck: Port5
17366 //
17367 // | Num Of | Ports pressure in cycles | |
17368 // | Uops | 0 - DV | 5 | 6 | 7 | |
17369 // ---------------------------------------------
17370 // | 1 | | 1.0 | | | CP | vmovd xmm1, edi
17371 // | 1 | | 1.0 | | | CP | vpshufb xmm0, xmm0, xmm1
17372 // | 2 | 1.0 | 1.0 | | | CP | vpextrb eax, xmm0, 0x0
17373 // Total Num Of Uops: 4
17374 //
17375 //
17376 // Block Throughput: 1.00 Cycles
17377 // Throughput Bottleneck: PORT2_AGU, PORT3_AGU, Port4
17378 //
17379 // | | Ports pressure in cycles | |
17380 // |Uops| 1 | 2 - D |3 - D | 4 | 5 | |
17381 // ---------------------------------------------------------
17382 // |2^ | | 0.5 | 0.5 |1.0| |CP| vmovaps xmmword ptr [rsp-0x18], xmm0
17383 // |1 |0.5| | | |0.5| | lea rax, ptr [rsp-0x18]
17384 // |1 | |0.5, 0.5|0.5, 0.5| | |CP| mov al, byte ptr [rdi+rax*1]
17385 // Total Num Of Uops: 4
17386
17387 return SDValue();
17388 }
17389
17390 unsigned IdxVal = cast<ConstantSDNode>(Idx)->getZExtValue();
17391
17392 // If this is a 256-bit vector result, first extract the 128-bit vector and
17393 // then extract the element from the 128-bit vector.
17394 if (VecVT.is256BitVector() || VecVT.is512BitVector()) {
17395 // Get the 128-bit vector.
17396 Vec = extract128BitVector(Vec, IdxVal, DAG, dl);
17397 MVT EltVT = VecVT.getVectorElementType();
17398
17399 unsigned ElemsPerChunk = 128 / EltVT.getSizeInBits();
17400 assert(isPowerOf2_32(ElemsPerChunk) && "Elements per chunk not power of 2");
17401
17402 // Find IdxVal modulo ElemsPerChunk. Since ElemsPerChunk is a power of 2
17403 // this can be done with a mask.
17404 IdxVal &= ElemsPerChunk - 1;
17405 return DAG.getNode(ISD::EXTRACT_VECTOR_ELT, dl, Op.getValueType(), Vec,
17406 DAG.getIntPtrConstant(IdxVal, dl));
17407 }
17408
17409 assert(VecVT.is128BitVector() && "Unexpected vector length");
17410
17411 MVT VT = Op.getSimpleValueType();
17412
17413 if (VT.getSizeInBits() == 16) {
17414 // If IdxVal is 0, it's cheaper to do a move instead of a pextrw, unless
17415 // we're going to zero extend the register or fold the store (SSE41 only).
17416 if (IdxVal == 0 && !MayFoldIntoZeroExtend(Op) &&
17417 !(Subtarget.hasSSE41() && MayFoldIntoStore(Op)))
17418 return DAG.getNode(ISD::TRUNCATE, dl, MVT::i16,
17419 DAG.getNode(ISD::EXTRACT_VECTOR_ELT, dl, MVT::i32,
17420 DAG.getBitcast(MVT::v4i32, Vec), Idx));
17421
17422 // Transform it so it matches pextrw, which produces a 32-bit result.
17423 SDValue Extract = DAG.getNode(X86ISD::PEXTRW, dl, MVT::i32,
17424 Op.getOperand(0), Op.getOperand(1));
17425 return DAG.getNode(ISD::TRUNCATE, dl, VT, Extract);
17426 }
17427
17428 if (Subtarget.hasSSE41())
17429 if (SDValue Res = LowerEXTRACT_VECTOR_ELT_SSE4(Op, DAG))
17430 return Res;
17431
17432 // TODO: We only extract a single element from v16i8, we can probably afford
17433 // to be more aggressive here before using the default approach of spilling to
17434 // stack.
17435 if (VT.getSizeInBits() == 8 && Op->isOnlyUserOf(Vec.getNode())) {
17436 // Extract either the lowest i32 or any i16, and extract the sub-byte.
17437 int DWordIdx = IdxVal / 4;
17438 if (DWordIdx == 0) {
17439 SDValue Res = DAG.getNode(ISD::EXTRACT_VECTOR_ELT, dl, MVT::i32,
17440 DAG.getBitcast(MVT::v4i32, Vec),
17441 DAG.getIntPtrConstant(DWordIdx, dl));
17442 int ShiftVal = (IdxVal % 4) * 8;
17443 if (ShiftVal != 0)
17444 Res = DAG.getNode(ISD::SRL, dl, MVT::i32, Res,
17445 DAG.getConstant(ShiftVal, dl, MVT::i8));
17446 return DAG.getNode(ISD::TRUNCATE, dl, VT, Res);
17447 }
17448
17449 int WordIdx = IdxVal / 2;
17450 SDValue Res = DAG.getNode(ISD::EXTRACT_VECTOR_ELT, dl, MVT::i16,
17451 DAG.getBitcast(MVT::v8i16, Vec),
17452 DAG.getIntPtrConstant(WordIdx, dl));
17453 int ShiftVal = (IdxVal % 2) * 8;
17454 if (ShiftVal != 0)
17455 Res = DAG.getNode(ISD::SRL, dl, MVT::i16, Res,
17456 DAG.getConstant(ShiftVal, dl, MVT::i8));
17457 return DAG.getNode(ISD::TRUNCATE, dl, VT, Res);
17458 }
17459
17460 if (VT.getSizeInBits() == 32) {
17461 if (IdxVal == 0)
17462 return Op;
17463
17464 // SHUFPS the element to the lowest double word, then movss.
17465 int Mask[4] = { static_cast<int>(IdxVal), -1, -1, -1 };
17466 Vec = DAG.getVectorShuffle(VecVT, dl, Vec, DAG.getUNDEF(VecVT), Mask);
17467 return DAG.getNode(ISD::EXTRACT_VECTOR_ELT, dl, VT, Vec,
17468 DAG.getIntPtrConstant(0, dl));
17469 }
17470
17471 if (VT.getSizeInBits() == 64) {
17472 // FIXME: .td only matches this for <2 x f64>, not <2 x i64> on 32b
17473 // FIXME: seems like this should be unnecessary if mov{h,l}pd were taught
17474 // to match extract_elt for f64.
17475 if (IdxVal == 0)
17476 return Op;
17477
17478 // UNPCKHPD the element to the lowest double word, then movsd.
17479 // Note if the lower 64 bits of the result of the UNPCKHPD is then stored
17480 // to a f64mem, the whole operation is folded into a single MOVHPDmr.
17481 int Mask[2] = { 1, -1 };
17482 Vec = DAG.getVectorShuffle(VecVT, dl, Vec, DAG.getUNDEF(VecVT), Mask);
17483 return DAG.getNode(ISD::EXTRACT_VECTOR_ELT, dl, VT, Vec,
17484 DAG.getIntPtrConstant(0, dl));
17485 }
17486
17487 return SDValue();
17488}
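The v16i8 path above avoids PEXTRB by pulling out the containing i32 (for the low dword) or i16 element and shifting the wanted byte down, which is what the SRL + TRUNCATE nodes encode. The sketch below shows the same arithmetic on scalars, generalized to any dword for illustration; names are illustrative only.

// Illustrative sketch, not part of X86ISelLowering.cpp: extract byte IdxVal
// of a 16-byte vector by reading the containing i32 lane and shifting.
#include <cstdint>
#include <cstdio>

static uint8_t extractByteViaDword(const uint32_t Vec[4], unsigned IdxVal) {
  unsigned DWordIdx = IdxVal / 4;     // which i32 element holds the byte
  unsigned ShiftVal = (IdxVal % 4) * 8;
  return (uint8_t)(Vec[DWordIdx] >> ShiftVal);
}

int main() {
  // A v4i32 view of the bytes 0x00,0x01,...,0x0f (little-endian lanes).
  const uint32_t Vec[4] = {0x03020100u, 0x07060504u, 0x0b0a0908u, 0x0f0e0d0cu};
  std::printf("byte 6 = 0x%02x\n", extractByteViaDword(Vec, 6)); // 0x06
  return 0;
}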
17489
17490/// Insert one bit to mask vector, like v16i1 or v8i1.
17491/// AVX-512 feature.
17492static SDValue InsertBitToMaskVector(SDValue Op, SelectionDAG &DAG,
17493 const X86Subtarget &Subtarget) {
17494 SDLoc dl(Op);
17495 SDValue Vec = Op.getOperand(0);
17496 SDValue Elt = Op.getOperand(1);
17497 SDValue Idx = Op.getOperand(2);
17498 MVT VecVT = Vec.getSimpleValueType();
17499
17500 if (!isa<ConstantSDNode>(Idx)) {
17501 // Non-constant index. Extend the source and destination,
17502 // insert the element, and then truncate the result.
17503 unsigned NumElts = VecVT.getVectorNumElements();
17504 MVT ExtEltVT = (NumElts <= 8) ? MVT::getIntegerVT(128 / NumElts) : MVT::i8;
17505 MVT ExtVecVT = MVT::getVectorVT(ExtEltVT, NumElts);
17506 SDValue ExtOp = DAG.getNode(ISD::INSERT_VECTOR_ELT, dl, ExtVecVT,
17507 DAG.getNode(ISD::SIGN_EXTEND, dl, ExtVecVT, Vec),
17508 DAG.getNode(ISD::SIGN_EXTEND, dl, ExtEltVT, Elt), Idx);
17509 return DAG.getNode(ISD::TRUNCATE, dl, VecVT, ExtOp);
17510 }
17511
17512 // Copy into a k-register, extract to v1i1 and insert_subvector.
17513 SDValue EltInVec = DAG.getNode(ISD::SCALAR_TO_VECTOR, dl, MVT::v1i1, Elt);
17514
17515 return DAG.getNode(ISD::INSERT_SUBVECTOR, dl, VecVT, Vec, EltInVec,
17516 Op.getOperand(2));
17517}
17518
17519SDValue X86TargetLowering::LowerINSERT_VECTOR_ELT(SDValue Op,
17520 SelectionDAG &DAG) const {
17521 MVT VT = Op.getSimpleValueType();
17522 MVT EltVT = VT.getVectorElementType();
17523 unsigned NumElts = VT.getVectorNumElements();
17524
17525 if (EltVT == MVT::i1)
17526 return InsertBitToMaskVector(Op, DAG, Subtarget);
17527
17528 SDLoc dl(Op);
17529 SDValue N0 = Op.getOperand(0);
17530 SDValue N1 = Op.getOperand(1);
17531 SDValue N2 = Op.getOperand(2);
17532
17533 auto *N2C = dyn_cast<ConstantSDNode>(N2);
17534 if (!N2C || N2C->getAPIntValue().uge(NumElts))
17535 return SDValue();
17536 uint64_t IdxVal = N2C->getZExtValue();
17537
17538 bool IsZeroElt = X86::isZeroNode(N1);
17539 bool IsAllOnesElt = VT.isInteger() && llvm::isAllOnesConstant(N1);
17540
17541 // If we are inserting an element, see if we can do this more efficiently with
17542 // a blend shuffle with a rematerializable vector than a costly integer
17543 // insertion.
17544 if ((IsZeroElt || IsAllOnesElt) && Subtarget.hasSSE41() &&
17545 16 <= EltVT.getSizeInBits()) {
17546 SmallVector<int, 8> BlendMask;
17547 for (unsigned i = 0; i != NumElts; ++i)
17548 BlendMask.push_back(i == IdxVal ? i + NumElts : i);
17549 SDValue CstVector = IsZeroElt ? getZeroVector(VT, Subtarget, DAG, dl)
17550 : getOnesVector(VT, DAG, dl);
17551 return DAG.getVectorShuffle(VT, dl, N0, CstVector, BlendMask);
17552 }
17553
17554 // If the vector is wider than 128 bits, extract the 128-bit subvector, insert
17555 // into that, and then insert the subvector back into the result.
17556 if (VT.is256BitVector() || VT.is512BitVector()) {
17557 // With a 256-bit vector, we can insert into the zero element efficiently
17558 // using a blend if we have AVX or AVX2 and the right data type.
17559 if (VT.is256BitVector() && IdxVal == 0) {
17560 // TODO: It is worthwhile to cast integer to floating point and back
17561 // and incur a domain crossing penalty if that's what we'll end up
17562 // doing anyway after extracting to a 128-bit vector.
17563 if ((Subtarget.hasAVX() && (EltVT == MVT::f64 || EltVT == MVT::f32)) ||
17564 (Subtarget.hasAVX2() && EltVT == MVT::i32)) {
17565 SDValue N1Vec = DAG.getNode(ISD::SCALAR_TO_VECTOR, dl, VT, N1);
17566 return DAG.getNode(X86ISD::BLENDI, dl, VT, N0, N1Vec,
17567 DAG.getTargetConstant(1, dl, MVT::i8));
17568 }
17569 }
17570
17571 // Get the desired 128-bit vector chunk.
17572 SDValue V = extract128BitVector(N0, IdxVal, DAG, dl);
17573
17574 // Insert the element into the desired chunk.
17575 unsigned NumEltsIn128 = 128 / EltVT.getSizeInBits();
17576 assert(isPowerOf2_32(NumEltsIn128));
17577 // Since NumEltsIn128 is a power of 2 we can use mask instead of modulo.
17578 unsigned IdxIn128 = IdxVal & (NumEltsIn128 - 1);
17579
17580 V = DAG.getNode(ISD::INSERT_VECTOR_ELT, dl, V.getValueType(), V, N1,
17581 DAG.getIntPtrConstant(IdxIn128, dl));
17582
17583 // Insert the changed part back into the bigger vector
17584 return insert128BitVector(N0, V, IdxVal, DAG, dl);
17585 }
17586 assert(VT.is128BitVector() && "Only 128-bit vector types should be left!");
17587
17588 // This will be just movd/movq/movss/movsd.
17589 if (IdxVal == 0 && ISD::isBuildVectorAllZeros(N0.getNode()) &&
17590 (EltVT == MVT::i32 || EltVT == MVT::f32 || EltVT == MVT::f64 ||
17591 EltVT == MVT::i64)) {
17592 N1 = DAG.getNode(ISD::SCALAR_TO_VECTOR, dl, VT, N1);
17593 return getShuffleVectorZeroOrUndef(N1, 0, true, Subtarget, DAG);
17594 }
17595
17596 // Transform it so it matches pinsr{b,w}, which expects a GR32 as its second
17597 // argument. SSE41 required for pinsrb.
17598 if (VT == MVT::v8i16 || (VT == MVT::v16i8 && Subtarget.hasSSE41())) {
17599 unsigned Opc;
17600 if (VT == MVT::v8i16) {
17601 assert(Subtarget.hasSSE2() && "SSE2 required for PINSRW");
17602 Opc = X86ISD::PINSRW;
17603 } else {
17604 assert(VT == MVT::v16i8 && "PINSRB requires v16i8 vector");
17605 assert(Subtarget.hasSSE41() && "SSE41 required for PINSRB");
17606 Opc = X86ISD::PINSRB;
17607 }
17608
17609 if (N1.getValueType() != MVT::i32)
17610 N1 = DAG.getNode(ISD::ANY_EXTEND, dl, MVT::i32, N1);
17611 if (N2.getValueType() != MVT::i32)
17612 N2 = DAG.getIntPtrConstant(IdxVal, dl);
17613 return DAG.getNode(Opc, dl, VT, N0, N1, N2);
17614 }
17615
17616 if (Subtarget.hasSSE41()) {
17617 if (EltVT == MVT::f32) {
17618 // Bits [7:6] of the constant are the source select. This will always be
17619 // zero here. The DAG Combiner may combine an extract_elt index into
17620 // these bits. For example (insert (extract, 3), 2) could be matched by
17621 // putting the '3' into bits [7:6] of X86ISD::INSERTPS.
17622 // Bits [5:4] of the constant are the destination select. This is the
17623 // value of the incoming immediate.
17624 // Bits [3:0] of the constant are the zero mask. The DAG Combiner may
17625 // combine either bitwise AND or insert of float 0.0 to set these bits.
17626
17627 bool MinSize = DAG.getMachineFunction().getFunction().hasMinSize();
17628 if (IdxVal == 0 && (!MinSize || !MayFoldLoad(N1))) {
17629 // If this is an insertion of 32-bits into the low 32-bits of
17630 // a vector, we prefer to generate a blend with immediate rather
17631 // than an insertps. Blends are simpler operations in hardware and so
17632 // will always have equal or better performance than insertps.
17633 // But if optimizing for size and there's a load folding opportunity,
17634 // generate insertps because blendps does not have a 32-bit memory
17635 // operand form.
17636 N1 = DAG.getNode(ISD::SCALAR_TO_VECTOR, dl, MVT::v4f32, N1);
17637 return DAG.getNode(X86ISD::BLENDI, dl, VT, N0, N1,
17638 DAG.getTargetConstant(1, dl, MVT::i8));
17639 }
17640 // Create this as a scalar-to-vector.
17641 N1 = DAG.getNode(ISD::SCALAR_TO_VECTOR, dl, MVT::v4f32, N1);
17642 return DAG.getNode(X86ISD::INSERTPS, dl, VT, N0, N1,
17643 DAG.getTargetConstant(IdxVal << 4, dl, MVT::i8));
17644 }
17645
17646 // PINSR* works with constant index.
17647 if (EltVT == MVT::i32 || EltVT == MVT::i64)
17648 return Op;
17649 }
17650
17651 return SDValue();
17652}
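The blend mask built near the top of LowerINSERT_VECTOR_ELT keeps every lane of the original vector except the inserted one, which is taken from the rematerialized zero or all-ones vector. A minimal sketch of that mask construction follows, with an illustrative helper name.

// Illustrative sketch, not part of X86ISelLowering.cpp: lanes 0..NumElts-1
// index the original vector, lanes NumElts..2*NumElts-1 index the constant
// vector; only lane IdxVal comes from the constant.
#include <cstdio>
#include <vector>

static std::vector<int> insertAsBlendMask(unsigned NumElts, unsigned IdxVal) {
  std::vector<int> BlendMask;
  for (unsigned i = 0; i != NumElts; ++i)
    BlendMask.push_back((int)(i == IdxVal ? i + NumElts : i));
  return BlendMask;
}

int main() {
  // insertelement <4 x i32> %v, i32 0, i32 2 becomes a blend of %v with the
  // zero vector using mask <0,1,6,3>.
  for (int M : insertAsBlendMask(4, 2))
    std::printf("%d ", M);            // prints: 0 1 6 3
  std::printf("\n");
  return 0;
}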
17653
17654static SDValue LowerSCALAR_TO_VECTOR(SDValue Op, const X86Subtarget &Subtarget,
17655 SelectionDAG &DAG) {
17656 SDLoc dl(Op);
17657 MVT OpVT = Op.getSimpleValueType();
17658
17659 // It's always cheaper to replace a xor+movd with xorps, and it simplifies
17660 // further combines.
17661 if (X86::isZeroNode(Op.getOperand(0)))
17662 return getZeroVector(OpVT, Subtarget, DAG, dl);
17663
17664 // If this is a 256-bit vector result, first insert into a 128-bit
17665 // vector and then insert into the 256-bit vector.
17666 if (!OpVT.is128BitVector()) {
17667 // Insert into a 128-bit vector.
17668 unsigned SizeFactor = OpVT.getSizeInBits() / 128;
17669 MVT VT128 = MVT::getVectorVT(OpVT.getVectorElementType(),
17670 OpVT.getVectorNumElements() / SizeFactor);
17671
17672 Op = DAG.getNode(ISD::SCALAR_TO_VECTOR, dl, VT128, Op.getOperand(0));
17673
17674 // Insert the 128-bit vector.
17675 return insert128BitVector(DAG.getUNDEF(OpVT), Op, 0, DAG, dl);
17676 }
17677 assert(OpVT.is128BitVector() && OpVT.isInteger() && OpVT != MVT::v2i64 &&
17678 "Expected an SSE type!");
17679
17680 // Pass through a v4i32 SCALAR_TO_VECTOR as that's what we use in tblgen.
17681 if (OpVT == MVT::v4i32)
17682 return Op;
17683
17684 SDValue AnyExt = DAG.getNode(ISD::ANY_EXTEND, dl, MVT::i32, Op.getOperand(0));
17685 return DAG.getBitcast(
17686 OpVT, DAG.getNode(ISD::SCALAR_TO_VECTOR, dl, MVT::v4i32, AnyExt));
17687}
17688
17689// Lower a node with an INSERT_SUBVECTOR opcode. This may result in a
17690// simple superregister reference or explicit instructions to insert
17691// the upper bits of a vector.
17692static SDValue LowerINSERT_SUBVECTOR(SDValue Op, const X86Subtarget &Subtarget,
17693 SelectionDAG &DAG) {
17694 assert(Op.getSimpleValueType().getVectorElementType() == MVT::i1);
17695
17696 return insert1BitVector(Op, DAG, Subtarget);
17697}
17698
17699static SDValue LowerEXTRACT_SUBVECTOR(SDValue Op, const X86Subtarget &Subtarget,
17700 SelectionDAG &DAG) {
17701 assert(Op.getSimpleValueType().getVectorElementType() == MVT::i1 &&
17702        "Only vXi1 extract_subvectors need custom lowering");
17703
17704 SDLoc dl(Op);
17705 SDValue Vec = Op.getOperand(0);
17706 SDValue Idx = Op.getOperand(1);
17707
17708 if (!isa<ConstantSDNode>(Idx))
17709 return SDValue();
17710
17711 unsigned IdxVal = cast<ConstantSDNode>(Idx)->getZExtValue();
17712 if (IdxVal == 0) // the operation is legal
17713 return Op;
17714
17715 MVT VecVT = Vec.getSimpleValueType();
17716 unsigned NumElems = VecVT.getVectorNumElements();
17717
17718 // Extend to natively supported kshift.
17719 MVT WideVecVT = VecVT;
17720 if ((!Subtarget.hasDQI() && NumElems == 8) || NumElems < 8) {
17721 WideVecVT = Subtarget.hasDQI() ? MVT::v8i1 : MVT::v16i1;
17722 Vec = DAG.getNode(ISD::INSERT_SUBVECTOR, dl, WideVecVT,
17723 DAG.getUNDEF(WideVecVT), Vec,
17724 DAG.getIntPtrConstant(0, dl));
17725 }
17726
17727 // Shift to the LSB.
17728 Vec = DAG.getNode(X86ISD::KSHIFTR, dl, WideVecVT, Vec,
17729 DAG.getTargetConstant(IdxVal, dl, MVT::i8));
17730
17731 return DAG.getNode(ISD::EXTRACT_SUBVECTOR, dl, Op.getValueType(), Vec,
17732 DAG.getIntPtrConstant(0, dl));
17733}
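// [Annotation] Illustrative sketch, not part of X86ISelLowering.cpp: scalar
// model of the KSHIFTR-based extraction above. A vXi1 subvector that starts
// at bit IdxVal of a mask register is obtained by shifting the mask right so
// the wanted bits land in the LSBs; the uint16_t stands in for a k-register
// and the function name is made up.
#include <cstdint>

static uint16_t extractSubMask(uint16_t Mask, unsigned IdxVal) {
  // After the shift, the low bits hold the extracted subvector; the final
  // EXTRACT_SUBVECTOR at index 0 above simply ignores the rest.
  return static_cast<uint16_t>(Mask >> IdxVal);
}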
17734
17735// Returns the appropriate wrapper opcode for a global reference.
17736unsigned X86TargetLowering::getGlobalWrapperKind(
17737 const GlobalValue *GV, const unsigned char OpFlags) const {
17738 // References to absolute symbols are never PC-relative.
17739 if (GV && GV->isAbsoluteSymbolRef())
17740 return X86ISD::Wrapper;
17741
17742 CodeModel::Model M = getTargetMachine().getCodeModel();
17743 if (Subtarget.isPICStyleRIPRel() &&
17744 (M == CodeModel::Small || M == CodeModel::Kernel))
17745 return X86ISD::WrapperRIP;
17746
17747 // GOTPCREL references must always use RIP.
17748 if (OpFlags == X86II::MO_GOTPCREL)
17749 return X86ISD::WrapperRIP;
17750
17751 return X86ISD::Wrapper;
17752}
17753
17754// ConstantPool, JumpTable, GlobalAddress, and ExternalSymbol are lowered as
17755// their target counterpart wrapped in the X86ISD::Wrapper node. Suppose N is
17756// one of the above mentioned nodes. It has to be wrapped because otherwise
17757// Select(N) returns N. So the raw TargetGlobalAddress nodes, etc. can only
17758// be used to form an addressing mode. These wrapped nodes will be selected
17759// into MOV32ri.
17760SDValue
17761X86TargetLowering::LowerConstantPool(SDValue Op, SelectionDAG &DAG) const {
17762 ConstantPoolSDNode *CP = cast<ConstantPoolSDNode>(Op);
17763
17764 // In PIC mode (unless we're in RIPRel PIC mode) we add an offset to the
17765 // global base reg.
17766 unsigned char OpFlag = Subtarget.classifyLocalReference(nullptr);
17767
17768 auto PtrVT = getPointerTy(DAG.getDataLayout());
17769 SDValue Result = DAG.getTargetConstantPool(
17770 CP->getConstVal(), PtrVT, CP->getAlignment(), CP->getOffset(), OpFlag);
17771 SDLoc DL(CP);
17772 Result = DAG.getNode(getGlobalWrapperKind(), DL, PtrVT, Result);
17773 // With PIC, the address is actually $g + Offset.
17774 if (OpFlag) {
17775 Result =
17776 DAG.getNode(ISD::ADD, DL, PtrVT,
17777 DAG.getNode(X86ISD::GlobalBaseReg, SDLoc(), PtrVT), Result);
17778 }
17779
17780 return Result;
17781}
17782
17783SDValue X86TargetLowering::LowerJumpTable(SDValue Op, SelectionDAG &DAG) const {
17784 JumpTableSDNode *JT = cast<JumpTableSDNode>(Op);
17785
17786 // In PIC mode (unless we're in RIPRel PIC mode) we add an offset to the
17787 // global base reg.
17788 unsigned char OpFlag = Subtarget.classifyLocalReference(nullptr);
17789
17790 auto PtrVT = getPointerTy(DAG.getDataLayout());
17791 SDValue Result = DAG.getTargetJumpTable(JT->getIndex(), PtrVT, OpFlag);
17792 SDLoc DL(JT);
17793 Result = DAG.getNode(getGlobalWrapperKind(), DL, PtrVT, Result);
17794
17795 // With PIC, the address is actually $g + Offset.
17796 if (OpFlag)
17797 Result =
17798 DAG.getNode(ISD::ADD, DL, PtrVT,
17799 DAG.getNode(X86ISD::GlobalBaseReg, SDLoc(), PtrVT), Result);
17800
17801 return Result;
17802}
17803
17804SDValue X86TargetLowering::LowerExternalSymbol(SDValue Op,
17805 SelectionDAG &DAG) const {
17806 return LowerGlobalOrExternal(Op, DAG, /*ForCall=*/false);
17807}
17808
17809SDValue
17810X86TargetLowering::LowerBlockAddress(SDValue Op, SelectionDAG &DAG) const {
17811 // Create the TargetBlockAddressAddress node.
17812 unsigned char OpFlags =
17813 Subtarget.classifyBlockAddressReference();
17814 const BlockAddress *BA = cast<BlockAddressSDNode>(Op)->getBlockAddress();
17815 int64_t Offset = cast<BlockAddressSDNode>(Op)->getOffset();
17816 SDLoc dl(Op);
17817 auto PtrVT = getPointerTy(DAG.getDataLayout());
17818 SDValue Result = DAG.getTargetBlockAddress(BA, PtrVT, Offset, OpFlags);
17819 Result = DAG.getNode(getGlobalWrapperKind(), dl, PtrVT, Result);
17820
17821 // With PIC, the address is actually $g + Offset.
17822 if (isGlobalRelativeToPICBase(OpFlags)) {
17823 Result = DAG.getNode(ISD::ADD, dl, PtrVT,
17824 DAG.getNode(X86ISD::GlobalBaseReg, dl, PtrVT), Result);
17825 }
17826
17827 return Result;
17828}
17829
17830/// Creates target global address or external symbol nodes for calls or
17831/// other uses.
17832SDValue X86TargetLowering::LowerGlobalOrExternal(SDValue Op, SelectionDAG &DAG,
17833 bool ForCall) const {
17834 // Unpack the global address or external symbol.
17835 const SDLoc &dl = SDLoc(Op);
17836 const GlobalValue *GV = nullptr;
17837 int64_t Offset = 0;
17838 const char *ExternalSym = nullptr;
17839 if (const auto *G = dyn_cast<GlobalAddressSDNode>(Op)) {
17840 GV = G->getGlobal();
17841 Offset = G->getOffset();
17842 } else {
17843 const auto *ES = cast<ExternalSymbolSDNode>(Op);
17844 ExternalSym = ES->getSymbol();
17845 }
17846
17847 // Calculate some flags for address lowering.
17848 const Module &Mod = *DAG.getMachineFunction().getFunction().getParent();
17849 unsigned char OpFlags;
17850 if (ForCall)
17851 OpFlags = Subtarget.classifyGlobalFunctionReference(GV, Mod);
17852 else
17853 OpFlags = Subtarget.classifyGlobalReference(GV, Mod);
17854 bool HasPICReg = isGlobalRelativeToPICBase(OpFlags);
17855 bool NeedsLoad = isGlobalStubReference(OpFlags);
17856
17857 CodeModel::Model M = DAG.getTarget().getCodeModel();
17858 auto PtrVT = getPointerTy(DAG.getDataLayout());
17859 SDValue Result;
17860
17861 if (GV) {
17862 // Create a target global address if this is a global. If possible, fold the
17863 // offset into the global address reference. Otherwise, ADD it on later.
17864 int64_t GlobalOffset = 0;
17865 if (OpFlags == X86II::MO_NO_FLAG &&
17866 X86::isOffsetSuitableForCodeModel(Offset, M)) {
17867 std::swap(GlobalOffset, Offset);
17868 }
17869 Result = DAG.getTargetGlobalAddress(GV, dl, PtrVT, GlobalOffset, OpFlags);
17870 } else {
17871 // If this is not a global address, this must be an external symbol.
17872 Result = DAG.getTargetExternalSymbol(ExternalSym, PtrVT, OpFlags);
17873 }
17874
17875 // If this is a direct call, avoid the wrapper if we don't need to do any
17876 // loads or adds. This allows SDAG ISel to match direct calls.
17877 if (ForCall && !NeedsLoad && !HasPICReg && Offset == 0)
17878 return Result;
17879
17880 Result = DAG.getNode(getGlobalWrapperKind(GV, OpFlags), dl, PtrVT, Result);
17881
17882 // With PIC, the address is actually $g + Offset.
17883 if (HasPICReg) {
17884 Result = DAG.getNode(ISD::ADD, dl, PtrVT,
17885 DAG.getNode(X86ISD::GlobalBaseReg, dl, PtrVT), Result);
17886 }
17887
17888 // For globals that require a load from a stub to get the address, emit the
17889 // load.
17890 if (NeedsLoad)
17891 Result = DAG.getLoad(PtrVT, dl, DAG.getEntryNode(), Result,
17892 MachinePointerInfo::getGOT(DAG.getMachineFunction()));
17893
17894 // If there was a non-zero offset that we didn't fold, create an explicit
17895 // addition for it.
17896 if (Offset != 0)
17897 Result = DAG.getNode(ISD::ADD, dl, PtrVT, Result,
17898 DAG.getConstant(Offset, dl, PtrVT));
17899
17900 return Result;
17901}
17902
17903SDValue
17904X86TargetLowering::LowerGlobalAddress(SDValue Op, SelectionDAG &DAG) const {
17905 return LowerGlobalOrExternal(Op, DAG, /*ForCall=*/false);
17906}
17907
17908static SDValue
17909GetTLSADDR(SelectionDAG &DAG, SDValue Chain, GlobalAddressSDNode *GA,
17910 SDValue *InFlag, const EVT PtrVT, unsigned ReturnReg,
17911 unsigned char OperandFlags, bool LocalDynamic = false) {
17912 MachineFrameInfo &MFI = DAG.getMachineFunction().getFrameInfo();
17913 SDVTList NodeTys = DAG.getVTList(MVT::Other, MVT::Glue);
17914 SDLoc dl(GA);
17915 SDValue TGA = DAG.getTargetGlobalAddress(GA->getGlobal(), dl,
17916 GA->getValueType(0),
17917 GA->getOffset(),
17918 OperandFlags);
17919
17920 X86ISD::NodeType CallType = LocalDynamic ? X86ISD::TLSBASEADDR
17921 : X86ISD::TLSADDR;
17922
17923 if (InFlag) {
17924 SDValue Ops[] = { Chain, TGA, *InFlag };
17925 Chain = DAG.getNode(CallType, dl, NodeTys, Ops);
17926 } else {
17927 SDValue Ops[] = { Chain, TGA };
17928 Chain = DAG.getNode(CallType, dl, NodeTys, Ops);
17929 }
17930
17931 // TLSADDR will be codegen'ed as call. Inform MFI that function has calls.
17932 MFI.setAdjustsStack(true);
17933 MFI.setHasCalls(true);
17934
17935 SDValue Flag = Chain.getValue(1);
17936 return DAG.getCopyFromReg(Chain, dl, ReturnReg, PtrVT, Flag);
17937}
17938
17939// Lower ISD::GlobalTLSAddress using the "general dynamic" model, 32 bit
17940static SDValue
17941LowerToTLSGeneralDynamicModel32(GlobalAddressSDNode *GA, SelectionDAG &DAG,
17942 const EVT PtrVT) {
17943 SDValue InFlag;
17944 SDLoc dl(GA); // ? function entry point might be better
17945 SDValue Chain = DAG.getCopyToReg(DAG.getEntryNode(), dl, X86::EBX,
17946 DAG.getNode(X86ISD::GlobalBaseReg,
17947 SDLoc(), PtrVT), InFlag);
17948 InFlag = Chain.getValue(1);
17949
17950 return GetTLSADDR(DAG, Chain, GA, &InFlag, PtrVT, X86::EAX, X86II::MO_TLSGD);
17951}
17952
17953// Lower ISD::GlobalTLSAddress using the "general dynamic" model, 64 bit
17954static SDValue
17955LowerToTLSGeneralDynamicModel64(GlobalAddressSDNode *GA, SelectionDAG &DAG,
17956 const EVT PtrVT) {
17957 return GetTLSADDR(DAG, DAG.getEntryNode(), GA, nullptr, PtrVT,
17958 X86::RAX, X86II::MO_TLSGD);
17959}
17960
17961static SDValue LowerToTLSLocalDynamicModel(GlobalAddressSDNode *GA,
17962 SelectionDAG &DAG,
17963 const EVT PtrVT,
17964 bool is64Bit) {
17965 SDLoc dl(GA);
17966
17967 // Get the start address of the TLS block for this module.
17968 X86MachineFunctionInfo *MFI = DAG.getMachineFunction()
17969 .getInfo<X86MachineFunctionInfo>();
17970 MFI->incNumLocalDynamicTLSAccesses();
17971
17972 SDValue Base;
17973 if (is64Bit) {
17974 Base = GetTLSADDR(DAG, DAG.getEntryNode(), GA, nullptr, PtrVT, X86::RAX,
17975 X86II::MO_TLSLD, /*LocalDynamic=*/true);
17976 } else {
17977 SDValue InFlag;
17978 SDValue Chain = DAG.getCopyToReg(DAG.getEntryNode(), dl, X86::EBX,
17979 DAG.getNode(X86ISD::GlobalBaseReg, SDLoc(), PtrVT), InFlag);
17980 InFlag = Chain.getValue(1);
17981 Base = GetTLSADDR(DAG, Chain, GA, &InFlag, PtrVT, X86::EAX,
17982 X86II::MO_TLSLDM, /*LocalDynamic=*/true);
17983 }
17984
17985 // Note: the CleanupLocalDynamicTLSPass will remove redundant computations
17986 // of Base.
17987
17988 // Build x@dtpoff.
17989 unsigned char OperandFlags = X86II::MO_DTPOFF;
17990 unsigned WrapperKind = X86ISD::Wrapper;
17991 SDValue TGA = DAG.getTargetGlobalAddress(GA->getGlobal(), dl,
17992 GA->getValueType(0),
17993 GA->getOffset(), OperandFlags);
17994 SDValue Offset = DAG.getNode(WrapperKind, dl, PtrVT, TGA);
17995
17996 // Add x@dtpoff with the base.
17997 return DAG.getNode(ISD::ADD, dl, PtrVT, Offset, Base);
17998}
17999
18000// Lower ISD::GlobalTLSAddress using the "initial exec" or "local exec" model.
18001static SDValue LowerToTLSExecModel(GlobalAddressSDNode *GA, SelectionDAG &DAG,
18002 const EVT PtrVT, TLSModel::Model model,
18003 bool is64Bit, bool isPIC) {
18004 SDLoc dl(GA);
18005
18006 // Get the Thread Pointer, which is %gs:0 (32-bit) or %fs:0 (64-bit).
18007 Value *Ptr = Constant::getNullValue(Type::getInt8PtrTy(*DAG.getContext(),
18008 is64Bit ? 257 : 256));
18009
18010 SDValue ThreadPointer =
18011 DAG.getLoad(PtrVT, dl, DAG.getEntryNode(), DAG.getIntPtrConstant(0, dl),
18012 MachinePointerInfo(Ptr));
18013
18014 unsigned char OperandFlags = 0;
18015 // Most TLS accesses are not RIP relative, even on x86-64. One exception is
18016 // initial exec.
18017 unsigned WrapperKind = X86ISD::Wrapper;
18018 if (model == TLSModel::LocalExec) {
18019 OperandFlags = is64Bit ? X86II::MO_TPOFF : X86II::MO_NTPOFF;
18020 } else if (model == TLSModel::InitialExec) {
18021 if (is64Bit) {
18022 OperandFlags = X86II::MO_GOTTPOFF;
18023 WrapperKind = X86ISD::WrapperRIP;
18024 } else {
18025 OperandFlags = isPIC ? X86II::MO_GOTNTPOFF : X86II::MO_INDNTPOFF;
18026 }
18027 } else {
18028 llvm_unreachable("Unexpected model");
18029 }
18030
18031 // emit "addl x@ntpoff,%eax" (local exec)
18032 // or "addl x@indntpoff,%eax" (initial exec)
18033 // or "addl x@gotntpoff(%ebx) ,%eax" (initial exec, 32-bit pic)
18034 SDValue TGA =
18035 DAG.getTargetGlobalAddress(GA->getGlobal(), dl, GA->getValueType(0),
18036 GA->getOffset(), OperandFlags);
18037 SDValue Offset = DAG.getNode(WrapperKind, dl, PtrVT, TGA);
18038
18039 if (model == TLSModel::InitialExec) {
18040 if (isPIC && !is64Bit) {
18041 Offset = DAG.getNode(ISD::ADD, dl, PtrVT,
18042 DAG.getNode(X86ISD::GlobalBaseReg, SDLoc(), PtrVT),
18043 Offset);
18044 }
18045
18046 Offset = DAG.getLoad(PtrVT, dl, DAG.getEntryNode(), Offset,
18047 MachinePointerInfo::getGOT(DAG.getMachineFunction()));
18048 }
18049
18050 // The address of the thread local variable is the add of the thread
18051 // pointer with the offset of the variable.
18052 return DAG.getNode(ISD::ADD, dl, PtrVT, ThreadPointer, Offset);
18053}
18054
18055SDValue
18056X86TargetLowering::LowerGlobalTLSAddress(SDValue Op, SelectionDAG &DAG) const {
18057
18058 GlobalAddressSDNode *GA = cast<GlobalAddressSDNode>(Op);
18059
18060 if (DAG.getTarget().useEmulatedTLS())
18061 return LowerToTLSEmulatedModel(GA, DAG);
18062
18063 const GlobalValue *GV = GA->getGlobal();
18064 auto PtrVT = getPointerTy(DAG.getDataLayout());
18065 bool PositionIndependent = isPositionIndependent();
18066
18067 if (Subtarget.isTargetELF()) {
18068 TLSModel::Model model = DAG.getTarget().getTLSModel(GV);
18069 switch (model) {
18070 case TLSModel::GeneralDynamic:
18071 if (Subtarget.is64Bit())
18072 return LowerToTLSGeneralDynamicModel64(GA, DAG, PtrVT);
18073 return LowerToTLSGeneralDynamicModel32(GA, DAG, PtrVT);
18074 case TLSModel::LocalDynamic:
18075 return LowerToTLSLocalDynamicModel(GA, DAG, PtrVT,
18076 Subtarget.is64Bit());
18077 case TLSModel::InitialExec:
18078 case TLSModel::LocalExec:
18079 return LowerToTLSExecModel(GA, DAG, PtrVT, model, Subtarget.is64Bit(),
18080 PositionIndependent);
18081 }
18082 llvm_unreachable("Unknown TLS model.");
18083 }
18084
18085 if (Subtarget.isTargetDarwin()) {
18086 // Darwin only has one model of TLS. Lower to that.
18087 unsigned char OpFlag = 0;
18088 unsigned WrapperKind = Subtarget.isPICStyleRIPRel() ?
18089 X86ISD::WrapperRIP : X86ISD::Wrapper;
18090
18091 // In PIC mode (unless we're in RIPRel PIC mode) we add an offset to the
18092 // global base reg.
18093 bool PIC32 = PositionIndependent && !Subtarget.is64Bit();
18094 if (PIC32)
18095 OpFlag = X86II::MO_TLVP_PIC_BASE;
18096 else
18097 OpFlag = X86II::MO_TLVP;
18098 SDLoc DL(Op);
18099 SDValue Result = DAG.getTargetGlobalAddress(GA->getGlobal(), DL,
18100 GA->getValueType(0),
18101 GA->getOffset(), OpFlag);
18102 SDValue Offset = DAG.getNode(WrapperKind, DL, PtrVT, Result);
18103
18104 // With PIC32, the address is actually $g + Offset.
18105 if (PIC32)
18106 Offset = DAG.getNode(ISD::ADD, DL, PtrVT,
18107 DAG.getNode(X86ISD::GlobalBaseReg, SDLoc(), PtrVT),
18108 Offset);
18109
18110 // Lowering the machine isd will make sure everything is in the right
18111 // location.
18112 SDValue Chain = DAG.getEntryNode();
18113 SDVTList NodeTys = DAG.getVTList(MVT::Other, MVT::Glue);
18114 Chain = DAG.getCALLSEQ_START(Chain, 0, 0, DL);
18115 SDValue Args[] = { Chain, Offset };
18116 Chain = DAG.getNode(X86ISD::TLSCALL, DL, NodeTys, Args);
18117 Chain = DAG.getCALLSEQ_END(Chain, DAG.getIntPtrConstant(0, DL, true),
18118 DAG.getIntPtrConstant(0, DL, true),
18119 Chain.getValue(1), DL);
18120
18121 // TLSCALL will be codegen'ed as call. Inform MFI that function has calls.
18122 MachineFrameInfo &MFI = DAG.getMachineFunction().getFrameInfo();
18123 MFI.setAdjustsStack(true);
18124
18125 // And our return value (tls address) is in the standard call return value
18126 // location.
18127 unsigned Reg = Subtarget.is64Bit() ? X86::RAX : X86::EAX;
18128 return DAG.getCopyFromReg(Chain, DL, Reg, PtrVT, Chain.getValue(1));
18129 }
18130
18131 if (Subtarget.isOSWindows()) {
18132 // Just use the implicit TLS architecture
18133 // Need to generate something similar to:
18134 // mov rdx, qword [gs:abs 58H]; Load pointer to ThreadLocalStorage
18135 // ; from TEB
18136 // mov ecx, dword [rel _tls_index]: Load index (from C runtime)
18137 // mov rcx, qword [rdx+rcx*8]
18138 // mov eax, .tls$:tlsvar
18139 // [rax+rcx] contains the address
18140 // Windows 64bit: gs:0x58
18141 // Windows 32bit: fs:__tls_array
18142
18143 SDLoc dl(GA);
18144 SDValue Chain = DAG.getEntryNode();
18145
18146 // Get the Thread Pointer, which is %fs:__tls_array (32-bit) or
18147 // %gs:0x58 (64-bit). On MinGW, __tls_array is not available, so directly
18148 // use its literal value of 0x2C.
18149 Value *Ptr = Constant::getNullValue(Subtarget.is64Bit()
18150 ? Type::getInt8PtrTy(*DAG.getContext(),
18151 256)
18152 : Type::getInt32PtrTy(*DAG.getContext(),
18153 257));
18154
18155 SDValue TlsArray = Subtarget.is64Bit()
18156 ? DAG.getIntPtrConstant(0x58, dl)
18157 : (Subtarget.isTargetWindowsGNU()
18158 ? DAG.getIntPtrConstant(0x2C, dl)
18159 : DAG.getExternalSymbol("_tls_array", PtrVT));
18160
18161 SDValue ThreadPointer =
18162 DAG.getLoad(PtrVT, dl, Chain, TlsArray, MachinePointerInfo(Ptr));
18163
18164 SDValue res;
18165 if (GV->getThreadLocalMode() == GlobalVariable::LocalExecTLSModel) {
18166 res = ThreadPointer;
18167 } else {
18168 // Load the _tls_index variable
18169 SDValue IDX = DAG.getExternalSymbol("_tls_index", PtrVT);
18170 if (Subtarget.is64Bit())
18171 IDX = DAG.getExtLoad(ISD::ZEXTLOAD, dl, PtrVT, Chain, IDX,
18172 MachinePointerInfo(), MVT::i32);
18173 else
18174 IDX = DAG.getLoad(PtrVT, dl, Chain, IDX, MachinePointerInfo());
18175
18176 auto &DL = DAG.getDataLayout();
18177 SDValue Scale =
18178 DAG.getConstant(Log2_64_Ceil(DL.getPointerSize()), dl, MVT::i8);
18179 IDX = DAG.getNode(ISD::SHL, dl, PtrVT, IDX, Scale);
18180
18181 res = DAG.getNode(ISD::ADD, dl, PtrVT, ThreadPointer, IDX);
18182 }
18183
18184 res = DAG.getLoad(PtrVT, dl, Chain, res, MachinePointerInfo());
18185
18186 // Get the offset of start of .tls section
18187 SDValue TGA = DAG.getTargetGlobalAddress(GA->getGlobal(), dl,
18188 GA->getValueType(0),
18189 GA->getOffset(), X86II::MO_SECREL);
18190 SDValue Offset = DAG.getNode(X86ISD::Wrapper, dl, PtrVT, TGA);
18191
18192 // The address of the thread local variable is the add of the thread
18193 // pointer with the offset of the variable.
18194 return DAG.getNode(ISD::ADD, dl, PtrVT, res, Offset);
18195 }
18196
18197 llvm_unreachable("TLS not implemented for this target.");
18198}
18199
18200/// Lower SRA_PARTS and friends, which return two i32 values
18201/// and take a 2 x i32 value to shift plus a shift amount.
18202/// TODO: Can this be moved to general expansion code?
18203static SDValue LowerShiftParts(SDValue Op, SelectionDAG &DAG) {
18204 assert(Op.getNumOperands() == 3 && "Not a double-shift!");
18205 MVT VT = Op.getSimpleValueType();
18206 unsigned VTBits = VT.getSizeInBits();
18207 SDLoc dl(Op);
18208 bool isSRA = Op.getOpcode() == ISD::SRA_PARTS;
18209 SDValue ShOpLo = Op.getOperand(0);
18210 SDValue ShOpHi = Op.getOperand(1);
18211 SDValue ShAmt = Op.getOperand(2);
18212 // ISD::FSHL and ISD::FSHR have defined overflow behavior but ISD::SHL and
18213 // ISD::SRA/L nodes haven't. Insert an AND to be safe, it's optimized away
18214 // during isel.
18215 SDValue SafeShAmt = DAG.getNode(ISD::AND, dl, MVT::i8, ShAmt,
18216 DAG.getConstant(VTBits - 1, dl, MVT::i8));
18217 SDValue Tmp1 = isSRA ? DAG.getNode(ISD::SRA, dl, VT, ShOpHi,
18218 DAG.getConstant(VTBits - 1, dl, MVT::i8))
18219 : DAG.getConstant(0, dl, VT);
18220
18221 SDValue Tmp2, Tmp3;
18222 if (Op.getOpcode() == ISD::SHL_PARTS) {
18223 Tmp2 = DAG.getNode(ISD::FSHL, dl, VT, ShOpHi, ShOpLo, ShAmt);
18224 Tmp3 = DAG.getNode(ISD::SHL, dl, VT, ShOpLo, SafeShAmt);
18225 } else {
18226 Tmp2 = DAG.getNode(ISD::FSHR, dl, VT, ShOpHi, ShOpLo, ShAmt);
18227 Tmp3 = DAG.getNode(isSRA ? ISD::SRA : ISD::SRL, dl, VT, ShOpHi, SafeShAmt);
18228 }
18229
18230 // If the shift amount is larger or equal than the width of a part we can't
18231 // rely on the results of shld/shrd. Insert a test and select the appropriate
18232 // values for large shift amounts.
18233 SDValue AndNode = DAG.getNode(ISD::AND, dl, MVT::i8, ShAmt,
18234 DAG.getConstant(VTBits, dl, MVT::i8));
18235 SDValue Cond = DAG.getSetCC(dl, MVT::i8, AndNode,
18236 DAG.getConstant(0, dl, MVT::i8), ISD::SETNE);
18237
18238 SDValue Hi, Lo;
18239 if (Op.getOpcode() == ISD::SHL_PARTS) {
18240 Hi = DAG.getNode(ISD::SELECT, dl, VT, Cond, Tmp3, Tmp2);
18241 Lo = DAG.getNode(ISD::SELECT, dl, VT, Cond, Tmp1, Tmp3);
18242 } else {
18243 Lo = DAG.getNode(ISD::SELECT, dl, VT, Cond, Tmp3, Tmp2);
18244 Hi = DAG.getNode(ISD::SELECT, dl, VT, Cond, Tmp1, Tmp3);
18245 }
18246
18247 return DAG.getMergeValues({ Lo, Hi }, dl);
18248}
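// [Annotation] Illustrative sketch, not part of X86ISelLowering.cpp: scalar
// model of the SHL_PARTS path above for 32-bit parts. The funnel shift covers
// amounts 0..31, and the extra test handles amounts >= 32, where shld/shrd
// results cannot be relied on. Names are made up.
#include <cstdint>

static void shlParts32(uint32_t Lo, uint32_t Hi, unsigned Amt,
                       uint32_t &OutLo, uint32_t &OutHi) {
  unsigned Safe = Amt & 31;                                        // SafeShAmt
  uint32_t Tmp2 = Safe ? (Hi << Safe) | (Lo >> (32 - Safe)) : Hi;  // FSHL
  uint32_t Tmp3 = Lo << Safe;                                      // SHL by SafeShAmt
  if (Amt & 32) {   // Cond: shift amount >= width of a part
    OutHi = Tmp3;
    OutLo = 0;      // Tmp1 is zero for SHL_PARTS
  } else {
    OutHi = Tmp2;
    OutLo = Tmp3;
  }
}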
18249
18250static SDValue LowerFunnelShift(SDValue Op, const X86Subtarget &Subtarget,
18251 SelectionDAG &DAG) {
18252 MVT VT = Op.getSimpleValueType();
18253 assert((Op.getOpcode() == ISD::FSHL || Op.getOpcode() == ISD::FSHR) &&
18254        "Unexpected funnel shift opcode!");
18255
18256 SDLoc DL(Op);
18257 SDValue Op0 = Op.getOperand(0);
18258 SDValue Op1 = Op.getOperand(1);
18259 SDValue Amt = Op.getOperand(2);
18260
18261 bool IsFSHR = Op.getOpcode() == ISD::FSHR;
18262
18263 if (VT.isVector()) {
18264 assert(Subtarget.hasVBMI2() && "Expected VBMI2");
18265
18266 if (IsFSHR)
18267 std::swap(Op0, Op1);
18268
18269 APInt APIntShiftAmt;
18270 if (X86::isConstantSplat(Amt, APIntShiftAmt)) {
18271 uint64_t ShiftAmt = APIntShiftAmt.urem(VT.getScalarSizeInBits());
18272 return DAG.getNode(IsFSHR ? X86ISD::VSHRD : X86ISD::VSHLD, DL, VT, Op0,
18273 Op1, DAG.getTargetConstant(ShiftAmt, DL, MVT::i8));
18274 }
18275
18276 return DAG.getNode(IsFSHR ? X86ISD::VSHRDV : X86ISD::VSHLDV, DL, VT,
18277 Op0, Op1, Amt);
18278 }
18279
18280 assert((VT == MVT::i16 || VT == MVT::i32 || VT == MVT::i64) &&
18281        "Unexpected funnel shift type!");
18282
18283 // Expand slow SHLD/SHRD cases if we are not optimizing for size.
18284 bool OptForSize = DAG.getMachineFunction().getFunction().hasOptSize();
18285 if (!OptForSize && Subtarget.isSHLDSlow())
18286 return SDValue();
18287
18288 if (IsFSHR)
18289 std::swap(Op0, Op1);
18290
18291 // i16 needs to modulo the shift amount, but i32/i64 have implicit modulo.
18292 if (VT == MVT::i16)
18293 Amt = DAG.getNode(ISD::AND, DL, Amt.getValueType(), Amt,
18294 DAG.getConstant(15, DL, Amt.getValueType()));
18295
18296 unsigned SHDOp = (IsFSHR ? X86ISD::SHRD : X86ISD::SHLD);
18297 return DAG.getNode(SHDOp, DL, VT, Op0, Op1, Amt);
18298}
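// [Annotation] Illustrative sketch, not part of X86ISelLowering.cpp:
// reference semantics for the scalar i16 funnel shift handled above. Unlike
// i32/i64, the hardware does not reduce a 16-bit SHLD count modulo 16, which
// is why the lowering ANDs the amount with 15. Function name is made up.
#include <cstdint>

static uint16_t fshl16(uint16_t Hi, uint16_t Lo, unsigned Amt) {
  Amt &= 15;                          // i16 shift amounts wrap modulo 16
  if (Amt == 0)
    return Hi;                        // avoid the undefined 16-bit shift below
  return static_cast<uint16_t>((Hi << Amt) | (Lo >> (16 - Amt)));
}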
18299
18300// Try to use a packed vector operation to handle i64 on 32-bit targets when
18301// AVX512DQ is enabled.
18302static SDValue LowerI64IntToFP_AVX512DQ(SDValue Op, SelectionDAG &DAG,
18303 const X86Subtarget &Subtarget) {
18304 assert((Op.getOpcode() == ISD::SINT_TO_FP ||
18305         Op.getOpcode() == ISD::UINT_TO_FP) && "Unexpected opcode!");
18306 SDValue Src = Op.getOperand(0);
18307 MVT SrcVT = Src.getSimpleValueType();
18308 MVT VT = Op.getSimpleValueType();
18309
18310 if (!Subtarget.hasDQI() || SrcVT != MVT::i64 || Subtarget.is64Bit() ||
18311 (VT != MVT::f32 && VT != MVT::f64))
18312 return SDValue();
18313
18314 // Pack the i64 into a vector, do the operation and extract.
18315
18316 // Using 256-bit to ensure result is 128-bits for f32 case.
18317 unsigned NumElts = Subtarget.hasVLX() ? 4 : 8;
18318 MVT VecInVT = MVT::getVectorVT(MVT::i64, NumElts);
18319 MVT VecVT = MVT::getVectorVT(VT, NumElts);
18320
18321 SDLoc dl(Op);
18322 SDValue InVec = DAG.getNode(ISD::SCALAR_TO_VECTOR, dl, VecInVT, Src);
18323 SDValue CvtVec = DAG.getNode(Op.getOpcode(), dl, VecVT, InVec);
18324 return DAG.getNode(ISD::EXTRACT_VECTOR_ELT, dl, VT, CvtVec,
18325 DAG.getIntPtrConstant(0, dl));
18326}
18327
18328static bool useVectorCast(unsigned Opcode, MVT FromVT, MVT ToVT,
18329 const X86Subtarget &Subtarget) {
18330 switch (Opcode) {
18331 case ISD::SINT_TO_FP:
18332 // TODO: Handle wider types with AVX/AVX512.
18333 if (!Subtarget.hasSSE2() || FromVT != MVT::v4i32)
18334 return false;
18335 // CVTDQ2PS or (V)CVTDQ2PD
18336 return ToVT == MVT::v4f32 || (Subtarget.hasAVX() && ToVT == MVT::v4f64);
18337
18338 case ISD::UINT_TO_FP:
18339 // TODO: Handle wider types and i64 elements.
18340 if (!Subtarget.hasAVX512() || FromVT != MVT::v4i32)
18341 return false;
18342 // VCVTUDQ2PS or VCVTUDQ2PD
18343 return ToVT == MVT::v4f32 || ToVT == MVT::v4f64;
18344
18345 default:
18346 return false;
18347 }
18348}
18349
18350/// Given a scalar cast operation that is extracted from a vector, try to
18351/// vectorize the cast op followed by extraction. This will avoid an expensive
18352/// round-trip between XMM and GPR.
18353static SDValue vectorizeExtractedCast(SDValue Cast, SelectionDAG &DAG,
18354 const X86Subtarget &Subtarget) {
18355 // TODO: This could be enhanced to handle smaller integer types by peeking
18356 // through an extend.
18357 SDValue Extract = Cast.getOperand(0);
18358 MVT DestVT = Cast.getSimpleValueType();
18359 if (Extract.getOpcode() != ISD::EXTRACT_VECTOR_ELT ||
18360 !isa<ConstantSDNode>(Extract.getOperand(1)))
18361 return SDValue();
18362
18363 // See if we have a 128-bit vector cast op for this type of cast.
18364 SDValue VecOp = Extract.getOperand(0);
18365 MVT FromVT = VecOp.getSimpleValueType();
18366 unsigned NumEltsInXMM = 128 / FromVT.getScalarSizeInBits();
18367 MVT Vec128VT = MVT::getVectorVT(FromVT.getScalarType(), NumEltsInXMM);
18368 MVT ToVT = MVT::getVectorVT(DestVT, NumEltsInXMM);
18369 if (!useVectorCast(Cast.getOpcode(), Vec128VT, ToVT, Subtarget))
18370 return SDValue();
18371
18372 // If we are extracting from a non-zero element, first shuffle the source
18373 // vector to allow extracting from element zero.
18374 SDLoc DL(Cast);
18375 if (!isNullConstant(Extract.getOperand(1))) {
18376 SmallVector<int, 16> Mask(FromVT.getVectorNumElements(), -1);
18377 Mask[0] = Extract.getConstantOperandVal(1);
18378 VecOp = DAG.getVectorShuffle(FromVT, DL, VecOp, DAG.getUNDEF(FromVT), Mask);
18379 }
18380 // If the source vector is wider than 128-bits, extract the low part. Do not
18381 // create an unnecessarily wide vector cast op.
18382 if (FromVT != Vec128VT)
18383 VecOp = extract128BitVector(VecOp, 0, DAG, DL);
18384
18385 // cast (extelt V, 0) --> extelt (cast (extract_subv V)), 0
18386 // cast (extelt V, C) --> extelt (cast (extract_subv (shuffle V, [C...]))), 0
18387 SDValue VCast = DAG.getNode(Cast.getOpcode(), DL, ToVT, VecOp);
18388 return DAG.getNode(ISD::EXTRACT_VECTOR_ELT, DL, DestVT, VCast,
18389 DAG.getIntPtrConstant(0, DL));
18390}
18391
18392SDValue X86TargetLowering::LowerSINT_TO_FP(SDValue Op,
18393 SelectionDAG &DAG) const {
18394 SDValue Src = Op.getOperand(0);
18395 MVT SrcVT = Src.getSimpleValueType();
18396 MVT VT = Op.getSimpleValueType();
18397 SDLoc dl(Op);
18398
18399 if (VT == MVT::f128)
18400 return LowerF128Call(Op, DAG, RTLIB::getSINTTOFP(SrcVT, VT));
18401
18402 if (SDValue Extract = vectorizeExtractedCast(Op, DAG, Subtarget))
18403 return Extract;
18404
18405 if (SrcVT.isVector()) {
18406 if (SrcVT == MVT::v2i32 && VT == MVT::v2f64) {
18407 return DAG.getNode(X86ISD::CVTSI2P, dl, VT,
18408 DAG.getNode(ISD::CONCAT_VECTORS, dl, MVT::v4i32, Src,
18409 DAG.getUNDEF(SrcVT)));
18410 }
18411 return SDValue();
18412 }
18413
18414 assert(SrcVT <= MVT::i64 && SrcVT >= MVT::i16 &&
18415        "Unknown SINT_TO_FP to lower!");
18416
18417 // These are really Legal; return the operand so the caller accepts it as
18418 // Legal.
18419 if (SrcVT == MVT::i32 && isScalarFPTypeInSSEReg(VT))
18420 return Op;
18421 if (SrcVT == MVT::i64 && isScalarFPTypeInSSEReg(VT) && Subtarget.is64Bit())
18422 return Op;
18423
18424 if (SDValue V = LowerI64IntToFP_AVX512DQ(Op, DAG, Subtarget))
18425 return V;
18426
18427 SDValue ValueToStore = Op.getOperand(0);
18428 if (SrcVT == MVT::i64 && isScalarFPTypeInSSEReg(VT) &&
18429 !Subtarget.is64Bit())
18430 // Bitcasting to f64 here allows us to do a single 64-bit store from
18431 // an SSE register, avoiding the store forwarding penalty that would come
18432 // with two 32-bit stores.
18433 ValueToStore = DAG.getBitcast(MVT::f64, ValueToStore);
18434
18435 unsigned Size = SrcVT.getSizeInBits()/8;
18436 MachineFunction &MF = DAG.getMachineFunction();
18437 auto PtrVT = getPointerTy(MF.getDataLayout());
18438 int SSFI = MF.getFrameInfo().CreateStackObject(Size, Size, false);
18439 SDValue StackSlot = DAG.getFrameIndex(SSFI, PtrVT);
18440 SDValue Chain = DAG.getStore(
18441 DAG.getEntryNode(), dl, ValueToStore, StackSlot,
18442 MachinePointerInfo::getFixedStack(DAG.getMachineFunction(), SSFI));
18443 return BuildFILD(Op, SrcVT, Chain, StackSlot, DAG);
18444}
18445
18446SDValue X86TargetLowering::BuildFILD(SDValue Op, EVT SrcVT, SDValue Chain,
18447 SDValue StackSlot,
18448 SelectionDAG &DAG) const {
18449 // Build the FILD
18450 SDLoc DL(Op);
18451 SDVTList Tys;
18452 bool useSSE = isScalarFPTypeInSSEReg(Op.getValueType());
18453 if (useSSE)
18454 Tys = DAG.getVTList(MVT::f64, MVT::Other, MVT::Glue);
18455 else
18456 Tys = DAG.getVTList(Op.getValueType(), MVT::Other);
18457
18458 unsigned ByteSize = SrcVT.getSizeInBits() / 8;
18459
18460 FrameIndexSDNode *FI = dyn_cast<FrameIndexSDNode>(StackSlot);
18461 MachineMemOperand *LoadMMO;
18462 if (FI) {
18463 int SSFI = FI->getIndex();
18464 LoadMMO = DAG.getMachineFunction().getMachineMemOperand(
18465 MachinePointerInfo::getFixedStack(DAG.getMachineFunction(), SSFI),
18466 MachineMemOperand::MOLoad, ByteSize, ByteSize);
18467 } else {
18468 LoadMMO = cast<LoadSDNode>(StackSlot)->getMemOperand();
18469 StackSlot = StackSlot.getOperand(1);
18470 }
18471 SDValue FILDOps[] = {Chain, StackSlot};
18472 SDValue Result =
18473 DAG.getMemIntrinsicNode(useSSE ? X86ISD::FILD_FLAG : X86ISD::FILD, DL,
18474 Tys, FILDOps, SrcVT, LoadMMO);
18475
18476 if (useSSE) {
18477 Chain = Result.getValue(1);
18478 SDValue InFlag = Result.getValue(2);
18479
18480 // FIXME: Currently the FST is glued to the FILD_FLAG. This
18481 // shouldn't be necessary except that RFP cannot be live across
18482 // multiple blocks. When stackifier is fixed, they can be uncoupled.
18483 MachineFunction &MF = DAG.getMachineFunction();
18484 unsigned SSFISize = Op.getValueSizeInBits() / 8;
18485 int SSFI = MF.getFrameInfo().CreateStackObject(SSFISize, SSFISize, false);
18486 auto PtrVT = getPointerTy(MF.getDataLayout());
18487 SDValue StackSlot = DAG.getFrameIndex(SSFI, PtrVT);
18488 Tys = DAG.getVTList(MVT::Other);
18489 SDValue FSTOps[] = {Chain, Result, StackSlot, InFlag};
18490 MachineMemOperand *StoreMMO = DAG.getMachineFunction().getMachineMemOperand(
18491 MachinePointerInfo::getFixedStack(DAG.getMachineFunction(), SSFI),
18492 MachineMemOperand::MOStore, SSFISize, SSFISize);
18493
18494 Chain = DAG.getMemIntrinsicNode(X86ISD::FST, DL, Tys, FSTOps,
18495 Op.getValueType(), StoreMMO);
18496 Result = DAG.getLoad(
18497 Op.getValueType(), DL, Chain, StackSlot,
18498 MachinePointerInfo::getFixedStack(DAG.getMachineFunction(), SSFI));
18499 }
18500
18501 return Result;
18502}
18503
18504/// 64-bit unsigned integer to double expansion.
18505static SDValue LowerUINT_TO_FP_i64(SDValue Op, SelectionDAG &DAG,
18506 const X86Subtarget &Subtarget) {
18507 // This algorithm is not obvious. Here it is what we're trying to output:
18508 /*
18509 movq %rax, %xmm0
18510 punpckldq (c0), %xmm0 // c0: (uint4){ 0x43300000U, 0x45300000U, 0U, 0U }
18511 subpd (c1), %xmm0 // c1: (double2){ 0x1.0p52, 0x1.0p52 * 0x1.0p32 }
18512 #ifdef __SSE3__
18513 haddpd %xmm0, %xmm0
18514 #else
18515 pshufd $0x4e, %xmm0, %xmm1
18516 addpd %xmm1, %xmm0
18517 #endif
18518 */
18519
18520 SDLoc dl(Op);
18521 LLVMContext *Context = DAG.getContext();
18522
18523 // Build some magic constants.
18524 static const uint32_t CV0[] = { 0x43300000, 0x45300000, 0, 0 };
18525 Constant *C0 = ConstantDataVector::get(*Context, CV0);
18526 auto PtrVT = DAG.getTargetLoweringInfo().getPointerTy(DAG.getDataLayout());
18527 SDValue CPIdx0 = DAG.getConstantPool(C0, PtrVT, 16);
18528
18529 SmallVector<Constant*,2> CV1;
18530 CV1.push_back(
18531 ConstantFP::get(*Context, APFloat(APFloat::IEEEdouble(),
18532 APInt(64, 0x4330000000000000ULL))));
18533 CV1.push_back(
18534 ConstantFP::get(*Context, APFloat(APFloat::IEEEdouble(),
18535 APInt(64, 0x4530000000000000ULL))));
18536 Constant *C1 = ConstantVector::get(CV1);
18537 SDValue CPIdx1 = DAG.getConstantPool(C1, PtrVT, 16);
18538
18539 // Load the 64-bit value into an XMM register.
18540 SDValue XR1 = DAG.getNode(ISD::SCALAR_TO_VECTOR, dl, MVT::v2i64,
18541 Op.getOperand(0));
18542 SDValue CLod0 =
18543 DAG.getLoad(MVT::v4i32, dl, DAG.getEntryNode(), CPIdx0,
18544 MachinePointerInfo::getConstantPool(DAG.getMachineFunction()),
18545 /* Alignment = */ 16);
18546 SDValue Unpck1 =
18547 getUnpackl(DAG, dl, MVT::v4i32, DAG.getBitcast(MVT::v4i32, XR1), CLod0);
18548
18549 SDValue CLod1 =
18550 DAG.getLoad(MVT::v2f64, dl, CLod0.getValue(1), CPIdx1,
18551 MachinePointerInfo::getConstantPool(DAG.getMachineFunction()),
18552 /* Alignment = */ 16);
18553 SDValue XR2F = DAG.getBitcast(MVT::v2f64, Unpck1);
18554 // TODO: Are there any fast-math-flags to propagate here?
18555 SDValue Sub = DAG.getNode(ISD::FSUB, dl, MVT::v2f64, XR2F, CLod1);
18556 SDValue Result;
18557
18558 if (Subtarget.hasSSE3()) {
18559 // FIXME: The 'haddpd' instruction may be slower than 'shuffle + addsd'.
18560 Result = DAG.getNode(X86ISD::FHADD, dl, MVT::v2f64, Sub, Sub);
18561 } else {
18562 SDValue Shuffle = DAG.getVectorShuffle(MVT::v2f64, dl, Sub, Sub, {1,-1});
18563 Result = DAG.getNode(ISD::FADD, dl, MVT::v2f64, Shuffle, Sub);
18564 }
18565
18566 return DAG.getNode(ISD::EXTRACT_VECTOR_ELT, dl, MVT::f64, Result,
18567 DAG.getIntPtrConstant(0, dl));
18568}
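// [Annotation] Illustrative sketch, not part of X86ISelLowering.cpp: scalar
// model of the punpckldq/subpd algorithm sketched in the comment at the top
// of LowerUINT_TO_FP_i64. The two 32-bit halves of the input are spliced into
// the mantissas of 2^52 and 2^84, the constants are subtracted, and the two
// pieces are added (the haddpd / shuffle+addpd step). Name is made up.
#include <cstdint>
#include <cstring>

static double u64ToDouble(uint64_t X) {
  uint64_t LoBits = 0x4330000000000000ULL | (X & 0xFFFFFFFFULL); // 2^52 + lo
  uint64_t HiBits = 0x4530000000000000ULL | (X >> 32);           // 2^84 + hi * 2^32
  double Lo, Hi;
  std::memcpy(&Lo, &LoBits, sizeof(Lo));
  std::memcpy(&Hi, &HiBits, sizeof(Hi));
  // Both subtractions are exact; only the final addition rounds.
  return (Hi - 0x1.0p84) + (Lo - 0x1.0p52);
}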
18569
18570/// 32-bit unsigned integer to float expansion.
18571static SDValue LowerUINT_TO_FP_i32(SDValue Op, SelectionDAG &DAG,
18572 const X86Subtarget &Subtarget) {
18573 SDLoc dl(Op);
18574 // FP constant to bias correct the final result.
18575 SDValue Bias = DAG.getConstantFP(BitsToDouble(0x4330000000000000ULL), dl,
18576 MVT::f64);
18577
18578 // Load the 32-bit value into an XMM register.
18579 SDValue Load = DAG.getNode(ISD::SCALAR_TO_VECTOR, dl, MVT::v4i32,
18580 Op.getOperand(0));
18581
18582 // Zero out the upper parts of the register.
18583 Load = getShuffleVectorZeroOrUndef(Load, 0, true, Subtarget, DAG);
18584
18585 Load = DAG.getNode(ISD::EXTRACT_VECTOR_ELT, dl, MVT::f64,
18586 DAG.getBitcast(MVT::v2f64, Load),
18587 DAG.getIntPtrConstant(0, dl));
18588
18589 // Or the load with the bias.
18590 SDValue Or = DAG.getNode(
18591 ISD::OR, dl, MVT::v2i64,
18592 DAG.getBitcast(MVT::v2i64,
18593 DAG.getNode(ISD::SCALAR_TO_VECTOR, dl, MVT::v2f64, Load)),
18594 DAG.getBitcast(MVT::v2i64,
18595 DAG.getNode(ISD::SCALAR_TO_VECTOR, dl, MVT::v2f64, Bias)));
18596 Or =
18597 DAG.getNode(ISD::EXTRACT_VECTOR_ELT, dl, MVT::f64,
18598 DAG.getBitcast(MVT::v2f64, Or), DAG.getIntPtrConstant(0, dl));
18599
18600 // Subtract the bias.
18601 // TODO: Are there any fast-math-flags to propagate here?
18602 SDValue Sub = DAG.getNode(ISD::FSUB, dl, MVT::f64, Or, Bias);
18603
18604 // Handle final rounding.
18605 return DAG.getFPExtendOrRound(Sub, dl, Op.getSimpleValueType());
18606}
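// [Annotation] Illustrative sketch, not part of X86ISelLowering.cpp: scalar
// model of the OR-with-bias trick above. Placing the 32-bit value in the
// mantissa of 2^52 and subtracting the bias yields the exact conversion,
// which the lowering then rounds to the destination type. Name is made up.
#include <cstdint>
#include <cstring>

static double u32ToDouble(uint32_t X) {
  uint64_t Bits = 0x4330000000000000ULL | X;   // bias OR'ed with the value
  double D;
  std::memcpy(&D, &Bits, sizeof(D));
  return D - 0x1.0p52;                         // exact: equals (double)X
}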
18607
18608static SDValue lowerUINT_TO_FP_v2i32(SDValue Op, SelectionDAG &DAG,
18609 const X86Subtarget &Subtarget,
18610 const SDLoc &DL) {
18611 if (Op.getSimpleValueType() != MVT::v2f64)
18612 return SDValue();
18613
18614 SDValue N0 = Op.getOperand(0);
18615 assert(N0.getSimpleValueType() == MVT::v2i32 && "Unexpected input type");
18616
18617 // Legalize to v4i32 type.
18618 N0 = DAG.getNode(ISD::CONCAT_VECTORS, DL, MVT::v4i32, N0,
18619 DAG.getUNDEF(MVT::v2i32));
18620
18621 if (Subtarget.hasAVX512())
18622 return DAG.getNode(X86ISD::CVTUI2P, DL, MVT::v2f64, N0);
18623
18624 // Same implementation as VectorLegalizer::ExpandUINT_TO_FLOAT,
18625 // but using v2i32 to v2f64 with X86ISD::CVTSI2P.
18626 SDValue HalfWord = DAG.getConstant(16, DL, MVT::v4i32);
18627 SDValue HalfWordMask = DAG.getConstant(0x0000FFFF, DL, MVT::v4i32);
18628
18629 // Two to the power of half-word-size.
18630 SDValue TWOHW = DAG.getConstantFP((double)(1 << 16), DL, MVT::v2f64);
18631
18632 // Clear upper part of LO, lower HI.
18633 SDValue HI = DAG.getNode(ISD::SRL, DL, MVT::v4i32, N0, HalfWord);
18634 SDValue LO = DAG.getNode(ISD::AND, DL, MVT::v4i32, N0, HalfWordMask);
18635
18636 SDValue fHI = DAG.getNode(X86ISD::CVTSI2P, DL, MVT::v2f64, HI);
18637 fHI = DAG.getNode(ISD::FMUL, DL, MVT::v2f64, fHI, TWOHW);
18638 SDValue fLO = DAG.getNode(X86ISD::CVTSI2P, DL, MVT::v2f64, LO);
18639
18640 // Add the two halves.
18641 return DAG.getNode(ISD::FADD, DL, MVT::v2f64, fHI, fLO);
18642}
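// [Annotation] Illustrative sketch, not part of X86ISelLowering.cpp: scalar
// model of lowerUINT_TO_FP_v2i32's non-AVX512 path. Each half-word is
// converted with a signed conversion (both halves fit comfortably in i32)
// and the halves are recombined, mirroring the CVTSI2P / FMUL / FADD
// sequence. Name is made up.
#include <cstdint>

static double u32ToDoubleHalves(uint32_t V) {
  double FHi = static_cast<double>(static_cast<int32_t>(V >> 16));
  double FLo = static_cast<double>(static_cast<int32_t>(V & 0xFFFFu));
  return FHi * 65536.0 + FLo;   // fHI * 2^16 + fLO
}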
18643
18644static SDValue lowerUINT_TO_FP_vXi32(SDValue Op, SelectionDAG &DAG,
18645 const X86Subtarget &Subtarget) {
18646 // The algorithm is the following:
18647 // #ifdef __SSE4_1__
18648 // uint4 lo = _mm_blend_epi16( v, (uint4) 0x4b000000, 0xaa);
18649 // uint4 hi = _mm_blend_epi16( _mm_srli_epi32(v,16),
18650 // (uint4) 0x53000000, 0xaa);
18651 // #else
18652 // uint4 lo = (v & (uint4) 0xffff) | (uint4) 0x4b000000;
18653 // uint4 hi = (v >> 16) | (uint4) 0x53000000;
18654 // #endif
18655 // float4 fhi = (float4) hi - (0x1.0p39f + 0x1.0p23f);
18656 // return (float4) lo + fhi;
18657
18658 // We shouldn't use it when unsafe-fp-math is enabled though: we might later
18659 // reassociate the two FADDs, and if we do that, the algorithm fails
18660 // spectacularly (PR24512).
18661 // FIXME: If we ever have some kind of Machine FMF, this should be marked
18662 // as non-fast and always be enabled. Why isn't SDAG FMF enough? Because
18663 // there's also the MachineCombiner reassociations happening on Machine IR.
18664 if (DAG.getTarget().Options.UnsafeFPMath)
18665 return SDValue();
18666
18667 SDLoc DL(Op);
18668 SDValue V = Op->getOperand(0);
18669 MVT VecIntVT = V.getSimpleValueType();
18670 bool Is128 = VecIntVT == MVT::v4i32;
18671 MVT VecFloatVT = Is128 ? MVT::v4f32 : MVT::v8f32;
18672 // If we convert to something else than the supported type, e.g., to v4f64,
18673 // abort early.
18674 if (VecFloatVT != Op->getSimpleValueType(0))
18675 return SDValue();
18676
18677 assert((VecIntVT == MVT::v4i32 || VecIntVT == MVT::v8i32) &&
18678        "Unsupported custom type");
18679
18680 // In the #idef/#else code, we have in common:
18681 // - The vector of constants:
18682 // -- 0x4b000000
18683 // -- 0x53000000
18684 // - A shift:
18685 // -- v >> 16
18686
18687 // Create the splat vector for 0x4b000000.
18688 SDValue VecCstLow = DAG.getConstant(0x4b000000, DL, VecIntVT);
18689 // Create the splat vector for 0x53000000.
18690 SDValue VecCstHigh = DAG.getConstant(0x53000000, DL, VecIntVT);
18691
18692 // Create the right shift.
18693 SDValue VecCstShift = DAG.getConstant(16, DL, VecIntVT);
18694 SDValue HighShift = DAG.getNode(ISD::SRL, DL, VecIntVT, V, VecCstShift);
18695
18696 SDValue Low, High;
18697 if (Subtarget.hasSSE41()) {
18698 MVT VecI16VT = Is128 ? MVT::v8i16 : MVT::v16i16;
18699 // uint4 lo = _mm_blend_epi16( v, (uint4) 0x4b000000, 0xaa);
18700 SDValue VecCstLowBitcast = DAG.getBitcast(VecI16VT, VecCstLow);
18701 SDValue VecBitcast = DAG.getBitcast(VecI16VT, V);
18702 // Low will be bitcasted right away, so do not bother bitcasting back to its
18703 // original type.
18704 Low = DAG.getNode(X86ISD::BLENDI, DL, VecI16VT, VecBitcast,
18705 VecCstLowBitcast, DAG.getTargetConstant(0xaa, DL, MVT::i8));
18706 // uint4 hi = _mm_blend_epi16( _mm_srli_epi32(v,16),
18707 // (uint4) 0x53000000, 0xaa);
18708 SDValue VecCstHighBitcast = DAG.getBitcast(VecI16VT, VecCstHigh);
18709 SDValue VecShiftBitcast = DAG.getBitcast(VecI16VT, HighShift);
18710 // High will be bitcasted right away, so do not bother bitcasting back to
18711 // its original type.
18712 High = DAG.getNode(X86ISD::BLENDI, DL, VecI16VT, VecShiftBitcast,
18713 VecCstHighBitcast, DAG.getTargetConstant(0xaa, DL, MVT::i8));
18714 } else {
18715 SDValue VecCstMask = DAG.getConstant(0xffff, DL, VecIntVT);
18716 // uint4 lo = (v & (uint4) 0xffff) | (uint4) 0x4b000000;
18717 SDValue LowAnd = DAG.getNode(ISD::AND, DL, VecIntVT, V, VecCstMask);
18718 Low = DAG.getNode(ISD::OR, DL, VecIntVT, LowAnd, VecCstLow);
18719
18720 // uint4 hi = (v >> 16) | (uint4) 0x53000000;
18721 High = DAG.getNode(ISD::OR, DL, VecIntVT, HighShift, VecCstHigh);
18722 }
18723
18724 // Create the vector constant for -(0x1.0p39f + 0x1.0p23f).
18725 SDValue VecCstFAdd = DAG.getConstantFP(
18726 APFloat(APFloat::IEEEsingle(), APInt(32, 0xD3000080)), DL, VecFloatVT);
18727
18728 // float4 fhi = (float4) hi - (0x1.0p39f + 0x1.0p23f);
18729 SDValue HighBitcast = DAG.getBitcast(VecFloatVT, High);
18730 // TODO: Are there any fast-math-flags to propagate here?
18731 SDValue FHigh =
18732 DAG.getNode(ISD::FADD, DL, VecFloatVT, HighBitcast, VecCstFAdd);
18733 // return (float4) lo + fhi;
18734 SDValue LowBitcast = DAG.getBitcast(VecFloatVT, Low);
18735 return DAG.getNode(ISD::FADD, DL, VecFloatVT, LowBitcast, FHigh);
18736}
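// [Annotation] Illustrative sketch, not part of X86ISelLowering.cpp: scalar
// model of the algorithm in the comment at the top of lowerUINT_TO_FP_vXi32.
// lo carries the low 16 bits in the mantissa of 2^23, hi carries the high 16
// bits in the mantissa of 2^39; one exact subtraction plus one final addition
// reassemble the value with a single rounding step. Name is made up.
#include <cstdint>
#include <cstring>

static float u32ToFloat(uint32_t V) {
  uint32_t Lo = (V & 0xFFFFu) | 0x4B000000u;   // 2^23 + low half
  uint32_t Hi = (V >> 16)     | 0x53000000u;   // 2^39 + high half * 2^16
  float FLo, FHi;
  std::memcpy(&FLo, &Lo, sizeof(FLo));
  std::memcpy(&FHi, &Hi, sizeof(FHi));
  FHi -= 0x1.0p39f + 0x1.0p23f;   // exact; cancels hi's bias, pre-subtracts lo's
  return FLo + FHi;               // single rounding step
}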
18737
18738static SDValue lowerUINT_TO_FP_vec(SDValue Op, SelectionDAG &DAG,
18739 const X86Subtarget &Subtarget) {
18740 SDValue N0 = Op.getOperand(0);
18741 MVT SrcVT = N0.getSimpleValueType();
18742 SDLoc dl(Op);
18743
18744 switch (SrcVT.SimpleTy) {
18745 default:
18746 llvm_unreachable("Custom UINT_TO_FP is not supported!");
18747 case MVT::v2i32:
18748 return lowerUINT_TO_FP_v2i32(Op, DAG, Subtarget, dl);
18749 case MVT::v4i32:
18750 case MVT::v8i32:
18751 assert(!Subtarget.hasAVX512());
18752 return lowerUINT_TO_FP_vXi32(Op, DAG, Subtarget);
18753 }
18754}
18755
18756SDValue X86TargetLowering::LowerUINT_TO_FP(SDValue Op,
18757 SelectionDAG &DAG) const {
18758 SDValue N0 = Op.getOperand(0);
18759 SDLoc dl(Op);
18760 auto PtrVT = getPointerTy(DAG.getDataLayout());
18761 MVT SrcVT = N0.getSimpleValueType();
18762 MVT DstVT = Op.getSimpleValueType();
18763
18764 if (DstVT == MVT::f128)
18765 return LowerF128Call(Op, DAG, RTLIB::getUINTTOFP(SrcVT, DstVT));
18766
18767 if (DstVT.isVector())
18768 return lowerUINT_TO_FP_vec(Op, DAG, Subtarget);
18769
18770 if (SDValue Extract = vectorizeExtractedCast(Op, DAG, Subtarget))
18771 return Extract;
18772
18773 if (Subtarget.hasAVX512() && isScalarFPTypeInSSEReg(DstVT) &&
18774 (SrcVT == MVT::i32 || (SrcVT == MVT::i64 && Subtarget.is64Bit()))) {
18775 // Conversions from unsigned i32 to f32/f64 are legal,
18776 // using VCVTUSI2SS/SD. Same for i64 in 64-bit mode.
18777 return Op;
18778 }
18779
18780 // Promote i32 to i64 and use a signed conversion on 64-bit targets.
18781 if (SrcVT == MVT::i32 && Subtarget.is64Bit()) {
18782 N0 = DAG.getNode(ISD::ZERO_EXTEND, dl, MVT::i64, N0);
18783 return DAG.getNode(ISD::SINT_TO_FP, dl, DstVT, N0);
18784 }
18785
18786 if (SDValue V = LowerI64IntToFP_AVX512DQ(Op, DAG, Subtarget))
18787 return V;
18788
18789 if (SrcVT == MVT::i64 && DstVT == MVT::f64 && X86ScalarSSEf64)
18790 return LowerUINT_TO_FP_i64(Op, DAG, Subtarget);
18791 if (SrcVT == MVT::i32 && X86ScalarSSEf64)
18792 return LowerUINT_TO_FP_i32(Op, DAG, Subtarget);
18793 if (Subtarget.is64Bit() && SrcVT == MVT::i64 && DstVT == MVT::f32)
18794 return SDValue();
18795
18796 // Make a 64-bit buffer, and use it to build an FILD.
18797 SDValue StackSlot = DAG.CreateStackTemporary(MVT::i64);
18798 if (SrcVT == MVT::i32) {
18799 SDValue OffsetSlot = DAG.getMemBasePlusOffset(StackSlot, 4, dl);
18800 SDValue Store1 = DAG.getStore(DAG.getEntryNode(), dl, Op.getOperand(0),
18801 StackSlot, MachinePointerInfo());
18802 SDValue Store2 = DAG.getStore(Store1, dl, DAG.getConstant(0, dl, MVT::i32),
18803 OffsetSlot, MachinePointerInfo());
18804 SDValue Fild = BuildFILD(Op, MVT::i64, Store2, StackSlot, DAG);
18805 return Fild;
18806 }
18807
18808 assert(SrcVT == MVT::i64 && "Unexpected type in UINT_TO_FP");
18809 SDValue ValueToStore = Op.getOperand(0);
18810 if (isScalarFPTypeInSSEReg(Op.getValueType()) && !Subtarget.is64Bit())
18811 // Bitcasting to f64 here allows us to do a single 64-bit store from
18812 // an SSE register, avoiding the store forwarding penalty that would come
18813 // with two 32-bit stores.
18814 ValueToStore = DAG.getBitcast(MVT::f64, ValueToStore);
18815 SDValue Store = DAG.getStore(DAG.getEntryNode(), dl, ValueToStore, StackSlot,
18816 MachinePointerInfo());
18817 // For i64 source, we need to add the appropriate power of 2 if the input
18818 // was negative. This is the same as the optimization in
18819 // DAGTypeLegalizer::ExpandIntOp_UINT_TO_FP, and for it to be safe here,
18820 // we must be careful to do the computation in x87 extended precision, not
18821 // in SSE. (The generic code can't know it's OK to do this, or how to.)
18822 int SSFI = cast<FrameIndexSDNode>(StackSlot)->getIndex();
18823 MachineMemOperand *MMO = DAG.getMachineFunction().getMachineMemOperand(
18824 MachinePointerInfo::getFixedStack(DAG.getMachineFunction(), SSFI),
18825 MachineMemOperand::MOLoad, 8, 8);
18826
18827 SDVTList Tys = DAG.getVTList(MVT::f80, MVT::Other);
18828 SDValue Ops[] = { Store, StackSlot };
18829 SDValue Fild = DAG.getMemIntrinsicNode(X86ISD::FILD, dl, Tys, Ops,
18830 MVT::i64, MMO);
18831
18832 APInt FF(32, 0x5F800000ULL);
18833
18834 // Check whether the sign bit is set.
18835 SDValue SignSet = DAG.getSetCC(
18836 dl, getSetCCResultType(DAG.getDataLayout(), *DAG.getContext(), MVT::i64),
18837 Op.getOperand(0), DAG.getConstant(0, dl, MVT::i64), ISD::SETLT);
18838
18839 // Build a 64 bit pair (0, FF) in the constant pool, with FF in the lo bits.
18840 SDValue FudgePtr = DAG.getConstantPool(
18841 ConstantInt::get(*DAG.getContext(), FF.zext(64)), PtrVT);
18842
18843 // Get a pointer to FF if the sign bit was set, or to 0 otherwise.
18844 SDValue Zero = DAG.getIntPtrConstant(0, dl);
18845 SDValue Four = DAG.getIntPtrConstant(4, dl);
18846 SDValue Offset = DAG.getSelect(dl, Zero.getValueType(), SignSet, Zero, Four);
18847 FudgePtr = DAG.getNode(ISD::ADD, dl, PtrVT, FudgePtr, Offset);
18848
18849 // Load the value out, extending it from f32 to f80.
18850 // FIXME: Avoid the extend by constructing the right constant pool?
18851 SDValue Fudge = DAG.getExtLoad(
18852 ISD::EXTLOAD, dl, MVT::f80, DAG.getEntryNode(), FudgePtr,
18853 MachinePointerInfo::getConstantPool(DAG.getMachineFunction()), MVT::f32,
18854 /* Alignment = */ 4);
18855 // Extend everything to 80 bits to force it to be done on x87.
18856 // TODO: Are there any fast-math-flags to propagate here?
18857 SDValue Add = DAG.getNode(ISD::FADD, dl, MVT::f80, Fild, Fudge);
18858 return DAG.getNode(ISD::FP_ROUND, dl, DstVT, Add,
18859 DAG.getIntPtrConstant(0, dl));
18860}
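// [Annotation] Illustrative sketch, not part of X86ISelLowering.cpp: scalar
// model of the FILD + fudge path above. The i64 is converted as signed, then
// 2^64 (the 0x5F800000 constant loaded from the constant pool) is added back
// when the sign bit was set. The real lowering performs the add in x87 f80
// precision before the final FP_ROUND; the double-only model below is only
// meant to show the fixup, and its name is made up.
#include <cstdint>

static double u64ToDoubleViaSigned(uint64_t X) {
  double D = static_cast<double>(static_cast<int64_t>(X)); // signed FILD
  if (static_cast<int64_t>(X) < 0)                         // SignSet
    D += 0x1.0p64;                                         // the "fudge"
  return D;
}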
18861
18862// If the given FP_TO_SINT (IsSigned) or FP_TO_UINT (!IsSigned) operation
18863// is legal, or has an fp128 or f16 source (which needs to be promoted to f32),
18864// just return an SDValue().
18865// Otherwise it is assumed to be a conversion from one of f32, f64 or f80
18866// to i16, i32 or i64, and we lower it to a legal sequence and return the
18867// result.
18868SDValue
18869X86TargetLowering::FP_TO_INTHelper(SDValue Op, SelectionDAG &DAG,
18870 bool IsSigned) const {
18871 SDLoc DL(Op);
18872
18873 EVT DstTy = Op.getValueType();
18874 EVT TheVT = Op.getOperand(0).getValueType();
18875 auto PtrVT = getPointerTy(DAG.getDataLayout());
18876
18877 if (TheVT != MVT::f32 && TheVT != MVT::f64 && TheVT != MVT::f80) {
18878 // f16 must be promoted before using the lowering in this routine.
18879 // fp128 does not use this lowering.
18880 return SDValue();
18881 }
18882
18883 // If using FIST to compute an unsigned i64, we'll need some fixup
18884 // to handle values above the maximum signed i64. A FIST is always
18885 // used for the 32-bit subtarget, but also for f80 on a 64-bit target.
18886 bool UnsignedFixup = !IsSigned && DstTy == MVT::i64;
18887
18888 if (!IsSigned && DstTy != MVT::i64) {
18889 // Replace the fp-to-uint32 operation with an fp-to-sint64 FIST.
18890 // The low 32 bits of the fist result will have the correct uint32 result.
18891 assert(DstTy == MVT::i32 && "Unexpected FP_TO_UINT");
18892 DstTy = MVT::i64;
18893 }
18894
18895 assert(DstTy.getSimpleVT() <= MVT::i64 &&
18896        DstTy.getSimpleVT() >= MVT::i16 &&
18897        "Unknown FP_TO_INT to lower!");
18898
18899 // We lower FP->int64 into FISTP64 followed by a load from a temporary
18900 // stack slot.
18901 MachineFunction &MF = DAG.getMachineFunction();
18902 unsigned MemSize = DstTy.getStoreSize();
18903 int SSFI = MF.getFrameInfo().CreateStackObject(MemSize, MemSize, false);
18904 SDValue StackSlot = DAG.getFrameIndex(SSFI, PtrVT);
18905
18906 SDValue Chain = DAG.getEntryNode();
18907 SDValue Value = Op.getOperand(0);
18908 SDValue Adjust; // 0x0 or 0x80000000, for result sign bit adjustment.
18909
18910 if (UnsignedFixup) {
18911 //
18912 // Conversion to unsigned i64 is implemented with a select,
18913 // depending on whether the source value fits in the range
18914 // of a signed i64. Let Thresh be the FP equivalent of
18915 // 0x8000000000000000ULL.
18916 //
18917 // Adjust i32 = (Value < Thresh) ? 0 : 0x80000000;
18918 // FistSrc = (Value < Thresh) ? Value : (Value - Thresh);
18919 // Fist-to-mem64 FistSrc
18920 // Add 0 or 0x800...0ULL to the 64-bit result, which is equivalent
18921 // to XOR'ing the high 32 bits with Adjust.
18922 //
18923 // Being a power of 2, Thresh is exactly representable in all FP formats.
18924 // For X87 we'd like to use the smallest FP type for this constant, but
18925 // for DAG type consistency we have to match the FP operand type.
18926
18927 APFloat Thresh(APFloat::IEEEsingle(), APInt(32, 0x5f000000));
18928 LLVM_ATTRIBUTE_UNUSED APFloat::opStatus Status = APFloat::opOK;
18929 bool LosesInfo = false;
18930 if (TheVT == MVT::f64)
18931 // The rounding mode is irrelevant as the conversion should be exact.
18932 Status = Thresh.convert(APFloat::IEEEdouble(), APFloat::rmNearestTiesToEven,
18933 &LosesInfo);
18934 else if (TheVT == MVT::f80)
18935 Status = Thresh.convert(APFloat::x87DoubleExtended(),
18936 APFloat::rmNearestTiesToEven, &LosesInfo);
18937
18938 assert(Status == APFloat::opOK && !LosesInfo &&
18939 "FP conversion should have been exact");
18940
18941 SDValue ThreshVal = DAG.getConstantFP(Thresh, DL, TheVT);
18942
18943 SDValue Cmp = DAG.getSetCC(DL,
18944 getSetCCResultType(DAG.getDataLayout(),
18945 *DAG.getContext(), TheVT),
18946 Value, ThreshVal, ISD::SETLT);
18947 Adjust = DAG.getSelect(DL, MVT::i64, Cmp,
18948 DAG.getConstant(0, DL, MVT::i64),
18949 DAG.getConstant(APInt::getSignMask(64),
18950 DL, MVT::i64));
18951 SDValue Sub = DAG.getNode(ISD::FSUB, DL, TheVT, Value, ThreshVal);
18952 Cmp = DAG.getSetCC(DL, getSetCCResultType(DAG.getDataLayout(),
18953 *DAG.getContext(), TheVT),
18954 Value, ThreshVal, ISD::SETLT);
18955 Value = DAG.getSelect(DL, TheVT, Cmp, Value, Sub);
18956 }
18957
18958 MachinePointerInfo MPI = MachinePointerInfo::getFixedStack(MF, SSFI);
18959
18960 // FIXME This causes a redundant load/store if the SSE-class value is already
18961 // in memory, such as if it is on the callstack.
18962 if (isScalarFPTypeInSSEReg(TheVT)) {
18963 assert(DstTy == MVT::i64 && "Invalid FP_TO_SINT to lower!");
18964 Chain = DAG.getStore(Chain, DL, Value, StackSlot, MPI);
18965 SDVTList Tys = DAG.getVTList(TheVT, MVT::Other);
18966 SDValue Ops[] = { Chain, StackSlot };
18967
18968 unsigned FLDSize = TheVT.getStoreSize();
18969 assert(FLDSize <= MemSize && "Stack slot not big enough");
18970 MachineMemOperand *MMO = MF.getMachineMemOperand(
18971 MPI, MachineMemOperand::MOLoad, FLDSize, FLDSize);
18972 Value = DAG.getMemIntrinsicNode(X86ISD::FLD, DL, Tys, Ops, TheVT, MMO);
18973 Chain = Value.getValue(1);
18974 }
18975
18976 // Build the FP_TO_INT*_IN_MEM
18977 MachineMemOperand *MMO = MF.getMachineMemOperand(
18978 MPI, MachineMemOperand::MOStore, MemSize, MemSize);
18979 SDValue Ops[] = { Chain, Value, StackSlot };
18980 SDValue FIST = DAG.getMemIntrinsicNode(X86ISD::FP_TO_INT_IN_MEM, DL,
18981 DAG.getVTList(MVT::Other),
18982 Ops, DstTy, MMO);
18983
18984 SDValue Res = DAG.getLoad(Op.getValueType(), SDLoc(Op), FIST, StackSlot, MPI);
18985
18986 // If we need an unsigned fixup, XOR the result with adjust.
18987 if (UnsignedFixup)
18988 Res = DAG.getNode(ISD::XOR, DL, MVT::i64, Res, Adjust);
18989
18990 return Res;
18991}
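
// ---- Editor's illustrative sketch (not part of the original source) ----
// Scalar model of the unsigned fixups performed above. FP->u32 reuses a
// signed 64-bit conversion and keeps the low 32 bits; FP->u64 compares
// against Thresh = 2^63, subtracts it when needed, and XORs the sign bit
// back in afterwards (the 'Adjust' value). Helper names are hypothetical.
#include <cstdint>

static uint32_t modelFPToUInt32(double D) {
  return static_cast<uint32_t>(static_cast<int64_t>(D)); // low 32 bits
}

static uint64_t modelFPToUInt64(double D) {
  const double Thresh = 9223372036854775808.0; // 2^63, exact in double
  uint64_t Adjust = D < Thresh ? 0 : 0x8000000000000000ULL;
  double FistSrc = D < Thresh ? D : D - Thresh; // exact subtraction
  return static_cast<uint64_t>(static_cast<int64_t>(FistSrc)) ^ Adjust;
}
// ------------------------------------------------------------------------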
18992
18993static SDValue LowerAVXExtend(SDValue Op, SelectionDAG &DAG,
18994 const X86Subtarget &Subtarget) {
18995 MVT VT = Op.getSimpleValueType();
18996 SDValue In = Op.getOperand(0);
18997 MVT InVT = In.getSimpleValueType();
18998 SDLoc dl(Op);
18999 unsigned Opc = Op.getOpcode();
19000
19001 assert(VT.isVector() && InVT.isVector() && "Expected vector type");
19002 assert((Opc == ISD::ANY_EXTEND || Opc == ISD::ZERO_EXTEND) &&
19003 "Unexpected extension opcode");
19004 assert(VT.getVectorNumElements() == InVT.getVectorNumElements() &&
19005 "Expected same number of elements");
19006 assert((VT.getVectorElementType() == MVT::i16 ||
19007 VT.getVectorElementType() == MVT::i32 ||
19008 VT.getVectorElementType() == MVT::i64) &&
19009 "Unexpected element type");
19010 assert((InVT.getVectorElementType() == MVT::i8 ||
19011 InVT.getVectorElementType() == MVT::i16 ||
19012 InVT.getVectorElementType() == MVT::i32) &&
19013 "Unexpected element type");
19014
19015 unsigned ExtendInVecOpc = getOpcode_EXTEND_VECTOR_INREG(Opc);
19016
19017 // Custom legalize v8i8->v8i64 on CPUs without avx512bw.
19018 if (InVT == MVT::v8i8) {
19019 if (VT != MVT::v8i64)
19020 return SDValue();
19021
19022 In = DAG.getNode(ISD::CONCAT_VECTORS, SDLoc(Op),
19023 MVT::v16i8, In, DAG.getUNDEF(MVT::v8i8));
19024 return DAG.getNode(ExtendInVecOpc, dl, VT, In);
19025 }
19026
19027 if (Subtarget.hasInt256())
19028 return Op;
19029
19030 // Optimize vectors in AVX mode:
19031 //
19032 // v8i16 -> v8i32
19033 // Use vpmovzwd for 4 lower elements v8i16 -> v4i32.
19034 // Use vpunpckhwd for 4 upper elements v8i16 -> v4i32.
19035 // Concat upper and lower parts.
19036 //
19037 // v4i32 -> v4i64
19038 // Use vpmovzdq for 4 lower elements v4i32 -> v2i64.
19039 // Use vpunpckhdq for 4 upper elements v4i32 -> v2i64.
19040 // Concat upper and lower parts.
19041 //
19042 MVT HalfVT = VT.getHalfNumVectorElementsVT();
19043 SDValue OpLo = DAG.getNode(ExtendInVecOpc, dl, HalfVT, In);
19044
19045 // Short-circuit if we can determine that each 128-bit half is the same value.
19046 // Otherwise, this is difficult to match and optimize.
19047 if (auto *Shuf = dyn_cast<ShuffleVectorSDNode>(In))
19048 if (hasIdenticalHalvesShuffleMask(Shuf->getMask()))
19049 return DAG.getNode(ISD::CONCAT_VECTORS, dl, VT, OpLo, OpLo);
19050
19051 SDValue ZeroVec = DAG.getConstant(0, dl, InVT);
19052 SDValue Undef = DAG.getUNDEF(InVT);
19053 bool NeedZero = Opc == ISD::ZERO_EXTEND;
19054 SDValue OpHi = getUnpackh(DAG, dl, InVT, In, NeedZero ? ZeroVec : Undef);
19055 OpHi = DAG.getBitcast(HalfVT, OpHi);
19056
19057 return DAG.getNode(ISD::CONCAT_VECTORS, dl, VT, OpLo, OpHi);
19058}
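
// ---- Editor's illustrative sketch (not part of the original source) ----
// The AVX-only (no AVX2) strategy described in the comment above, written
// with intrinsics: zero-extend the low four words with PMOVZXWD, the high
// four by interleaving with zero (PUNPCKHWD), then concatenate the halves.
// Assumes SSE4.1 + AVX; the function name is hypothetical.
#include <immintrin.h>

static __m256i modelZExtV8I16ToV8I32(__m128i In) {
  __m128i Lo = _mm_cvtepu16_epi32(In);                      // vpmovzxwd
  __m128i Hi = _mm_unpackhi_epi16(In, _mm_setzero_si128()); // vpunpckhwd
  return _mm256_insertf128_si256(_mm256_castsi128_si256(Lo), Hi, 1);
}
// ------------------------------------------------------------------------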
19059
19060// Helper to split and extend a v16i1 mask to v16i8 or v16i16.
19061static SDValue SplitAndExtendv16i1(unsigned ExtOpc, MVT VT, SDValue In,
19062 const SDLoc &dl, SelectionDAG &DAG) {
19063 assert((VT == MVT::v16i8 || VT == MVT::v16i16) && "Unexpected VT.");
19064 SDValue Lo = DAG.getNode(ISD::EXTRACT_SUBVECTOR, dl, MVT::v8i1, In,
19065 DAG.getIntPtrConstant(0, dl));
19066 SDValue Hi = DAG.getNode(ISD::EXTRACT_SUBVECTOR, dl, MVT::v8i1, In,
19067 DAG.getIntPtrConstant(8, dl));
19068 Lo = DAG.getNode(ExtOpc, dl, MVT::v8i16, Lo);
19069 Hi = DAG.getNode(ExtOpc, dl, MVT::v8i16, Hi);
19070 SDValue Res = DAG.getNode(ISD::CONCAT_VECTORS, dl, MVT::v16i16, Lo, Hi);
19071 return DAG.getNode(ISD::TRUNCATE, dl, VT, Res);
19072}
19073
19074static SDValue LowerZERO_EXTEND_Mask(SDValue Op,
19075 const X86Subtarget &Subtarget,
19076 SelectionDAG &DAG) {
19077 MVT VT = Op->getSimpleValueType(0);
19078 SDValue In = Op->getOperand(0);
19079 MVT InVT = In.getSimpleValueType();
19080 assert(InVT.getVectorElementType() == MVT::i1 && "Unexpected input type!");
19081 SDLoc DL(Op);
19082 unsigned NumElts = VT.getVectorNumElements();
19083
19084 // For all vectors but vXi8, we can just emit a sign_extend and a shift. This
19085 // avoids a constant pool load.
19086 if (VT.getVectorElementType() != MVT::i8) {
19087 SDValue Extend = DAG.getNode(ISD::SIGN_EXTEND, DL, VT, In);
19088 return DAG.getNode(ISD::SRL, DL, VT, Extend,
19089 DAG.getConstant(VT.getScalarSizeInBits() - 1, DL, VT));
19090 }
19091
19092 // Extend VT if BWI is not supported.
19093 MVT ExtVT = VT;
19094 if (!Subtarget.hasBWI()) {
19095 // If v16i32 is to be avoided, we'll need to split and concatenate.
19096 if (NumElts == 16 && !Subtarget.canExtendTo512DQ())
19097 return SplitAndExtendv16i1(ISD::ZERO_EXTEND, VT, In, DL, DAG);
19098
19099 ExtVT = MVT::getVectorVT(MVT::i32, NumElts);
19100 }
19101
19102 // Widen to 512-bits if VLX is not supported.
19103 MVT WideVT = ExtVT;
19104 if (!ExtVT.is512BitVector() && !Subtarget.hasVLX()) {
19105 NumElts *= 512 / ExtVT.getSizeInBits();
19106 InVT = MVT::getVectorVT(MVT::i1, NumElts);
19107 In = DAG.getNode(ISD::INSERT_SUBVECTOR, DL, InVT, DAG.getUNDEF(InVT),
19108 In, DAG.getIntPtrConstant(0, DL));
19109 WideVT = MVT::getVectorVT(ExtVT.getVectorElementType(),
19110 NumElts);
19111 }
19112
19113 SDValue One = DAG.getConstant(1, DL, WideVT);
19114 SDValue Zero = DAG.getConstant(0, DL, WideVT);
19115
19116 SDValue SelectedVal = DAG.getSelect(DL, WideVT, In, One, Zero);
19117
19118 // Truncate if we had to extend above.
19119 if (VT != ExtVT) {
19120 WideVT = MVT::getVectorVT(MVT::i8, NumElts);
19121 SelectedVal = DAG.getNode(ISD::TRUNCATE, DL, WideVT, SelectedVal);
19122 }
19123
19124 // Extract back to 128/256-bit if we widened.
19125 if (WideVT != VT)
19126 SelectedVal = DAG.getNode(ISD::EXTRACT_SUBVECTOR, DL, VT, SelectedVal,
19127 DAG.getIntPtrConstant(0, DL));
19128
19129 return SelectedVal;
19130}
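
// ---- Editor's illustrative sketch (not part of the original source) ----
// Scalar model of the sign_extend + srl trick used above for non-vXi8
// elements: sign-extending an i1 gives 0 or all-ones, and a logical shift
// right by (bits - 1) turns that into 0 or 1 without loading a vector of
// ones from the constant pool. The helper name is hypothetical.
#include <cstdint>

static uint32_t modelZExtMaskBit(bool B) {
  int32_t Sext = B ? -1 : 0;                // sign_extend i1 -> i32
  return static_cast<uint32_t>(Sext) >> 31; // srl by 31 -> 0 or 1
}
// ------------------------------------------------------------------------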
19131
19132static SDValue LowerZERO_EXTEND(SDValue Op, const X86Subtarget &Subtarget,
19133 SelectionDAG &DAG) {
19134 SDValue In = Op.getOperand(0);
19135 MVT SVT = In.getSimpleValueType();
19136
19137 if (SVT.getVectorElementType() == MVT::i1)
19138 return LowerZERO_EXTEND_Mask(Op, Subtarget, DAG);
19139
19140 assert(Subtarget.hasAVX() && "Expected AVX support");
19141 return LowerAVXExtend(Op, DAG, Subtarget);
19142}
19143
19144/// Helper to recursively truncate vector elements in half with PACKSS/PACKUS.
19145/// It makes use of the fact that vectors with enough leading sign/zero bits
19146/// prevent the PACKSS/PACKUS from saturating the results.
19147/// AVX2 (Int256) sub-targets require extra shuffling as the PACK*S operates
19148/// within each 128-bit lane.
19149static SDValue truncateVectorWithPACK(unsigned Opcode, EVT DstVT, SDValue In,
19150 const SDLoc &DL, SelectionDAG &DAG,
19151 const X86Subtarget &Subtarget) {
19152 assert((Opcode == X86ISD::PACKSS || Opcode == X86ISD::PACKUS) &&
19153 "Unexpected PACK opcode");
19154 assert(DstVT.isVector() && "VT not a vector?");
19155
19156 // Requires SSE2 but AVX512 has fast vector truncate.
19157 if (!Subtarget.hasSSE2())
19158 return SDValue();
19159
19160 EVT SrcVT = In.getValueType();
19161
19162 // No truncation required, we might get here due to recursive calls.
19163 if (SrcVT == DstVT)
19164 return In;
19165
19166 // We only support vector truncation to 64bits or greater from a
19167 // 128bits or greater source.
19168 unsigned DstSizeInBits = DstVT.getSizeInBits();
19169 unsigned SrcSizeInBits = SrcVT.getSizeInBits();
19170 if ((DstSizeInBits % 64) != 0 || (SrcSizeInBits % 128) != 0)
19171 return SDValue();
19172
19173 unsigned NumElems = SrcVT.getVectorNumElements();
19174 if (!isPowerOf2_32(NumElems))
19175 return SDValue();
19176
19177 LLVMContext &Ctx = *DAG.getContext();
19178 assert(DstVT.getVectorNumElements() == NumElems && "Illegal truncation");
19179 assert(SrcSizeInBits > DstSizeInBits && "Illegal truncation");
19180
19181 EVT PackedSVT = EVT::getIntegerVT(Ctx, SrcVT.getScalarSizeInBits() / 2);
19182
19183 // Pack to the largest type possible:
19184 // vXi64/vXi32 -> PACK*SDW and vXi16 -> PACK*SWB.
19185 EVT InVT = MVT::i16, OutVT = MVT::i8;
19186 if (SrcVT.getScalarSizeInBits() > 16 &&
19187 (Opcode == X86ISD::PACKSS || Subtarget.hasSSE41())) {
19188 InVT = MVT::i32;
19189 OutVT = MVT::i16;
19190 }
19191
19192 // 128bit -> 64bit truncate - PACK 128-bit src in the lower subvector.
19193 if (SrcVT.is128BitVector()) {
19194 InVT = EVT::getVectorVT(Ctx, InVT, 128 / InVT.getSizeInBits());
19195 OutVT = EVT::getVectorVT(Ctx, OutVT, 128 / OutVT.getSizeInBits());
19196 In = DAG.getBitcast(InVT, In);
19197 SDValue Res = DAG.getNode(Opcode, DL, OutVT, In, In);
19198 Res = extractSubVector(Res, 0, DAG, DL, 64);
19199 return DAG.getBitcast(DstVT, Res);
19200 }
19201
19202 // Extract lower/upper subvectors.
19203 unsigned NumSubElts = NumElems / 2;
19204 SDValue Lo = extractSubVector(In, 0 * NumSubElts, DAG, DL, SrcSizeInBits / 2);
19205 SDValue Hi = extractSubVector(In, 1 * NumSubElts, DAG, DL, SrcSizeInBits / 2);
19206
19207 unsigned SubSizeInBits = SrcSizeInBits / 2;
19208 InVT = EVT::getVectorVT(Ctx, InVT, SubSizeInBits / InVT.getSizeInBits());
19209 OutVT = EVT::getVectorVT(Ctx, OutVT, SubSizeInBits / OutVT.getSizeInBits());
19210
19211 // 256bit -> 128bit truncate - PACK lower/upper 128-bit subvectors.
19212 if (SrcVT.is256BitVector() && DstVT.is128BitVector()) {
19213 Lo = DAG.getBitcast(InVT, Lo);
19214 Hi = DAG.getBitcast(InVT, Hi);
19215 SDValue Res = DAG.getNode(Opcode, DL, OutVT, Lo, Hi);
19216 return DAG.getBitcast(DstVT, Res);
19217 }
19218
19219 // AVX2: 512bit -> 256bit truncate - PACK lower/upper 256-bit subvectors.
19220 // AVX2: 512bit -> 128bit truncate - PACK(PACK, PACK).
19221 if (SrcVT.is512BitVector() && Subtarget.hasInt256()) {
19222 Lo = DAG.getBitcast(InVT, Lo);
19223 Hi = DAG.getBitcast(InVT, Hi);
19224 SDValue Res = DAG.getNode(Opcode, DL, OutVT, Lo, Hi);
19225
19226 // 256-bit PACK(ARG0, ARG1) leaves us with ((LO0,LO1),(HI0,HI1)),
19227 // so we need to shuffle to get ((LO0,HI0),(LO1,HI1)).
19228 // Scale shuffle mask to avoid bitcasts and help ComputeNumSignBits.
19229 SmallVector<int, 64> Mask;
19230 int Scale = 64 / OutVT.getScalarSizeInBits();
19231 scaleShuffleMask<int>(Scale, ArrayRef<int>({ 0, 2, 1, 3 }), Mask);
19232 Res = DAG.getVectorShuffle(OutVT, DL, Res, Res, Mask);
19233
19234 if (DstVT.is256BitVector())
19235 return DAG.getBitcast(DstVT, Res);
19236
19237 // If 512bit -> 128bit truncate another stage.
19238 EVT PackedVT = EVT::getVectorVT(Ctx, PackedSVT, NumElems);
19239 Res = DAG.getBitcast(PackedVT, Res);
19240 return truncateVectorWithPACK(Opcode, DstVT, Res, DL, DAG, Subtarget);
19241 }
19242
19243 // Recursively pack lower/upper subvectors, concat result and pack again.
19244 assert(SrcSizeInBits >= 256 && "Expected 256-bit vector or greater");
19245 EVT PackedVT = EVT::getVectorVT(Ctx, PackedSVT, NumSubElts);
19246 Lo = truncateVectorWithPACK(Opcode, PackedVT, Lo, DL, DAG, Subtarget);
19247 Hi = truncateVectorWithPACK(Opcode, PackedVT, Hi, DL, DAG, Subtarget);
19248
19249 PackedVT = EVT::getVectorVT(Ctx, PackedSVT, NumElems);
19250 SDValue Res = DAG.getNode(ISD::CONCAT_VECTORS, DL, PackedVT, Lo, Hi);
19251 return truncateVectorWithPACK(Opcode, DstVT, Res, DL, DAG, Subtarget);
19252}
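
// ---- Editor's illustrative sketch (not part of the original source) ----
// The 256-bit -> 128-bit case above, as intrinsics: when the upper half of
// every i32 lane is already zero, PACKUSDW of the lower and upper 128-bit
// subvectors performs the v8i32 -> v8i16 truncation in a single pack.
// Assumes AVX (for the 256-bit type) + SSE4.1; the name is hypothetical.
#include <immintrin.h>

static __m128i modelTruncV8I32ToV8I16(__m256i In) { // each lane < 65536
  __m128i Lo = _mm256_castsi256_si128(In);
  __m128i Hi = _mm256_extractf128_si256(In, 1);
  return _mm_packus_epi32(Lo, Hi); // packusdw saturates, but inputs fit
}
// ------------------------------------------------------------------------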
19253
19254static SDValue LowerTruncateVecI1(SDValue Op, SelectionDAG &DAG,
19255 const X86Subtarget &Subtarget) {
19256
19257 SDLoc DL(Op);
19258 MVT VT = Op.getSimpleValueType();
19259 SDValue In = Op.getOperand(0);
19260 MVT InVT = In.getSimpleValueType();
19261
19262 assert(VT.getVectorElementType() == MVT::i1 && "Unexpected vector type.");
19263
19264 // Shift LSB to MSB and use VPMOVB/W2M or TESTD/Q.
19265 unsigned ShiftInx = InVT.getScalarSizeInBits() - 1;
19266 if (InVT.getScalarSizeInBits() <= 16) {
19267 if (Subtarget.hasBWI()) {
19268 // legal, will go to VPMOVB2M, VPMOVW2M
19269 if (DAG.ComputeNumSignBits(In) < InVT.getScalarSizeInBits()) {
19270 // We need to shift to get the lsb into sign position.
19271 // Shift packed bytes not supported natively, bitcast to word
19272 MVT ExtVT = MVT::getVectorVT(MVT::i16, InVT.getSizeInBits()/16);
19273 In = DAG.getNode(ISD::SHL, DL, ExtVT,
19274 DAG.getBitcast(ExtVT, In),
19275 DAG.getConstant(ShiftInx, DL, ExtVT));
19276 In = DAG.getBitcast(InVT, In);
19277 }
19278 return DAG.getSetCC(DL, VT, DAG.getConstant(0, DL, InVT),
19279 In, ISD::SETGT);
19280 }
19281 // Use TESTD/Q, extended vector to packed dword/qword.
19282 assert((InVT.is256BitVector() || InVT.is128BitVector()) &&
19283 "Unexpected vector type.");
19284 unsigned NumElts = InVT.getVectorNumElements();
19285 assert((NumElts == 8 || NumElts == 16) && "Unexpected number of elements");
19286 // We need to change to a wider element type that we have support for.
19287 // For 8 element vectors this is easy, we either extend to v8i32 or v8i64.
19288 // For 16 element vectors we extend to v16i32 unless we are explicitly
19289 // trying to avoid 512-bit vectors. If we are avoiding 512-bit vectors
19290 // we need to split into two 8 element vectors which we can extend to v8i32,
19291 // truncate and concat the results. There's an additional complication if
19292 // the original type is v16i8. In that case we can't split the v16i8 so
19293 // first we pre-extend it to v16i16 which we can split to v8i16, then extend
19294 // to v8i32, truncate that to v8i1 and concat the two halves.
19295 if (NumElts == 16 && !Subtarget.canExtendTo512DQ()) {
19296 if (InVT == MVT::v16i8) {
19297 // First we need to sign extend up to 256-bits so we can split that.
19298 InVT = MVT::v16i16;
19299 In = DAG.getNode(ISD::SIGN_EXTEND, DL, InVT, In);
19300 }
19301 SDValue Lo = extract128BitVector(In, 0, DAG, DL);
19302 SDValue Hi = extract128BitVector(In, 8, DAG, DL);
19303 // We're split now, just emit two truncates and a concat. The two
19304 // truncates will trigger legalization to come back to this function.
19305 Lo = DAG.getNode(ISD::TRUNCATE, DL, MVT::v8i1, Lo);
19306 Hi = DAG.getNode(ISD::TRUNCATE, DL, MVT::v8i1, Hi);
19307 return DAG.getNode(ISD::CONCAT_VECTORS, DL, VT, Lo, Hi);
19308 }
19309 // We either have 8 elements or we're allowed to use 512-bit vectors.
19310 // If we have VLX, we want to use the narrowest vector that can get the
19311 // job done so we use vXi32.
19312 MVT EltVT = Subtarget.hasVLX() ? MVT::i32 : MVT::getIntegerVT(512/NumElts);
19313 MVT ExtVT = MVT::getVectorVT(EltVT, NumElts);
19314 In = DAG.getNode(ISD::SIGN_EXTEND, DL, ExtVT, In);
19315 InVT = ExtVT;
19316 ShiftInx = InVT.getScalarSizeInBits() - 1;
19317 }
19318
19319 if (DAG.ComputeNumSignBits(In) < InVT.getScalarSizeInBits()) {
19320 // We need to shift to get the lsb into sign position.
19321 In = DAG.getNode(ISD::SHL, DL, InVT, In,
19322 DAG.getConstant(ShiftInx, DL, InVT));
19323 }
19324 // If we have DQI, emit a pattern that will be iseled as vpmovq2m/vpmovd2m.
19325 if (Subtarget.hasDQI())
19326 return DAG.getSetCC(DL, VT, DAG.getConstant(0, DL, InVT), In, ISD::SETGT);
19327 return DAG.getSetCC(DL, VT, In, DAG.getConstant(0, DL, InVT), ISD::SETNE);
19328}
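
// ---- Editor's illustrative sketch (not part of the original source) ----
// Scalar model of the "shift lsb to msb, then setgt 0, x" step above: once
// the interesting bit sits in the sign position, a signed greater-than
// against zero reads it back out (the pattern the VPMOVB2M/VPMOVW2M and
// TESTD/Q paths match on). The helper name is hypothetical.
#include <cstdint>

static bool modelTruncEltToI1(uint32_t Elt) {
  int32_t Shifted = static_cast<int32_t>(Elt << 31); // lsb -> sign bit
  return 0 > Shifted;                                // setgt 0, x
}
// ------------------------------------------------------------------------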
19329
19330SDValue X86TargetLowering::LowerTRUNCATE(SDValue Op, SelectionDAG &DAG) const {
19331 SDLoc DL(Op);
19332 MVT VT = Op.getSimpleValueType();
19333 SDValue In = Op.getOperand(0);
19334 MVT InVT = In.getSimpleValueType();
19335 unsigned InNumEltBits = InVT.getScalarSizeInBits();
19336
19337 assert(VT.getVectorNumElements() == InVT.getVectorNumElements() &&
19338 "Invalid TRUNCATE operation");
19339
19340 // If we're called by the type legalizer, handle a few cases.
19341 const TargetLowering &TLI = DAG.getTargetLoweringInfo();
19342 if (!TLI.isTypeLegal(InVT)) {
19343 if ((InVT == MVT::v8i64 || InVT == MVT::v16i32 || InVT == MVT::v16i64) &&
19344 VT.is128BitVector()) {
19345 assert(Subtarget.hasVLX() && "Unexpected subtarget!");
19346 // The default behavior is to truncate one step, concatenate, and then
19347 // truncate the remainder. We'd rather produce two 64-bit results and
19348 // concatenate those.
19349 SDValue Lo, Hi;
19350 std::tie(Lo, Hi) = DAG.SplitVector(In, DL);
19351
19352 EVT LoVT, HiVT;
19353 std::tie(LoVT, HiVT) = DAG.GetSplitDestVTs(VT);
19354
19355 Lo = DAG.getNode(ISD::TRUNCATE, DL, LoVT, Lo);
19356 Hi = DAG.getNode(ISD::TRUNCATE, DL, HiVT, Hi);
19357 return DAG.getNode(ISD::CONCAT_VECTORS, DL, VT, Lo, Hi);
19358 }
19359
19360 // Otherwise let default legalization handle it.
19361 return SDValue();
19362 }
19363
19364 if (VT.getVectorElementType() == MVT::i1)
19365 return LowerTruncateVecI1(Op, DAG, Subtarget);
19366
19367 // vpmovqb/w/d, vpmovdb/w, vpmovwb
19368 if (Subtarget.hasAVX512()) {
19369 // word to byte only under BWI. Otherwise we have to promote to v16i32
19370 // and then truncate that. But we should only do that if we haven't been
19371 // asked to avoid 512-bit vectors. The actual promotion to v16i32 will be
19372 // handled by isel patterns.
19373 if (InVT != MVT::v16i16 || Subtarget.hasBWI() ||
19374 Subtarget.canExtendTo512DQ())
19375 return Op;
19376 }
19377
19378 unsigned NumPackedSignBits = std::min<unsigned>(VT.getScalarSizeInBits(), 16);
19379 unsigned NumPackedZeroBits = Subtarget.hasSSE41() ? NumPackedSignBits : 8;
19380
19381 // Truncate with PACKUS if we are truncating a vector with leading zero bits
19382 // that extend all the way to the packed/truncated value.
19383 // Pre-SSE41 we can only use PACKUSWB.
19384 KnownBits Known = DAG.computeKnownBits(In);
19385 if ((InNumEltBits - NumPackedZeroBits) <= Known.countMinLeadingZeros())
19386 if (SDValue V =
19387 truncateVectorWithPACK(X86ISD::PACKUS, VT, In, DL, DAG, Subtarget))
19388 return V;
19389
19390 // Truncate with PACKSS if we are truncating a vector with sign-bits that
19391 // extend all the way to the packed/truncated value.
19392 if ((InNumEltBits - NumPackedSignBits) < DAG.ComputeNumSignBits(In))
19393 if (SDValue V =
19394 truncateVectorWithPACK(X86ISD::PACKSS, VT, In, DL, DAG, Subtarget))
19395 return V;
19396
19397 // Handle truncation of V256 to V128 using shuffles.
19398 assert(VT.is128BitVector() && InVT.is256BitVector() && "Unexpected types!");
19399
19400 if ((VT == MVT::v4i32) && (InVT == MVT::v4i64)) {
19401 // On AVX2, v4i64 -> v4i32 becomes VPERMD.
19402 if (Subtarget.hasInt256()) {
19403 static const int ShufMask[] = {0, 2, 4, 6, -1, -1, -1, -1};
19404 In = DAG.getBitcast(MVT::v8i32, In);
19405 In = DAG.getVectorShuffle(MVT::v8i32, DL, In, In, ShufMask);
19406 return DAG.getNode(ISD::EXTRACT_SUBVECTOR, DL, VT, In,
19407 DAG.getIntPtrConstant(0, DL));
19408 }
19409
19410 SDValue OpLo = DAG.getNode(ISD::EXTRACT_SUBVECTOR, DL, MVT::v2i64, In,
19411 DAG.getIntPtrConstant(0, DL));
19412 SDValue OpHi = DAG.getNode(ISD::EXTRACT_SUBVECTOR, DL, MVT::v2i64, In,
19413 DAG.getIntPtrConstant(2, DL));
19414 OpLo = DAG.getBitcast(MVT::v4i32, OpLo);
19415 OpHi = DAG.getBitcast(MVT::v4i32, OpHi);
19416 static const int ShufMask[] = {0, 2, 4, 6};
19417 return DAG.getVectorShuffle(VT, DL, OpLo, OpHi, ShufMask);
19418 }
19419
19420 if ((VT == MVT::v8i16) && (InVT == MVT::v8i32)) {
19421 // On AVX2, v8i32 -> v8i16 becomes PSHUFB.
19422 if (Subtarget.hasInt256()) {
19423 In = DAG.getBitcast(MVT::v32i8, In);
19424
19425 // The PSHUFB mask:
19426 static const int ShufMask1[] = { 0, 1, 4, 5, 8, 9, 12, 13,
19427 -1, -1, -1, -1, -1, -1, -1, -1,
19428 16, 17, 20, 21, 24, 25, 28, 29,
19429 -1, -1, -1, -1, -1, -1, -1, -1 };
19430 In = DAG.getVectorShuffle(MVT::v32i8, DL, In, In, ShufMask1);
19431 In = DAG.getBitcast(MVT::v4i64, In);
19432
19433 static const int ShufMask2[] = {0, 2, -1, -1};
19434 In = DAG.getVectorShuffle(MVT::v4i64, DL, In, In, ShufMask2);
19435 In = DAG.getNode(ISD::EXTRACT_SUBVECTOR, DL, MVT::v2i64, In,
19436 DAG.getIntPtrConstant(0, DL));
19437 return DAG.getBitcast(VT, In);
19438 }
19439
19440 SDValue OpLo = DAG.getNode(ISD::EXTRACT_SUBVECTOR, DL, MVT::v4i32, In,
19441 DAG.getIntPtrConstant(0, DL));
19442
19443 SDValue OpHi = DAG.getNode(ISD::EXTRACT_SUBVECTOR, DL, MVT::v4i32, In,
19444 DAG.getIntPtrConstant(4, DL));
19445
19446 OpLo = DAG.getBitcast(MVT::v16i8, OpLo);
19447 OpHi = DAG.getBitcast(MVT::v16i8, OpHi);
19448
19449 // The PSHUFB mask:
19450 static const int ShufMask1[] = {0, 1, 4, 5, 8, 9, 12, 13,
19451 -1, -1, -1, -1, -1, -1, -1, -1};
19452
19453 OpLo = DAG.getVectorShuffle(MVT::v16i8, DL, OpLo, OpLo, ShufMask1);
19454 OpHi = DAG.getVectorShuffle(MVT::v16i8, DL, OpHi, OpHi, ShufMask1);
19455
19456 OpLo = DAG.getBitcast(MVT::v4i32, OpLo);
19457 OpHi = DAG.getBitcast(MVT::v4i32, OpHi);
19458
19459 // The MOVLHPS Mask:
19460 static const int ShufMask2[] = {0, 1, 4, 5};
19461 SDValue res = DAG.getVectorShuffle(MVT::v4i32, DL, OpLo, OpHi, ShufMask2);
19462 return DAG.getBitcast(MVT::v8i16, res);
19463 }
19464
19465 if (VT == MVT::v16i8 && InVT == MVT::v16i16) {
19466 // Use an AND to zero the upper bits for PACKUS.
19467 In = DAG.getNode(ISD::AND, DL, InVT, In, DAG.getConstant(255, DL, InVT));
19468
19469 SDValue InLo = DAG.getNode(ISD::EXTRACT_SUBVECTOR, DL, MVT::v8i16, In,
19470 DAG.getIntPtrConstant(0, DL));
19471 SDValue InHi = DAG.getNode(ISD::EXTRACT_SUBVECTOR, DL, MVT::v8i16, In,
19472 DAG.getIntPtrConstant(8, DL));
19473 return DAG.getNode(X86ISD::PACKUS, DL, VT, InLo, InHi);
19474 }
19475
19476 llvm_unreachable("All 256->128 cases should have been handled above!");
19477}
19478
19479SDValue X86TargetLowering::LowerFP_TO_INT(SDValue Op, SelectionDAG &DAG) const {
19480 bool IsSigned = Op.getOpcode() == ISD::FP_TO_SINT;
19481 MVT VT = Op.getSimpleValueType();
19482 SDValue Src = Op.getOperand(0);
19483 MVT SrcVT = Src.getSimpleValueType();
19484 SDLoc dl(Op);
19485
19486 if (SrcVT == MVT::f128) {
19487 RTLIB::Libcall LC;
19488 if (Op.getOpcode() == ISD::FP_TO_SINT)
19489 LC = RTLIB::getFPTOSINT(SrcVT, VT);
19490 else
19491 LC = RTLIB::getFPTOUINT(SrcVT, VT);
19492
19493 MakeLibCallOptions CallOptions;
19494 return makeLibCall(DAG, LC, VT, Src, CallOptions, SDLoc(Op)).first;
19495 }
19496
19497 if (VT.isVector()) {
19498 if (VT == MVT::v2i1 && SrcVT == MVT::v2f64) {
19499 MVT ResVT = MVT::v4i32;
19500 MVT TruncVT = MVT::v4i1;
19501 unsigned Opc = IsSigned ? X86ISD::CVTTP2SI : X86ISD::CVTTP2UI;
19502 if (!IsSigned && !Subtarget.hasVLX()) {
19503 // Widen to 512-bits.
19504 ResVT = MVT::v8i32;
19505 TruncVT = MVT::v8i1;
19506 Opc = ISD::FP_TO_UINT;
19507 Src = DAG.getNode(ISD::INSERT_SUBVECTOR, dl, MVT::v8f64,
19508 DAG.getUNDEF(MVT::v8f64),
19509 Src, DAG.getIntPtrConstant(0, dl));
19510 }
19511 SDValue Res = DAG.getNode(Opc, dl, ResVT, Src);
19512 Res = DAG.getNode(ISD::TRUNCATE, dl, TruncVT, Res);
19513 return DAG.getNode(ISD::EXTRACT_SUBVECTOR, dl, MVT::v2i1, Res,
19514 DAG.getIntPtrConstant(0, dl));
19515 }
19516
19517 assert(Subtarget.hasDQI() && Subtarget.hasVLX() && "Requires AVX512DQVL!");
19518 if (VT == MVT::v2i64 && SrcVT == MVT::v2f32) {
19519 return DAG.getNode(IsSigned ? X86ISD::CVTTP2SI : X86ISD::CVTTP2UI, dl, VT,
19520 DAG.getNode(ISD::CONCAT_VECTORS, dl, MVT::v4f32, Src,
19521 DAG.getUNDEF(MVT::v2f32)));
19522 }
19523
19524 return SDValue();
19525 }
19526
19527 assert(!VT.isVector());
19528
19529 bool UseSSEReg = isScalarFPTypeInSSEReg(SrcVT);
19530
19531 if (!IsSigned && UseSSEReg) {
19532 // Conversions from f32/f64 with AVX512 should be legal.
19533 if (Subtarget.hasAVX512())
19534 return Op;
19535
19536 // Use default expansion for i64.
19537 if (VT == MVT::i64)
19538 return SDValue();
19539
19540 assert(VT == MVT::i32 && "Unexpected VT!");
19541
19542 // Promote i32 to i64 and use a signed operation on 64-bit targets.
19543 if (Subtarget.is64Bit()) {
19544 SDValue Res = DAG.getNode(ISD::FP_TO_SINT, dl, MVT::i64, Src);
19545 return DAG.getNode(ISD::TRUNCATE, dl, VT, Res);
19546 }
19547
19548 // Use default expansion for SSE1/2 targets without SSE3. With SSE3 we can
19549 // use fisttp which will be handled later.
19550 if (!Subtarget.hasSSE3())
19551 return SDValue();
19552 }
19553
19554 // Promote i16 to i32 if we can use a SSE operation.
19555 if (VT == MVT::i16 && UseSSEReg) {
19556 assert(IsSigned && "Expected i16 FP_TO_UINT to have been promoted!");
19557 SDValue Res = DAG.getNode(ISD::FP_TO_SINT, dl, MVT::i32, Src);
19558 return DAG.getNode(ISD::TRUNCATE, dl, VT, Res);
19559 }
19560
19561 // If this is an FP_TO_SINT using SSEReg, we're done.
19562 if (UseSSEReg && IsSigned)
19563 return Op;
19564
19565 // Fall back to X87.
19566 if (SDValue V = FP_TO_INTHelper(Op, DAG, IsSigned))
19567 return V;
19568
19569 llvm_unreachable("Expected FP_TO_INTHelper to handle all remaining cases.");
19570}
19571
19572SDValue X86TargetLowering::LowerFP_EXTEND(SDValue Op, SelectionDAG &DAG) const {
19573 SDLoc DL(Op);
19574 MVT VT = Op.getSimpleValueType();
19575 SDValue In = Op.getOperand(0);
19576 MVT SVT = In.getSimpleValueType();
19577
19578 if (VT == MVT::f128) {
19579 RTLIB::Libcall LC = RTLIB::getFPEXT(SVT, VT);
19580 return LowerF128Call(Op, DAG, LC);
19581 }
19582
19583 assert(SVT == MVT::v2f32 && "Only customize MVT::v2f32 type legalization!");
19584
19585 return DAG.getNode(X86ISD::VFPEXT, DL, VT,
19586 DAG.getNode(ISD::CONCAT_VECTORS, DL, MVT::v4f32,
19587 In, DAG.getUNDEF(SVT)));
19588}
19589
19590SDValue X86TargetLowering::LowerFP_ROUND(SDValue Op, SelectionDAG &DAG) const {
19591 MVT VT = Op.getSimpleValueType();
19592 SDValue In = Op.getOperand(0);
19593 MVT SVT = In.getSimpleValueType();
19594
19595 // It's legal except when f128 is involved
19596 if (SVT != MVT::f128)
19597 return Op;
19598
19599 RTLIB::Libcall LC = RTLIB::getFPROUND(SVT, VT);
19600
19601 // FP_ROUND node has a second operand indicating whether it is known to be
19602 // precise. That doesn't take part in the LibCall so we can't directly use
19603 // LowerF128Call.
19604 MakeLibCallOptions CallOptions;
19605 return makeLibCall(DAG, LC, VT, In, CallOptions, SDLoc(Op)).first;
19606}
19607
19608// FIXME: This is a hack to allow FP_ROUND to be marked Custom without breaking
19609// the default expansion of STRICT_FP_ROUND.
19610static SDValue LowerSTRICT_FP_ROUND(SDValue Op, SelectionDAG &DAG) {
19611 // FIXME: Need to form a libcall with an input chain for f128.
19612 assert(Op.getOperand(0).getValueType() != MVT::f128 &&
19613 "Don't know how to handle f128 yet!");
19614 return Op;
19615}
19616
19617/// Horizontal vector math instructions may be slower than normal math with
19618/// shuffles. Limit horizontal op codegen based on size/speed trade-offs, uarch
19619/// implementation, and likely shuffle complexity of the alternate sequence.
19620static bool shouldUseHorizontalOp(bool IsSingleSource, SelectionDAG &DAG,
19621 const X86Subtarget &Subtarget) {
19622 bool IsOptimizingSize = DAG.getMachineFunction().getFunction().hasOptSize();
19623 bool HasFastHOps = Subtarget.hasFastHorizontalOps();
19624 return !IsSingleSource || IsOptimizingSize || HasFastHOps;
19625}
19626
19627/// Depending on uarch and/or optimizing for size, we might prefer to use a
19628/// vector operation in place of the typical scalar operation.
19629static SDValue lowerAddSubToHorizontalOp(SDValue Op, SelectionDAG &DAG,
19630 const X86Subtarget &Subtarget) {
19631 // If both operands have other uses, this is probably not profitable.
19632 SDValue LHS = Op.getOperand(0);
19633 SDValue RHS = Op.getOperand(1);
19634 if (!LHS.hasOneUse() && !RHS.hasOneUse())
19635 return Op;
19636
19637 // FP horizontal add/sub were added with SSE3. Integer with SSSE3.
19638 bool IsFP = Op.getSimpleValueType().isFloatingPoint();
19639 if (IsFP && !Subtarget.hasSSE3())
19640 return Op;
19641 if (!IsFP && !Subtarget.hasSSSE3())
19642 return Op;
19643
19644 // Extract from a common vector.
19645 if (LHS.getOpcode() != ISD::EXTRACT_VECTOR_ELT ||
19646 RHS.getOpcode() != ISD::EXTRACT_VECTOR_ELT ||
19647 LHS.getOperand(0) != RHS.getOperand(0) ||
19648 !isa<ConstantSDNode>(LHS.getOperand(1)) ||
19649 !isa<ConstantSDNode>(RHS.getOperand(1)) ||
19650 !shouldUseHorizontalOp(true, DAG, Subtarget))
19651 return Op;
19652
19653 // Allow commuted 'hadd' ops.
19654 // TODO: Allow commuted (f)sub by negating the result of (F)HSUB?
19655 unsigned HOpcode;
19656 switch (Op.getOpcode()) {
19657 case ISD::ADD: HOpcode = X86ISD::HADD; break;
19658 case ISD::SUB: HOpcode = X86ISD::HSUB; break;
19659 case ISD::FADD: HOpcode = X86ISD::FHADD; break;
19660 case ISD::FSUB: HOpcode = X86ISD::FHSUB; break;
19661 default:
19662 llvm_unreachable("Trying to lower unsupported opcode to horizontal op");
19663 }
19664 unsigned LExtIndex = LHS.getConstantOperandVal(1);
19665 unsigned RExtIndex = RHS.getConstantOperandVal(1);
19666 if ((LExtIndex & 1) == 1 && (RExtIndex & 1) == 0 &&
19667 (HOpcode == X86ISD::HADD || HOpcode == X86ISD::FHADD))
19668 std::swap(LExtIndex, RExtIndex);
19669
19670 if ((LExtIndex & 1) != 0 || RExtIndex != (LExtIndex + 1))
19671 return Op;
19672
19673 SDValue X = LHS.getOperand(0);
19674 EVT VecVT = X.getValueType();
19675 unsigned BitWidth = VecVT.getSizeInBits();
19676 unsigned NumLanes = BitWidth / 128;
19677 unsigned NumEltsPerLane = VecVT.getVectorNumElements() / NumLanes;
19678 assert((BitWidth == 128 || BitWidth == 256 || BitWidth == 512) &&
19679 "Not expecting illegal vector widths here");
19680
19681 // Creating a 256-bit horizontal op would be wasteful, and there is no 512-bit
19682 // equivalent, so extract the 256/512-bit source op to 128-bit if we can.
19683 SDLoc DL(Op);
19684 if (BitWidth == 256 || BitWidth == 512) {
19685 unsigned LaneIdx = LExtIndex / NumEltsPerLane;
19686 X = extract128BitVector(X, LaneIdx * NumEltsPerLane, DAG, DL);
19687 LExtIndex %= NumEltsPerLane;
19688 }
19689
19690 // add (extractelt (X, 0), extractelt (X, 1)) --> extractelt (hadd X, X), 0
19691 // add (extractelt (X, 1), extractelt (X, 0)) --> extractelt (hadd X, X), 0
19692 // add (extractelt (X, 2), extractelt (X, 3)) --> extractelt (hadd X, X), 1
19693 // sub (extractelt (X, 0), extractelt (X, 1)) --> extractelt (hsub X, X), 0
19694 SDValue HOp = DAG.getNode(HOpcode, DL, X.getValueType(), X, X);
19695 return DAG.getNode(ISD::EXTRACT_VECTOR_ELT, DL, Op.getSimpleValueType(), HOp,
19696 DAG.getIntPtrConstant(LExtIndex / 2, DL));
19697}
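
// ---- Editor's illustrative sketch (not part of the original source) ----
// The shape the mapping above produces, as intrinsics: x[0] + x[1] becomes
// one HADDPS plus an element-0 extract instead of two extracts and a
// scalar add. Assumes SSE3; the helper name is hypothetical.
#include <immintrin.h>

static float modelAddLowPair(__m128 X) {
  __m128 H = _mm_hadd_ps(X, X); // H[0] = X[0] + X[1]
  return _mm_cvtss_f32(H);      // extractelt (hadd X, X), 0
}
// ------------------------------------------------------------------------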
19698
19699/// Depending on uarch and/or optimizing for size, we might prefer to use a
19700/// vector operation in place of the typical scalar operation.
19701SDValue X86TargetLowering::lowerFaddFsub(SDValue Op, SelectionDAG &DAG) const {
19702 if (Op.getValueType() == MVT::f128) {
19703 RTLIB::Libcall LC = Op.getOpcode() == ISD::FADD ? RTLIB::ADD_F128
19704 : RTLIB::SUB_F128;
19705 return LowerF128Call(Op, DAG, LC);
19706 }
19707
19708 assert((Op.getValueType() == MVT::f32 || Op.getValueType() == MVT::f64) &&
19709 "Only expecting float/double");
19710 return lowerAddSubToHorizontalOp(Op, DAG, Subtarget);
19711}
19712
19713/// The only differences between FABS and FNEG are the mask and the logic op.
19714/// FNEG also has a folding opportunity for FNEG(FABS(x)).
19715static SDValue LowerFABSorFNEG(SDValue Op, SelectionDAG &DAG) {
19716 assert((Op.getOpcode() == ISD::FABS || Op.getOpcode() == ISD::FNEG) &&
19717 "Wrong opcode for lowering FABS or FNEG.");
19718
19719 bool IsFABS = (Op.getOpcode() == ISD::FABS);
19720
19721 // If this is a FABS and it has an FNEG user, bail out to fold the combination
19722 // into an FNABS. We'll lower the FABS after that if it is still in use.
19723 if (IsFABS)
19724 for (SDNode *User : Op->uses())
19725 if (User->getOpcode() == ISD::FNEG)
19726 return Op;
19727
19728 SDLoc dl(Op);
19729 MVT VT = Op.getSimpleValueType();
19730
19731 bool IsF128 = (VT == MVT::f128);
19732 assert((VT == MVT::f64 || VT == MVT::f32 || VT == MVT::f128 ||
19733 VT == MVT::v2f64 || VT == MVT::v4f64 || VT == MVT::v4f32 ||
19734 VT == MVT::v8f32 || VT == MVT::v8f64 || VT == MVT::v16f32) &&
19735 "Unexpected type in LowerFABSorFNEG");
19736
19737 // FIXME: Use function attribute "OptimizeForSize" and/or CodeGenOpt::Level to
19738 // decide if we should generate a 16-byte constant mask when we only need 4 or
19739 // 8 bytes for the scalar case.
19740
19741 // There are no scalar bitwise logical SSE/AVX instructions, so we
19742 // generate a 16-byte vector constant and logic op even for the scalar case.
19743 // Using a 16-byte mask allows folding the load of the mask with
19744 // the logic op, so it can save (~4 bytes) on code size.
19745 bool IsFakeVector = !VT.isVector() && !IsF128;
19746 MVT LogicVT = VT;
19747 if (IsFakeVector)
19748 LogicVT = (VT == MVT::f64) ? MVT::v2f64 : MVT::v4f32;
19749
19750 unsigned EltBits = VT.getScalarSizeInBits();
19751 // For FABS, mask is 0x7f...; for FNEG, mask is 0x80...
19752 APInt MaskElt = IsFABS ? APInt::getSignedMaxValue(EltBits) :
19753 APInt::getSignMask(EltBits);
19754 const fltSemantics &Sem = SelectionDAG::EVTToAPFloatSemantics(VT);
19755 SDValue Mask = DAG.getConstantFP(APFloat(Sem, MaskElt), dl, LogicVT);
19756
19757 SDValue Op0 = Op.getOperand(0);
19758 bool IsFNABS = !IsFABS && (Op0.getOpcode() == ISD::FABS);
19759 unsigned LogicOp = IsFABS ? X86ISD::FAND :
19760 IsFNABS ? X86ISD::FOR :
19761 X86ISD::FXOR;
19762 SDValue Operand = IsFNABS ? Op0.getOperand(0) : Op0;
19763
19764 if (VT.isVector() || IsF128)
19765 return DAG.getNode(LogicOp, dl, LogicVT, Operand, Mask);
19766
19767 // For the scalar case extend to a 128-bit vector, perform the logic op,
19768 // and extract the scalar result back out.
19769 Operand = DAG.getNode(ISD::SCALAR_TO_VECTOR, dl, LogicVT, Operand);
19770 SDValue LogicNode = DAG.getNode(LogicOp, dl, LogicVT, Operand, Mask);
19771 return DAG.getNode(ISD::EXTRACT_VECTOR_ELT, dl, VT, LogicNode,
19772 DAG.getIntPtrConstant(0, dl));
19773}
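
The masks built above are plain IEEE-754 bit patterns; as a standalone scalar sketch (illustrative helpers, not part of this file), FABS clears the sign bit, FNEG flips it, and FNEG(FABS x) sets it, which is why the code selects FAND, FXOR or FOR:

#include <cstdint>
#include <cstring>

double fabsViaMask(double X) {            // FAND with the getSignedMaxValue mask
  uint64_t Bits;
  std::memcpy(&Bits, &X, sizeof(Bits));
  Bits &= 0x7FFFFFFFFFFFFFFFULL;          // clear the sign bit
  std::memcpy(&X, &Bits, sizeof(Bits));
  return X;
}

double fnegViaMask(double X) {            // FXOR with the getSignMask mask
  uint64_t Bits;
  std::memcpy(&Bits, &X, sizeof(Bits));
  Bits ^= 0x8000000000000000ULL;          // flip the sign bit
  std::memcpy(&X, &Bits, sizeof(Bits));
  return X;
}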
19774
19775static SDValue LowerFCOPYSIGN(SDValue Op, SelectionDAG &DAG) {
19776 SDValue Mag = Op.getOperand(0);
19777 SDValue Sign = Op.getOperand(1);
19778 SDLoc dl(Op);
19779
19780 // If the sign operand is smaller, extend it first.
19781 MVT VT = Op.getSimpleValueType();
19782 if (Sign.getSimpleValueType().bitsLT(VT))
19783 Sign = DAG.getNode(ISD::FP_EXTEND, dl, VT, Sign);
19784
19785 // And if it is bigger, shrink it first.
19786 if (Sign.getSimpleValueType().bitsGT(VT))
19787 Sign = DAG.getNode(ISD::FP_ROUND, dl, VT, Sign, DAG.getIntPtrConstant(1, dl));
19788
19789 // At this point the operands and the result should have the same
19790 // type, and that won't be f80 since that is not custom lowered.
19791 bool IsF128 = (VT == MVT::f128);
19792   assert((VT == MVT::f64 || VT == MVT::f32 || VT == MVT::f128 ||
19793           VT == MVT::v2f64 || VT == MVT::v4f64 || VT == MVT::v4f32 ||
19794           VT == MVT::v8f32 || VT == MVT::v8f64 || VT == MVT::v16f32) &&
19795          "Unexpected type in LowerFCOPYSIGN");
19796
19797 const fltSemantics &Sem = SelectionDAG::EVTToAPFloatSemantics(VT);
19798
19799 // Perform all scalar logic operations as 16-byte vectors because there are no
19800 // scalar FP logic instructions in SSE.
19801 // TODO: This isn't necessary. If we used scalar types, we might avoid some
19802 // unnecessary splats, but we might miss load folding opportunities. Should
19803 // this decision be based on OptimizeForSize?
19804 bool IsFakeVector = !VT.isVector() && !IsF128;
19805 MVT LogicVT = VT;
19806 if (IsFakeVector)
19807 LogicVT = (VT == MVT::f64) ? MVT::v2f64 : MVT::v4f32;
19808
19809 // The mask constants are automatically splatted for vector types.
19810 unsigned EltSizeInBits = VT.getScalarSizeInBits();
19811 SDValue SignMask = DAG.getConstantFP(
19812 APFloat(Sem, APInt::getSignMask(EltSizeInBits)), dl, LogicVT);
19813 SDValue MagMask = DAG.getConstantFP(
19814 APFloat(Sem, APInt::getSignedMaxValue(EltSizeInBits)), dl, LogicVT);
19815
19816 // First, clear all bits but the sign bit from the second operand (sign).
19817 if (IsFakeVector)
19818 Sign = DAG.getNode(ISD::SCALAR_TO_VECTOR, dl, LogicVT, Sign);
19819 SDValue SignBit = DAG.getNode(X86ISD::FAND, dl, LogicVT, Sign, SignMask);
19820
19821 // Next, clear the sign bit from the first operand (magnitude).
19822 // TODO: If we had general constant folding for FP logic ops, this check
19823 // wouldn't be necessary.
19824 SDValue MagBits;
19825 if (ConstantFPSDNode *Op0CN = isConstOrConstSplatFP(Mag)) {
19826 APFloat APF = Op0CN->getValueAPF();
19827 APF.clearSign();
19828 MagBits = DAG.getConstantFP(APF, dl, LogicVT);
19829 } else {
19830 // If the magnitude operand wasn't a constant, we need to AND out the sign.
19831 if (IsFakeVector)
19832 Mag = DAG.getNode(ISD::SCALAR_TO_VECTOR, dl, LogicVT, Mag);
19833 MagBits = DAG.getNode(X86ISD::FAND, dl, LogicVT, Mag, MagMask);
19834 }
19835
19836 // OR the magnitude value with the sign bit.
19837 SDValue Or = DAG.getNode(X86ISD::FOR, dl, LogicVT, MagBits, SignBit);
19838 return !IsFakeVector ? Or : DAG.getNode(ISD::EXTRACT_VECTOR_ELT, dl, VT, Or,
19839 DAG.getIntPtrConstant(0, dl));
19840}
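
A minimal scalar model of the FAND/FAND/FOR sequence above (illustrative names, not from the source): keep every bit of the magnitude except its sign bit, then OR in only the sign bit of the sign operand.

#include <cstdint>
#include <cstring>

double copysignViaMasks(double Mag, double Sign) {
  uint64_t M, S;
  std::memcpy(&M, &Mag, sizeof(M));
  std::memcpy(&S, &Sign, sizeof(S));
  const uint64_t SignMask = 0x8000000000000000ULL;   // APInt::getSignMask(64)
  uint64_t R = (M & ~SignMask) | (S & SignMask);     // MagBits FOR SignBit
  std::memcpy(&Mag, &R, sizeof(R));
  return Mag;
}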
19841
19842static SDValue LowerFGETSIGN(SDValue Op, SelectionDAG &DAG) {
19843 SDValue N0 = Op.getOperand(0);
19844 SDLoc dl(Op);
19845 MVT VT = Op.getSimpleValueType();
19846
19847 MVT OpVT = N0.getSimpleValueType();
19848   assert((OpVT == MVT::f32 || OpVT == MVT::f64) &&
19849          "Unexpected type for FGETSIGN");
19850
19851 // Lower ISD::FGETSIGN to (AND (X86ISD::MOVMSK ...) 1).
19852 MVT VecVT = (OpVT == MVT::f32 ? MVT::v4f32 : MVT::v2f64);
19853 SDValue Res = DAG.getNode(ISD::SCALAR_TO_VECTOR, dl, VecVT, N0);
19854 Res = DAG.getNode(X86ISD::MOVMSK, dl, MVT::i32, Res);
19855 Res = DAG.getZExtOrTrunc(Res, dl, VT);
19856 Res = DAG.getNode(ISD::AND, dl, VT, Res, DAG.getConstant(1, dl, VT));
19857 return Res;
19858}
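
A scalar sketch of the lowering above (assumed helper, not in the source): MOVMSK packs the per-element sign bits into an integer, so ANDing with 1 leaves exactly the sign of element 0.

#include <cstdint>
#include <cstring>

int fgetsignModel(double X) {
  uint64_t Bits;
  std::memcpy(&Bits, &X, sizeof(Bits));
  return static_cast<int>(Bits >> 63) & 1;   // bit 0 of MOVMSK, masked with 1
}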
19859
19860/// Helper for creating a X86ISD::SETCC node.
19861static SDValue getSETCC(X86::CondCode Cond, SDValue EFLAGS, const SDLoc &dl,
19862 SelectionDAG &DAG) {
19863 return DAG.getNode(X86ISD::SETCC, dl, MVT::i8,
19864 DAG.getTargetConstant(Cond, dl, MVT::i8), EFLAGS);
19865}
19866
19867/// Helper for matching OR(EXTRACTELT(X,0),OR(EXTRACTELT(X,1),...))
19868/// style scalarized (associative) reduction patterns.
19869static bool matchBitOpReduction(SDValue Op, ISD::NodeType BinOp,
19870 SmallVectorImpl<SDValue> &SrcOps) {
19871 SmallVector<SDValue, 8> Opnds;
19872 DenseMap<SDValue, APInt> SrcOpMap;
19873 EVT VT = MVT::Other;
19874
19875   // Recognize a special case where a vector is cast into a wide integer to
19876 // test all 0s.
19877   assert(Op.getOpcode() == unsigned(BinOp) &&
19878          "Unexpected bit reduction opcode");
19879 Opnds.push_back(Op.getOperand(0));
19880 Opnds.push_back(Op.getOperand(1));
19881
19882 for (unsigned Slot = 0, e = Opnds.size(); Slot < e; ++Slot) {
19883 SmallVectorImpl<SDValue>::const_iterator I = Opnds.begin() + Slot;
19884 // BFS traverse all BinOp operands.
19885 if (I->getOpcode() == unsigned(BinOp)) {
19886 Opnds.push_back(I->getOperand(0));
19887 Opnds.push_back(I->getOperand(1));
19888 // Re-evaluate the number of nodes to be traversed.
19889 e += 2; // 2 more nodes (LHS and RHS) are pushed.
19890 continue;
19891 }
19892
19893     // Quit if this is not an EXTRACT_VECTOR_ELT.
19894 if (I->getOpcode() != ISD::EXTRACT_VECTOR_ELT)
19895 return false;
19896
19897     // Quit if there is no constant index.
19898 SDValue Idx = I->getOperand(1);
19899 if (!isa<ConstantSDNode>(Idx))
19900 return false;
19901
19902 SDValue Src = I->getOperand(0);
19903 DenseMap<SDValue, APInt>::iterator M = SrcOpMap.find(Src);
19904 if (M == SrcOpMap.end()) {
19905 VT = Src.getValueType();
19906 // Quit if not the same type.
19907 if (SrcOpMap.begin() != SrcOpMap.end() &&
19908 VT != SrcOpMap.begin()->first.getValueType())
19909 return false;
19910 unsigned NumElts = VT.getVectorNumElements();
19911 APInt EltCount = APInt::getNullValue(NumElts);
19912 M = SrcOpMap.insert(std::make_pair(Src, EltCount)).first;
19913 SrcOps.push_back(Src);
19914 }
19915 // Quit if element already used.
19916 unsigned CIdx = cast<ConstantSDNode>(Idx)->getZExtValue();
19917 if (M->second[CIdx])
19918 return false;
19919 M->second.setBit(CIdx);
19920 }
19921
19922 // Quit if not all elements are used.
19923 for (DenseMap<SDValue, APInt>::const_iterator I = SrcOpMap.begin(),
19924 E = SrcOpMap.end();
19925 I != E; ++I) {
19926 if (!I->second.isAllOnesValue())
19927 return false;
19928 }
19929
19930 return true;
19931}
19932
19933// Check whether an OR'd tree is PTEST-able.
19934static SDValue LowerVectorAllZeroTest(SDValue Op, ISD::CondCode CC,
19935 const X86Subtarget &Subtarget,
19936 SelectionDAG &DAG, SDValue &X86CC) {
19937   assert(Op.getOpcode() == ISD::OR && "Only check OR'd tree.");
19938
19939 if (!Subtarget.hasSSE41() || !Op->hasOneUse())
19940 return SDValue();
19941
19942 SmallVector<SDValue, 8> VecIns;
19943 if (!matchBitOpReduction(Op, ISD::OR, VecIns))
19944 return SDValue();
19945
19946 // Quit if not 128/256-bit vector.
19947 EVT VT = VecIns[0].getValueType();
19948 if (!VT.is128BitVector() && !VT.is256BitVector())
19949 return SDValue();
19950
19951 SDLoc DL(Op);
19952 MVT TestVT = VT.is128BitVector() ? MVT::v2i64 : MVT::v4i64;
19953
19954 // Cast all vectors into TestVT for PTEST.
19955 for (unsigned i = 0, e = VecIns.size(); i < e; ++i)
19956 VecIns[i] = DAG.getBitcast(TestVT, VecIns[i]);
19957
19958 // If more than one full vector is evaluated, OR them first before PTEST.
19959 for (unsigned Slot = 0, e = VecIns.size(); e - Slot > 1; Slot += 2, e += 1) {
19960 // Each iteration will OR 2 nodes and append the result until there is only
19961 // 1 node left, i.e. the final OR'd value of all vectors.
19962 SDValue LHS = VecIns[Slot];
19963 SDValue RHS = VecIns[Slot + 1];
19964 VecIns.push_back(DAG.getNode(ISD::OR, DL, TestVT, LHS, RHS));
19965 }
19966
19967 X86CC = DAG.getTargetConstant(CC == ISD::SETEQ ? X86::COND_E : X86::COND_NE,
19968 DL, MVT::i8);
19969 return DAG.getNode(X86ISD::PTEST, DL, MVT::i32, VecIns.back(), VecIns.back());
19970}
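
In scalar terms the transform above reduces to one observation (sketch only, with illustrative names): an OR tree compares equal to zero exactly when the OR of all its leaves is all-zero, which is what the ZF result of PTEST(V, V) reports.

#include <cstdint>

bool orTreeIsZero(uint64_t A, uint64_t B, uint64_t C) {
  uint64_t Or = (A | B) | C;   // the OR chain built before the PTEST node
  return Or == 0;              // ZF of PTEST(Or, Or); COND_E / COND_NE select eq/ne
}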
19971
19972/// Return true if \c Op has a use that doesn't just read flags.
19973static bool hasNonFlagsUse(SDValue Op) {
19974 for (SDNode::use_iterator UI = Op->use_begin(), UE = Op->use_end(); UI != UE;
19975 ++UI) {
19976 SDNode *User = *UI;
19977 unsigned UOpNo = UI.getOperandNo();
19978 if (User->getOpcode() == ISD::TRUNCATE && User->hasOneUse()) {
19979       // Look past the truncate.
19980 UOpNo = User->use_begin().getOperandNo();
19981 User = *User->use_begin();
19982 }
19983
19984 if (User->getOpcode() != ISD::BRCOND && User->getOpcode() != ISD::SETCC &&
19985 !(User->getOpcode() == ISD::SELECT && UOpNo == 0))
19986 return true;
19987 }
19988 return false;
19989}
19990
19991/// Emit nodes that will be selected as "test Op0,Op0", or something
19992/// equivalent.
19993static SDValue EmitTest(SDValue Op, unsigned X86CC, const SDLoc &dl,
19994 SelectionDAG &DAG, const X86Subtarget &Subtarget) {
19995 // CF and OF aren't always set the way we want. Determine which
19996 // of these we need.
19997 bool NeedCF = false;
19998 bool NeedOF = false;
19999 switch (X86CC) {
20000 default: break;
20001 case X86::COND_A: case X86::COND_AE:
20002 case X86::COND_B: case X86::COND_BE:
20003 NeedCF = true;
20004 break;
20005 case X86::COND_G: case X86::COND_GE:
20006 case X86::COND_L: case X86::COND_LE:
20007 case X86::COND_O: case X86::COND_NO: {
20008 // Check if we really need to set the
20009 // Overflow flag. If NoSignedWrap is present
20010 // that is not actually needed.
20011 switch (Op->getOpcode()) {
20012 case ISD::ADD:
20013 case ISD::SUB:
20014 case ISD::MUL:
20015 case ISD::SHL:
20016 if (Op.getNode()->getFlags().hasNoSignedWrap())
20017 break;
20018       LLVM_FALLTHROUGH;
20019 default:
20020 NeedOF = true;
20021 break;
20022 }
20023 break;
20024 }
20025 }
20026 // See if we can use the EFLAGS value from the operand instead of
20027 // doing a separate TEST. TEST always sets OF and CF to 0, so unless
20028 // we prove that the arithmetic won't overflow, we can't use OF or CF.
20029 if (Op.getResNo() != 0 || NeedOF || NeedCF) {
20030 // Emit a CMP with 0, which is the TEST pattern.
20031 return DAG.getNode(X86ISD::CMP, dl, MVT::i32, Op,
20032 DAG.getConstant(0, dl, Op.getValueType()));
20033 }
20034 unsigned Opcode = 0;
20035 unsigned NumOperands = 0;
20036
20037 SDValue ArithOp = Op;
20038
20039 // NOTICE: In the code below we use ArithOp to hold the arithmetic operation
20040 // which may be the result of a CAST. We use the variable 'Op', which is the
20041 // non-casted variable when we check for possible users.
20042 switch (ArithOp.getOpcode()) {
20043 case ISD::AND:
20044 // If the primary 'and' result isn't used, don't bother using X86ISD::AND,
20045 // because a TEST instruction will be better.
20046 if (!hasNonFlagsUse(Op))
20047 break;
20048
20049     LLVM_FALLTHROUGH;
20050 case ISD::ADD:
20051 case ISD::SUB:
20052 case ISD::OR:
20053 case ISD::XOR:
20054 // Transform to an x86-specific ALU node with flags if there is a chance of
20055 // using an RMW op or only the flags are used. Otherwise, leave
20056 // the node alone and emit a 'test' instruction.
20057 for (SDNode::use_iterator UI = Op.getNode()->use_begin(),
20058 UE = Op.getNode()->use_end(); UI != UE; ++UI)
20059 if (UI->getOpcode() != ISD::CopyToReg &&
20060 UI->getOpcode() != ISD::SETCC &&
20061 UI->getOpcode() != ISD::STORE)
20062 goto default_case;
20063
20064 // Otherwise use a regular EFLAGS-setting instruction.
20065 switch (ArithOp.getOpcode()) {
20066     default: llvm_unreachable("unexpected operator!");
20067 case ISD::ADD: Opcode = X86ISD::ADD; break;
20068 case ISD::SUB: Opcode = X86ISD::SUB; break;
20069 case ISD::XOR: Opcode = X86ISD::XOR; break;
20070 case ISD::AND: Opcode = X86ISD::AND; break;
20071 case ISD::OR: Opcode = X86ISD::OR; break;
20072 }
20073
20074 NumOperands = 2;
20075 break;
20076 case X86ISD::ADD:
20077 case X86ISD::SUB:
20078 case X86ISD::OR:
20079 case X86ISD::XOR:
20080 case X86ISD::AND:
20081 return SDValue(Op.getNode(), 1);
20082 case ISD::SSUBO:
20083 case ISD::USUBO: {
20084     // USUBO/SSUBO will become an X86ISD::SUB and we can use its Z flag.
20085 SDVTList VTs = DAG.getVTList(Op.getValueType(), MVT::i32);
20086 return DAG.getNode(X86ISD::SUB, dl, VTs, Op->getOperand(0),
20087 Op->getOperand(1)).getValue(1);
20088 }
20089 default:
20090 default_case:
20091 break;
20092 }
20093
20094 if (Opcode == 0) {
20095 // Emit a CMP with 0, which is the TEST pattern.
20096 return DAG.getNode(X86ISD::CMP, dl, MVT::i32, Op,
20097 DAG.getConstant(0, dl, Op.getValueType()));
20098 }
20099 SDVTList VTs = DAG.getVTList(Op.getValueType(), MVT::i32);
20100 SmallVector<SDValue, 4> Ops(Op->op_begin(), Op->op_begin() + NumOperands);
20101
20102 SDValue New = DAG.getNode(Opcode, dl, VTs, Ops);
20103 DAG.ReplaceAllUsesOfValueWith(SDValue(Op.getNode(), 0), New);
20104 return SDValue(New.getNode(), 1);
20105}
20106
20107/// Emit nodes that will be selected as "cmp Op0,Op1", or something
20108/// equivalent.
20109SDValue X86TargetLowering::EmitCmp(SDValue Op0, SDValue Op1, unsigned X86CC,
20110 const SDLoc &dl, SelectionDAG &DAG) const {
20111 if (isNullConstant(Op1))
20112 return EmitTest(Op0, X86CC, dl, DAG, Subtarget);
20113
20114 EVT CmpVT = Op0.getValueType();
20115
20116 if (CmpVT.isFloatingPoint())
20117 return DAG.getNode(X86ISD::CMP, dl, MVT::i32, Op0, Op1);
20118
20119   assert((CmpVT == MVT::i8 || CmpVT == MVT::i16 ||
20120           CmpVT == MVT::i32 || CmpVT == MVT::i64) && "Unexpected VT!");
20121
20122   // Only promote the compare up to I32 if it is a 16-bit operation
20123   // with an immediate. 16-bit immediates are to be avoided.
20124 if (CmpVT == MVT::i16 && !Subtarget.isAtom() &&
20125 !DAG.getMachineFunction().getFunction().hasMinSize()) {
20126 ConstantSDNode *COp0 = dyn_cast<ConstantSDNode>(Op0);
20127 ConstantSDNode *COp1 = dyn_cast<ConstantSDNode>(Op1);
20128 // Don't do this if the immediate can fit in 8-bits.
20129 if ((COp0 && !COp0->getAPIntValue().isSignedIntN(8)) ||
20130 (COp1 && !COp1->getAPIntValue().isSignedIntN(8))) {
20131 unsigned ExtendOp =
20132 isX86CCUnsigned(X86CC) ? ISD::ZERO_EXTEND : ISD::SIGN_EXTEND;
20133 if (X86CC == X86::COND_E || X86CC == X86::COND_NE) {
20134 // For equality comparisons try to use SIGN_EXTEND if the input was
20135         // truncated from something with enough sign bits.
20136 if (Op0.getOpcode() == ISD::TRUNCATE) {
20137 SDValue In = Op0.getOperand(0);
20138 unsigned EffBits =
20139 In.getScalarValueSizeInBits() - DAG.ComputeNumSignBits(In) + 1;
20140 if (EffBits <= 16)
20141 ExtendOp = ISD::SIGN_EXTEND;
20142 } else if (Op1.getOpcode() == ISD::TRUNCATE) {
20143 SDValue In = Op1.getOperand(0);
20144 unsigned EffBits =
20145 In.getScalarValueSizeInBits() - DAG.ComputeNumSignBits(In) + 1;
20146 if (EffBits <= 16)
20147 ExtendOp = ISD::SIGN_EXTEND;
20148 }
20149 }
20150
20151 CmpVT = MVT::i32;
20152 Op0 = DAG.getNode(ExtendOp, dl, CmpVT, Op0);
20153 Op1 = DAG.getNode(ExtendOp, dl, CmpVT, Op1);
20154 }
20155 }
20156 // Use SUB instead of CMP to enable CSE between SUB and CMP.
20157 SDVTList VTs = DAG.getVTList(CmpVT, MVT::i32);
20158 SDValue Sub = DAG.getNode(X86ISD::SUB, dl, VTs, Op0, Op1);
20159 return Sub.getValue(1);
20160}
20161
20162/// Convert a comparison if required by the subtarget.
20163SDValue X86TargetLowering::ConvertCmpIfNecessary(SDValue Cmp,
20164 SelectionDAG &DAG) const {
20165 // If the subtarget does not support the FUCOMI instruction, floating-point
20166 // comparisons have to be converted.
20167 if (Subtarget.hasCMov() ||
20168 Cmp.getOpcode() != X86ISD::CMP ||
20169 !Cmp.getOperand(0).getValueType().isFloatingPoint() ||
20170 !Cmp.getOperand(1).getValueType().isFloatingPoint())
20171 return Cmp;
20172
20173 // The instruction selector will select an FUCOM instruction instead of
20174 // FUCOMI, which writes the comparison result to FPSW instead of EFLAGS. Hence
20175 // build an SDNode sequence that transfers the result from FPSW into EFLAGS:
20176 // (X86sahf (trunc (srl (X86fp_stsw (trunc (X86cmp ...)), 8))))
20177 SDLoc dl(Cmp);
20178 SDValue TruncFPSW = DAG.getNode(ISD::TRUNCATE, dl, MVT::i16, Cmp);
20179 SDValue FNStSW = DAG.getNode(X86ISD::FNSTSW16r, dl, MVT::i16, TruncFPSW);
20180 SDValue Srl = DAG.getNode(ISD::SRL, dl, MVT::i16, FNStSW,
20181 DAG.getConstant(8, dl, MVT::i8));
20182 SDValue TruncSrl = DAG.getNode(ISD::TRUNCATE, dl, MVT::i8, Srl);
20183
20184 // Some 64-bit targets lack SAHF support, but they do support FCOMI.
20185   assert(Subtarget.hasLAHFSAHF() && "Target doesn't support SAHF or FCOMI?");
20186 return DAG.getNode(X86ISD::SAHF, dl, MVT::i32, TruncSrl);
20187}
20188
20189/// Check if replacement of SQRT with RSQRT should be disabled.
20190bool X86TargetLowering::isFsqrtCheap(SDValue Op, SelectionDAG &DAG) const {
20191 EVT VT = Op.getValueType();
20192
20193 // We never want to use both SQRT and RSQRT instructions for the same input.
20194 if (DAG.getNodeIfExists(X86ISD::FRSQRT, DAG.getVTList(VT), Op))
20195 return false;
20196
20197 if (VT.isVector())
20198 return Subtarget.hasFastVectorFSQRT();
20199 return Subtarget.hasFastScalarFSQRT();
20200}
20201
20202/// The minimum architected relative accuracy is 2^-12. We need one
20203/// Newton-Raphson step to have a good float result (24 bits of precision).
20204SDValue X86TargetLowering::getSqrtEstimate(SDValue Op,
20205 SelectionDAG &DAG, int Enabled,
20206 int &RefinementSteps,
20207 bool &UseOneConstNR,
20208 bool Reciprocal) const {
20209 EVT VT = Op.getValueType();
20210
20211 // SSE1 has rsqrtss and rsqrtps. AVX adds a 256-bit variant for rsqrtps.
20212 // It is likely not profitable to do this for f64 because a double-precision
20213 // rsqrt estimate with refinement on x86 prior to FMA requires at least 16
20214 // instructions: convert to single, rsqrtss, convert back to double, refine
20215 // (3 steps = at least 13 insts). If an 'rsqrtsd' variant was added to the ISA
20216 // along with FMA, this could be a throughput win.
20217 // TODO: SQRT requires SSE2 to prevent the introduction of an illegal v4i32
20218 // after legalize types.
20219 if ((VT == MVT::f32 && Subtarget.hasSSE1()) ||
20220 (VT == MVT::v4f32 && Subtarget.hasSSE1() && Reciprocal) ||
20221 (VT == MVT::v4f32 && Subtarget.hasSSE2() && !Reciprocal) ||
20222 (VT == MVT::v8f32 && Subtarget.hasAVX()) ||
20223 (VT == MVT::v16f32 && Subtarget.useAVX512Regs())) {
20224 if (RefinementSteps == ReciprocalEstimate::Unspecified)
20225 RefinementSteps = 1;
20226
20227 UseOneConstNR = false;
20228     // There is no 512-bit FRSQRT, but there is RSQRT14.
20229 unsigned Opcode = VT == MVT::v16f32 ? X86ISD::RSQRT14 : X86ISD::FRSQRT;
20230 return DAG.getNode(Opcode, SDLoc(Op), VT, Op);
20231 }
20232 return SDValue();
20233}
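
The single refinement step mentioned in the comment is the standard Newton-Raphson iteration for 1/sqrt(a); a minimal scalar sketch (illustrative only -- the actual step is emitted by the generic estimate machinery, not in this function):

float refineRsqrt(float A, float Y0) {         // Y0 ~= 1/sqrt(A) from RSQRTSS (~12 bits)
  return Y0 * (1.5f - 0.5f * A * Y0 * Y0);     // one step -> ~24 bits for f32
}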
20234
20235/// The minimum architected relative accuracy is 2^-12. We need one
20236/// Newton-Raphson step to have a good float result (24 bits of precision).
20237SDValue X86TargetLowering::getRecipEstimate(SDValue Op, SelectionDAG &DAG,
20238 int Enabled,
20239 int &RefinementSteps) const {
20240 EVT VT = Op.getValueType();
20241
20242 // SSE1 has rcpss and rcpps. AVX adds a 256-bit variant for rcpps.
20243 // It is likely not profitable to do this for f64 because a double-precision
20244 // reciprocal estimate with refinement on x86 prior to FMA requires
20245 // 15 instructions: convert to single, rcpss, convert back to double, refine
20246 // (3 steps = 12 insts). If an 'rcpsd' variant was added to the ISA
20247 // along with FMA, this could be a throughput win.
20248
20249 if ((VT == MVT::f32 && Subtarget.hasSSE1()) ||
20250 (VT == MVT::v4f32 && Subtarget.hasSSE1()) ||
20251 (VT == MVT::v8f32 && Subtarget.hasAVX()) ||
20252 (VT == MVT::v16f32 && Subtarget.useAVX512Regs())) {
20253 // Enable estimate codegen with 1 refinement step for vector division.
20254 // Scalar division estimates are disabled because they break too much
20255 // real-world code. These defaults are intended to match GCC behavior.
20256 if (VT == MVT::f32 && Enabled == ReciprocalEstimate::Unspecified)
20257 return SDValue();
20258
20259 if (RefinementSteps == ReciprocalEstimate::Unspecified)
20260 RefinementSteps = 1;
20261
20262     // There is no 512-bit FRCP, but there is RCP14.
20263 unsigned Opcode = VT == MVT::v16f32 ? X86ISD::RCP14 : X86ISD::FRCP;
20264 return DAG.getNode(Opcode, SDLoc(Op), VT, Op);
20265 }
20266 return SDValue();
20267}
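
Likewise, the refinement step for the reciprocal estimate is the Newton-Raphson iteration for 1/a; a minimal scalar sketch (again illustrative, performed by the generic DAG combiner rather than here):

float refineRecip(float A, float Y0) {   // Y0 ~= 1/A from RCPSS (~12 bits)
  return Y0 * (2.0f - A * Y0);           // one step -> ~24 bits for f32
}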
20268
20269/// If we have at least two divisions that use the same divisor, convert to
20270/// multiplication by a reciprocal. This may need to be adjusted for a given
20271/// CPU if a division's cost is not at least twice the cost of a multiplication.
20272/// This is because we still need one division to calculate the reciprocal and
20273/// then we need two multiplies by that reciprocal as replacements for the
20274/// original divisions.
20275unsigned X86TargetLowering::combineRepeatedFPDivisors() const {
20276 return 2;
20277}
20278
20279SDValue
20280X86TargetLowering::BuildSDIVPow2(SDNode *N, const APInt &Divisor,
20281 SelectionDAG &DAG,
20282 SmallVectorImpl<SDNode *> &Created) const {
20283 AttributeList Attr = DAG.getMachineFunction().getFunction().getAttributes();
20284 if (isIntDivCheap(N->getValueType(0), Attr))
20285 return SDValue(N,0); // Lower SDIV as SDIV
20286
20287   assert((Divisor.isPowerOf2() || (-Divisor).isPowerOf2()) &&
20288          "Unexpected divisor!");
20289
20290 // Only perform this transform if CMOV is supported otherwise the select
20291 // below will become a branch.
20292 if (!Subtarget.hasCMov())
20293 return SDValue();
20294
20295 // fold (sdiv X, pow2)
20296 EVT VT = N->getValueType(0);
20297 // FIXME: Support i8.
20298 if (VT != MVT::i16 && VT != MVT::i32 &&
20299 !(Subtarget.is64Bit() && VT == MVT::i64))
20300 return SDValue();
20301
20302 unsigned Lg2 = Divisor.countTrailingZeros();
20303
20304 // If the divisor is 2 or -2, the default expansion is better.
20305 if (Lg2 == 1)
20306 return SDValue();
20307
20308 SDLoc DL(N);
20309 SDValue N0 = N->getOperand(0);
20310 SDValue Zero = DAG.getConstant(0, DL, VT);
20311 APInt Lg2Mask = APInt::getLowBitsSet(VT.getSizeInBits(), Lg2);
20312 SDValue Pow2MinusOne = DAG.getConstant(Lg2Mask, DL, VT);
20313
20314 // If N0 is negative, we need to add (Pow2 - 1) to it before shifting right.
20315 SDValue Cmp = DAG.getSetCC(DL, MVT::i8, N0, Zero, ISD::SETLT);
20316 SDValue Add = DAG.getNode(ISD::ADD, DL, VT, N0, Pow2MinusOne);
20317 SDValue CMov = DAG.getNode(ISD::SELECT, DL, VT, Cmp, Add, N0);
20318
20319 Created.push_back(Cmp.getNode());
20320 Created.push_back(Add.getNode());
20321 Created.push_back(CMov.getNode());
20322
20323 // Divide by pow2.
20324 SDValue SRA =
20325 DAG.getNode(ISD::SRA, DL, VT, CMov, DAG.getConstant(Lg2, DL, MVT::i64));
20326
20327 // If we're dividing by a positive value, we're done. Otherwise, we must
20328 // negate the result.
20329 if (Divisor.isNonNegative())
20330 return SRA;
20331
20332 Created.push_back(SRA.getNode());
20333 return DAG.getNode(ISD::SUB, DL, VT, Zero, SRA);
20334}
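
As a scalar model of the node sequence built above (illustrative only; it ignores the Lg2 == 1 bail-out handled earlier): bias a negative dividend by 2^K - 1, shift arithmetically right by K, and negate if the divisor was negative.

#include <cstdint>

int32_t sdivPow2Model(int32_t X, unsigned K, bool NegativeDivisor) {
  int32_t Biased = X < 0 ? X + ((1 << K) - 1) : X;   // SETLT + ADD + CMOV
  int32_t Quot = Biased >> K;                        // SRA by Lg2
  return NegativeDivisor ? -Quot : Quot;             // SUB 0, SRA when needed
}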
20335
20336/// Result of 'and' is compared against zero. Change to a BT node if possible.
20337/// Returns the BT node and the condition code needed to use it.
20338static SDValue LowerAndToBT(SDValue And, ISD::CondCode CC,
20339 const SDLoc &dl, SelectionDAG &DAG,
20340 SDValue &X86CC) {
20341   assert(And.getOpcode() == ISD::AND && "Expected AND node!");
20342 SDValue Op0 = And.getOperand(0);
20343 SDValue Op1 = And.getOperand(1);
20344 if (Op0.getOpcode() == ISD::TRUNCATE)
20345 Op0 = Op0.getOperand(0);
20346 if (Op1.getOpcode() == ISD::TRUNCATE)
20347 Op1 = Op1.getOperand(0);
20348
20349 SDValue Src, BitNo;
20350 if (Op1.getOpcode() == ISD::SHL)
20351 std::swap(Op0, Op1);
20352 if (Op0.getOpcode() == ISD::SHL) {
20353 if (isOneConstant(Op0.getOperand(0))) {
20354 // If we looked past a truncate, check that it's only truncating away
20355 // known zeros.
20356 unsigned BitWidth = Op0.getValueSizeInBits();
20357 unsigned AndBitWidth = And.getValueSizeInBits();
20358 if (BitWidth > AndBitWidth) {
20359 KnownBits Known = DAG.computeKnownBits(Op0);
20360 if (Known.countMinLeadingZeros() < BitWidth - AndBitWidth)
20361 return SDValue();
20362 }
20363 Src = Op1;
20364 BitNo = Op0.getOperand(1);
20365 }
20366 } else if (Op1.getOpcode() == ISD::Constant) {
20367 ConstantSDNode *AndRHS = cast<ConstantSDNode>(Op1);
20368 uint64_t AndRHSVal = AndRHS->getZExtValue();
20369 SDValue AndLHS = Op0;
20370
20371 if (AndRHSVal == 1 && AndLHS.getOpcode() == ISD::SRL) {
20372 Src = AndLHS.getOperand(0);
20373 BitNo = AndLHS.getOperand(1);
20374 } else {
20375 // Use BT if the immediate can't be encoded in a TEST instruction or we
20376       // are optimizing for size and the immediate won't fit in a byte.
20377 bool OptForSize = DAG.getMachineFunction().getFunction().hasOptSize();
20378 if ((!isUInt<32>(AndRHSVal) || (OptForSize && !isUInt<8>(AndRHSVal))) &&
20379 isPowerOf2_64(AndRHSVal)) {
20380 Src = AndLHS;
20381 BitNo = DAG.getConstant(Log2_64_Ceil(AndRHSVal), dl,
20382 Src.getValueType());
20383 }
20384 }
20385 }
20386
20387 // No patterns found, give up.
20388 if (!Src.getNode())
20389 return SDValue();
20390
20391 // If Src is i8, promote it to i32 with any_extend. There is no i8 BT
20392 // instruction. Since the shift amount is in-range-or-undefined, we know
20393 // that doing a bittest on the i32 value is ok. We extend to i32 because
20394 // the encoding for the i16 version is larger than the i32 version.
20395   // Also promote i16 to i32 for performance / code size reasons.
20396 if (Src.getValueType() == MVT::i8 || Src.getValueType() == MVT::i16)
20397 Src = DAG.getNode(ISD::ANY_EXTEND, dl, MVT::i32, Src);
20398
20399 // See if we can use the 32-bit instruction instead of the 64-bit one for a
20400 // shorter encoding. Since the former takes the modulo 32 of BitNo and the
20401 // latter takes the modulo 64, this is only valid if the 5th bit of BitNo is
20402 // known to be zero.
20403 if (Src.getValueType() == MVT::i64 &&
20404 DAG.MaskedValueIsZero(BitNo, APInt(BitNo.getValueSizeInBits(), 32)))
20405 Src = DAG.getNode(ISD::TRUNCATE, dl, MVT::i32, Src);
20406
20407 // If the operand types disagree, extend the shift amount to match. Since
20408 // BT ignores high bits (like shifts) we can use anyextend.
20409 if (Src.getValueType() != BitNo.getValueType())
20410 BitNo = DAG.getNode(ISD::ANY_EXTEND, dl, Src.getValueType(), BitNo);
20411
20412 X86CC = DAG.getTargetConstant(CC == ISD::SETEQ ? X86::COND_AE : X86::COND_B,
20413 dl, MVT::i8);
20414 return DAG.getNode(X86ISD::BT, dl, MVT::i32, Src, BitNo);
20415}
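
The reason COND_B/COND_AE are chosen above: BT copies the selected bit (taken modulo the operand width) into CF, so a sketch of the predicate it computes is simply:

#include <cstdint>

bool btCarryFlag(uint32_t Src, unsigned BitNo) {
  return (Src >> (BitNo % 32)) & 1;   // CF after BT; COND_B = bit set, COND_AE = bit clear
}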
20416
20417/// Turns an ISD::CondCode into a value suitable for SSE floating-point mask
20418/// CMPs.
20419static unsigned translateX86FSETCC(ISD::CondCode SetCCOpcode, SDValue &Op0,
20420 SDValue &Op1) {
20421 unsigned SSECC;
20422 bool Swap = false;
20423
20424 // SSE Condition code mapping:
20425 // 0 - EQ
20426 // 1 - LT
20427 // 2 - LE
20428 // 3 - UNORD
20429 // 4 - NEQ
20430 // 5 - NLT
20431 // 6 - NLE
20432 // 7 - ORD
20433 switch (SetCCOpcode) {
20434   default: llvm_unreachable("Unexpected SETCC condition");
20435 case ISD::SETOEQ:
20436 case ISD::SETEQ: SSECC = 0; break;
20437 case ISD::SETOGT:
20438   case ISD::SETGT: Swap = true; LLVM_FALLTHROUGH;
20439 case ISD::SETLT:
20440 case ISD::SETOLT: SSECC = 1; break;
20441 case ISD::SETOGE:
20442   case ISD::SETGE: Swap = true; LLVM_FALLTHROUGH;
20443 case ISD::SETLE:
20444 case ISD::SETOLE: SSECC = 2; break;
20445 case ISD::SETUO: SSECC = 3; break;
20446 case ISD::SETUNE:
20447 case ISD::SETNE: SSECC = 4; break;
20448   case ISD::SETULE: Swap = true; LLVM_FALLTHROUGH;
20449 case ISD::SETUGE: SSECC = 5; break;
20450   case ISD::SETULT: Swap = true; LLVM_FALLTHROUGH;
20451 case ISD::SETUGT: SSECC = 6; break;
20452 case ISD::SETO: SSECC = 7; break;
20453 case ISD::SETUEQ: SSECC = 8; break;
20454 case ISD::SETONE: SSECC = 12; break;
20455 }
20456 if (Swap)
20457 std::swap(Op0, Op1);
20458
20459 return SSECC;
20460}
20461
20462/// Break a 256-bit integer VSETCC into two new 128-bit ones and then
20463/// concatenate the result back.
20464static SDValue Lower256IntVSETCC(SDValue Op, SelectionDAG &DAG) {
20465 MVT VT = Op.getSimpleValueType();
20466
20467   assert(VT.is256BitVector() && Op.getOpcode() == ISD::SETCC &&
20468          "Unsupported value type for operation");
20469
20470 unsigned NumElems = VT.getVectorNumElements();
20471 SDLoc dl(Op);
20472 SDValue CC = Op.getOperand(2);
20473
20474 // Extract the LHS vectors
20475 SDValue LHS = Op.getOperand(0);
20476 SDValue LHS1 = extract128BitVector(LHS, 0, DAG, dl);
20477 SDValue LHS2 = extract128BitVector(LHS, NumElems / 2, DAG, dl);
20478
20479 // Extract the RHS vectors
20480 SDValue RHS = Op.getOperand(1);
20481 SDValue RHS1 = extract128BitVector(RHS, 0, DAG, dl);
20482 SDValue RHS2 = extract128BitVector(RHS, NumElems / 2, DAG, dl);
20483
20484 // Issue the operation on the smaller types and concatenate the result back
20485 MVT EltVT = VT.getVectorElementType();
20486 MVT NewVT = MVT::getVectorVT(EltVT, NumElems/2);
20487 return DAG.getNode(ISD::CONCAT_VECTORS, dl, VT,
20488 DAG.getNode(Op.getOpcode(), dl, NewVT, LHS1, RHS1, CC),
20489 DAG.getNode(Op.getOpcode(), dl, NewVT, LHS2, RHS2, CC));
20490}
20491
20492static SDValue LowerIntVSETCC_AVX512(SDValue Op, SelectionDAG &DAG) {
20493
20494 SDValue Op0 = Op.getOperand(0);
20495 SDValue Op1 = Op.getOperand(1);
20496 SDValue CC = Op.getOperand(2);
20497 MVT VT = Op.getSimpleValueType();
20498 SDLoc dl(Op);
20499
20500   assert(VT.getVectorElementType() == MVT::i1 &&
20501          "Cannot set masked compare for this operation");
20502
20503 ISD::CondCode SetCCOpcode = cast<CondCodeSDNode>(CC)->get();
20504
20505 // Prefer SETGT over SETLT.
20506 if (SetCCOpcode == ISD::SETLT) {
20507 SetCCOpcode = ISD::getSetCCSwappedOperands(SetCCOpcode);
20508 std::swap(Op0, Op1);
20509 }
20510
20511 return DAG.getSetCC(dl, VT, Op0, Op1, SetCCOpcode);
20512}
20513
20514/// Given a buildvector constant, return a new vector constant with each element
20515/// incremented or decremented. If incrementing or decrementing would result in
20516/// unsigned overflow or underflow or this is not a simple vector constant,
20517/// return an empty value.
20518static SDValue incDecVectorConstant(SDValue V, SelectionDAG &DAG, bool IsInc) {
20519 auto *BV = dyn_cast<BuildVectorSDNode>(V.getNode());
20520 if (!BV)
20521 return SDValue();
20522
20523 MVT VT = V.getSimpleValueType();
20524 MVT EltVT = VT.getVectorElementType();
20525 unsigned NumElts = VT.getVectorNumElements();
20526 SmallVector<SDValue, 8> NewVecC;
20527 SDLoc DL(V);
20528 for (unsigned i = 0; i < NumElts; ++i) {
20529 auto *Elt = dyn_cast<ConstantSDNode>(BV->getOperand(i));
20530 if (!Elt || Elt->isOpaque() || Elt->getSimpleValueType(0) != EltVT)
20531 return SDValue();
20532
20533 // Avoid overflow/underflow.
20534 const APInt &EltC = Elt->getAPIntValue();
20535 if ((IsInc && EltC.isMaxValue()) || (!IsInc && EltC.isNullValue()))
20536 return SDValue();
20537
20538 NewVecC.push_back(DAG.getConstant(EltC + (IsInc ? 1 : -1), DL, EltVT));
20539 }
20540
20541 return DAG.getBuildVector(VT, DL, NewVecC);
20542}
20543
20544/// As another special case, use PSUBUS[BW] when it's profitable. E.g. for
20545/// Op0 u<= Op1:
20546/// t = psubus Op0, Op1
20547/// pcmpeq t, <0..0>
20548static SDValue LowerVSETCCWithSUBUS(SDValue Op0, SDValue Op1, MVT VT,
20549 ISD::CondCode Cond, const SDLoc &dl,
20550 const X86Subtarget &Subtarget,
20551 SelectionDAG &DAG) {
20552 if (!Subtarget.hasSSE2())
20553 return SDValue();
20554
20555 MVT VET = VT.getVectorElementType();
20556 if (VET != MVT::i8 && VET != MVT::i16)
20557 return SDValue();
20558
20559 switch (Cond) {
20560 default:
20561 return SDValue();
20562 case ISD::SETULT: {
20563 // If the comparison is against a constant we can turn this into a
20564 // setule. With psubus, setule does not require a swap. This is
20565 // beneficial because the constant in the register is no longer
20566     // clobbered as the destination, so it can be hoisted out of a loop.
20567 // Only do this pre-AVX since vpcmp* is no longer destructive.
20568 if (Subtarget.hasAVX())
20569 return SDValue();
20570 SDValue ULEOp1 = incDecVectorConstant(Op1, DAG, /*IsInc*/false);
20571 if (!ULEOp1)
20572 return SDValue();
20573 Op1 = ULEOp1;
20574 break;
20575 }
20576 case ISD::SETUGT: {
20577 // If the comparison is against a constant, we can turn this into a setuge.
20578 // This is beneficial because materializing a constant 0 for the PCMPEQ is
20579 // probably cheaper than XOR+PCMPGT using 2 different vector constants:
20580 // cmpgt (xor X, SignMaskC) CmpC --> cmpeq (usubsat (CmpC+1), X), 0
20581 SDValue UGEOp1 = incDecVectorConstant(Op1, DAG, /*IsInc*/true);
20582 if (!UGEOp1)
20583 return SDValue();
20584 Op1 = Op0;
20585 Op0 = UGEOp1;
20586 break;
20587 }
20588 // Psubus is better than flip-sign because it requires no inversion.
20589 case ISD::SETUGE:
20590 std::swap(Op0, Op1);
20591 break;
20592 case ISD::SETULE:
20593 break;
20594 }
20595
20596 SDValue Result = DAG.getNode(ISD::USUBSAT, dl, VT, Op0, Op1);
20597 return DAG.getNode(X86ISD::PCMPEQ, dl, VT, Result,
20598 DAG.getConstant(0, dl, VT));
20599}
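
Per element, the PSUBUS trick used above rests on one identity (sketch with illustrative names): unsigned saturating subtraction a - b is zero exactly when a <= b.

#include <cstdint>

bool uleViaUsubsat(uint8_t A, uint8_t B) {
  uint8_t Sat = A > B ? static_cast<uint8_t>(A - B) : 0;   // USUBSAT
  return Sat == 0;                                         // PCMPEQ with zero
}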
20600
20601static SDValue LowerVSETCC(SDValue Op, const X86Subtarget &Subtarget,
20602 SelectionDAG &DAG) {
20603 SDValue Op0 = Op.getOperand(0);
20604 SDValue Op1 = Op.getOperand(1);
20605 SDValue CC = Op.getOperand(2);
20606 MVT VT = Op.getSimpleValueType();
20607 ISD::CondCode Cond = cast<CondCodeSDNode>(CC)->get();
20608 bool isFP = Op.getOperand(1).getSimpleValueType().isFloatingPoint();
20609 SDLoc dl(Op);
20610
20611 if (isFP) {
20612#ifndef NDEBUG
20613 MVT EltVT = Op0.getSimpleValueType().getVectorElementType();
20614   assert(EltVT == MVT::f32 || EltVT == MVT::f64);
20615#endif
20616
20617 unsigned Opc;
20618 if (Subtarget.hasAVX512() && VT.getVectorElementType() == MVT::i1) {
20619       assert(VT.getVectorNumElements() <= 16);
20620 Opc = X86ISD::CMPM;
20621 } else {
20622 Opc = X86ISD::CMPP;
20623 // The SSE/AVX packed FP comparison nodes are defined with a
20624 // floating-point vector result that matches the operand type. This allows
20625 // them to work with an SSE1 target (integer vector types are not legal).
20626 VT = Op0.getSimpleValueType();
20627 }
20628
20629 // In the two cases not handled by SSE compare predicates (SETUEQ/SETONE),
20630 // emit two comparisons and a logic op to tie them together.
20631 SDValue Cmp;
20632 unsigned SSECC = translateX86FSETCC(Cond, Op0, Op1);
20633 if (SSECC >= 8 && !Subtarget.hasAVX()) {
20634 // LLVM predicate is SETUEQ or SETONE.
20635 unsigned CC0, CC1;
20636 unsigned CombineOpc;
20637 if (Cond == ISD::SETUEQ) {
20638 CC0 = 3; // UNORD
20639 CC1 = 0; // EQ
20640 CombineOpc = X86ISD::FOR;
20641 } else {
20642         assert(Cond == ISD::SETONE);
20643 CC0 = 7; // ORD
20644 CC1 = 4; // NEQ
20645 CombineOpc = X86ISD::FAND;
20646 }
20647
20648 SDValue Cmp0 = DAG.getNode(Opc, dl, VT, Op0, Op1,
20649 DAG.getTargetConstant(CC0, dl, MVT::i8));
20650 SDValue Cmp1 = DAG.getNode(Opc, dl, VT, Op0, Op1,
20651 DAG.getTargetConstant(CC1, dl, MVT::i8));
20652 Cmp = DAG.getNode(CombineOpc, dl, VT, Cmp0, Cmp1);
20653 } else {
20654 // Handle all other FP comparisons here.
20655 Cmp = DAG.getNode(Opc, dl, VT, Op0, Op1,
20656 DAG.getTargetConstant(SSECC, dl, MVT::i8));
20657 }
20658
20659 // If this is SSE/AVX CMPP, bitcast the result back to integer to match the
20660 // result type of SETCC. The bitcast is expected to be optimized away
20661 // during combining/isel.
20662 if (Opc == X86ISD::CMPP)
20663 Cmp = DAG.getBitcast(Op.getSimpleValueType(), Cmp);
20664
20665 return Cmp;
20666 }
20667
20668 MVT VTOp0 = Op0.getSimpleValueType();
20669 (void)VTOp0;
20670   assert(VTOp0 == Op1.getSimpleValueType() &&
20671          "Expected operands with same type!");
20672   assert(VT.getVectorNumElements() == VTOp0.getVectorNumElements() &&
20673          "Invalid number of packed elements for source and destination!");
20674
20675 // The non-AVX512 code below works under the assumption that source and
20676 // destination types are the same.
20677   assert((Subtarget.hasAVX512() || (VT == VTOp0)) &&
20678          "Value types for source and destination must be the same!");
20679
20680 // The result is boolean, but operands are int/float
20681 if (VT.getVectorElementType() == MVT::i1) {
20682 // In AVX-512 architecture setcc returns mask with i1 elements,
20683 // But there is no compare instruction for i8 and i16 elements in KNL.
20684     assert((VTOp0.getScalarSizeInBits() >= 32 || Subtarget.hasBWI()) &&
20685            "Unexpected operand type");
20686 return LowerIntVSETCC_AVX512(Op, DAG);
20687 }
20688
20689 // Lower using XOP integer comparisons.
20690 if (VT.is128BitVector() && Subtarget.hasXOP()) {
20691 // Translate compare code to XOP PCOM compare mode.
20692 unsigned CmpMode = 0;
20693 switch (Cond) {
20694     default: llvm_unreachable("Unexpected SETCC condition");
20695 case ISD::SETULT:
20696 case ISD::SETLT: CmpMode = 0x00; break;
20697 case ISD::SETULE:
20698 case ISD::SETLE: CmpMode = 0x01; break;
20699 case ISD::SETUGT:
20700 case ISD::SETGT: CmpMode = 0x02; break;
20701 case ISD::SETUGE:
20702 case ISD::SETGE: CmpMode = 0x03; break;
20703 case ISD::SETEQ: CmpMode = 0x04; break;
20704 case ISD::SETNE: CmpMode = 0x05; break;
20705 }
20706
20707 // Are we comparing unsigned or signed integers?
20708 unsigned Opc =
20709 ISD::isUnsignedIntSetCC(Cond) ? X86ISD::VPCOMU : X86ISD::VPCOM;
20710
20711 return DAG.getNode(Opc, dl, VT, Op0, Op1,
20712 DAG.getTargetConstant(CmpMode, dl, MVT::i8));
20713 }
20714
20715 // (X & Y) != 0 --> (X & Y) == Y iff Y is power-of-2.
20716 // Revert part of the simplifySetCCWithAnd combine, to avoid an invert.
20717 if (Cond == ISD::SETNE && ISD::isBuildVectorAllZeros(Op1.getNode())) {
20718 SDValue BC0 = peekThroughBitcasts(Op0);
20719 if (BC0.getOpcode() == ISD::AND) {
20720 APInt UndefElts;
20721 SmallVector<APInt, 64> EltBits;
20722 if (getTargetConstantBitsFromNode(BC0.getOperand(1),
20723 VT.getScalarSizeInBits(), UndefElts,
20724 EltBits, false, false)) {
20725 if (llvm::all_of(EltBits, [](APInt &V) { return V.isPowerOf2(); })) {
20726 Cond = ISD::SETEQ;
20727 Op1 = DAG.getBitcast(VT, BC0.getOperand(1));
20728 }
20729 }
20730 }
20731 }
20732
20733 // ICMP_EQ(AND(X,C),C) -> SRA(SHL(X,LOG2(C)),BW-1) iff C is power-of-2.
20734 if (Cond == ISD::SETEQ && Op0.getOpcode() == ISD::AND &&
20735 Op0.getOperand(1) == Op1 && Op0.hasOneUse()) {
20736 ConstantSDNode *C1 = isConstOrConstSplat(Op1);
20737 if (C1 && C1->getAPIntValue().isPowerOf2()) {
20738 unsigned BitWidth = VT.getScalarSizeInBits();
20739 unsigned ShiftAmt = BitWidth - C1->getAPIntValue().logBase2() - 1;
20740
20741 SDValue Result = Op0.getOperand(0);
20742 Result = DAG.getNode(ISD::SHL, dl, VT, Result,
20743 DAG.getConstant(ShiftAmt, dl, VT));
20744 Result = DAG.getNode(ISD::SRA, dl, VT, Result,
20745 DAG.getConstant(BitWidth - 1, dl, VT));
20746 return Result;
20747 }
20748 }
20749
20750 // Break 256-bit integer vector compare into smaller ones.
20751 if (VT.is256BitVector() && !Subtarget.hasInt256())
20752 return Lower256IntVSETCC(Op, DAG);
20753
20754 // If this is a SETNE against the signed minimum value, change it to SETGT.
20755   // If this is a SETNE against the signed maximum value, change it to SETLT,
20756   // which will be swapped to SETGT.
20757 // Otherwise we use PCMPEQ+invert.
20758 APInt ConstValue;
20759 if (Cond == ISD::SETNE &&
20760 ISD::isConstantSplatVector(Op1.getNode(), ConstValue)) {
20761 if (ConstValue.isMinSignedValue())
20762 Cond = ISD::SETGT;
20763 else if (ConstValue.isMaxSignedValue())
20764 Cond = ISD::SETLT;
20765 }
20766
20767 // If both operands are known non-negative, then an unsigned compare is the
20768 // same as a signed compare and there's no need to flip signbits.
20769 // TODO: We could check for more general simplifications here since we're
20770 // computing known bits.
20771 bool FlipSigns = ISD::isUnsignedIntSetCC(Cond) &&
20772 !(DAG.SignBitIsZero(Op0) && DAG.SignBitIsZero(Op1));
20773
20774 // Special case: Use min/max operations for unsigned compares.
20775 const TargetLowering &TLI = DAG.getTargetLoweringInfo();
20776 if (ISD::isUnsignedIntSetCC(Cond) &&
20777 (FlipSigns || ISD::isTrueWhenEqual(Cond)) &&
20778 TLI.isOperationLegal(ISD::UMIN, VT)) {
20779 // If we have a constant operand, increment/decrement it and change the
20780 // condition to avoid an invert.
20781 if (Cond == ISD::SETUGT) {
20782 // X > C --> X >= (C+1) --> X == umax(X, C+1)
20783 if (SDValue UGTOp1 = incDecVectorConstant(Op1, DAG, /*IsInc*/true)) {
20784 Op1 = UGTOp1;
20785 Cond = ISD::SETUGE;
20786 }
20787 }
20788 if (Cond == ISD::SETULT) {
20789 // X < C --> X <= (C-1) --> X == umin(X, C-1)
20790 if (SDValue ULTOp1 = incDecVectorConstant(Op1, DAG, /*IsInc*/false)) {
20791 Op1 = ULTOp1;
20792 Cond = ISD::SETULE;
20793 }
20794 }
20795 bool Invert = false;
20796 unsigned Opc;
20797 switch (Cond) {
20798     default: llvm_unreachable("Unexpected condition code");
20799     case ISD::SETUGT: Invert = true; LLVM_FALLTHROUGH;
20800 case ISD::SETULE: Opc = ISD::UMIN; break;
20801     case ISD::SETULT: Invert = true; LLVM_FALLTHROUGH;
20802 case ISD::SETUGE: Opc = ISD::UMAX; break;
20803 }
20804
20805 SDValue Result = DAG.getNode(Opc, dl, VT, Op0, Op1);
20806 Result = DAG.getNode(X86ISD::PCMPEQ, dl, VT, Op0, Result);
20807
20808 // If the logical-not of the result is required, perform that now.
20809 if (Invert)
20810 Result = DAG.getNOT(dl, Result, VT);
20811
20812 return Result;
20813 }
20814
20815 // Try to use SUBUS and PCMPEQ.
20816 if (SDValue V = LowerVSETCCWithSUBUS(Op0, Op1, VT, Cond, dl, Subtarget, DAG))
20817 return V;
20818
20819 // We are handling one of the integer comparisons here. Since SSE only has
20820   // GT and EQ comparisons for integers, swapping operands and multiple
20821 // operations may be required for some comparisons.
20822 unsigned Opc = (Cond == ISD::SETEQ || Cond == ISD::SETNE) ? X86ISD::PCMPEQ
20823 : X86ISD::PCMPGT;
20824 bool Swap = Cond == ISD::SETLT || Cond == ISD::SETULT ||
20825 Cond == ISD::SETGE || Cond == ISD::SETUGE;
20826 bool Invert = Cond == ISD::SETNE ||
20827 (Cond != ISD::SETEQ && ISD::isTrueWhenEqual(Cond));
20828
20829 if (Swap)
20830 std::swap(Op0, Op1);
20831
20832 // Check that the operation in question is available (most are plain SSE2,
20833 // but PCMPGTQ and PCMPEQQ have different requirements).
20834 if (VT == MVT::v2i64) {
20835 if (Opc == X86ISD::PCMPGT && !Subtarget.hasSSE42()) {
20836       assert(Subtarget.hasSSE2() && "Don't know how to lower!");
20837
20838 // Since SSE has no unsigned integer comparisons, we need to flip the sign
20839 // bits of the inputs before performing those operations. The lower
20840 // compare is always unsigned.
20841 SDValue SB;
20842 if (FlipSigns) {
20843 SB = DAG.getConstant(0x8000000080000000ULL, dl, MVT::v2i64);
20844 } else {
20845 SB = DAG.getConstant(0x0000000080000000ULL, dl, MVT::v2i64);
20846 }
20847 Op0 = DAG.getNode(ISD::XOR, dl, MVT::v2i64, Op0, SB);
20848 Op1 = DAG.getNode(ISD::XOR, dl, MVT::v2i64, Op1, SB);
20849
20850 // Cast everything to the right type.
20851 Op0 = DAG.getBitcast(MVT::v4i32, Op0);
20852 Op1 = DAG.getBitcast(MVT::v4i32, Op1);
20853
20854 // Emulate PCMPGTQ with (hi1 > hi2) | ((hi1 == hi2) & (lo1 > lo2))
20855 SDValue GT = DAG.getNode(X86ISD::PCMPGT, dl, MVT::v4i32, Op0, Op1);
20856 SDValue EQ = DAG.getNode(X86ISD::PCMPEQ, dl, MVT::v4i32, Op0, Op1);
20857
20858 // Create masks for only the low parts/high parts of the 64 bit integers.
20859 static const int MaskHi[] = { 1, 1, 3, 3 };
20860 static const int MaskLo[] = { 0, 0, 2, 2 };
20861 SDValue EQHi = DAG.getVectorShuffle(MVT::v4i32, dl, EQ, EQ, MaskHi);
20862 SDValue GTLo = DAG.getVectorShuffle(MVT::v4i32, dl, GT, GT, MaskLo);
20863 SDValue GTHi = DAG.getVectorShuffle(MVT::v4i32, dl, GT, GT, MaskHi);
20864
20865 SDValue Result = DAG.getNode(ISD::AND, dl, MVT::v4i32, EQHi, GTLo);
20866 Result = DAG.getNode(ISD::OR, dl, MVT::v4i32, Result, GTHi);
20867
20868 if (Invert)
20869 Result = DAG.getNOT(dl, Result, MVT::v4i32);
20870
20871 return DAG.getBitcast(VT, Result);
20872 }
20873
20874 if (Opc == X86ISD::PCMPEQ && !Subtarget.hasSSE41()) {
20875 // If pcmpeqq is missing but pcmpeqd is available synthesize pcmpeqq with
20876 // pcmpeqd + pshufd + pand.
20877       assert(Subtarget.hasSSE2() && !FlipSigns && "Don't know how to lower!");
20878
20879 // First cast everything to the right type.
20880 Op0 = DAG.getBitcast(MVT::v4i32, Op0);
20881 Op1 = DAG.getBitcast(MVT::v4i32, Op1);
20882
20883 // Do the compare.
20884 SDValue Result = DAG.getNode(Opc, dl, MVT::v4i32, Op0, Op1);
20885
20886 // Make sure the lower and upper halves are both all-ones.
20887 static const int Mask[] = { 1, 0, 3, 2 };
20888 SDValue Shuf = DAG.getVectorShuffle(MVT::v4i32, dl, Result, Result, Mask);
20889 Result = DAG.getNode(ISD::AND, dl, MVT::v4i32, Result, Shuf);
20890
20891 if (Invert)
20892 Result = DAG.getNOT(dl, Result, MVT::v4i32);
20893
20894 return DAG.getBitcast(VT, Result);
20895 }
20896 }
20897
20898 // Since SSE has no unsigned integer comparisons, we need to flip the sign
20899 // bits of the inputs before performing those operations.
20900 if (FlipSigns) {
20901 MVT EltVT = VT.getVectorElementType();
20902 SDValue SM = DAG.getConstant(APInt::getSignMask(EltVT.getSizeInBits()), dl,
20903 VT);
20904 Op0 = DAG.getNode(ISD::XOR, dl, VT, Op0, SM);
20905 Op1 = DAG.getNode(ISD::XOR, dl, VT, Op1, SM);
20906 }
20907
20908 SDValue Result = DAG.getNode(Opc, dl, VT, Op0, Op1);
20909
20910 // If the logical-not of the result is required, perform that now.
20911 if (Invert)
20912 Result = DAG.getNOT(dl, Result, VT);
20913
20914 return Result;
20915}
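// Illustrative sketch (hypothetical helper, not part of the original file):
// the PCMPGTQ emulation above relies on rebuilding a signed 64-bit compare
// from 32-bit halves, with the high halves compared signed and the low
// halves compared unsigned.
static bool emulatedSignedGT64(int64_t A, int64_t B) {
  int32_t HiA = (int32_t)(A >> 32), HiB = (int32_t)(B >> 32); // signed halves
  uint32_t LoA = (uint32_t)A, LoB = (uint32_t)B;              // unsigned halves
  // (hi1 > hi2) | ((hi1 == hi2) & (lo1 > lo2)), matching the comment above.
  return HiA > HiB || (HiA == HiB && LoA > LoB);
}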
20916
20917// Try to select this as a KORTEST+SETCC if possible.
20918static SDValue EmitKORTEST(SDValue Op0, SDValue Op1, ISD::CondCode CC,
20919 const SDLoc &dl, SelectionDAG &DAG,
20920 const X86Subtarget &Subtarget,
20921 SDValue &X86CC) {
20922 // Only support equality comparisons.
20923 if (CC != ISD::SETEQ && CC != ISD::SETNE)
20924 return SDValue();
20925
20926 // Must be a bitcast from vXi1.
20927 if (Op0.getOpcode() != ISD::BITCAST)
20928 return SDValue();
20929
20930 Op0 = Op0.getOperand(0);
20931 MVT VT = Op0.getSimpleValueType();
20932 if (!(Subtarget.hasAVX512() && VT == MVT::v16i1) &&
20933 !(Subtarget.hasDQI() && VT == MVT::v8i1) &&
20934 !(Subtarget.hasBWI() && (VT == MVT::v32i1 || VT == MVT::v64i1)))
20935 return SDValue();
20936
20937 X86::CondCode X86Cond;
20938 if (isNullConstant(Op1)) {
20939 X86Cond = CC == ISD::SETEQ ? X86::COND_E : X86::COND_NE;
20940 } else if (isAllOnesConstant(Op1)) {
20941 // C flag is set for all ones.
20942 X86Cond = CC == ISD::SETEQ ? X86::COND_B : X86::COND_AE;
20943 } else
20944 return SDValue();
20945
20946 // If the input is an OR, we can combine its operands into the KORTEST.
20947 SDValue LHS = Op0;
20948 SDValue RHS = Op0;
20949 if (Op0.getOpcode() == ISD::OR && Op0.hasOneUse()) {
20950 LHS = Op0.getOperand(0);
20951 RHS = Op0.getOperand(1);
20952 }
20953
20954 X86CC = DAG.getTargetConstant(X86Cond, dl, MVT::i8);
20955 return DAG.getNode(X86ISD::KORTEST, dl, MVT::i32, LHS, RHS);
20956}
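// Illustrative sketch (hypothetical helper, not part of the original file):
// EmitKORTEST works because KORTEST ORs the two mask operands and reports
// "all zero" in ZF and "all ones" in CF, so comparing a k-mask against 0 or
// against all-ones needs no separate compare. Modeled here for a v16i1 mask.
static void kortestModel(unsigned LHS, unsigned RHS, bool &ZF, bool &CF) {
  unsigned Or = (LHS | RHS) & 0xFFFFu; // 16 mask bits
  ZF = (Or == 0);                      // drives COND_E / COND_NE
  CF = (Or == 0xFFFFu);                // drives COND_B / COND_AE
}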
20957
20958/// Emit flags for the given setcc condition and operands. Also returns the
20959/// corresponding X86 condition code constant in X86CC.
20960SDValue X86TargetLowering::emitFlagsForSetcc(SDValue Op0, SDValue Op1,
20961 ISD::CondCode CC, const SDLoc &dl,
20962 SelectionDAG &DAG,
20963 SDValue &X86CC) const {
20964 // Optimize to BT if possible.
20965 // Lower (X & (1 << N)) == 0 to BT(X, N).
20966 // Lower ((X >>u N) & 1) != 0 to BT(X, N).
20967 // Lower ((X >>s N) & 1) != 0 to BT(X, N).
20968 if (Op0.getOpcode() == ISD::AND && Op0.hasOneUse() && isNullConstant(Op1) &&
20969 (CC == ISD::SETEQ || CC == ISD::SETNE)) {
20970 if (SDValue BT = LowerAndToBT(Op0, CC, dl, DAG, X86CC))
20971 return BT;
20972 }
20973
20974 // Try to use PTEST for a tree of ORs equality-compared with 0.
20975 // TODO: We could do AND tree with all 1s as well by using the C flag.
20976 if (Op0.getOpcode() == ISD::OR && isNullConstant(Op1) &&
20977 (CC == ISD::SETEQ || CC == ISD::SETNE)) {
20978 if (SDValue PTEST = LowerVectorAllZeroTest(Op0, CC, Subtarget, DAG, X86CC))
20979 return PTEST;
20980 }
20981
20982 // Try to lower using KORTEST.
20983 if (SDValue KORTEST = EmitKORTEST(Op0, Op1, CC, dl, DAG, Subtarget, X86CC))
20984 return KORTEST;
20985
20986 // Look for X == 0, X == 1, X != 0, or X != 1. We can simplify some forms of
20987 // these.
20988 if ((isOneConstant(Op1) || isNullConstant(Op1)) &&
20989 (CC == ISD::SETEQ || CC == ISD::SETNE)) {
20990 // If the input is a setcc, then reuse the input setcc or use a new one with
20991 // the inverted condition.
20992 if (Op0.getOpcode() == X86ISD::SETCC) {
20993 bool Invert = (CC == ISD::SETNE) ^ isNullConstant(Op1);
20994
20995 X86CC = Op0.getOperand(0);
20996 if (Invert) {
20997 X86::CondCode CCode = (X86::CondCode)Op0.getConstantOperandVal(0);
20998 CCode = X86::GetOppositeBranchCondition(CCode);
20999 X86CC = DAG.getTargetConstant(CCode, dl, MVT::i8);
21000 }
21001
21002 return Op0.getOperand(1);
21003 }
21004 }
21005
21006 bool IsFP = Op1.getSimpleValueType().isFloatingPoint();
21007 X86::CondCode CondCode = TranslateX86CC(CC, dl, IsFP, Op0, Op1, DAG);
21008 if (CondCode == X86::COND_INVALID)
21009 return SDValue();
21010
21011 SDValue EFLAGS = EmitCmp(Op0, Op1, CondCode, dl, DAG);
21012 EFLAGS = ConvertCmpIfNecessary(EFLAGS, DAG);
21013 X86CC = DAG.getTargetConstant(CondCode, dl, MVT::i8);
21014 return EFLAGS;
21015}
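// Illustrative sketch (hypothetical helper, not part of the original file):
// the BT lowering in emitFlagsForSetcc is justified by the scalar identity
// that an AND with a shifted one tests the same bit that BT places in CF.
static bool btEquivalence(unsigned long X, unsigned N) {
  bool ViaAnd = (X & (1UL << N)) != 0; // the pattern LowerAndToBT matches
  bool ViaBT = (X >> N) & 1;           // what BT reports in CF
  return ViaAnd == ViaBT;              // holds for any in-range N
}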
21016
21017SDValue X86TargetLowering::LowerSETCC(SDValue Op, SelectionDAG &DAG) const {
21018
21019 MVT VT = Op.getSimpleValueType();
21020
21021 if (VT.isVector()) return LowerVSETCC(Op, Subtarget, DAG);
21022
21023 assert(VT == MVT::i8 && "SetCC type must be 8-bit integer");
21024 SDValue Op0 = Op.getOperand(0);
21025 SDValue Op1 = Op.getOperand(1);
21026 SDLoc dl(Op);
21027 ISD::CondCode CC = cast<CondCodeSDNode>(Op.getOperand(2))->get();
21028
21029 // Handle f128 first, since one possible outcome is a normal integer
21030 // comparison which gets handled by emitFlagsForSetcc.
21031 if (Op0.getValueType() == MVT::f128) {
21032 softenSetCCOperands(DAG, MVT::f128, Op0, Op1, CC, dl, Op0, Op1);
21033
21034 // If softenSetCCOperands returned a scalar, use it.
21035 if (!Op1.getNode()) {
21036 assert(Op0.getValueType() == Op.getValueType() &&
21037        "Unexpected setcc expansion!");
21038 return Op0;
21039 }
21040 }
21041
21042 SDValue X86CC;
21043 SDValue EFLAGS = emitFlagsForSetcc(Op0, Op1, CC, dl, DAG, X86CC);
21044 if (!EFLAGS)
21045 return SDValue();
21046
21047 return DAG.getNode(X86ISD::SETCC, dl, MVT::i8, X86CC, EFLAGS);
21048}
21049
21050SDValue X86TargetLowering::LowerSETCCCARRY(SDValue Op, SelectionDAG &DAG) const {
21051 SDValue LHS = Op.getOperand(0);
21052 SDValue RHS = Op.getOperand(1);
21053 SDValue Carry = Op.getOperand(2);
21054 SDValue Cond = Op.getOperand(3);
21055 SDLoc DL(Op);
21056
21057 assert(LHS.getSimpleValueType().isInteger() && "SETCCCARRY is integer only.");
21058 X86::CondCode CC = TranslateIntegerX86CC(cast<CondCodeSDNode>(Cond)->get());
21059
21060 // Recreate the carry if needed.
21061 EVT CarryVT = Carry.getValueType();
21062 APInt NegOne = APInt::getAllOnesValue(CarryVT.getScalarSizeInBits());
21063 Carry = DAG.getNode(X86ISD::ADD, DL, DAG.getVTList(CarryVT, MVT::i32),
21064 Carry, DAG.getConstant(NegOne, DL, CarryVT));
21065
21066 SDVTList VTs = DAG.getVTList(LHS.getValueType(), MVT::i32);
21067 SDValue Cmp = DAG.getNode(X86ISD::SBB, DL, VTs, LHS, RHS, Carry.getValue(1));
21068 return getSETCC(CC, Cmp.getValue(1), DL, DAG);
21069}
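// Illustrative sketch (hypothetical helper, not part of the original file):
// LowerSETCCCARRY recreates the borrow as CF and then evaluates the condition
// on LHS - RHS - CF. For the "below" case that is equivalent to this scalar
// model.
static bool setccCarryBelow(unsigned long LHS, unsigned long RHS, bool CarryIn) {
  // SBB borrows exactly when LHS < RHS + CF over the full-width integers.
  return LHS < RHS || (LHS == RHS && CarryIn);
}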
21070
21071// This function returns three things: the arithmetic computation itself
21072// (Value), an EFLAGS result (Overflow), and a condition code (Cond). The
21073// flag and the condition code define the case in which the arithmetic
21074// computation overflows.
21075static std::pair<SDValue, SDValue>
21076getX86XALUOOp(X86::CondCode &Cond, SDValue Op, SelectionDAG &DAG) {
21077 assert(Op.getResNo() == 0 && "Unexpected result number!");
21078 SDValue Value, Overflow;
21079 SDValue LHS = Op.getOperand(0);
21080 SDValue RHS = Op.getOperand(1);
21081 unsigned BaseOp = 0;
21082 SDLoc DL(Op);
21083 switch (Op.getOpcode()) {
21084 default: llvm_unreachable("Unknown ovf instruction!");
21085 case ISD::SADDO:
21086 BaseOp = X86ISD::ADD;
21087 Cond = X86::COND_O;
21088 break;
21089 case ISD::UADDO:
21090 BaseOp = X86ISD::ADD;
21091 Cond = isOneConstant(RHS) ? X86::COND_E : X86::COND_B;
21092 break;
21093 case ISD::SSUBO:
21094 BaseOp = X86ISD::SUB;
21095 Cond = X86::COND_O;
21096 break;
21097 case ISD::USUBO:
21098 BaseOp = X86ISD::SUB;
21099 Cond = X86::COND_B;
21100 break;
21101 case ISD::SMULO:
21102 BaseOp = X86ISD::SMUL;
21103 Cond = X86::COND_O;
21104 break;
21105 case ISD::UMULO:
21106 BaseOp = X86ISD::UMUL;
21107 Cond = X86::COND_O;
21108 break;
21109 }
21110
21111 if (BaseOp) {
21112 // Also sets EFLAGS.
21113 SDVTList VTs = DAG.getVTList(Op.getValueType(), MVT::i32);
21114 Value = DAG.getNode(BaseOp, DL, VTs, LHS, RHS);
21115 Overflow = Value.getValue(1);
21116 }
21117
21118 return std::make_pair(Value, Overflow);
21119}
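// Illustrative sketch (hypothetical helpers, not part of the original file):
// the [SU]ADDO cases above produce the plain arithmetic result plus a flag,
// OF (COND_O) for the signed form and CF (COND_B) for the unsigned form.
static bool saddOverflowModel(int A, int B, int &Sum) {
  return __builtin_add_overflow(A, B, &Sum); // result + signed-overflow flag
}
static bool uaddOverflowModel(unsigned A, unsigned B, unsigned &Sum) {
  Sum = A + B;
  return Sum < A; // carry out of the addition, i.e. unsigned overflow
}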
21120
21121static SDValue LowerXALUO(SDValue Op, SelectionDAG &DAG) {
21122 // Lower the "add/sub/mul with overflow" instruction into a regular instruction
21123 // plus a "setcc" instruction that checks the overflow flag. The "brcond" lowering
21124 // looks for this combo and may remove the "setcc" instruction if the "setcc"
21125 // has only one use.
21126 SDLoc DL(Op);
21127 X86::CondCode Cond;
21128 SDValue Value, Overflow;
21129 std::tie(Value, Overflow) = getX86XALUOOp(Cond, Op, DAG);
21130
21131 SDValue SetCC = getSETCC(Cond, Overflow, DL, DAG);
21132 assert(Op->getValueType(1) == MVT::i8 && "Unexpected VT!");
21133 return DAG.getNode(ISD::MERGE_VALUES, DL, Op->getVTList(), Value, SetCC);
21134}
21135
21136/// Return true if opcode is a X86 logical comparison.
21137static bool isX86LogicalCmp(SDValue Op) {
21138 unsigned Opc = Op.getOpcode();
21139 if (Opc == X86ISD::CMP || Opc == X86ISD::COMI || Opc == X86ISD::UCOMI ||
21140 Opc == X86ISD::SAHF)
21141 return true;
21142 if (Op.getResNo() == 1 &&
21143 (Opc == X86ISD::ADD || Opc == X86ISD::SUB || Opc == X86ISD::ADC ||
21144 Opc == X86ISD::SBB || Opc == X86ISD::SMUL || Opc == X86ISD::UMUL ||
21145 Opc == X86ISD::OR || Opc == X86ISD::XOR || Opc == X86ISD::AND))
21146 return true;
21147
21148 return false;
21149}
21150
21151static bool isTruncWithZeroHighBitsInput(SDValue V, SelectionDAG &DAG) {
21152 if (V.getOpcode() != ISD::TRUNCATE)
21153 return false;
21154
21155 SDValue VOp0 = V.getOperand(0);
21156 unsigned InBits = VOp0.getValueSizeInBits();
21157 unsigned Bits = V.getValueSizeInBits();
21158 return DAG.MaskedValueIsZero(VOp0, APInt::getHighBitsSet(InBits,InBits-Bits));
21159}
21160
21161SDValue X86TargetLowering::LowerSELECT(SDValue Op, SelectionDAG &DAG) const {
21162 bool AddTest = true;
21163 SDValue Cond = Op.getOperand(0);
21164 SDValue Op1 = Op.getOperand(1);
21165 SDValue Op2 = Op.getOperand(2);
21166 SDLoc DL(Op);
21167 MVT VT = Op1.getSimpleValueType();
21168 SDValue CC;
21169
21170 // Lower FP selects into a CMP/AND/ANDN/OR sequence when the necessary SSE ops
21171 // are available or VBLENDV if AVX is available.
21172 // Otherwise FP cmovs get lowered into a less efficient branch sequence later.
21173 if (Cond.getOpcode() == ISD::SETCC &&
21174 ((Subtarget.hasSSE2() && VT == MVT::f64) ||
21175 (Subtarget.hasSSE1() && VT == MVT::f32)) &&
21176 VT == Cond.getOperand(0).getSimpleValueType() && Cond->hasOneUse()) {
21177 SDValue CondOp0 = Cond.getOperand(0), CondOp1 = Cond.getOperand(1);
21178 unsigned SSECC = translateX86FSETCC(
21179 cast<CondCodeSDNode>(Cond.getOperand(2))->get(), CondOp0, CondOp1);
21180
21181 if (Subtarget.hasAVX512()) {
21182 SDValue Cmp =
21183 DAG.getNode(X86ISD::FSETCCM, DL, MVT::v1i1, CondOp0, CondOp1,
21184 DAG.getTargetConstant(SSECC, DL, MVT::i8));
21185 assert(!VT.isVector() && "Not a scalar type?");
21186 return DAG.getNode(X86ISD::SELECTS, DL, VT, Cmp, Op1, Op2);
21187 }
21188
21189 if (SSECC < 8 || Subtarget.hasAVX()) {
21190 SDValue Cmp = DAG.getNode(X86ISD::FSETCC, DL, VT, CondOp0, CondOp1,
21191 DAG.getTargetConstant(SSECC, DL, MVT::i8));
21192
21193 // If we have AVX, we can use a variable vector select (VBLENDV) instead
21194 // of 3 logic instructions for size savings and potentially speed.
21195 // Unfortunately, there is no scalar form of VBLENDV.
21196
21197 // If either operand is a +0.0 constant, don't try this. We can expect to
21198 // optimize away at least one of the logic instructions later in that
21199 // case, so that sequence would be faster than a variable blend.
21200
21201 // BLENDV was introduced with SSE 4.1, but the 2 register form implicitly
21202 // uses XMM0 as the selection register. That may need just as many
21203 // instructions as the AND/ANDN/OR sequence due to register moves, so
21204 // don't bother.
21205 if (Subtarget.hasAVX() && !isNullFPConstant(Op1) &&
21206 !isNullFPConstant(Op2)) {
21207 // Convert to vectors, do a VSELECT, and convert back to scalar.
21208 // All of the conversions should be optimized away.
21209 MVT VecVT = VT == MVT::f32 ? MVT::v4f32 : MVT::v2f64;
21210 SDValue VOp1 = DAG.getNode(ISD::SCALAR_TO_VECTOR, DL, VecVT, Op1);
21211 SDValue VOp2 = DAG.getNode(ISD::SCALAR_TO_VECTOR, DL, VecVT, Op2);
21212 SDValue VCmp = DAG.getNode(ISD::SCALAR_TO_VECTOR, DL, VecVT, Cmp);
21213
21214 MVT VCmpVT = VT == MVT::f32 ? MVT::v4i32 : MVT::v2i64;
21215 VCmp = DAG.getBitcast(VCmpVT, VCmp);
21216
21217 SDValue VSel = DAG.getSelect(DL, VecVT, VCmp, VOp1, VOp2);
21218
21219 return DAG.getNode(ISD::EXTRACT_VECTOR_ELT, DL, VT,
21220 VSel, DAG.getIntPtrConstant(0, DL));
21221 }
21222 SDValue AndN = DAG.getNode(X86ISD::FANDN, DL, VT, Cmp, Op2);
21223 SDValue And = DAG.getNode(X86ISD::FAND, DL, VT, Cmp, Op1);
21224 return DAG.getNode(X86ISD::FOR, DL, VT, AndN, And);
21225 }
21226 }
21227
21228 // AVX512 fallback is to lower selects of scalar floats to masked moves.
21229 if ((VT == MVT::f64 || VT == MVT::f32) && Subtarget.hasAVX512()) {
21230 SDValue Cmp = DAG.getNode(ISD::SCALAR_TO_VECTOR, DL, MVT::v1i1, Cond);
21231 return DAG.getNode(X86ISD::SELECTS, DL, VT, Cmp, Op1, Op2);
21232 }
21233
21234 // For v64i1 without 64-bit support we need to split and rejoin.
21235 if (VT == MVT::v64i1 && !Subtarget.is64Bit()) {
21236 assert(Subtarget.hasBWI() && "Expected BWI to be legal");
21237 SDValue Op1Lo = extractSubVector(Op1, 0, DAG, DL, 32);
21238 SDValue Op2Lo = extractSubVector(Op2, 0, DAG, DL, 32);
21239 SDValue Op1Hi = extractSubVector(Op1, 32, DAG, DL, 32);
21240 SDValue Op2Hi = extractSubVector(Op2, 32, DAG, DL, 32);
21241 SDValue Lo = DAG.getSelect(DL, MVT::v32i1, Cond, Op1Lo, Op2Lo);
21242 SDValue Hi = DAG.getSelect(DL, MVT::v32i1, Cond, Op1Hi, Op2Hi);
21243 return DAG.getNode(ISD::CONCAT_VECTORS, DL, VT, Lo, Hi);
21244 }
21245
21246 if (VT.isVector() && VT.getVectorElementType() == MVT::i1) {
21247 SDValue Op1Scalar;
21248 if (ISD::isBuildVectorOfConstantSDNodes(Op1.getNode()))
21249 Op1Scalar = ConvertI1VectorToInteger(Op1, DAG);
21250 else if (Op1.getOpcode() == ISD::BITCAST && Op1.getOperand(0))
21251 Op1Scalar = Op1.getOperand(0);
21252 SDValue Op2Scalar;
21253 if (ISD::isBuildVectorOfConstantSDNodes(Op2.getNode()))
21254 Op2Scalar = ConvertI1VectorToInteger(Op2, DAG);
21255 else if (Op2.getOpcode() == ISD::BITCAST && Op2.getOperand(0))
21256 Op2Scalar = Op2.getOperand(0);
21257 if (Op1Scalar.getNode() && Op2Scalar.getNode()) {
21258 SDValue newSelect = DAG.getSelect(DL, Op1Scalar.getValueType(), Cond,
21259 Op1Scalar, Op2Scalar);
21260 if (newSelect.getValueSizeInBits() == VT.getSizeInBits())
21261 return DAG.getBitcast(VT, newSelect);
21262 SDValue ExtVec = DAG.getBitcast(MVT::v8i1, newSelect);
21263 return DAG.getNode(ISD::EXTRACT_SUBVECTOR, DL, VT, ExtVec,
21264 DAG.getIntPtrConstant(0, DL));
21265 }
21266 }
21267
21268 if (Cond.getOpcode() == ISD::SETCC) {
21269 if (SDValue NewCond = LowerSETCC(Cond, DAG)) {
21270 Cond = NewCond;
21271 // If the condition was updated, it's possible that the operands of the
21272 // select were also updated (for example, EmitTest has a RAUW). Refresh
21273 // the local references to the select operands in case they got stale.
21274 Op1 = Op.getOperand(1);
21275 Op2 = Op.getOperand(2);
21276 }
21277 }
21278
21279 // (select (x == 0), -1, y) -> (sign_bit (x - 1)) | y
21280 // (select (x == 0), y, -1) -> ~(sign_bit (x - 1)) | y
21281 // (select (x != 0), y, -1) -> (sign_bit (x - 1)) | y
21282 // (select (x != 0), -1, y) -> ~(sign_bit (x - 1)) | y
21283 // (select (and (x , 0x1) == 0), y, (z ^ y) ) -> (-(and (x , 0x1)) & z ) ^ y
21284 // (select (and (x , 0x1) == 0), y, (z | y) ) -> (-(and (x , 0x1)) & z ) | y
21285 if (Cond.getOpcode() == X86ISD::SETCC &&
21286 Cond.getOperand(1).getOpcode() == X86ISD::CMP &&
21287 isNullConstant(Cond.getOperand(1).getOperand(1))) {
21288 SDValue Cmp = Cond.getOperand(1);
21289 unsigned CondCode = Cond.getConstantOperandVal(0);
21290
21291 if ((isAllOnesConstant(Op1) || isAllOnesConstant(Op2)) &&
21292 (CondCode == X86::COND_E || CondCode == X86::COND_NE)) {
21293 SDValue Y = isAllOnesConstant(Op2) ? Op1 : Op2;
21294 SDValue CmpOp0 = Cmp.getOperand(0);
21295
21296 // Apply further optimizations for special cases
21297 // (select (x != 0), -1, 0) -> neg & sbb
21298 // (select (x == 0), 0, -1) -> neg & sbb
21299 if (isNullConstant(Y) &&
21300 (isAllOnesConstant(Op1) == (CondCode == X86::COND_NE))) {
21301 SDValue Zero = DAG.getConstant(0, DL, CmpOp0.getValueType());
21302 SDValue CmpZero = DAG.getNode(X86ISD::CMP, DL, MVT::i32, Zero, CmpOp0);
21303 SDVTList VTs = DAG.getVTList(Op.getValueType(), MVT::i32);
21304 Zero = DAG.getConstant(0, DL, Op.getValueType());
21305 return DAG.getNode(X86ISD::SBB, DL, VTs, Zero, Zero, CmpZero);
21306 }
21307
21308 Cmp = DAG.getNode(X86ISD::CMP, DL, MVT::i32,
21309 CmpOp0, DAG.getConstant(1, DL, CmpOp0.getValueType()));
21310 Cmp = ConvertCmpIfNecessary(Cmp, DAG);
21311
21312 SDVTList VTs = DAG.getVTList(Op.getValueType(), MVT::i32);
21313 SDValue Zero = DAG.getConstant(0, DL, Op.getValueType());
21314 SDValue Res = // Res = 0 or -1.
21315 DAG.getNode(X86ISD::SBB, DL, VTs, Zero, Zero, Cmp);
21316
21317 if (isAllOnesConstant(Op1) != (CondCode == X86::COND_E))
21318 Res = DAG.getNOT(DL, Res, Res.getValueType());
21319
21320 if (!isNullConstant(Op2))
21321 Res = DAG.getNode(ISD::OR, DL, Res.getValueType(), Res, Y);
21322 return Res;
21323 } else if (!Subtarget.hasCMov() && CondCode == X86::COND_E &&
21324 Cmp.getOperand(0).getOpcode() == ISD::AND &&
21325 isOneConstant(Cmp.getOperand(0).getOperand(1))) {
21326 SDValue CmpOp0 = Cmp.getOperand(0);
21327 SDValue Src1, Src2;
21328 // Returns true if Op2 is an XOR or OR operator and one of its operands
21329 // is equal to Op1, i.e. the pair matches
21330 // (a, a op b) or (b, a op b).
21331 auto isOrXorPattern = [&]() {
21332 if ((Op2.getOpcode() == ISD::XOR || Op2.getOpcode() == ISD::OR) &&
21333 (Op2.getOperand(0) == Op1 || Op2.getOperand(1) == Op1)) {
21334 Src1 =
21335 Op2.getOperand(0) == Op1 ? Op2.getOperand(1) : Op2.getOperand(0);
21336 Src2 = Op1;
21337 return true;
21338 }
21339 return false;
21340 };
21341
21342 if (isOrXorPattern()) {
21343 SDValue Neg;
21344 unsigned int CmpSz = CmpOp0.getSimpleValueType().getSizeInBits();
21345 // We need a mask of all zeros or all ones with the same size as the
21346 // other operands.
21347 if (CmpSz > VT.getSizeInBits())
21348 Neg = DAG.getNode(ISD::TRUNCATE, DL, VT, CmpOp0);
21349 else if (CmpSz < VT.getSizeInBits())
21350 Neg = DAG.getNode(ISD::AND, DL, VT,
21351 DAG.getNode(ISD::ANY_EXTEND, DL, VT, CmpOp0.getOperand(0)),
21352 DAG.getConstant(1, DL, VT));
21353 else
21354 Neg = CmpOp0;
21355 SDValue Mask = DAG.getNode(ISD::SUB, DL, VT, DAG.getConstant(0, DL, VT),
21356 Neg); // -(and (x, 0x1))
21357 SDValue And = DAG.getNode(ISD::AND, DL, VT, Mask, Src1); // Mask & z
21358 return DAG.getNode(Op2.getOpcode(), DL, VT, And, Src2); // And Op y
21359 }
21360 }
21361 }
21362
21363 // Look past (and (setcc_carry (cmp ...)), 1).
21364 if (Cond.getOpcode() == ISD::AND &&
21365 Cond.getOperand(0).getOpcode() == X86ISD::SETCC_CARRY &&
21366 isOneConstant(Cond.getOperand(1)))
21367 Cond = Cond.getOperand(0);
21368
21369 // If condition flag is set by a X86ISD::CMP, then use it as the condition
21370 // setting operand in place of the X86ISD::SETCC.
21371 unsigned CondOpcode = Cond.getOpcode();
21372 if (CondOpcode == X86ISD::SETCC ||
21373 CondOpcode == X86ISD::SETCC_CARRY) {
21374 CC = Cond.getOperand(0);
21375
21376 SDValue Cmp = Cond.getOperand(1);
21377 bool IllegalFPCMov = false;
21378 if (VT.isFloatingPoint() && !VT.isVector() &&
21379 !isScalarFPTypeInSSEReg(VT)) // FPStack?
21380 IllegalFPCMov = !hasFPCMov(cast<ConstantSDNode>(CC)->getSExtValue());
21381
21382 if ((isX86LogicalCmp(Cmp) && !IllegalFPCMov) ||
21383 Cmp.getOpcode() == X86ISD::BT) { // FIXME
21384 Cond = Cmp;
21385 AddTest = false;
21386 }
21387 } else if (CondOpcode == ISD::USUBO || CondOpcode == ISD::SSUBO ||
21388 CondOpcode == ISD::UADDO || CondOpcode == ISD::SADDO ||
21389 CondOpcode == ISD::UMULO || CondOpcode == ISD::SMULO) {
21390 SDValue Value;
21391 X86::CondCode X86Cond;
21392 std::tie(Value, Cond) = getX86XALUOOp(X86Cond, Cond.getValue(0), DAG);
21393
21394 CC = DAG.getTargetConstant(X86Cond, DL, MVT::i8);
21395 AddTest = false;
21396 }
21397
21398 if (AddTest) {
21399 // Look past the truncate if the high bits are known zero.
21400 if (isTruncWithZeroHighBitsInput(Cond, DAG))
21401 Cond = Cond.getOperand(0);
21402
21403 // We know the result of AND is compared against zero. Try to match
21404 // it to BT.
21405 if (Cond.getOpcode() == ISD::AND && Cond.hasOneUse()) {
21406 SDValue BTCC;
21407 if (SDValue BT = LowerAndToBT(Cond, ISD::SETNE, DL, DAG, BTCC)) {
21408 CC = BTCC;
21409 Cond = BT;
21410 AddTest = false;
21411 }
21412 }
21413 }
21414
21415 if (AddTest) {
21416 CC = DAG.getTargetConstant(X86::COND_NE, DL, MVT::i8);
21417 Cond = EmitCmp(Cond, DAG.getConstant(0, DL, Cond.getValueType()),
21418 X86::COND_NE, DL, DAG);
21419 }
21420
21421 // a < b ? -1 : 0 -> RES = ~setcc_carry
21422 // a < b ? 0 : -1 -> RES = setcc_carry
21423 // a >= b ? -1 : 0 -> RES = setcc_carry
21424 // a >= b ? 0 : -1 -> RES = ~setcc_carry
21425 if (Cond.getOpcode() == X86ISD::SUB) {
21426 Cond = ConvertCmpIfNecessary(Cond, DAG);
21427 unsigned CondCode = cast<ConstantSDNode>(CC)->getZExtValue();
21428
21429 if ((CondCode == X86::COND_AE || CondCode == X86::COND_B) &&
21430 (isAllOnesConstant(Op1) || isAllOnesConstant(Op2)) &&
21431 (isNullConstant(Op1) || isNullConstant(Op2))) {
21432 SDValue Res =
21433 DAG.getNode(X86ISD::SETCC_CARRY, DL, Op.getValueType(),
21434 DAG.getTargetConstant(X86::COND_B, DL, MVT::i8), Cond);
21435 if (isAllOnesConstant(Op1) != (CondCode == X86::COND_B))
21436 return DAG.getNOT(DL, Res, Res.getValueType());
21437 return Res;
21438 }
21439 }
21440
21441 // X86 doesn't have an i8 cmov. If both operands are the result of a truncate,
21442 // widen the cmov and push the truncate through. This avoids introducing a new
21443 // branch during isel and doesn't add any extensions.
21444 if (Op.getValueType() == MVT::i8 &&
21445 Op1.getOpcode() == ISD::TRUNCATE && Op2.getOpcode() == ISD::TRUNCATE) {
21446 SDValue T1 = Op1.getOperand(0), T2 = Op2.getOperand(0);
21447 if (T1.getValueType() == T2.getValueType() &&
21448 // Blacklist CopyFromReg to avoid partial register stalls.
21449 T1.getOpcode() != ISD::CopyFromReg && T2.getOpcode()!=ISD::CopyFromReg){
21450 SDValue Cmov = DAG.getNode(X86ISD::CMOV, DL, T1.getValueType(), T2, T1,
21451 CC, Cond);
21452 return DAG.getNode(ISD::TRUNCATE, DL, Op.getValueType(), Cmov);
21453 }
21454 }
21455
21456 // Or finally, promote i8 cmovs if we have CMOV,
21457 // or i16 cmovs if it won't prevent folding a load.
21458 // FIXME: we should not limit promotion of the i8 case to only when CMOV is
21459 // legal, but EmitLoweredSelect() cannot deal with these extensions being
21460 // inserted between two CMOVs (this applies to the i16 case too).
21461 // https://bugs.llvm.org/show_bug.cgi?id=40974
21462 if ((Op.getValueType() == MVT::i8 && Subtarget.hasCMov()) ||
21463 (Op.getValueType() == MVT::i16 && !MayFoldLoad(Op1) &&
21464 !MayFoldLoad(Op2))) {
21465 Op1 = DAG.getNode(ISD::ANY_EXTEND, DL, MVT::i32, Op1);
21466 Op2 = DAG.getNode(ISD::ANY_EXTEND, DL, MVT::i32, Op2);
21467 SDValue Ops[] = { Op2, Op1, CC, Cond };
21468 SDValue Cmov = DAG.getNode(X86ISD::CMOV, DL, MVT::i32, Ops);
21469 return DAG.getNode(ISD::TRUNCATE, DL, Op.getValueType(), Cmov);
21470 }
21471
21472 // X86ISD::CMOV means set the result (which is operand 1) to the RHS if
21473 // condition is true.
21474 SDValue Ops[] = { Op2, Op1, CC, Cond };
21475 return DAG.getNode(X86ISD::CMOV, DL, Op.getValueType(), Ops);
21476}
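// Illustrative sketch (hypothetical helper, not part of the original file):
// the (select (x == 0), -1, y) folding in LowerSELECT works because CMP x,1
// borrows exactly when x == 0, so SBB materializes an all-ones/all-zero mask
// without a branch; ORing that mask with y yields the select result.
static unsigned selectEqZeroAllOnes(unsigned X, unsigned Y) {
  unsigned Mask = (X == 0) ? ~0u : 0u; // what CMP $1 + SBB produce
  return Mask | Y;                     // -1 when X == 0, otherwise Y
}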
21477
21478static SDValue LowerSIGN_EXTEND_Mask(SDValue Op,
21479 const X86Subtarget &Subtarget,
21480 SelectionDAG &DAG) {
21481 MVT VT = Op->getSimpleValueType(0);
21482 SDValue In = Op->getOperand(0);
21483 MVT InVT = In.getSimpleValueType();
21484 assert(InVT.getVectorElementType() == MVT::i1 && "Unexpected input type!");
21485 MVT VTElt = VT.getVectorElementType();
21486 SDLoc dl(Op);
21487
21488 unsigned NumElts = VT.getVectorNumElements();
21489
21490 // Extend VT if the scalar type is i8/i16 and BWI is not supported.
21491 MVT ExtVT = VT;
21492 if (!Subtarget.hasBWI() && VTElt.getSizeInBits() <= 16) {
21493 // If v16i32 is to be avoided, we'll need to split and concatenate.
21494 if (NumElts == 16 && !Subtarget.canExtendTo512DQ())
21495 return SplitAndExtendv16i1(Op.getOpcode(), VT, In, dl, DAG);
21496
21497 ExtVT = MVT::getVectorVT(MVT::i32, NumElts);
21498 }
21499
21500 // Widen to 512-bits if VLX is not supported.
21501 MVT WideVT = ExtVT;
21502 if (!ExtVT.is512BitVector() && !Subtarget.hasVLX()) {
21503 NumElts *= 512 / ExtVT.getSizeInBits();
21504 InVT = MVT::getVectorVT(MVT::i1, NumElts);
21505 In = DAG.getNode(ISD::INSERT_SUBVECTOR, dl, InVT, DAG.getUNDEF(InVT),
21506 In, DAG.getIntPtrConstant(0, dl));
21507 WideVT = MVT::getVectorVT(ExtVT.getVectorElementType(), NumElts);
21508 }
21509
21510 SDValue V;
21511 MVT WideEltVT = WideVT.getVectorElementType();
21512 if ((Subtarget.hasDQI() && WideEltVT.getSizeInBits() >= 32) ||
21513 (Subtarget.hasBWI() && WideEltVT.getSizeInBits() <= 16)) {
21514 V = DAG.getNode(Op.getOpcode(), dl, WideVT, In);
21515 } else {
21516 SDValue NegOne = DAG.getConstant(-1, dl, WideVT);
21517 SDValue Zero = DAG.getConstant(0, dl, WideVT);
21518 V = DAG.getSelect(dl, WideVT, In, NegOne, Zero);
21519 }
21520
21521 // Truncate if we had to extend i16/i8 above.
21522 if (VT != ExtVT) {
21523 WideVT = MVT::getVectorVT(VTElt, NumElts);
21524 V = DAG.getNode(ISD::TRUNCATE, dl, WideVT, V);
21525 }
21526
21527 // Extract back to 128/256-bit if we widened.
21528 if (WideVT != VT)
21529 V = DAG.getNode(ISD::EXTRACT_SUBVECTOR, dl, VT, V,
21530 DAG.getIntPtrConstant(0, dl));
21531
21532 return V;
21533}
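// Illustrative sketch (hypothetical helper, not part of the original file):
// per lane, LowerSIGN_EXTEND_Mask turns an i1 mask bit into 0 or all-ones,
// which is exactly the VSELECT(In, -1, 0) fallback used above.
static int signExtendMaskLane(bool MaskBit) {
  return MaskBit ? -1 : 0;
}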
21534
21535static SDValue LowerANY_EXTEND(SDValue Op, const X86Subtarget &Subtarget,
21536 SelectionDAG &DAG) {
21537 SDValue In = Op->getOperand(0);
21538 MVT InVT = In.getSimpleValueType();
21539
21540 if (InVT.getVectorElementType() == MVT::i1)
21541 return LowerSIGN_EXTEND_Mask(Op, Subtarget, DAG);
21542
21543 assert(Subtarget.hasAVX() && "Expected AVX support");
21544 return LowerAVXExtend(Op, DAG, Subtarget);
21545}
21546
21547// Lowering for SIGN_EXTEND_VECTOR_INREG and ZERO_EXTEND_VECTOR_INREG.
21548// For sign extend this needs to handle all vector sizes and SSE4.1 and
21549// non-SSE4.1 targets. For zero extend this should only handle inputs of
21550// MVT::v64i8 when BWI is not supported, but AVX512 is.
21551static SDValue LowerEXTEND_VECTOR_INREG(SDValue Op,
21552 const X86Subtarget &Subtarget,
21553 SelectionDAG &DAG) {
21554 SDValue In = Op->getOperand(0);
21555 MVT VT = Op->getSimpleValueType(0);
21556 MVT InVT = In.getSimpleValueType();
21557
21558 MVT SVT = VT.getVectorElementType();
21559 MVT InSVT = InVT.getVectorElementType();
21560 assert(SVT.getSizeInBits() > InSVT.getSizeInBits());
21561
21562 if (SVT != MVT::i64 && SVT != MVT::i32 && SVT != MVT::i16)
21563 return SDValue();
21564 if (InSVT != MVT::i32 && InSVT != MVT::i16 && InSVT != MVT::i8)
21565 return SDValue();
21566 if (!(VT.is128BitVector() && Subtarget.hasSSE2()) &&
21567 !(VT.is256BitVector() && Subtarget.hasAVX()) &&
21568 !(VT.is512BitVector() && Subtarget.hasAVX512()))
21569 return SDValue();
21570
21571 SDLoc dl(Op);
21572 unsigned Opc = Op.getOpcode();
21573 unsigned NumElts = VT.getVectorNumElements();
21574
21575 // For 256-bit vectors, we only need the lower (128-bit) half of the input.
21576 // For 512-bit vectors, we need 128-bits or 256-bits.
21577 if (InVT.getSizeInBits() > 128) {
21578 // Input needs to be at least the same number of elements as output, and
21579 // at least 128-bits.
21580 int InSize = InSVT.getSizeInBits() * NumElts;
21581 In = extractSubVector(In, 0, DAG, dl, std::max(InSize, 128));
21582 InVT = In.getSimpleValueType();
21583 }
21584
21585 // SSE41 targets can use the pmov[sz]x* instructions directly for 128-bit results,
21586 // so they are legal and shouldn't occur here. AVX2/AVX512 pmovsx* instructions
21587 // still need to be handled here for 256/512-bit results.
21588 if (Subtarget.hasInt256()) {
21589 assert(VT.getSizeInBits() > 128 && "Unexpected 128-bit vector extension");
21590
21591 if (InVT.getVectorNumElements() != NumElts)
21592 return DAG.getNode(Op.getOpcode(), dl, VT, In);
21593
21594 // FIXME: Apparently we create inreg operations that could be regular
21595 // extends.
21596 unsigned ExtOpc =
21597 Opc == ISD::SIGN_EXTEND_VECTOR_INREG ? ISD::SIGN_EXTEND
21598 : ISD::ZERO_EXTEND;
21599 return DAG.getNode(ExtOpc, dl, VT, In);
21600 }
21601
21602 // pre-AVX2 256-bit extensions need to be split into 128-bit instructions.
21603 if (Subtarget.hasAVX()) {
21604 assert(VT.is256BitVector() && "256-bit vector expected");
21605 MVT HalfVT = VT.getHalfNumVectorElementsVT();
21606 int HalfNumElts = HalfVT.getVectorNumElements();
21607
21608 unsigned NumSrcElts = InVT.getVectorNumElements();
21609 SmallVector<int, 16> HiMask(NumSrcElts, SM_SentinelUndef);
21610 for (int i = 0; i != HalfNumElts; ++i)
21611 HiMask[i] = HalfNumElts + i;
21612
21613 SDValue Lo = DAG.getNode(Opc, dl, HalfVT, In);
21614 SDValue Hi = DAG.getVectorShuffle(InVT, dl, In, DAG.getUNDEF(InVT), HiMask);
21615 Hi = DAG.getNode(Opc, dl, HalfVT, Hi);
21616 return DAG.getNode(ISD::CONCAT_VECTORS, dl, VT, Lo, Hi);
21617 }
21618
21619 // We should only get here for sign extend.
21620 assert(Opc == ISD::SIGN_EXTEND_VECTOR_INREG && "Unexpected opcode!");
21621 assert(VT.is128BitVector() && InVT.is128BitVector() && "Unexpected VTs");
21622
21623 // pre-SSE41 targets unpack lower lanes and then sign-extend using SRAI.
21624 SDValue Curr = In;
21625 SDValue SignExt = Curr;
21626
21627 // As SRAI is only available on i16/i32 types, we expand only up to i32
21628 // and handle i64 separately.
21629 if (InVT != MVT::v4i32) {
21630 MVT DestVT = VT == MVT::v2i64 ? MVT::v4i32 : VT;
21631
21632 unsigned DestWidth = DestVT.getScalarSizeInBits();
21633 unsigned Scale = DestWidth / InSVT.getSizeInBits();
21634
21635 unsigned InNumElts = InVT.getVectorNumElements();
21636 unsigned DestElts = DestVT.getVectorNumElements();
21637
21638 // Build a shuffle mask that takes each input element and places it in the
21639 // MSBs of the new element size.
21640 SmallVector<int, 16> Mask(InNumElts, SM_SentinelUndef);
21641 for (unsigned i = 0; i != DestElts; ++i)
21642 Mask[i * Scale + (Scale - 1)] = i;
21643
21644 Curr = DAG.getVectorShuffle(InVT, dl, In, In, Mask);
21645 Curr = DAG.getBitcast(DestVT, Curr);
21646
21647 unsigned SignExtShift = DestWidth - InSVT.getSizeInBits();
21648 SignExt = DAG.getNode(X86ISD::VSRAI, dl, DestVT, Curr,
21649 DAG.getTargetConstant(SignExtShift, dl, MVT::i8));
21650 }
21651
21652 if (VT == MVT::v2i64) {
21653 assert(Curr.getValueType() == MVT::v4i32 && "Unexpected input VT");
21654 SDValue Zero = DAG.getConstant(0, dl, MVT::v4i32);
21655 SDValue Sign = DAG.getSetCC(dl, MVT::v4i32, Zero, Curr, ISD::SETGT);
21656 SignExt = DAG.getVectorShuffle(MVT::v4i32, dl, SignExt, Sign, {0, 4, 1, 5});
21657 SignExt = DAG.getBitcast(VT, SignExt);
21658 }
21659
21660 return SignExt;
21661}
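// Illustrative sketch (hypothetical helper, not part of the original file,
// two's complement assumed): the pre-SSE4.1 path above sign-extends by
// shuffling the narrow element into the MSBs of the wide element and then
// shifting right arithmetically, as in this i8 -> i32 scalar analogue.
static int sextViaShiftModel(signed char X) {
  int Widened = (int)((unsigned)(unsigned char)X << 24); // element in the MSBs
  return Widened >> 24;                                  // VSRAI by 24
}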
21662
21663static SDValue LowerSIGN_EXTEND(SDValue Op, const X86Subtarget &Subtarget,
21664 SelectionDAG &DAG) {
21665 MVT VT = Op->getSimpleValueType(0);
21666 SDValue In = Op->getOperand(0);
21667 MVT InVT = In.getSimpleValueType();
21668 SDLoc dl(Op);
21669
21670 if (InVT.getVectorElementType() == MVT::i1)
21671 return LowerSIGN_EXTEND_Mask(Op, Subtarget, DAG);
21672
21673 assert(VT.isVector() && InVT.isVector() && "Expected vector type");
21674 assert(VT.getVectorNumElements() == InVT.getVectorNumElements() &&
21675        "Expected same number of elements");
21676 assert((VT.getVectorElementType() == MVT::i16 ||
21677         VT.getVectorElementType() == MVT::i32 ||
21678         VT.getVectorElementType() == MVT::i64) &&
21679        "Unexpected element type");
21680 assert((InVT.getVectorElementType() == MVT::i8 ||
21681         InVT.getVectorElementType() == MVT::i16 ||
21682         InVT.getVectorElementType() == MVT::i32) &&
21683        "Unexpected element type");
21684
21685 // Custom legalize v8i8->v8i64 on CPUs without avx512bw.
21686 if (InVT == MVT::v8i8) {
21687 if (VT != MVT::v8i64)
21688 return SDValue();
21689
21690 In = DAG.getNode(ISD::CONCAT_VECTORS, SDLoc(Op),
21691 MVT::v16i8, In, DAG.getUNDEF(MVT::v8i8));
21692 return DAG.getNode(ISD::SIGN_EXTEND_VECTOR_INREG, dl, VT, In);
21693 }
21694
21695 if (Subtarget.hasInt256())
21696 return Op;
21697
21698 // Optimize vectors in AVX mode:
21699 // sign extend v8i16 to v8i32 and
21700 // v4i32 to v4i64.
21701 //
21702 // Divide the input vector into two parts
21703 // (for v4i32 the high shuffle mask will be {2, 3, -1, -1}),
21704 // use the vpmovsx instruction to extend v4i32 -> v2i64 and v8i16 -> v4i32,
21705 // then concat the vectors back to the original VT.
21706 MVT HalfVT = VT.getHalfNumVectorElementsVT();
21707 SDValue OpLo = DAG.getNode(ISD::SIGN_EXTEND_VECTOR_INREG, dl, HalfVT, In);
21708
21709 unsigned NumElems = InVT.getVectorNumElements();
21710 SmallVector<int,8> ShufMask(NumElems, -1);
21711 for (unsigned i = 0; i != NumElems/2; ++i)
21712 ShufMask[i] = i + NumElems/2;
21713
21714 SDValue OpHi = DAG.getVectorShuffle(InVT, dl, In, In, ShufMask);
21715 OpHi = DAG.getNode(ISD::SIGN_EXTEND_VECTOR_INREG, dl, HalfVT, OpHi);
21716
21717 return DAG.getNode(ISD::CONCAT_VECTORS, dl, VT, OpLo, OpHi);
21718}
21719
21720/// Change a vector store into a pair of half-size vector stores.
21721static SDValue splitVectorStore(StoreSDNode *Store, SelectionDAG &DAG) {
21722 SDValue StoredVal = Store->getValue();
21723 assert((StoredVal.getValueType().is256BitVector() ||
21724         StoredVal.getValueType().is512BitVector()) &&
21725        "Expecting 256/512-bit op");
21726
21727 // Splitting volatile memory ops is not allowed unless the operation was not
21728 // legal to begin with. We are assuming the input op is legal (this transform
21729 // is only used for targets with AVX).
21730 if (!Store->isSimple())
21731 return SDValue();
21732
21733 MVT StoreVT = StoredVal.getSimpleValueType();
21734 unsigned NumElems = StoreVT.getVectorNumElements();
21735 unsigned HalfSize = StoredVal.getValueSizeInBits() / 2;
21736 unsigned HalfAlign = (128 == HalfSize ? 16 : 32);
21737
21738 SDLoc DL(Store);
21739 SDValue Value0 = extractSubVector(StoredVal, 0, DAG, DL, HalfSize);
21740 SDValue Value1 = extractSubVector(StoredVal, NumElems / 2, DAG, DL, HalfSize);
21741 SDValue Ptr0 = Store->getBasePtr();
21742 SDValue Ptr1 = DAG.getMemBasePlusOffset(Ptr0, HalfAlign, DL);
21743 unsigned Alignment = Store->getAlignment();
21744 SDValue Ch0 =
21745 DAG.getStore(Store->getChain(), DL, Value0, Ptr0, Store->getPointerInfo(),
21746 Alignment, Store->getMemOperand()->getFlags());
21747 SDValue Ch1 = DAG.getStore(Store->getChain(), DL, Value1, Ptr1,
21748 Store->getPointerInfo().getWithOffset(HalfAlign),
21749 MinAlign(Alignment, HalfAlign),
21750 Store->getMemOperand()->getFlags());
21751 return DAG.getNode(ISD::TokenFactor, DL, MVT::Other, Ch0, Ch1);
21752}
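// Illustrative sketch (hypothetical helper, not part of the original file):
// splitVectorStore turns one 256-bit store into two 128-bit stores, the
// second at the base pointer plus HalfAlign (16) bytes, as in this byte-wise
// analogue.
static void storeAsTwoHalvesModel(unsigned char *Ptr, const unsigned char *Val) {
  __builtin_memcpy(Ptr, Val, 16);           // low 128 bits
  __builtin_memcpy(Ptr + 16, Val + 16, 16); // high 128 bits at offset 16
}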
21753
21754/// Scalarize a vector store, bitcasting to TargetVT to determine the scalar
21755/// type.
21756static SDValue scalarizeVectorStore(StoreSDNode *Store, MVT StoreVT,
21757 SelectionDAG &DAG) {
21758 SDValue StoredVal = Store->getValue();
21759 assert(StoreVT.is128BitVector() &&
21760        StoredVal.getValueType().is128BitVector() && "Expecting 128-bit op");
21761 StoredVal = DAG.getBitcast(StoreVT, StoredVal);
21762
21763 // Splitting volatile memory ops is not allowed unless the operation was not
21764 // legal to begin with. We are assuming the input op is legal (this transform
21765 // is only used for targets with AVX).
21766 if (!Store->isSimple())
21767 return SDValue();
21768
21769 MVT StoreSVT = StoreVT.getScalarType();
21770 unsigned NumElems = StoreVT.getVectorNumElements();
21771 unsigned ScalarSize = StoreSVT.getStoreSize();
21772 unsigned Alignment = Store->getAlignment();
21773
21774 SDLoc DL(Store);
21775 SmallVector<SDValue, 4> Stores;
21776 for (unsigned i = 0; i != NumElems; ++i) {
21777 unsigned Offset = i * ScalarSize;
21778 SDValue Ptr = DAG.getMemBasePlusOffset(Store->getBasePtr(), Offset, DL);
21779 SDValue Scl = DAG.getNode(ISD::EXTRACT_VECTOR_ELT, DL, StoreSVT, StoredVal,
21780 DAG.getIntPtrConstant(i, DL));
21781 SDValue Ch = DAG.getStore(Store->getChain(), DL, Scl, Ptr,
21782 Store->getPointerInfo().getWithOffset(Offset),
21783 MinAlign(Alignment, Offset),
21784 Store->getMemOperand()->getFlags());
21785 Stores.push_back(Ch);
21786 }
21787 return DAG.getNode(ISD::TokenFactor, DL, MVT::Other, Stores);
21788}
21789
21790static SDValue LowerStore(SDValue Op, const X86Subtarget &Subtarget,
21791 SelectionDAG &DAG) {
21792 StoreSDNode *St = cast<StoreSDNode>(Op.getNode());
21793 SDLoc dl(St);
21794 SDValue StoredVal = St->getValue();
21795
21796 // Without AVX512DQ, we need to use a scalar type for v2i1/v4i1/v8i1 stores.
21797 if (StoredVal.getValueType().isVector() &&
21798 StoredVal.getValueType().getVectorElementType() == MVT::i1) {
21799 assert(StoredVal.getValueType().getVectorNumElements() <= 8 &&
21800        "Unexpected VT");
21801 assert(!St->isTruncatingStore() && "Expected non-truncating store");
21802 assert(Subtarget.hasAVX512() && !Subtarget.hasDQI() &&
21803        "Expected AVX512F without AVX512DQI");
21804
21805 StoredVal = DAG.getNode(ISD::INSERT_SUBVECTOR, dl, MVT::v16i1,
21806 DAG.getUNDEF(MVT::v16i1), StoredVal,
21807 DAG.getIntPtrConstant(0, dl));
21808 StoredVal = DAG.getBitcast(MVT::i16, StoredVal);
21809 StoredVal = DAG.getNode(ISD::TRUNCATE, dl, MVT::i8, StoredVal);
21810
21811 return DAG.getStore(St->getChain(), dl, StoredVal, St->getBasePtr(),
21812 St->getPointerInfo(), St->getAlignment(),
21813 St->getMemOperand()->getFlags());
21814 }
21815
21816 if (St->isTruncatingStore())
21817 return SDValue();
21818
21819 // If this is a 256-bit store of concatenated ops, we are better off splitting
21820 // that store into two 128-bit stores. This avoids spurious use of 256-bit ops
21821 // and each half can execute independently. Some cores would split the op into
21822 // halves anyway, so the concat (vinsertf128) is purely an extra op.
21823 MVT StoreVT = StoredVal.getSimpleValueType();
21824 if (StoreVT.is256BitVector()) {
21825 SmallVector<SDValue, 4> CatOps;
21826 if (StoredVal.hasOneUse() && collectConcatOps(StoredVal.getNode(), CatOps))
21827 return splitVectorStore(St, DAG);
21828 return SDValue();
21829 }
21830
21831 const TargetLowering &TLI = DAG.getTargetLoweringInfo();
21832 assert(StoreVT.isVector() && StoreVT.getSizeInBits() == 64 &&
21833        "Unexpected VT");
21834 assert(TLI.getTypeAction(*DAG.getContext(), StoreVT) ==
21835        TargetLowering::TypeWidenVector && "Unexpected type action!");
21836
21837 EVT WideVT = TLI.getTypeToTransformTo(*DAG.getContext(), StoreVT);
21838 StoredVal = DAG.getNode(ISD::CONCAT_VECTORS, dl, WideVT, StoredVal,
21839 DAG.getUNDEF(StoreVT));
21840
21841 if (Subtarget.hasSSE2()) {
21842 // Widen the vector, cast to a v2x64 type, extract the single 64-bit element
21843 // and store it.
21844 MVT StVT = Subtarget.is64Bit() && StoreVT.isInteger() ? MVT::i64 : MVT::f64;
21845 MVT CastVT = MVT::getVectorVT(StVT, 2);
21846 StoredVal = DAG.getBitcast(CastVT, StoredVal);
21847 StoredVal = DAG.getNode(ISD::EXTRACT_VECTOR_ELT, dl, StVT, StoredVal,
21848 DAG.getIntPtrConstant(0, dl));
21849
21850 return DAG.getStore(St->getChain(), dl, StoredVal, St->getBasePtr(),
21851 St->getPointerInfo(), St->getAlignment(),
21852 St->getMemOperand()->getFlags());
21853 }
21854 assert(Subtarget.hasSSE1() && "Expected SSE");
21855 SDVTList Tys = DAG.getVTList(MVT::Other);
21856 SDValue Ops[] = {St->getChain(), StoredVal, St->getBasePtr()};
21857 return DAG.getMemIntrinsicNode(X86ISD::VEXTRACT_STORE, dl, Tys, Ops, MVT::i64,
21858 St->getMemOperand());
21859}
21860
21861// Lower vector extended loads using a shuffle. If SSSE3 is not available we
21862// may emit an illegal shuffle but the expansion is still better than scalar
21863// code. We generate sext/sext_invec for SEXTLOADs if it's available, otherwise
21864 // we'll emit a shuffle and an arithmetic shift.
21865// FIXME: Is the expansion actually better than scalar code? It doesn't seem so.
21866// TODO: It is possible to support ZExt by zeroing the undef values during
21867// the shuffle phase or after the shuffle.
21868static SDValue LowerLoad(SDValue Op, const X86Subtarget &Subtarget,
21869 SelectionDAG &DAG) {
21870 MVT RegVT = Op.getSimpleValueType();
21871 assert(RegVT.isVector() && "We only custom lower vector loads.");
21872 assert(RegVT.isInteger() &&
21873        "We only custom lower integer vector loads.");
21874
21875 LoadSDNode *Ld = cast<LoadSDNode>(Op.getNode());
21876 SDLoc dl(Ld);
21877
21878 // Without AVX512DQ, we need to use a scalar type for v2i1/v4i1/v8i1 loads.
21879 if (RegVT.getVectorElementType() == MVT::i1) {
21880 assert(EVT(RegVT) == Ld->getMemoryVT() && "Expected non-extending load")((EVT(RegVT) == Ld->getMemoryVT() && "Expected non-extending load"
) ? static_cast<void> (0) : __assert_fail ("EVT(RegVT) == Ld->getMemoryVT() && \"Expected non-extending load\""
, "/build/llvm-toolchain-snapshot-10~svn374877/lib/Target/X86/X86ISelLowering.cpp"
, 21880, __PRETTY_FUNCTION__))
;
21881 assert(RegVT.getVectorNumElements() <= 8 && "Unexpected VT");
21882 assert(Subtarget.hasAVX512() && !Subtarget.hasDQI() &&
21883 "Expected AVX512F without AVX512DQI");
21884
21885 SDValue NewLd = DAG.getLoad(MVT::i8, dl, Ld->getChain(), Ld->getBasePtr(),
21886 Ld->getPointerInfo(), Ld->getAlignment(),
21887 Ld->getMemOperand()->getFlags());
21888
21889 // Replace chain users with the new chain.
21890 assert(NewLd->getNumValues() == 2 && "Loads must carry a chain!");
21891
21892 SDValue Val = DAG.getNode(ISD::ANY_EXTEND, dl, MVT::i16, NewLd);
21893 Val = DAG.getNode(ISD::EXTRACT_SUBVECTOR, dl, RegVT,
21894 DAG.getBitcast(MVT::v16i1, Val),
21895 DAG.getIntPtrConstant(0, dl));
21896 return DAG.getMergeValues({Val, NewLd.getValue(1)}, dl);
21897 }
21898
21899 return SDValue();
21900}
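
In the v*i1 case above, all mask bits fit in one byte, so the lowering performs a single i8 load, any-extends it, and bitcasts to a wider i1 vector before extracting the low elements. A rough scalar analogue, assuming an 8-element mask (names are illustrative only):

#include <bitset>
#include <cstdint>

// One scalar i8 load carries all eight mask bits; the bitset stands in for
// the v16i1 bitcast + EXTRACT_SUBVECTOR of the lowering above.
static std::bitset<8> LoadV8i1(const uint8_t *Ptr) {
  return std::bitset<8>(*Ptr);
}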
21901
21902/// Return true if node is an ISD::AND or ISD::OR of two X86ISD::SETCC nodes
21903/// each of which has no other use apart from the AND / OR.
21904static bool isAndOrOfSetCCs(SDValue Op, unsigned &Opc) {
21905 Opc = Op.getOpcode();
21906 if (Opc != ISD::OR && Opc != ISD::AND)
21907 return false;
21908 return (Op.getOperand(0).getOpcode() == X86ISD::SETCC &&
21909 Op.getOperand(0).hasOneUse() &&
21910 Op.getOperand(1).getOpcode() == X86ISD::SETCC &&
21911 Op.getOperand(1).hasOneUse());
21912}
21913
21914/// Return true if node is an ISD::XOR of a X86ISD::SETCC and 1 and that the
21915/// SETCC node has a single use.
21916static bool isXor1OfSetCC(SDValue Op) {
21917 if (Op.getOpcode() != ISD::XOR)
21918 return false;
21919 if (isOneConstant(Op.getOperand(1)))
21920 return Op.getOperand(0).getOpcode() == X86ISD::SETCC &&
21921 Op.getOperand(0).hasOneUse();
21922 return false;
21923}
21924
21925SDValue X86TargetLowering::LowerBRCOND(SDValue Op, SelectionDAG &DAG) const {
21926 bool addTest = true;
21927 SDValue Chain = Op.getOperand(0);
21928 SDValue Cond = Op.getOperand(1);
21929 SDValue Dest = Op.getOperand(2);
21930 SDLoc dl(Op);
21931 SDValue CC;
21932 bool Inverted = false;
21933
21934 if (Cond.getOpcode() == ISD::SETCC) {
21935 // Check for setcc([su]{add,sub,mul}o == 0).
21936 if (cast<CondCodeSDNode>(Cond.getOperand(2))->get() == ISD::SETEQ &&
21937 isNullConstant(Cond.getOperand(1)) &&
21938 Cond.getOperand(0).getResNo() == 1 &&
21939 (Cond.getOperand(0).getOpcode() == ISD::SADDO ||
21940 Cond.getOperand(0).getOpcode() == ISD::UADDO ||
21941 Cond.getOperand(0).getOpcode() == ISD::SSUBO ||
21942 Cond.getOperand(0).getOpcode() == ISD::USUBO ||
21943 Cond.getOperand(0).getOpcode() == ISD::SMULO ||
21944 Cond.getOperand(0).getOpcode() == ISD::UMULO)) {
21945 Inverted = true;
21946 Cond = Cond.getOperand(0);
21947 } else {
21948 if (SDValue NewCond = LowerSETCC(Cond, DAG))
21949 Cond = NewCond;
21950 }
21951 }
21952#if 0
21953 // FIXME: LowerXALUO doesn't handle these!!
21954 else if (Cond.getOpcode() == X86ISD::ADD ||
21955 Cond.getOpcode() == X86ISD::SUB ||
21956 Cond.getOpcode() == X86ISD::SMUL ||
21957 Cond.getOpcode() == X86ISD::UMUL)
21958 Cond = LowerXALUO(Cond, DAG);
21959#endif
21960
21961 // Look past (and (setcc_carry (cmp ...)), 1).
21962 if (Cond.getOpcode() == ISD::AND &&
21963 Cond.getOperand(0).getOpcode() == X86ISD::SETCC_CARRY &&
21964 isOneConstant(Cond.getOperand(1)))
21965 Cond = Cond.getOperand(0);
21966
21967 // If condition flag is set by a X86ISD::CMP, then use it as the condition
21968 // setting operand in place of the X86ISD::SETCC.
21969 unsigned CondOpcode = Cond.getOpcode();
21970 if (CondOpcode == X86ISD::SETCC ||
21971 CondOpcode == X86ISD::SETCC_CARRY) {
21972 CC = Cond.getOperand(0);
21973
21974 SDValue Cmp = Cond.getOperand(1);
21975 unsigned Opc = Cmp.getOpcode();
21976 // FIXME: WHY THE SPECIAL CASING OF LogicalCmp??
21977 if (isX86LogicalCmp(Cmp) || Opc == X86ISD::BT) {
21978 Cond = Cmp;
21979 addTest = false;
21980 } else {
21981 switch (cast<ConstantSDNode>(CC)->getZExtValue()) {
21982 default: break;
21983 case X86::COND_O:
21984 case X86::COND_B:
21985 // These can only come from an arithmetic instruction with overflow,
21986 // e.g. SADDO, UADDO.
21987 Cond = Cond.getOperand(1);
21988 addTest = false;
21989 break;
21990 }
21991 }
21992 }
21993 CondOpcode = Cond.getOpcode();
21994 if (CondOpcode == ISD::UADDO || CondOpcode == ISD::SADDO ||
21995 CondOpcode == ISD::USUBO || CondOpcode == ISD::SSUBO ||
21996 CondOpcode == ISD::UMULO || CondOpcode == ISD::SMULO) {
21997 SDValue Value;
21998 X86::CondCode X86Cond;
21999 std::tie(Value, Cond) = getX86XALUOOp(X86Cond, Cond.getValue(0), DAG);
22000
22001 if (Inverted)
22002 X86Cond = X86::GetOppositeBranchCondition(X86Cond);
22003
22004 CC = DAG.getTargetConstant(X86Cond, dl, MVT::i8);
22005 addTest = false;
22006 } else {
22007 unsigned CondOpc;
22008 if (Cond.hasOneUse() && isAndOrOfSetCCs(Cond, CondOpc)) {
22009 SDValue Cmp = Cond.getOperand(0).getOperand(1);
22010 if (CondOpc == ISD::OR) {
22011 // Also, recognize the pattern generated by an FCMP_UNE. We can emit
22012 // two branches instead of an explicit OR instruction with a
22013 // separate test.
22014 if (Cmp == Cond.getOperand(1).getOperand(1) &&
22015 isX86LogicalCmp(Cmp)) {
22016 CC = Cond.getOperand(0).getOperand(0);
22017 Chain = DAG.getNode(X86ISD::BRCOND, dl, Op.getValueType(),
22018 Chain, Dest, CC, Cmp);
22019 CC = Cond.getOperand(1).getOperand(0);
22020 Cond = Cmp;
22021 addTest = false;
22022 }
22023 } else { // ISD::AND
22024 // Also, recognize the pattern generated by an FCMP_OEQ. We can emit
22025 // two branches instead of an explicit AND instruction with a
22026 // separate test. However, we only do this if this block doesn't
22027 // have a fall-through edge, because this requires an explicit
22028 // jmp when the condition is false.
22029 if (Cmp == Cond.getOperand(1).getOperand(1) &&
22030 isX86LogicalCmp(Cmp) &&
22031 Op.getNode()->hasOneUse()) {
22032 X86::CondCode CCode0 =
22033 (X86::CondCode)Cond.getOperand(0).getConstantOperandVal(0);
22034 CCode0 = X86::GetOppositeBranchCondition(CCode0);
22035 CC = DAG.getTargetConstant(CCode0, dl, MVT::i8);
22036 SDNode *User = *Op.getNode()->use_begin();
22037 // Look for an unconditional branch following this conditional branch.
22038 // We need this because we need to reverse the successors in order
22039 // to implement FCMP_OEQ.
22040 if (User->getOpcode() == ISD::BR) {
22041 SDValue FalseBB = User->getOperand(1);
22042 SDNode *NewBR =
22043 DAG.UpdateNodeOperands(User, User->getOperand(0), Dest);
22044 assert(NewBR == User);
22045 (void)NewBR;
22046 Dest = FalseBB;
22047
22048 Chain = DAG.getNode(X86ISD::BRCOND, dl, Op.getValueType(), Chain,
22049 Dest, CC, Cmp);
22050 X86::CondCode CCode1 =
22051 (X86::CondCode)Cond.getOperand(1).getConstantOperandVal(0);
22052 CCode1 = X86::GetOppositeBranchCondition(CCode1);
22053 CC = DAG.getTargetConstant(CCode1, dl, MVT::i8);
22054 Cond = Cmp;
22055 addTest = false;
22056 }
22057 }
22058 }
22059 } else if (Cond.hasOneUse() && isXor1OfSetCC(Cond)) {
22060 // Recognize xorb (setcc), 1 patterns. The xor inverts the condition.
22061 // It should be transformed by the DAG combiner except when the condition
22062 // is set by an arithmetic-with-overflow node.
22063 X86::CondCode CCode =
22064 (X86::CondCode)Cond.getOperand(0).getConstantOperandVal(0);
22065 CCode = X86::GetOppositeBranchCondition(CCode);
22066 CC = DAG.getTargetConstant(CCode, dl, MVT::i8);
22067 Cond = Cond.getOperand(0).getOperand(1);
22068 addTest = false;
22069 } else if (Cond.getOpcode() == ISD::SETCC &&
22070 cast<CondCodeSDNode>(Cond.getOperand(2))->get() == ISD::SETOEQ) {
22071 // For FCMP_OEQ, we can emit
22072 // two branches instead of an explicit AND instruction with a
22073 // separate test. However, we only do this if this block doesn't
22074 // have a fall-through edge, because this requires an explicit
22075 // jmp when the condition is false.
22076 if (Op.getNode()->hasOneUse()) {
22077 SDNode *User = *Op.getNode()->use_begin();
22078 // Look for an unconditional branch following this conditional branch.
22079 // We need this because we need to reverse the successors in order
22080 // to implement FCMP_OEQ.
22081 if (User->getOpcode() == ISD::BR) {
22082 SDValue FalseBB = User->getOperand(1);
22083 SDNode *NewBR =
22084 DAG.UpdateNodeOperands(User, User->getOperand(0), Dest);
22085 assert(NewBR == User);
22086 (void)NewBR;
22087 Dest = FalseBB;
22088
22089 SDValue Cmp = DAG.getNode(X86ISD::CMP, dl, MVT::i32,
22090 Cond.getOperand(0), Cond.getOperand(1));
22091 Cmp = ConvertCmpIfNecessary(Cmp, DAG);
22092 CC = DAG.getTargetConstant(X86::COND_NE, dl, MVT::i8);
22093 Chain = DAG.getNode(X86ISD::BRCOND, dl, Op.getValueType(),
22094 Chain, Dest, CC, Cmp);
22095 CC = DAG.getTargetConstant(X86::COND_P, dl, MVT::i8);
22096 Cond = Cmp;
22097 addTest = false;
22098 }
22099 }
22100 } else if (Cond.getOpcode() == ISD::SETCC &&
22101 cast<CondCodeSDNode>(Cond.getOperand(2))->get() == ISD::SETUNE) {
22102 // For FCMP_UNE, we can emit
22103 // two branches instead of an explicit OR instruction with a
22104 // separate test.
22105 SDValue Cmp = DAG.getNode(X86ISD::CMP, dl, MVT::i32,
22106 Cond.getOperand(0), Cond.getOperand(1));
22107 Cmp = ConvertCmpIfNecessary(Cmp, DAG);
22108 CC = DAG.getTargetConstant(X86::COND_NE, dl, MVT::i8);
22109 Chain = DAG.getNode(X86ISD::BRCOND, dl, Op.getValueType(),
22110 Chain, Dest, CC, Cmp);
22111 CC = DAG.getTargetConstant(X86::COND_P, dl, MVT::i8);
22112 Cond = Cmp;
22113 addTest = false;
22114 }
22115 }
22116
22117 if (addTest) {
22118 // Look past the truncate if the high bits are known zero.
22119 if (isTruncWithZeroHighBitsInput(Cond, DAG))
22120 Cond = Cond.getOperand(0);
22121
22122 // We know the result of AND is compared against zero. Try to match
22123 // it to BT.
22124 if (Cond.getOpcode() == ISD::AND && Cond.hasOneUse()) {
22125 SDValue BTCC;
22126 if (SDValue BT = LowerAndToBT(Cond, ISD::SETNE, dl, DAG, BTCC)) {
22127 CC = BTCC;
22128 Cond = BT;
22129 addTest = false;
22130 }
22131 }
22132 }
22133
22134 if (addTest) {
22135 X86::CondCode X86Cond = Inverted ? X86::COND_E : X86::COND_NE;
22136 CC = DAG.getTargetConstant(X86Cond, dl, MVT::i8);
22137 Cond = EmitCmp(Cond, DAG.getConstant(0, dl, Cond.getValueType()),
22138 X86Cond, dl, DAG);
22139 }
22140 Cond = ConvertCmpIfNecessary(Cond, DAG);
22141 return DAG.getNode(X86ISD::BRCOND, dl, Op.getValueType(),
22142 Chain, Dest, CC, Cond);
22143}
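
The FCMP_OEQ/FCMP_UNE handling above replaces an explicit AND/OR of two SETCCs with two conditional branches on the same UCOMISS/UCOMISD flags (NE and P). A source-level shape that exercises the SETUNE path, purely for illustration; do_something is a placeholder:

extern void do_something();

// a != b on doubles is ISD::SETUNE; with the lowering above it becomes one
// compare followed by a JNE and a JP to the destination rather than OR'ing
// two SETCC results.
void BranchOnUNE(double A, double B) {
  if (A != B)
    do_something();
}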
22144
22145// Lower dynamic stack allocation to _alloca call for Cygwin/Mingw targets.
22146// Calls to _alloca are needed to probe the stack when allocating more than 4k
22147// bytes in one go. Touching the stack at 4K increments is necessary to ensure
22148 // that the guard pages used by the OS virtual memory manager are allocated
22149 // in the correct sequence.
22150SDValue
22151X86TargetLowering::LowerDYNAMIC_STACKALLOC(SDValue Op,
22152 SelectionDAG &DAG) const {
22153 MachineFunction &MF = DAG.getMachineFunction();
22154 bool SplitStack = MF.shouldSplitStack();
22155 bool EmitStackProbe = !getStackProbeSymbolName(MF).empty();
22156 bool Lower = (Subtarget.isOSWindows() && !Subtarget.isTargetMachO()) ||
22157 SplitStack || EmitStackProbe;
22158 SDLoc dl(Op);
22159
22160 // Get the inputs.
22161 SDNode *Node = Op.getNode();
22162 SDValue Chain = Op.getOperand(0);
22163 SDValue Size = Op.getOperand(1);
22164 unsigned Align = Op.getConstantOperandVal(2);
22165 EVT VT = Node->getValueType(0);
22166
22167 // Chain the dynamic stack allocation so that it doesn't modify the stack
22168 // pointer when other instructions are using the stack.
22169 Chain = DAG.getCALLSEQ_START(Chain, 0, 0, dl);
22170
22171 bool Is64Bit = Subtarget.is64Bit();
22172 MVT SPTy = getPointerTy(DAG.getDataLayout());
22173
22174 SDValue Result;
22175 if (!Lower) {
22176 const TargetLowering &TLI = DAG.getTargetLoweringInfo();
22177 unsigned SPReg = TLI.getStackPointerRegisterToSaveRestore();
22178 assert(SPReg && "Target cannot require DYNAMIC_STACKALLOC expansion and"
22179 " not tell us which reg is the stack pointer!");
22180
22181 SDValue SP = DAG.getCopyFromReg(Chain, dl, SPReg, VT);
22182 Chain = SP.getValue(1);
22183 const TargetFrameLowering &TFI = *Subtarget.getFrameLowering();
22184 unsigned StackAlign = TFI.getStackAlignment();
22185 Result = DAG.getNode(ISD::SUB, dl, VT, SP, Size); // Value
22186 if (Align > StackAlign)
22187 Result = DAG.getNode(ISD::AND, dl, VT, Result,
22188 DAG.getConstant(-(uint64_t)Align, dl, VT));
22189 Chain = DAG.getCopyToReg(Chain, dl, SPReg, Result); // Output chain
22190 } else if (SplitStack) {
22191 MachineRegisterInfo &MRI = MF.getRegInfo();
22192
22193 if (Is64Bit) {
22194 // The 64-bit implementation of segmented stacks needs to clobber both r10
22195 // and r11. This makes it impossible to use it along with nested parameters.
22196 const Function &F = MF.getFunction();
22197 for (const auto &A : F.args()) {
22198 if (A.hasNestAttr())
22199 report_fatal_error("Cannot use segmented stacks with functions that "
22200 "have nested arguments.");
22201 }
22202 }
22203
22204 const TargetRegisterClass *AddrRegClass = getRegClassFor(SPTy);
22205 Register Vreg = MRI.createVirtualRegister(AddrRegClass);
22206 Chain = DAG.getCopyToReg(Chain, dl, Vreg, Size);
22207 Result = DAG.getNode(X86ISD::SEG_ALLOCA, dl, SPTy, Chain,
22208 DAG.getRegister(Vreg, SPTy));
22209 } else {
22210 SDVTList NodeTys = DAG.getVTList(MVT::Other, MVT::Glue);
22211 Chain = DAG.getNode(X86ISD::WIN_ALLOCA, dl, NodeTys, Chain, Size);
22212 MF.getInfo<X86MachineFunctionInfo>()->setHasWinAlloca(true);
22213
22214 const X86RegisterInfo *RegInfo = Subtarget.getRegisterInfo();
22215 Register SPReg = RegInfo->getStackRegister();
22216 SDValue SP = DAG.getCopyFromReg(Chain, dl, SPReg, SPTy);
22217 Chain = SP.getValue(1);
22218
22219 if (Align) {
22220 SP = DAG.getNode(ISD::AND, dl, VT, SP.getValue(0),
22221 DAG.getConstant(-(uint64_t)Align, dl, VT));
22222 Chain = DAG.getCopyToReg(Chain, dl, SPReg, SP);
22223 }
22224
22225 Result = SP;
22226 }
22227
22228 Chain = DAG.getCALLSEQ_END(Chain, DAG.getIntPtrConstant(0, dl, true),
22229 DAG.getIntPtrConstant(0, dl, true), SDValue(), dl);
22230
22231 SDValue Ops[2] = {Result, Chain};
22232 return DAG.getMergeValues(Ops, dl);
22233}
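
In the !Lower path above, the allocation is plain pointer arithmetic on the stack pointer: subtract the size and, when the requested alignment exceeds the default stack alignment, round down. A small sketch of that arithmetic, assuming a 16-byte default stack alignment and a power-of-two Align:

#include <cstdint>

// Result = (SP - Size) & -Align, matching the ISD::SUB / ISD::AND pair above.
static uint64_t DynAllocaAddress(uint64_t SP, uint64_t Size, uint64_t Align) {
  uint64_t Result = SP - Size;  // the stack grows downwards
  if (Align > 16)               // only realign past the default alignment
    Result &= ~(Align - 1);     // same as Result & -(uint64_t)Align
  return Result;
}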
22234
22235SDValue X86TargetLowering::LowerVASTART(SDValue Op, SelectionDAG &DAG) const {
22236 MachineFunction &MF = DAG.getMachineFunction();
22237 auto PtrVT = getPointerTy(MF.getDataLayout());
22238 X86MachineFunctionInfo *FuncInfo = MF.getInfo<X86MachineFunctionInfo>();
22239
22240 const Value *SV = cast<SrcValueSDNode>(Op.getOperand(2))->getValue();
22241 SDLoc DL(Op);
22242
22243 if (!Subtarget.is64Bit() ||
22244 Subtarget.isCallingConvWin64(MF.getFunction().getCallingConv())) {
22245 // vastart just stores the address of the VarArgsFrameIndex slot into the
22246 // memory location argument.
22247 SDValue FR = DAG.getFrameIndex(FuncInfo->getVarArgsFrameIndex(), PtrVT);
22248 return DAG.getStore(Op.getOperand(0), DL, FR, Op.getOperand(1),
22249 MachinePointerInfo(SV));
22250 }
22251
22252 // __va_list_tag:
22253 // gp_offset (0 - 6 * 8)
22254 // fp_offset (48 - 48 + 8 * 16)
22255 // overflow_arg_area (point to parameters coming in memory).
22256 // reg_save_area
22257 SmallVector<SDValue, 8> MemOps;
22258 SDValue FIN = Op.getOperand(1);
22259 // Store gp_offset
22260 SDValue Store = DAG.getStore(
22261 Op.getOperand(0), DL,
22262 DAG.getConstant(FuncInfo->getVarArgsGPOffset(), DL, MVT::i32), FIN,
22263 MachinePointerInfo(SV));
22264 MemOps.push_back(Store);
22265
22266 // Store fp_offset
22267 FIN = DAG.getMemBasePlusOffset(FIN, 4, DL);
22268 Store = DAG.getStore(
22269 Op.getOperand(0), DL,
22270 DAG.getConstant(FuncInfo->getVarArgsFPOffset(), DL, MVT::i32), FIN,
22271 MachinePointerInfo(SV, 4));
22272 MemOps.push_back(Store);
22273
22274 // Store ptr to overflow_arg_area
22275 FIN = DAG.getNode(ISD::ADD, DL, PtrVT, FIN, DAG.getIntPtrConstant(4, DL));
22276 SDValue OVFIN = DAG.getFrameIndex(FuncInfo->getVarArgsFrameIndex(), PtrVT);
22277 Store =
22278 DAG.getStore(Op.getOperand(0), DL, OVFIN, FIN, MachinePointerInfo(SV, 8));
22279 MemOps.push_back(Store);
22280
22281 // Store ptr to reg_save_area.
22282 FIN = DAG.getNode(ISD::ADD, DL, PtrVT, FIN, DAG.getIntPtrConstant(
22283 Subtarget.isTarget64BitLP64() ? 8 : 4, DL));
22284 SDValue RSFIN = DAG.getFrameIndex(FuncInfo->getRegSaveFrameIndex(), PtrVT);
22285 Store = DAG.getStore(
22286 Op.getOperand(0), DL, RSFIN, FIN,
22287 MachinePointerInfo(SV, Subtarget.isTarget64BitLP64() ? 16 : 12));
22288 MemOps.push_back(Store);
22289 return DAG.getNode(ISD::TokenFactor, DL, MVT::Other, MemOps);
22290}
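
For reference, the stores above fill in the System V x86-64 __va_list_tag at offsets 0, 4, 8 and 16 (12 under the ILP32/X32 variant, where the pointer fields are 4 bytes). A sketch of that layout:

// LP64 layout; sizeof == 24, which is also the size copied by va_copy below.
struct VaListTag {
  unsigned GPOffset;      // 0 .. 48, advanced in 8-byte steps
  unsigned FPOffset;      // 48 .. 176, advanced in 16-byte steps
  void *OverflowArgArea;  // arguments passed on the stack
  void *RegSaveArea;      // spilled integer/XMM register arguments
};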
22291
22292SDValue X86TargetLowering::LowerVAARG(SDValue Op, SelectionDAG &DAG) const {
22293 assert(Subtarget.is64Bit() &&
22294 "LowerVAARG only handles 64-bit va_arg!");
22295 assert(Op.getNumOperands() == 4);
22296
22297 MachineFunction &MF = DAG.getMachineFunction();
22298 if (Subtarget.isCallingConvWin64(MF.getFunction().getCallingConv()))
22299 // The Win64 ABI uses char* instead of a structure.
22300 return DAG.expandVAArg(Op.getNode());
22301
22302 SDValue Chain = Op.getOperand(0);
22303 SDValue SrcPtr = Op.getOperand(1);
22304 const Value *SV = cast<SrcValueSDNode>(Op.getOperand(2))->getValue();
22305 unsigned Align = Op.getConstantOperandVal(3);
22306 SDLoc dl(Op);
22307
22308 EVT ArgVT = Op.getNode()->getValueType(0);
22309 Type *ArgTy = ArgVT.getTypeForEVT(*DAG.getContext());
22310 uint32_t ArgSize = DAG.getDataLayout().getTypeAllocSize(ArgTy);
22311 uint8_t ArgMode;
22312
22313 // Decide which area this value should be read from.
22314 // TODO: Implement the AMD64 ABI in its entirety. This simple
22315 // selection mechanism works only for the basic types.
22316 if (ArgVT == MVT::f80) {
22317 llvm_unreachable("va_arg for f80 not yet implemented")::llvm::llvm_unreachable_internal("va_arg for f80 not yet implemented"
, "/build/llvm-toolchain-snapshot-10~svn374877/lib/Target/X86/X86ISelLowering.cpp"
, 22317)
;
22318 } else if (ArgVT.isFloatingPoint() && ArgSize <= 16 /*bytes*/) {
22319 ArgMode = 2; // Argument passed in XMM register. Use fp_offset.
22320 } else if (ArgVT.isInteger() && ArgSize <= 32 /*bytes*/) {
22321 ArgMode = 1; // Argument passed in GPR64 register(s). Use gp_offset.
22322 } else {
22323 llvm_unreachable("Unhandled argument type in LowerVAARG")::llvm::llvm_unreachable_internal("Unhandled argument type in LowerVAARG"
, "/build/llvm-toolchain-snapshot-10~svn374877/lib/Target/X86/X86ISelLowering.cpp"
, 22323)
;
22324 }
22325
22326 if (ArgMode == 2) {
22327 // Sanity Check: Make sure using fp_offset makes sense.
22328 assert(!Subtarget.useSoftFloat() &&
22329 !(MF.getFunction().hasFnAttribute(Attribute::NoImplicitFloat)) &&
22330 Subtarget.hasSSE1());
22331 }
22332
22333 // Insert VAARG_64 node into the DAG
22334 // VAARG_64 returns two values: Variable Argument Address, Chain
22335 SDValue InstOps[] = {Chain, SrcPtr, DAG.getConstant(ArgSize, dl, MVT::i32),
22336 DAG.getConstant(ArgMode, dl, MVT::i8),
22337 DAG.getConstant(Align, dl, MVT::i32)};
22338 SDVTList VTs = DAG.getVTList(getPointerTy(DAG.getDataLayout()), MVT::Other);
22339 SDValue VAARG = DAG.getMemIntrinsicNode(
22340 X86ISD::VAARG_64, dl,
22341 VTs, InstOps, MVT::i64,
22342 MachinePointerInfo(SV),
22343 /*Align=*/0,
22344 MachineMemOperand::MOLoad | MachineMemOperand::MOStore);
22345 Chain = VAARG.getValue(1);
22346
22347 // Load the next argument and return it
22348 return DAG.getLoad(ArgVT, dl, Chain, VAARG, MachinePointerInfo());
22349}
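
A source-level example of what the VAARG_64 lowering above has to handle: the integer argument is read through gp_offset (ArgMode 1) and the double through fp_offset (ArgMode 2). Illustrative only:

#include <cstdarg>

double SumIntAndDouble(int Count, ...) {
  va_list Args;
  va_start(Args, Count);
  int I = va_arg(Args, int);       // gp_offset path (ArgMode 1)
  double D = va_arg(Args, double); // fp_offset path (ArgMode 2)
  va_end(Args);
  return I + D;
}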
22350
22351static SDValue LowerVACOPY(SDValue Op, const X86Subtarget &Subtarget,
22352 SelectionDAG &DAG) {
22353 // X86-64 va_list is a struct { i32, i32, i8*, i8* }, except on Windows,
22354 // where a va_list is still an i8*.
22355 assert(Subtarget.is64Bit() && "This code only handles 64-bit va_copy!")((Subtarget.is64Bit() && "This code only handles 64-bit va_copy!"
) ? static_cast<void> (0) : __assert_fail ("Subtarget.is64Bit() && \"This code only handles 64-bit va_copy!\""
, "/build/llvm-toolchain-snapshot-10~svn374877/lib/Target/X86/X86ISelLowering.cpp"
, 22355, __PRETTY_FUNCTION__))
;
22356 if (Subtarget.isCallingConvWin64(
22357 DAG.getMachineFunction().getFunction().getCallingConv()))
22358 // Probably a Win64 va_copy.
22359 return DAG.expandVACopy(Op.getNode());
22360
22361 SDValue Chain = Op.getOperand(0);
22362 SDValue DstPtr = Op.getOperand(1);
22363 SDValue SrcPtr = Op.getOperand(2);
22364 const Value *DstSV = cast<SrcValueSDNode>(Op.getOperand(3))->getValue();
22365 const Value *SrcSV = cast<SrcValueSDNode>(Op.getOperand(4))->getValue();
22366 SDLoc DL(Op);
22367
22368 return DAG.getMemcpy(Chain, DL, DstPtr, SrcPtr,
22369 DAG.getIntPtrConstant(24, DL), 8, /*isVolatile*/false,
22370 false, false,
22371 MachinePointerInfo(DstSV), MachinePointerInfo(SrcSV));
22372}
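
On the non-Windows 64-bit path, va_copy is simply a copy of the 24-byte va_list structure (4 + 4 + 8 + 8), which is exactly the constant passed to getMemcpy above. A trivial equivalent for illustration:

#include <cstring>

static void VaCopyEquivalent(void *Dst, const void *Src) {
  std::memcpy(Dst, Src, 24); // sizeof(__va_list_tag) on the SysV x86-64 ABI
}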
22373
22374// Helper to get immediate/variable SSE shift opcode from other shift opcodes.
22375static unsigned getTargetVShiftUniformOpcode(unsigned Opc, bool IsVariable) {
22376 switch (Opc) {
22377 case ISD::SHL:
22378 case X86ISD::VSHL:
22379 case X86ISD::VSHLI:
22380 return IsVariable ? X86ISD::VSHL : X86ISD::VSHLI;
22381 case ISD::SRL:
22382 case X86ISD::VSRL:
22383 case X86ISD::VSRLI:
22384 return IsVariable ? X86ISD::VSRL : X86ISD::VSRLI;
22385 case ISD::SRA:
22386 case X86ISD::VSRA:
22387 case X86ISD::VSRAI:
22388 return IsVariable ? X86ISD::VSRA : X86ISD::VSRAI;
22389 }
22390 llvm_unreachable("Unknown target vector shift node")::llvm::llvm_unreachable_internal("Unknown target vector shift node"
, "/build/llvm-toolchain-snapshot-10~svn374877/lib/Target/X86/X86ISelLowering.cpp"
, 22390)
;
22391}
22392
22393/// Handle vector element shifts where the shift amount is a constant.
22394/// Takes immediate version of shift as input.
22395static SDValue getTargetVShiftByConstNode(unsigned Opc, const SDLoc &dl, MVT VT,
22396 SDValue SrcOp, uint64_t ShiftAmt,
22397 SelectionDAG &DAG) {
22398 MVT ElementType = VT.getVectorElementType();
22399
22400 // Bitcast the source vector to the output type; this is mainly necessary
22401 // for vXi8/vXi64 shifts.
22402 if (VT != SrcOp.getSimpleValueType())
22403 SrcOp = DAG.getBitcast(VT, SrcOp);
22404
22405 // Fold this packed shift into its first operand if ShiftAmt is 0.
22406 if (ShiftAmt == 0)
22407 return SrcOp;
22408
22409 // Check for ShiftAmt >= element width
22410 if (ShiftAmt >= ElementType.getSizeInBits()) {
22411 if (Opc == X86ISD::VSRAI)
22412 ShiftAmt = ElementType.getSizeInBits() - 1;
22413 else
22414 return DAG.getConstant(0, dl, VT);
22415 }
22416
22417 assert((Opc == X86ISD::VSHLI || Opc == X86ISD::VSRLI || Opc == X86ISD::VSRAI)
22418 && "Unknown target vector shift-by-constant node");
22419
22420 // Fold this packed vector shift into a build vector if SrcOp is a
22421 // vector of Constants or UNDEFs.
22422 if (ISD::isBuildVectorOfConstantSDNodes(SrcOp.getNode())) {
22423 SmallVector<SDValue, 8> Elts;
22424 unsigned NumElts = SrcOp->getNumOperands();
22425
22426 switch (Opc) {
22427 default: llvm_unreachable("Unknown opcode!")::llvm::llvm_unreachable_internal("Unknown opcode!", "/build/llvm-toolchain-snapshot-10~svn374877/lib/Target/X86/X86ISelLowering.cpp"
, 22427)
;
22428 case X86ISD::VSHLI:
22429 for (unsigned i = 0; i != NumElts; ++i) {
22430 SDValue CurrentOp = SrcOp->getOperand(i);
22431 if (CurrentOp->isUndef()) {
22432 Elts.push_back(CurrentOp);
22433 continue;
22434 }
22435 auto *ND = cast<ConstantSDNode>(CurrentOp);
22436 const APInt &C = ND->getAPIntValue();
22437 Elts.push_back(DAG.getConstant(C.shl(ShiftAmt), dl, ElementType));
22438 }
22439 break;
22440 case X86ISD::VSRLI:
22441 for (unsigned i = 0; i != NumElts; ++i) {
22442 SDValue CurrentOp = SrcOp->getOperand(i);
22443 if (CurrentOp->isUndef()) {
22444 Elts.push_back(CurrentOp);
22445 continue;
22446 }
22447 auto *ND = cast<ConstantSDNode>(CurrentOp);
22448 const APInt &C = ND->getAPIntValue();
22449 Elts.push_back(DAG.getConstant(C.lshr(ShiftAmt), dl, ElementType));
22450 }
22451 break;
22452 case X86ISD::VSRAI:
22453 for (unsigned i = 0; i != NumElts; ++i) {
22454 SDValue CurrentOp = SrcOp->getOperand(i);
22455 if (CurrentOp->isUndef()) {
22456 Elts.push_back(CurrentOp);
22457 continue;
22458 }
22459 auto *ND = cast<ConstantSDNode>(CurrentOp);
22460 const APInt &C = ND->getAPIntValue();
22461 Elts.push_back(DAG.getConstant(C.ashr(ShiftAmt), dl, ElementType));
22462 }
22463 break;
22464 }
22465
22466 return DAG.getBuildVector(VT, dl, Elts);
22467 }
22468
22469 return DAG.getNode(Opc, dl, VT, SrcOp,
22470 DAG.getTargetConstant(ShiftAmt, dl, MVT::i8));
22471}
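
The out-of-range handling above mirrors x86 semantics: a logical shift by at least the element width produces zero, while an arithmetic right shift is clamped to width - 1 so it still yields the sign splat. A scalar sketch for 32-bit elements, assuming the usual arithmetic behavior of >> on signed values:

#include <cstdint>

static int32_t SraClamped(int32_t X, uint64_t Amt) {
  if (Amt >= 32)
    Amt = 31;      // the X86ISD::VSRAI clamp above
  return X >> Amt; // e.g. -8 >> 31 == -1, an all-ones sign splat
}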
22472
22473/// Handle vector element shifts where the shift amount may or may not be a
22474/// constant. Takes immediate version of shift as input.
22475static SDValue getTargetVShiftNode(unsigned Opc, const SDLoc &dl, MVT VT,
22476 SDValue SrcOp, SDValue ShAmt,
22477 const X86Subtarget &Subtarget,
22478 SelectionDAG &DAG) {
22479 MVT SVT = ShAmt.getSimpleValueType();
22480 assert((SVT == MVT::i32 || SVT == MVT::i64) && "Unexpected value type!");
22481
22482 // Catch shift-by-constant.
22483 if (ConstantSDNode *CShAmt = dyn_cast<ConstantSDNode>(ShAmt))
22484 return getTargetVShiftByConstNode(Opc, dl, VT, SrcOp,
22485 CShAmt->getZExtValue(), DAG);
22486
22487 // Change opcode to non-immediate version.
22488 Opc = getTargetVShiftUniformOpcode(Opc, true);
22489
22490 // Need to build a vector containing shift amount.
22491 // SSE/AVX packed shifts only use the lower 64-bit of the shift count.
22492 // +====================+============+=======================================+
22493 // | ShAmt is | HasSSE4.1? | Construct ShAmt vector as |
22494 // +====================+============+=======================================+
22495 // | i64 | Yes, No | Use ShAmt as lowest elt |
22496 // | i32 | Yes | zero-extend in-reg |
22497 // | (i32 zext(i16/i8)) | Yes | zero-extend in-reg |
22498 // | (i32 zext(i16/i8)) | No | byte-shift-in-reg |
22499 // | i16/i32 | No | v4i32 build_vector(ShAmt, 0, ud, ud)) |
22500 // +====================+============+=======================================+
22501
22502 if (SVT == MVT::i64)
22503 ShAmt = DAG.getNode(ISD::SCALAR_TO_VECTOR, SDLoc(ShAmt), MVT::v2i64, ShAmt);
22504 else if (ShAmt.getOpcode() == ISD::ZERO_EXTEND &&
22505 ShAmt.getOperand(0).getOpcode() == ISD::EXTRACT_VECTOR_ELT &&
22506 (ShAmt.getOperand(0).getSimpleValueType() == MVT::i16 ||
22507 ShAmt.getOperand(0).getSimpleValueType() == MVT::i8)) {
22508 ShAmt = ShAmt.getOperand(0);
22509 MVT AmtTy = ShAmt.getSimpleValueType() == MVT::i8 ? MVT::v16i8 : MVT::v8i16;
22510 ShAmt = DAG.getNode(ISD::SCALAR_TO_VECTOR, SDLoc(ShAmt), AmtTy, ShAmt);
22511 if (Subtarget.hasSSE41())
22512 ShAmt = DAG.getNode(ISD::ZERO_EXTEND_VECTOR_INREG, SDLoc(ShAmt),
22513 MVT::v2i64, ShAmt);
22514 else {
22515 SDValue ByteShift = DAG.getTargetConstant(
22516 (128 - AmtTy.getScalarSizeInBits()) / 8, SDLoc(ShAmt), MVT::i8);
22517 ShAmt = DAG.getBitcast(MVT::v16i8, ShAmt);
22518 ShAmt = DAG.getNode(X86ISD::VSHLDQ, SDLoc(ShAmt), MVT::v16i8, ShAmt,
22519 ByteShift);
22520 ShAmt = DAG.getNode(X86ISD::VSRLDQ, SDLoc(ShAmt), MVT::v16i8, ShAmt,
22521 ByteShift);
22522 }
22523 } else if (Subtarget.hasSSE41() &&
22524 ShAmt.getOpcode() == ISD::EXTRACT_VECTOR_ELT) {
22525 ShAmt = DAG.getNode(ISD::SCALAR_TO_VECTOR, SDLoc(ShAmt), MVT::v4i32, ShAmt);
22526 ShAmt = DAG.getNode(ISD::ZERO_EXTEND_VECTOR_INREG, SDLoc(ShAmt),
22527 MVT::v2i64, ShAmt);
22528 } else {
22529 SDValue ShOps[4] = {ShAmt, DAG.getConstant(0, dl, SVT), DAG.getUNDEF(SVT),
22530 DAG.getUNDEF(SVT)};
22531 ShAmt = DAG.getBuildVector(MVT::v4i32, dl, ShOps);
22532 }
22533
22534 // The return type has to be a 128-bit type with the same element
22535 // type as the input type.
22536 MVT EltVT = VT.getVectorElementType();
22537 MVT ShVT = MVT::getVectorVT(EltVT, 128 / EltVT.getSizeInBits());
22538
22539 ShAmt = DAG.getBitcast(ShVT, ShAmt);
22540 return DAG.getNode(Opc, dl, VT, SrcOp, ShAmt);
22541}
22542
22543/// Return Mask with the necessary casting or extending
22544/// for \p Mask according to \p MaskVT when lowering masking intrinsics
22545static SDValue getMaskNode(SDValue Mask, MVT MaskVT,
22546 const X86Subtarget &Subtarget, SelectionDAG &DAG,
22547 const SDLoc &dl) {
22548
22549 if (isAllOnesConstant(Mask))
22550 return DAG.getConstant(1, dl, MaskVT);
22551 if (X86::isZeroNode(Mask))
22552 return DAG.getConstant(0, dl, MaskVT);
22553
22554 assert(MaskVT.bitsLE(Mask.getSimpleValueType()) && "Unexpected mask size!");
22555
22556 if (Mask.getSimpleValueType() == MVT::i64 && Subtarget.is32Bit()) {
22557 assert(MaskVT == MVT::v64i1 && "Expected v64i1 mask!");
22558 assert(Subtarget.hasBWI() && "Expected AVX512BW target!");
22559 // In 32-bit mode a bitcast of i64 is illegal; extend/split it.
22560 SDValue Lo, Hi;
22561 Lo = DAG.getNode(ISD::EXTRACT_ELEMENT, dl, MVT::i32, Mask,
22562 DAG.getConstant(0, dl, MVT::i32));
22563 Hi = DAG.getNode(ISD::EXTRACT_ELEMENT, dl, MVT::i32, Mask,
22564 DAG.getConstant(1, dl, MVT::i32));
22565
22566 Lo = DAG.getBitcast(MVT::v32i1, Lo);
22567 Hi = DAG.getBitcast(MVT::v32i1, Hi);
22568
22569 return DAG.getNode(ISD::CONCAT_VECTORS, dl, MVT::v64i1, Lo, Hi);
22570 } else {
22571 MVT BitcastVT = MVT::getVectorVT(MVT::i1,
22572 Mask.getSimpleValueType().getSizeInBits());
22573 // When MaskVT equals v2i1 or v4i1, the low 2 or 4 elements
22574 // are extracted by EXTRACT_SUBVECTOR.
22575 return DAG.getNode(ISD::EXTRACT_SUBVECTOR, dl, MaskVT,
22576 DAG.getBitcast(BitcastVT, Mask),
22577 DAG.getIntPtrConstant(0, dl));
22578 }
22579}
22580
22581/// Return (and \p Op, \p Mask) for compare instructions or
22582/// (vselect \p Mask, \p Op, \p PreservedSrc) for others along with the
22583/// necessary casting or extending for \p Mask when lowering masking intrinsics
22584static SDValue getVectorMaskingNode(SDValue Op, SDValue Mask,
22585 SDValue PreservedSrc,
22586 const X86Subtarget &Subtarget,
22587 SelectionDAG &DAG) {
22588 MVT VT = Op.getSimpleValueType();
22589 MVT MaskVT = MVT::getVectorVT(MVT::i1, VT.getVectorNumElements());
22590 unsigned OpcodeSelect = ISD::VSELECT;
22591 SDLoc dl(Op);
22592
22593 if (isAllOnesConstant(Mask))
22594 return Op;
22595
22596 SDValue VMask = getMaskNode(Mask, MaskVT, Subtarget, DAG, dl);
22597
22598 if (PreservedSrc.isUndef())
22599 PreservedSrc = getZeroVector(VT, Subtarget, DAG, dl);
22600 return DAG.getNode(OpcodeSelect, dl, VT, VMask, Op, PreservedSrc);
22601}
22602
22603/// Creates an SDNode for a predicated scalar operation.
22604/// \returns (X86vselect \p Mask, \p Op, \p PreservedSrc).
22605 /// The mask comes in as MVT::i8 and should be transformed
22606/// to MVT::v1i1 while lowering masking intrinsics.
22607/// The main difference between ScalarMaskingNode and VectorMaskingNode is using
22608/// "X86select" instead of "vselect". We just can't create the "vselect" node
22609/// for a scalar instruction.
22610static SDValue getScalarMaskingNode(SDValue Op, SDValue Mask,
22611 SDValue PreservedSrc,
22612 const X86Subtarget &Subtarget,
22613 SelectionDAG &DAG) {
22614
22615 if (auto *MaskConst = dyn_cast<ConstantSDNode>(Mask))
22616 if (MaskConst->getZExtValue() & 0x1)
22617 return Op;
22618
22619 MVT VT = Op.getSimpleValueType();
22620 SDLoc dl(Op);
22621
22622 assert(Mask.getValueType() == MVT::i8 && "Unexpect type");
22623 SDValue IMask = DAG.getNode(ISD::EXTRACT_SUBVECTOR, dl, MVT::v1i1,
22624 DAG.getBitcast(MVT::v8i1, Mask),
22625 DAG.getIntPtrConstant(0, dl));
22626 if (Op.getOpcode() == X86ISD::FSETCCM ||
22627 Op.getOpcode() == X86ISD::FSETCCM_SAE ||
22628 Op.getOpcode() == X86ISD::VFPCLASSS)
22629 return DAG.getNode(ISD::AND, dl, VT, Op, IMask);
22630
22631 if (PreservedSrc.isUndef())
22632 PreservedSrc = getZeroVector(VT, Subtarget, DAG, dl);
22633 return DAG.getNode(X86ISD::SELECTS, dl, VT, IMask, Op, PreservedSrc);
22634}
22635
22636static int getSEHRegistrationNodeSize(const Function *Fn) {
22637 if (!Fn->hasPersonalityFn())
22638 report_fatal_error(
22639 "querying registration node size for function without personality");
22640 // The RegNodeSize is 6 32-bit words for SEH and 4 for C++ EH. See
22641 // WinEHStatePass for the full struct definition.
22642 switch (classifyEHPersonality(Fn->getPersonalityFn())) {
22643 case EHPersonality::MSVC_X86SEH: return 24;
22644 case EHPersonality::MSVC_CXX: return 16;
22645 default: break;
22646 }
22647 report_fatal_error(
22648 "can only recover FP for 32-bit MSVC EH personality functions");
22649}
22650
22651/// When the MSVC runtime transfers control to us, either to an outlined
22652/// function or when returning to a parent frame after catching an exception, we
22653/// recover the parent frame pointer by doing arithmetic on the incoming EBP.
22654/// Here's the math:
22655/// RegNodeBase = EntryEBP - RegNodeSize
22656/// ParentFP = RegNodeBase - ParentFrameOffset
22657/// Subtracting RegNodeSize takes us to the offset of the registration node, and
22658/// subtracting the offset (negative on x86) takes us back to the parent FP.
22659static SDValue recoverFramePointer(SelectionDAG &DAG, const Function *Fn,
22660 SDValue EntryEBP) {
22661 MachineFunction &MF = DAG.getMachineFunction();
22662 SDLoc dl;
22663
22664 const TargetLowering &TLI = DAG.getTargetLoweringInfo();
22665 MVT PtrVT = TLI.getPointerTy(DAG.getDataLayout());
22666
22667 // It's possible that the parent function no longer has a personality function
22668 // if the exceptional code was optimized away, in which case we just return
22669 // the incoming EBP.
22670 if (!Fn->hasPersonalityFn())
22671 return EntryEBP;
22672
22673 // Get an MCSymbol that will ultimately resolve to the frame offset of the EH
22674 // registration, or the .set_setframe offset.
22675 MCSymbol *OffsetSym =
22676 MF.getMMI().getContext().getOrCreateParentFrameOffsetSymbol(
22677 GlobalValue::dropLLVMManglingEscape(Fn->getName()));
22678 SDValue OffsetSymVal = DAG.getMCSymbol(OffsetSym, PtrVT);
22679 SDValue ParentFrameOffset =
22680 DAG.getNode(ISD::LOCAL_RECOVER, dl, PtrVT, OffsetSymVal);
22681
22682 // Return EntryEBP + ParentFrameOffset for x64. This adjusts from RSP after
22683 // prologue to RBP in the parent function.
22684 const X86Subtarget &Subtarget =
22685 static_cast<const X86Subtarget &>(DAG.getSubtarget());
22686 if (Subtarget.is64Bit())
22687 return DAG.getNode(ISD::ADD, dl, PtrVT, EntryEBP, ParentFrameOffset);
22688
22689 int RegNodeSize = getSEHRegistrationNodeSize(Fn);
22690 // RegNodeBase = EntryEBP - RegNodeSize
22691 // ParentFP = RegNodeBase - ParentFrameOffset
22692 SDValue RegNodeBase = DAG.getNode(ISD::SUB, dl, PtrVT, EntryEBP,
22693 DAG.getConstant(RegNodeSize, dl, PtrVT));
22694 return DAG.getNode(ISD::SUB, dl, PtrVT, RegNodeBase, ParentFrameOffset);
22695}
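
A scalar version of the 32-bit recovery math above, with made-up example values (EntryEBP = 0x1000, RegNodeSize = 24 for the MSVC x86 SEH personality, ParentFrameOffset = -64):

#include <cstdint>

// RegNodeBase = 0x1000 - 24 = 0x0FE8; ParentFP = 0x0FE8 - (-64) = 0x1028.
static uint32_t RecoverParentFP(uint32_t EntryEBP, int32_t RegNodeSize,
                                int32_t ParentFrameOffset) {
  uint32_t RegNodeBase = EntryEBP - RegNodeSize;
  return RegNodeBase - ParentFrameOffset; // the offset is negative on x86
}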
22696
22697SDValue X86TargetLowering::LowerINTRINSIC_WO_CHAIN(SDValue Op,
22698 SelectionDAG &DAG) const {
22699 // Helper to detect if the operand is CUR_DIRECTION rounding mode.
22700 auto isRoundModeCurDirection = [](SDValue Rnd) {
22701 if (auto *C = dyn_cast<ConstantSDNode>(Rnd))
22702 return C->getAPIntValue() == X86::STATIC_ROUNDING::CUR_DIRECTION;
22703
22704 return false;
22705 };
22706 auto isRoundModeSAE = [](SDValue Rnd) {
22707 if (auto *C = dyn_cast<ConstantSDNode>(Rnd)) {
22708 unsigned RC = C->getZExtValue();
22709 if (RC & X86::STATIC_ROUNDING::NO_EXC) {
22710 // Clear the NO_EXC bit and check remaining bits.
22711 RC ^= X86::STATIC_ROUNDING::NO_EXC;
22712 // As a convenience we allow either no other bits set or, explicitly,
22713 // the current-direction value.
22714 return RC == 0 || RC == X86::STATIC_ROUNDING::CUR_DIRECTION;
22715 }
22716 }
22717
22718 return false;
22719 };
22720 auto isRoundModeSAEToX = [](SDValue Rnd, unsigned &RC) {
22721 if (auto *C = dyn_cast<ConstantSDNode>(Rnd)) {
22722 RC = C->getZExtValue();
22723 if (RC & X86::STATIC_ROUNDING::NO_EXC) {
22724 // Clear the NO_EXC bit and check remaining bits.
22725 RC ^= X86::STATIC_ROUNDING::NO_EXC;
22726 return RC == X86::STATIC_ROUNDING::TO_NEAREST_INT ||
22727 RC == X86::STATIC_ROUNDING::TO_NEG_INF ||
22728 RC == X86::STATIC_ROUNDING::TO_POS_INF ||
22729 RC == X86::STATIC_ROUNDING::TO_ZERO;
22730 }
22731 }
22732
22733 return false;
22734 };
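// Worked examples for the three helpers above, assuming the usual
// X86::STATIC_ROUNDING encoding (TO_NEAREST_INT=0, TO_NEG_INF=1,
// TO_POS_INF=2, TO_ZERO=3, CUR_DIRECTION=4, NO_EXC=8):
//   Rnd = 4  (CUR_DIRECTION)          -> isRoundModeCurDirection() is true
//   Rnd = 12 (NO_EXC | CUR_DIRECTION) -> isRoundModeSAE() is true
//   Rnd = 9  (NO_EXC | TO_NEG_INF)    -> isRoundModeSAEToX() is true, RC = 1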
22735
22736 SDLoc dl(Op);
22737 unsigned IntNo = Op.getConstantOperandVal(0);
22738 MVT VT = Op.getSimpleValueType();
22739 const IntrinsicData* IntrData = getIntrinsicWithoutChain(IntNo);
22740 if (IntrData) {
22741 switch(IntrData->Type) {
22742 case INTR_TYPE_1OP: {
22743 // We specify 2 possible opcodes for intrinsics with rounding modes.
22744 // First, we check whether the intrinsic may have a non-default rounding
22745 // mode (IntrData->Opc1 != 0), and then we check the rounding mode operand.
22746 unsigned IntrWithRoundingModeOpcode = IntrData->Opc1;
22747 if (IntrWithRoundingModeOpcode != 0) {
22748 SDValue Rnd = Op.getOperand(2);
22749 unsigned RC = 0;
22750 if (isRoundModeSAEToX(Rnd, RC))
22751 return DAG.getNode(IntrWithRoundingModeOpcode, dl, Op.getValueType(),
22752 Op.getOperand(1),
22753 DAG.getTargetConstant(RC, dl, MVT::i32));
22754 if (!isRoundModeCurDirection(Rnd))
22755 return SDValue();
22756 }
22757 return DAG.getNode(IntrData->Opc0, dl, Op.getValueType(), Op.getOperand(1));
22758 }
22759 case INTR_TYPE_1OP_SAE: {
22760 SDValue Sae = Op.getOperand(2);
22761
22762 unsigned Opc;
22763 if (isRoundModeCurDirection(Sae))
22764 Opc = IntrData->Opc0;
22765 else if (isRoundModeSAE(Sae))
22766 Opc = IntrData->Opc1;
22767 else
22768 return SDValue();
22769
22770 return DAG.getNode(Opc, dl, Op.getValueType(), Op.getOperand(1));
22771 }
22772 case INTR_TYPE_2OP: {
22773 SDValue Src2 = Op.getOperand(2);
22774
22775 // We specify 2 possible opcodes for intrinsics with rounding modes.
22776 // First, we check whether the intrinsic may have a non-default rounding
22777 // mode (IntrData->Opc1 != 0), and then we check the rounding mode operand.
22778 unsigned IntrWithRoundingModeOpcode = IntrData->Opc1;
22779 if (IntrWithRoundingModeOpcode != 0) {
22780 SDValue Rnd = Op.getOperand(3);
22781 unsigned RC = 0;
22782 if (isRoundModeSAEToX(Rnd, RC))
22783 return DAG.getNode(IntrWithRoundingModeOpcode, dl, Op.getValueType(),
22784 Op.getOperand(1), Src2,
22785 DAG.getTargetConstant(RC, dl, MVT::i32));
22786 if (!isRoundModeCurDirection(Rnd))
22787 return SDValue();
22788 }
22789
22790 return DAG.getNode(IntrData->Opc0, dl, Op.getValueType(),
22791 Op.getOperand(1), Src2);
22792 }
22793 case INTR_TYPE_2OP_SAE: {
22794 SDValue Sae = Op.getOperand(3);
22795
22796 unsigned Opc;
22797 if (isRoundModeCurDirection(Sae))
22798 Opc = IntrData->Opc0;
22799 else if (isRoundModeSAE(Sae))
22800 Opc = IntrData->Opc1;
22801 else
22802 return SDValue();
22803
22804 return DAG.getNode(Opc, dl, Op.getValueType(), Op.getOperand(1),
22805 Op.getOperand(2));
22806 }
22807 case INTR_TYPE_3OP:
22808 case INTR_TYPE_3OP_IMM8: {
22809 SDValue Src1 = Op.getOperand(1);
22810 SDValue Src2 = Op.getOperand(2);
22811 SDValue Src3 = Op.getOperand(3);
22812
22813 // We specify 2 possible opcodes for intrinsics with rounding modes.
22814 // First, we check whether the intrinsic may have a non-default rounding
22815 // mode (IntrData->Opc1 != 0), and then we check the rounding mode operand.
22816 unsigned IntrWithRoundingModeOpcode = IntrData->Opc1;
22817 if (IntrWithRoundingModeOpcode != 0) {
22818 SDValue Rnd = Op.getOperand(4);
22819 unsigned RC = 0;
22820 if (isRoundModeSAEToX(Rnd, RC))
22821 return DAG.getNode(IntrWithRoundingModeOpcode, dl, Op.getValueType(),
22822 Src1, Src2, Src3,
22823 DAG.getTargetConstant(RC, dl, MVT::i32));
22824 if (!isRoundModeCurDirection(Rnd))
22825 return SDValue();
22826 }
22827
22828 return DAG.getNode(IntrData->Opc0, dl, Op.getValueType(),
22829 Src1, Src2, Src3);
22830 }
22831 case INTR_TYPE_4OP:
22832 return DAG.getNode(IntrData->Opc0, dl, Op.getValueType(), Op.getOperand(1),
22833 Op.getOperand(2), Op.getOperand(3), Op.getOperand(4));
22834 case INTR_TYPE_1OP_MASK: {
22835 SDValue Src = Op.getOperand(1);
22836 SDValue PassThru = Op.getOperand(2);
22837 SDValue Mask = Op.getOperand(3);
22838 // We add the rounding mode to the node when
22839 // - an RC opcode is specified and
22840 // - RC is not "current direction".
22841 unsigned IntrWithRoundingModeOpcode = IntrData->Opc1;
22842 if (IntrWithRoundingModeOpcode != 0) {
22843 SDValue Rnd = Op.getOperand(4);
22844 unsigned RC = 0;
22845 if (isRoundModeSAEToX(Rnd, RC))
22846 return getVectorMaskingNode(
22847 DAG.getNode(IntrWithRoundingModeOpcode, dl, Op.getValueType(),
22848 Src, DAG.getTargetConstant(RC, dl, MVT::i32)),
22849 Mask, PassThru, Subtarget, DAG);
22850 if (!isRoundModeCurDirection(Rnd))
22851 return SDValue();
22852 }
22853 return getVectorMaskingNode(DAG.getNode(IntrData->Opc0, dl, VT, Src),
22854 Mask, PassThru, Subtarget, DAG);
22855 }
22856 case INTR_TYPE_1OP_MASK_SAE: {
22857 SDValue Src = Op.getOperand(1);
22858 SDValue PassThru = Op.getOperand(2);
22859 SDValue Mask = Op.getOperand(3);
22860 SDValue Rnd = Op.getOperand(4);
22861
22862 unsigned Opc;
22863 if (isRoundModeCurDirection(Rnd))
22864 Opc = IntrData->Opc0;
22865 else if (isRoundModeSAE(Rnd))
22866 Opc = IntrData->Opc1;
22867 else
22868 return SDValue();
22869
22870 return getVectorMaskingNode(DAG.getNode(Opc, dl, VT, Src),
22871 Mask, PassThru, Subtarget, DAG);
22872 }
22873 case INTR_TYPE_SCALAR_MASK: {
22874 SDValue Src1 = Op.getOperand(1);
22875 SDValue Src2 = Op.getOperand(2);
22876 SDValue passThru = Op.getOperand(3);
22877 SDValue Mask = Op.getOperand(4);
22878 unsigned IntrWithRoundingModeOpcode = IntrData->Opc1;
22879 // There are 2 kinds of intrinsics in this group:
22880 // (1) With suppress-all-exceptions (sae) or rounding mode - 6 operands
22881 // (2) With rounding mode and sae - 7 operands.
22882 bool HasRounding = IntrWithRoundingModeOpcode != 0;
22883 if (Op.getNumOperands() == (5U + HasRounding)) {
22884 if (HasRounding) {
22885 SDValue Rnd = Op.getOperand(5);
22886 unsigned RC = 0;
22887 if (isRoundModeSAEToX(Rnd, RC))
22888 return getScalarMaskingNode(
22889 DAG.getNode(IntrWithRoundingModeOpcode, dl, VT, Src1, Src2,
22890 DAG.getTargetConstant(RC, dl, MVT::i32)),
22891 Mask, passThru, Subtarget, DAG);
22892 if (!isRoundModeCurDirection(Rnd))
22893 return SDValue();
22894 }
22895 return getScalarMaskingNode(DAG.getNode(IntrData->Opc0, dl, VT, Src1,
22896 Src2),
22897 Mask, passThru, Subtarget, DAG);
22898 }
22899
22900 assert(Op.getNumOperands() == (6U + HasRounding) &&
22901 "Unexpected intrinsic form");
22902 SDValue RoundingMode = Op.getOperand(5);
22903 unsigned Opc = IntrData->Opc0;
22904 if (HasRounding) {
22905 SDValue Sae = Op.getOperand(6);
22906 if (isRoundModeSAE(Sae))
22907 Opc = IntrWithRoundingModeOpcode;
22908 else if (!isRoundModeCurDirection(Sae))
22909 return SDValue();
22910 }
22911 return getScalarMaskingNode(DAG.getNode(Opc, dl, VT, Src1,
22912 Src2, RoundingMode),
22913 Mask, passThru, Subtarget, DAG);
22914 }
22915 case INTR_TYPE_SCALAR_MASK_RND: {
22916 SDValue Src1 = Op.getOperand(1);
22917 SDValue Src2 = Op.getOperand(2);
22918 SDValue passThru = Op.getOperand(3);
22919 SDValue Mask = Op.getOperand(4);
22920 SDValue Rnd = Op.getOperand(5);
22921
22922 SDValue NewOp;
22923 unsigned RC = 0;
22924 if (isRoundModeCurDirection(Rnd))
22925 NewOp = DAG.getNode(IntrData->Opc0, dl, VT, Src1, Src2);
22926 else if (isRoundModeSAEToX(Rnd, RC))
22927 NewOp = DAG.getNode(IntrData->Opc1, dl, VT, Src1, Src2,
22928 DAG.getTargetConstant(RC, dl, MVT::i32));
22929 else
22930 return SDValue();
22931
22932 return getScalarMaskingNode(NewOp, Mask, passThru, Subtarget, DAG);
22933 }
22934 case INTR_TYPE_SCALAR_MASK_SAE: {
22935 SDValue Src1 = Op.getOperand(1);
22936 SDValue Src2 = Op.getOperand(2);
22937 SDValue passThru = Op.getOperand(3);
22938 SDValue Mask = Op.getOperand(4);
22939 SDValue Sae = Op.getOperand(5);
22940 unsigned Opc;
22941 if (isRoundModeCurDirection(Sae))
22942 Opc = IntrData->Opc0;
22943 else if (isRoundModeSAE(Sae))
22944 Opc = IntrData->Opc1;
22945 else
22946 return SDValue();
22947
22948 return getScalarMaskingNode(DAG.getNode(Opc, dl, VT, Src1, Src2),
22949 Mask, passThru, Subtarget, DAG);
22950 }
22951 case INTR_TYPE_2OP_MASK: {
22952 SDValue Src1 = Op.getOperand(1);
22953 SDValue Src2 = Op.getOperand(2);
22954 SDValue PassThru = Op.getOperand(3);
22955 SDValue Mask = Op.getOperand(4);
22956 SDValue NewOp;
22957 if (IntrData->Opc1 != 0) {
22958 SDValue Rnd = Op.getOperand(5);
22959 unsigned RC = 0;
22960 if (isRoundModeSAEToX(Rnd, RC))
22961 NewOp = DAG.getNode(IntrData->Opc1, dl, VT, Src1, Src2,
22962 DAG.getTargetConstant(RC, dl, MVT::i32));
22963 else if (!isRoundModeCurDirection(Rnd))
22964 return SDValue();
22965 }
22966 if (!NewOp)
22967 NewOp = DAG.getNode(IntrData->Opc0, dl, VT, Src1, Src2);
22968 return getVectorMaskingNode(NewOp, Mask, PassThru, Subtarget, DAG);
22969 }
22970 case INTR_TYPE_2OP_MASK_SAE: {
22971 SDValue Src1 = Op.getOperand(1);
22972 SDValue Src2 = Op.getOperand(2);
22973 SDValue PassThru = Op.getOperand(3);
22974 SDValue Mask = Op.getOperand(4);
22975
22976 unsigned Opc = IntrData->Opc0;
22977 if (IntrData->Opc1 != 0) {
22978 SDValue Sae = Op.getOperand(5);
22979 if (isRoundModeSAE(Sae))
22980 Opc = IntrData->Opc1;
22981 else if (!isRoundModeCurDirection(Sae))
22982 return SDValue();
22983 }
22984
22985 return getVectorMaskingNode(DAG.getNode(Opc, dl, VT, Src1, Src2),
22986 Mask, PassThru, Subtarget, DAG);
22987 }
22988 case INTR_TYPE_3OP_SCALAR_MASK_SAE: {
22989 SDValue Src1 = Op.getOperand(1);
22990 SDValue Src2 = Op.getOperand(2);
22991 SDValue Src3 = Op.getOperand(3);
22992 SDValue PassThru = Op.getOperand(4);
22993 SDValue Mask = Op.getOperand(5);
22994 SDValue Sae = Op.getOperand(6);
22995 unsigned Opc;
22996 if (isRoundModeCurDirection(Sae))
22997 Opc = IntrData->Opc0;
22998 else if (isRoundModeSAE(Sae))
22999 Opc = IntrData->Opc1;
23000 else
23001 return SDValue();
23002
23003 return getScalarMaskingNode(DAG.getNode(Opc, dl, VT, Src1, Src2, Src3),
23004 Mask, PassThru, Subtarget, DAG);
23005 }
23006 case INTR_TYPE_3OP_MASK_SAE: {
23007 SDValue Src1 = Op.getOperand(1);
23008 SDValue Src2 = Op.getOperand(2);
23009 SDValue Src3 = Op.getOperand(3);
23010 SDValue PassThru = Op.getOperand(4);
23011 SDValue Mask = Op.getOperand(5);
23012
23013 unsigned Opc = IntrData->Opc0;
23014 if (IntrData->Opc1 != 0) {
23015 SDValue Sae = Op.getOperand(6);
23016 if (isRoundModeSAE(Sae))
23017 Opc = IntrData->Opc1;
23018 else if (!isRoundModeCurDirection(Sae))
23019 return SDValue();
23020 }
23021 return getVectorMaskingNode(DAG.getNode(Opc, dl, VT, Src1, Src2, Src3),
23022 Mask, PassThru, Subtarget, DAG);
23023 }
23024 case BLENDV: {
23025 SDValue Src1 = Op.getOperand(1);
23026 SDValue Src2 = Op.getOperand(2);
23027 SDValue Src3 = Op.getOperand(3);
23028
23029 EVT MaskVT = Src3.getValueType().changeVectorElementTypeToInteger();
23030 Src3 = DAG.getBitcast(MaskVT, Src3);
23031
23032 // Reverse the operands to match VSELECT order.
23033 return DAG.getNode(IntrData->Opc0, dl, VT, Src3, Src2, Src1);
23034 }
23035 case VPERM_2OP : {
23036 SDValue Src1 = Op.getOperand(1);
23037 SDValue Src2 = Op.getOperand(2);
23038
23039 // Swap Src1 and Src2 in the node creation
23040 return DAG.getNode(IntrData->Opc0, dl, VT, Src2, Src1);
23041 }
23042 case IFMA_OP:
23043 // NOTE: We need to swizzle the operands to pass the multiply operands
23044 // first.
23045 return DAG.getNode(IntrData->Opc0, dl, Op.getValueType(),
23046 Op.getOperand(2), Op.getOperand(3), Op.getOperand(1));
23047 case FPCLASSS: {
23048 SDValue Src1 = Op.getOperand(1);
23049 SDValue Imm = Op.getOperand(2);
23050 SDValue Mask = Op.getOperand(3);
23051 SDValue FPclass = DAG.getNode(IntrData->Opc0, dl, MVT::v1i1, Src1, Imm);
23052 SDValue FPclassMask = getScalarMaskingNode(FPclass, Mask, SDValue(),
23053 Subtarget, DAG);
23054 // Need to fill with zeros to ensure the bitcast will produce zeroes
23055 // for the upper bits. An EXTRACT_ELEMENT here wouldn't guarantee that.
23056 SDValue Ins = DAG.getNode(ISD::INSERT_SUBVECTOR, dl, MVT::v8i1,
23057 DAG.getConstant(0, dl, MVT::v8i1),
23058 FPclassMask, DAG.getIntPtrConstant(0, dl));
23059 return DAG.getBitcast(MVT::i8, Ins);
23060 }
23061
23062 case CMP_MASK_CC: {
23063 MVT MaskVT = Op.getSimpleValueType();
23064 SDValue CC = Op.getOperand(3);
23065 // We specify 2 possible opcodes for intrinsics with rounding modes.
23066 // First, we check whether the intrinsic may have a non-default rounding
23067 // mode (IntrData->Opc1 != 0), and then we check the rounding mode operand.
23068 if (IntrData->Opc1 != 0) {
23069 SDValue Sae = Op.getOperand(4);
23070 if (isRoundModeSAE(Sae))
23071 return DAG.getNode(IntrData->Opc1, dl, MaskVT, Op.getOperand(1),
23072 Op.getOperand(2), CC, Sae);
23073 if (!isRoundModeCurDirection(Sae))
23074 return SDValue();
23075 }
23076 // Default rounding mode.
23077 return DAG.getNode(IntrData->Opc0, dl, MaskVT, Op.getOperand(1),
23078 Op.getOperand(2), CC);
23079 }
23080 case CMP_MASK_SCALAR_CC: {
23081 SDValue Src1 = Op.getOperand(1);
23082 SDValue Src2 = Op.getOperand(2);
23083 SDValue CC = Op.getOperand(3);
23084 SDValue Mask = Op.getOperand(4);
23085
23086 SDValue Cmp;
23087 if (IntrData->Opc1 != 0) {
23088 SDValue Sae = Op.getOperand(5);
23089 if (isRoundModeSAE(Sae))
23090 Cmp = DAG.getNode(IntrData->Opc1, dl, MVT::v1i1, Src1, Src2, CC, Sae);
23091 else if (!isRoundModeCurDirection(Sae))
23092 return SDValue();
23093 }
23094 // Default rounding mode.
23095 if (!Cmp.getNode())
23096 Cmp = DAG.getNode(IntrData->Opc0, dl, MVT::v1i1, Src1, Src2, CC);
23097
23098 SDValue CmpMask = getScalarMaskingNode(Cmp, Mask, SDValue(),
23099 Subtarget, DAG);
23100 // Need to fill with zeros to ensure the bitcast will produce zeroes
23101 // for the upper bits. An EXTRACT_ELEMENT here wouldn't guarantee that.
23102 SDValue Ins = DAG.getNode(ISD::INSERT_SUBVECTOR, dl, MVT::v8i1,
23103 DAG.getConstant(0, dl, MVT::v8i1),
23104 CmpMask, DAG.getIntPtrConstant(0, dl));
23105 return DAG.getBitcast(MVT::i8, Ins);
23106 }
23107 case COMI: { // Comparison intrinsics
23108 ISD::CondCode CC = (ISD::CondCode)IntrData->Opc1;
23109 SDValue LHS = Op.getOperand(1);
23110 SDValue RHS = Op.getOperand(2);
23111 SDValue Comi = DAG.getNode(IntrData->Opc0, dl, MVT::i32, LHS, RHS);
23112 SDValue InvComi = DAG.getNode(IntrData->Opc0, dl, MVT::i32, RHS, LHS);
23113 SDValue SetCC;
23114 switch (CC) {
23115 case ISD::SETEQ: { // (ZF = 1 and PF = 0)
23116 SetCC = getSETCC(X86::COND_E, Comi, dl, DAG);
23117 SDValue SetNP = getSETCC(X86::COND_NP, Comi, dl, DAG);
23118 SetCC = DAG.getNode(ISD::AND, dl, MVT::i8, SetCC, SetNP);
23119 break;
23120 }
23121 case ISD::SETNE: { // (ZF = 0 or PF = 1)
23122 SetCC = getSETCC(X86::COND_NE, Comi, dl, DAG);
23123 SDValue SetP = getSETCC(X86::COND_P, Comi, dl, DAG);
23124 SetCC = DAG.getNode(ISD::OR, dl, MVT::i8, SetCC, SetP);
23125 break;
23126 }
23127 case ISD::SETGT: // (CF = 0 and ZF = 0)
23128 SetCC = getSETCC(X86::COND_A, Comi, dl, DAG);
23129 break;
23130 case ISD::SETLT: { // The condition is opposite to GT. Swap the operands.
23131 SetCC = getSETCC(X86::COND_A, InvComi, dl, DAG);
23132 break;
23133 }
23134 case ISD::SETGE: // CF = 0
23135 SetCC = getSETCC(X86::COND_AE, Comi, dl, DAG);
23136 break;
23137 case ISD::SETLE: // The condition is opposite to GE. Swap the operands.
23138 SetCC = getSETCC(X86::COND_AE, InvComi, dl, DAG);
23139 break;
23140 default:
23141 llvm_unreachable("Unexpected illegal condition!");
23142 }
23143 return DAG.getNode(ISD::ZERO_EXTEND, dl, MVT::i32, SetCC);
23144 }
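// Note: COMIS/UCOMIS flag an unordered comparison (a NaN operand) by setting
// ZF, PF and CF together, so an ordered "equal" has to check ZF = 1 *and*
// PF = 0, and "not equal" is its inverse; that is why the SETEQ/SETNE paths
// above combine two setcc nodes with AND/OR.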
23145 case COMI_RM: { // Comparison intrinsics with Sae
23146 SDValue LHS = Op.getOperand(1);
23147 SDValue RHS = Op.getOperand(2);
23148 unsigned CondVal = Op.getConstantOperandVal(3);
23149 SDValue Sae = Op.getOperand(4);
23150
23151 SDValue FCmp;
23152 if (isRoundModeCurDirection(Sae))
23153 FCmp = DAG.getNode(X86ISD::FSETCCM, dl, MVT::v1i1, LHS, RHS,
23154 DAG.getTargetConstant(CondVal, dl, MVT::i8));
23155 else if (isRoundModeSAE(Sae))
23156 FCmp = DAG.getNode(X86ISD::FSETCCM_SAE, dl, MVT::v1i1, LHS, RHS,
23157 DAG.getTargetConstant(CondVal, dl, MVT::i8), Sae);
23158 else
23159 return SDValue();
23160 // Need to fill with zeros to ensure the bitcast will produce zeroes
23161 // for the upper bits. An EXTRACT_ELEMENT here wouldn't guarantee that.
23162 SDValue Ins = DAG.getNode(ISD::INSERT_SUBVECTOR, dl, MVT::v16i1,
23163 DAG.getConstant(0, dl, MVT::v16i1),
23164 FCmp, DAG.getIntPtrConstant(0, dl));
23165 return DAG.getNode(ISD::ZERO_EXTEND, dl, MVT::i32,
23166 DAG.getBitcast(MVT::i16, Ins));
23167 }
23168 case VSHIFT:
23169 return getTargetVShiftNode(IntrData->Opc0, dl, Op.getSimpleValueType(),
23170 Op.getOperand(1), Op.getOperand(2), Subtarget,
23171 DAG);
23172 case COMPRESS_EXPAND_IN_REG: {
23173 SDValue Mask = Op.getOperand(3);
23174 SDValue DataToCompress = Op.getOperand(1);
23175 SDValue PassThru = Op.getOperand(2);
23176 if (ISD::isBuildVectorAllOnes(Mask.getNode())) // return data as is
23177 return Op.getOperand(1);
23178
23179 // Avoid false dependency.
23180 if (PassThru.isUndef())
23181 PassThru = DAG.getConstant(0, dl, VT);
23182
23183 return DAG.getNode(IntrData->Opc0, dl, VT, DataToCompress, PassThru,
23184 Mask);
23185 }
23186 case FIXUPIMM:
23187 case FIXUPIMM_MASKZ: {
23188 SDValue Src1 = Op.getOperand(1);
23189 SDValue Src2 = Op.getOperand(2);
23190 SDValue Src3 = Op.getOperand(3);
23191 SDValue Imm = Op.getOperand(4);
23192 SDValue Mask = Op.getOperand(5);
23193 SDValue Passthru = (IntrData->Type == FIXUPIMM)
23194 ? Src1
23195 : getZeroVector(VT, Subtarget, DAG, dl);
23196
23197 unsigned Opc = IntrData->Opc0;
23198 if (IntrData->Opc1 != 0) {
23199 SDValue Sae = Op.getOperand(6);
23200 if (isRoundModeSAE(Sae))
23201 Opc = IntrData->Opc1;
23202 else if (!isRoundModeCurDirection(Sae))
23203 return SDValue();
23204 }
23205
23206 SDValue FixupImm = DAG.getNode(Opc, dl, VT, Src1, Src2, Src3, Imm);
23207
23208 if (Opc == X86ISD::VFIXUPIMM || Opc == X86ISD::VFIXUPIMM_SAE)
23209 return getVectorMaskingNode(FixupImm, Mask, Passthru, Subtarget, DAG);
23210
23211 return getScalarMaskingNode(FixupImm, Mask, Passthru, Subtarget, DAG);
23212 }
23213 case ROUNDP: {
23214 assert(IntrData->Opc0 == X86ISD::VRNDSCALE && "Unexpected opcode");
23215 // Clear the upper bits of the rounding immediate so that the legacy
23216 // intrinsic can't trigger the scaling behavior of VRNDSCALE.
23217 auto Round = cast<ConstantSDNode>(Op.getOperand(2));
23218 SDValue RoundingMode =
23219 DAG.getTargetConstant(Round->getZExtValue() & 0xf, dl, MVT::i32);
23220 return DAG.getNode(IntrData->Opc0, dl, Op.getValueType(),
23221 Op.getOperand(1), RoundingMode);
23222 }
23223 case ROUNDS: {
23224 assert(IntrData->Opc0 == X86ISD::VRNDSCALES && "Unexpected opcode");
23225 // Clear the upper bits of the rounding immediate so that the legacy
23226 // intrinsic can't trigger the scaling behavior of VRNDSCALE.
23227 auto Round = cast<ConstantSDNode>(Op.getOperand(3));
23228 SDValue RoundingMode =
23229 DAG.getTargetConstant(Round->getZExtValue() & 0xf, dl, MVT::i32);
23230 return DAG.getNode(IntrData->Opc0, dl, Op.getValueType(),
23231 Op.getOperand(1), Op.getOperand(2), RoundingMode);
23232 }
23233 case BEXTRI: {
23234 assert(IntrData->Opc0 == X86ISD::BEXTR && "Unexpected opcode");
23235
23236 // The control is a TargetConstant, but we need to convert it to a
23237 // ConstantSDNode.
23238 uint64_t Imm = Op.getConstantOperandVal(2);
23239 SDValue Control = DAG.getConstant(Imm, dl, Op.getValueType());
23240 return DAG.getNode(IntrData->Opc0, dl, Op.getValueType(),
23241 Op.getOperand(1), Control);
23242 }
23243 // ADC/ADCX/SBB
23244 case ADX: {
23245 SDVTList CFVTs = DAG.getVTList(Op->getValueType(0), MVT::i32);
23246 SDVTList VTs = DAG.getVTList(Op.getOperand(2).getValueType(), MVT::i32);
23247
23248 SDValue Res;
23249 // If the carry in is zero, then we should just use ADD/SUB instead of
23250 // ADC/SBB.
23251 if (isNullConstant(Op.getOperand(1))) {
23252 Res = DAG.getNode(IntrData->Opc1, dl, VTs, Op.getOperand(2),
23253 Op.getOperand(3));
23254 } else {
23255 SDValue GenCF = DAG.getNode(X86ISD::ADD, dl, CFVTs, Op.getOperand(1),
23256 DAG.getConstant(-1, dl, MVT::i8));
23257 Res = DAG.getNode(IntrData->Opc0, dl, VTs, Op.getOperand(2),
23258 Op.getOperand(3), GenCF.getValue(1));
23259 }
23260 SDValue SetCC = getSETCC(X86::COND_B, Res.getValue(1), dl, DAG);
23261 SDValue Results[] = { SetCC, Res };
23262 return DAG.getMergeValues(Results, dl);
23263 }
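// The GenCF node above materializes the incoming carry: adding 0xFF (-1) to
// the i8 carry-in sets CF exactly when the carry-in is non-zero, and that
// flag result (GenCF.getValue(1)) feeds the ADC/SBB node directly.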
23264 case CVTPD2PS_MASK:
23265 case CVTPD2DQ_MASK:
23266 case CVTQQ2PS_MASK:
23267 case TRUNCATE_TO_REG: {
23268 SDValue Src = Op.getOperand(1);
23269 SDValue PassThru = Op.getOperand(2);
23270 SDValue Mask = Op.getOperand(3);
23271
23272 if (isAllOnesConstant(Mask))
23273 return DAG.getNode(IntrData->Opc0, dl, Op.getValueType(), Src);
23274
23275 MVT SrcVT = Src.getSimpleValueType();
23276 MVT MaskVT = MVT::getVectorVT(MVT::i1, SrcVT.getVectorNumElements());
23277 Mask = getMaskNode(Mask, MaskVT, Subtarget, DAG, dl);
23278 return DAG.getNode(IntrData->Opc1, dl, Op.getValueType(), Src, PassThru,
23279 Mask);
23280 }
23281 case CVTPS2PH_MASK: {
23282 SDValue Src = Op.getOperand(1);
23283 SDValue Rnd = Op.getOperand(2);
23284 SDValue PassThru = Op.getOperand(3);
23285 SDValue Mask = Op.getOperand(4);
23286
23287 if (isAllOnesConstant(Mask))
23288 return DAG.getNode(IntrData->Opc0, dl, Op.getValueType(), Src, Rnd);
23289
23290 MVT SrcVT = Src.getSimpleValueType();
23291 MVT MaskVT = MVT::getVectorVT(MVT::i1, SrcVT.getVectorNumElements());
23292 Mask = getMaskNode(Mask, MaskVT, Subtarget, DAG, dl);
23293 return DAG.getNode(IntrData->Opc1, dl, Op.getValueType(), Src, Rnd,
23294 PassThru, Mask);
23295
23296 }
23297 case CVTNEPS2BF16_MASK: {
23298 SDValue Src = Op.getOperand(1);
23299 SDValue PassThru = Op.getOperand(2);
23300 SDValue Mask = Op.getOperand(3);
23301
23302 if (ISD::isBuildVectorAllOnes(Mask.getNode()))
23303 return DAG.getNode(IntrData->Opc0, dl, Op.getValueType(), Src);
23304
23305 // Break false dependency.
23306 if (PassThru.isUndef())
23307 PassThru = DAG.getConstant(0, dl, PassThru.getValueType());
23308
23309 return DAG.getNode(IntrData->Opc1, dl, Op.getValueType(), Src, PassThru,
23310 Mask);
23311 }
23312 default:
23313 break;
23314 }
23315 }
23316
23317 switch (IntNo) {
23318 default: return SDValue(); // Don't custom lower most intrinsics.
23319
23320 // ptest and testp intrinsics. The intrinsics these come from are designed to
23321 // return an integer value, not just set flags, so lower them to the ptest
23322 // or testp pattern plus a setcc for the result.
23323 case Intrinsic::x86_avx512_ktestc_b:
23324 case Intrinsic::x86_avx512_ktestc_w:
23325 case Intrinsic::x86_avx512_ktestc_d:
23326 case Intrinsic::x86_avx512_ktestc_q:
23327 case Intrinsic::x86_avx512_ktestz_b:
23328 case Intrinsic::x86_avx512_ktestz_w:
23329 case Intrinsic::x86_avx512_ktestz_d:
23330 case Intrinsic::x86_avx512_ktestz_q:
23331 case Intrinsic::x86_sse41_ptestz:
23332 case Intrinsic::x86_sse41_ptestc:
23333 case Intrinsic::x86_sse41_ptestnzc:
23334 case Intrinsic::x86_avx_ptestz_256:
23335 case Intrinsic::x86_avx_ptestc_256:
23336 case Intrinsic::x86_avx_ptestnzc_256:
23337 case Intrinsic::x86_avx_vtestz_ps:
23338 case Intrinsic::x86_avx_vtestc_ps:
23339 case Intrinsic::x86_avx_vtestnzc_ps:
23340 case Intrinsic::x86_avx_vtestz_pd:
23341 case Intrinsic::x86_avx_vtestc_pd:
23342 case Intrinsic::x86_avx_vtestnzc_pd:
23343 case Intrinsic::x86_avx_vtestz_ps_256:
23344 case Intrinsic::x86_avx_vtestc_ps_256:
23345 case Intrinsic::x86_avx_vtestnzc_ps_256:
23346 case Intrinsic::x86_avx_vtestz_pd_256:
23347 case Intrinsic::x86_avx_vtestc_pd_256:
23348 case Intrinsic::x86_avx_vtestnzc_pd_256: {
23349 unsigned TestOpc = X86ISD::PTEST;
23350 X86::CondCode X86CC;
23351 switch (IntNo) {
23352 default: llvm_unreachable("Bad fallthrough in Intrinsic lowering.");
23353 case Intrinsic::x86_avx512_ktestc_b:
23354 case Intrinsic::x86_avx512_ktestc_w:
23355 case Intrinsic::x86_avx512_ktestc_d:
23356 case Intrinsic::x86_avx512_ktestc_q:
23357 // CF = 1
23358 TestOpc = X86ISD::KTEST;
23359 X86CC = X86::COND_B;
23360 break;
23361 case Intrinsic::x86_avx512_ktestz_b:
23362 case Intrinsic::x86_avx512_ktestz_w:
23363 case Intrinsic::x86_avx512_ktestz_d:
23364 case Intrinsic::x86_avx512_ktestz_q:
23365 TestOpc = X86ISD::KTEST;
23366 X86CC = X86::COND_E;
23367 break;
23368 case Intrinsic::x86_avx_vtestz_ps:
23369 case Intrinsic::x86_avx_vtestz_pd:
23370 case Intrinsic::x86_avx_vtestz_ps_256:
23371 case Intrinsic::x86_avx_vtestz_pd_256:
23372 TestOpc = X86ISD::TESTP;
23373 LLVM_FALLTHROUGH;
23374 case Intrinsic::x86_sse41_ptestz:
23375 case Intrinsic::x86_avx_ptestz_256:
23376 // ZF = 1
23377 X86CC = X86::COND_E;
23378 break;
23379 case Intrinsic::x86_avx_vtestc_ps:
23380 case Intrinsic::x86_avx_vtestc_pd:
23381 case Intrinsic::x86_avx_vtestc_ps_256:
23382 case Intrinsic::x86_avx_vtestc_pd_256:
23383 TestOpc = X86ISD::TESTP;
23384 LLVM_FALLTHROUGH;
23385 case Intrinsic::x86_sse41_ptestc:
23386 case Intrinsic::x86_avx_ptestc_256:
23387 // CF = 1
23388 X86CC = X86::COND_B;
23389 break;
23390 case Intrinsic::x86_avx_vtestnzc_ps:
23391 case Intrinsic::x86_avx_vtestnzc_pd:
23392 case Intrinsic::x86_avx_vtestnzc_ps_256:
23393 case Intrinsic::x86_avx_vtestnzc_pd_256:
23394 TestOpc = X86ISD::TESTP;
23395 LLVM_FALLTHROUGH;
23396 case Intrinsic::x86_sse41_ptestnzc:
23397 case Intrinsic::x86_avx_ptestnzc_256:
23398 // ZF and CF = 0
23399 X86CC = X86::COND_A;
23400 break;
23401 }
23402
23403 SDValue LHS = Op.getOperand(1);
23404 SDValue RHS = Op.getOperand(2);
23405 SDValue Test = DAG.getNode(TestOpc, dl, MVT::i32, LHS, RHS);
23406 SDValue SetCC = getSETCC(X86CC, Test, dl, DAG);
23407 return DAG.getNode(ISD::ZERO_EXTEND, dl, MVT::i32, SetCC);
23408 }
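// Flag mapping used above: the *testz variants test ZF (COND_E), the *testc
// variants test CF (COND_B), and the *testnzc variants test "CF = 0 and
// ZF = 0" (COND_A); KTEST and TESTP replace PTEST for the mask-register and
// floating-point sign-bit forms, respectively.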
23409
23410 case Intrinsic::x86_sse42_pcmpistria128:
23411 case Intrinsic::x86_sse42_pcmpestria128:
23412 case Intrinsic::x86_sse42_pcmpistric128:
23413 case Intrinsic::x86_sse42_pcmpestric128:
23414 case Intrinsic::x86_sse42_pcmpistrio128:
23415 case Intrinsic::x86_sse42_pcmpestrio128:
23416 case Intrinsic::x86_sse42_pcmpistris128:
23417 case Intrinsic::x86_sse42_pcmpestris128:
23418 case Intrinsic::x86_sse42_pcmpistriz128:
23419 case Intrinsic::x86_sse42_pcmpestriz128: {
23420 unsigned Opcode;
23421 X86::CondCode X86CC;
23422 switch (IntNo) {
23423 default: llvm_unreachable("Impossible intrinsic"); // Can't reach here.
23424 case Intrinsic::x86_sse42_pcmpistria128:
23425 Opcode = X86ISD::PCMPISTR;
23426 X86CC = X86::COND_A;
23427 break;
23428 case Intrinsic::x86_sse42_pcmpestria128:
23429 Opcode = X86ISD::PCMPESTR;
23430 X86CC = X86::COND_A;
23431 break;
23432 case Intrinsic::x86_sse42_pcmpistric128:
23433 Opcode = X86ISD::PCMPISTR;
23434 X86CC = X86::COND_B;
23435 break;
23436 case Intrinsic::x86_sse42_pcmpestric128:
23437 Opcode = X86ISD::PCMPESTR;
23438 X86CC = X86::COND_B;
23439 break;
23440 case Intrinsic::x86_sse42_pcmpistrio128:
23441 Opcode = X86ISD::PCMPISTR;
23442 X86CC = X86::COND_O;
23443 break;
23444 case Intrinsic::x86_sse42_pcmpestrio128:
23445 Opcode = X86ISD::PCMPESTR;
23446 X86CC = X86::COND_O;
23447 break;
23448 case Intrinsic::x86_sse42_pcmpistris128:
23449 Opcode = X86ISD::PCMPISTR;
23450 X86CC = X86::COND_S;
23451 break;
23452 case Intrinsic::x86_sse42_pcmpestris128:
23453 Opcode = X86ISD::PCMPESTR;
23454 X86CC = X86::COND_S;
23455 break;
23456 case Intrinsic::x86_sse42_pcmpistriz128:
23457 Opcode = X86ISD::PCMPISTR;
23458 X86CC = X86::COND_E;
23459 break;
23460 case Intrinsic::x86_sse42_pcmpestriz128:
23461 Opcode = X86ISD::PCMPESTR;
23462 X86CC = X86::COND_E;
23463 break;
23464 }
23465 SmallVector<SDValue, 5> NewOps(Op->op_begin()+1, Op->op_end());
23466 SDVTList VTs = DAG.getVTList(MVT::i32, MVT::v16i8, MVT::i32);
23467 SDValue PCMP = DAG.getNode(Opcode, dl, VTs, NewOps).getValue(2);
23468 SDValue SetCC = getSETCC(X86CC, PCMP, dl, DAG);
23469 return DAG.getNode(ISD::ZERO_EXTEND, dl, MVT::i32, SetCC);
23470 }
23471
23472 case Intrinsic::x86_sse42_pcmpistri128:
23473 case Intrinsic::x86_sse42_pcmpestri128: {
23474 unsigned Opcode;
23475 if (IntNo == Intrinsic::x86_sse42_pcmpistri128)
23476 Opcode = X86ISD::PCMPISTR;
23477 else
23478 Opcode = X86ISD::PCMPESTR;
23479
23480 SmallVector<SDValue, 5> NewOps(Op->op_begin()+1, Op->op_end());
23481 SDVTList VTs = DAG.getVTList(MVT::i32, MVT::v16i8, MVT::i32);
23482 return DAG.getNode(Opcode, dl, VTs, NewOps);
23483 }
23484
23485 case Intrinsic::x86_sse42_pcmpistrm128:
23486 case Intrinsic::x86_sse42_pcmpestrm128: {
23487 unsigned Opcode;
23488 if (IntNo == Intrinsic::x86_sse42_pcmpistrm128)
23489 Opcode = X86ISD::PCMPISTR;
23490 else
23491 Opcode = X86ISD::PCMPESTR;
23492
23493 SmallVector<SDValue, 5> NewOps(Op->op_begin()+1, Op->op_end());
23494 SDVTList VTs = DAG.getVTList(MVT::i32, MVT::v16i8, MVT::i32);
23495 return DAG.getNode(Opcode, dl, VTs, NewOps).getValue(1);
23496 }
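// All PCMPISTR/PCMPESTR nodes above share the (i32, v16i8, i32) result list:
// result 0 is the index (pcmp*stri), result 1 is the byte mask (pcmp*strm),
// and result 2 is EFLAGS, which the flag-only intrinsics read via a setcc.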
23497
23498 case Intrinsic::eh_sjlj_lsda: {
23499 MachineFunction &MF = DAG.getMachineFunction();
23500 const TargetLowering &TLI = DAG.getTargetLoweringInfo();
23501 MVT PtrVT = TLI.getPointerTy(DAG.getDataLayout());
23502 auto &Context = MF.getMMI().getContext();
23503 MCSymbol *S = Context.getOrCreateSymbol(Twine("GCC_except_table") +
23504 Twine(MF.getFunctionNumber()));
23505 return DAG.getNode(getGlobalWrapperKind(), dl, VT,
23506 DAG.getMCSymbol(S, PtrVT));
23507 }
23508
23509 case Intrinsic::x86_seh_lsda: {
23510 // Compute the symbol for the LSDA. We know it'll get emitted later.
23511 MachineFunction &MF = DAG.getMachineFunction();
23512 SDValue Op1 = Op.getOperand(1);
23513 auto *Fn = cast<Function>(cast<GlobalAddressSDNode>(Op1)->getGlobal());
23514 MCSymbol *LSDASym = MF.getMMI().getContext().getOrCreateLSDASymbol(
23515 GlobalValue::dropLLVMManglingEscape(Fn->getName()));
23516
23517 // Generate a simple absolute symbol reference. This intrinsic is only
23518 // supported on 32-bit Windows, which isn't PIC.
23519 SDValue Result = DAG.getMCSymbol(LSDASym, VT);
23520 return DAG.getNode(X86ISD::Wrapper, dl, VT, Result);
23521 }
23522
23523 case Intrinsic::eh_recoverfp: {
23524 SDValue FnOp = Op.getOperand(1);
23525 SDValue IncomingFPOp = Op.getOperand(2);
23526 GlobalAddressSDNode *GSD = dyn_cast<GlobalAddressSDNode>(FnOp);
23527 auto *Fn = dyn_cast_or_null<Function>(GSD ? GSD->getGlobal() : nullptr);
23528 if (!Fn)
23529 report_fatal_error(
23530 "llvm.eh.recoverfp must take a function as the first argument");
23531 return recoverFramePointer(DAG, Fn, IncomingFPOp);
23532 }
23533
23534 case Intrinsic::localaddress: {
23535 // Returns one of the stack, base, or frame pointer registers, depending on
23536 // which is used to reference local variables.
23537 MachineFunction &MF = DAG.getMachineFunction();
23538 const X86RegisterInfo *RegInfo = Subtarget.getRegisterInfo();
23539 unsigned Reg;
23540 if (RegInfo->hasBasePointer(MF))
23541 Reg = RegInfo->getBaseRegister();
23542 else { // Handles the SP or FP case.
23543 bool CantUseFP = RegInfo->needsStackRealignment(MF);
23544 if (CantUseFP)
23545 Reg = RegInfo->getPtrSizedStackRegister(MF);
23546 else
23547 Reg = RegInfo->getPtrSizedFrameRegister(MF);
23548 }
23549 return DAG.getCopyFromReg(DAG.getEntryNode(), dl, Reg, VT);
23550 }
23551
23552 case Intrinsic::x86_avx512_vp2intersect_q_512:
23553 case Intrinsic::x86_avx512_vp2intersect_q_256:
23554 case Intrinsic::x86_avx512_vp2intersect_q_128:
23555 case Intrinsic::x86_avx512_vp2intersect_d_512:
23556 case Intrinsic::x86_avx512_vp2intersect_d_256:
23557 case Intrinsic::x86_avx512_vp2intersect_d_128: {
23558 MVT MaskVT = Op.getSimpleValueType();
23559
23560 SDVTList VTs = DAG.getVTList(MVT::Untyped, MVT::Other);
23561 SDLoc DL(Op);
23562
23563 SDValue Operation =
23564 DAG.getNode(X86ISD::VP2INTERSECT, DL, VTs,
23565 Op->getOperand(1), Op->getOperand(2));
23566
23567 SDValue Result0 = DAG.getTargetExtractSubreg(X86::sub_mask_0, DL,
23568 MaskVT, Operation);
23569 SDValue Result1 = DAG.getTargetExtractSubreg(X86::sub_mask_1, DL,
23570 MaskVT, Operation);
23571 return DAG.getMergeValues({Result0, Result1}, DL);
23572 }
23573 case Intrinsic::x86_mmx_pslli_w:
23574 case Intrinsic::x86_mmx_pslli_d:
23575 case Intrinsic::x86_mmx_pslli_q:
23576 case Intrinsic::x86_mmx_psrli_w:
23577 case Intrinsic::x86_mmx_psrli_d:
23578 case Intrinsic::x86_mmx_psrli_q:
23579 case Intrinsic::x86_mmx_psrai_w:
23580 case Intrinsic::x86_mmx_psrai_d: {
23581 SDLoc DL(Op);
23582 SDValue ShAmt = Op.getOperand(2);
23583 // If the argument is a constant, convert it to a target constant.
23584 if (auto *C = dyn_cast<ConstantSDNode>(ShAmt)) {
23585 ShAmt = DAG.getTargetConstant(C->getZExtValue(), DL, MVT::i32);
23586 return DAG.getNode(ISD::INTRINSIC_WO_CHAIN, DL, Op.getValueType(),
23587 Op.getOperand(0), Op.getOperand(1), ShAmt);
23588 }
23589
23590 unsigned NewIntrinsic;
23591 switch (IntNo) {
23592 default: llvm_unreachable("Impossible intrinsic"); // Can't reach here.
23593 case Intrinsic::x86_mmx_pslli_w:
23594 NewIntrinsic = Intrinsic::x86_mmx_psll_w;
23595 break;
23596 case Intrinsic::x86_mmx_pslli_d:
23597 NewIntrinsic = Intrinsic::x86_mmx_psll_d;
23598 break;
23599 case Intrinsic::x86_mmx_pslli_q:
23600 NewIntrinsic = Intrinsic::x86_mmx_psll_q;
23601 break;
23602 case Intrinsic::x86_mmx_psrli_w:
23603 NewIntrinsic = Intrinsic::x86_mmx_psrl_w;
23604 break;
23605 case Intrinsic::x86_mmx_psrli_d:
23606 NewIntrinsic = Intrinsic::x86_mmx_psrl_d;
23607 break;
23608 case Intrinsic::x86_mmx_psrli_q:
23609 NewIntrinsic = Intrinsic::x86_mmx_psrl_q;
23610 break;
23611 case Intrinsic::x86_mmx_psrai_w:
23612 NewIntrinsic = Intrinsic::x86_mmx_psra_w;
23613 break;
23614 case Intrinsic::x86_mmx_psrai_d:
23615 NewIntrinsic = Intrinsic::x86_mmx_psra_d;
23616 break;
23617 }
23618
23619 // The vector shift intrinsics with scalar shift amounts use 32-bit values,
23620 // but the SSE2/MMX shift instructions read 64 bits. Copy the 32 bits to an
23621 // MMX register.
23622 ShAmt = DAG.getNode(X86ISD::MMX_MOVW2D, DL, MVT::x86mmx, ShAmt);
23623 return DAG.getNode(ISD::INTRINSIC_WO_CHAIN, DL, Op.getValueType(),
23624 DAG.getConstant(NewIntrinsic, DL, MVT::i32),
23625 Op.getOperand(1), ShAmt);
23626
23627 }
23628 }
23629}
23630
23631static SDValue getAVX2GatherNode(unsigned Opc, SDValue Op, SelectionDAG &DAG,
23632 SDValue Src, SDValue Mask, SDValue Base,
23633 SDValue Index, SDValue ScaleOp, SDValue Chain,
23634 const X86Subtarget &Subtarget) {
23635 SDLoc dl(Op);
23636 auto *C = dyn_cast<ConstantSDNode>(ScaleOp);
23637 // Scale must be constant.
23638 if (!C)
23639 return SDValue();
23640 const TargetLowering &TLI = DAG.getTargetLoweringInfo();
23641 SDValue Scale = DAG.getTargetConstant(C->getZExtValue(), dl,
23642 TLI.getPointerTy(DAG.getDataLayout()));
23643 EVT MaskVT = Mask.getValueType().changeVectorElementTypeToInteger();
23644 SDVTList VTs = DAG.getVTList(Op.getValueType(), MaskVT, MVT::Other);
23645 // If source is undef or we know it won't be used, use a zero vector
23646 // to break register dependency.
23647 // TODO: use undef instead and let BreakFalseDeps deal with it?
23648 if (Src.isUndef() || ISD::isBuildVectorAllOnes(Mask.getNode()))
23649 Src = getZeroVector(Op.getSimpleValueType(), Subtarget, DAG, dl);
23650
23651 MemIntrinsicSDNode *MemIntr = cast<MemIntrinsicSDNode>(Op);
23652
23653 SDValue Ops[] = {Chain, Src, Mask, Base, Index, Scale };
23654 SDValue Res = DAG.getTargetMemSDNode<X86MaskedGatherSDNode>(
23655 VTs, Ops, dl, MemIntr->getMemoryVT(), MemIntr->getMemOperand());
23656 return DAG.getMergeValues({ Res, Res.getValue(2) }, dl);
23657}
23658
23659static SDValue getGatherNode(SDValue Op, SelectionDAG &DAG,
23660 SDValue Src, SDValue Mask, SDValue Base,
23661 SDValue Index, SDValue ScaleOp, SDValue Chain,
23662 const X86Subtarget &Subtarget) {
23663 MVT VT = Op.getSimpleValueType();
23664 SDLoc dl(Op);
23665 auto *C = dyn_cast<ConstantSDNode>(ScaleOp);
23666 // Scale must be constant.
23667 if (!C)
23668 return SDValue();
23669 const TargetLowering &TLI = DAG.getTargetLoweringInfo();
23670 SDValue Scale = DAG.getTargetConstant(C->getZExtValue(), dl,
23671 TLI.getPointerTy(DAG.getDataLayout()));
23672 unsigned MinElts = std::min(Index.getSimpleValueType().getVectorNumElements(),
23673 VT.getVectorNumElements());
23674 MVT MaskVT = MVT::getVectorVT(MVT::i1, MinElts);
23675
23676 // We support two versions of the gather intrinsics. One with scalar mask and
23677 // one with vXi1 mask. Convert scalar to vXi1 if necessary.
23678 if (Mask.getValueType() != MaskVT)
23679 Mask = getMaskNode(Mask, MaskVT, Subtarget, DAG, dl);
23680
23681 SDVTList VTs = DAG.getVTList(Op.getValueType(), MaskVT, MVT::Other);
23682 // If source is undef or we know it won't be used, use a zero vector
23683 // to break register dependency.
23684 // TODO: use undef instead and let BreakFalseDeps deal with it?
23685 if (Src.isUndef() || ISD::isBuildVectorAllOnes(Mask.getNode()))
23686 Src = getZeroVector(Op.getSimpleValueType(), Subtarget, DAG, dl);
23687
23688 MemIntrinsicSDNode *MemIntr = cast<MemIntrinsicSDNode>(Op);
23689
23690 SDValue Ops[] = {Chain, Src, Mask, Base, Index, Scale };
23691 SDValue Res = DAG.getTargetMemSDNode<X86MaskedGatherSDNode>(
23692 VTs, Ops, dl, MemIntr->getMemoryVT(), MemIntr->getMemOperand());
23693 return DAG.getMergeValues({ Res, Res.getValue(2) }, dl);
23694}
23695
23696static SDValue getScatterNode(unsigned Opc, SDValue Op, SelectionDAG &DAG,
23697 SDValue Src, SDValue Mask, SDValue Base,
23698 SDValue Index, SDValue ScaleOp, SDValue Chain,
23699 const X86Subtarget &Subtarget) {
23700 SDLoc dl(Op);
23701 auto *C = dyn_cast<ConstantSDNode>(ScaleOp);
23702 // Scale must be constant.
23703 if (!C)
23704 return SDValue();
23705 const TargetLowering &TLI = DAG.getTargetLoweringInfo();
23706 SDValue Scale = DAG.getTargetConstant(C->getZExtValue(), dl,
23707 TLI.getPointerTy(DAG.getDataLayout()));
23708 unsigned MinElts = std::min(Index.getSimpleValueType().getVectorNumElements(),
23709 Src.getSimpleValueType().getVectorNumElements());
23710 MVT MaskVT = MVT::getVectorVT(MVT::i1, MinElts);
23711
23712 // We support two versions of the scatter intrinsics. One with scalar mask and
23713 // one with vXi1 mask. Convert scalar to vXi1 if necessary.
23714 if (Mask.getValueType() != MaskVT)
23715 Mask = getMaskNode(Mask, MaskVT, Subtarget, DAG, dl);
23716
23717 MemIntrinsicSDNode *MemIntr = cast<MemIntrinsicSDNode>(Op);
23718
23719 SDVTList VTs = DAG.getVTList(MaskVT, MVT::Other);
23720 SDValue Ops[] = {Chain, Src, Mask, Base, Index, Scale};
23721 SDValue Res = DAG.getTargetMemSDNode<X86MaskedScatterSDNode>(
23722 VTs, Ops, dl, MemIntr->getMemoryVT(), MemIntr->getMemOperand());
23723 return Res.getValue(1);
23724}
23725
23726static SDValue getPrefetchNode(unsigned Opc, SDValue Op, SelectionDAG &DAG,
23727 SDValue Mask, SDValue Base, SDValue Index,
23728 SDValue ScaleOp, SDValue Chain,
23729 const X86Subtarget &Subtarget) {
23730 SDLoc dl(Op);
23731 auto *C = dyn_cast<ConstantSDNode>(ScaleOp);
23732 // Scale must be constant.
23733 if (!C)
23734 return SDValue();
23735 const TargetLowering &TLI = DAG.getTargetLoweringInfo();
23736 SDValue Scale = DAG.getTargetConstant(C->getZExtValue(), dl,
23737 TLI.getPointerTy(DAG.getDataLayout()));
23738 SDValue Disp = DAG.getTargetConstant(0, dl, MVT::i32);
23739 SDValue Segment = DAG.getRegister(0, MVT::i32);
23740 MVT MaskVT =
23741 MVT::getVectorVT(MVT::i1, Index.getSimpleValueType().getVectorNumElements());
23742 SDValue VMask = getMaskNode(Mask, MaskVT, Subtarget, DAG, dl);
23743 SDValue Ops[] = {VMask, Base, Scale, Index, Disp, Segment, Chain};
23744 SDNode *Res = DAG.getMachineNode(Opc, dl, MVT::Other, Ops);
23745 return SDValue(Res, 0);
23746}
23747
23748/// Handles the lowering of builtin intrinsics with chain that return their
23749/// value into registers EDX:EAX.
23750/// If operand SrcReg is a valid register identifier, then operand 2 of N is
23751/// copied to SrcReg. The assumption is that SrcReg is an implicit input to
23752/// TargetOpcode.
23753/// Returns a Glue value which can be used to add extra copy-from-reg if the
23754/// expanded intrinsics implicitly define extra registers (i.e. not just
23755/// EDX:EAX).
23756static SDValue expandIntrinsicWChainHelper(SDNode *N, const SDLoc &DL,
23757 SelectionDAG &DAG,
23758 unsigned TargetOpcode,
23759 unsigned SrcReg,
23760 const X86Subtarget &Subtarget,
23761 SmallVectorImpl<SDValue> &Results) {
23762 SDValue Chain = N->getOperand(0);
23763 SDValue Glue;
23764
23765 if (SrcReg) {
23766 assert(N->getNumOperands() == 3 && "Unexpected number of operands!");
23767 Chain = DAG.getCopyToReg(Chain, DL, SrcReg, N->getOperand(2), Glue);
23768 Glue = Chain.getValue(1);
23769 }
23770
23771 SDVTList Tys = DAG.getVTList(MVT::Other, MVT::Glue);
23772 SDValue N1Ops[] = {Chain, Glue};
23773 SDNode *N1 = DAG.getMachineNode(
23774 TargetOpcode, DL, Tys, ArrayRef<SDValue>(N1Ops, Glue.getNode() ? 2 : 1));
23775 Chain = SDValue(N1, 0);
23776
23777 // Read the result, which the expanded instruction returns in registers EDX:EAX.
23778 SDValue LO, HI;
23779 if (Subtarget.is64Bit()) {
23780 LO = DAG.getCopyFromReg(Chain, DL, X86::RAX, MVT::i64, SDValue(N1, 1));
23781 HI = DAG.getCopyFromReg(LO.getValue(1), DL, X86::RDX, MVT::i64,
23782 LO.getValue(2));
23783 } else {
23784 LO = DAG.getCopyFromReg(Chain, DL, X86::EAX, MVT::i32, SDValue(N1, 1));
23785 HI = DAG.getCopyFromReg(LO.getValue(1), DL, X86::EDX, MVT::i32,
23786 LO.getValue(2));
23787 }
23788 Chain = HI.getValue(1);
23789 Glue = HI.getValue(2);
23790
23791 if (Subtarget.is64Bit()) {
23792 // Merge the two 32-bit values into a 64-bit one.
23793 SDValue Tmp = DAG.getNode(ISD::SHL, DL, MVT::i64, HI,
23794 DAG.getConstant(32, DL, MVT::i8));
23795 Results.push_back(DAG.getNode(ISD::OR, DL, MVT::i64, LO, Tmp));
23796 Results.push_back(Chain);
23797 return Glue;
23798 }
23799
23800 // Use a buildpair to merge the two 32-bit values into a 64-bit one.
23801 SDValue Ops[] = { LO, HI };
23802 SDValue Pair = DAG.getNode(ISD::BUILD_PAIR, DL, MVT::i64, Ops);
23803 Results.push_back(Pair);
23804 Results.push_back(Chain);
23805 return Glue;
23806}
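// On 64-bit targets the helper above reassembles the logical 64-bit result
// as (RDX << 32) | RAX; on 32-bit targets it pairs EDX:EAX with BUILD_PAIR.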
23807
23808/// Handles the lowering of builtin intrinsics that read the time stamp counter
23809/// (x86_rdtsc and x86_rdtscp). This function is also used to custom lower
23810/// READCYCLECOUNTER nodes.
23811static void getReadTimeStampCounter(SDNode *N, const SDLoc &DL, unsigned Opcode,
23812 SelectionDAG &DAG,
23813 const X86Subtarget &Subtarget,
23814 SmallVectorImpl<SDValue> &Results) {
23815 // The processor's time-stamp counter (a 64-bit MSR) is stored into the
23816 // EDX:EAX registers. EDX is loaded with the high-order 32 bits of the MSR
23817 // and the EAX register is loaded with the low-order 32 bits.
23818 SDValue Glue = expandIntrinsicWChainHelper(N, DL, DAG, Opcode,
23819 /* NoRegister */0, Subtarget,
23820 Results);
23821 if (Opcode != X86::RDTSCP)
23822 return;
23823
23824 SDValue Chain = Results[1];
23825 // The RDTSCP instruction loads the IA32_TSC_AUX MSR (address C000_0103H) into
23826 // the ECX register. Add 'ecx' explicitly to the chain.
23827 SDValue ecx = DAG.getCopyFromReg(Chain, DL, X86::ECX, MVT::i32, Glue);
23828 Results[1] = ecx;
23829 Results.push_back(ecx.getValue(1));
23830}
23831
23832static SDValue LowerREADCYCLECOUNTER(SDValue Op, const X86Subtarget &Subtarget,
23833 SelectionDAG &DAG) {
23834 SmallVector<SDValue, 3> Results;
23835 SDLoc DL(Op);
23836 getReadTimeStampCounter(Op.getNode(), DL, X86::RDTSC, DAG, Subtarget,
23837 Results);
23838 return DAG.getMergeValues(Results, DL);
23839}
23840
23841static SDValue MarkEHRegistrationNode(SDValue Op, SelectionDAG &DAG) {
23842 MachineFunction &MF = DAG.getMachineFunction();
23843 SDValue Chain = Op.getOperand(0);
23844 SDValue RegNode = Op.getOperand(2);
23845 WinEHFuncInfo *EHInfo = MF.getWinEHFuncInfo();
23846 if (!EHInfo)
23847 report_fatal_error("EH registrations only live in functions using WinEH");
23848
23849 // Cast the operand to an alloca, and remember the frame index.
23850 auto *FINode = dyn_cast<FrameIndexSDNode>(RegNode);
23851 if (!FINode)
23852 report_fatal_error("llvm.x86.seh.ehregnode expects a static alloca");
23853 EHInfo->EHRegNodeFrameIndex = FINode->getIndex();
23854
23855 // Return the chain operand without making any DAG nodes.
23856 return Chain;
23857}
23858
23859static SDValue MarkEHGuard(SDValue Op, SelectionDAG &DAG) {
23860 MachineFunction &MF = DAG.getMachineFunction();
23861 SDValue Chain = Op.getOperand(0);
23862 SDValue EHGuard = Op.getOperand(2);
23863 WinEHFuncInfo *EHInfo = MF.getWinEHFuncInfo();
23864 if (!EHInfo)
23865 report_fatal_error("EHGuard only live in functions using WinEH");
23866
23867 // Cast the operand to an alloca, and remember the frame index.
23868 auto *FINode = dyn_cast<FrameIndexSDNode>(EHGuard);
23869 if (!FINode)
23870 report_fatal_error("llvm.x86.seh.ehguard expects a static alloca");
23871 EHInfo->EHGuardFrameIndex = FINode->getIndex();
23872
23873 // Return the chain operand without making any DAG nodes.
23874 return Chain;
23875}
23876
23877/// Emit Truncating Store with signed or unsigned saturation.
23878static SDValue
23879EmitTruncSStore(bool SignedSat, SDValue Chain, const SDLoc &Dl, SDValue Val,
23880 SDValue Ptr, EVT MemVT, MachineMemOperand *MMO,
23881 SelectionDAG &DAG) {
23882
23883 SDVTList VTs = DAG.getVTList(MVT::Other);
23884 SDValue Undef = DAG.getUNDEF(Ptr.getValueType());
23885 SDValue Ops[] = { Chain, Val, Ptr, Undef };
23886 return SignedSat ?
23887 DAG.getTargetMemSDNode<TruncSStoreSDNode>(VTs, Ops, Dl, MemVT, MMO) :
23888 DAG.getTargetMemSDNode<TruncUSStoreSDNode>(VTs, Ops, Dl, MemVT, MMO);
23889}
23890
23891/// Emit Masked Truncating Store with signed or unsigned saturation.
23892static SDValue
23893EmitMaskedTruncSStore(bool SignedSat, SDValue Chain, const SDLoc &Dl,
23894 SDValue Val, SDValue Ptr, SDValue Mask, EVT MemVT,
23895 MachineMemOperand *MMO, SelectionDAG &DAG) {
23896
23897 SDVTList VTs = DAG.getVTList(MVT::Other);
23898 SDValue Ops[] = { Chain, Val, Ptr, Mask };
23899 return SignedSat ?
23900 DAG.getTargetMemSDNode<MaskedTruncSStoreSDNode>(VTs, Ops, Dl, MemVT, MMO) :
23901 DAG.getTargetMemSDNode<MaskedTruncUSStoreSDNode>(VTs, Ops, Dl, MemVT, MMO);
23902}
23903
23904static SDValue LowerINTRINSIC_W_CHAIN(SDValue Op, const X86Subtarget &Subtarget,
23905 SelectionDAG &DAG) {
23906 unsigned IntNo = Op.getConstantOperandVal(1);
23907 const IntrinsicData *IntrData = getIntrinsicWithChain(IntNo);
23908 if (!IntrData) {
23909 switch (IntNo) {
23910 case llvm::Intrinsic::x86_seh_ehregnode:
23911 return MarkEHRegistrationNode(Op, DAG);
23912 case llvm::Intrinsic::x86_seh_ehguard:
23913 return MarkEHGuard(Op, DAG);
23914 case llvm::Intrinsic::x86_rdpkru: {
23915 SDLoc dl(Op);
23916 SDVTList VTs = DAG.getVTList(MVT::i32, MVT::Other);
23917 // Create a RDPKRU node and pass 0 to the ECX parameter.
23918 return DAG.getNode(X86ISD::RDPKRU, dl, VTs, Op.getOperand(0),
23919 DAG.getConstant(0, dl, MVT::i32));
23920 }
23921 case llvm::Intrinsic::x86_wrpkru: {
23922 SDLoc dl(Op);
23923 // Create a WRPKRU node, pass the input to the EAX parameter, and pass 0
23924 // to the EDX and ECX parameters.
23925 return DAG.getNode(X86ISD::WRPKRU, dl, MVT::Other,
23926 Op.getOperand(0), Op.getOperand(2),
23927 DAG.getConstant(0, dl, MVT::i32),
23928 DAG.getConstant(0, dl, MVT::i32));
23929 }
23930 case llvm::Intrinsic::x86_flags_read_u32:
23931 case llvm::Intrinsic::x86_flags_read_u64:
23932 case llvm::Intrinsic::x86_flags_write_u32:
23933 case llvm::Intrinsic::x86_flags_write_u64: {
23934 // We need a frame pointer because this will get lowered to a PUSH/POP
23935 // sequence.
23936 MachineFrameInfo &MFI = DAG.getMachineFunction().getFrameInfo();
23937 MFI.setHasCopyImplyingStackAdjustment(true);
23938 // Don't do anything here, we will expand these intrinsics out later
23939 // during FinalizeISel in EmitInstrWithCustomInserter.
23940 return SDValue();
23941 }
23942 case Intrinsic::x86_lwpins32:
23943 case Intrinsic::x86_lwpins64:
23944 case Intrinsic::x86_umwait:
23945 case Intrinsic::x86_tpause: {
23946 SDLoc dl(Op);
23947 SDValue Chain = Op->getOperand(0);
23948 SDVTList VTs = DAG.getVTList(MVT::i32, MVT::Other);
23949 unsigned Opcode;
23950
23951 switch (IntNo) {
23952 default: llvm_unreachable("Impossible intrinsic");
23953 case Intrinsic::x86_umwait:
23954 Opcode = X86ISD::UMWAIT;
23955 break;
23956 case Intrinsic::x86_tpause:
23957 Opcode = X86ISD::TPAUSE;
23958 break;
23959 case Intrinsic::x86_lwpins32:
23960 case Intrinsic::x86_lwpins64:
23961 Opcode = X86ISD::LWPINS;
23962 break;
23963 }
23964
23965 SDValue Operation =
23966 DAG.getNode(Opcode, dl, VTs, Chain, Op->getOperand(2),
23967 Op->getOperand(3), Op->getOperand(4));
23968 SDValue SetCC = getSETCC(X86::COND_B, Operation.getValue(0), dl, DAG);
23969 return DAG.getNode(ISD::MERGE_VALUES, dl, Op->getVTList(), SetCC,
23970 Operation.getValue(1));
23971 }
23972 case Intrinsic::x86_enqcmd:
23973 case Intrinsic::x86_enqcmds: {
23974 SDLoc dl(Op);
23975 SDValue Chain = Op.getOperand(0);
23976 SDVTList VTs = DAG.getVTList(MVT::i32, MVT::Other);
23977 unsigned Opcode;
23978 switch (IntNo) {
23979 default: llvm_unreachable("Impossible intrinsic!");
23980 case Intrinsic::x86_enqcmd:
23981 Opcode = X86ISD::ENQCMD;
23982 break;
23983 case Intrinsic::x86_enqcmds:
23984 Opcode = X86ISD::ENQCMDS;
23985 break;
23986 }
23987 SDValue Operation = DAG.getNode(Opcode, dl, VTs, Chain, Op.getOperand(2),
23988 Op.getOperand(3));
23989 SDValue SetCC = getSETCC(X86::COND_E, Operation.getValue(0), dl, DAG);
23990 return DAG.getNode(ISD::MERGE_VALUES, dl, Op->getVTList(), SetCC,
23991 Operation.getValue(1));
23992 }
23993 }
23994 return SDValue();
23995 }
23996
23997 SDLoc dl(Op);
23998 switch(IntrData->Type) {
23999 default: llvm_unreachable("Unknown Intrinsic Type");
24000 case RDSEED:
24001 case RDRAND: {
24002 // Emit the node with the right value type.
24003 SDVTList VTs = DAG.getVTList(Op->getValueType(0), MVT::i32, MVT::Other);
24004 SDValue Result = DAG.getNode(IntrData->Opc0, dl, VTs, Op.getOperand(0));
24005
24006 // If the value returned by RDRAND/RDSEED was valid (CF=1), return 1.
24007 // Otherwise return the value from Rand, which is always 0, casted to i32.
24008 SDValue Ops[] = {DAG.getZExtOrTrunc(Result, dl, Op->getValueType(1)),
24009 DAG.getConstant(1, dl, Op->getValueType(1)),
24010 DAG.getTargetConstant(X86::COND_B, dl, MVT::i8),
24011 SDValue(Result.getNode(), 1)};
24012 SDValue isValid = DAG.getNode(X86ISD::CMOV, dl, Op->getValueType(1), Ops);
24013
24014 // Return { result, isValid, chain }.
24015 return DAG.getNode(ISD::MERGE_VALUES, dl, Op->getVTList(), Result, isValid,
24016 SDValue(Result.getNode(), 2));
24017 }
24018 case GATHER_AVX2: {
24019 SDValue Chain = Op.getOperand(0);
24020 SDValue Src = Op.getOperand(2);
24021 SDValue Base = Op.getOperand(3);
24022 SDValue Index = Op.getOperand(4);
24023 SDValue Mask = Op.getOperand(5);
24024 SDValue Scale = Op.getOperand(6);
24025 return getAVX2GatherNode(IntrData->Opc0, Op, DAG, Src, Mask, Base, Index,
24026 Scale, Chain, Subtarget);
24027 }
24028 case GATHER: {
24029 // gather(v1, mask, index, base, scale);
24030 SDValue Chain = Op.getOperand(0);
24031 SDValue Src = Op.getOperand(2);
24032 SDValue Base = Op.getOperand(3);
24033 SDValue Index = Op.getOperand(4);
24034 SDValue Mask = Op.getOperand(5);
24035 SDValue Scale = Op.getOperand(6);
24036 return getGatherNode(Op, DAG, Src, Mask, Base, Index, Scale,
24037 Chain, Subtarget);
24038 }
24039 case SCATTER: {
24040 // scatter(base, mask, index, v1, scale);
24041 SDValue Chain = Op.getOperand(0);
24042 SDValue Base = Op.getOperand(2);
24043 SDValue Mask = Op.getOperand(3);
24044 SDValue Index = Op.getOperand(4);
24045 SDValue Src = Op.getOperand(5);
24046 SDValue Scale = Op.getOperand(6);
24047 return getScatterNode(IntrData->Opc0, Op, DAG, Src, Mask, Base, Index,
24048 Scale, Chain, Subtarget);
24049 }
24050 case PREFETCH: {
24051 const APInt &HintVal = Op.getConstantOperandAPInt(6);
24052 assert((HintVal == 2 || HintVal == 3) &&
24053 "Wrong prefetch hint in intrinsic: should be 2 or 3");
24054 unsigned Opcode = (HintVal == 2 ? IntrData->Opc1 : IntrData->Opc0);
24055 SDValue Chain = Op.getOperand(0);
24056 SDValue Mask = Op.getOperand(2);
24057 SDValue Index = Op.getOperand(3);
24058 SDValue Base = Op.getOperand(4);
24059 SDValue Scale = Op.getOperand(5);
24060 return getPrefetchNode(Opcode, Op, DAG, Mask, Base, Index, Scale, Chain,
24061 Subtarget);
24062 }
24063 // Read Time Stamp Counter (RDTSC) and Processor ID (RDTSCP).
24064 case RDTSC: {
24065 SmallVector<SDValue, 2> Results;
24066 getReadTimeStampCounter(Op.getNode(), dl, IntrData->Opc0, DAG, Subtarget,
24067 Results);
24068 return DAG.getMergeValues(Results, dl);
24069 }
24070 // Read Performance Monitoring Counters.
24071 case RDPMC:
24072 // GetExtended Control Register.
24073 case XGETBV: {
24074 SmallVector<SDValue, 2> Results;
24075
24076 // RDPMC uses ECX to select the index of the performance counter to read.
24077 // XGETBV uses ECX to select the index of the XCR register to return.
24078 // The result is stored into registers EDX:EAX.
24079 expandIntrinsicWChainHelper(Op.getNode(), dl, DAG, IntrData->Opc0, X86::ECX,
24080 Subtarget, Results);
24081 return DAG.getMergeValues(Results, dl);
24082 }
24083 // XTEST intrinsics.
24084 case XTEST: {
24085 SDVTList VTs = DAG.getVTList(Op->getValueType(0), MVT::Other);
24086 SDValue InTrans = DAG.getNode(IntrData->Opc0, dl, VTs, Op.getOperand(0));
24087
24088 SDValue SetCC = getSETCC(X86::COND_NE, InTrans, dl, DAG);
24089 SDValue Ret = DAG.getNode(ISD::ZERO_EXTEND, dl, Op->getValueType(0), SetCC);
24090 return DAG.getNode(ISD::MERGE_VALUES, dl, Op->getVTList(),
24091 Ret, SDValue(InTrans.getNode(), 1));
24092 }
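// XTEST clears ZF while a transaction is active, so the COND_NE setcc above
// returns 1 inside an RTM/HLE transaction and 0 otherwise.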
24093 case TRUNCATE_TO_MEM_VI8:
24094 case TRUNCATE_TO_MEM_VI16:
24095 case TRUNCATE_TO_MEM_VI32: {
24096 SDValue Mask = Op.getOperand(4);
24097 SDValue DataToTruncate = Op.getOperand(3);
24098 SDValue Addr = Op.getOperand(2);
24099 SDValue Chain = Op.getOperand(0);
24100
24101 MemIntrinsicSDNode *MemIntr = dyn_cast<MemIntrinsicSDNode>(Op);
24102 assert(MemIntr && "Expected MemIntrinsicSDNode!");
24103
24104 EVT MemVT = MemIntr->getMemoryVT();
24105
24106 uint16_t TruncationOp = IntrData->Opc0;
24107 switch (TruncationOp) {
24108 case X86ISD::VTRUNC: {
24109 if (isAllOnesConstant(Mask)) // return just a truncate store
24110 return DAG.getTruncStore(Chain, dl, DataToTruncate, Addr, MemVT,
24111 MemIntr->getMemOperand());
24112
24113 MVT MaskVT = MVT::getVectorVT(MVT::i1, MemVT.getVectorNumElements());
24114 SDValue VMask = getMaskNode(Mask, MaskVT, Subtarget, DAG, dl);
24115
24116 return DAG.getMaskedStore(Chain, dl, DataToTruncate, Addr, VMask, MemVT,
24117 MemIntr->getMemOperand(), true /* truncating */);
24118 }
24119 case X86ISD::VTRUNCUS:
24120 case X86ISD::VTRUNCS: {
24121 bool IsSigned = (TruncationOp == X86ISD::VTRUNCS);
24122 if (isAllOnesConstant(Mask))
24123 return EmitTruncSStore(IsSigned, Chain, dl, DataToTruncate, Addr, MemVT,
24124 MemIntr->getMemOperand(), DAG);
24125
24126 MVT MaskVT = MVT::getVectorVT(MVT::i1, MemVT.getVectorNumElements());
24127 SDValue VMask = getMaskNode(Mask, MaskVT, Subtarget, DAG, dl);
24128
24129 return EmitMaskedTruncSStore(IsSigned, Chain, dl, DataToTruncate, Addr,
24130 VMask, MemVT, MemIntr->getMemOperand(), DAG);
24131 }
24132 default:
24133 llvm_unreachable("Unsupported truncstore intrinsic");
24134 }
24135 }
24136 }
24137}
24138
24139SDValue X86TargetLowering::LowerRETURNADDR(SDValue Op,
24140 SelectionDAG &DAG) const {
24141 MachineFrameInfo &MFI = DAG.getMachineFunction().getFrameInfo();
24142 MFI.setReturnAddressIsTaken(true);
24143
24144 if (verifyReturnAddressArgumentIsConstant(Op, DAG))
24145 return SDValue();
24146
24147 unsigned Depth = Op.getConstantOperandVal(0);
24148 SDLoc dl(Op);
24149 EVT PtrVT = getPointerTy(DAG.getDataLayout());
24150
24151 if (Depth > 0) {
24152 SDValue FrameAddr = LowerFRAMEADDR(Op, DAG);
24153 const X86RegisterInfo *RegInfo = Subtarget.getRegisterInfo();
24154 SDValue Offset = DAG.getConstant(RegInfo->getSlotSize(), dl, PtrVT);
24155 return DAG.getLoad(PtrVT, dl, DAG.getEntryNode(),
24156 DAG.getNode(ISD::ADD, dl, PtrVT, FrameAddr, Offset),
24157 MachinePointerInfo());
24158 }
24159
24160 // Just load the return address.
24161 SDValue RetAddrFI = getReturnAddressFrameIndex(DAG);
24162 return DAG.getLoad(PtrVT, dl, DAG.getEntryNode(), RetAddrFI,
24163 MachinePointerInfo());
24164}
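// For Depth > 0 the return address is loaded from one slot above the walked
// frame address (FrameAddr + SlotSize); for Depth == 0 it is loaded directly
// from the return-address frame index.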
24165
24166SDValue X86TargetLowering::LowerADDROFRETURNADDR(SDValue Op,
24167 SelectionDAG &DAG) const {
24168 DAG.getMachineFunction().getFrameInfo().setReturnAddressIsTaken(true);
24169 return getReturnAddressFrameIndex(DAG);
24170}
24171
24172SDValue X86TargetLowering::LowerFRAMEADDR(SDValue Op, SelectionDAG &DAG) const {
24173 MachineFunction &MF = DAG.getMachineFunction();
24174 MachineFrameInfo &MFI = MF.getFrameInfo();
24175 X86MachineFunctionInfo *FuncInfo = MF.getInfo<X86MachineFunctionInfo>();
24176 const X86RegisterInfo *RegInfo = Subtarget.getRegisterInfo();
24177 EVT VT = Op.getValueType();
24178
24179 MFI.setFrameAddressIsTaken(true);
24180
24181 if (MF.getTarget().getMCAsmInfo()->usesWindowsCFI()) {
24182 // Depth > 0 makes no sense on targets which use Windows unwind codes. It
24183 // is not possible to crawl up the stack without looking at the unwind codes
24184 // simultaneously.
24185 int FrameAddrIndex = FuncInfo->getFAIndex();
24186 if (!FrameAddrIndex) {
24187 // Set up a frame object for the return address.
24188 unsigned SlotSize = RegInfo->getSlotSize();
24189 FrameAddrIndex = MF.getFrameInfo().CreateFixedObject(
24190 SlotSize, /*SPOffset=*/0, /*IsImmutable=*/false);
24191 FuncInfo->setFAIndex(FrameAddrIndex);
24192 }
24193 return DAG.getFrameIndex(FrameAddrIndex, VT);
24194 }
24195
24196 unsigned FrameReg =
24197 RegInfo->getPtrSizedFrameRegister(DAG.getMachineFunction());
24198 SDLoc dl(Op); // FIXME probably not meaningful
24199 unsigned Depth = Op.getConstantOperandVal(0);
24200 assert(((FrameReg == X86::RBP && VT == MVT::i64) ||
24201 (FrameReg == X86::EBP && VT == MVT::i32)) &&
24202 "Invalid Frame Register!");
24203 SDValue FrameAddr = DAG.getCopyFromReg(DAG.getEntryNode(), dl, FrameReg, VT);
24204 while (Depth--)
24205 FrameAddr = DAG.getLoad(VT, dl, DAG.getEntryNode(), FrameAddr,
24206 MachinePointerInfo());
24207 return FrameAddr;
24208}
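// Each extra Depth level is resolved by loading the caller's saved frame
// pointer from the address held in the current one; on Windows-CFI targets
// a fixed frame object is returned instead, since the frame chain cannot be
// walked without consulting the unwind codes.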
24209
24210// FIXME? Maybe this could be a TableGen attribute on some registers and
24211// this table could be generated automatically from RegInfo.
24212Register X86TargetLowering::getRegisterByName(const char* RegName, EVT VT,
24213 const MachineFunction &MF) const {
24214 const TargetFrameLowering &TFI = *Subtarget.getFrameLowering();
24215
24216 Register Reg = StringSwitch<unsigned>(RegName)
24217 .Case("esp", X86::ESP)
24218 .Case("rsp", X86::RSP)
24219 .Case("ebp", X86::EBP)
24220 .Case("rbp", X86::RBP)
24221 .Default(0);
24222
24223 if (Reg == X86::EBP || Reg == X86::RBP) {
24224 if (!TFI.hasFP(MF))
24225 report_fatal_error("register " + StringRef(RegName) +
24226 " is allocatable: function has no frame pointer");
24227#ifndef NDEBUG
24228 else {
24229 const X86RegisterInfo *RegInfo = Subtarget.getRegisterInfo();
24230 Register FrameReg = RegInfo->getPtrSizedFrameRegister(MF);
24231 assert((FrameReg == X86::EBP || FrameReg == X86::RBP) &&
24232 "Invalid Frame Register!");
24233 }
24234#endif
24235 }
24236
24237 if (Reg)
24238 return Reg;
24239
24240 report_fatal_error("Invalid register name global variable");
24241}
24242
24243SDValue X86TargetLowering::LowerFRAME_TO_ARGS_OFFSET(SDValue Op,
24244 SelectionDAG &DAG) const {
24245 const X86RegisterInfo *RegInfo = Subtarget.getRegisterInfo();
24246 return DAG.getIntPtrConstant(2 * RegInfo->getSlotSize(), SDLoc(Op));
24247}
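// The 2 * SlotSize offset accounts for the saved frame pointer and the return
// address slots that sit between the frame address and the incoming arguments
// on an FP-based frame.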
24248
24249unsigned X86TargetLowering::getExceptionPointerRegister(
24250 const Constant *PersonalityFn) const {
24251 if (classifyEHPersonality(PersonalityFn) == EHPersonality::CoreCLR)
24252 return Subtarget.isTarget64BitLP64() ? X86::RDX : X86::EDX;
24253
24254 return Subtarget.isTarget64BitLP64() ? X86::RAX : X86::EAX;
24255}
24256
24257unsigned X86TargetLowering::getExceptionSelectorRegister(
24258 const Constant *PersonalityFn) const {
24259 // Funclet personalities don't use selectors (the runtime does the selection).
24260 assert(!isFuncletEHPersonality(classifyEHPersonality(PersonalityFn)));
24261 return Subtarget.isTarget64BitLP64() ? X86::RDX : X86::EDX;
24262}
24263
24264bool X86TargetLowering::needsFixedCatchObjects() const {
24265 return Subtarget.isTargetWin64();
24266}
24267
24268SDValue X86TargetLowering::LowerEH_RETURN(SDValue Op, SelectionDAG &DAG) const {
24269 SDValue Chain = Op.getOperand(0);
24270 SDValue Offset = Op.getOperand(1);
24271 SDValue Handler = Op.getOperand(2);
24272 SDLoc dl (Op);
24273
24274 EVT PtrVT = getPointerTy(DAG.getDataLayout());
24275 const X86RegisterInfo *RegInfo = Subtarget.getRegisterInfo();
24276 Register FrameReg = RegInfo->getFrameRegister(DAG.getMachineFunction());
24277 assert(((FrameReg == X86::RBP && PtrVT == MVT::i64) ||
24278 (FrameReg == X86::EBP && PtrVT == MVT::i32)) &&
24279 "Invalid Frame Register!");
24280 SDValue Frame = DAG.getCopyFromReg(DAG.getEntryNode(), dl, FrameReg, PtrVT);
24281 unsigned StoreAddrReg = (PtrVT == MVT::i64) ? X86::RCX : X86::ECX;
24282
24283 SDValue StoreAddr = DAG.getNode(ISD::ADD, dl, PtrVT, Frame,
24284 DAG.getIntPtrConstant(RegInfo->getSlotSize(),
24285 dl));
24286 StoreAddr = DAG.getNode(ISD::ADD, dl, PtrVT, StoreAddr, Offset);
24287 Chain = DAG.getStore(Chain, dl, Handler, StoreAddr, MachinePointerInfo());
24288 Chain = DAG.getCopyToReg(Chain, dl, StoreAddrReg, StoreAddr);
24289
24290 return DAG.getNode(X86ISD::EH_RETURN, dl, MVT::Other, Chain,
24291 DAG.getRegister(StoreAddrReg, PtrVT));
24292}
24293
24294SDValue X86TargetLowering::lowerEH_SJLJ_SETJMP(SDValue Op,
24295 SelectionDAG &DAG) const {
24296 SDLoc DL(Op);
24297 // If the subtarget is not 64-bit, we may need the global base reg
24298 // after isel pseudo expansion, i.e., after the CGBR pass has run.
24299 // Therefore, ask for the GlobalBaseReg now, so that the pass
24300 // inserts the code for us in case we need it.
24301 // Otherwise, we will end up in a situation where we will
24302 // reference a virtual register that is not defined!
24303 if (!Subtarget.is64Bit()) {
24304 const X86InstrInfo *TII = Subtarget.getInstrInfo();
24305 (void)TII->getGlobalBaseReg(&DAG.getMachineFunction());
24306 }
24307 return DAG.getNode(X86ISD::EH_SJLJ_SETJMP, DL,
24308 DAG.getVTList(MVT::i32, MVT::Other),
24309 Op.getOperand(0), Op.getOperand(1));
24310}
24311
24312SDValue X86TargetLowering::lowerEH_SJLJ_LONGJMP(SDValue Op,
24313 SelectionDAG &DAG) const {
24314 SDLoc DL(Op);
24315 return DAG.getNode(X86ISD::EH_SJLJ_LONGJMP, DL, MVT::Other,
24316 Op.getOperand(0), Op.getOperand(1));
24317}
24318
24319SDValue X86TargetLowering::lowerEH_SJLJ_SETUP_DISPATCH(SDValue Op,
24320 SelectionDAG &DAG) const {
24321 SDLoc DL(Op);
24322 return DAG.getNode(X86ISD::EH_SJLJ_SETUP_DISPATCH, DL, MVT::Other,
24323 Op.getOperand(0));
24324}
24325
24326static SDValue LowerADJUST_TRAMPOLINE(SDValue Op, SelectionDAG &DAG) {
24327 return Op.getOperand(0);
24328}
24329
24330SDValue X86TargetLowering::LowerINIT_TRAMPOLINE(SDValue Op,
24331 SelectionDAG &DAG) const {
24332 SDValue Root = Op.getOperand(0);
24333 SDValue Trmp = Op.getOperand(1); // trampoline
24334 SDValue FPtr = Op.getOperand(2); // nested function
24335 SDValue Nest = Op.getOperand(3); // 'nest' parameter value
24336 SDLoc dl (Op);
24337
24338 const Value *TrmpAddr = cast<SrcValueSDNode>(Op.getOperand(4))->getValue();
24339 const TargetRegisterInfo *TRI = Subtarget.getRegisterInfo();
24340
24341 if (Subtarget.is64Bit()) {
24342 SDValue OutChains[6];
24343
24344 // Large code-model.
24345 const unsigned char JMP64r = 0xFF; // 64-bit jmp through register opcode.
24346 const unsigned char MOV64ri = 0xB8; // X86::MOV64ri opcode.
24347
24348 const unsigned char N86R10 = TRI->getEncodingValue(X86::R10) & 0x7;
24349 const unsigned char N86R11 = TRI->getEncodingValue(X86::R11) & 0x7;
24350
24351 const unsigned char REX_WB = 0x40 | 0x08 | 0x01; // REX prefix
24352
24353 // Load the pointer to the nested function into R11.
24354 unsigned OpCode = ((MOV64ri | N86R11) << 8) | REX_WB; // movabsq r11
24355 SDValue Addr = Trmp;
24356 OutChains[0] = DAG.getStore(Root, dl, DAG.getConstant(OpCode, dl, MVT::i16),
24357 Addr, MachinePointerInfo(TrmpAddr));
24358
24359 Addr = DAG.getNode(ISD::ADD, dl, MVT::i64, Trmp,
24360 DAG.getConstant(2, dl, MVT::i64));
24361 OutChains[1] =
24362 DAG.getStore(Root, dl, FPtr, Addr, MachinePointerInfo(TrmpAddr, 2),
24363 /* Alignment = */ 2);
24364
24365 // Load the 'nest' parameter value into R10.
24366 // R10 is specified in X86CallingConv.td
24367 OpCode = ((MOV64ri | N86R10) << 8) | REX_WB; // movabsq r10
24368 Addr = DAG.getNode(ISD::ADD, dl, MVT::i64, Trmp,
24369 DAG.getConstant(10, dl, MVT::i64));
24370 OutChains[2] = DAG.getStore(Root, dl, DAG.getConstant(OpCode, dl, MVT::i16),
24371 Addr, MachinePointerInfo(TrmpAddr, 10));
24372
24373 Addr = DAG.getNode(ISD::ADD, dl, MVT::i64, Trmp,
24374 DAG.getConstant(12, dl, MVT::i64));
24375 OutChains[3] =
24376 DAG.getStore(Root, dl, Nest, Addr, MachinePointerInfo(TrmpAddr, 12),
24377 /* Alignment = */ 2);
24378
24379 // Jump to the nested function.
24380 OpCode = (JMP64r << 8) | REX_WB; // jmpq *...
24381 Addr = DAG.getNode(ISD::ADD, dl, MVT::i64, Trmp,
24382 DAG.getConstant(20, dl, MVT::i64));
24383 OutChains[4] = DAG.getStore(Root, dl, DAG.getConstant(OpCode, dl, MVT::i16),
24384 Addr, MachinePointerInfo(TrmpAddr, 20));
24385
24386 unsigned char ModRM = N86R11 | (4 << 3) | (3 << 6); // ...r11
24387 Addr = DAG.getNode(ISD::ADD, dl, MVT::i64, Trmp,
24388 DAG.getConstant(22, dl, MVT::i64));
24389 OutChains[5] = DAG.getStore(Root, dl, DAG.getConstant(ModRM, dl, MVT::i8),
24390 Addr, MachinePointerInfo(TrmpAddr, 22));
24391
24392 return DAG.getNode(ISD::TokenFactor, dl, MVT::Other, OutChains);
24393 } else {
24394 const Function *Func =
24395 cast<Function>(cast<SrcValueSDNode>(Op.getOperand(5))->getValue());
24396 CallingConv::ID CC = Func->getCallingConv();
24397 unsigned NestReg;
24398
24399 switch (CC) {
24400 default:
24401 llvm_unreachable("Unsupported calling convention")::llvm::llvm_unreachable_internal("Unsupported calling convention"
, "/build/llvm-toolchain-snapshot-10~svn374877/lib/Target/X86/X86ISelLowering.cpp"
, 24401)
;
24402 case CallingConv::C:
24403 case CallingConv::X86_StdCall: {
24404 // Pass 'nest' parameter in ECX.
24405 // Must be kept in sync with X86CallingConv.td
24406 NestReg = X86::ECX;
24407
24408 // Check that ECX wasn't needed by an 'inreg' parameter.
24409 FunctionType *FTy = Func->getFunctionType();
24410 const AttributeList &Attrs = Func->getAttributes();
24411
24412 if (!Attrs.isEmpty() && !Func->isVarArg()) {
24413 unsigned InRegCount = 0;
24414 unsigned Idx = 1;
24415
24416 for (FunctionType::param_iterator I = FTy->param_begin(),
24417 E = FTy->param_end(); I != E; ++I, ++Idx)
24418 if (Attrs.hasAttribute(Idx, Attribute::InReg)) {
24419 auto &DL = DAG.getDataLayout();
24420 // FIXME: should only count parameters that are lowered to integers.
24421 InRegCount += (DL.getTypeSizeInBits(*I) + 31) / 32;
24422 }
24423
24424 if (InRegCount > 2) {
24425 report_fatal_error("Nest register in use - reduce number of inreg"
24426 " parameters!");
24427 }
24428 }
24429 break;
24430 }
24431 case CallingConv::X86_FastCall:
24432 case CallingConv::X86_ThisCall:
24433 case CallingConv::Fast:
24434 case CallingConv::Tail:
24435 // Pass 'nest' parameter in EAX.
24436 // Must be kept in sync with X86CallingConv.td
24437 NestReg = X86::EAX;
24438 break;
24439 }
24440
24441 SDValue OutChains[4];
24442 SDValue Addr, Disp;
24443
24444 Addr = DAG.getNode(ISD::ADD, dl, MVT::i32, Trmp,
24445 DAG.getConstant(10, dl, MVT::i32));
24446 Disp = DAG.getNode(ISD::SUB, dl, MVT::i32, FPtr, Addr);
24447
24448 // This is storing the opcode for MOV32ri.
24449 const unsigned char MOV32ri = 0xB8; // X86::MOV32ri's opcode byte.
24450 const unsigned char N86Reg = TRI->getEncodingValue(NestReg) & 0x7;
24451 OutChains[0] =
24452 DAG.getStore(Root, dl, DAG.getConstant(MOV32ri | N86Reg, dl, MVT::i8),
24453 Trmp, MachinePointerInfo(TrmpAddr));
24454
24455 Addr = DAG.getNode(ISD::ADD, dl, MVT::i32, Trmp,
24456 DAG.getConstant(1, dl, MVT::i32));
24457 OutChains[1] =
24458 DAG.getStore(Root, dl, Nest, Addr, MachinePointerInfo(TrmpAddr, 1),
24459 /* Alignment = */ 1);
24460
24461 const unsigned char JMP = 0xE9; // jmp <32bit dst> opcode.
24462 Addr = DAG.getNode(ISD::ADD, dl, MVT::i32, Trmp,
24463 DAG.getConstant(5, dl, MVT::i32));
24464 OutChains[2] = DAG.getStore(Root, dl, DAG.getConstant(JMP, dl, MVT::i8),
24465 Addr, MachinePointerInfo(TrmpAddr, 5),
24466 /* Alignment = */ 1);
24467
24468 Addr = DAG.getNode(ISD::ADD, dl, MVT::i32, Trmp,
24469 DAG.getConstant(6, dl, MVT::i32));
24470 OutChains[3] =
24471 DAG.getStore(Root, dl, Disp, Addr, MachinePointerInfo(TrmpAddr, 6),
24472 /* Alignment = */ 1);
24473
24474 return DAG.getNode(ISD::TokenFactor, dl, MVT::Other, OutChains);
24475 }
24476}
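For reference, a minimal standalone sketch (not part of this file) of the byte layout that the 64-bit path above writes into the trampoline; the offsets and opcode bytes are derived from the stores in LowerINIT_TRAMPOLINE, and the small main() harness only checks the encoding arithmetic, so treat it as an illustration rather than the authoritative encoding:

#include <cassert>
#include <cstdint>

// Byte map of the trampoline assembled by the stores above:
//    0: 49 BB <FPtr:8>   movabs $FPtr, %r11
//   10: 49 BA <Nest:8>   movabs $Nest, %r10
//   20: 49 FF E3         jmpq  *%r11
int main() {
  const uint8_t REX_WB = 0x40 | 0x08 | 0x01;          // 0x49
  const uint8_t MovR11 = 0xB8 | 3, MovR10 = 0xB8 | 2; // MOV64ri | (reg & 7)
  const uint8_t ModRM  = 3 | (4 << 3) | (3 << 6);     // rm=r11, /4 = JMP, mod=11b
  assert(REX_WB == 0x49 && MovR11 == 0xBB && MovR10 == 0xBA && ModRM == 0xE3);
  assert(2 + 8 + 2 + 8 + 2 + 1 == 23);                // bytes stored at offsets 0..22
  return 0;
}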
24477
24478SDValue X86TargetLowering::LowerFLT_ROUNDS_(SDValue Op,
24479 SelectionDAG &DAG) const {
24480 /*
24481 The rounding mode is in bits 11:10 of FPSR, and has the following
24482 settings:
24483 00 Round to nearest
24484 01 Round to -inf
24485 10 Round to +inf
24486 11 Round to 0
24487
24488 FLT_ROUNDS, on the other hand, expects the following:
24489 -1 Undefined
24490 0 Round to 0
24491 1 Round to nearest
24492 2 Round to +inf
24493 3 Round to -inf
24494
24495 To perform the conversion, we do:
24496 (((((FPSR & 0x800) >> 11) | ((FPSR & 0x400) >> 9)) + 1) & 3)
24497 */
24498
24499 MachineFunction &MF = DAG.getMachineFunction();
24500 const TargetFrameLowering &TFI = *Subtarget.getFrameLowering();
24501 unsigned StackAlignment = TFI.getStackAlignment();
24502 MVT VT = Op.getSimpleValueType();
24503 SDLoc DL(Op);
24504
24505 // Save FP Control Word to stack slot
24506 int SSFI = MF.getFrameInfo().CreateStackObject(2, StackAlignment, false);
24507 SDValue StackSlot =
24508 DAG.getFrameIndex(SSFI, getPointerTy(DAG.getDataLayout()));
24509
24510 MachineMemOperand *MMO =
24511 MF.getMachineMemOperand(MachinePointerInfo::getFixedStack(MF, SSFI),
24512 MachineMemOperand::MOStore, 2, 2);
24513
24514 SDValue Ops[] = { DAG.getEntryNode(), StackSlot };
24515 SDValue Chain = DAG.getMemIntrinsicNode(X86ISD::FNSTCW16m, DL,
24516 DAG.getVTList(MVT::Other),
24517 Ops, MVT::i16, MMO);
24518
24519 // Load FP Control Word from stack slot
24520 SDValue CWD =
24521 DAG.getLoad(MVT::i16, DL, Chain, StackSlot, MachinePointerInfo());
24522
24523 // Transform as necessary
24524 SDValue CWD1 =
24525 DAG.getNode(ISD::SRL, DL, MVT::i16,
24526 DAG.getNode(ISD::AND, DL, MVT::i16,
24527 CWD, DAG.getConstant(0x800, DL, MVT::i16)),
24528 DAG.getConstant(11, DL, MVT::i8));
24529 SDValue CWD2 =
24530 DAG.getNode(ISD::SRL, DL, MVT::i16,
24531 DAG.getNode(ISD::AND, DL, MVT::i16,
24532 CWD, DAG.getConstant(0x400, DL, MVT::i16)),
24533 DAG.getConstant(9, DL, MVT::i8));
24534
24535 SDValue RetVal =
24536 DAG.getNode(ISD::AND, DL, MVT::i16,
24537 DAG.getNode(ISD::ADD, DL, MVT::i16,
24538 DAG.getNode(ISD::OR, DL, MVT::i16, CWD1, CWD2),
24539 DAG.getConstant(1, DL, MVT::i16)),
24540 DAG.getConstant(3, DL, MVT::i16));
24541
24542 return DAG.getNode((VT.getSizeInBits() < 16 ?
24543 ISD::TRUNCATE : ISD::ZERO_EXTEND), DL, VT, RetVal);
24544}
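A standalone scalar model of the conversion performed above; fltRoundsFromFPCW is an illustrative name, and CWD stands in for the 16-bit control word that the FNSTCW store produces:

#include <cassert>
#include <cstdint>

// Control-word rounding bits 11:10 -> FLT_ROUNDS encoding, as in the comment.
static int fltRoundsFromFPCW(uint16_t CWD) {
  return ((((CWD & 0x800) >> 11) | ((CWD & 0x400) >> 9)) + 1) & 3;
}

int main() {
  assert(fltRoundsFromFPCW(0x0000) == 1); // 00 round-to-nearest -> 1
  assert(fltRoundsFromFPCW(0x0400) == 3); // 01 round-to--inf    -> 3
  assert(fltRoundsFromFPCW(0x0800) == 2); // 10 round-to-+inf    -> 2
  assert(fltRoundsFromFPCW(0x0C00) == 0); // 11 round-to-zero    -> 0
  return 0;
}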
24545
24546// Split a unary integer op into 2 half-sized ops.
24547static SDValue LowerVectorIntUnary(SDValue Op, SelectionDAG &DAG) {
24548 MVT VT = Op.getSimpleValueType();
24549 unsigned NumElems = VT.getVectorNumElements();
24550 unsigned SizeInBits = VT.getSizeInBits();
24551 MVT EltVT = VT.getVectorElementType();
24552 SDValue Src = Op.getOperand(0);
24553 assert(EltVT == Src.getSimpleValueType().getVectorElementType() &&
24554 "Src and Op should have the same element type!");
24555
24556 // Extract the Lo/Hi vectors
24557 SDLoc dl(Op);
24558 SDValue Lo = extractSubVector(Src, 0, DAG, dl, SizeInBits / 2);
24559 SDValue Hi = extractSubVector(Src, NumElems / 2, DAG, dl, SizeInBits / 2);
24560
24561 MVT NewVT = MVT::getVectorVT(EltVT, NumElems / 2);
24562 return DAG.getNode(ISD::CONCAT_VECTORS, dl, VT,
24563 DAG.getNode(Op.getOpcode(), dl, NewVT, Lo),
24564 DAG.getNode(Op.getOpcode(), dl, NewVT, Hi));
24565}
24566
24567// Decompose 256-bit ops into smaller 128-bit ops.
24568static SDValue Lower256IntUnary(SDValue Op, SelectionDAG &DAG) {
24569 assert(Op.getSimpleValueType().is256BitVector() &&
24570 Op.getSimpleValueType().isInteger() &&
24571 "Only handle AVX 256-bit vector integer operation");
24572 return LowerVectorIntUnary(Op, DAG);
24573}
24574
24575// Decompose 512-bit ops into smaller 256-bit ops.
24576static SDValue Lower512IntUnary(SDValue Op, SelectionDAG &DAG) {
24577 assert(Op.getSimpleValueType().is512BitVector() &&
24578 Op.getSimpleValueType().isInteger() &&
24579 "Only handle AVX 512-bit vector integer operation");
24580 return LowerVectorIntUnary(Op, DAG);
24581}
24582
24583/// Lower a vector CTLZ using a natively supported vector CTLZ instruction.
24584//
24585// i8/i16 vectors are implemented using the dword LZCNT vector instruction
24586// ( sub(trunc(lzcnt(zext32(x)))) ). If zext32(x) is illegal,
24587// split the vector, perform the operation on its Lo and Hi parts and
24588// concatenate the results.
24589static SDValue LowerVectorCTLZ_AVX512CDI(SDValue Op, SelectionDAG &DAG,
24590 const X86Subtarget &Subtarget) {
24591 assert(Op.getOpcode() == ISD::CTLZ);
24592 SDLoc dl(Op);
24593 MVT VT = Op.getSimpleValueType();
24594 MVT EltVT = VT.getVectorElementType();
24595 unsigned NumElems = VT.getVectorNumElements();
24596
24597 assert((EltVT == MVT::i8 || EltVT == MVT::i16) &&
24598 "Unsupported element type");
24599
24600 // Split the vector; its Lo and Hi parts will be handled in the next iteration.
24601 if (NumElems > 16 ||
24602 (NumElems == 16 && !Subtarget.canExtendTo512DQ()))
24603 return LowerVectorIntUnary(Op, DAG);
24604
24605 MVT NewVT = MVT::getVectorVT(MVT::i32, NumElems);
24606 assert((NewVT.is256BitVector() || NewVT.is512BitVector()) &&
24607 "Unsupported value type for operation");
24608
24609 // Use native supported vector instruction vplzcntd.
24610 Op = DAG.getNode(ISD::ZERO_EXTEND, dl, NewVT, Op.getOperand(0));
24611 SDValue CtlzNode = DAG.getNode(ISD::CTLZ, dl, NewVT, Op);
24612 SDValue TruncNode = DAG.getNode(ISD::TRUNCATE, dl, VT, CtlzNode);
24613 SDValue Delta = DAG.getConstant(32 - EltVT.getSizeInBits(), dl, VT);
24614
24615 return DAG.getNode(ISD::SUB, dl, VT, TruncNode, Delta);
24616}
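A scalar sketch of the zext32 + LZCNT + subtract identity used above; ctlz8ViaZext is an illustrative helper name, and the GCC/Clang builtin __builtin_clz stands in for a single vplzcntd lane (which counts 32 for a zero input, hence the guard):

#include <cassert>
#include <cstdint>

// ctlz(x) for an i8 element x: ctlz32(zext32(x)) - (32 - 8).
static unsigned ctlz8ViaZext(uint8_t X) {
  unsigned Wide = X;                                // zext to 32 bits
  unsigned Lz32 = Wide ? __builtin_clz(Wide) : 32;  // model vplzcntd on one lane
  return Lz32 - (32 - 8);                           // subtract the widening delta
}

int main() {
  assert(ctlz8ViaZext(0x80) == 0);
  assert(ctlz8ViaZext(0x01) == 7);
  assert(ctlz8ViaZext(0x00) == 8);
  return 0;
}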
24617
24618// Lower CTLZ using a PSHUFB lookup table implementation.
24619static SDValue LowerVectorCTLZInRegLUT(SDValue Op, const SDLoc &DL,
24620 const X86Subtarget &Subtarget,
24621 SelectionDAG &DAG) {
24622 MVT VT = Op.getSimpleValueType();
24623 int NumElts = VT.getVectorNumElements();
24624 int NumBytes = NumElts * (VT.getScalarSizeInBits() / 8);
24625 MVT CurrVT = MVT::getVectorVT(MVT::i8, NumBytes);
24626
24627 // Per-nibble leading zero PSHUFB lookup table.
24628 const int LUT[16] = {/* 0 */ 4, /* 1 */ 3, /* 2 */ 2, /* 3 */ 2,
24629 /* 4 */ 1, /* 5 */ 1, /* 6 */ 1, /* 7 */ 1,
24630 /* 8 */ 0, /* 9 */ 0, /* a */ 0, /* b */ 0,
24631 /* c */ 0, /* d */ 0, /* e */ 0, /* f */ 0};
24632
24633 SmallVector<SDValue, 64> LUTVec;
24634 for (int i = 0; i < NumBytes; ++i)
24635 LUTVec.push_back(DAG.getConstant(LUT[i % 16], DL, MVT::i8));
24636 SDValue InRegLUT = DAG.getBuildVector(CurrVT, DL, LUTVec);
24637
24638 // Begin by bitcasting the input to byte vector, then split those bytes
24639 // into lo/hi nibbles and use the PSHUFB LUT to perform CTLZ on each of them.
24640 // If the hi input nibble is zero then we add both results together, otherwise
24641 // we just take the hi result (by masking the lo result to zero before the
24642 // add).
24643 SDValue Op0 = DAG.getBitcast(CurrVT, Op.getOperand(0));
24644 SDValue Zero = DAG.getConstant(0, DL, CurrVT);
24645
24646 SDValue NibbleShift = DAG.getConstant(0x4, DL, CurrVT);
24647 SDValue Lo = Op0;
24648 SDValue Hi = DAG.getNode(ISD::SRL, DL, CurrVT, Op0, NibbleShift);
24649 SDValue HiZ;
24650 if (CurrVT.is512BitVector()) {
24651 MVT MaskVT = MVT::getVectorVT(MVT::i1, CurrVT.getVectorNumElements());
24652 HiZ = DAG.getSetCC(DL, MaskVT, Hi, Zero, ISD::SETEQ);
24653 HiZ = DAG.getNode(ISD::SIGN_EXTEND, DL, CurrVT, HiZ);
24654 } else {
24655 HiZ = DAG.getSetCC(DL, CurrVT, Hi, Zero, ISD::SETEQ);
24656 }
24657
24658 Lo = DAG.getNode(X86ISD::PSHUFB, DL, CurrVT, InRegLUT, Lo);
24659 Hi = DAG.getNode(X86ISD::PSHUFB, DL, CurrVT, InRegLUT, Hi);
24660 Lo = DAG.getNode(ISD::AND, DL, CurrVT, Lo, HiZ);
24661 SDValue Res = DAG.getNode(ISD::ADD, DL, CurrVT, Lo, Hi);
24662
24663 // Merge result back from vXi8 back to VT, working on the lo/hi halves
24664 // of the current vector width in the same way we did for the nibbles.
24665 // If the upper half of the input element is zero then add the halves'
24666 // leading zero counts together, otherwise just use the upper half's.
24667 // Double the width of the result until we are at target width.
24668 while (CurrVT != VT) {
24669 int CurrScalarSizeInBits = CurrVT.getScalarSizeInBits();
24670 int CurrNumElts = CurrVT.getVectorNumElements();
24671 MVT NextSVT = MVT::getIntegerVT(CurrScalarSizeInBits * 2);
24672 MVT NextVT = MVT::getVectorVT(NextSVT, CurrNumElts / 2);
24673 SDValue Shift = DAG.getConstant(CurrScalarSizeInBits, DL, NextVT);
24674
24675 // Check if the upper half of the input element is zero.
24676 if (CurrVT.is512BitVector()) {
24677 MVT MaskVT = MVT::getVectorVT(MVT::i1, CurrVT.getVectorNumElements());
24678 HiZ = DAG.getSetCC(DL, MaskVT, DAG.getBitcast(CurrVT, Op0),
24679 DAG.getBitcast(CurrVT, Zero), ISD::SETEQ);
24680 HiZ = DAG.getNode(ISD::SIGN_EXTEND, DL, CurrVT, HiZ);
24681 } else {
24682 HiZ = DAG.getSetCC(DL, CurrVT, DAG.getBitcast(CurrVT, Op0),
24683 DAG.getBitcast(CurrVT, Zero), ISD::SETEQ);
24684 }
24685 HiZ = DAG.getBitcast(NextVT, HiZ);
24686
24687 // Move the upper/lower halves to the lower bits as we'll be extending to
24688 // NextVT. Mask the lower result to zero if HiZ is true and add the results
24689 // together.
24690 SDValue ResNext = Res = DAG.getBitcast(NextVT, Res);
24691 SDValue R0 = DAG.getNode(ISD::SRL, DL, NextVT, ResNext, Shift);
24692 SDValue R1 = DAG.getNode(ISD::SRL, DL, NextVT, HiZ, Shift);
24693 R1 = DAG.getNode(ISD::AND, DL, NextVT, ResNext, R1);
24694 Res = DAG.getNode(ISD::ADD, DL, NextVT, R0, R1);
24695 CurrVT = NextVT;
24696 }
24697
24698 return Res;
24699}
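A byte-at-a-time model of the PSHUFB nibble-LUT scheme above (ctlz8ViaNibbleLUT is an illustrative name); it uses the same 16-entry table and the same rule of only adding the low-nibble count when the high nibble is zero:

#include <cassert>
#include <cstdint>

static unsigned ctlz8ViaNibbleLUT(uint8_t X) {
  static const unsigned LUT[16] = {4, 3, 2, 2, 1, 1, 1, 1,
                                   0, 0, 0, 0, 0, 0, 0, 0};
  unsigned Hi = X >> 4, Lo = X & 0xF;
  return Hi ? LUT[Hi] : 4 + LUT[Lo]; // add the lo result only if the hi nibble is 0
}

int main() {
  assert(ctlz8ViaNibbleLUT(0x00) == 8);
  assert(ctlz8ViaNibbleLUT(0x0F) == 4);
  assert(ctlz8ViaNibbleLUT(0x10) == 3);
  assert(ctlz8ViaNibbleLUT(0xFF) == 0);
  return 0;
}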
24700
24701static SDValue LowerVectorCTLZ(SDValue Op, const SDLoc &DL,
24702 const X86Subtarget &Subtarget,
24703 SelectionDAG &DAG) {
24704 MVT VT = Op.getSimpleValueType();
24705
24706 if (Subtarget.hasCDI() &&
24707 // vXi8 vectors need to be promoted to 512-bits for vXi32.
24708 (Subtarget.canExtendTo512DQ() || VT.getVectorElementType() != MVT::i8))
24709 return LowerVectorCTLZ_AVX512CDI(Op, DAG, Subtarget);
24710
24711 // Decompose 256-bit ops into smaller 128-bit ops.
24712 if (VT.is256BitVector() && !Subtarget.hasInt256())
24713 return Lower256IntUnary(Op, DAG);
24714
24715 // Decompose 512-bit ops into smaller 256-bit ops.
24716 if (VT.is512BitVector() && !Subtarget.hasBWI())
24717 return Lower512IntUnary(Op, DAG);
24718
24719 assert(Subtarget.hasSSSE3() && "Expected SSSE3 support for PSHUFB");
24720 return LowerVectorCTLZInRegLUT(Op, DL, Subtarget, DAG);
24721}
24722
24723static SDValue LowerCTLZ(SDValue Op, const X86Subtarget &Subtarget,
24724 SelectionDAG &DAG) {
24725 MVT VT = Op.getSimpleValueType();
24726 MVT OpVT = VT;
24727 unsigned NumBits = VT.getSizeInBits();
24728 SDLoc dl(Op);
24729 unsigned Opc = Op.getOpcode();
24730
24731 if (VT.isVector())
24732 return LowerVectorCTLZ(Op, dl, Subtarget, DAG);
24733
24734 Op = Op.getOperand(0);
24735 if (VT == MVT::i8) {
24736 // Zero extend to i32 since there is not an i8 bsr.
24737 OpVT = MVT::i32;
24738 Op = DAG.getNode(ISD::ZERO_EXTEND, dl, OpVT, Op);
24739 }
24740
24741 // Issue a bsr (scan bits in reverse) which also sets EFLAGS.
24742 SDVTList VTs = DAG.getVTList(OpVT, MVT::i32);
24743 Op = DAG.getNode(X86ISD::BSR, dl, VTs, Op);
24744
24745 if (Opc == ISD::CTLZ) {
24746 // If src is zero (i.e. bsr sets ZF), returns NumBits.
24747 SDValue Ops[] = {Op, DAG.getConstant(NumBits + NumBits - 1, dl, OpVT),
24748 DAG.getTargetConstant(X86::COND_E, dl, MVT::i8),
24749 Op.getValue(1)};
24750 Op = DAG.getNode(X86ISD::CMOV, dl, OpVT, Ops);
24751 }
24752
24753 // Finally xor with NumBits-1.
24754 Op = DAG.getNode(ISD::XOR, dl, OpVT, Op,
24755 DAG.getConstant(NumBits - 1, dl, OpVT));
24756
24757 if (VT == MVT::i8)
24758 Op = DAG.getNode(ISD::TRUNCATE, dl, MVT::i8, Op);
24759 return Op;
24760}
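A scalar model of the BSR-based lowering above, using a hypothetical helper name; it mirrors the CMOV of 2*NumBits-1 for the zero-input case and the final XOR with NumBits-1:

#include <cassert>
#include <cstdint>

// ctlz(x) = (NumBits - 1) ^ bsr(x); for x == 0 the CMOV substitutes
// 2*NumBits-1 so the XOR still yields NumBits.
static unsigned ctlz32ViaBSR(uint32_t X) {
  unsigned BSR = X ? (31 - __builtin_clz(X)) // index of the highest set bit
                   : (32 + 32 - 1);          // CMOV value when ZF is set
  return BSR ^ 31;                           // final XOR with NumBits-1
}

int main() {
  assert(ctlz32ViaBSR(1u << 31) == 0);
  assert(ctlz32ViaBSR(1) == 31);
  assert(ctlz32ViaBSR(0) == 32);
  return 0;
}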
24761
24762static SDValue LowerCTTZ(SDValue Op, const X86Subtarget &Subtarget,
24763 SelectionDAG &DAG) {
24764 MVT VT = Op.getSimpleValueType();
24765 unsigned NumBits = VT.getScalarSizeInBits();
24766 SDValue N0 = Op.getOperand(0);
24767 SDLoc dl(Op);
24768
24769 assert(!VT.isVector() && Op.getOpcode() == ISD::CTTZ &&
24770 "Only scalar CTTZ requires custom lowering");
24771
24772 // Issue a bsf (scan bits forward) which also sets EFLAGS.
24773 SDVTList VTs = DAG.getVTList(VT, MVT::i32);
24774 Op = DAG.getNode(X86ISD::BSF, dl, VTs, N0);
24775
24776 // If src is zero (i.e. bsf sets ZF), returns NumBits.
24777 SDValue Ops[] = {Op, DAG.getConstant(NumBits, dl, VT),
24778 DAG.getTargetConstant(X86::COND_E, dl, MVT::i8),
24779 Op.getValue(1)};
24780 return DAG.getNode(X86ISD::CMOV, dl, VT, Ops);
24781}
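Likewise, a scalar model of the BSF-based CTTZ lowering above (helper name is illustrative): BSF yields the index of the lowest set bit, and the CMOV substitutes NumBits when the source is zero:

#include <cassert>
#include <cstdint>

static unsigned cttz32ViaBSF(uint32_t X) {
  return X ? unsigned(__builtin_ctz(X)) // model of BSF on a nonzero input
           : 32u;                       // CMOV result when ZF is set
}

int main() {
  assert(cttz32ViaBSF(8) == 3);
  assert(cttz32ViaBSF(0) == 32);
  return 0;
}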
24782
24783/// Break a 256-bit integer operation into two new 128-bit ones and then
24784/// concatenate the result back.
24785static SDValue split256IntArith(SDValue Op, SelectionDAG &DAG) {
24786 MVT VT = Op.getSimpleValueType();
24787
24788 assert(VT.is256BitVector() && VT.isInteger() &&
24789 "Unsupported value type for operation");
24790
24791 unsigned NumElems = VT.getVectorNumElements();
24792 SDLoc dl(Op);
24793
24794 // Extract the LHS vectors
24795 SDValue LHS = Op.getOperand(0);
24796 SDValue LHS1 = extract128BitVector(LHS, 0, DAG, dl);
24797 SDValue LHS2 = extract128BitVector(LHS, NumElems / 2, DAG, dl);
24798
24799 // Extract the RHS vectors
24800 SDValue RHS = Op.getOperand(1);
24801 SDValue RHS1 = extract128BitVector(RHS, 0, DAG, dl);
24802 SDValue RHS2 = extract128BitVector(RHS, NumElems / 2, DAG, dl);
24803
24804 MVT EltVT = VT.getVectorElementType();
24805 MVT NewVT = MVT::getVectorVT(EltVT, NumElems/2);
24806
24807 return DAG.getNode(ISD::CONCAT_VECTORS, dl, VT,
24808 DAG.getNode(Op.getOpcode(), dl, NewVT, LHS1, RHS1),
24809 DAG.getNode(Op.getOpcode(), dl, NewVT, LHS2, RHS2));
24810}
24811
24812/// Break a 512-bit integer operation into two new 256-bit ones and then
24813/// concatenate the result back.
24814static SDValue split512IntArith(SDValue Op, SelectionDAG &DAG) {
24815 MVT VT = Op.getSimpleValueType();
24816
24817 assert(VT.is512BitVector() && VT.isInteger() &&
24818 "Unsupported value type for operation");
24819
24820 unsigned NumElems = VT.getVectorNumElements();
24821 SDLoc dl(Op);
24822
24823 // Extract the LHS vectors
24824 SDValue LHS = Op.getOperand(0);
24825 SDValue LHS1 = extract256BitVector(LHS, 0, DAG, dl);
24826 SDValue LHS2 = extract256BitVector(LHS, NumElems / 2, DAG, dl);
24827
24828 // Extract the RHS vectors
24829 SDValue RHS = Op.getOperand(1);
24830 SDValue RHS1 = extract256BitVector(RHS, 0, DAG, dl);
24831 SDValue RHS2 = extract256BitVector(RHS, NumElems / 2, DAG, dl);
24832
24833 MVT EltVT = VT.getVectorElementType();
24834 MVT NewVT = MVT::getVectorVT(EltVT, NumElems/2);
24835
24836 return DAG.getNode(ISD::CONCAT_VECTORS, dl, VT,
24837 DAG.getNode(Op.getOpcode(), dl, NewVT, LHS1, RHS1),
24838 DAG.getNode(Op.getOpcode(), dl, NewVT, LHS2, RHS2));
24839}
24840
24841static SDValue lowerAddSub(SDValue Op, SelectionDAG &DAG,
24842 const X86Subtarget &Subtarget) {
24843 MVT VT = Op.getSimpleValueType();
24844 if (VT == MVT::i16 || VT == MVT::i32)
24845 return lowerAddSubToHorizontalOp(Op, DAG, Subtarget);
24846
24847 if (VT.getScalarType() == MVT::i1)
24848 return DAG.getNode(ISD::XOR, SDLoc(Op), VT,
24849 Op.getOperand(0), Op.getOperand(1));
24850
24851 assert(Op.getSimpleValueType().is256BitVector() &&
24852 Op.getSimpleValueType().isInteger() &&
24853 "Only handle AVX 256-bit vector integer operation");
24854 return split256IntArith(Op, DAG);
24855}
24856
24857static SDValue LowerADDSAT_SUBSAT(SDValue Op, SelectionDAG &DAG,
24858 const X86Subtarget &Subtarget) {
24859 MVT VT = Op.getSimpleValueType();
24860 SDValue X = Op.getOperand(0), Y = Op.getOperand(1);
24861 unsigned Opcode = Op.getOpcode();
24862 if (VT.getScalarType() == MVT::i1) {
24863 SDLoc dl(Op);
24864 switch (Opcode) {
24865 default: llvm_unreachable("Expected saturated arithmetic opcode");
24866 case ISD::UADDSAT:
24867 case ISD::SADDSAT:
24868 // *addsat i1 X, Y --> X | Y
24869 return DAG.getNode(ISD::OR, dl, VT, X, Y);
24870 case ISD::USUBSAT:
24871 case ISD::SSUBSAT:
24872 // *subsat i1 X, Y --> X & ~Y
24873 return DAG.getNode(ISD::AND, dl, VT, X, DAG.getNOT(dl, Y, VT));
24874 }
24875 }
24876
24877 if (VT.is128BitVector()) {
24878 // Avoid the generic expansion with min/max if we don't have pminu*/pmaxu*.
24879 const TargetLowering &TLI = DAG.getTargetLoweringInfo();
24880 EVT SetCCResultType = TLI.getSetCCResultType(DAG.getDataLayout(),
24881 *DAG.getContext(), VT);
24882 SDLoc DL(Op);
24883 if (Opcode == ISD::UADDSAT && !TLI.isOperationLegal(ISD::UMIN, VT)) {
24884 // uaddsat X, Y --> (X >u (X + Y)) ? -1 : X + Y
24885 SDValue Add = DAG.getNode(ISD::ADD, DL, VT, X, Y);
24886 SDValue Cmp = DAG.getSetCC(DL, SetCCResultType, X, Add, ISD::SETUGT);
24887 return DAG.getSelect(DL, VT, Cmp, DAG.getAllOnesConstant(DL, VT), Add);
24888 }
24889 if (Opcode == ISD::USUBSAT && !TLI.isOperationLegal(ISD::UMAX, VT)) {
24890 // usubsat X, Y --> (X >u Y) ? X - Y : 0
24891 SDValue Sub = DAG.getNode(ISD::SUB, DL, VT, X, Y);
24892 SDValue Cmp = DAG.getSetCC(DL, SetCCResultType, X, Y, ISD::SETUGT);
24893 return DAG.getSelect(DL, VT, Cmp, Sub, DAG.getConstant(0, DL, VT));
24894 }
24895 // Use default expansion.
24896 return SDValue();
24897 }
24898
24899 assert(Op.getSimpleValueType().is256BitVector() &&
24900 Op.getSimpleValueType().isInteger() &&
24901 "Only handle AVX 256-bit vector integer operation");
24902 return split256IntArith(Op, DAG);
24903}
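Scalar versions of the two compare/select expansions used above when pminu*/pmaxu* are not legal; uaddsat16 and usubsat16 are illustrative names for a single 16-bit lane:

#include <cassert>
#include <cstdint>

// uaddsat X, Y --> (X >u (X + Y)) ? -1 : X + Y
static uint16_t uaddsat16(uint16_t X, uint16_t Y) {
  uint16_t Add = X + Y;
  return X > Add ? 0xFFFF : Add;
}
// usubsat X, Y --> (X >u Y) ? X - Y : 0
static uint16_t usubsat16(uint16_t X, uint16_t Y) {
  return X > Y ? uint16_t(X - Y) : 0;
}

int main() {
  assert(uaddsat16(0xFFF0, 0x20) == 0xFFFF); // overflow saturates to all-ones
  assert(uaddsat16(10, 20) == 30);
  assert(usubsat16(10, 20) == 0);            // underflow saturates to zero
  assert(usubsat16(20, 5) == 15);
  return 0;
}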
24904
24905static SDValue LowerABS(SDValue Op, const X86Subtarget &Subtarget,
24906 SelectionDAG &DAG) {
24907 MVT VT = Op.getSimpleValueType();
24908 if (VT == MVT::i16 || VT == MVT::i32 || VT == MVT::i64) {
24909 // Since X86 does not have CMOV for 8-bit integer, we don't convert
24910 // 8-bit integer abs to NEG and CMOV.
24911 SDLoc DL(Op);
24912 SDValue N0 = Op.getOperand(0);
24913 SDValue Neg = DAG.getNode(X86ISD::SUB, DL, DAG.getVTList(VT, MVT::i32),
24914 DAG.getConstant(0, DL, VT), N0);
24915 SDValue Ops[] = {N0, Neg, DAG.getTargetConstant(X86::COND_GE, DL, MVT::i8),
24916 SDValue(Neg.getNode(), 1)};
24917 return DAG.getNode(X86ISD::CMOV, DL, VT, Ops);
24918 }
24919
24920 // ABS(vXi64 X) --> VPBLENDVPD(X, 0-X, X).
24921 if ((VT == MVT::v2i64 || VT == MVT::v4i64) && Subtarget.hasSSE41()) {
24922 SDLoc DL(Op);
24923 SDValue Src = Op.getOperand(0);
24924 SDValue Sub =
24925 DAG.getNode(ISD::SUB, DL, VT, DAG.getConstant(0, DL, VT), Src);
24926 return DAG.getNode(X86ISD::BLENDV, DL, VT, Src, Sub, Src);
24927 }
24928
24929 if (VT.is256BitVector() && !Subtarget.hasInt256()) {
24930 assert(VT.isInteger() &&
24931 "Only handle AVX 256-bit vector integer operation");
24932 return Lower256IntUnary(Op, DAG);
24933 }
24934
24935 // Default to expand.
24936 return SDValue();
24937}
24938
24939static SDValue LowerMINMAX(SDValue Op, SelectionDAG &DAG) {
24940 MVT VT = Op.getSimpleValueType();
24941
24942 // For AVX1 cases, split to use legal ops (everything but v4i64).
24943 if (VT.getScalarType() != MVT::i64 && VT.is256BitVector())
24944 return split256IntArith(Op, DAG);
24945
24946 SDLoc DL(Op);
24947 unsigned Opcode = Op.getOpcode();
24948 SDValue N0 = Op.getOperand(0);
24949 SDValue N1 = Op.getOperand(1);
24950
24951 // For pre-SSE41, we can perform UMIN/UMAX v8i16 by flipping the signbit,
24952 // using the SMIN/SMAX instructions and flipping the signbit back.
24953 if (VT == MVT::v8i16) {
24954 assert((Opcode == ISD::UMIN || Opcode == ISD::UMAX) &&
24955 "Unexpected MIN/MAX opcode");
24956 SDValue Sign = DAG.getConstant(APInt::getSignedMinValue(16), DL, VT);
24957 N0 = DAG.getNode(ISD::XOR, DL, VT, N0, Sign);
24958 N1 = DAG.getNode(ISD::XOR, DL, VT, N1, Sign);
24959 Opcode = (Opcode == ISD::UMIN ? ISD::SMIN : ISD::SMAX);
24960 SDValue Result = DAG.getNode(Opcode, DL, VT, N0, N1);
24961 return DAG.getNode(ISD::XOR, DL, VT, Result, Sign);
24962 }
24963
24964 // Else, expand to a compare/select.
24965 ISD::CondCode CC;
24966 switch (Opcode) {
24967 case ISD::SMIN: CC = ISD::CondCode::SETLT; break;
24968 case ISD::SMAX: CC = ISD::CondCode::SETGT; break;
24969 case ISD::UMIN: CC = ISD::CondCode::SETULT; break;
24970 case ISD::UMAX: CC = ISD::CondCode::SETUGT; break;
24971 default: llvm_unreachable("Unknown MINMAX opcode");
24972 }
24973
24974 SDValue Cond = DAG.getSetCC(DL, VT, N0, N1, CC);
24975 return DAG.getSelect(DL, VT, Cond, N0, N1);
24976}
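A scalar sketch of the v8i16 sign-bit-flip trick above (umin16ViaSmin is an illustrative name): flip the sign bit, take the signed minimum, and flip the sign bit back to recover the unsigned minimum:

#include <algorithm>
#include <cassert>
#include <cstdint>

static uint16_t umin16ViaSmin(uint16_t A, uint16_t B) {
  int16_t SA = int16_t(A ^ 0x8000);              // flip sign bit
  int16_t SB = int16_t(B ^ 0x8000);
  return uint16_t(std::min(SA, SB)) ^ 0x8000;    // signed min, flip back
}

int main() {
  assert(umin16ViaSmin(0xFFFF, 1) == 1);         // unsigned order, not signed
  assert(umin16ViaSmin(0x8000, 0x7FFF) == 0x7FFF);
  assert(umin16ViaSmin(5, 9) == 5);
  return 0;
}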
24977
24978static SDValue LowerMUL(SDValue Op, const X86Subtarget &Subtarget,
24979 SelectionDAG &DAG) {
24980 SDLoc dl(Op);
24981 MVT VT = Op.getSimpleValueType();
24982
24983 if (VT.getScalarType() == MVT::i1)
24984 return DAG.getNode(ISD::AND, dl, VT, Op.getOperand(0), Op.getOperand(1));
24985
24986 // Decompose 256-bit ops into 128-bit ops.
24987 if (VT.is256BitVector() && !Subtarget.hasInt256())
24988 return split256IntArith(Op, DAG);
24989
24990 SDValue A = Op.getOperand(0);
24991 SDValue B = Op.getOperand(1);
24992
24993 // Lower v16i8/v32i8/v64i8 mul as sign-extension to v8i16/v16i16/v32i16
24994 // vector pairs, multiply and truncate.
24995 if (VT == MVT::v16i8 || VT == MVT::v32i8 || VT == MVT::v64i8) {
24996 unsigned NumElts = VT.getVectorNumElements();
24997
24998 if ((VT == MVT::v16i8 && Subtarget.hasInt256()) ||
24999 (VT == MVT::v32i8 && Subtarget.canExtendTo512BW())) {
25000 MVT ExVT = MVT::getVectorVT(MVT::i16, VT.getVectorNumElements());
25001 return DAG.getNode(
25002 ISD::TRUNCATE, dl, VT,
25003 DAG.getNode(ISD::MUL, dl, ExVT,
25004 DAG.getNode(ISD::ANY_EXTEND, dl, ExVT, A),
25005 DAG.getNode(ISD::ANY_EXTEND, dl, ExVT, B)));
25006 }
25007
25008 MVT ExVT = MVT::getVectorVT(MVT::i16, NumElts / 2);
25009
25010 // Extract the lo/hi parts and any-extend them to i16.
25011 // We're going to mask off all but the low byte of each result element of
25012 // the pmullw, so it doesn't matter what's in the high byte of each 16-bit
25013 // element.
25014 SDValue Undef = DAG.getUNDEF(VT);
25015 SDValue ALo = DAG.getBitcast(ExVT, getUnpackl(DAG, dl, VT, A, Undef));
25016 SDValue AHi = DAG.getBitcast(ExVT, getUnpackh(DAG, dl, VT, A, Undef));
25017
25018 SDValue BLo, BHi;
25019 if (ISD::isBuildVectorOfConstantSDNodes(B.getNode())) {
25020 // If the RHS is a constant, manually unpackl/unpackh.
25021 SmallVector<SDValue, 16> LoOps, HiOps;
25022 for (unsigned i = 0; i != NumElts; i += 16) {
25023 for (unsigned j = 0; j != 8; ++j) {
25024 LoOps.push_back(DAG.getAnyExtOrTrunc(B.getOperand(i + j), dl,
25025 MVT::i16));
25026 HiOps.push_back(DAG.getAnyExtOrTrunc(B.getOperand(i + j + 8), dl,
25027 MVT::i16));
25028 }
25029 }
25030
25031 BLo = DAG.getBuildVector(ExVT, dl, LoOps);
25032 BHi = DAG.getBuildVector(ExVT, dl, HiOps);
25033 } else {
25034 BLo = DAG.getBitcast(ExVT, getUnpackl(DAG, dl, VT, B, Undef));
25035 BHi = DAG.getBitcast(ExVT, getUnpackh(DAG, dl, VT, B, Undef));
25036 }
25037
25038 // Multiply, mask to the lower 8 bits of the lo/hi results and pack.
25039 SDValue RLo = DAG.getNode(ISD::MUL, dl, ExVT, ALo, BLo);
25040 SDValue RHi = DAG.getNode(ISD::MUL, dl, ExVT, AHi, BHi);
25041 RLo = DAG.getNode(ISD::AND, dl, ExVT, RLo, DAG.getConstant(255, dl, ExVT));
25042 RHi = DAG.getNode(ISD::AND, dl, ExVT, RHi, DAG.getConstant(255, dl, ExVT));
25043 return DAG.getNode(X86ISD::PACKUS, dl, VT, RLo, RHi);
25044 }
25045
25046 // Lower v4i32 mul as 2x shuffle, 2x pmuludq, 2x shuffle.
25047 if (VT == MVT::v4i32) {
25048 assert(Subtarget.hasSSE2() && !Subtarget.hasSSE41() &&
25049 "Should not custom lower when pmulld is available!");
25050
25051 // Extract the odd parts.
25052 static const int UnpackMask[] = { 1, -1, 3, -1 };
25053 SDValue Aodds = DAG.getVectorShuffle(VT, dl, A, A, UnpackMask);
25054 SDValue Bodds = DAG.getVectorShuffle(VT, dl, B, B, UnpackMask);
25055
25056 // Multiply the even parts.
25057 SDValue Evens = DAG.getNode(X86ISD::PMULUDQ, dl, MVT::v2i64,
25058 DAG.getBitcast(MVT::v2i64, A),
25059 DAG.getBitcast(MVT::v2i64, B));
25060 // Now multiply odd parts.
25061 SDValue Odds = DAG.getNode(X86ISD::PMULUDQ, dl, MVT::v2i64,
25062 DAG.getBitcast(MVT::v2i64, Aodds),
25063 DAG.getBitcast(MVT::v2i64, Bodds));
25064
25065 Evens = DAG.getBitcast(VT, Evens);
25066 Odds = DAG.getBitcast(VT, Odds);
25067
25068 // Merge the two vectors back together with a shuffle. This expands into 2
25069 // shuffles.
25070 static const int ShufMask[] = { 0, 4, 2, 6 };
25071 return DAG.getVectorShuffle(VT, dl, Evens, Odds, ShufMask);
25072 }
25073
25074 assert((VT == MVT::v2i64 || VT == MVT::v4i64 || VT == MVT::v8i64) &&
25075 "Only know how to lower V2I64/V4I64/V8I64 multiply");
25076 assert(!Subtarget.hasDQI() && "DQI should use MULLQ");
25077
25078 // Ahi = psrlqi(a, 32);
25079 // Bhi = psrlqi(b, 32);
25080 //
25081 // AloBlo = pmuludq(a, b);
25082 // AloBhi = pmuludq(a, Bhi);
25083 // AhiBlo = pmuludq(Ahi, b);
25084 //
25085 // Hi = psllqi(AloBhi + AhiBlo, 32);
25086 // return AloBlo + Hi;
25087 KnownBits AKnown = DAG.computeKnownBits(A);
25088 KnownBits BKnown = DAG.computeKnownBits(B);
25089
25090 APInt LowerBitsMask = APInt::getLowBitsSet(64, 32);
25091 bool ALoIsZero = LowerBitsMask.isSubsetOf(AKnown.Zero);
25092 bool BLoIsZero = LowerBitsMask.isSubsetOf(BKnown.Zero);
25093
25094 APInt UpperBitsMask = APInt::getHighBitsSet(64, 32);
25095 bool AHiIsZero = UpperBitsMask.isSubsetOf(AKnown.Zero);
25096 bool BHiIsZero = UpperBitsMask.isSubsetOf(BKnown.Zero);
25097
25098 SDValue Zero = DAG.getConstant(0, dl, VT);
25099
25100 // Only multiply lo/hi halves that aren't known to be zero.
25101 SDValue AloBlo = Zero;
25102 if (!ALoIsZero && !BLoIsZero)
25103 AloBlo = DAG.getNode(X86ISD::PMULUDQ, dl, VT, A, B);
25104
25105 SDValue AloBhi = Zero;
25106 if (!ALoIsZero && !BHiIsZero) {
25107 SDValue Bhi = getTargetVShiftByConstNode(X86ISD::VSRLI, dl, VT, B, 32, DAG);
25108 AloBhi = DAG.getNode(X86ISD::PMULUDQ, dl, VT, A, Bhi);
25109 }
25110
25111 SDValue AhiBlo = Zero;
25112 if (!AHiIsZero && !BLoIsZero) {
25113 SDValue Ahi = getTargetVShiftByConstNode(X86ISD::VSRLI, dl, VT, A, 32, DAG);
25114 AhiBlo = DAG.getNode(X86ISD::PMULUDQ, dl, VT, Ahi, B);
25115 }
25116
25117 SDValue Hi = DAG.getNode(ISD::ADD, dl, VT, AloBhi, AhiBlo);
25118 Hi = getTargetVShiftByConstNode(X86ISD::VSHLI, dl, VT, Hi, 32, DAG);
25119
25120 return DAG.getNode(ISD::ADD, dl, VT, AloBlo, Hi);
25121}
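A scalar model of the vXi64 multiply decomposition above, built from the 32x32->64 unsigned multiplies that PMULUDQ performs per lane (mul64ViaPmuludq is an illustrative name; AHi*BHi is dropped because it only affects bits at or above 64):

#include <cassert>
#include <cstdint>

static uint64_t mul64ViaPmuludq(uint64_t A, uint64_t B) {
  uint64_t ALo = A & 0xFFFFFFFFu, AHi = A >> 32;
  uint64_t BLo = B & 0xFFFFFFFFu, BHi = B >> 32;
  uint64_t AloBlo = ALo * BLo;                   // pmuludq(a, b)
  uint64_t AloBhi = ALo * BHi;                   // pmuludq(a, Bhi)
  uint64_t AhiBlo = AHi * BLo;                   // pmuludq(Ahi, b)
  return AloBlo + ((AloBhi + AhiBlo) << 32);     // AloBlo + psllqi(sum, 32)
}

int main() {
  assert(mul64ViaPmuludq(0x123456789ABCDEFull, 0xFEDCBA987654321ull) ==
         0x123456789ABCDEFull * 0xFEDCBA987654321ull);
  assert(mul64ViaPmuludq(~0ull, ~0ull) == ~0ull * ~0ull);
  return 0;
}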
25122
25123static SDValue LowerMULH(SDValue Op, const X86Subtarget &Subtarget,
25124 SelectionDAG &DAG) {
25125 SDLoc dl(Op);
25126 MVT VT = Op.getSimpleValueType();
25127 bool IsSigned = Op->getOpcode() == ISD::MULHS;
25128 unsigned NumElts = VT.getVectorNumElements();
25129 SDValue A = Op.getOperand(0);
25130 SDValue B = Op.getOperand(1);
25131
25132 // Decompose 256-bit ops into 128-bit ops.
25133 if (VT.is256BitVector() && !Subtarget.hasInt256())
25134 return split256IntArith(Op, DAG);
25135
25136 if (VT == MVT::v4i32 || VT == MVT::v8i32 || VT == MVT::v16i32) {
25137 assert((VT == MVT::v4i32 && Subtarget.hasSSE2()) ||
25138 (VT == MVT::v8i32 && Subtarget.hasInt256()) ||
25139 (VT == MVT::v16i32 && Subtarget.hasAVX512()));
25140
25141 // PMULxD operations multiply each even value (starting at 0) of LHS with
25142 // the related value of RHS and produce a widened result.
25143 // E.g., PMULUDQ <4 x i32> <a|b|c|d>, <4 x i32> <e|f|g|h>
25144 // => <2 x i64> <ae|cg>
25145 //
25146 // In other words, to have all the results, we need to perform two PMULxD:
25147 // 1. one with the even values.
25148 // 2. one with the odd values.
25149 // To achieve #2, we need to place the odd values at an even position.
25150 //
25151 // Place the odd value at an even position (basically, shift all values 1
25152 // step to the left):
25153 const int Mask[] = {1, -1, 3, -1, 5, -1, 7, -1,
25154 9, -1, 11, -1, 13, -1, 15, -1};
25155 // <a|b|c|d> => <b|undef|d|undef>
25156 SDValue Odd0 = DAG.getVectorShuffle(VT, dl, A, A,
25157 makeArrayRef(&Mask[0], NumElts));
25158 // <e|f|g|h> => <f|undef|h|undef>
25159 SDValue Odd1 = DAG.getVectorShuffle(VT, dl, B, B,
25160 makeArrayRef(&Mask[0], NumElts));
25161
25162 // Emit two multiplies, one for the lower 2 ints and one for the higher 2
25163 // ints.
25164 MVT MulVT = MVT::getVectorVT(MVT::i64, NumElts / 2);
25165 unsigned Opcode =
25166 (IsSigned && Subtarget.hasSSE41()) ? X86ISD::PMULDQ : X86ISD::PMULUDQ;
25167 // PMULUDQ <4 x i32> <a|b|c|d>, <4 x i32> <e|f|g|h>
25168 // => <2 x i64> <ae|cg>
25169 SDValue Mul1 = DAG.getBitcast(VT, DAG.getNode(Opcode, dl, MulVT,
25170 DAG.getBitcast(MulVT, A),
25171 DAG.getBitcast(MulVT, B)));
25172 // PMULUDQ <4 x i32> <b|undef|d|undef>, <4 x i32> <f|undef|h|undef>
25173 // => <2 x i64> <bf|dh>
25174 SDValue Mul2 = DAG.getBitcast(VT, DAG.getNode(Opcode, dl, MulVT,
25175 DAG.getBitcast(MulVT, Odd0),
25176 DAG.getBitcast(MulVT, Odd1)));
25177
25178 // Shuffle it back into the right order.
25179 SmallVector<int, 16> ShufMask(NumElts);
25180 for (int i = 0; i != (int)NumElts; ++i)
25181 ShufMask[i] = (i / 2) * 2 + ((i % 2) * NumElts) + 1;
25182
25183 SDValue Res = DAG.getVectorShuffle(VT, dl, Mul1, Mul2, ShufMask);
25184
25185 // If we have a signed multiply but no PMULDQ fix up the result of an
25186 // unsigned multiply.
25187 if (IsSigned && !Subtarget.hasSSE41()) {
25188 SDValue Zero = DAG.getConstant(0, dl, VT);
25189 SDValue T1 = DAG.getNode(ISD::AND, dl, VT,
25190 DAG.getSetCC(dl, VT, Zero, A, ISD::SETGT), B);
25191 SDValue T2 = DAG.getNode(ISD::AND, dl, VT,
25192 DAG.getSetCC(dl, VT, Zero, B, ISD::SETGT), A);
25193
25194 SDValue Fixup = DAG.getNode(ISD::ADD, dl, VT, T1, T2);
25195 Res = DAG.getNode(ISD::SUB, dl, VT, Res, Fixup);
25196 }
25197
25198 return Res;
25199 }
25200
25201 // Only i8 vectors should need custom lowering after this.
25202 assert((VT == MVT::v16i8 || (VT == MVT::v32i8 && Subtarget.hasInt256()) ||
25203 (VT == MVT::v64i8 && Subtarget.hasBWI())) &&
25204 "Unsupported vector type");
25205
25206 // Lower v16i8/v32i8 as extension to v8i16/v16i16 vector pairs, multiply,
25207 // logical shift down the upper half and pack back to i8.
25208
25209 // With SSE41 we can use sign/zero extend, but for pre-SSE41 we unpack
25210 // and then ashr/lshr the upper bits down to the lower bits before multiply.
25211 unsigned ExAVX = IsSigned ? ISD::SIGN_EXTEND : ISD::ZERO_EXTEND;
25212
25213 if ((VT == MVT::v16i8 && Subtarget.hasInt256()) ||
25214 (VT == MVT::v32i8 && Subtarget.canExtendTo512BW())) {
25215 MVT ExVT = MVT::getVectorVT(MVT::i16, NumElts);
25216 SDValue ExA = DAG.getNode(ExAVX, dl, ExVT, A);
25217 SDValue ExB = DAG.getNode(ExAVX, dl, ExVT, B);
25218 SDValue Mul = DAG.getNode(ISD::MUL, dl, ExVT, ExA, ExB);
25219 Mul = getTargetVShiftByConstNode(X86ISD::VSRLI, dl, ExVT, Mul, 8, DAG);
25220 return DAG.getNode(ISD::TRUNCATE, dl, VT, Mul);
25221 }
25222
25223 // For signed 512-bit vectors, split into 256-bit vectors to allow the
25224 // sign-extension to occur.
25225 if (VT == MVT::v64i8 && IsSigned)
25226 return split512IntArith(Op, DAG);
25227
25228 // Signed AVX2 implementation - extend xmm subvectors to ymm.
25229 if (VT == MVT::v32i8 && IsSigned) {
25230 MVT ExVT = MVT::v16i16;
25231 SDValue ALo = extract128BitVector(A, 0, DAG, dl);
25232 SDValue BLo = extract128BitVector(B, 0, DAG, dl);
25233 SDValue AHi = extract128BitVector(A, NumElts / 2, DAG, dl);
25234 SDValue BHi = extract128BitVector(B, NumElts / 2, DAG, dl);
25235 ALo = DAG.getNode(ExAVX, dl, ExVT, ALo);
25236 BLo = DAG.getNode(ExAVX, dl, ExVT, BLo);
25237 AHi = DAG.getNode(ExAVX, dl, ExVT, AHi);
25238 BHi = DAG.getNode(ExAVX, dl, ExVT, BHi);
25239 SDValue Lo = DAG.getNode(ISD::MUL, dl, ExVT, ALo, BLo);
25240 SDValue Hi = DAG.getNode(ISD::MUL, dl, ExVT, AHi, BHi);
25241 Lo = getTargetVShiftByConstNode(X86ISD::VSRLI, dl, ExVT, Lo, 8, DAG);
25242 Hi = getTargetVShiftByConstNode(X86ISD::VSRLI, dl, ExVT, Hi, 8, DAG);
25243
25244 // Bitcast back to VT and then pack all the even elements from Lo and Hi.
25245 // Shuffle lowering should turn this into PACKUS+PERMQ
25246 Lo = DAG.getBitcast(VT, Lo);
25247 Hi = DAG.getBitcast(VT, Hi);
25248 return DAG.getVectorShuffle(VT, dl, Lo, Hi,
25249 { 0, 2, 4, 6, 8, 10, 12, 14,
25250 16, 18, 20, 22, 24, 26, 28, 30,
25251 32, 34, 36, 38, 40, 42, 44, 46,
25252 48, 50, 52, 54, 56, 58, 60, 62});
25253 }
25254
25255 // For signed v16i8 and all unsigned vXi8 we will unpack the low and high
25256 // half of each 128-bit lane to widen to a vXi16 type. Do the multiplies,
25257 // shift the results and pack the half-lane results back together.
25258
25259 MVT ExVT = MVT::getVectorVT(MVT::i16, NumElts / 2);
25260
25261 static const int PSHUFDMask[] = { 8, 9, 10, 11, 12, 13, 14, 15,
25262 -1, -1, -1, -1, -1, -1, -1, -1};
25263
25264 // Extract the lo parts and zero/sign extend to i16.
25265 // Only use SSE4.1 instructions for signed v16i8 where using unpack requires
25266 // shifts to sign extend. Using unpack for unsigned only requires an xor to
25267 // create zeros and a copy due to tied-register constraints pre-AVX. But using
25268 // zero_extend_vector_inreg would require an additional pshufd for the high
25269 // part.
25270
25271 SDValue ALo, AHi;
25272 if (IsSigned && VT == MVT::v16i8 && Subtarget.hasSSE41()) {
25273 ALo = DAG.getNode(ISD::SIGN_EXTEND_VECTOR_INREG, dl, ExVT, A);
25274
25275 AHi = DAG.getVectorShuffle(VT, dl, A, A, PSHUFDMask);
25276 AHi = DAG.getNode(ISD::SIGN_EXTEND_VECTOR_INREG, dl, ExVT, AHi);
25277 } else if (IsSigned) {
25278 ALo = DAG.getBitcast(ExVT, getUnpackl(DAG, dl, VT, DAG.getUNDEF(VT), A));
25279 AHi = DAG.getBitcast(ExVT, getUnpackh(DAG, dl, VT, DAG.getUNDEF(VT), A));
25280
25281 ALo = getTargetVShiftByConstNode(X86ISD::VSRAI, dl, ExVT, ALo, 8, DAG);
25282 AHi = getTargetVShiftByConstNode(X86ISD::VSRAI, dl, ExVT, AHi, 8, DAG);
25283 } else {
25284 ALo = DAG.getBitcast(ExVT, getUnpackl(DAG, dl, VT, A,
25285 DAG.getConstant(0, dl, VT)));
25286 AHi = DAG.getBitcast(ExVT, getUnpackh(DAG, dl, VT, A,
25287 DAG.getConstant(0, dl, VT)));
25288 }
25289
25290 SDValue BLo, BHi;
25291 if (ISD::isBuildVectorOfConstantSDNodes(B.getNode())) {
25292 // If the RHS is a constant, manually unpackl/unpackh and extend.
25293 SmallVector<SDValue, 16> LoOps, HiOps;
25294 for (unsigned i = 0; i != NumElts; i += 16) {
25295 for (unsigned j = 0; j != 8; ++j) {
25296 SDValue LoOp = B.getOperand(i + j);
25297 SDValue HiOp = B.getOperand(i + j + 8);
25298
25299 if (IsSigned) {
25300 LoOp = DAG.getSExtOrTrunc(LoOp, dl, MVT::i16);
25301 HiOp = DAG.getSExtOrTrunc(HiOp, dl, MVT::i16);
25302 } else {
25303 LoOp = DAG.getZExtOrTrunc(LoOp, dl, MVT::i16);
25304 HiOp = DAG.getZExtOrTrunc(HiOp, dl, MVT::i16);
25305 }
25306
25307 LoOps.push_back(LoOp);
25308 HiOps.push_back(HiOp);
25309 }
25310 }
25311
25312 BLo = DAG.getBuildVector(ExVT, dl, LoOps);
25313 BHi = DAG.getBuildVector(ExVT, dl, HiOps);
25314 } else if (IsSigned && VT == MVT::v16i8 && Subtarget.hasSSE41()) {
25315 BLo = DAG.getNode(ISD::SIGN_EXTEND_VECTOR_INREG, dl, ExVT, B);
25316
25317 BHi = DAG.getVectorShuffle(VT, dl, B, B, PSHUFDMask);
25318 BHi = DAG.getNode(ISD::SIGN_EXTEND_VECTOR_INREG, dl, ExVT, BHi);
25319 } else if (IsSigned) {
25320 BLo = DAG.getBitcast(ExVT, getUnpackl(DAG, dl, VT, DAG.getUNDEF(VT), B));
25321 BHi = DAG.getBitcast(ExVT, getUnpackh(DAG, dl, VT, DAG.getUNDEF(VT), B));
25322
25323 BLo = getTargetVShiftByConstNode(X86ISD::VSRAI, dl, ExVT, BLo, 8, DAG);
25324 BHi = getTargetVShiftByConstNode(X86ISD::VSRAI, dl, ExVT, BHi, 8, DAG);
25325 } else {
25326 BLo = DAG.getBitcast(ExVT, getUnpackl(DAG, dl, VT, B,
25327 DAG.getConstant(0, dl, VT)));
25328 BHi = DAG.getBitcast(ExVT, getUnpackh(DAG, dl, VT, B,
25329 DAG.getConstant(0, dl, VT)));
25330 }
25331
25332 // Multiply, lshr the upper 8 bits to the lower 8 bits of the lo/hi results and
25333 // pack back to vXi8.
25334 SDValue RLo = DAG.getNode(ISD::MUL, dl, ExVT, ALo, BLo);
25335 SDValue RHi = DAG.getNode(ISD::MUL, dl, ExVT, AHi, BHi);
25336 RLo = getTargetVShiftByConstNode(X86ISD::VSRLI, dl, ExVT, RLo, 8, DAG);
25337 RHi = getTargetVShiftByConstNode(X86ISD::VSRLI, dl, ExVT, RHi, 8, DAG);
25338
25339 // Bitcast back to VT and then pack all the even elements from Lo and Hi.
25340 return DAG.getNode(X86ISD::PACKUS, dl, VT, RLo, RHi);
25341}
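A scalar model of the vXi8 MULHU/MULHS lowering above (mulhu8/mulhs8 are illustrative names): widen each byte to 16 bits, multiply, logical-shift the product down by 8 and truncate back to a byte:

#include <cassert>
#include <cstdint>

static uint8_t mulhu8(uint8_t A, uint8_t B) {
  uint16_t Wide = uint16_t(A) * uint16_t(B); // zero-extend and multiply
  return uint8_t(Wide >> 8);                 // shift down the upper half, pack
}
static int8_t mulhs8(int8_t A, int8_t B) {
  int16_t Wide = int16_t(A) * int16_t(B);    // sign-extend and multiply
  return int8_t(uint16_t(Wide) >> 8);        // same logical shift and truncate
}

int main() {
  assert(mulhu8(200, 200) == 156);  // (200*200) >> 8
  assert(mulhs8(-100, 100) == -40); // high byte of -10000
  return 0;
}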
25342
25343SDValue X86TargetLowering::LowerWin64_i128OP(SDValue Op, SelectionDAG &DAG) const {
25344 assert(Subtarget.isTargetWin64() && "Unexpected target");
25345 EVT VT = Op.getValueType();
25346 assert(VT.isInteger() && VT.getSizeInBits() == 128 &&
25347 "Unexpected return type for lowering");
25348
25349 RTLIB::Libcall LC;
25350 bool isSigned;
25351 switch (Op->getOpcode()) {
25352 default: llvm_unreachable("Unexpected request for libcall!");
25353 case ISD::SDIV: isSigned = true; LC = RTLIB::SDIV_I128; break;
25354 case ISD::UDIV: isSigned = false; LC = RTLIB::UDIV_I128; break;
25355 case ISD::SREM: isSigned = true; LC = RTLIB::SREM_I128; break;
25356 case ISD::UREM: isSigned = false; LC = RTLIB::UREM_I128; break;
25357 case ISD::SDIVREM: isSigned = true; LC = RTLIB::SDIVREM_I128; break;
25358 case ISD::UDIVREM: isSigned = false; LC = RTLIB::UDIVREM_I128; break;
25359 }
25360
25361 SDLoc dl(Op);
25362 SDValue InChain = DAG.getEntryNode();
25363
25364 TargetLowering::ArgListTy Args;
25365 TargetLowering::ArgListEntry Entry;
25366 for (unsigned i = 0, e = Op->getNumOperands(); i != e; ++i) {
25367 EVT ArgVT = Op->getOperand(i).getValueType();
25368 assert(ArgVT.isInteger() && ArgVT.getSizeInBits() == 128 &&
25369 "Unexpected argument type for lowering");
25370 SDValue StackPtr = DAG.CreateStackTemporary(ArgVT, 16);
25371 Entry.Node = StackPtr;
25372 InChain = DAG.getStore(InChain, dl, Op->getOperand(i), StackPtr,
25373 MachinePointerInfo(), /* Alignment = */ 16);
25374 Type *ArgTy = ArgVT.getTypeForEVT(*DAG.getContext());
25375 Entry.Ty = PointerType::get(ArgTy,0);
25376 Entry.IsSExt = false;
25377 Entry.IsZExt = false;
25378 Args.push_back(Entry);
25379 }
25380
25381 SDValue Callee = DAG.getExternalSymbol(getLibcallName(LC),
25382 getPointerTy(DAG.getDataLayout()));
25383
25384 TargetLowering::CallLoweringInfo CLI(DAG);
25385 CLI.setDebugLoc(dl)
25386 .setChain(InChain)
25387 .setLibCallee(
25388 getLibcallCallingConv(LC),
25389 static_cast<EVT>(MVT::v2i64).getTypeForEVT(*DAG.getContext()), Callee,
25390 std::move(Args))
25391 .setInRegister()
25392 .setSExtResult(isSigned)
25393 .setZExtResult(!isSigned);
25394
25395 std::pair<SDValue, SDValue> CallInfo = LowerCallTo(CLI);
25396 return DAG.getBitcast(VT, CallInfo.first);
25397}
25398
25399// Return true if the required (according to Opcode) shift-imm form is natively
25400// supported by the Subtarget
25401static bool SupportedVectorShiftWithImm(MVT VT, const X86Subtarget &Subtarget,
25402 unsigned Opcode) {
25403 if (VT.getScalarSizeInBits() < 16)
25404 return false;
25405
25406 if (VT.is512BitVector() && Subtarget.hasAVX512() &&
25407 (VT.getScalarSizeInBits() > 16 || Subtarget.hasBWI()))
25408 return true;
25409
25410 bool LShift = (VT.is128BitVector() && Subtarget.hasSSE2()) ||
25411 (VT.is256BitVector() && Subtarget.hasInt256());
25412
25413 bool AShift = LShift && (Subtarget.hasAVX512() ||
25414 (VT != MVT::v2i64 && VT != MVT::v4i64));
25415 return (Opcode == ISD::SRA) ? AShift : LShift;
25416}
25417
25418// The shift amount is a variable, but it is the same for all vector lanes.
25419// These instructions are defined together with shift-immediate.
25420static
25421bool SupportedVectorShiftWithBaseAmnt(MVT VT, const X86Subtarget &Subtarget,
25422 unsigned Opcode) {
25423 return SupportedVectorShiftWithImm(VT, Subtarget, Opcode);
25424}
25425
25426// Return true if the required (according to Opcode) variable-shift form is
25427// natively supported by the Subtarget
25428static bool SupportedVectorVarShift(MVT VT, const X86Subtarget &Subtarget,
25429 unsigned Opcode) {
25430
25431 if (!Subtarget.hasInt256() || VT.getScalarSizeInBits() < 16)
25432 return false;
25433
25434 // vXi16 supported only on AVX-512, BWI
25435 if (VT.getScalarSizeInBits() == 16 && !Subtarget.hasBWI())
25436 return false;
25437
25438 if (Subtarget.hasAVX512())
25439 return true;
25440
25441 bool LShift = VT.is128BitVector() || VT.is256BitVector();
25442 bool AShift = LShift && VT != MVT::v2i64 && VT != MVT::v4i64;
25443 return (Opcode == ISD::SRA) ? AShift : LShift;
25444}
25445
25446static SDValue LowerScalarImmediateShift(SDValue Op, SelectionDAG &DAG,
25447 const X86Subtarget &Subtarget) {
25448 MVT VT = Op.getSimpleValueType();
25449 SDLoc dl(Op);
25450 SDValue R = Op.getOperand(0);
25451 SDValue Amt = Op.getOperand(1);
25452 unsigned X86Opc = getTargetVShiftUniformOpcode(Op.getOpcode(), false);
25453
25454 auto ArithmeticShiftRight64 = [&](uint64_t ShiftAmt) {
25455 assert((VT == MVT::v2i64 || VT == MVT::v4i64) && "Unexpected SRA type");
25456 MVT ExVT = MVT::getVectorVT(MVT::i32, VT.getVectorNumElements() * 2);
25457 SDValue Ex = DAG.getBitcast(ExVT, R);
25458
25459 // ashr(R, 63) === cmp_slt(R, 0)
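// Editor's note (illustrative): shifting an i64 arithmetically by 63
// replicates the sign bit into every position, so the result is all-ones
// for negative lanes and all-zeros otherwise - precisely what
// PCMPGT(0, R), i.e. the 0 > R comparison, produces per lane.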
25460 if (ShiftAmt == 63 && Subtarget.hasSSE42()) {
25461 assert((VT != MVT::v4i64 || Subtarget.hasInt256()) &&
25462 "Unsupported PCMPGT op");
25463 return DAG.getNode(X86ISD::PCMPGT, dl, VT, DAG.getConstant(0, dl, VT), R);
25464 }
25465
25466 if (ShiftAmt >= 32) {
25467 // Splat sign to upper i32 dst, and SRA upper i32 src to lower i32.
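// Editor's note (illustrative): for ShiftAmt >= 32 the low half of each
// i64 result is ashr_i32(hi32(x), ShiftAmt - 32) and the high half is the
// sign fill ashr_i32(hi32(x), 31); the shuffles below pick exactly those
// two i32 lanes from the Upper/Lower partial results.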
25468 SDValue Upper =
25469 getTargetVShiftByConstNode(X86ISD::VSRAI, dl, ExVT, Ex, 31, DAG);
25470 SDValue Lower = getTargetVShiftByConstNode(X86ISD::VSRAI, dl, ExVT, Ex,
25471 ShiftAmt - 32, DAG);
25472 if (VT == MVT::v2i64)
25473 Ex = DAG.getVectorShuffle(ExVT, dl, Upper, Lower, {5, 1, 7, 3});
25474 if (VT == MVT::v4i64)
25475 Ex = DAG.getVectorShuffle(ExVT, dl, Upper, Lower,
25476 {9, 1, 11, 3, 13, 5, 15, 7});
25477 } else {
25478 // SRA upper i32, SRL whole i64 and select lower i32.
25479 SDValue Upper = getTargetVShiftByConstNode(X86ISD::VSRAI, dl, ExVT, Ex,
25480 ShiftAmt, DAG);
25481 SDValue Lower =
25482 getTargetVShiftByConstNode(X86ISD::VSRLI, dl, VT, R, ShiftAmt, DAG);
25483 Lower = DAG.getBitcast(ExVT, Lower);
25484 if (VT == MVT::v2i64)
25485 Ex = DAG.getVectorShuffle(ExVT, dl, Upper, Lower, {4, 1, 6, 3});
25486 if (VT == MVT::v4i64)
25487 Ex = DAG.getVectorShuffle(ExVT, dl, Upper, Lower,
25488 {8, 1, 10, 3, 12, 5, 14, 7});
25489 }
25490 return DAG.getBitcast(VT, Ex);
25491 };
25492
25493 // Optimize shl/srl/sra with constant shift amount.
25494 APInt APIntShiftAmt;
25495 if (!X86::isConstantSplat(Amt, APIntShiftAmt))
25496 return SDValue();
25497
25498 // If the shift amount is out of range, return undef.
25499 if (APIntShiftAmt.uge(VT.getScalarSizeInBits()))
25500 return DAG.getUNDEF(VT);
25501
25502 uint64_t ShiftAmt = APIntShiftAmt.getZExtValue();
25503
25504 if (SupportedVectorShiftWithImm(VT, Subtarget, Op.getOpcode()))
25505 return getTargetVShiftByConstNode(X86Opc, dl, VT, R, ShiftAmt, DAG);
25506
25507 // i64 SRA needs to be performed as partial shifts.
25508 if (((!Subtarget.hasXOP() && VT == MVT::v2i64) ||
25509 (Subtarget.hasInt256() && VT == MVT::v4i64)) &&
25510 Op.getOpcode() == ISD::SRA)
25511 return ArithmeticShiftRight64(ShiftAmt);
25512
25513 if (VT == MVT::v16i8 || (Subtarget.hasInt256() && VT == MVT::v32i8) ||
25514 VT == MVT::v64i8) {
25515 unsigned NumElts = VT.getVectorNumElements();
25516 MVT ShiftVT = MVT::getVectorVT(MVT::i16, NumElts / 2);
25517
25518 // Simple i8 add case
25519 if (Op.getOpcode() == ISD::SHL && ShiftAmt == 1)
25520 return DAG.getNode(ISD::ADD, dl, VT, R, R);
25521
25522 // ashr(R, 7) === cmp_slt(R, 0)
25523 if (Op.getOpcode() == ISD::SRA && ShiftAmt == 7) {
25524 SDValue Zeros = DAG.getConstant(0, dl, VT);
25525 if (VT.is512BitVector()) {
25526 assert(VT == MVT::v64i8 && "Unexpected element type!");
25527 SDValue CMP = DAG.getSetCC(dl, MVT::v64i1, Zeros, R, ISD::SETGT);
25528 return DAG.getNode(ISD::SIGN_EXTEND, dl, VT, CMP);
25529 }
25530 return DAG.getNode(X86ISD::PCMPGT, dl, VT, Zeros, R);
25531 }
25532
25533 // XOP can shift v16i8 directly instead of as shift v8i16 + mask.
25534 if (VT == MVT::v16i8 && Subtarget.hasXOP())
25535 return SDValue();
25536
25537 if (Op.getOpcode() == ISD::SHL) {
25538 // Make a large shift.
25539 SDValue SHL = getTargetVShiftByConstNode(X86ISD::VSHLI, dl, ShiftVT, R,
25540 ShiftAmt, DAG);
25541 SHL = DAG.getBitcast(VT, SHL);
25542 // Zero out the rightmost bits.
25543 APInt Mask = APInt::getHighBitsSet(8, 8 - ShiftAmt);
25544 return DAG.getNode(ISD::AND, dl, VT, SHL, DAG.getConstant(Mask, dl, VT));
25545 }
25546 if (Op.getOpcode() == ISD::SRL) {
25547 // Make a large shift.
25548 SDValue SRL = getTargetVShiftByConstNode(X86ISD::VSRLI, dl, ShiftVT, R,
25549 ShiftAmt, DAG);
25550 SRL = DAG.getBitcast(VT, SRL);
25551 // Zero out the leftmost bits.
25552 return DAG.getNode(ISD::AND, dl, VT, SRL,
25553 DAG.getConstant(uint8_t(-1U) >> ShiftAmt, dl, VT));
25554 }
25555 if (Op.getOpcode() == ISD::SRA) {
25556 // ashr(R, Amt) === sub(xor(lshr(R, Amt), Mask), Mask)
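// Editor's note (worked example, not in the original source): for an i8
// lane R = 0xF0 (-16) and ShiftAmt = 4, lshr gives 0x0F, Mask = 128 >> 4
// = 0x08, xor gives 0x07 and the subtract gives 0xFF (-1), matching
// ashr(-16, 4). For R = 0x70 (112) the same steps yield 0x07 = 112 >> 4.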
25557 SDValue Res = DAG.getNode(ISD::SRL, dl, VT, R, Amt);
25558
25559 SDValue Mask = DAG.getConstant(128 >> ShiftAmt, dl, VT);
25560 Res = DAG.getNode(ISD::XOR, dl, VT, Res, Mask);
25561 Res = DAG.getNode(ISD::SUB, dl, VT, Res, Mask);
25562 return Res;
25563 }
25564 llvm_unreachable("Unknown shift opcode.");
25565 }
25566
25567 return SDValue();
25568}
25569
25570static SDValue LowerScalarVariableShift(SDValue Op, SelectionDAG &DAG,
25571 const X86Subtarget &Subtarget) {
25572 MVT VT = Op.getSimpleValueType();
25573 SDLoc dl(Op);
25574 SDValue R = Op.getOperand(0);
25575 SDValue Amt = Op.getOperand(1);
25576 unsigned Opcode = Op.getOpcode();
25577 unsigned X86OpcI = getTargetVShiftUniformOpcode(Opcode, false);
25578 unsigned X86OpcV = getTargetVShiftUniformOpcode(Opcode, true);
25579
25580 if (SDValue BaseShAmt = DAG.getSplatValue(Amt)) {
25581 if (SupportedVectorShiftWithBaseAmnt(VT, Subtarget, Opcode)) {
25582 MVT EltVT = VT.getVectorElementType();
25583 assert(EltVT.bitsLE(MVT::i64) && "Unexpected element type!");
25584 if (EltVT != MVT::i64 && EltVT.bitsGT(MVT::i32))
25585 BaseShAmt = DAG.getNode(ISD::ZERO_EXTEND, dl, MVT::i64, BaseShAmt);
25586 else if (EltVT.bitsLT(MVT::i32))
25587 BaseShAmt = DAG.getNode(ISD::ZERO_EXTEND, dl, MVT::i32, BaseShAmt);
25588
25589 return getTargetVShiftNode(X86OpcI, dl, VT, R, BaseShAmt, Subtarget, DAG);
25590 }
25591
25592 // vXi8 shifts - shift as v8i16 + mask result.
25593 if (((VT == MVT::v16i8 && !Subtarget.canExtendTo512DQ()) ||
25594 (VT == MVT::v32i8 && !Subtarget.canExtendTo512BW()) ||
25595 VT == MVT::v64i8) &&
25596 !Subtarget.hasXOP()) {
25597 unsigned NumElts = VT.getVectorNumElements();
25598 MVT ExtVT = MVT::getVectorVT(MVT::i16, NumElts / 2);
25599 if (SupportedVectorShiftWithBaseAmnt(ExtVT, Subtarget, Opcode)) {
25600 unsigned LogicalOp = (Opcode == ISD::SHL ? ISD::SHL : ISD::SRL);
25601 unsigned LogicalX86Op = getTargetVShiftUniformOpcode(LogicalOp, false);
25602 BaseShAmt = DAG.getNode(ISD::ZERO_EXTEND, dl, MVT::i32, BaseShAmt);
25603
25604 // Create the mask using vXi16 shifts. For shift-rights we need to move
25605 // the upper byte down before splatting the vXi8 mask.
25606 SDValue BitMask = DAG.getConstant(-1, dl, ExtVT);
25607 BitMask = getTargetVShiftNode(LogicalX86Op, dl, ExtVT, BitMask,
25608 BaseShAmt, Subtarget, DAG);
25609 if (Opcode != ISD::SHL)
25610 BitMask = getTargetVShiftByConstNode(LogicalX86Op, dl, ExtVT, BitMask,
25611 8, DAG);
25612 BitMask = DAG.getBitcast(VT, BitMask);
25613 BitMask = DAG.getVectorShuffle(VT, dl, BitMask, BitMask,
25614 SmallVector<int, 64>(NumElts, 0));
25615
25616 SDValue Res = getTargetVShiftNode(LogicalX86Op, dl, ExtVT,
25617 DAG.getBitcast(ExtVT, R), BaseShAmt,
25618 Subtarget, DAG);
25619 Res = DAG.getBitcast(VT, Res);
25620 Res = DAG.getNode(ISD::AND, dl, VT, Res, BitMask);
25621
25622 if (Opcode == ISD::SRA) {
25623 // ashr(R, Amt) === sub(xor(lshr(R, Amt), SignMask), SignMask)
25624 // SignMask = lshr(SignBit, Amt) - safe to do this with PSRLW.
25625 SDValue SignMask = DAG.getConstant(0x8080, dl, ExtVT);
25626 SignMask = getTargetVShiftNode(LogicalX86Op, dl, ExtVT, SignMask,
25627 BaseShAmt, Subtarget, DAG);
25628 SignMask = DAG.getBitcast(VT, SignMask);
25629 Res = DAG.getNode(ISD::XOR, dl, VT, Res, SignMask);
25630 Res = DAG.getNode(ISD::SUB, dl, VT, Res, SignMask);
25631 }
25632 return Res;
25633 }
25634 }
25635 }
25636
25637 // Check cases (mainly 32-bit) where i64 is expanded into high and low parts.
25638 if (VT == MVT::v2i64 && Amt.getOpcode() == ISD::BITCAST &&
25639 Amt.getOperand(0).getOpcode() == ISD::BUILD_VECTOR) {
25640 Amt = Amt.getOperand(0);
25641 unsigned Ratio = 64 / Amt.getScalarValueSizeInBits();
25642 std::vector<SDValue> Vals(Ratio);
25643 for (unsigned i = 0; i != Ratio; ++i)
25644 Vals[i] = Amt.getOperand(i);
25645 for (unsigned i = Ratio, e = Amt.getNumOperands(); i != e; i += Ratio) {
25646 for (unsigned j = 0; j != Ratio; ++j)
25647 if (Vals[j] != Amt.getOperand(i + j))
25648 return SDValue();
25649 }
25650
25651 if (SupportedVectorShiftWithBaseAmnt(VT, Subtarget, Op.getOpcode()))
25652 return DAG.getNode(X86OpcV, dl, VT, R, Op.getOperand(1));
25653 }
25654 return SDValue();
25655}
25656
25657// Convert a shift/rotate left amount to a multiplication scale factor.
25658static SDValue convertShiftLeftToScale(SDValue Amt, const SDLoc &dl,
25659 const X86Subtarget &Subtarget,
25660 SelectionDAG &DAG) {
25661 MVT VT = Amt.getSimpleValueType();
25662 if (!(VT == MVT::v8i16 || VT == MVT::v4i32 ||
25663 (Subtarget.hasInt256() && VT == MVT::v16i16) ||
25664 (!Subtarget.hasAVX512() && VT == MVT::v16i8)))
25665 return SDValue();
25666
25667 if (ISD::isBuildVectorOfConstantSDNodes(Amt.getNode())) {
25668 SmallVector<SDValue, 8> Elts;
25669 MVT SVT = VT.getVectorElementType();
25670 unsigned SVTBits = SVT.getSizeInBits();
25671 APInt One(SVTBits, 1);
25672 unsigned NumElems = VT.getVectorNumElements();
25673
25674 for (unsigned i = 0; i != NumElems; ++i) {
25675 SDValue Op = Amt->getOperand(i);
25676 if (Op->isUndef()) {
25677 Elts.push_back(Op);
25678 continue;
25679 }
25680
25681 ConstantSDNode *ND = cast<ConstantSDNode>(Op);
25682 APInt C(SVTBits, ND->getZExtValue());
25683 uint64_t ShAmt = C.getZExtValue();
25684 if (ShAmt >= SVTBits) {
25685 Elts.push_back(DAG.getUNDEF(SVT));
25686 continue;
25687 }
25688 Elts.push_back(DAG.getConstant(One.shl(ShAmt), dl, SVT));
25689 }
25690 return DAG.getBuildVector(VT, dl, Elts);
25691 }
25692
25693 // If the target doesn't support variable shifts, use either FP conversion
25694 // or integer multiplication to avoid shifting each element individually.
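// Editor's note (illustrative): placing Amt in the exponent field
// (Amt << 23) and adding the bit pattern of 1.0f (0x3f800000) builds the
// float 2^Amt, which FP_TO_SINT turns back into the integer scale. For
// Amt = 5: (5 << 23) + 0x3f800000 = 0x42000000 = 32.0f, i.e. 1 << 5.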
25695 if (VT == MVT::v4i32) {
25696 Amt = DAG.getNode(ISD::SHL, dl, VT, Amt, DAG.getConstant(23, dl, VT));
25697 Amt = DAG.getNode(ISD::ADD, dl, VT, Amt,
25698 DAG.getConstant(0x3f800000U, dl, VT));
25699 Amt = DAG.getBitcast(MVT::v4f32, Amt);
25700 return DAG.getNode(ISD::FP_TO_SINT, dl, VT, Amt);
25701 }
25702
25703 // AVX2 can more effectively perform this as a zext/trunc to/from v8i32.
25704 if (VT == MVT::v8i16 && !Subtarget.hasAVX2()) {
25705 SDValue Z = DAG.getConstant(0, dl, VT);
25706 SDValue Lo = DAG.getBitcast(MVT::v4i32, getUnpackl(DAG, dl, VT, Amt, Z));
25707 SDValue Hi = DAG.getBitcast(MVT::v4i32, getUnpackh(DAG, dl, VT, Amt, Z));
25708 Lo = convertShiftLeftToScale(Lo, dl, Subtarget, DAG);
25709 Hi = convertShiftLeftToScale(Hi, dl, Subtarget, DAG);
25710 if (Subtarget.hasSSE41())
25711 return DAG.getNode(X86ISD::PACKUS, dl, VT, Lo, Hi);
25712
25713 return DAG.getVectorShuffle(VT, dl, DAG.getBitcast(VT, Lo),
25714 DAG.getBitcast(VT, Hi),
25715 {0, 2, 4, 6, 8, 10, 12, 14});
25716 }
25717
25718 return SDValue();
25719}
25720
25721static SDValue LowerShift(SDValue Op, const X86Subtarget &Subtarget,
25722 SelectionDAG &DAG) {
25723 MVT VT = Op.getSimpleValueType();
25724 SDLoc dl(Op);
25725 SDValue R = Op.getOperand(0);
25726 SDValue Amt = Op.getOperand(1);
25727 unsigned EltSizeInBits = VT.getScalarSizeInBits();
25728 bool ConstantAmt = ISD::isBuildVectorOfConstantSDNodes(Amt.getNode());
25729
25730 unsigned Opc = Op.getOpcode();
25731 unsigned X86OpcV = getTargetVShiftUniformOpcode(Opc, true);
25732 unsigned X86OpcI = getTargetVShiftUniformOpcode(Opc, false);
25733
25734 assert(VT.isVector() && "Custom lowering only for vector shifts!");
25735 assert(Subtarget.hasSSE2() && "Only custom lower when we have SSE2!");
25736
25737 if (SDValue V = LowerScalarImmediateShift(Op, DAG, Subtarget))
25738 return V;
25739
25740 if (SDValue V = LowerScalarVariableShift(Op, DAG, Subtarget))
25741 return V;
25742
25743 if (SupportedVectorVarShift(VT, Subtarget, Opc))
25744 return Op;
25745
25746 // XOP has 128-bit variable logical/arithmetic shifts.
25747 // +ve/-ve Amt = shift left/right.
25748 if (Subtarget.hasXOP() && (VT == MVT::v2i64 || VT == MVT::v4i32 ||
25749 VT == MVT::v8i16 || VT == MVT::v16i8)) {
25750 if (Opc == ISD::SRL || Opc == ISD::SRA) {
25751 SDValue Zero = DAG.getConstant(0, dl, VT);
25752 Amt = DAG.getNode(ISD::SUB, dl, VT, Zero, Amt);
25753 }
25754 if (Opc == ISD::SHL || Opc == ISD::SRL)
25755 return DAG.getNode(X86ISD::VPSHL, dl, VT, R, Amt);
25756 if (Opc == ISD::SRA)
25757 return DAG.getNode(X86ISD::VPSHA, dl, VT, R, Amt);
25758 }
25759
25760 // v2i64 vector logical shifts can efficiently avoid scalarization - do the
25761 // shifts per-lane and then shuffle the partial results back together.
25762 if (VT == MVT::v2i64 && Opc != ISD::SRA) {
25763 // Splat the shift amounts so the scalar shifts above will catch it.
25764 SDValue Amt0 = DAG.getVectorShuffle(VT, dl, Amt, Amt, {0, 0});
25765 SDValue Amt1 = DAG.getVectorShuffle(VT, dl, Amt, Amt, {1, 1});
25766 SDValue R0 = DAG.getNode(Opc, dl, VT, R, Amt0);
25767 SDValue R1 = DAG.getNode(Opc, dl, VT, R, Amt1);
25768 return DAG.getVectorShuffle(VT, dl, R0, R1, {0, 3});
25769 }
25770
25771 // i64 vector arithmetic shift can be emulated with the transform:
25772 // M = lshr(SIGN_MASK, Amt)
25773 // ashr(R, Amt) === sub(xor(lshr(R, Amt), M), M)
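// Editor's note (illustrative): M places the original sign bit at
// position 63 - Amt of the logically shifted value; the xor flips that
// bit and the subtract borrows through the (zero) bits above it, filling
// them with ones exactly when the lane was negative.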
25774 if ((VT == MVT::v2i64 || (VT == MVT::v4i64 && Subtarget.hasInt256())) &&
25775 Opc == ISD::SRA) {
25776 SDValue S = DAG.getConstant(APInt::getSignMask(64), dl, VT);
25777 SDValue M = DAG.getNode(ISD::SRL, dl, VT, S, Amt);
25778 R = DAG.getNode(ISD::SRL, dl, VT, R, Amt);
25779 R = DAG.getNode(ISD::XOR, dl, VT, R, M);
25780 R = DAG.getNode(ISD::SUB, dl, VT, R, M);
25781 return R;
25782 }
25783
25784 // If possible, lower this shift as a sequence of two shifts by
25785 // constant plus a BLENDing shuffle instead of scalarizing it.
25786 // Example:
25787 // (v4i32 (srl A, (build_vector < X, Y, Y, Y>)))
25788 //
25789 // Could be rewritten as:
25790 // (v4i32 (MOVSS (srl A, <Y,Y,Y,Y>), (srl A, <X,X,X,X>)))
25791 //
25792 // The advantage is that the two shifts from the example would be
25793 // lowered as X86ISD::VSRLI nodes in parallel before blending.
25794 if (ConstantAmt && (VT == MVT::v8i16 || VT == MVT::v4i32 ||
25795 (VT == MVT::v16i16 && Subtarget.hasInt256()))) {
25796 SDValue Amt1, Amt2;
25797 unsigned NumElts = VT.getVectorNumElements();
25798 SmallVector<int, 8> ShuffleMask;
25799 for (unsigned i = 0; i != NumElts; ++i) {
25800 SDValue A = Amt->getOperand(i);
25801 if (A.isUndef()) {
25802 ShuffleMask.push_back(SM_SentinelUndef);
25803 continue;
25804 }
25805 if (!Amt1 || Amt1 == A) {
25806 ShuffleMask.push_back(i);
25807 Amt1 = A;
25808 continue;
25809 }
25810 if (!Amt2 || Amt2 == A) {
25811 ShuffleMask.push_back(i + NumElts);
25812 Amt2 = A;
25813 continue;
25814 }
25815 break;
25816 }
25817
25818 // Only perform this blend if we can perform it without loading a mask.
25819 if (ShuffleMask.size() == NumElts && Amt1 && Amt2 &&
25820 (VT != MVT::v16i16 ||
25821 is128BitLaneRepeatedShuffleMask(VT, ShuffleMask)) &&
25822 (VT == MVT::v4i32 || Subtarget.hasSSE41() || Opc != ISD::SHL ||
25823 canWidenShuffleElements(ShuffleMask))) {
25824 auto *Cst1 = dyn_cast<ConstantSDNode>(Amt1);
25825 auto *Cst2 = dyn_cast<ConstantSDNode>(Amt2);
25826 if (Cst1 && Cst2 && Cst1->getAPIntValue().ult(EltSizeInBits) &&
25827 Cst2->getAPIntValue().ult(EltSizeInBits)) {
25828 SDValue Shift1 = getTargetVShiftByConstNode(X86OpcI, dl, VT, R,
25829 Cst1->getZExtValue(), DAG);
25830 SDValue Shift2 = getTargetVShiftByConstNode(X86OpcI, dl, VT, R,
25831 Cst2->getZExtValue(), DAG);
25832 return DAG.getVectorShuffle(VT, dl, Shift1, Shift2, ShuffleMask);
25833 }
25834 }
25835 }
25836
25837 // If possible, lower this packed shift into a vector multiply instead of
25838 // expanding it into a sequence of scalar shifts.
25839 if (Opc == ISD::SHL)
25840 if (SDValue Scale = convertShiftLeftToScale(Amt, dl, Subtarget, DAG))
25841 return DAG.getNode(ISD::MUL, dl, VT, R, Scale);
25842
25843 // Constant ISD::SRL can be performed efficiently on vXi16 vectors as we
25844 // can replace with ISD::MULHU, creating scale factor from (NumEltBits - Amt).
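// Editor's note (illustrative): for i16 lanes, x >> s equals
// mulhu(x, 1 << (16 - s)), e.g. x >> 3 == (x * 8192) >> 16. The scale for
// s == 0 would be 1 << 16, which does not fit in i16, hence the explicit
// select on a zero shift amount below.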
25845 if (Opc == ISD::SRL && ConstantAmt &&
25846 (VT == MVT::v8i16 || (VT == MVT::v16i16 && Subtarget.hasInt256()))) {
25847 SDValue EltBits = DAG.getConstant(EltSizeInBits, dl, VT);
25848 SDValue RAmt = DAG.getNode(ISD::SUB, dl, VT, EltBits, Amt);
25849 if (SDValue Scale = convertShiftLeftToScale(RAmt, dl, Subtarget, DAG)) {
25850 SDValue Zero = DAG.getConstant(0, dl, VT);
25851 SDValue ZAmt = DAG.getSetCC(dl, VT, Amt, Zero, ISD::SETEQ);
25852 SDValue Res = DAG.getNode(ISD::MULHU, dl, VT, R, Scale);
25853 return DAG.getSelect(dl, VT, ZAmt, R, Res);
25854 }
25855 }
25856
25857 // Constant ISD::SRA can be performed efficiently on vXi16 vectors as we
25858 // can replace with ISD::MULHS, creating scale factor from (NumEltBits - Amt).
25859 // TODO: Special case handling for shift by 0/1, really we can afford either
25860 // of these cases in pre-SSE41/XOP/AVX512 but not both.
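// Editor's note (illustrative): the signed analogue is
// ashr(x, s) == mulhs(x, 1 << (16 - s)). Shift amounts of 0 and 1 are
// the awkward cases: 1 << 16 does not fit in i16 at all and 1 << 15 is
// the i16 sign bit (a negative scale), so both get selected around below.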
25861 if (Opc == ISD::SRA && ConstantAmt &&
25862 (VT == MVT::v8i16 || (VT == MVT::v16i16 && Subtarget.hasInt256())) &&
25863 ((Subtarget.hasSSE41() && !Subtarget.hasXOP() &&
25864 !Subtarget.hasAVX512()) ||
25865 DAG.isKnownNeverZero(Amt))) {
25866 SDValue EltBits = DAG.getConstant(EltSizeInBits, dl, VT);
25867 SDValue RAmt = DAG.getNode(ISD::SUB, dl, VT, EltBits, Amt);
25868 if (SDValue Scale = convertShiftLeftToScale(RAmt, dl, Subtarget, DAG)) {
25869 SDValue Amt0 =
25870 DAG.getSetCC(dl, VT, Amt, DAG.getConstant(0, dl, VT), ISD::SETEQ);
25871 SDValue Amt1 =
25872 DAG.getSetCC(dl, VT, Amt, DAG.getConstant(1, dl, VT), ISD::SETEQ);
25873 SDValue Sra1 =
25874 getTargetVShiftByConstNode(X86ISD::VSRAI, dl, VT, R, 1, DAG);
25875 SDValue Res = DAG.getNode(ISD::MULHS, dl, VT, R, Scale);
25876 Res = DAG.getSelect(dl, VT, Amt0, R, Res);
25877 return DAG.getSelect(dl, VT, Amt1, Sra1, Res);
25878 }
25879 }
25880
25881 // v4i32 Non Uniform Shifts.
25882 // If the shift amount is constant we can shift each lane using the SSE2
25883 // immediate shifts, else we need to zero-extend each lane to the lower i64
25884 // and shift using the SSE2 variable shifts.
25885 // The separate results can then be blended together.
25886 if (VT == MVT::v4i32) {
25887 SDValue Amt0, Amt1, Amt2, Amt3;
25888 if (ConstantAmt) {
25889 Amt0 = DAG.getVectorShuffle(VT, dl, Amt, DAG.getUNDEF(VT), {0, 0, 0, 0});
25890 Amt1 = DAG.getVectorShuffle(VT, dl, Amt, DAG.getUNDEF(VT), {1, 1, 1, 1});
25891 Amt2 = DAG.getVectorShuffle(VT, dl, Amt, DAG.getUNDEF(VT), {2, 2, 2, 2});
25892 Amt3 = DAG.getVectorShuffle(VT, dl, Amt, DAG.getUNDEF(VT), {3, 3, 3, 3});
25893 } else {
25894 // The SSE2 shifts use the lower i64 as the same shift amount for
25895 // all lanes and the upper i64 is ignored. On AVX we're better off
25896 // just zero-extending, but for SSE just duplicating the top 16-bits is
25897 // cheaper and has the same effect for out of range values.
25898 if (Subtarget.hasAVX()) {
25899 SDValue Z = DAG.getConstant(0, dl, VT);
25900 Amt0 = DAG.getVectorShuffle(VT, dl, Amt, Z, {0, 4, -1, -1});
25901 Amt1 = DAG.getVectorShuffle(VT, dl, Amt, Z, {1, 5, -1, -1});
25902 Amt2 = DAG.getVectorShuffle(VT, dl, Amt, Z, {2, 6, -1, -1});
25903 Amt3 = DAG.getVectorShuffle(VT, dl, Amt, Z, {3, 7, -1, -1});
25904 } else {
25905 SDValue Amt01 = DAG.getBitcast(MVT::v8i16, Amt);
25906 SDValue Amt23 = DAG.getVectorShuffle(MVT::v8i16, dl, Amt01, Amt01,
25907 {4, 5, 6, 7, -1, -1, -1, -1});
25908 Amt0 = DAG.getVectorShuffle(MVT::v8i16, dl, Amt01, Amt01,
25909 {0, 1, 1, 1, -1, -1, -1, -1});
25910 Amt1 = DAG.getVectorShuffle(MVT::v8i16, dl, Amt01, Amt01,
25911 {2, 3, 3, 3, -1, -1, -1, -1});
25912 Amt2 = DAG.getVectorShuffle(MVT::v8i16, dl, Amt23, Amt23,
25913 {0, 1, 1, 1, -1, -1, -1, -1});
25914 Amt3 = DAG.getVectorShuffle(MVT::v8i16, dl, Amt23, Amt23,
25915 {2, 3, 3, 3, -1, -1, -1, -1});
25916 }
25917 }
25918
25919 unsigned ShOpc = ConstantAmt ? Opc : X86OpcV;
25920 SDValue R0 = DAG.getNode(ShOpc, dl, VT, R, DAG.getBitcast(VT, Amt0));
25921 SDValue R1 = DAG.getNode(ShOpc, dl, VT, R, DAG.getBitcast(VT, Amt1));
25922 SDValue R2 = DAG.getNode(ShOpc, dl, VT, R, DAG.getBitcast(VT, Amt2));
25923 SDValue R3 = DAG.getNode(ShOpc, dl, VT, R, DAG.getBitcast(VT, Amt3));
25924
25925 // Merge the shifted lane results optimally with/without PBLENDW.
25926 // TODO - ideally shuffle combining would handle this.
25927 if (Subtarget.hasSSE41()) {
25928 SDValue R02 = DAG.getVectorShuffle(VT, dl, R0, R2, {0, -1, 6, -1});
25929 SDValue R13 = DAG.getVectorShuffle(VT, dl, R1, R3, {-1, 1, -1, 7});
25930 return DAG.getVectorShuffle(VT, dl, R02, R13, {0, 5, 2, 7});
25931 }
25932 SDValue R01 = DAG.getVectorShuffle(VT, dl, R0, R1, {0, -1, -1, 5});
25933 SDValue R23 = DAG.getVectorShuffle(VT, dl, R2, R3, {2, -1, -1, 7});
25934 return DAG.getVectorShuffle(VT, dl, R01, R23, {0, 3, 4, 7});
25935 }
25936
25937 // It's worth extending once and using the vXi16/vXi32 shifts for smaller
25938 // types, but without AVX512 the extra overheads to get from vXi8 to vXi32
25939 // make the existing SSE solution better.
25940 // NOTE: We honor preferred vector width before promoting to 512-bits.
25941 if ((Subtarget.hasInt256() && VT == MVT::v8i16) ||
25942 (Subtarget.canExtendTo512DQ() && VT == MVT::v16i16) ||
25943 (Subtarget.canExtendTo512DQ() && VT == MVT::v16i8) ||
25944 (Subtarget.canExtendTo512BW() && VT == MVT::v32i8) ||
25945 (Subtarget.hasBWI() && Subtarget.hasVLX() && VT == MVT::v16i8)) {
25946 assert((!Subtarget.hasBWI() || VT == MVT::v32i8 || VT == MVT::v16i8) &&
25947 "Unexpected vector type");
25948 MVT EvtSVT = Subtarget.hasBWI() ? MVT::i16 : MVT::i32;
25949 MVT ExtVT = MVT::getVectorVT(EvtSVT, VT.getVectorNumElements());
25950 unsigned ExtOpc = Opc == ISD::SRA ? ISD::SIGN_EXTEND : ISD::ZERO_EXTEND;
25951 R = DAG.getNode(ExtOpc, dl, ExtVT, R);
25952 Amt = DAG.getNode(ISD::ZERO_EXTEND, dl, ExtVT, Amt);
25953 return DAG.getNode(ISD::TRUNCATE, dl, VT,
25954 DAG.getNode(Opc, dl, ExtVT, R, Amt));
25955 }
25956
25957 // Constant ISD::SRA/SRL can be performed efficiently on vXi8 vectors as we
25958 // extend to vXi16 to perform a MUL scale effectively as a MUL_LOHI.
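// Editor's note (worked example): unpacking R with itself puts each byte
// b into an i16 lane as (b << 8) | b; the first shift-by-8 recovers b
// (sign-extended for SRA), the multiply by 1 << (8 - s) gives b << (8 - s),
// and the final logical shift-by-8 leaves b >> s. E.g. b = 0xF0, s = 4:
// 0x00F0 * 16 = 0x0F00, then >> 8 = 0x0F.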
25959 if (ConstantAmt && (Opc == ISD::SRA || Opc == ISD::SRL) &&
25960 (VT == MVT::v16i8 || VT == MVT::v64i8 ||
25961 (VT == MVT::v32i8 && Subtarget.hasInt256())) &&
25962 !Subtarget.hasXOP()) {
25963 int NumElts = VT.getVectorNumElements();
25964 SDValue Cst8 = DAG.getTargetConstant(8, dl, MVT::i8);
25965
25966 // Extend constant shift amount to vXi16 (it doesn't matter if the type
25967 // isn't legal).
25968 MVT ExVT = MVT::getVectorVT(MVT::i16, NumElts);
25969 Amt = DAG.getZExtOrTrunc(Amt, dl, ExVT);
25970 Amt = DAG.getNode(ISD::SUB, dl, ExVT, DAG.getConstant(8, dl, ExVT), Amt);
25971 Amt = DAG.getNode(ISD::SHL, dl, ExVT, DAG.getConstant(1, dl, ExVT), Amt);
25972 assert(ISD::isBuildVectorOfConstantSDNodes(Amt.getNode()) &&
25973 "Constant build vector expected");
25974
25975 if (VT == MVT::v16i8 && Subtarget.hasInt256()) {
25976 R = Opc == ISD::SRA ? DAG.getSExtOrTrunc(R, dl, ExVT)
25977 : DAG.getZExtOrTrunc(R, dl, ExVT);
25978 R = DAG.getNode(ISD::MUL, dl, ExVT, R, Amt);
25979 R = DAG.getNode(X86ISD::VSRLI, dl, ExVT, R, Cst8);
25980 return DAG.getZExtOrTrunc(R, dl, VT);
25981 }
25982
25983 SmallVector<SDValue, 16> LoAmt, HiAmt;
25984 for (int i = 0; i != NumElts; i += 16) {
25985 for (int j = 0; j != 8; ++j) {
25986 LoAmt.push_back(Amt.getOperand(i + j));
25987 HiAmt.push_back(Amt.getOperand(i + j + 8));
25988 }
25989 }
25990
25991 MVT VT16 = MVT::getVectorVT(MVT::i16, NumElts / 2);
25992 SDValue LoA = DAG.getBuildVector(VT16, dl, LoAmt);
25993 SDValue HiA = DAG.getBuildVector(VT16, dl, HiAmt);
25994
25995 SDValue LoR = DAG.getBitcast(VT16, getUnpackl(DAG, dl, VT, R, R));
25996 SDValue HiR = DAG.getBitcast(VT16, getUnpackh(DAG, dl, VT, R, R));
25997 LoR = DAG.getNode(X86OpcI, dl, VT16, LoR, Cst8);
25998 HiR = DAG.getNode(X86OpcI, dl, VT16, HiR, Cst8);
25999 LoR = DAG.getNode(ISD::MUL, dl, VT16, LoR, LoA);
26000 HiR = DAG.getNode(ISD::MUL, dl, VT16, HiR, HiA);
26001 LoR = DAG.getNode(X86ISD::VSRLI, dl, VT16, LoR, Cst8);
26002 HiR = DAG.getNode(X86ISD::VSRLI, dl, VT16, HiR, Cst8);
26003 return DAG.getNode(X86ISD::PACKUS, dl, VT, LoR, HiR);
26004 }
26005
26006 if (VT == MVT::v16i8 ||
26007 (VT == MVT::v32i8 && Subtarget.hasInt256() && !Subtarget.hasXOP()) ||
26008 (VT == MVT::v64i8 && Subtarget.hasBWI())) {
26009 MVT ExtVT = MVT::getVectorVT(MVT::i16, VT.getVectorNumElements() / 2);
26010
26011 auto SignBitSelect = [&](MVT SelVT, SDValue Sel, SDValue V0, SDValue V1) {
26012 if (VT.is512BitVector()) {
26013 // On AVX512BW targets we make use of the fact that VSELECT lowers
26014 // to a masked blend which selects bytes based just on the sign bit
26015 // extracted to a mask.
26016 MVT MaskVT = MVT::getVectorVT(MVT::i1, VT.getVectorNumElements());
26017 V0 = DAG.getBitcast(VT, V0);
26018 V1 = DAG.getBitcast(VT, V1);
26019 Sel = DAG.getBitcast(VT, Sel);
26020 Sel = DAG.getSetCC(dl, MaskVT, DAG.getConstant(0, dl, VT), Sel,
26021 ISD::SETGT);
26022 return DAG.getBitcast(SelVT, DAG.getSelect(dl, VT, Sel, V0, V1));
26023 } else if (Subtarget.hasSSE41()) {
26024 // On SSE41 targets we make use of the fact that VSELECT lowers
26025 // to PBLENDVB which selects bytes based just on the sign bit.
26026 V0 = DAG.getBitcast(VT, V0);
26027 V1 = DAG.getBitcast(VT, V1);
26028 Sel = DAG.getBitcast(VT, Sel);
26029 return DAG.getBitcast(SelVT, DAG.getSelect(dl, VT, Sel, V0, V1));
26030 }
26031 // On pre-SSE41 targets we test for the sign bit by comparing to
26032 // zero - a negative value will set all bits of the lanes to true
26033 // and VSELECT uses that in its OR(AND(V0,C),AND(V1,~C)) lowering.
26034 SDValue Z = DAG.getConstant(0, dl, SelVT);
26035 SDValue C = DAG.getNode(X86ISD::PCMPGT, dl, SelVT, Z, Sel);
26036 return DAG.getSelect(dl, SelVT, C, V0, V1);
26037 };
26038
26039 // Turn 'a' into a mask suitable for VSELECT: a = a << 5;
26040 // We can safely do this using i16 shifts as we're only interested in
26041 // the 3 lower bits of each byte.
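// Editor's note (illustrative): after the << 5, bit 2 of each 3-bit shift
// amount sits in the byte's sign bit, so the PBLENDVB-style select below
// picks the "shift by 4" result exactly for amounts >= 4; each 'a += a'
// then exposes the next lower amount bit for the shift-by-2 and
// shift-by-1 stages.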
26042 Amt = DAG.getBitcast(ExtVT, Amt);
26043 Amt = getTargetVShiftByConstNode(X86ISD::VSHLI, dl, ExtVT, Amt, 5, DAG);
26044 Amt = DAG.getBitcast(VT, Amt);
26045
26046 if (Opc == ISD::SHL || Opc == ISD::SRL) {
26047 // r = VSELECT(r, shift(r, 4), a);
26048 SDValue M = DAG.getNode(Opc, dl, VT, R, DAG.getConstant(4, dl, VT));
26049 R = SignBitSelect(VT, Amt, M, R);
26050
26051 // a += a
26052 Amt = DAG.getNode(ISD::ADD, dl, VT, Amt, Amt);
26053
26054 // r = VSELECT(r, shift(r, 2), a);
26055 M = DAG.getNode(Opc, dl, VT, R, DAG.getConstant(2, dl, VT));
26056 R = SignBitSelect(VT, Amt, M, R);
26057
26058 // a += a
26059 Amt = DAG.getNode(ISD::ADD, dl, VT, Amt, Amt);
26060
26061 // return VSELECT(r, shift(r, 1), a);
26062 M = DAG.getNode(Opc, dl, VT, R, DAG.getConstant(1, dl, VT));
26063 R = SignBitSelect(VT, Amt, M, R);
26064 return R;
26065 }
26066
26067 if (Opc == ISD::SRA) {
26068 // For SRA we need to unpack each byte to the higher byte of a i16 vector
26069 // so we can correctly sign extend. We don't care what happens to the
26070 // lower byte.
26071 SDValue ALo = getUnpackl(DAG, dl, VT, DAG.getUNDEF(VT), Amt);
26072 SDValue AHi = getUnpackh(DAG, dl, VT, DAG.getUNDEF(VT), Amt);
26073 SDValue RLo = getUnpackl(DAG, dl, VT, DAG.getUNDEF(VT), R);
26074 SDValue RHi = getUnpackh(DAG, dl, VT, DAG.getUNDEF(VT), R);
26075 ALo = DAG.getBitcast(ExtVT, ALo);
26076 AHi = DAG.getBitcast(ExtVT, AHi);
26077 RLo = DAG.getBitcast(ExtVT, RLo);
26078 RHi = DAG.getBitcast(ExtVT, RHi);
26079
26080 // r = VSELECT(r, shift(r, 4), a);
26081 SDValue MLo = getTargetVShiftByConstNode(X86OpcI, dl, ExtVT, RLo, 4, DAG);
26082 SDValue MHi = getTargetVShiftByConstNode(X86OpcI, dl, ExtVT, RHi, 4, DAG);
26083 RLo = SignBitSelect(ExtVT, ALo, MLo, RLo);
26084 RHi = SignBitSelect(ExtVT, AHi, MHi, RHi);
26085
26086 // a += a
26087 ALo = DAG.getNode(ISD::ADD, dl, ExtVT, ALo, ALo);
26088 AHi = DAG.getNode(ISD::ADD, dl, ExtVT, AHi, AHi);
26089
26090 // r = VSELECT(r, shift(r, 2), a);
26091 MLo = getTargetVShiftByConstNode(X86OpcI, dl, ExtVT, RLo, 2, DAG);
26092 MHi = getTargetVShiftByConstNode(X86OpcI, dl, ExtVT, RHi, 2, DAG);
26093 RLo = SignBitSelect(ExtVT, ALo, MLo, RLo);
26094 RHi = SignBitSelect(ExtVT, AHi, MHi, RHi);
26095
26096 // a += a
26097 ALo = DAG.getNode(ISD::ADD, dl, ExtVT, ALo, ALo);
26098 AHi = DAG.getNode(ISD::ADD, dl, ExtVT, AHi, AHi);
26099
26100 // r = VSELECT(r, shift(r, 1), a);
26101 MLo = getTargetVShiftByConstNode(X86OpcI, dl, ExtVT, RLo, 1, DAG);
26102 MHi = getTargetVShiftByConstNode(X86OpcI, dl, ExtVT, RHi, 1, DAG);
26103 RLo = SignBitSelect(ExtVT, ALo, MLo, RLo);
26104 RHi = SignBitSelect(ExtVT, AHi, MHi, RHi);
26105
26106 // Logical shift the result back to the lower byte, leaving a zero upper
26107 // byte meaning that we can safely pack with PACKUSWB.
26108 RLo = getTargetVShiftByConstNode(X86ISD::VSRLI, dl, ExtVT, RLo, 8, DAG);
26109 RHi = getTargetVShiftByConstNode(X86ISD::VSRLI, dl, ExtVT, RHi, 8, DAG);
26110 return DAG.getNode(X86ISD::PACKUS, dl, VT, RLo, RHi);
26111 }
26112 }
26113
26114 if (Subtarget.hasInt256() && !Subtarget.hasXOP() && VT == MVT::v16i16) {
26115 MVT ExtVT = MVT::v8i32;
26116 SDValue Z = DAG.getConstant(0, dl, VT);
26117 SDValue ALo = getUnpackl(DAG, dl, VT, Amt, Z);
26118 SDValue AHi = getUnpackh(DAG, dl, VT, Amt, Z);
26119 SDValue RLo = getUnpackl(DAG, dl, VT, Z, R);
26120 SDValue RHi = getUnpackh(DAG, dl, VT, Z, R);
26121 ALo = DAG.getBitcast(ExtVT, ALo);
26122 AHi = DAG.getBitcast(ExtVT, AHi);
26123 RLo = DAG.getBitcast(ExtVT, RLo);
26124 RHi = DAG.getBitcast(ExtVT, RHi);
26125 SDValue Lo = DAG.getNode(Opc, dl, ExtVT, RLo, ALo);
26126 SDValue Hi = DAG.getNode(Opc, dl, ExtVT, RHi, AHi);
26127 Lo = getTargetVShiftByConstNode(X86ISD::VSRLI, dl, ExtVT, Lo, 16, DAG);
26128 Hi = getTargetVShiftByConstNode(X86ISD::VSRLI, dl, ExtVT, Hi, 16, DAG);
26129 return DAG.getNode(X86ISD::PACKUS, dl, VT, Lo, Hi);
26130 }
26131
26132 if (VT == MVT::v8i16) {
26133 // If we have a constant shift amount, the non-SSE41 path is best as
26134 // avoiding bitcasts make it easier to constant fold and reduce to PBLENDW.
26135 bool UseSSE41 = Subtarget.hasSSE41() &&
26136 !ISD::isBuildVectorOfConstantSDNodes(Amt.getNode());
26137
26138 auto SignBitSelect = [&](SDValue Sel, SDValue V0, SDValue V1) {
26139 // On SSE41 targets we make use of the fact that VSELECT lowers
26140 // to PBLENDVB which selects bytes based just on the sign bit.
26141 if (UseSSE41) {
26142 MVT ExtVT = MVT::getVectorVT(MVT::i8, VT.getVectorNumElements() * 2);
26143 V0 = DAG.getBitcast(ExtVT, V0);
26144 V1 = DAG.getBitcast(ExtVT, V1);
26145 Sel = DAG.getBitcast(ExtVT, Sel);
26146 return DAG.getBitcast(VT, DAG.getSelect(dl, ExtVT, Sel, V0, V1));
26147 }
26148 // On pre-SSE41 targets we splat the sign bit - a negative value will
26149 // set all bits of the lanes to true and VSELECT uses that in
26150 // its OR(AND(V0,C),AND(V1,~C)) lowering.
26151 SDValue C =
26152 getTargetVShiftByConstNode(X86ISD::VSRAI, dl, VT, Sel, 15, DAG);
26153 return DAG.getSelect(dl, VT, C, V0, V1);
26154 };
26155
26156 // Turn 'a' into a mask suitable for VSELECT: a = a << 12;
26157 if (UseSSE41) {
26158 // On SSE41 targets we need to replicate the shift mask in both
26159 // bytes for PBLENDVB.
26160 Amt = DAG.getNode(
26161 ISD::OR, dl, VT,
26162 getTargetVShiftByConstNode(X86ISD::VSHLI, dl, VT, Amt, 4, DAG),
26163 getTargetVShiftByConstNode(X86ISD::VSHLI, dl, VT, Amt, 12, DAG));
26164 } else {
26165 Amt = getTargetVShiftByConstNode(X86ISD::VSHLI, dl, VT, Amt, 12, DAG);
26166 }
26167
26168 // r = VSELECT(r, shift(r, 8), a);
26169 SDValue M = getTargetVShiftByConstNode(X86OpcI, dl, VT, R, 8, DAG);
26170 R = SignBitSelect(Amt, M, R);
26171
26172 // a += a
26173 Amt = DAG.getNode(ISD::ADD, dl, VT, Amt, Amt);
26174
26175 // r = VSELECT(r, shift(r, 4), a);
26176 M = getTargetVShiftByConstNode(X86OpcI, dl, VT, R, 4, DAG);
26177 R = SignBitSelect(Amt, M, R);
26178
26179 // a += a
26180 Amt = DAG.getNode(ISD::ADD, dl, VT, Amt, Amt);
26181
26182 // r = VSELECT(r, shift(r, 2), a);
26183 M = getTargetVShiftByConstNode(X86OpcI, dl, VT, R, 2, DAG);
26184 R = SignBitSelect(Amt, M, R);
26185
26186 // a += a
26187 Amt = DAG.getNode(ISD::ADD, dl, VT, Amt, Amt);
26188
26189 // return VSELECT(r, shift(r, 1), a);
26190 M = getTargetVShiftByConstNode(X86OpcI, dl, VT, R, 1, DAG);
26191 R = SignBitSelect(Amt, M, R);
26192 return R;
26193 }
26194
26195 // Decompose 256-bit shifts into 128-bit shifts.
26196 if (VT.is256BitVector())
26197 return split256IntArith(Op, DAG);
26198
26199 return SDValue();
26200}
26201
26202static SDValue LowerRotate(SDValue Op, const X86Subtarget &Subtarget,
26203 SelectionDAG &DAG) {
26204 MVT VT = Op.getSimpleValueType();
26205 assert(VT.isVector() && "Custom lowering only for vector rotates!");
26206
26207 SDLoc DL(Op);
26208 SDValue R = Op.getOperand(0);
26209 SDValue Amt = Op.getOperand(1);
26210 unsigned Opcode = Op.getOpcode();
26211 unsigned EltSizeInBits = VT.getScalarSizeInBits();
26212 int NumElts = VT.getVectorNumElements();
26213
26214 // Check for constant splat rotation amount.
26215 APInt UndefElts;
26216 SmallVector<APInt, 32> EltBits;
26217 int CstSplatIndex = -1;
26218 if (getTargetConstantBitsFromNode(Amt, EltSizeInBits, UndefElts, EltBits))
26219 for (int i = 0; i != NumElts; ++i)
26220 if (!UndefElts[i]) {
26221 if (CstSplatIndex < 0 || EltBits[i] == EltBits[CstSplatIndex]) {
26222 CstSplatIndex = i;
26223 continue;
26224 }
26225 CstSplatIndex = -1;
26226 break;
26227 }
26228
26229 // AVX512 implicitly uses modulo rotation amounts.
26230 if (Subtarget.hasAVX512() && 32 <= EltSizeInBits) {
26231 // Attempt to rotate by immediate.
26232 if (0 <= CstSplatIndex) {
26233 unsigned Op = (Opcode == ISD::ROTL ? X86ISD::VROTLI : X86ISD::VROTRI);
26234 uint64_t RotateAmt = EltBits[CstSplatIndex].urem(EltSizeInBits);
26235 return DAG.getNode(Op, DL, VT, R,
26236 DAG.getTargetConstant(RotateAmt, DL, MVT::i8));
26237 }
26238
26239 // Else, fall-back on VPROLV/VPRORV.
26240 return Op;
26241 }
26242
26243 assert((Opcode == ISD::ROTL) && "Only ROTL supported");
26244
26245 // XOP has 128-bit vector variable + immediate rotates.
26246 // +ve/-ve Amt = rotate left/right - just need to handle ISD::ROTL.
26247 // XOP implicitly uses modulo rotation amounts.
26248 if (Subtarget.hasXOP()) {
26249 if (VT.is256BitVector())
26250 return split256IntArith(Op, DAG);
26251 assert(VT.is128BitVector() && "Only rotate 128-bit vectors!");
26252
26253 // Attempt to rotate by immediate.
26254 if (0 <= CstSplatIndex) {
26255 uint64_t RotateAmt = EltBits[CstSplatIndex].urem(EltSizeInBits);
26256 return DAG.getNode(X86ISD::VROTLI, DL, VT, R,
26257 DAG.getTargetConstant(RotateAmt, DL, MVT::i8));
26258 }
26259
26260 // Use general rotate by variable (per-element).
26261 return Op;
26262 }
26263
26264 // Split 256-bit integers on pre-AVX2 targets.
26265 if (VT.is256BitVector() && !Subtarget.hasAVX2())
26266 return split256IntArith(Op, DAG);
26267
26268 assert((VT == MVT::v4i32 || VT == MVT::v8i16 || VT == MVT::v16i8 ||
26269 ((VT == MVT::v8i32 || VT == MVT::v16i16 || VT == MVT::v32i8) &&
26270 Subtarget.hasAVX2())) &&
26271 "Only vXi32/vXi16/vXi8 vector rotates supported");
26272
26273 // Rotate by a uniform constant - expand back to shifts.
26274 if (0 <= CstSplatIndex)
26275 return SDValue();
26276
26277 bool IsSplatAmt = DAG.isSplatValue(Amt);
26278
26279 // v16i8/v32i8: Split rotation into rot4/rot2/rot1 stages and select by
26280 // the amount bit.
26281 if (EltSizeInBits == 8 && !IsSplatAmt) {
26282 if (ISD::isBuildVectorOfConstantSDNodes(Amt.getNode()))
26283 return SDValue();
26284
26285 // We don't need ModuloAmt here as we just peek at individual bits.
26286 MVT ExtVT = MVT::getVectorVT(MVT::i16, NumElts / 2);
26287
26288 auto SignBitSelect = [&](MVT SelVT, SDValue Sel, SDValue V0, SDValue V1) {
26289 if (Subtarget.hasSSE41()) {
26290 // On SSE41 targets we make use of the fact that VSELECT lowers
26291 // to PBLENDVB which selects bytes based just on the sign bit.
26292 V0 = DAG.getBitcast(VT, V0);
26293 V1 = DAG.getBitcast(VT, V1);
26294 Sel = DAG.getBitcast(VT, Sel);
26295 return DAG.getBitcast(SelVT, DAG.getSelect(DL, VT, Sel, V0, V1));
26296 }
26297 // On pre-SSE41 targets we test for the sign bit by comparing to
26298 // zero - a negative value will set all bits of the lanes to true
26299 // and VSELECT uses that in its OR(AND(V0,C),AND(V1,~C)) lowering.
26300 SDValue Z = DAG.getConstant(0, DL, SelVT);
26301 SDValue C = DAG.getNode(X86ISD::PCMPGT, DL, SelVT, Z, Sel);
26302 return DAG.getSelect(DL, SelVT, C, V0, V1);
26303 };
26304
26305 // Turn 'a' into a mask suitable for VSELECT: a = a << 5;
26306 // We can safely do this using i16 shifts as we're only interested in
26307 // the 3 lower bits of each byte.
26308 Amt = DAG.getBitcast(ExtVT, Amt);
26309 Amt = DAG.getNode(ISD::SHL, DL, ExtVT, Amt, DAG.getConstant(5, DL, ExtVT));
26310 Amt = DAG.getBitcast(VT, Amt);
26311
26312 // r = VSELECT(r, rot(r, 4), a);
26313 SDValue M;
26314 M = DAG.getNode(
26315 ISD::OR, DL, VT,
26316 DAG.getNode(ISD::SHL, DL, VT, R, DAG.getConstant(4, DL, VT)),
26317 DAG.getNode(ISD::SRL, DL, VT, R, DAG.getConstant(4, DL, VT)));
26318 R = SignBitSelect(VT, Amt, M, R);
26319
26320 // a += a
26321 Amt = DAG.getNode(ISD::ADD, DL, VT, Amt, Amt);
26322
26323 // r = VSELECT(r, rot(r, 2), a);
26324 M = DAG.getNode(
26325 ISD::OR, DL, VT,
26326 DAG.getNode(ISD::SHL, DL, VT, R, DAG.getConstant(2, DL, VT)),
26327 DAG.getNode(ISD::SRL, DL, VT, R, DAG.getConstant(6, DL, VT)));
26328 R = SignBitSelect(VT, Amt, M, R);
26329
26330 // a += a
26331 Amt = DAG.getNode(ISD::ADD, DL, VT, Amt, Amt);
26332
26333 // return VSELECT(r, rot(r, 1), a);
26334 M = DAG.getNode(
26335 ISD::OR, DL, VT,
26336 DAG.getNode(ISD::SHL, DL, VT, R, DAG.getConstant(1, DL, VT)),
26337 DAG.getNode(ISD::SRL, DL, VT, R, DAG.getConstant(7, DL, VT)));
26338 return SignBitSelect(VT, Amt, M, R);
26339 }
26340
26341 // ISD::ROT* uses modulo rotate amounts.
26342 Amt = DAG.getNode(ISD::AND, DL, VT, Amt,
26343 DAG.getConstant(EltSizeInBits - 1, DL, VT));
26344
26345 bool ConstantAmt = ISD::isBuildVectorOfConstantSDNodes(Amt.getNode());
26346 bool LegalVarShifts = SupportedVectorVarShift(VT, Subtarget, ISD::SHL) &&
26347 SupportedVectorVarShift(VT, Subtarget, ISD::SRL);
26348
26349 // Fallback for splats + all supported variable shifts.
26350 // Fallback for non-constant AVX2 vXi16 as well.
26351 if (IsSplatAmt || LegalVarShifts || (Subtarget.hasAVX2() && !ConstantAmt)) {
26352 SDValue AmtR = DAG.getConstant(EltSizeInBits, DL, VT);
26353 AmtR = DAG.getNode(ISD::SUB, DL, VT, AmtR, Amt);
26354 SDValue SHL = DAG.getNode(ISD::SHL, DL, VT, R, Amt);
26355 SDValue SRL = DAG.getNode(ISD::SRL, DL, VT, R, AmtR);
26356 return DAG.getNode(ISD::OR, DL, VT, SHL, SRL);
26357 }
26358
26359 // As with shifts, convert the rotation amount to a multiplication factor.
26360 SDValue Scale = convertShiftLeftToScale(Amt, DL, Subtarget, DAG);
26361 assert(Scale && "Failed to convert ROTL amount to scale");
26362
26363 // v8i16/v16i16: perform unsigned multiply hi/lo and OR the results.
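// Editor's note (illustrative): rotl(x, s) == (x << s) | (x >> (16 - s)),
// and for the scale 2^s those two halves are just the low and high i16
// halves of the 32-bit product x * 2^s. E.g. x = 0x8001, s = 1:
// x * 2 = 0x10002, so lo = 0x0002, hi = 0x0001, OR = 0x0003 = rotl(x, 1).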
26364 if (EltSizeInBits == 16) {
26365 SDValue Lo = DAG.getNode(ISD::MUL, DL, VT, R, Scale);
26366 SDValue Hi = DAG.getNode(ISD::MULHU, DL, VT, R, Scale);
26367 return DAG.getNode(ISD::OR, DL, VT, Lo, Hi);
26368 }
26369
26370 // v4i32: make use of the PMULUDQ instruction to multiply 2 lanes of v4i32
26371 // to v2i64 results at a time. The upper 32-bits contain the wrapped bits
26372 // that can then be OR'd with the lower 32-bits.
26373 assert(VT == MVT::v4i32 && "Only v4i32 vector rotate expected");
26374 static const int OddMask[] = {1, -1, 3, -1};
26375 SDValue R13 = DAG.getVectorShuffle(VT, DL, R, R, OddMask);
26376 SDValue Scale13 = DAG.getVectorShuffle(VT, DL, Scale, Scale, OddMask);
26377
26378 SDValue Res02 = DAG.getNode(X86ISD::PMULUDQ, DL, MVT::v2i64,
26379 DAG.getBitcast(MVT::v2i64, R),
26380 DAG.getBitcast(MVT::v2i64, Scale));
26381 SDValue Res13 = DAG.getNode(X86ISD::PMULUDQ, DL, MVT::v2i64,
26382 DAG.getBitcast(MVT::v2i64, R13),
26383 DAG.getBitcast(MVT::v2i64, Scale13));
26384 Res02 = DAG.getBitcast(VT, Res02);
26385 Res13 = DAG.getBitcast(VT, Res13);
26386
26387 return DAG.getNode(ISD::OR, DL, VT,
26388 DAG.getVectorShuffle(VT, DL, Res02, Res13, {0, 4, 2, 6}),
26389 DAG.getVectorShuffle(VT, DL, Res02, Res13, {1, 5, 3, 7}));
26390}
26391
26392/// Returns true if the operand type is exactly twice the native width, and
26393/// the corresponding cmpxchg8b or cmpxchg16b instruction is available.
26394/// Used to know whether to use cmpxchg8/16b when expanding atomic operations
26395/// (otherwise we leave them alone to become __sync_fetch_and_... calls).
26396bool X86TargetLowering::needsCmpXchgNb(Type *MemType) const {
26397 unsigned OpWidth = MemType->getPrimitiveSizeInBits();
26398
26399 if (OpWidth == 64)
26400 return Subtarget.hasCmpxchg8b() && !Subtarget.is64Bit();
26401 if (OpWidth == 128)
26402 return Subtarget.hasCmpxchg16b();
26403
26404 return false;
26405}
26406
26407// TODO: In 32-bit mode, use MOVLPS when SSE1 is available?
26408// TODO: In 32-bit mode, use FISTP when X87 is available?
26409bool X86TargetLowering::shouldExpandAtomicStoreInIR(StoreInst *SI) const {
26410 Type *MemType = SI->getValueOperand()->getType();
26411
26412 bool NoImplicitFloatOps =
26413 SI->getFunction()->hasFnAttribute(Attribute::NoImplicitFloat);
26414 if (MemType->getPrimitiveSizeInBits() == 64 && !Subtarget.is64Bit() &&
26415 !Subtarget.useSoftFloat() && !NoImplicitFloatOps && Subtarget.hasSSE2())
26416 return false;
26417
26418 return needsCmpXchgNb(MemType);
26419}
26420
26421// Note: this turns large loads into lock cmpxchg8b/16b.
26422// TODO: In 32-bit mode, use MOVLPS when SSE1 is available?
26423TargetLowering::AtomicExpansionKind
26424X86TargetLowering::shouldExpandAtomicLoadInIR(LoadInst *LI) const {
26425 Type *MemType = LI->getType();
26426
26427 // If this is a 64 bit atomic load on a 32-bit target and SSE2 is enabled, we
26428 // can use movq to do the load. If we have X87 we can load into an 80-bit
26429 // X87 register and store it to a stack temporary.
26430 bool NoImplicitFloatOps =
26431 LI->getFunction()->hasFnAttribute(Attribute::NoImplicitFloat);
26432 if (MemType->getPrimitiveSizeInBits() == 64 && !Subtarget.is64Bit() &&
26433 !Subtarget.useSoftFloat() && !NoImplicitFloatOps &&
26434 (Subtarget.hasSSE2() || Subtarget.hasX87()))
26435 return AtomicExpansionKind::None;
26436
26437 return needsCmpXchgNb(MemType) ? AtomicExpansionKind::CmpXChg
26438 : AtomicExpansionKind::None;
26439}
26440
26441TargetLowering::AtomicExpansionKind
26442X86TargetLowering::shouldExpandAtomicRMWInIR(AtomicRMWInst *AI) const {
26443 unsigned NativeWidth = Subtarget.is64Bit() ? 64 : 32;
26444 Type *MemType = AI->getType();
26445
26446 // If the operand is too big, we must see if cmpxchg8/16b is available
26447 // and default to library calls otherwise.
26448 if (MemType->getPrimitiveSizeInBits() > NativeWidth) {
26449 return needsCmpXchgNb(MemType) ? AtomicExpansionKind::CmpXChg
26450 : AtomicExpansionKind::None;
26451 }
26452
26453 AtomicRMWInst::BinOp Op = AI->getOperation();
26454 switch (Op) {
26455 default:
26456 llvm_unreachable("Unknown atomic operation");
26457 case AtomicRMWInst::Xchg:
26458 case AtomicRMWInst::Add:
26459 case AtomicRMWInst::Sub:
26460 // It's better to use xadd, xsub or xchg for these in all cases.
26461 return AtomicExpansionKind::None;
26462 case AtomicRMWInst::Or:
26463 case AtomicRMWInst::And:
26464 case AtomicRMWInst::Xor:
26465 // If the atomicrmw's result isn't actually used, we can just add a "lock"
26466 // prefix to a normal instruction for these operations.
26467 return !AI->use_empty() ? AtomicExpansionKind::CmpXChg
26468 : AtomicExpansionKind::None;
26469 case AtomicRMWInst::Nand:
26470 case AtomicRMWInst::Max:
26471 case AtomicRMWInst::Min:
26472 case AtomicRMWInst::UMax:
26473 case AtomicRMWInst::UMin:
26474 case AtomicRMWInst::FAdd:
26475 case AtomicRMWInst::FSub:
26476 // These always require a non-trivial set of data operations on x86. We must
26477 // use a cmpxchg loop.
26478 return AtomicExpansionKind::CmpXChg;
26479 }
26480}
26481
26482LoadInst *
26483X86TargetLowering::lowerIdempotentRMWIntoFencedLoad(AtomicRMWInst *AI) const {
26484 unsigned NativeWidth = Subtarget.is64Bit() ? 64 : 32;
26485 Type *MemType = AI->getType();
26486 // Accesses larger than the native width are turned into cmpxchg/libcalls, so
26487 // there is no benefit in turning such RMWs into loads, and it is actually
26488 // harmful as it introduces a mfence.
26489 if (MemType->getPrimitiveSizeInBits() > NativeWidth)
26490 return nullptr;
26491
26492 // If this is a canonical idempotent atomicrmw w/no uses, we have a better
26493 // lowering available in lowerAtomicArith.
26494 // TODO: push more cases through this path.
26495 if (auto *C = dyn_cast<ConstantInt>(AI->getValOperand()))
26496 if (AI->getOperation() == AtomicRMWInst::Or && C->isZero() &&
26497 AI->use_empty())
26498 return nullptr;
26499
26500 auto Builder = IRBuilder<>(AI);
26501 Module *M = Builder.GetInsertBlock()->getParent()->getParent();
26502 auto SSID = AI->getSyncScopeID();
26503 // We must restrict the ordering to avoid generating loads with Release or
26504 // ReleaseAcquire orderings.
26505 auto Order = AtomicCmpXchgInst::getStrongestFailureOrdering(AI->getOrdering());
26506
26507 // Before the load we need a fence. Here is an example lifted from
26508 // http://www.hpl.hp.com/techreports/2012/HPL-2012-68.pdf showing why a fence
26509 // is required:
26510 // Thread 0:
26511 // x.store(1, relaxed);
26512 // r1 = y.fetch_add(0, release);
26513 // Thread 1:
26514 // y.fetch_add(42, acquire);
26515 // r2 = x.load(relaxed);
26516 // r1 = r2 = 0 is impossible, but becomes possible if the idempotent rmw is
26517 // lowered to just a load without a fence. An mfence flushes the store buffer,
26518 // making the optimization clearly correct.
26519 // FIXME: it is required if isReleaseOrStronger(Order), but it is not clear
26520 // otherwise; we might be able to be more aggressive on relaxed idempotent
26521 // rmw. In practice, they do not look useful, so we don't try to be
26522 // especially clever.
26523 if (SSID == SyncScope::SingleThread)
26524 // FIXME: we could just insert an X86ISD::MEMBARRIER here, except we are at
26525 // the IR level, so we must wrap it in an intrinsic.
26526 return nullptr;
26527
26528 if (!Subtarget.hasMFence())
26529 // FIXME: it might make sense to use a locked operation here but on a
26530 // different cache-line to prevent cache-line bouncing. In practice it
26531 // is probably a small win, and x86 processors without mfence are rare
26532 // enough that we do not bother.
26533 return nullptr;
26534
26535 Function *MFence =
26536 llvm::Intrinsic::getDeclaration(M, Intrinsic::x86_sse2_mfence);
26537 Builder.CreateCall(MFence, {});
26538
26539 // Finally we can emit the atomic load.
26540 LoadInst *Loaded =
26541 Builder.CreateAlignedLoad(AI->getType(), AI->getPointerOperand(),
26542 AI->getType()->getPrimitiveSizeInBits());
26543 Loaded->setAtomic(Order, SSID);
26544 AI->replaceAllUsesWith(Loaded);
26545 AI->eraseFromParent();
26546 return Loaded;
26547}
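// Illustrative sketch (not part of X86ISelLowering.cpp): a standalone C++
// version of the two-thread example cited in the comment above, using the
// x/y/r1/r2 names from that comment. Without the fence emitted before the
// idempotent-RMW-turned-load, the outcome r1 == r2 == 0 would become
// observable on x86.
#include <atomic>
namespace fence_example {
std::atomic<int> x{0}, y{0};
int r1 = 0, r2 = 0;
void thread0() {
  x.store(1, std::memory_order_relaxed);
  r1 = y.fetch_add(0, std::memory_order_release); // idempotent atomicrmw
}
void thread1() {
  y.fetch_add(42, std::memory_order_acquire);
  r2 = x.load(std::memory_order_relaxed);
}
} // namespace fence_example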
26548
26549bool X86TargetLowering::lowerAtomicStoreAsStoreSDNode(const StoreInst &SI) const {
26550 if (!SI.isUnordered())
26551 return false;
26552 return ExperimentalUnorderedISEL;
26553}
26554bool X86TargetLowering::lowerAtomicLoadAsLoadSDNode(const LoadInst &LI) const {
26555 if (!LI.isUnordered())
26556 return false;
26557 return ExperimentalUnorderedISEL;
26558}
26559
26560
26561/// Emit a locked operation on a stack location which does not change any
26562/// memory location, but does involve a lock prefix. Location is chosen to be
26563/// a) very likely accessed only by a single thread to minimize cache traffic,
26564/// and b) definitely dereferenceable. Returns the new Chain result.
26565static SDValue emitLockedStackOp(SelectionDAG &DAG,
26566 const X86Subtarget &Subtarget,
26567 SDValue Chain, SDLoc DL) {
26568 // Implementation notes:
26569 // 1) LOCK prefix creates a full read/write reordering barrier for memory
26570 // operations issued by the current processor. As such, the location
26571 // referenced is not relevant for the ordering properties of the instruction.
26572 // See: Intel® 64 and IA-32 Architectures Software Developer’s Manual,
26573 // 8.2.3.9 Loads and Stores Are Not Reordered with Locked Instructions
26574 // 2) Using an immediate operand appears to be the best encoding choice
26575 // here since it doesn't require an extra register.
26576 // 3) OR appears to be very slightly faster than ADD. (Though, the difference
26577 // is small enough it might just be measurement noise.)
26578 // 4) When choosing offsets, there are several contributing factors:
26579 // a) If there's no redzone, we default to TOS. (We could allocate a cache
26580 // line aligned stack object to improve this case.)
26581 // b) To minimize our chances of introducing a false dependence, we prefer
26582 // to offset the stack usage from TOS slightly.
26583 // c) To minimize concerns about cross thread stack usage - in particular,
26584 // the idiomatic MyThreadPool.run([&StackVars]() {...}) pattern which
26585 // captures state in the TOS frame and accesses it from many threads -
26586 // we want to use an offset such that the offset is in a distinct cache
26587 // line from the TOS frame.
26588 //
26589 // For a general discussion of the tradeoffs and benchmark results, see:
26590 // https://shipilev.net/blog/2014/on-the-fence-with-dependencies/
26591
26592 auto &MF = DAG.getMachineFunction();
26593 auto &TFL = *Subtarget.getFrameLowering();
26594 const unsigned SPOffset = TFL.has128ByteRedZone(MF) ? -64 : 0;
26595
26596 if (Subtarget.is64Bit()) {
26597 SDValue Zero = DAG.getTargetConstant(0, DL, MVT::i32);
26598 SDValue Ops[] = {
26599 DAG.getRegister(X86::RSP, MVT::i64), // Base
26600 DAG.getTargetConstant(1, DL, MVT::i8), // Scale
26601 DAG.getRegister(0, MVT::i64), // Index
26602 DAG.getTargetConstant(SPOffset, DL, MVT::i32), // Disp
26603 DAG.getRegister(0, MVT::i16), // Segment.
26604 Zero,
26605 Chain};
26606 SDNode *Res = DAG.getMachineNode(X86::OR32mi8Locked, DL, MVT::i32,
26607 MVT::Other, Ops);
26608 return SDValue(Res, 1);
26609 }
26610
26611 SDValue Zero = DAG.getTargetConstant(0, DL, MVT::i32);
26612 SDValue Ops[] = {
26613 DAG.getRegister(X86::ESP, MVT::i32), // Base
26614 DAG.getTargetConstant(1, DL, MVT::i8), // Scale
26615 DAG.getRegister(0, MVT::i32), // Index
26616 DAG.getTargetConstant(SPOffset, DL, MVT::i32), // Disp
26617 DAG.getRegister(0, MVT::i16), // Segment.
26618 Zero,
26619 Chain
26620 };
26621 SDNode *Res = DAG.getMachineNode(X86::OR32mi8Locked, DL, MVT::i32,
26622 MVT::Other, Ops);
26623 return SDValue(Res, 1);
26624}
26625
26626static SDValue LowerATOMIC_FENCE(SDValue Op, const X86Subtarget &Subtarget,
26627 SelectionDAG &DAG) {
26628 SDLoc dl(Op);
26629 AtomicOrdering FenceOrdering =
26630 static_cast<AtomicOrdering>(Op.getConstantOperandVal(1));
26631 SyncScope::ID FenceSSID =
26632 static_cast<SyncScope::ID>(Op.getConstantOperandVal(2));
26633
26634 // The only fence that needs an instruction is a sequentially-consistent
26635 // cross-thread fence.
26636 if (FenceOrdering == AtomicOrdering::SequentiallyConsistent &&
26637 FenceSSID == SyncScope::System) {
26638 if (Subtarget.hasMFence())
26639 return DAG.getNode(X86ISD::MFENCE, dl, MVT::Other, Op.getOperand(0));
26640
26641 SDValue Chain = Op.getOperand(0);
26642 return emitLockedStackOp(DAG, Subtarget, Chain, dl);
26643 }
26644
26645 // MEMBARRIER is a compiler barrier; it codegens to a no-op.
26646 return DAG.getNode(X86ISD::MEMBARRIER, dl, MVT::Other, Op.getOperand(0));
26647}
26648
26649static SDValue LowerCMP_SWAP(SDValue Op, const X86Subtarget &Subtarget,
26650 SelectionDAG &DAG) {
26651 MVT T = Op.getSimpleValueType();
26652 SDLoc DL(Op);
26653 unsigned Reg = 0;
26654 unsigned size = 0;
26655 switch(T.SimpleTy) {
26656 default: llvm_unreachable("Invalid value type!");
26657 case MVT::i8: Reg = X86::AL; size = 1; break;
26658 case MVT::i16: Reg = X86::AX; size = 2; break;
26659 case MVT::i32: Reg = X86::EAX; size = 4; break;
26660 case MVT::i64:
26661 assert(Subtarget.is64Bit() && "Node not type legal!");
26662 Reg = X86::RAX; size = 8;
26663 break;
26664 }
26665 SDValue cpIn = DAG.getCopyToReg(Op.getOperand(0), DL, Reg,
26666 Op.getOperand(2), SDValue());
26667 SDValue Ops[] = { cpIn.getValue(0),
26668 Op.getOperand(1),
26669 Op.getOperand(3),
26670 DAG.getTargetConstant(size, DL, MVT::i8),
26671 cpIn.getValue(1) };
26672 SDVTList Tys = DAG.getVTList(MVT::Other, MVT::Glue);
26673 MachineMemOperand *MMO = cast<AtomicSDNode>(Op)->getMemOperand();
26674 SDValue Result = DAG.getMemIntrinsicNode(X86ISD::LCMPXCHG_DAG, DL, Tys,
26675 Ops, T, MMO);
26676
26677 SDValue cpOut =
26678 DAG.getCopyFromReg(Result.getValue(0), DL, Reg, T, Result.getValue(1));
26679 SDValue EFLAGS = DAG.getCopyFromReg(cpOut.getValue(1), DL, X86::EFLAGS,
26680 MVT::i32, cpOut.getValue(2));
26681 SDValue Success = getSETCC(X86::COND_E, EFLAGS, DL, DAG);
26682
26683 return DAG.getNode(ISD::MERGE_VALUES, DL, Op->getVTList(),
26684 cpOut, Success, EFLAGS.getValue(1));
26685}
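// Illustrative sketch (not part of X86ISelLowering.cpp): a scalar model of the
// compare-and-swap semantics that the LCMPXCHG node built above implements.
// The accumulator register receives the old memory value (cpOut) and ZF, read
// back via COND_E, reports success. The helper name is invented here.
#include <cstdint>
static bool cmpxchgScalarModel(uint32_t *Ptr, uint32_t &Expected,
                               uint32_t Desired) {
  uint32_t Old = *Ptr;               // value observed in memory
  bool Success = (Old == Expected);  // ZF after the locked compare
  if (Success)
    *Ptr = Desired;                  // store happens only on a match
  Expected = Old;                    // old value copied out of AL/AX/EAX/RAX
  return Success;                    // success bit materialized by SETE
}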
26686
26687// Create MOVMSKB, taking into account whether we need to split for AVX1.
26688static SDValue getPMOVMSKB(const SDLoc &DL, SDValue V, SelectionDAG &DAG,
26689 const X86Subtarget &Subtarget) {
26690 MVT InVT = V.getSimpleValueType();
26691
26692 if (InVT == MVT::v64i8) {
26693 SDValue Lo, Hi;
26694 std::tie(Lo, Hi) = DAG.SplitVector(V, DL);
26695 Lo = getPMOVMSKB(DL, Lo, DAG, Subtarget);
26696 Hi = getPMOVMSKB(DL, Hi, DAG, Subtarget);
26697 Lo = DAG.getNode(ISD::ZERO_EXTEND, DL, MVT::i64, Lo);
26698 Hi = DAG.getNode(ISD::ANY_EXTEND, DL, MVT::i64, Hi);
26699 Hi = DAG.getNode(ISD::SHL, DL, MVT::i64, Hi,
26700 DAG.getConstant(32, DL, MVT::i8));
26701 return DAG.getNode(ISD::OR, DL, MVT::i64, Lo, Hi);
26702 }
26703 if (InVT == MVT::v32i8 && !Subtarget.hasInt256()) {
26704 SDValue Lo, Hi;
26705 std::tie(Lo, Hi) = DAG.SplitVector(V, DL);
26706 Lo = DAG.getNode(X86ISD::MOVMSK, DL, MVT::i32, Lo);
26707 Hi = DAG.getNode(X86ISD::MOVMSK, DL, MVT::i32, Hi);
26708 Hi = DAG.getNode(ISD::SHL, DL, MVT::i32, Hi,
26709 DAG.getConstant(16, DL, MVT::i8));
26710 return DAG.getNode(ISD::OR, DL, MVT::i32, Lo, Hi);
26711 }
26712
26713 return DAG.getNode(X86ISD::MOVMSK, DL, MVT::i32, V);
26714}
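// Illustrative sketch (not part of X86ISelLowering.cpp): the v64i8 case above
// recombines two 32-bit MOVMSK results into one 64-bit mask roughly like the
// scalar helper below (name invented here).
#include <cstdint>
static uint64_t combineMovmskHalves(uint32_t LoMask, uint32_t HiMask) {
  return static_cast<uint64_t>(LoMask) |        // zero-extended low half
         (static_cast<uint64_t>(HiMask) << 32); // high half shifted up by 32
}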
26715
26716static SDValue LowerBITCAST(SDValue Op, const X86Subtarget &Subtarget,
26717 SelectionDAG &DAG) {
26718 SDValue Src = Op.getOperand(0);
26719 MVT SrcVT = Src.getSimpleValueType();
26720 MVT DstVT = Op.getSimpleValueType();
26721
26722 // Legalize (v64i1 (bitcast i64 (X))) by splitting the i64, bitcasting each
26723 // half to v32i1 and concatenating the result.
26724 if (SrcVT == MVT::i64 && DstVT == MVT::v64i1) {
26725 assert(!Subtarget.is64Bit() && "Expected 32-bit mode");
26726 assert(Subtarget.hasBWI() && "Expected BWI target");
26727 SDLoc dl(Op);
26728 SDValue Lo = DAG.getNode(ISD::EXTRACT_ELEMENT, dl, MVT::i32, Src,
26729 DAG.getIntPtrConstant(0, dl));
26730 Lo = DAG.getBitcast(MVT::v32i1, Lo);
26731 SDValue Hi = DAG.getNode(ISD::EXTRACT_ELEMENT, dl, MVT::i32, Src,
26732 DAG.getIntPtrConstant(1, dl));
26733 Hi = DAG.getBitcast(MVT::v32i1, Hi);
26734 return DAG.getNode(ISD::CONCAT_VECTORS, dl, MVT::v64i1, Lo, Hi);
26735 }
26736
26737 // Custom splitting for BWI types when AVX512F is available but BWI isn't.
26738 if ((SrcVT == MVT::v32i16 || SrcVT == MVT::v64i8) && DstVT.isVector() &&
26739 DAG.getTargetLoweringInfo().isTypeLegal(DstVT)) {
26740 SDLoc dl(Op);
26741 SDValue Lo, Hi;
26742 std::tie(Lo, Hi) = DAG.SplitVector(Op.getOperand(0), dl);
26743 MVT CastVT = DstVT.getHalfNumVectorElementsVT();
26744 Lo = DAG.getBitcast(CastVT, Lo);
26745 Hi = DAG.getBitcast(CastVT, Hi);
26746 return DAG.getNode(ISD::CONCAT_VECTORS, dl, DstVT, Lo, Hi);
26747 }
26748
26749 // Use MOVMSK for vector to scalar conversion to prevent scalarization.
26750 if ((SrcVT == MVT::v16i1 || SrcVT == MVT::v32i1) && DstVT.isScalarInteger()) {
26751 assert(!Subtarget.hasAVX512() && "Should use K-registers with AVX512");
26752 MVT SExtVT = SrcVT == MVT::v16i1 ? MVT::v16i8 : MVT::v32i8;
26753 SDLoc DL(Op);
26754 SDValue V = DAG.getSExtOrTrunc(Src, DL, SExtVT);
26755 V = getPMOVMSKB(DL, V, DAG, Subtarget);
26756 return DAG.getZExtOrTrunc(V, DL, DstVT);
26757 }
26758
26759 assert((SrcVT == MVT::v2i32 || SrcVT == MVT::v4i16 || SrcVT == MVT::v8i8 ||
26760 SrcVT == MVT::i64) && "Unexpected VT!");
26761
26762 assert(Subtarget.hasSSE2() && "Requires at least SSE2!");
26763 if (!(DstVT == MVT::f64 && SrcVT == MVT::i64) &&
26764 !(DstVT == MVT::x86mmx && SrcVT.isVector()))
26765 // This conversion needs to be expanded.
26766 return SDValue();
26767
26768 SDLoc dl(Op);
26769 if (SrcVT.isVector()) {
26770 // Widen the input vector in the case of MVT::v2i32.
26771 // Example: from MVT::v2i32 to MVT::v4i32.
26772 MVT NewVT = MVT::getVectorVT(SrcVT.getVectorElementType(),
26773 SrcVT.getVectorNumElements() * 2);
26774 Src = DAG.getNode(ISD::CONCAT_VECTORS, dl, NewVT, Src,
26775 DAG.getUNDEF(SrcVT));
26776 } else {
26777 assert(SrcVT == MVT::i64 && !Subtarget.is64Bit() &&
26778 "Unexpected source type in LowerBITCAST");
26779 Src = DAG.getNode(ISD::SCALAR_TO_VECTOR, dl, MVT::v2i64, Src);
26780 }
26781
26782 MVT V2X64VT = DstVT == MVT::f64 ? MVT::v2f64 : MVT::v2i64;
26783 Src = DAG.getNode(ISD::BITCAST, dl, V2X64VT, Src);
26784
26785 if (DstVT == MVT::x86mmx)
26786 return DAG.getNode(X86ISD::MOVDQ2Q, dl, DstVT, Src);
26787
26788 return DAG.getNode(ISD::EXTRACT_VECTOR_ELT, dl, DstVT, Src,
26789 DAG.getIntPtrConstant(0, dl));
26790}
26791
26792/// Compute the horizontal sum of bytes in V for the elements of VT.
26793///
26794/// Requires V to be a byte vector and VT to be an integer vector type with
26795/// wider elements than V's type. The width of the elements of VT determines
26796/// how many bytes of V are summed horizontally to produce each element of the
26797/// result.
26798static SDValue LowerHorizontalByteSum(SDValue V, MVT VT,
26799 const X86Subtarget &Subtarget,
26800 SelectionDAG &DAG) {
26801 SDLoc DL(V);
26802 MVT ByteVecVT = V.getSimpleValueType();
26803 MVT EltVT = VT.getVectorElementType();
26804 assert(ByteVecVT.getVectorElementType() == MVT::i8 &&
26805 "Expected value to have byte element type.");
26806 assert(EltVT != MVT::i8 &&
26807 "Horizontal byte sum only makes sense for wider elements!");
26808 unsigned VecSize = VT.getSizeInBits();
26809 assert(ByteVecVT.getSizeInBits() == VecSize && "Cannot change vector size!");
26810
26811 // The PSADBW instruction horizontally adds all bytes and leaves the result in
26812 // i64 chunks, thus directly computing the pop count for v2i64 and v4i64.
26813 if (EltVT == MVT::i64) {
26814 SDValue Zeros = DAG.getConstant(0, DL, ByteVecVT);
26815 MVT SadVecVT = MVT::getVectorVT(MVT::i64, VecSize / 64);
26816 V = DAG.getNode(X86ISD::PSADBW, DL, SadVecVT, V, Zeros);
26817 return DAG.getBitcast(VT, V);
26818 }
26819
26820 if (EltVT == MVT::i32) {
26821 // We unpack the low half and high half into i32s interleaved with zeros so
26822 // that we can use PSADBW to horizontally sum them. The most useful part of
26823 // this is that it lines up the results of two PSADBW instructions to be
26824 // two v2i64 vectors which concatenated are the 4 population counts. We can
26825 // then use PACKUSWB to shrink and concatenate them into a v4i32 again.
26826 SDValue Zeros = DAG.getConstant(0, DL, VT);
26827 SDValue V32 = DAG.getBitcast(VT, V);
26828 SDValue Low = getUnpackl(DAG, DL, VT, V32, Zeros);
26829 SDValue High = getUnpackh(DAG, DL, VT, V32, Zeros);
26830
26831 // Do the horizontal sums into two v2i64s.
26832 Zeros = DAG.getConstant(0, DL, ByteVecVT);
26833 MVT SadVecVT = MVT::getVectorVT(MVT::i64, VecSize / 64);
26834 Low = DAG.getNode(X86ISD::PSADBW, DL, SadVecVT,
26835 DAG.getBitcast(ByteVecVT, Low), Zeros);
26836 High = DAG.getNode(X86ISD::PSADBW, DL, SadVecVT,
26837 DAG.getBitcast(ByteVecVT, High), Zeros);
26838
26839 // Merge them together.
26840 MVT ShortVecVT = MVT::getVectorVT(MVT::i16, VecSize / 16);
26841 V = DAG.getNode(X86ISD::PACKUS, DL, ByteVecVT,
26842 DAG.getBitcast(ShortVecVT, Low),
26843 DAG.getBitcast(ShortVecVT, High));
26844
26845 return DAG.getBitcast(VT, V);
26846 }
26847
26848 // The only element type left is i16.
26849 assert(EltVT == MVT::i16 && "Unknown how to handle type");
26850
26851 // To obtain the pop count for each i16 element starting from the pop count
26852 // for i8 elements, shift the i16s left by 8, sum as i8s, and then shift as
26853 // i16s right by 8. It is important to shift as i16s because an i8 vector
26854 // shift isn't directly supported.
26855 SDValue ShifterV = DAG.getConstant(8, DL, VT);
26856 SDValue Shl = DAG.getNode(ISD::SHL, DL, VT, DAG.getBitcast(VT, V), ShifterV);
26857 V = DAG.getNode(ISD::ADD, DL, ByteVecVT, DAG.getBitcast(ByteVecVT, Shl),
26858 DAG.getBitcast(ByteVecVT, V));
26859 return DAG.getNode(ISD::SRL, DL, VT, DAG.getBitcast(VT, V), ShifterV);
26860}
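// Illustrative sketch (not part of X86ISelLowering.cpp): a scalar model of the
// i16 path above. Each 16-bit lane holds two per-byte pop counts, and the
// shift-left / byte-wise add / shift-right sequence collapses them into a
// single count per lane. The helper name is invented here.
#include <cstdint>
static uint16_t horizontalByteSum16(uint16_t Lane) {
  uint16_t Shl = static_cast<uint16_t>(Lane << 8); // SHL as i16 by 8
  // Byte-wise ADD of Lane and Shl, as done on the byte-typed vector.
  uint8_t Lo = static_cast<uint8_t>(Lane) + static_cast<uint8_t>(Shl);
  uint8_t Hi = static_cast<uint8_t>(Lane >> 8) + static_cast<uint8_t>(Shl >> 8);
  uint16_t Sum = static_cast<uint16_t>((Hi << 8) | Lo);
  return static_cast<uint16_t>(Sum >> 8);          // SRL as i16 by 8
}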
26861
26862static SDValue LowerVectorCTPOPInRegLUT(SDValue Op, const SDLoc &DL,
26863 const X86Subtarget &Subtarget,
26864 SelectionDAG &DAG) {
26865 MVT VT = Op.getSimpleValueType();
26866 MVT EltVT = VT.getVectorElementType();
26867 int NumElts = VT.getVectorNumElements();
26868 (void)EltVT;
26869 assert(EltVT == MVT::i8 && "Only vXi8 vector CTPOP lowering supported.");
26870
26871 // Implement a lookup table in register by using an algorithm based on:
26872 // http://wm.ite.pl/articles/sse-popcount.html
26873 //
26874 // The general idea is that every lower byte nibble in the input vector is an
26875 // index into an in-register pre-computed pop count table. We then split the
26876 // input vector into two new ones: (1) a vector with only the shifted-right
26877 // higher nibbles for each byte and (2) a vector with the lower nibbles (and
26878 // masked out higher ones) for each byte. PSHUFB is used separately with both
26879 // to index the in-register table. Next, both are added and the result is an
26880 // i8 vector where each element contains the pop count for its input byte.
26881 const int LUT[16] = {/* 0 */ 0, /* 1 */ 1, /* 2 */ 1, /* 3 */ 2,
26882 /* 4 */ 1, /* 5 */ 2, /* 6 */ 2, /* 7 */ 3,
26883 /* 8 */ 1, /* 9 */ 2, /* a */ 2, /* b */ 3,
26884 /* c */ 2, /* d */ 3, /* e */ 3, /* f */ 4};
26885
26886 SmallVector<SDValue, 64> LUTVec;
26887 for (int i = 0; i < NumElts; ++i)
26888 LUTVec.push_back(DAG.getConstant(LUT[i % 16], DL, MVT::i8));
26889 SDValue InRegLUT = DAG.getBuildVector(VT, DL, LUTVec);
26890 SDValue M0F = DAG.getConstant(0x0F, DL, VT);
26891
26892 // High nibbles
26893 SDValue FourV = DAG.getConstant(4, DL, VT);
26894 SDValue HiNibbles = DAG.getNode(ISD::SRL, DL, VT, Op, FourV);
26895
26896 // Low nibbles
26897 SDValue LoNibbles = DAG.getNode(ISD::AND, DL, VT, Op, M0F);
26898
26899 // The input vector is used as the shuffle mask that indexes elements into the
26900 // LUT. After counting low and high nibbles, add the vector to obtain the
26901 // final pop count per i8 element.
26902 SDValue HiPopCnt = DAG.getNode(X86ISD::PSHUFB, DL, VT, InRegLUT, HiNibbles);
26903 SDValue LoPopCnt = DAG.getNode(X86ISD::PSHUFB, DL, VT, InRegLUT, LoNibbles);
26904 return DAG.getNode(ISD::ADD, DL, VT, HiPopCnt, LoPopCnt);
26905}
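// Illustrative sketch (not part of X86ISelLowering.cpp): a scalar model of the
// in-register LUT algorithm above, where the two PSHUFB lookups become plain
// array indexing on the low and high nibbles of each byte. The helper name is
// invented here.
#include <cstdint>
static uint8_t popcount8ViaNibbleLUT(uint8_t Byte) {
  static const uint8_t LUT[16] = {0, 1, 1, 2, 1, 2, 2, 3,
                                  1, 2, 2, 3, 2, 3, 3, 4};
  uint8_t HiNibble = Byte >> 4;         // the ISD::SRL by 4
  uint8_t LoNibble = Byte & 0x0F;       // the ISD::AND with 0x0F
  return LUT[HiNibble] + LUT[LoNibble]; // the two PSHUFBs plus the final ADD
}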
26906
26907// Please ensure that any codegen change from LowerVectorCTPOP is reflected in
26908// updated cost models in X86TTIImpl::getIntrinsicInstrCost.
26909static SDValue LowerVectorCTPOP(SDValue Op, const X86Subtarget &Subtarget,
26910 SelectionDAG &DAG) {
26911 MVT VT = Op.getSimpleValueType();
26912 assert((VT.is512BitVector() || VT.is256BitVector() || VT.is128BitVector()) &&
26913 "Unknown CTPOP type to handle");
26914 SDLoc DL(Op.getNode());
26915 SDValue Op0 = Op.getOperand(0);
26916
26917 // TRUNC(CTPOP(ZEXT(X))) to make use of vXi32/vXi64 VPOPCNT instructions.
26918 if (Subtarget.hasVPOPCNTDQ()) {
26919 unsigned NumElems = VT.getVectorNumElements();
26920 assert((VT.getVectorElementType() == MVT::i8 ||
26921 VT.getVectorElementType() == MVT::i16) && "Unexpected type");
26922 if (NumElems < 16 || (NumElems == 16 && Subtarget.canExtendTo512DQ())) {
26923 MVT NewVT = MVT::getVectorVT(MVT::i32, NumElems);
26924 Op = DAG.getNode(ISD::ZERO_EXTEND, DL, NewVT, Op0);
26925 Op = DAG.getNode(ISD::CTPOP, DL, NewVT, Op);
26926 return DAG.getNode(ISD::TRUNCATE, DL, VT, Op);
26927 }
26928 }
26929
26930 // Decompose 256-bit ops into smaller 128-bit ops.
26931 if (VT.is256BitVector() && !Subtarget.hasInt256())
26932 return Lower256IntUnary(Op, DAG);
26933
26934 // Decompose 512-bit ops into smaller 256-bit ops.
26935 if (VT.is512BitVector() && !Subtarget.hasBWI())
26936 return Lower512IntUnary(Op, DAG);
26937
26938 // For element types greater than i8, do vXi8 pop counts and a bytesum.
26939 if (VT.getScalarType() != MVT::i8) {
26940 MVT ByteVT = MVT::getVectorVT(MVT::i8, VT.getSizeInBits() / 8);
26941 SDValue ByteOp = DAG.getBitcast(ByteVT, Op0);
26942 SDValue PopCnt8 = DAG.getNode(ISD::CTPOP, DL, ByteVT, ByteOp);
26943 return LowerHorizontalByteSum(PopCnt8, VT, Subtarget, DAG);
26944 }
26945
26946 // We can't use the fast LUT approach, so fall back on LegalizeDAG.
26947 if (!Subtarget.hasSSSE3())
26948 return SDValue();
26949
26950 return LowerVectorCTPOPInRegLUT(Op0, DL, Subtarget, DAG);
26951}
26952
26953static SDValue LowerCTPOP(SDValue Op, const X86Subtarget &Subtarget,
26954 SelectionDAG &DAG) {
26955 assert(Op.getSimpleValueType().isVector() &&
26956 "We only do custom lowering for vector population count.");
26957 return LowerVectorCTPOP(Op, Subtarget, DAG);
26958}
26959
26960static SDValue LowerBITREVERSE_XOP(SDValue Op, SelectionDAG &DAG) {
26961 MVT VT = Op.getSimpleValueType();
26962 SDValue In = Op.getOperand(0);
26963 SDLoc DL(Op);
26964
26965 // For scalars, it's still beneficial to transfer to/from the SIMD unit to
26966 // perform the BITREVERSE.
26967 if (!VT.isVector()) {
26968 MVT VecVT = MVT::getVectorVT(VT, 128 / VT.getSizeInBits());
26969 SDValue Res = DAG.getNode(ISD::SCALAR_TO_VECTOR, DL, VecVT, In);
26970 Res = DAG.getNode(ISD::BITREVERSE, DL, VecVT, Res);
26971 return DAG.getNode(ISD::EXTRACT_VECTOR_ELT, DL, VT, Res,
26972 DAG.getIntPtrConstant(0, DL));
26973 }
26974
26975 int NumElts = VT.getVectorNumElements();
26976 int ScalarSizeInBytes = VT.getScalarSizeInBits() / 8;
26977
26978 // Decompose 256-bit ops into smaller 128-bit ops.
26979 if (VT.is256BitVector())
26980 return Lower256IntUnary(Op, DAG);
26981
26982 assert(VT.is128BitVector() &&
26983 "Only 128-bit vector bitreverse lowering supported.");
26984
26985 // VPPERM reverses the bits of a byte with the permute Op (2 << 5), and we
26986 // perform the BSWAP in the shuffle.
26987 // It's best to shuffle using the second operand, as this implicitly allows
26988 // memory folding for multiple vectors.
26989 SmallVector<SDValue, 16> MaskElts;
26990 for (int i = 0; i != NumElts; ++i) {
26991 for (int j = ScalarSizeInBytes - 1; j >= 0; --j) {
26992 int SourceByte = 16 + (i * ScalarSizeInBytes) + j;
26993 int PermuteByte = SourceByte | (2 << 5);
26994 MaskElts.push_back(DAG.getConstant(PermuteByte, DL, MVT::i8));
26995 }
26996 }
26997
26998 SDValue Mask = DAG.getBuildVector(MVT::v16i8, DL, MaskElts);
26999 SDValue Res = DAG.getBitcast(MVT::v16i8, In);
27000 Res = DAG.getNode(X86ISD::VPPERM, DL, MVT::v16i8, DAG.getUNDEF(MVT::v16i8),
27001 Res, Mask);
27002 return DAG.getBitcast(VT, Res);
27003}
27004
27005static SDValue LowerBITREVERSE(SDValue Op, const X86Subtarget &Subtarget,
27006 SelectionDAG &DAG) {
27007 MVT VT = Op.getSimpleValueType();
27008
27009 if (Subtarget.hasXOP() && !VT.is512BitVector())
27010 return LowerBITREVERSE_XOP(Op, DAG);
27011
27012 assert(Subtarget.hasSSSE3() && "SSSE3 required for BITREVERSE");
27013
27014 SDValue In = Op.getOperand(0);
27015 SDLoc DL(Op);
27016
27017 // Split v8i64/v16i32 without BWI so that we can still use the PSHUFB
27018 // lowering.
27019 if (VT == MVT::v8i64 || VT == MVT::v16i32) {
27020 assert(!Subtarget.hasBWI() && "BWI should Expand BITREVERSE");
27021 return Lower512IntUnary(Op, DAG);
27022 }
27023
27024 unsigned NumElts = VT.getVectorNumElements();
27025 assert(VT.getScalarType() == MVT::i8 &&
27026 "Only byte vector BITREVERSE supported");
27027
27028 // Decompose 256-bit ops into smaller 128-bit ops on pre-AVX2.
27029 if (VT.is256BitVector() && !Subtarget.hasInt256())
27030 return Lower256IntUnary(Op, DAG);
27031
27032 // Perform BITREVERSE using PSHUFB lookups. Each byte is split into
27033 // two nibbles and a PSHUFB lookup to find the bitreverse of each
27034 // 0-15 value (moved to the other nibble).
27035 SDValue NibbleMask = DAG.getConstant(0xF, DL, VT);
27036 SDValue Lo = DAG.getNode(ISD::AND, DL, VT, In, NibbleMask);
27037 SDValue Hi = DAG.getNode(ISD::SRL, DL, VT, In, DAG.getConstant(4, DL, VT));
27038
27039 const int LoLUT[16] = {
27040 /* 0 */ 0x00, /* 1 */ 0x80, /* 2 */ 0x40, /* 3 */ 0xC0,
27041 /* 4 */ 0x20, /* 5 */ 0xA0, /* 6 */ 0x60, /* 7 */ 0xE0,
27042 /* 8 */ 0x10, /* 9 */ 0x90, /* a */ 0x50, /* b */ 0xD0,
27043 /* c */ 0x30, /* d */ 0xB0, /* e */ 0x70, /* f */ 0xF0};
27044 const int HiLUT[16] = {
27045 /* 0 */ 0x00, /* 1 */ 0x08, /* 2 */ 0x04, /* 3 */ 0x0C,
27046 /* 4 */ 0x02, /* 5 */ 0x0A, /* 6 */ 0x06, /* 7 */ 0x0E,
27047 /* 8 */ 0x01, /* 9 */ 0x09, /* a */ 0x05, /* b */ 0x0D,
27048 /* c */ 0x03, /* d */ 0x0B, /* e */ 0x07, /* f */ 0x0F};
27049
27050 SmallVector<SDValue, 16> LoMaskElts, HiMaskElts;
27051 for (unsigned i = 0; i < NumElts; ++i) {
27052 LoMaskElts.push_back(DAG.getConstant(LoLUT[i % 16], DL, MVT::i8));
27053 HiMaskElts.push_back(DAG.getConstant(HiLUT[i % 16], DL, MVT::i8));
27054 }
27055
27056 SDValue LoMask = DAG.getBuildVector(VT, DL, LoMaskElts);
27057 SDValue HiMask = DAG.getBuildVector(VT, DL, HiMaskElts);
27058 Lo = DAG.getNode(X86ISD::PSHUFB, DL, VT, LoMask, Lo);
27059 Hi = DAG.getNode(X86ISD::PSHUFB, DL, VT, HiMask, Hi);
27060 return DAG.getNode(ISD::OR, DL, VT, Lo, Hi);
27061}
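// Illustrative sketch (not part of X86ISelLowering.cpp): a scalar model of the
// PSHUFB-based byte BITREVERSE above. The low nibble indexes a table of values
// bit-reversed into the high nibble, the high nibble indexes a table producing
// the low nibble, and the two results are ORed. The helper name is invented.
#include <cstdint>
static uint8_t bitreverse8ViaLUT(uint8_t Byte) {
  static const uint8_t LoLUT[16] = {0x00, 0x80, 0x40, 0xC0, 0x20, 0xA0, 0x60,
                                    0xE0, 0x10, 0x90, 0x50, 0xD0, 0x30, 0xB0,
                                    0x70, 0xF0};
  static const uint8_t HiLUT[16] = {0x00, 0x08, 0x04, 0x0C, 0x02, 0x0A, 0x06,
                                    0x0E, 0x01, 0x09, 0x05, 0x0D, 0x03, 0x0B,
                                    0x07, 0x0F};
  return LoLUT[Byte & 0x0F] | HiLUT[Byte >> 4];
}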
27062
27063static SDValue lowerAtomicArithWithLOCK(SDValue N, SelectionDAG &DAG,
27064 const X86Subtarget &Subtarget) {
27065 unsigned NewOpc = 0;
27066 switch (N->getOpcode()) {
27067 case ISD::ATOMIC_LOAD_ADD:
27068 NewOpc = X86ISD::LADD;
27069 break;
27070 case ISD::ATOMIC_LOAD_SUB:
27071 NewOpc = X86ISD::LSUB;
27072 break;
27073 case ISD::ATOMIC_LOAD_OR:
27074 NewOpc = X86ISD::LOR;
27075 break;
27076 case ISD::ATOMIC_LOAD_XOR:
27077 NewOpc = X86ISD::LXOR;
27078 break;
27079 case ISD::ATOMIC_LOAD_AND:
27080 NewOpc = X86ISD::LAND;
27081 break;
27082 default:
27083 llvm_unreachable("Unknown ATOMIC_LOAD_ opcode");
27084 }
27085
27086 MachineMemOperand *MMO = cast<MemSDNode>(N)->getMemOperand();
27087
27088 return DAG.getMemIntrinsicNode(
27089 NewOpc, SDLoc(N), DAG.getVTList(MVT::i32, MVT::Other),
27090 {N->getOperand(0), N->getOperand(1), N->getOperand(2)},
27091 /*MemVT=*/N->getSimpleValueType(0), MMO);
27092}
27093
27094/// Lower atomic_load_ops into LOCK-prefixed operations.
27095static SDValue lowerAtomicArith(SDValue N, SelectionDAG &DAG,
27096 const X86Subtarget &Subtarget) {
27097 AtomicSDNode *AN = cast<AtomicSDNode>(N.getNode());
27098 SDValue Chain = N->getOperand(0);
27099 SDValue LHS = N->getOperand(1);
27100 SDValue RHS = N->getOperand(2);
27101 unsigned Opc = N->getOpcode();
27102 MVT VT = N->getSimpleValueType(0);
27103 SDLoc DL(N);
27104
27105 // We can lower atomic_load_add into LXADD. However, any other atomicrmw op
27106 // can only be lowered when the result is unused. They should have already
27107 // been transformed into a cmpxchg loop in AtomicExpand.
27108 if (N->hasAnyUseOfValue(0)) {
27109 // Handle (atomic_load_sub p, v) as (atomic_load_add p, -v), to be able to
27110 // select LXADD if LOCK_SUB can't be selected.
27111 if (Opc == ISD::ATOMIC_LOAD_SUB) {
27112 RHS = DAG.getNode(ISD::SUB, DL, VT, DAG.getConstant(0, DL, VT), RHS);
27113 return DAG.getAtomic(ISD::ATOMIC_LOAD_ADD, DL, VT, Chain, LHS,
27114 RHS, AN->getMemOperand());
27115 }
27116 assert(Opc == ISD::ATOMIC_LOAD_ADD &&
27117 "Used AtomicRMW ops other than Add should have been expanded!");
27118 return N;
27119 }
27120
27121 // Specialized lowering for the canonical form of an idempotent atomicrmw.
27122 // The core idea here is that since the memory location isn't actually
27123 // changing, all we need is a lowering for the *ordering* impacts of the
27124 // atomicrmw. As such, we can choose a different operation and memory
27125 // location to minimize impact on other code.
27126 if (Opc == ISD::ATOMIC_LOAD_OR && isNullConstant(RHS)) {
27127 // On X86, the only ordering which actually requires an instruction is a
27128 // seq_cst that isn't SingleThread; everything else just needs to be
27129 // preserved during codegen and then dropped. Note that we expect (but don't
27130 // assume) that orderings other than seq_cst and acq_rel have been
27131 // canonicalized to a store or load.
27132 if (AN->getOrdering() == AtomicOrdering::SequentiallyConsistent &&
27133 AN->getSyncScopeID() == SyncScope::System) {
27134 // Prefer a locked operation against a stack location to minimize cache
27135 // traffic. This assumes that stack locations are very likely to be
27136 // accessed only by the owning thread.
27137 SDValue NewChain = emitLockedStackOp(DAG, Subtarget, Chain, DL);
27138 assert(!N->hasAnyUseOfValue(0));
27139 // NOTE: The getUNDEF is needed to give something for the unused result 0.
27140 return DAG.getNode(ISD::MERGE_VALUES, DL, N->getVTList(),
27141 DAG.getUNDEF(VT), NewChain);
27142 }
27143 // MEMBARRIER is a compiler barrier; it codegens to a no-op.
27144 SDValue NewChain = DAG.getNode(X86ISD::MEMBARRIER, DL, MVT::Other, Chain);
27145 assert(!N->hasAnyUseOfValue(0));
27146 // NOTE: The getUNDEF is needed to give something for the unused result 0.
27147 return DAG.getNode(ISD::MERGE_VALUES, DL, N->getVTList(),
27148 DAG.getUNDEF(VT), NewChain);
27149 }
27150
27151 SDValue LockOp = lowerAtomicArithWithLOCK(N, DAG, Subtarget);
27152 // RAUW the chain, but don't worry about the result, as it's unused.
27153 assert(!N->hasAnyUseOfValue(0));
27154 // NOTE: The getUNDEF is needed to give something for the unused result 0.
27155 return DAG.getNode(ISD::MERGE_VALUES, DL, N->getVTList(),
27156 DAG.getUNDEF(VT), LockOp.getValue(1));
27157}
27158
27159static SDValue LowerATOMIC_STORE(SDValue Op, SelectionDAG &DAG,
27160 const X86Subtarget &Subtarget) {
27161 auto *Node = cast<AtomicSDNode>(Op.getNode());
27162 SDLoc dl(Node);
27163 EVT VT = Node->getMemoryVT();
27164
27165 bool IsSeqCst = Node->getOrdering() == AtomicOrdering::SequentiallyConsistent;
27166 bool IsTypeLegal = DAG.getTargetLoweringInfo().isTypeLegal(VT);
27167
27168 // If this store is not sequentially consistent and the type is legal
27169 // we can just keep it.
27170 if (!IsSeqCst && IsTypeLegal)
27171 return Op;
27172
27173 if (VT == MVT::i64 && !IsTypeLegal) {
27174 // For illegal i64 atomic_stores, we can try to use MOVQ if SSE2 is enabled.
27175 // FIXME: Use movlps with SSE1.
27176 // FIXME: Use fist with X87.
27177 bool NoImplicitFloatOps =
27178 DAG.getMachineFunction().getFunction().hasFnAttribute(
27179 Attribute::NoImplicitFloat);
27180 if (!Subtarget.useSoftFloat() && !NoImplicitFloatOps &&
27181 Subtarget.hasSSE2()) {
27182 SDValue SclToVec = DAG.getNode(ISD::SCALAR_TO_VECTOR, dl, MVT::v2i64,
27183 Node->getOperand(2));
27184 SDVTList Tys = DAG.getVTList(MVT::Other);
27185 SDValue Ops[] = { Node->getChain(), SclToVec, Node->getBasePtr() };
27186 SDValue Chain = DAG.getMemIntrinsicNode(X86ISD::VEXTRACT_STORE, dl, Tys,
27187 Ops, MVT::i64,
27188 Node->getMemOperand());
27189
27190 // If this is a sequentially consistent store, also emit an appropriate
27191 // barrier.
27192 if (IsSeqCst)
27193 Chain = emitLockedStackOp(DAG, Subtarget, Chain, dl);
27194
27195 return Chain;
27196 }
27197 }
27198
27199 // Convert seq_cst store -> xchg
27200 // Convert wide store -> swap (-> cmpxchg8b/cmpxchg16b)
27201 // FIXME: 16-byte ATOMIC_SWAP isn't actually hooked up at the moment.
27202 SDValue Swap = DAG.getAtomic(ISD::ATOMIC_SWAP, dl,
27203 Node->getMemoryVT(),
27204 Node->getOperand(0),
27205 Node->getOperand(1), Node->getOperand(2),
27206 Node->getMemOperand());
27207 return Swap.getValue(1);
27208}
27209
27210static SDValue LowerADDSUBCARRY(SDValue Op, SelectionDAG &DAG) {
27211 SDNode *N = Op.getNode();
27212 MVT VT = N->getSimpleValueType(0);
27213
27214 // Let legalize expand this if it isn't a legal type yet.
27215 if (!DAG.getTargetLoweringInfo().isTypeLegal(VT))
27216 return SDValue();
27217
27218 SDVTList VTs = DAG.getVTList(VT, MVT::i32);
27219 SDLoc DL(N);
27220
27221 // Set the carry flag.
27222 SDValue Carry = Op.getOperand(2);
27223 EVT CarryVT = Carry.getValueType();
27224 APInt NegOne = APInt::getAllOnesValue(CarryVT.getScalarSizeInBits());
27225 Carry = DAG.getNode(X86ISD::ADD, DL, DAG.getVTList(CarryVT, MVT::i32),
27226 Carry, DAG.getConstant(NegOne, DL, CarryVT));
27227
27228 unsigned Opc = Op.getOpcode() == ISD::ADDCARRY ? X86ISD::ADC : X86ISD::SBB;
27229 SDValue Sum = DAG.getNode(Opc, DL, VTs, Op.getOperand(0),
27230 Op.getOperand(1), Carry.getValue(1));
27231
27232 SDValue SetCC = getSETCC(X86::COND_B, Sum.getValue(1), DL, DAG);
27233 if (N->getValueType(1) == MVT::i1)
27234 SetCC = DAG.getNode(ISD::TRUNCATE, DL, MVT::i1, SetCC);
27235
27236 return DAG.getNode(ISD::MERGE_VALUES, DL, N->getVTList(), Sum, SetCC);
27237}
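// Illustrative sketch (not part of X86ISelLowering.cpp): a scalar model of the
// ISD::ADDCARRY half of the node lowered above; it adds two operands plus an
// incoming carry bit and yields both the sum and the outgoing carry (the flag
// that getSETCC(X86::COND_B, ...) reads back). The helper name is invented.
#include <cstdint>
static uint32_t addCarry32Model(uint32_t A, uint32_t B, uint32_t CarryIn,
                                uint32_t &CarryOut) {
  uint64_t Wide = static_cast<uint64_t>(A) + B + (CarryIn & 1u);
  CarryOut = static_cast<uint32_t>(Wide >> 32); // carry-out bit
  return static_cast<uint32_t>(Wide);           // the ADC result value
}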
27238
27239static SDValue LowerFSINCOS(SDValue Op, const X86Subtarget &Subtarget,
27240 SelectionDAG &DAG) {
27241 assert(Subtarget.isTargetDarwin() && Subtarget.is64Bit());
27242
27243 // For MacOSX, we want to call an alternative entry point: __sincos_stret,
27244 // which returns the values as { float, float } (in XMM0) or
27245 // { double, double } (which is returned in XMM0, XMM1).
27246 SDLoc dl(Op);
27247 SDValue Arg = Op.getOperand(0);
27248 EVT ArgVT = Arg.getValueType();
27249 Type *ArgTy = ArgVT.getTypeForEVT(*DAG.getContext());
27250
27251 TargetLowering::ArgListTy Args;
27252 TargetLowering::ArgListEntry Entry;
27253
27254 Entry.Node = Arg;
27255 Entry.Ty = ArgTy;
27256 Entry.IsSExt = false;
27257 Entry.IsZExt = false;
27258 Args.push_back(Entry);
27259
27260 bool isF64 = ArgVT == MVT::f64;
27261 // Only optimize x86_64 for now. i386 is a bit messy. For f32,
27262 // the small struct {f32, f32} is returned in (eax, edx). For f64,
27263 // the results are returned via SRet in memory.
27264 const TargetLowering &TLI = DAG.getTargetLoweringInfo();
27265 RTLIB::Libcall LC = isF64 ? RTLIB::SINCOS_STRET_F64 : RTLIB::SINCOS_STRET_F32;
27266 const char *LibcallName = TLI.getLibcallName(LC);
27267 SDValue Callee =
27268 DAG.getExternalSymbol(LibcallName, TLI.getPointerTy(DAG.getDataLayout()));
27269
27270 Type *RetTy = isF64 ? (Type *)StructType::get(ArgTy, ArgTy)
27271 : (Type *)VectorType::get(ArgTy, 4);
27272
27273 TargetLowering::CallLoweringInfo CLI(DAG);
27274 CLI.setDebugLoc(dl)
27275 .setChain(DAG.getEntryNode())
27276 .setLibCallee(CallingConv::C, RetTy, Callee, std::move(Args));
27277
27278 std::pair<SDValue, SDValue> CallResult = TLI.LowerCallTo(CLI);
27279
27280 if (isF64)
27281 // Returned in xmm0 and xmm1.
27282 return CallResult.first;
27283
27284 // Returned in bits 0:31 and 32:64 xmm0.
27285 SDValue SinVal = DAG.getNode(ISD::EXTRACT_VECTOR_ELT, dl, ArgVT,
27286 CallResult.first, DAG.getIntPtrConstant(0, dl));
27287 SDValue CosVal = DAG.getNode(ISD::EXTRACT_VECTOR_ELT, dl, ArgVT,
27288 CallResult.first, DAG.getIntPtrConstant(1, dl));
27289 SDVTList Tys = DAG.getVTList(ArgVT, ArgVT);
27290 return DAG.getNode(ISD::MERGE_VALUES, dl, Tys, SinVal, CosVal);
27291}
27292
27293/// Widen a vector input to a vector of NVT. The
27294/// input vector must have the same element type as NVT.
27295static SDValue ExtendToType(SDValue InOp, MVT NVT, SelectionDAG &DAG,
27296 bool FillWithZeroes = false) {
27297 // Check if InOp already has the right width.
27298 MVT InVT = InOp.getSimpleValueType();
27299 if (InVT == NVT)
27300 return InOp;
27301
27302 if (InOp.isUndef())
27303 return DAG.getUNDEF(NVT);
27304
27305 assert(InVT.getVectorElementType() == NVT.getVectorElementType() &&
27306 "input and widen element type must match");
27307
27308 unsigned InNumElts = InVT.getVectorNumElements();
27309 unsigned WidenNumElts = NVT.getVectorNumElements();
27310 assert(WidenNumElts > InNumElts && WidenNumElts % InNumElts == 0 &&
27311 "Unexpected request for vector widening");
27312
27313 SDLoc dl(InOp);
27314 if (InOp.getOpcode() == ISD::CONCAT_VECTORS &&
27315 InOp.getNumOperands() == 2) {
27316 SDValue N1 = InOp.getOperand(1);
27317 if ((ISD::isBuildVectorAllZeros(N1.getNode()) && FillWithZeroes) ||
27318 N1.isUndef()) {
27319 InOp = InOp.getOperand(0);
27320 InVT = InOp.getSimpleValueType();
27321 InNumElts = InVT.getVectorNumElements();
27322 }
27323 }
27324 if (ISD::isBuildVectorOfConstantSDNodes(InOp.getNode()) ||
27325 ISD::isBuildVectorOfConstantFPSDNodes(InOp.getNode())) {
27326 SmallVector<SDValue, 16> Ops;
27327 for (unsigned i = 0; i < InNumElts; ++i)
27328 Ops.push_back(InOp.getOperand(i));
27329
27330 EVT EltVT = InOp.getOperand(0).getValueType();
27331
27332 SDValue FillVal = FillWithZeroes ? DAG.getConstant(0, dl, EltVT) :
27333 DAG.getUNDEF(EltVT);
27334 for (unsigned i = 0; i < WidenNumElts - InNumElts; ++i)
27335 Ops.push_back(FillVal);
27336 return DAG.getBuildVector(NVT, dl, Ops);
27337 }
27338 SDValue FillVal = FillWithZeroes ? DAG.getConstant(0, dl, NVT) :
27339 DAG.getUNDEF(NVT);
27340 return DAG.getNode(ISD::INSERT_SUBVECTOR, dl, NVT, FillVal,
27341 InOp, DAG.getIntPtrConstant(0, dl));
27342}
27343
27344static SDValue LowerMSCATTER(SDValue Op, const X86Subtarget &Subtarget,
27345 SelectionDAG &DAG) {
27346 assert(Subtarget.hasAVX512() &&
27347 "MGATHER/MSCATTER are supported on AVX-512 arch only");
27348
27349 MaskedScatterSDNode *N = cast<MaskedScatterSDNode>(Op.getNode());
27350 SDValue Src = N->getValue();
27351 MVT VT = Src.getSimpleValueType();
27352 assert(VT.getScalarSizeInBits() >= 32 && "Unsupported scatter op");
27353 SDLoc dl(Op);
27354
27355 SDValue Scale = N->getScale();
27356 SDValue Index = N->getIndex();
27357 SDValue Mask = N->getMask();
27358 SDValue Chain = N->getChain();
27359 SDValue BasePtr = N->getBasePtr();
27360
27361 if (VT == MVT::v2f32 || VT == MVT::v2i32) {
27362 assert(Mask.getValueType() == MVT::v2i1 && "Unexpected mask type");
27363 // If the index is v2i64 and we have VLX we can use xmm for data and index.
27364 if (Index.getValueType() == MVT::v2i64 && Subtarget.hasVLX()) {
27365 const TargetLowering &TLI = DAG.getTargetLoweringInfo();
27366 EVT WideVT = TLI.getTypeToTransformTo(*DAG.getContext(), VT);
27367 Src = DAG.getNode(ISD::CONCAT_VECTORS, dl, WideVT, Src, DAG.getUNDEF(VT));
27368 SDVTList VTs = DAG.getVTList(MVT::v2i1, MVT::Other);
27369 SDValue Ops[] = {Chain, Src, Mask, BasePtr, Index, Scale};
27370 SDValue NewScatter = DAG.getTargetMemSDNode<X86MaskedScatterSDNode>(
27371 VTs, Ops, dl, N->getMemoryVT(), N->getMemOperand());
27372 return SDValue(NewScatter.getNode(), 1);
27373 }
27374 return SDValue();
27375 }
27376
27377 MVT IndexVT = Index.getSimpleValueType();
27378 MVT MaskVT = Mask.getSimpleValueType();
27379
27380 // If the index is v2i32, we're being called by type legalization and we
27381 // should just let the default handling take care of it.
27382 if (IndexVT == MVT::v2i32)
27383 return SDValue();
27384
27385 // If we don't have VLX and neither the passthru nor the index is 512 bits, we
27386 // need to widen until one is.
27387 if (!Subtarget.hasVLX() && !VT.is512BitVector() &&
27388 !Index.getSimpleValueType().is512BitVector()) {
27389 // Determine how much we need to widen by to get a 512-bit type.
27390 unsigned Factor = std::min(512/VT.getSizeInBits(),
27391 512/IndexVT.getSizeInBits());
27392 unsigned NumElts = VT.getVectorNumElements() * Factor;
27393
27394 VT = MVT::getVectorVT(VT.getVectorElementType(), NumElts);
27395 IndexVT = MVT::getVectorVT(IndexVT.getVectorElementType(), NumElts);
27396 MaskVT = MVT::getVectorVT(MVT::i1, NumElts);
27397
27398 Src = ExtendToType(Src, VT, DAG);
27399 Index = ExtendToType(Index, IndexVT, DAG);
27400 Mask = ExtendToType(Mask, MaskVT, DAG, true);
27401 }
27402
27403 SDVTList VTs = DAG.getVTList(MaskVT, MVT::Other);
27404 SDValue Ops[] = {Chain, Src, Mask, BasePtr, Index, Scale};
27405 SDValue NewScatter = DAG.getTargetMemSDNode<X86MaskedScatterSDNode>(
27406 VTs, Ops, dl, N->getMemoryVT(), N->getMemOperand());
27407 return SDValue(NewScatter.getNode(), 1);
27408}
27409
27410static SDValue LowerMLOAD(SDValue Op, const X86Subtarget &Subtarget,
27411 SelectionDAG &DAG) {
27412
27413 MaskedLoadSDNode *N = cast<MaskedLoadSDNode>(Op.getNode());
27414 MVT VT = Op.getSimpleValueType();
27415 MVT ScalarVT = VT.getScalarType();
27416 SDValue Mask = N->getMask();
27417 MVT MaskVT = Mask.getSimpleValueType();
27418 SDValue PassThru = N->getPassThru();
27419 SDLoc dl(Op);
27420
27421 // Handle AVX masked loads which don't support passthru other than 0.
27422 if (MaskVT.getVectorElementType() != MVT::i1) {
27423 // We also allow undef in the isel pattern.
27424 if (PassThru.isUndef() || ISD::isBuildVectorAllZeros(PassThru.getNode()))
27425 return Op;
27426
27427 SDValue NewLoad = DAG.getMaskedLoad(VT, dl, N->getChain(),
27428 N->getBasePtr(), Mask,
27429 getZeroVector(VT, Subtarget, DAG, dl),
27430 N->getMemoryVT(), N->getMemOperand(),
27431 N->getExtensionType(),
27432 N->isExpandingLoad());
27433 // Emit a blend.
27434 SDValue Select = DAG.getNode(ISD::VSELECT, dl, MaskVT, Mask, NewLoad,
27435 PassThru);
27436 return DAG.getMergeValues({ Select, NewLoad.getValue(1) }, dl);
27437 }
27438
27439 assert((!N->isExpandingLoad() || Subtarget.hasAVX512()) &&
27440 "Expanding masked load is supported on AVX-512 target only!");
27441
27442 assert((!N->isExpandingLoad() || ScalarVT.getSizeInBits() >= 32) &&
27443 "Expanding masked load is supported for 32 and 64-bit types only!");
27444
27445  assert(Subtarget.hasAVX512() && !Subtarget.hasVLX() && !VT.is512BitVector() &&
27446         "Cannot lower masked load op.");
27447
27448  assert((ScalarVT.getSizeInBits() >= 32 ||
27449          (Subtarget.hasBWI() &&
27450              (ScalarVT == MVT::i8 || ScalarVT == MVT::i16))) &&
27451         "Unsupported masked load op.");
27452
27453  // This operation is legal for targets with VLX, but without
27454  // VLX the vector should be widened to 512 bits.
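  // For example (illustration only), a v8i32 masked load has ScalarVT == i32,
  // so NumEltsInWideVec = 512 / 32 = 16 below and the operation is rebuilt as
  // a v16i32 load under a v16i1 mask (the AVX-512 ZMM/k-register form); the
  // original 8 lanes are recovered afterwards with EXTRACT_SUBVECTOR.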
27455 unsigned NumEltsInWideVec = 512 / VT.getScalarSizeInBits();
27456 MVT WideDataVT = MVT::getVectorVT(ScalarVT, NumEltsInWideVec);
27457 PassThru = ExtendToType(PassThru, WideDataVT, DAG);
27458
27459 // Mask element has to be i1.
27460  assert(Mask.getSimpleValueType().getScalarType() == MVT::i1 &&
27461         "Unexpected mask type");
27462
27463 MVT WideMaskVT = MVT::getVectorVT(MVT::i1, NumEltsInWideVec);
27464
27465 Mask = ExtendToType(Mask, WideMaskVT, DAG, true);
27466 SDValue NewLoad = DAG.getMaskedLoad(WideDataVT, dl, N->getChain(),
27467 N->getBasePtr(), Mask, PassThru,
27468 N->getMemoryVT(), N->getMemOperand(),
27469 N->getExtensionType(),
27470 N->isExpandingLoad());
27471
27472  SDValue Extract = DAG.getNode(ISD::EXTRACT_SUBVECTOR, dl, VT,
27473                                NewLoad.getValue(0),
27474                                DAG.getIntPtrConstant(0, dl));
27475  SDValue RetOps[] = {Extract, NewLoad.getValue(1)};
27476 return DAG.getMergeValues(RetOps, dl);
27477}
27478
27479static SDValue LowerMSTORE(SDValue Op, const X86Subtarget &Subtarget,
27480 SelectionDAG &DAG) {
27481 MaskedStoreSDNode *N = cast<MaskedStoreSDNode>(Op.getNode());
27482 SDValue DataToStore = N->getValue();
27483 MVT VT = DataToStore.getSimpleValueType();
27484 MVT ScalarVT = VT.getScalarType();
27485 SDValue Mask = N->getMask();
27486 SDLoc dl(Op);
27487
27488  assert((!N->isCompressingStore() || Subtarget.hasAVX512()) &&
27489         "Compressing masked store is supported on AVX-512 target only!");
27490
27491  assert((!N->isCompressingStore() || ScalarVT.getSizeInBits() >= 32) &&
27492         "Compressing masked store is supported for 32 and 64-bit types only!");
27493
27494  assert(Subtarget.hasAVX512() && !Subtarget.hasVLX() && !VT.is512BitVector() &&
27495         "Cannot lower masked store op.");
27496
27497  assert((ScalarVT.getSizeInBits() >= 32 ||
27498          (Subtarget.hasBWI() &&
27499              (ScalarVT == MVT::i8 || ScalarVT == MVT::i16))) &&
27500         "Unsupported masked store op.");
27501
27502  // This operation is legal for targets with VLX, but without
27503  // VLX the vector should be widened to 512 bits.
27504 unsigned NumEltsInWideVec = 512/VT.getScalarSizeInBits();
27505 MVT WideDataVT = MVT::getVectorVT(ScalarVT, NumEltsInWideVec);
27506
27507 // Mask element has to be i1.
27508  assert(Mask.getSimpleValueType().getScalarType() == MVT::i1 &&
27509         "Unexpected mask type");
27510
27511 MVT WideMaskVT = MVT::getVectorVT(MVT::i1, NumEltsInWideVec);
27512
27513 DataToStore = ExtendToType(DataToStore, WideDataVT, DAG);
27514 Mask = ExtendToType(Mask, WideMaskVT, DAG, true);
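  // Note (illustrative): the trailing 'true' asks ExtendToType to fill the new
  // mask lanes with zeroes, so the widened store only writes the original
  // elements and the padding lanes added to DataToStore are never stored.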
27515 return DAG.getMaskedStore(N->getChain(), dl, DataToStore, N->getBasePtr(),
27516 Mask, N->getMemoryVT(), N->getMemOperand(),
27517 N->isTruncatingStore(), N->isCompressingStore());
27518}
27519
27520static SDValue LowerMGATHER(SDValue Op, const X86Subtarget &Subtarget,
27521 SelectionDAG &DAG) {
27522  assert(Subtarget.hasAVX2() &&
27523         "MGATHER/MSCATTER are supported on AVX-512/AVX-2 arch only");
27524
27525 MaskedGatherSDNode *N = cast<MaskedGatherSDNode>(Op.getNode());
27526 SDLoc dl(Op);
27527 MVT VT = Op.getSimpleValueType();
27528 SDValue Index = N->getIndex();
27529 SDValue Mask = N->getMask();
27530 SDValue PassThru = N->getPassThru();
27531 MVT IndexVT = Index.getSimpleValueType();
27532 MVT MaskVT = Mask.getSimpleValueType();
27533
27534  assert(VT.getScalarSizeInBits() >= 32 && "Unsupported gather op");
27535
27536 // If the index is v2i32, we're being called by type legalization.
27537 if (IndexVT == MVT::v2i32)
27538 return SDValue();
27539
27540  // If we don't have VLX and neither the passthru nor the index is 512 bits,
27541  // we need to widen until one is.
27542 MVT OrigVT = VT;
27543 if (Subtarget.hasAVX512() && !Subtarget.hasVLX() && !VT.is512BitVector() &&
27544 !IndexVT.is512BitVector()) {
27545 // Determine how much we need to widen by to get a 512-bit type.
27546 unsigned Factor = std::min(512/VT.getSizeInBits(),
27547 512/IndexVT.getSizeInBits());
27548
27549 unsigned NumElts = VT.getVectorNumElements() * Factor;
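    // For example (illustration only), a v4i32 gather with a v4i64 index gives
    // Factor = min(512/128, 512/256) = 2 and NumElts = 8, so the node is
    // rebuilt below with v8i32 data, a 512-bit v8i64 index, and a v8i1 mask.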
27550
27551 VT = MVT::getVectorVT(VT.getVectorElementType(), NumElts);
27552 IndexVT = MVT::getVectorVT(IndexVT.getVectorElementType(), NumElts);
27553 MaskVT = MVT::getVectorVT(MVT::i1, NumElts);
27554
27555 PassThru = ExtendToType(PassThru, VT, DAG);
27556 Index = ExtendToType(Index, IndexVT, DAG);
27557 Mask = ExtendToType(Mask, MaskVT, DAG, true);
27558 }
27559
27560 SDValue Ops[] = { N->getChain(), PassThru, Mask, N->getBasePtr(), Index,
27561 N->getScale() };
27562 SDValue NewGather = DAG.getTargetMemSDNode<X86MaskedGatherSDNode>(
27563 DAG.getVTList(VT, MaskVT, MVT::Other), Ops, dl, N->getMemoryVT(),
27564 N->getMemOperand());
27565 SDValue Extract = DAG.getNode(ISD::EXTRACT_SUBVECTOR, dl, OrigVT,
27566 NewGather, DAG.getIntPtrConstant(0, dl));
27567 return DAG.getMergeValues({Extract, NewGather.getValue(2)}, dl);
27568}
27569
27570SDValue X86TargetLowering::LowerGC_TRANSITION_START(SDValue Op,
27571 SelectionDAG &DAG) const {
27572 // TODO: Eventually, the lowering of these nodes should be informed by or
27573 // deferred to the GC strategy for the function in which they appear. For
27574 // now, however, they must be lowered to something. Since they are logically
27575 // no-ops in the case of a null GC strategy (or a GC strategy which does not
27576 // require special handling for these nodes), lower them as literal NOOPs for
27577 // the time being.
27578 SmallVector<SDValue, 2> Ops;
27579
27580 Ops.push_back(Op.getOperand(0));
27581 if (Op->getGluedNode())
27582 Ops.push_back(Op->getOperand(Op->getNumOperands() - 1));
27583
27584 SDLoc OpDL(Op);
27585 SDVTList VTs = DAG.getVTList(MVT::Other, MVT::Glue);
27586 SDValue NOOP(DAG.getMachineNode(X86::NOOP, SDLoc(Op), VTs, Ops), 0);
27587
27588 return NOOP;
27589}
27590
27591SDValue X86TargetLowering::LowerGC_TRANSITION_END(SDValue Op,
27592 SelectionDAG &DAG) const {
27593 // TODO: Eventually, the lowering of these nodes should be informed by or
27594 // deferred to the GC strategy for the function in which they appear. For
27595 // now, however, they must be lowered to something. Since they are logically
27596 // no-ops in the case of a null GC strategy (or a GC strategy which does not
27597 // require special handling for these nodes), lower them as literal NOOPs for
27598 // the time being.
27599 SmallVector<SDValue, 2> Ops;
27600
27601 Ops.push_back(Op.getOperand(0));
27602 if (Op->getGluedNode())
27603 Ops.push_back(Op->getOperand(Op->getNumOperands() - 1));
27604
27605 SDLoc OpDL(Op);
27606 SDVTList VTs = DAG.getVTList(MVT::Other, MVT::Glue);
27607 SDValue NOOP(DAG.getMachineNode(X86::NOOP, SDLoc(Op), VTs, Ops), 0);
27608
27609 return NOOP;
27610}
27611
27612SDValue X86TargetLowering::LowerF128Call(SDValue Op, SelectionDAG &DAG,
27613 RTLIB::Libcall Call) const {
27614 SmallVector<SDValue, 2> Ops(Op->op_begin(), Op->op_end());
27615 MakeLibCallOptions CallOptions;
27616 return makeLibCall(DAG, Call, MVT::f128, Ops, CallOptions, SDLoc(Op)).first;
27617}
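// Note (illustrative): for the f128 cases routed here from LowerOperation
// below (e.g. ISD::FMUL -> RTLIB::MUL_F128), the libcall typically resolves to
// the soft-float runtime routines such as __multf3 / __divtf3 provided by
// compiler-rt or libgcc.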
27618
27619/// Provide custom lowering hooks for some operations.
27620SDValue X86TargetLowering::LowerOperation(SDValue Op, SelectionDAG &DAG) const {
27621 switch (Op.getOpcode()) {
27622  default: llvm_unreachable("Should not custom lower this!");
27623 case ISD::ATOMIC_FENCE: return LowerATOMIC_FENCE(Op, Subtarget, DAG);
27624 case ISD::ATOMIC_CMP_SWAP_WITH_SUCCESS:
27625 return LowerCMP_SWAP(Op, Subtarget, DAG);
27626 case ISD::CTPOP: return LowerCTPOP(Op, Subtarget, DAG);
27627 case ISD::ATOMIC_LOAD_ADD:
27628 case ISD::ATOMIC_LOAD_SUB:
27629 case ISD::ATOMIC_LOAD_OR:
27630 case ISD::ATOMIC_LOAD_XOR:
27631 case ISD::ATOMIC_LOAD_AND: return lowerAtomicArith(Op, DAG, Subtarget);
27632 case ISD::ATOMIC_STORE: return LowerATOMIC_STORE(Op, DAG, Subtarget);
27633 case ISD::BITREVERSE: return LowerBITREVERSE(Op, Subtarget, DAG);
27634 case ISD::BUILD_VECTOR: return LowerBUILD_VECTOR(Op, DAG);
27635 case ISD::CONCAT_VECTORS: return LowerCONCAT_VECTORS(Op, Subtarget, DAG);
27636 case ISD::VECTOR_SHUFFLE: return lowerVectorShuffle(Op, Subtarget, DAG);
27637 case ISD::VSELECT: return LowerVSELECT(Op, DAG);
27638 case ISD::EXTRACT_VECTOR_ELT: return LowerEXTRACT_VECTOR_ELT(Op, DAG);
27639 case ISD::INSERT_VECTOR_ELT: return LowerINSERT_VECTOR_ELT(Op, DAG);
27640 case ISD::INSERT_SUBVECTOR: return LowerINSERT_SUBVECTOR(Op, Subtarget,DAG);
27641 case ISD::EXTRACT_SUBVECTOR: return LowerEXTRACT_SUBVECTOR(Op,Subtarget,DAG);
27642 case ISD::SCALAR_TO_VECTOR: return LowerSCALAR_TO_VECTOR(Op, Subtarget,DAG);
27643 case ISD::ConstantPool: return LowerConstantPool(Op, DAG);
27644 case ISD::GlobalAddress: return LowerGlobalAddress(Op, DAG);
27645 case ISD::GlobalTLSAddress: return LowerGlobalTLSAddress(Op, DAG);
27646 case ISD::ExternalSymbol: return LowerExternalSymbol(Op, DAG);
27647 case ISD::BlockAddress: return LowerBlockAddress(Op, DAG);
27648 case ISD::SHL_PARTS:
27649 case ISD::SRA_PARTS:
27650 case ISD::SRL_PARTS: return LowerShiftParts(Op, DAG);
27651 case ISD::FSHL:
27652 case ISD::FSHR: return LowerFunnelShift(Op, Subtarget, DAG);
27653 case ISD::SINT_TO_FP: return LowerSINT_TO_FP(Op, DAG);
27654 case ISD::UINT_TO_FP: return LowerUINT_TO_FP(Op, DAG);
27655 case ISD::TRUNCATE: return LowerTRUNCATE(Op, DAG);
27656 case ISD::ZERO_EXTEND: return LowerZERO_EXTEND(Op, Subtarget, DAG);
27657 case ISD::SIGN_EXTEND: return LowerSIGN_EXTEND(Op, Subtarget, DAG);
27658 case ISD::ANY_EXTEND: return LowerANY_EXTEND(Op, Subtarget, DAG);
27659 case ISD::ZERO_EXTEND_VECTOR_INREG:
27660 case ISD::SIGN_EXTEND_VECTOR_INREG:
27661 return LowerEXTEND_VECTOR_INREG(Op, Subtarget, DAG);
27662 case ISD::FP_TO_SINT:
27663 case ISD::FP_TO_UINT: return LowerFP_TO_INT(Op, DAG);
27664 case ISD::FP_EXTEND: return LowerFP_EXTEND(Op, DAG);
27665 case ISD::FP_ROUND: return LowerFP_ROUND(Op, DAG);
27666 case ISD::STRICT_FP_ROUND: return LowerSTRICT_FP_ROUND(Op, DAG);
27667 case ISD::LOAD: return LowerLoad(Op, Subtarget, DAG);
27668 case ISD::STORE: return LowerStore(Op, Subtarget, DAG);
27669 case ISD::FADD:
27670 case ISD::FSUB: return lowerFaddFsub(Op, DAG);
27671 case ISD::FMUL: return LowerF128Call(Op, DAG, RTLIB::MUL_F128);
27672 case ISD::FDIV: return LowerF128Call(Op, DAG, RTLIB::DIV_F128);
27673 case ISD::FABS:
27674 case ISD::FNEG: return LowerFABSorFNEG(Op, DAG);
27675 case ISD::FCOPYSIGN: return LowerFCOPYSIGN(Op, DAG);
27676 case ISD::FGETSIGN: return LowerFGETSIGN(Op, DAG);
27677 case ISD::SETCC: return LowerSETCC(Op, DAG);
27678 case ISD::SETCCCARRY: return LowerSETCCCARRY(Op, DAG);
27679 case ISD::SELECT: return LowerSELECT(Op, DAG);
27680 case ISD::BRCOND: return LowerBRCOND(Op, DAG);
27681 case ISD::JumpTable: return LowerJumpTable(Op, DAG);
27682 case ISD::VASTART: return LowerVASTART(Op, DAG);
27683 case ISD::VAARG: return LowerVAARG(Op, DAG);
27684 case ISD::VACOPY: return LowerVACOPY(Op, Subtarget, DAG);
27685 case ISD::INTRINSIC_WO_CHAIN: return LowerINTRINSIC_WO_CHAIN(Op, DAG);
27686 case ISD::INTRINSIC_VOID:
27687 case ISD::INTRINSIC_W_CHAIN: return LowerINTRINSIC_W_CHAIN(Op, Subtarget, DAG);
27688 case ISD::RETURNADDR: return LowerRETURNADDR(Op, DAG);
27689 case ISD::ADDROFRETURNADDR: return LowerADDROFRETURNADDR(Op, DAG);
27690 case ISD::FRAMEADDR: return LowerFRAMEADDR(Op, DAG);
27691 case ISD::FRAME_TO_ARGS_OFFSET:
27692 return LowerFRAME_TO_ARGS_OFFSET(Op, DAG);
27693 case ISD::DYNAMIC_STACKALLOC: return LowerDYNAMIC_STACKALLOC(Op, DAG);
27694 case ISD::EH_RETURN: return LowerEH_RETURN(Op, DAG);
27695 case ISD::EH_SJLJ_SETJMP: return lowerEH_SJLJ_SETJMP(Op, DAG);
27696 case ISD::EH_SJLJ_LONGJMP: return lowerEH_SJLJ_LONGJMP(Op, DAG);
27697 case ISD::EH_SJLJ_SETUP_DISPATCH:
27698 return lowerEH_SJLJ_SETUP_DISPATCH(Op, DAG);
27699 case ISD::INIT_TRAMPOLINE: return LowerINIT_TRAMPOLINE(Op, DAG);
27700 case ISD::ADJUST_TRAMPOLINE: return LowerADJUST_TRAMPOLINE(Op, DAG);
27701 case ISD::FLT_ROUNDS_: return LowerFLT_ROUNDS_(Op, DAG);
27702 case ISD::CTLZ:
27703 case ISD::CTLZ_ZERO_UNDEF: return LowerCTLZ(Op, Subtarget, DAG);
27704 case ISD::CTTZ:
27705 case ISD::CTTZ_ZERO_UNDEF: return LowerCTTZ(Op, Subtarget, DAG);
27706 case ISD::MUL: return LowerMUL(Op, Subtarget, DAG);
27707 case ISD::MULHS:
27708 case ISD::MULHU: return LowerMULH(Op, Subtarget, DAG);
27709 case ISD::ROTL:
27710 case ISD::ROTR: return LowerRotate(Op, Subtarget, DAG);
27711 case ISD::SRA:
27712 case ISD::SRL:
27713 case ISD::SHL: return LowerShift(Op, Subtarget, DAG);
27714 case ISD::SADDO:
27715 case ISD::UADDO:
27716 case ISD::SSUBO:
27717 case ISD::USUBO:
27718 case ISD::SMULO:
27719 case ISD::UMULO: return LowerXALUO(Op, DAG);
27720 case ISD::READCYCLECOUNTER: return LowerREADCYCLECOUNTER(Op, Subtarget,DAG);
27721 case ISD::BITCAST: return LowerBITCAST(Op, Subtarget, DAG);
27722 case ISD::ADDCARRY:
27723 case ISD::SUBCARRY: return LowerADDSUBCARRY(Op, DAG);
27724 case ISD::ADD:
27725 case ISD::SUB: return lowerAddSub(Op, DAG, Subtarget);
27726 case ISD::UADDSAT:
27727 case ISD::SADDSAT:
27728 case ISD::USUBSAT:
27729 case ISD::SSUBSAT: return LowerADDSAT_SUBSAT(Op, DAG, Subtarget);
27730 case ISD::SMAX:
27731 case ISD::SMIN:
27732 case ISD::UMAX:
27733 case ISD::UMIN: return LowerMINMAX(Op, DAG);
27734 case ISD::ABS: return LowerABS(Op, Subtarget, DAG);
27735 case ISD::FSINCOS: return LowerFSINCOS(Op, Subtarget, DAG);
27736 case ISD::MLOAD: return LowerMLOAD(Op, Subtarget, DAG);
27737 case ISD::MSTORE: return LowerMSTORE(Op, Subtarget, DAG);
27738 case ISD::MGATHER: return LowerMGATHER(Op, Subtarget, DAG);
27739 case ISD::MSCATTER: return LowerMSCATTER(Op, Subtarget, DAG);
27740 case ISD::GC_TRANSITION_START:
27741 return LowerGC_TRANSITION_START(Op, DAG);
27742 case ISD::GC_TRANSITION_END: return LowerGC_TRANSITION_END(Op, DAG);
27743 }
27744}
27745
27746/// Places new result values for the node in Results (their number
27747/// and types must exactly match those of the original return values of
27748/// the node), or leaves Results empty, which indicates that the node is not
27749/// to be custom lowered after all.
27750void X86TargetLowering::LowerOperationWrapper(SDNode *N,
27751 SmallVectorImpl<SDValue> &Results,
27752 SelectionDAG &DAG) const {
27753 SDValue Res = LowerOperation(SDValue(N, 0), DAG);
27754
27755 if (!Res.getNode())
27756 return;
27757
27758 // If the original node has one result, take the return value from
27759 // LowerOperation as is. It might not be result number 0.
27760 if (N->getNumValues() == 1) {
27761 Results.push_back(Res);
27762 return;
27763 }
27764
27765 // If the original node has multiple results, then the return node should
27766 // have the same number of results.
27767  assert((N->getNumValues() == Res->getNumValues()) &&
27768         "Lowering returned the wrong number of results!");
27769
27770  // Places new result values based on N's result numbers.
27771 for (unsigned I = 0, E = N->getNumValues(); I != E; ++I)
27772 Results.push_back(Res.getValue(I));
27773}
27774
27775/// Replace a node with an illegal result type with a new node built out of
27776/// custom code.
27777void X86TargetLowering::ReplaceNodeResults(SDNode *N,
27778 SmallVectorImpl<SDValue>&Results,
27779 SelectionDAG &DAG) const {
27780 SDLoc dl(N);
27781 switch (N->getOpcode()) {
27782 default:
27783#ifndef NDEBUG
27784 dbgs() << "ReplaceNodeResults: ";
27785 N->dump(&DAG);
27786#endif
27787    llvm_unreachable("Do not know how to custom type legalize this operation!");
27788 case ISD::CTPOP: {
27789    assert(N->getValueType(0) == MVT::i64 && "Unexpected VT!");
27790 // Use a v2i64 if possible.
27791 bool NoImplicitFloatOps =
27792 DAG.getMachineFunction().getFunction().hasFnAttribute(
27793 Attribute::NoImplicitFloat);
27794 if (isTypeLegal(MVT::v2i64) && !NoImplicitFloatOps) {
27795 SDValue Wide =
27796 DAG.getNode(ISD::SCALAR_TO_VECTOR, dl, MVT::v2i64, N->getOperand(0));
27797 Wide = DAG.getNode(ISD::CTPOP, dl, MVT::v2i64, Wide);
27798 // Bit count should fit in 32-bits, extract it as that and then zero
27799 // extend to i64. Otherwise we end up extracting bits 63:32 separately.
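      // (A 64-bit popcount is at most 64, so the result is known to fit in
      // the low 32 bits; element 0 of the v4i32 bitcast is exactly those low
      // bits given x86's little-endian lane order.)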
27800 Wide = DAG.getNode(ISD::BITCAST, dl, MVT::v4i32, Wide);
27801 Wide = DAG.getNode(ISD::EXTRACT_VECTOR_ELT, dl, MVT::i32, Wide,
27802 DAG.getIntPtrConstant(0, dl));
27803 Wide = DAG.getNode(ISD::ZERO_EXTEND, dl, MVT::i64, Wide);
27804 Results.push_back(Wide);
27805 }
27806 return;
27807 }
27808 case ISD::MUL: {
27809 EVT VT = N->getValueType(0);
27810    assert(getTypeAction(*DAG.getContext(), VT) == TypeWidenVector &&
27811           VT.getVectorElementType() == MVT::i8 && "Unexpected VT!");
27812 // Pre-promote these to vXi16 to avoid op legalization thinking all 16
27813 // elements are needed.
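    // For example (illustration only), a v2i8 multiply is promoted to a v2i16
    // multiply, truncated back to v2i8, and then concatenated with
    // 16/2 - 1 = 7 undef v2i8 values to form the legal v16i8 result below.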
27814 MVT MulVT = MVT::getVectorVT(MVT::i16, VT.getVectorNumElements());
27815 SDValue Op0 = DAG.getNode(ISD::ANY_EXTEND, dl, MulVT, N->getOperand(0));
27816 SDValue Op1 = DAG.getNode(ISD::ANY_EXTEND, dl, MulVT, N->getOperand(1));
27817 SDValue Res = DAG.getNode(ISD::MUL, dl, MulVT, Op0, Op1);
27818 Res = DAG.getNode(ISD::TRUNCATE, dl, VT, Res);
27819 unsigned NumConcats = 16 / VT.getVectorNumElements();
27820 SmallVector<SDValue, 8> ConcatOps(NumConcats, DAG.getUNDEF(VT));
27821 ConcatOps[0] = Res;
27822 Res = DAG.getNode(ISD::CONCAT_VECTORS, dl, MVT::v16i8, ConcatOps);
27823 Results.push_back(Res);
27824 return;
27825 }
27826 case X86ISD::VPMADDWD:
27827 case X86ISD::AVG: {
27828 // Legalize types for ISD::UADDSAT/SADDSAT/USUBSAT/SSUBSAT and
27829 // X86ISD::AVG/VPMADDWD by widening.
27830    assert(Subtarget.hasSSE2() && "Requires at least SSE2!");
27831
27832 EVT VT = N->getValueType(0);
27833 EVT InVT = N->getOperand(0).getValueType();
27834    assert(VT.getSizeInBits() < 128 && 128 % VT.getSizeInBits() == 0 &&
27835           "Expected a VT that divides into 128 bits.");
27836    assert(getTypeAction(*DAG.getContext(), VT) == TypeWidenVector &&
27837           "Unexpected type action!");
27838 unsigned NumConcat = 128 / InVT.getSizeInBits();
27839
27840 EVT InWideVT = EVT::getVectorVT(*DAG.getContext(),
27841 InVT.getVectorElementType(),
27842 NumConcat * InVT.getVectorNumElements());
27843 EVT WideVT = EVT::getVectorVT(*DAG.getContext(),
27844 VT.getVectorElementType(),
27845 NumConcat * VT.getVectorNumElements());
27846
27847 SmallVector<SDValue, 16> Ops(NumConcat, DAG.getUNDEF(InVT));
27848 Ops[0] = N->getOperand(0);
27849 SDValue InVec0 = DAG.getNode(ISD::CONCAT_VECTORS, dl, InWideVT, Ops);
27850 Ops[0] = N->getOperand(1);
27851 SDValue InVec1 = DAG.getNode(ISD::CONCAT_VECTORS, dl, InWideVT, Ops);
27852
27853 SDValue Res = DAG.getNode(N->getOpcode(), dl, WideVT, InVec0, InVec1);
27854 Results.push_back(Res);
27855 return;
27856 }
27857 case ISD::ABS: {
27858 const TargetLowering &TLI = DAG.getTargetLoweringInfo();
27859    assert(N->getValueType(0) == MVT::i64 &&
27860           "Unexpected type (!= i64) on ABS.");
27861 MVT HalfT = MVT::i32;
27862 SDValue Lo, Hi, Tmp;
27863 SDVTList VTList = DAG.getVTList(HalfT, MVT::i1);
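    // The expansion below uses the identity abs(x) = (x + m) ^ m with
    // m = x >> 63 (all-ones for negative x, zero otherwise): m is derived from
    // the sign of the high half, added across both 32-bit halves with
    // UADDO/ADDCARRY to propagate the carry, and then XORed back in.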
27864
27865 Lo = DAG.getNode(ISD::EXTRACT_ELEMENT, dl, HalfT, N->getOperand(0),
27866 DAG.getConstant(0, dl, HalfT));
27867 Hi = DAG.getNode(ISD::EXTRACT_ELEMENT, dl, HalfT, N->getOperand(0),
27868 DAG.getConstant(1, dl, HalfT));
27869 Tmp = DAG.getNode(
27870 ISD::SRA, dl, HalfT, Hi,
27871 DAG.getConstant(HalfT.getSizeInBits() - 1, dl,
27872 TLI.getShiftAmountTy(HalfT, DAG.getDataLayout())));
27873 Lo = DAG.getNode(ISD::UADDO, dl, VTList, Tmp, Lo);
27874 Hi = DAG.getNode(ISD::ADDCARRY, dl, VTList, Tmp, Hi,
27875 SDValue(Lo.getNode(), 1));
27876 Hi = DAG.getNode(ISD::XOR, dl, HalfT, Tmp, Hi);
27877 Lo = DAG.getNode(ISD::XOR, dl, HalfT, Tmp, Lo);
27878 Results.push_back(Lo);
27879 Results.push_back(Hi);
27880 return;
27881 }
27882 // We might have generated v2f32 FMIN/FMAX operations. Widen them to v4f32.
27883 case X86ISD::FMINC:
27884 case X86ISD::FMIN:
27885 case X86ISD::FMAXC:
27886 case X86ISD::FMAX: {
27887 EVT VT = N->getValueType(0);
27888    assert(VT == MVT::v2f32 && "Unexpected type (!= v2f32) on FMIN/FMAX.");
27889 SDValue UNDEF = DAG.getUNDEF(VT);
27890 SDValue LHS = DAG.getNode(ISD::CONCAT_VECTORS, dl, MVT::v4f32,
27891 N->getOperand(0), UNDEF);
27892 SDValue RHS = DAG.getNode(ISD::CONCAT_VECTORS, dl, MVT::v4f32,
27893 N->getOperand(1), UNDEF);
27894 Results.push_back(DAG.getNode(N->getOpcode(), dl, MVT::v4f32, LHS, RHS));
27895 return;
27896 }
27897 case ISD::SDIV:
27898 case ISD::UDIV:
27899 case ISD::SREM:
27900 case ISD::UREM: {
27901 EVT VT = N->getValueType(0);
27902 if (VT.isVector()) {
27903      assert(getTypeAction(*DAG.getContext(), VT) == TypeWidenVector &&
27904             "Unexpected type action!");
27905 // If this RHS is a constant splat vector we can widen this and let
27906 // division/remainder by constant optimize it.
27907 // TODO: Can we do something for non-splat?
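      // For example (illustration only), a v2i32 sdiv by a splat of 7 is
      // widened to a v4i32 sdiv by <7,7,7,7> (the extra lanes are undef),
      // which the generic divide-by-constant combines can then turn into the
      // usual multiply-by-magic-constant sequence.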
27908 APInt SplatVal;
27909 if (ISD::isConstantSplatVector(N->getOperand(1).getNode(), SplatVal)) {
27910 unsigned NumConcats = 128 / VT.getSizeInBits();
27911 SmallVector<SDValue, 8> Ops0(NumConcats, DAG.getUNDEF(VT));
27912 Ops0[0] = N->getOperand(0);
27913 EVT ResVT = getTypeToTransformTo(*DAG.getContext(), VT);
27914 SDValue N0 = DAG.getNode(ISD::CONCAT_VECTORS, dl, ResVT, Ops0);
27915 SDValue N1 = DAG.getConstant(SplatVal, dl, ResVT);
27916 SDValue Res = DAG.getNode(N->getOpcode(), dl, ResVT, N0, N1);
27917 Results.push_back(Res);
27918 }
27919 return;
27920 }
27921
27922    LLVM_FALLTHROUGH;
27923 }
27924 case ISD::SDIVREM:
27925 case ISD::UDIVREM: {
27926 SDValue V = LowerWin64_i128OP(SDValue(N,0), DAG);
27927 Results.push_back(V);
27928 return;
27929 }
27930 case ISD::TRUNCATE: {
27931 MVT VT = N->getSimpleValueType(0);
27932 if (getTypeAction(*DAG.getContext(), VT) != TypeWidenVector)
27933 return;
27934
27935    // The generic legalizer will try to widen the input type to the same
27936    // number of elements as the widened result type. But this isn't always
27937    // the best thing, so do some custom legalization to avoid some cases.
27938 MVT WidenVT = getTypeToTransformTo(*DAG.getContext(), VT).getSimpleVT();
27939 SDValue In = N->getOperand(0);
27940 EVT InVT = In.getValueType();
27941
27942 unsigned InBits = InVT.getSizeInBits();
27943 if (128 % InBits == 0) {
27944      // 128-bit and smaller inputs should avoid the truncate altogether and
27945      // just use a build_vector that will become a shuffle.
27946 // TODO: Widen and use a shuffle directly?
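      // For example (illustration only), truncating v2i64 to v2i8 extracts the
      // two i64 elements, truncates each to i8, and builds a v16i8 (WidenVT)
      // whose remaining 14 lanes are undef.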
27947 MVT InEltVT = InVT.getSimpleVT().getVectorElementType();
27948 EVT EltVT = VT.getVectorElementType();
27949 unsigned WidenNumElts = WidenVT.getVectorNumElements();
27950 SmallVector<SDValue, 16> Ops(WidenNumElts, DAG.getUNDEF(EltVT));
27951 // Use the original element count so we don't do more scalar opts than
27952 // necessary.
27953 unsigned MinElts = VT.getVectorNumElements();
27954 for (unsigned i=0; i < MinElts; ++i) {
27955 SDValue Val = DAG.getNode(ISD::EXTRACT_VECTOR_ELT, dl, InEltVT, In,
27956 DAG.getIntPtrConstant(i, dl));
27957 Ops[i] = DAG.getNode(ISD::TRUNCATE, dl, EltVT, Val);
27958 }
27959 Results.push_back(DAG.getBuildVector(WidenVT, dl, Ops));
27960 return;
27961 }
27962 // With AVX512 there are some cases that can use a target specific
27963 // truncate node to go from 256/512 to less than 128 with zeros in the
27964 // upper elements of the 128 bit result.
27965 if (Subtarget.hasAVX512() && isTypeLegal(InVT)) {
27966      // We can use VTRUNC directly for 256-bit inputs with VLX, or for any 512-bit input.
27967 if ((InBits == 256 && Subtarget.hasVLX()) || InBits == 512) {
27968 Results.push_back(DAG.getNode(X86ISD::VTRUNC, dl, WidenVT, In));
27969 return;
27970 }
27971 // There's one case we can widen to 512 bits and use VTRUNC.
27972 if (InVT == MVT::v4i64 && VT == MVT::v4i8 && isTypeLegal(MVT::v8i64)) {
27973 In = DAG.getNode(ISD::CONCAT_VECTORS, dl, MVT::v8i64, In,
27974 DAG.getUNDEF(MVT::v4i64));
27975 Results.push_back(DAG.getNode(X86ISD::VTRUNC, dl, WidenVT, In));
27976 return;
27977 }
27978 }
27979 if (Subtarget.hasVLX() && InVT == MVT::v8i64 && VT == MVT::v8i8 &&
27980 getTypeAction(*DAG.getContext(), InVT) == TypeSplitVector &&
27981 isTypeLegal(MVT::v4i64)) {
27982      // Input needs to be split and the output needs to be widened. Let's use two
27983 // VTRUNCs, and shuffle their results together into the wider type.
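      // Each VTRUNC leaves its four truncated bytes in lanes 0-3 of a v16i8,
      // so the shuffle mask below ({0,1,2,3,16,17,18,19, ...}) gathers those
      // two groups into lanes 0-7 of the widened v16i8 result.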
27984 SDValue Lo, Hi;
27985 std::tie(Lo, Hi) = DAG.SplitVector(In, dl);
27986
27987 Lo = DAG.getNode(X86ISD::VTRUNC, dl, MVT::v16i8, Lo);
27988 Hi = DAG.getNode(X86ISD::VTRUNC, dl, MVT::v16i8, Hi);
27989 SDValue Res = DAG.getVectorShuffle(MVT::v16i8, dl, Lo, Hi,
27990 { 0, 1, 2, 3, 16, 17, 18, 19,
27991 -1, -1, -1, -1, -1, -1, -1, -1 });
27992 Results.push_back(Res);
27993 return;
27994 }
27995
27996 return;
27997 }
27998 case ISD::ANY_EXTEND:
27999 // Right now, only MVT::v8i8 has Custom action for an illegal type.
28000 // It's intended to custom handle the input type.
28001    assert(N->getValueType(0) == MVT::v8i8 &&
28002           "Do not know how to legalize this Node");
28003 return;
28004 case ISD::SIGN_EXTEND:
28005 case ISD::ZERO_EXTEND: {
28006 EVT VT = N->getValueType(0);
28007 SDValue In = N->getOperand(0);
28008 EVT InVT = In.getValueType();
28009 if (!Subtarget.hasSSE41() && VT == MVT::v4i64 &&
28010 (InVT == MVT::v4i16 || InVT == MVT::v4i8)){
28011      assert(getTypeAction(*DAG.getContext(), InVT) == TypeWidenVector &&
28012             "Unexpected type action!");
28013      assert(N->getOpcode() == ISD::SIGN_EXTEND && "Unexpected opcode");
28014      // Custom split this so we can extend i8/i16->i32 invec. This is better
28015      // since sign_extend_inreg i8/i16->i64 requires an extend to i32 using
28016      // sra, and then an extend from i32 to i64 using pcmpgt. By custom
28017      // splitting we allow the sra from the extend to i32 to be shared by the split.
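      // Concretely: SignBits below is (0 > In) per lane, i.e. all-ones for
      // negative elements; interleaving In with SignBits via unpackl/unpackh
      // and bitcasting to v2i64 yields the sign-extended 64-bit values
      // (low 32 bits = value, high 32 bits = sign) on little-endian lanes.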
28018 In = DAG.getNode(ISD::SIGN_EXTEND, dl, MVT::v4i32, In);
28019
28020 // Fill a vector with sign bits for each element.
28021 SDValue Zero = DAG.getConstant(0, dl, MVT::v4i32);
28022 SDValue SignBits = DAG.getSetCC(dl, MVT::v4i32, Zero, In, ISD::SETGT);
28023
28024 // Create an unpackl and unpackh to interleave the sign bits then bitcast
28025 // to v2i64.
28026 SDValue Lo = DAG.getVectorShuffle(MVT::v4i32, dl, In, SignBits,
28027 {0, 4, 1, 5});
28028 Lo = DAG.getNode(ISD::BITCAST, dl, MVT::v2i64, Lo);
28029 SDValue Hi = DAG.getVectorShuffle(MVT::v4i32, dl, In, SignBits,
28030 {2, 6, 3, 7});
28031 Hi = DAG.getNode(ISD::BITCAST, dl, MVT::v2i64, Hi);
28032
28033 SDValue Res = DAG.getNode(ISD::CONCAT_VECTORS, dl, VT, Lo, Hi);
28034 Results.push_back(Res);
28035 return;
28036 }
28037
28038 if (VT == MVT::v16i32 || VT == MVT::v8i64) {
28039 if (!InVT.is128BitVector()) {
28040 // Not a 128 bit vector, but maybe type legalization will promote
28041 // it to 128 bits.
28042 if (getTypeAction(*DAG.getContext(), InVT) != TypePromoteInteger)
28043 return;
28044 InVT = getTypeToTransformTo(*DAG.getContext(), InVT);
28045 if (!InVT.is128BitVector())
28046 return;
28047
28048 // Promote the input to 128 bits. Type legalization will turn this into
28049 // zext_inreg/sext_inreg.
28050 In = DAG.getNode(N->getOpcode(), dl, InVT, In);
28051 }
28052
28053 // Perform custom splitting instead of the two stage extend we would get
28054 // by default.
28055 EVT LoVT, HiVT;
28056 std::tie(LoVT, HiVT) = DAG.GetSplitDestVTs(N->getValueType(0));
28057      assert(isTypeLegal(LoVT) && "Split VT not legal?");
28058
28059 SDValue Lo = getExtendInVec(N->getOpcode(), dl, LoVT, In, DAG);
28060
28061 // We need to shift the input over by half the number of elements.
28062 unsigned NumElts = InVT.getVectorNumElements();
28063 unsigned HalfNumElts = NumElts / 2;
28064 SmallVector<int, 16> ShufMask(NumElts, SM_SentinelUndef);
28065 for (unsigned i = 0; i != HalfNumElts; ++i)
28066 ShufMask[i] = i + HalfNumElts;
28067
28068 SDValue Hi = DAG.getVectorShuffle(InVT, dl, In, In, ShufMask);
28069 Hi = getExtendInVec(N->getOpcode(), dl, HiVT, Hi, DAG);
28070
28071 SDValue Res = DAG.getNode(ISD::CONCAT_VECTORS, dl, VT, Lo, Hi);
28072 Results.push_back(Res);
28073 }
28074 return;
28075 }
28076 case ISD::FP_TO_SINT:
28077 case ISD::FP_TO_UINT: {
28078 bool IsSigned = N->getOpcode() == ISD::FP_TO_SINT;
28079 EVT VT = N->getValueType(0);
28080 SDValue Src = N->getOperand(0);
28081 EVT SrcVT = Src.getValueType();
28082
28083 if (VT.isVector() && VT.getScalarSizeInBits() < 32) {
28084      assert(getTypeAction(*DAG.getContext(), VT) == TypeWidenVector &&
28085             "Unexpected type action!");
28086
28087 // Try to create a 128 bit vector, but don't exceed a 32 bit element.
28088 unsigned NewEltWidth = std::min(128 / VT.getVectorNumElements(), 32U);
28089 MVT PromoteVT = MVT::getVectorVT(MVT::getIntegerVT(NewEltWidth),
28090 VT.getVectorNumElements());
28091 SDValue Res = DAG.getNode(ISD::FP_TO_SINT, dl, PromoteVT, Src);
28092
28093 // Preserve what we know about the size of the original result. Except
28094 // when the result is v2i32 since we can't widen the assert.
28095 if (PromoteVT != MVT::v2i32)
28096 Res = DAG.getNode(N->getOpcode() == ISD::FP_TO_UINT ? ISD::AssertZext
28097 : ISD::AssertSext,
28098 dl, PromoteVT, Res,
28099 DAG.getValueType(VT.getVectorElementType()));
28100
28101 // Truncate back to the original width.
28102 Res = DAG.getNode(ISD::TRUNCATE, dl, VT, Res);
28103
28104 // Now widen to 128 bits.
28105 unsigned NumConcats = 128 / VT.getSizeInBits();
28106 MVT ConcatVT = MVT::getVectorVT(VT.getSimpleVT().getVectorElementType(),
28107 VT.getVectorNumElements() * NumConcats);
28108 SmallVector<SDValue, 8> ConcatOps(NumConcats, DAG.getUNDEF(VT));
28109 ConcatOps[0] = Res;
28110 Res = DAG.getNode(ISD::CONCAT_VECTORS, dl, ConcatVT, ConcatOps);
28111 Results.push_back(Res);
28112 return;
28113 }
28114
28115
28116 if (VT == MVT::v2i32) {
28117      assert((IsSigned || Subtarget.hasAVX512()) &&
28118             "Can only handle signed conversion without AVX512");
28119      assert(Subtarget.hasSSE2() && "Requires at least SSE2!");
28120      assert(getTypeAction(*DAG.getContext(), VT) == TypeWidenVector &&
28121             "Unexpected type action!");
28122 if (Src.getValueType() == MVT::v2f64) {
28123 if (!IsSigned && !Subtarget.hasVLX()) {
28124 // If we have VLX we can emit a target specific FP_TO_UINT node,
28125 // otherwise we can defer to the generic legalizer which will widen
28126 // the input as well. This will be further widened during op
28127 // legalization to v8i32<-v8f64.
28128 return;
28129 }
28130 unsigned Opc = IsSigned ? X86ISD::CVTTP2SI : X86ISD::CVTTP2UI;
28131 SDValue Res = DAG.getNode(Opc, dl, MVT::v4i32, Src);
28132 Results.push_back(Res);
28133 return;
28134 }
28135
28136 // The FP_TO_INTHelper below only handles f32/f64/f80 scalar inputs,
28137 // so early out here.
28138 return;
28139 }
28140
28141    assert(!VT.isVector() && "Vectors should have been handled above!");
28142
28143 if (Subtarget.hasDQI() && VT == MVT::i64 &&
28144 (SrcVT == MVT::f32 || SrcVT == MVT::f64)) {
28145      assert(!Subtarget.is64Bit() && "i64 should be legal");
28146 unsigned NumElts = Subtarget.hasVLX() ? 4 : 8;
28147 // Using a 256-bit input here to guarantee 128-bit input for f32 case.
28148 // TODO: Use 128-bit vectors for f64 case?
28149 // TODO: Use 128-bit vectors for f32 by using CVTTP2SI/CVTTP2UI.
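      // In 32-bit mode there is no scalar f32/f64 -> i64 conversion, so the
      // scalar is placed in lane 0 of a wide vector, converted with the packed
      // AVX-512DQ form (e.g. VCVTTPD2QQ), and lane 0 is extracted again.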
28150 MVT VecVT = MVT::getVectorVT(MVT::i64, NumElts);
28151 MVT VecInVT = MVT::getVectorVT(SrcVT.getSimpleVT(), NumElts);
28152
28153 SDValue ZeroIdx = DAG.getIntPtrConstant(0, dl);
28154 SDValue Res = DAG.getNode(ISD::INSERT_VECTOR_ELT, dl, VecInVT,
28155 DAG.getConstantFP(0.0, dl, VecInVT), Src,
28156 ZeroIdx);
28157 Res = DAG.getNode(N->getOpcode(), SDLoc(N), VecVT, Res);
28158 Res = DAG.getNode(ISD::EXTRACT_VECTOR_ELT, dl, VT, Res, ZeroIdx);
28159 Results.push_back(Res);
28160 return;
28161 }
28162
28163 if (SDValue V = FP_TO_INTHelper(SDValue(N, 0), DAG, IsSigned))
28164 Results.push_back(V);
28165 return;
28166 }
28167 case ISD::SINT_TO_FP: {
28168    assert(Subtarget.hasDQI() && Subtarget.hasVLX() && "Requires AVX512DQVL!");
28169 SDValue Src = N->getOperand(0);
28170 if (N->getValueType(0) != MVT::v2f32 || Src.getValueType() != MVT::v2i64)
28171 return;
28172 Results.push_back(DAG.getNode(X86ISD::CVTSI2P, dl, MVT::v4f32, Src));
28173 return;
28174 }
28175 case ISD::UINT_TO_FP: {
28176    assert(Subtarget.hasSSE2() && "Requires at least SSE2!");
28177 EVT VT = N->getValueType(0);
28178 if (VT != MVT::v2f32)
28179 return;
28180 SDValue Src = N->getOperand(0);
28181 EVT SrcVT = Src.getValueType();
28182 if (Subtarget.hasDQI() && Subtarget.hasVLX() && SrcVT == MVT::v2i64) {
28183 Results.push_back(DAG.getNode(X86ISD::CVTUI2P, dl, MVT::v4f32, Src));
28184 return;
28185 }
28186 if (SrcVT != MVT::v2i32)
28187 return;
28188 SDValue ZExtIn = DAG.getNode(ISD::ZERO_EXTEND, dl, MVT::v2i64, Src);
28189 SDValue VBias =
28190 DAG.getConstantFP(BitsToDouble(0x4330000000000000ULL), dl, MVT::v2f64);
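    // 0x4330000000000000 is the double 2^52 (biased exponent 0x433, zero
    // mantissa). OR-ing a zero-extended 32-bit value into the low mantissa
    // bits produces exactly 2^52 + x, so subtracting the bias below recovers
    // x as an exact double before rounding down to f32.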
28191 SDValue Or = DAG.getNode(ISD::OR, dl, MVT::v2i64, ZExtIn,
28192 DAG.getBitcast(MVT::v2i64, VBias));
28193 Or = DAG.getBitcast(MVT::v2f64, Or);
28194 // TODO: Are there any fast-math-flags to propagate here?
28195 SDValue Sub = DAG.getNode(ISD::FSUB, dl, MVT::v2f64, Or, VBias);
28196 Results.push_back(DAG.getNode(X86ISD::VFPROUND, dl, MVT::v4f32, Sub));
28197 return;
28198 }
28199 case ISD::FP_ROUND: {
28200 if (!isTypeLegal(N->getOperand(0).getValueType()))
28201 return;
28202 SDValue V = DAG.getNode(X86ISD::VFPROUND, dl, MVT::v4f32, N->getOperand(0));
28203 Results.push_back(V);
28204 return;
28205 }
28206 case ISD::FP_EXTEND: {
28207 // Right now, only MVT::v2f32 has OperationAction for FP_EXTEND.
28208 // No other ValueType for FP_EXTEND should reach this point.
28209 assert(N->getValueType(0) == MVT::v2f32 &&((N->getValueType(0) == MVT::v2f32 && "Do not know how to legalize this Node"
) ? static_cast<void> (0) : __assert_fail ("N->getValueType(0) == MVT::v2f32 && \"Do not know how to legalize this Node\""
, "/build/llvm-toolchain-snapshot-10~svn374877/lib/Target/X86/X86ISelLowering.cpp"
, 28210, __PRETTY_FUNCTION__))
28210 "Do not know how to legalize this Node")((N->getValueType(0) == MVT::v2f32 && "Do not know how to legalize this Node"
) ? static_cast<void> (0) : __assert_fail ("N->getValueType(0) == MVT::v2f32 && \"Do not know how to legalize this Node\""
, "/build/llvm-toolchain-snapshot-10~svn374877/lib/Target/X86/X86ISelLowering.cpp"
, 28210, __PRETTY_FUNCTION__))
;
28211 return;
28212 }
28213 case ISD::INTRINSIC_W_CHAIN: {
28214 unsigned IntNo = N->getConstantOperandVal(1);
28215 switch (IntNo) {
28216    default : llvm_unreachable("Do not know how to custom type "
28217                               "legalize this intrinsic operation!");
28218 case Intrinsic::x86_rdtsc:
28219 return getReadTimeStampCounter(N, dl, X86::RDTSC, DAG, Subtarget,
28220 Results);
28221 case Intrinsic::x86_rdtscp:
28222 return getReadTimeStampCounter(N, dl, X86::RDTSCP, DAG, Subtarget,
28223 Results);
28224 case Intrinsic::x86_rdpmc:
28225 expandIntrinsicWChainHelper(N, dl, DAG, X86::RDPMC, X86::ECX, Subtarget,
28226 Results);
28227 return;
28228 case Intrinsic::x86_xgetbv:
28229 expandIntrinsicWChainHelper(N, dl, DAG, X86::XGETBV, X86::ECX, Subtarget,
28230 Results);
28231 return;
28232 }
28233 }
28234 case ISD::READCYCLECOUNTER: {
28235 return getReadTimeStampCounter(N, dl, X86::RDTSC, DAG, Subtarget, Results);
28236 }
28237 case ISD::ATOMIC_CMP_SWAP_WITH_SUCCESS: {
28238 EVT T = N->getValueType(0);
28239    assert((T == MVT::i64 || T == MVT::i128) && "can only expand cmpxchg pair");
28240 bool Regs64bit = T == MVT::i128;
28241    assert((!Regs64bit || Subtarget.hasCmpxchg16b()) &&
28242           "64-bit ATOMIC_CMP_SWAP_WITH_SUCCESS requires CMPXCHG16B");
28243 MVT HalfT = Regs64bit ? MVT::i64 : MVT::i32;
28244 SDValue cpInL, cpInH;
28245 cpInL = DAG.getNode(ISD::EXTRACT_ELEMENT, dl, HalfT, N->getOperand(2),
28246 DAG.getConstant(0, dl, HalfT));
28247 cpInH = DAG.getNode(ISD::EXTRACT_ELEMENT, dl, HalfT, N->getOperand(2),
28248 DAG.getConstant(1, dl, HalfT));
28249 cpInL = DAG.getCopyToReg(N->getOperand(0), dl,
28250 Regs64bit ? X86::RAX : X86::EAX,
28251 cpInL, SDValue());
28252 cpInH = DAG.getCopyToReg(cpInL.getValue(0), dl,
28253 Regs64bit ? X86::RDX : X86::EDX,
28254 cpInH, cpInL.getValue(1));
28255 SDValue swapInL, swapInH;
28256 swapInL = DAG.getNode(ISD::EXTRACT_ELEMENT, dl, HalfT, N->getOperand(3),
28257 DAG.getConstant(0, dl, HalfT));
28258 swapInH = DAG.getNode(ISD::EXTRACT_ELEMENT, dl, HalfT, N->getOperand(3),
28259 DAG.getConstant(1, dl, HalfT));
28260 swapInH =
28261 DAG.getCopyToReg(cpInH.getValue(0), dl, Regs64bit ? X86::RCX : X86::ECX,
28262 swapInH, cpInH.getValue(1));
28263 // If the current function needs the base pointer, RBX,
28264 // we cannot use cmpxchg directly:
28265 // the lowering of that instruction will clobber
28266 // that register, and since RBX will be a reserved register
28267 // the register allocator will not make sure its value is
28268 // properly saved and restored around this live range.
28269 const X86RegisterInfo *TRI = Subtarget.getRegisterInfo();
28270 SDValue Result;
28271 SDVTList Tys = DAG.getVTList(MVT::Other, MVT::Glue);
28272 Register BasePtr = TRI->getBaseRegister();
28273 MachineMemOperand *MMO = cast<AtomicSDNode>(N)->getMemOperand();
28274 if (TRI->hasBasePointer(DAG.getMachineFunction()) &&
28275 (BasePtr == X86::RBX || BasePtr == X86::EBX)) {
28276 // ISel prefers the LCMPXCHG64 variant.
28277 // If the assert below fires, that is no longer the case,
28278 // and we need to teach LCMPXCHG8_SAVE_EBX_DAG how to save RBX,
28279 // not just EBX. This is a matter of accepting i64 input for that
28280 // pseudo, and restoring into a register of the right width
28281 // in the expand pseudo. Everything else should just work.
28282 assert(((Regs64bit == (BasePtr == X86::RBX)) || BasePtr == X86::EBX) &&
28283 "Saving only half of the RBX");
28284 unsigned Opcode = Regs64bit ? X86ISD::LCMPXCHG16_SAVE_RBX_DAG
28285 : X86ISD::LCMPXCHG8_SAVE_EBX_DAG;
28286 SDValue RBXSave = DAG.getCopyFromReg(swapInH.getValue(0), dl,
28287 Regs64bit ? X86::RBX : X86::EBX,
28288 HalfT, swapInH.getValue(1));
28289 SDValue Ops[] = {/*Chain*/ RBXSave.getValue(1), N->getOperand(1), swapInL,
28290 RBXSave,
28291 /*Glue*/ RBXSave.getValue(2)};
28292 Result = DAG.getMemIntrinsicNode(Opcode, dl, Tys, Ops, T, MMO);
28293 } else {
28294 unsigned Opcode =
28295 Regs64bit ? X86ISD::LCMPXCHG16_DAG : X86ISD::LCMPXCHG8_DAG;
28296 swapInL = DAG.getCopyToReg(swapInH.getValue(0), dl,
28297 Regs64bit ? X86::RBX : X86::EBX, swapInL,
28298 swapInH.getValue(1));
28299 SDValue Ops[] = {swapInL.getValue(0), N->getOperand(1),
28300 swapInL.getValue(1)};
28301 Result = DAG.getMemIntrinsicNode(Opcode, dl, Tys, Ops, T, MMO);
28302 }
28303 SDValue cpOutL = DAG.getCopyFromReg(Result.getValue(0), dl,
28304 Regs64bit ? X86::RAX : X86::EAX,
28305 HalfT, Result.getValue(1));
28306 SDValue cpOutH = DAG.getCopyFromReg(cpOutL.getValue(1), dl,
28307 Regs64bit ? X86::RDX : X86::EDX,
28308 HalfT, cpOutL.getValue(2));
28309 SDValue OpsF[] = { cpOutL.getValue(0), cpOutH.getValue(0)};
28310
28311 SDValue EFLAGS = DAG.getCopyFromReg(cpOutH.getValue(1), dl, X86::EFLAGS,
28312 MVT::i32, cpOutH.getValue(2));
28313 SDValue Success = getSETCC(X86::COND_E, EFLAGS, dl, DAG);
28314 Success = DAG.getZExtOrTrunc(Success, dl, N->getValueType(1));
28315
28316 Results.push_back(DAG.getNode(ISD::BUILD_PAIR, dl, T, OpsF));
28317 Results.push_back(Success);
28318 Results.push_back(EFLAGS.getValue(1));
28319 return;
28320 }
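// Editorial note (not part of the original source): a minimal user-level
// sketch of code that can reach the cmpxchg-pair expansion above when
// compiled for x86-64 with CMPXCHG16B available; the exact lowering depends
// on the front end and on -mcx16/libatomic configuration.
//
//   #include <atomic>
//   bool cas128(std::atomic<__int128> &A, __int128 &Expected, __int128 Desired) {
//     return A.compare_exchange_strong(Expected, Desired);
//   }
//
// The DAG node produces the loaded value (reassembled from RDX:RAX via the
// BUILD_PAIR above) plus an i1 success flag taken from EFLAGS.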
28321 case ISD::ATOMIC_LOAD: {
28322 assert(N->getValueType(0) == MVT::i64 && "Unexpected VT!");
28323 bool NoImplicitFloatOps =
28324 DAG.getMachineFunction().getFunction().hasFnAttribute(
28325 Attribute::NoImplicitFloat);
28326 if (!Subtarget.useSoftFloat() && !NoImplicitFloatOps) {
28327 auto *Node = cast<AtomicSDNode>(N);
28328 if (Subtarget.hasSSE2()) {
28329 // Use a VZEXT_LOAD which will be selected as MOVQ. Then extract the
28330 // lower 64-bits.
28331 SDVTList Tys = DAG.getVTList(MVT::v2i64, MVT::Other);
28332 SDValue Ops[] = { Node->getChain(), Node->getBasePtr() };
28333 SDValue Ld = DAG.getMemIntrinsicNode(X86ISD::VZEXT_LOAD, dl, Tys, Ops,
28334 MVT::i64, Node->getMemOperand());
28335 SDValue Res = DAG.getNode(ISD::EXTRACT_VECTOR_ELT, dl, MVT::i64, Ld,
28336 DAG.getIntPtrConstant(0, dl));
28337 Results.push_back(Res);
28338 Results.push_back(Ld.getValue(1));
28339 return;
28340 }
28341 if (Subtarget.hasX87()) {
28342 // First load this into an 80-bit X87 register. This will put the whole
28343 // integer into the significand.
28344 // FIXME: Do we need to glue? See FIXME comment in BuildFILD.
28345 SDVTList Tys = DAG.getVTList(MVT::f80, MVT::Other, MVT::Glue);
28346 SDValue Ops[] = { Node->getChain(), Node->getBasePtr() };
28347 SDValue Result = DAG.getMemIntrinsicNode(X86ISD::FILD_FLAG,
28348 dl, Tys, Ops, MVT::i64,
28349 Node->getMemOperand());
28350 SDValue Chain = Result.getValue(1);
28351 SDValue InFlag = Result.getValue(2);
28352
28353 // Now store the X87 register to a stack temporary and convert to i64.
28354 // This store is not atomic and doesn't need to be.
28355 // FIXME: We don't need a stack temporary if the result of the load
28356 // is already being stored. We could just directly store there.
28357 SDValue StackPtr = DAG.CreateStackTemporary(MVT::i64);
28358 int SPFI = cast<FrameIndexSDNode>(StackPtr.getNode())->getIndex();
28359 MachinePointerInfo MPI =
28360 MachinePointerInfo::getFixedStack(DAG.getMachineFunction(), SPFI);
28361 SDValue StoreOps[] = { Chain, Result, StackPtr, InFlag };
28362 Chain = DAG.getMemIntrinsicNode(X86ISD::FIST, dl,
28363 DAG.getVTList(MVT::Other), StoreOps,
28364 MVT::i64, MPI, 0 /*Align*/,
28365 MachineMemOperand::MOStore);
28366
28367 // Finally load the value back from the stack temporary and return it.
28368 // This load is not atomic and doesn't need to be.
28369 // This load will be further type legalized.
28370 Result = DAG.getLoad(MVT::i64, dl, Chain, StackPtr, MPI);
28371 Results.push_back(Result);
28372 Results.push_back(Result.getValue(1));
28373 return;
28374 }
28375 }
28376 // TODO: Use MOVLPS when SSE1 is available?
28377 // Delegate to generic TypeLegalization. Situations we can really handle
28378 // should have already been dealt with by AtomicExpandPass.cpp.
28379 break;
28380 }
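// Editorial note (not part of the original source): a sketch of the kind of
// 32-bit-target code that benefits from the MOVQ/FILD paths above; on i386 a
// 64-bit atomic load must be a single 8-byte access, so two 32-bit loads are
// not an option.
//
//   #include <atomic>
//   long long load64(const std::atomic<long long> &X) {
//     return X.load(std::memory_order_seq_cst);  // one MOVQ (SSE2) or FILD (x87)
//   }
//
// Whether such a load actually takes the path above depends on how the front
// end and AtomicExpandPass lower the access.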
28381 case ISD::ATOMIC_SWAP:
28382 case ISD::ATOMIC_LOAD_ADD:
28383 case ISD::ATOMIC_LOAD_SUB:
28384 case ISD::ATOMIC_LOAD_AND:
28385 case ISD::ATOMIC_LOAD_OR:
28386 case ISD::ATOMIC_LOAD_XOR:
28387 case ISD::ATOMIC_LOAD_NAND:
28388 case ISD::ATOMIC_LOAD_MIN:
28389 case ISD::ATOMIC_LOAD_MAX:
28390 case ISD::ATOMIC_LOAD_UMIN:
28391 case ISD::ATOMIC_LOAD_UMAX:
28392 // Delegate to generic TypeLegalization. Situations we can really handle
28393 // should have already been dealt with by AtomicExpandPass.cpp.
28394 break;
28395
28396 case ISD::BITCAST: {
28397 assert(Subtarget.hasSSE2() && "Requires at least SSE2!");
28398 EVT DstVT = N->getValueType(0);
28399 EVT SrcVT = N->getOperand(0).getValueType();
28400
28401 // If this is a bitcast from a v64i1 k-register to an i64 on a 32-bit target
28402 // we can split using the k-register rather than memory.
28403 if (SrcVT == MVT::v64i1 && DstVT == MVT::i64 && Subtarget.hasBWI()) {
28404 assert(!Subtarget.is64Bit() && "Expected 32-bit mode");
28405 SDValue Lo, Hi;
28406 std::tie(Lo, Hi) = DAG.SplitVectorOperand(N, 0);
28407 Lo = DAG.getBitcast(MVT::i32, Lo);
28408 Hi = DAG.getBitcast(MVT::i32, Hi);
28409 SDValue Res = DAG.getNode(ISD::BUILD_PAIR, dl, MVT::i64, Lo, Hi);
28410 Results.push_back(Res);
28411 return;
28412 }
28413
28414 // Custom splitting for BWI types when AVX512F is available but BWI isn't.
28415 if ((DstVT == MVT::v32i16 || DstVT == MVT::v64i8) &&
28416 SrcVT.isVector() && isTypeLegal(SrcVT)) {
28417 SDValue Lo, Hi;
28418 std::tie(Lo, Hi) = DAG.SplitVectorOperand(N, 0);
28419 MVT CastVT = (DstVT == MVT::v32i16) ? MVT::v16i16 : MVT::v32i8;
28420 Lo = DAG.getBitcast(CastVT, Lo);
28421 Hi = DAG.getBitcast(CastVT, Hi);
28422 SDValue Res = DAG.getNode(ISD::CONCAT_VECTORS, dl, DstVT, Lo, Hi);
28423 Results.push_back(Res);
28424 return;
28425 }
28426
28427 if (DstVT.isVector() && SrcVT == MVT::x86mmx) {
28428 assert(getTypeAction(*DAG.getContext(), DstVT) == TypeWidenVector &&
28429 "Unexpected type action!");
28430 EVT WideVT = getTypeToTransformTo(*DAG.getContext(), DstVT);
28431 SDValue Res = DAG.getNode(X86ISD::MOVQ2DQ, dl, WideVT, N->getOperand(0));
28432 Results.push_back(Res);
28433 return;
28434 }
28435
28436 return;
28437 }
28438 case ISD::MGATHER: {
28439 EVT VT = N->getValueType(0);
28440 if ((VT == MVT::v2f32 || VT == MVT::v2i32) &&
28441 (Subtarget.hasVLX() || !Subtarget.hasAVX512())) {
28442 auto *Gather = cast<MaskedGatherSDNode>(N);
28443 SDValue Index = Gather->getIndex();
28444 if (Index.getValueType() != MVT::v2i64)
28445 return;
28446 assert(getTypeAction(*DAG.getContext(), VT) == TypeWidenVector &&
28447 "Unexpected type action!");
28448 EVT WideVT = getTypeToTransformTo(*DAG.getContext(), VT);
28449 SDValue Mask = Gather->getMask();
28450 assert(Mask.getValueType() == MVT::v2i1 && "Unexpected mask type");
28451 SDValue PassThru = DAG.getNode(ISD::CONCAT_VECTORS, dl, WideVT,
28452 Gather->getPassThru(),
28453 DAG.getUNDEF(VT));
28454 if (!Subtarget.hasVLX()) {
28455 // We need to widen the mask, but the instruction will only use 2
28456 // of its elements. So we can use undef.
28457 Mask = DAG.getNode(ISD::CONCAT_VECTORS, dl, MVT::v4i1, Mask,
28458 DAG.getUNDEF(MVT::v2i1));
28459 Mask = DAG.getNode(ISD::SIGN_EXTEND, dl, MVT::v4i32, Mask);
28460 }
28461 SDValue Ops[] = { Gather->getChain(), PassThru, Mask,
28462 Gather->getBasePtr(), Index, Gather->getScale() };
28463 SDValue Res = DAG.getTargetMemSDNode<X86MaskedGatherSDNode>(
28464 DAG.getVTList(WideVT, Mask.getValueType(), MVT::Other), Ops, dl,
28465 Gather->getMemoryVT(), Gather->getMemOperand());
28466 Results.push_back(Res);
28467 Results.push_back(Res.getValue(2));
28468 return;
28469 }
28470 return;
28471 }
28472 case ISD::LOAD: {
28473 // Use an f64/i64 load and a scalar_to_vector for v2f32/v2i32 loads. This
28474 // avoids scalarizing in 32-bit mode. In 64-bit mode this avoids an int->fp
28475 // cast since type legalization will try to use an i64 load.
28476 MVT VT = N->getSimpleValueType(0);
28477 assert(VT.isVector() && VT.getSizeInBits() == 64 && "Unexpected VT");
28478 assert(getTypeAction(*DAG.getContext(), VT) == TypeWidenVector &&
28479 "Unexpected type action!");
28480 if (!ISD::isNON_EXTLoad(N))
28481 return;
28482 auto *Ld = cast<LoadSDNode>(N);
28483 if (Subtarget.hasSSE2()) {
28484 MVT LdVT = Subtarget.is64Bit() && VT.isInteger() ? MVT::i64 : MVT::f64;
28485 SDValue Res = DAG.getLoad(LdVT, dl, Ld->getChain(), Ld->getBasePtr(),
28486 Ld->getPointerInfo(), Ld->getAlignment(),
28487 Ld->getMemOperand()->getFlags());
28488 SDValue Chain = Res.getValue(1);
28489 MVT VecVT = MVT::getVectorVT(LdVT, 2);
28490 Res = DAG.getNode(ISD::SCALAR_TO_VECTOR, dl, VecVT, Res);
28491 EVT WideVT = getTypeToTransformTo(*DAG.getContext(), VT);
28492 Res = DAG.getBitcast(WideVT, Res);
28493 Results.push_back(Res);
28494 Results.push_back(Chain);
28495 return;
28496 }
28497 assert(Subtarget.hasSSE1() && "Expected SSE");
28498 SDVTList Tys = DAG.getVTList(MVT::v4f32, MVT::Other);
28499 SDValue Ops[] = {Ld->getChain(), Ld->getBasePtr()};
28500 SDValue Res = DAG.getMemIntrinsicNode(X86ISD::VZEXT_LOAD, dl, Tys, Ops,
28501 MVT::i64, Ld->getMemOperand());
28502 Results.push_back(Res);
28503 Results.push_back(Res.getValue(1));
28504 return;
28505 }
28506 }
28507}
28508
28509const char *X86TargetLowering::getTargetNodeName(unsigned Opcode) const {
28510 switch ((X86ISD::NodeType)Opcode) {
28511 case X86ISD::FIRST_NUMBER: break;
28512 case X86ISD::BSF: return "X86ISD::BSF";
28513 case X86ISD::BSR: return "X86ISD::BSR";
28514 case X86ISD::SHLD: return "X86ISD::SHLD";
28515 case X86ISD::SHRD: return "X86ISD::SHRD";
28516 case X86ISD::FAND: return "X86ISD::FAND";
28517 case X86ISD::FANDN: return "X86ISD::FANDN";
28518 case X86ISD::FOR: return "X86ISD::FOR";
28519 case X86ISD::FXOR: return "X86ISD::FXOR";
28520 case X86ISD::FILD: return "X86ISD::FILD";
28521 case X86ISD::FILD_FLAG: return "X86ISD::FILD_FLAG";
28522 case X86ISD::FIST: return "X86ISD::FIST";
28523 case X86ISD::FP_TO_INT_IN_MEM: return "X86ISD::FP_TO_INT_IN_MEM";
28524 case X86ISD::FLD: return "X86ISD::FLD";
28525 case X86ISD::FST: return "X86ISD::FST";
28526 case X86ISD::CALL: return "X86ISD::CALL";
28527 case X86ISD::BT: return "X86ISD::BT";
28528 case X86ISD::CMP: return "X86ISD::CMP";
28529 case X86ISD::COMI: return "X86ISD::COMI";
28530 case X86ISD::UCOMI: return "X86ISD::UCOMI";
28531 case X86ISD::CMPM: return "X86ISD::CMPM";
28532 case X86ISD::CMPM_SAE: return "X86ISD::CMPM_SAE";
28533 case X86ISD::SETCC: return "X86ISD::SETCC";
28534 case X86ISD::SETCC_CARRY: return "X86ISD::SETCC_CARRY";
28535 case X86ISD::FSETCC: return "X86ISD::FSETCC";
28536 case X86ISD::FSETCCM: return "X86ISD::FSETCCM";
28537 case X86ISD::FSETCCM_SAE: return "X86ISD::FSETCCM_SAE";
28538 case X86ISD::CMOV: return "X86ISD::CMOV";
28539 case X86ISD::BRCOND: return "X86ISD::BRCOND";
28540 case X86ISD::RET_FLAG: return "X86ISD::RET_FLAG";
28541 case X86ISD::IRET: return "X86ISD::IRET";
28542 case X86ISD::REP_STOS: return "X86ISD::REP_STOS";
28543 case X86ISD::REP_MOVS: return "X86ISD::REP_MOVS";
28544 case X86ISD::GlobalBaseReg: return "X86ISD::GlobalBaseReg";
28545 case X86ISD::Wrapper: return "X86ISD::Wrapper";
28546 case X86ISD::WrapperRIP: return "X86ISD::WrapperRIP";
28547 case X86ISD::MOVQ2DQ: return "X86ISD::MOVQ2DQ";
28548 case X86ISD::MOVDQ2Q: return "X86ISD::MOVDQ2Q";
28549 case X86ISD::MMX_MOVD2W: return "X86ISD::MMX_MOVD2W";
28550 case X86ISD::MMX_MOVW2D: return "X86ISD::MMX_MOVW2D";
28551 case X86ISD::PEXTRB: return "X86ISD::PEXTRB";
28552 case X86ISD::PEXTRW: return "X86ISD::PEXTRW";
28553 case X86ISD::INSERTPS: return "X86ISD::INSERTPS";
28554 case X86ISD::PINSRB: return "X86ISD::PINSRB";
28555 case X86ISD::PINSRW: return "X86ISD::PINSRW";
28556 case X86ISD::PSHUFB: return "X86ISD::PSHUFB";
28557 case X86ISD::ANDNP: return "X86ISD::ANDNP";
28558 case X86ISD::BLENDI: return "X86ISD::BLENDI";
28559 case X86ISD::BLENDV: return "X86ISD::BLENDV";
28560 case X86ISD::HADD: return "X86ISD::HADD";
28561 case X86ISD::HSUB: return "X86ISD::HSUB";
28562 case X86ISD::FHADD: return "X86ISD::FHADD";
28563 case X86ISD::FHSUB: return "X86ISD::FHSUB";
28564 case X86ISD::CONFLICT: return "X86ISD::CONFLICT";
28565 case X86ISD::FMAX: return "X86ISD::FMAX";
28566 case X86ISD::FMAXS: return "X86ISD::FMAXS";
28567 case X86ISD::FMAX_SAE: return "X86ISD::FMAX_SAE";
28568 case X86ISD::FMAXS_SAE: return "X86ISD::FMAXS_SAE";
28569 case X86ISD::FMIN: return "X86ISD::FMIN";
28570 case X86ISD::FMINS: return "X86ISD::FMINS";
28571 case X86ISD::FMIN_SAE: return "X86ISD::FMIN_SAE";
28572 case X86ISD::FMINS_SAE: return "X86ISD::FMINS_SAE";
28573 case X86ISD::FMAXC: return "X86ISD::FMAXC";
28574 case X86ISD::FMINC: return "X86ISD::FMINC";
28575 case X86ISD::FRSQRT: return "X86ISD::FRSQRT";
28576 case X86ISD::FRCP: return "X86ISD::FRCP";
28577 case X86ISD::EXTRQI: return "X86ISD::EXTRQI";
28578 case X86ISD::INSERTQI: return "X86ISD::INSERTQI";
28579 case X86ISD::TLSADDR: return "X86ISD::TLSADDR";
28580 case X86ISD::TLSBASEADDR: return "X86ISD::TLSBASEADDR";
28581 case X86ISD::TLSCALL: return "X86ISD::TLSCALL";
28582 case X86ISD::EH_SJLJ_SETJMP: return "X86ISD::EH_SJLJ_SETJMP";
28583 case X86ISD::EH_SJLJ_LONGJMP: return "X86ISD::EH_SJLJ_LONGJMP";
28584 case X86ISD::EH_SJLJ_SETUP_DISPATCH:
28585 return "X86ISD::EH_SJLJ_SETUP_DISPATCH";
28586 case X86ISD::EH_RETURN: return "X86ISD::EH_RETURN";
28587 case X86ISD::TC_RETURN: return "X86ISD::TC_RETURN";
28588 case X86ISD::FNSTCW16m: return "X86ISD::FNSTCW16m";
28589 case X86ISD::FNSTSW16r: return "X86ISD::FNSTSW16r";
28590 case X86ISD::LCMPXCHG_DAG: return "X86ISD::LCMPXCHG_DAG";
28591 case X86ISD::LCMPXCHG8_DAG: return "X86ISD::LCMPXCHG8_DAG";
28592 case X86ISD::LCMPXCHG16_DAG: return "X86ISD::LCMPXCHG16_DAG";
28593 case X86ISD::LCMPXCHG8_SAVE_EBX_DAG:
28594 return "X86ISD::LCMPXCHG8_SAVE_EBX_DAG";
28595 case X86ISD::LCMPXCHG16_SAVE_RBX_DAG:
28596 return "X86ISD::LCMPXCHG16_SAVE_RBX_DAG";
28597 case X86ISD::LADD: return "X86ISD::LADD";
28598 case X86ISD::LSUB: return "X86ISD::LSUB";
28599 case X86ISD::LOR: return "X86ISD::LOR";
28600 case X86ISD::LXOR: return "X86ISD::LXOR";
28601 case X86ISD::LAND: return "X86ISD::LAND";
28602 case X86ISD::VZEXT_MOVL: return "X86ISD::VZEXT_MOVL";
28603 case X86ISD::VZEXT_LOAD: return "X86ISD::VZEXT_LOAD";
28604 case X86ISD::VEXTRACT_STORE: return "X86ISD::VEXTRACT_STORE";
28605 case X86ISD::VTRUNC: return "X86ISD::VTRUNC";
28606 case X86ISD::VTRUNCS: return "X86ISD::VTRUNCS";
28607 case X86ISD::VTRUNCUS: return "X86ISD::VTRUNCUS";
28608 case X86ISD::VMTRUNC: return "X86ISD::VMTRUNC";
28609 case X86ISD::VMTRUNCS: return "X86ISD::VMTRUNCS";
28610 case X86ISD::VMTRUNCUS: return "X86ISD::VMTRUNCUS";
28611 case X86ISD::VTRUNCSTORES: return "X86ISD::VTRUNCSTORES";
28612 case X86ISD::VTRUNCSTOREUS: return "X86ISD::VTRUNCSTOREUS";
28613 case X86ISD::VMTRUNCSTORES: return "X86ISD::VMTRUNCSTORES";
28614 case X86ISD::VMTRUNCSTOREUS: return "X86ISD::VMTRUNCSTOREUS";
28615 case X86ISD::VFPEXT: return "X86ISD::VFPEXT";
28616 case X86ISD::VFPEXT_SAE: return "X86ISD::VFPEXT_SAE";
28617 case X86ISD::VFPEXTS: return "X86ISD::VFPEXTS";
28618 case X86ISD::VFPEXTS_SAE: return "X86ISD::VFPEXTS_SAE";
28619 case X86ISD::VFPROUND: return "X86ISD::VFPROUND";
28620 case X86ISD::VMFPROUND: return "X86ISD::VMFPROUND";
28621 case X86ISD::VFPROUND_RND: return "X86ISD::VFPROUND_RND";
28622 case X86ISD::VFPROUNDS: return "X86ISD::VFPROUNDS";
28623 case X86ISD::VFPROUNDS_RND: return "X86ISD::VFPROUNDS_RND";
28624 case X86ISD::VSHLDQ: return "X86ISD::VSHLDQ";
28625 case X86ISD::VSRLDQ: return "X86ISD::VSRLDQ";
28626 case X86ISD::VSHL: return "X86ISD::VSHL";
28627 case X86ISD::VSRL: return "X86ISD::VSRL";
28628 case X86ISD::VSRA: return "X86ISD::VSRA";
28629 case X86ISD::VSHLI: return "X86ISD::VSHLI";
28630 case X86ISD::VSRLI: return "X86ISD::VSRLI";
28631 case X86ISD::VSRAI: return "X86ISD::VSRAI";
28632 case X86ISD::VSHLV: return "X86ISD::VSHLV";
28633 case X86ISD::VSRLV: return "X86ISD::VSRLV";
28634 case X86ISD::VSRAV: return "X86ISD::VSRAV";
28635 case X86ISD::VROTLI: return "X86ISD::VROTLI";
28636 case X86ISD::VROTRI: return "X86ISD::VROTRI";
28637 case X86ISD::VPPERM: return "X86ISD::VPPERM";
28638 case X86ISD::CMPP: return "X86ISD::CMPP";
28639 case X86ISD::PCMPEQ: return "X86ISD::PCMPEQ";
28640 case X86ISD::PCMPGT: return "X86ISD::PCMPGT";
28641 case X86ISD::PHMINPOS: return "X86ISD::PHMINPOS";
28642 case X86ISD::ADD: return "X86ISD::ADD";
28643 case X86ISD::SUB: return "X86ISD::SUB";
28644 case X86ISD::ADC: return "X86ISD::ADC";
28645 case X86ISD::SBB: return "X86ISD::SBB";
28646 case X86ISD::SMUL: return "X86ISD::SMUL";
28647 case X86ISD::UMUL: return "X86ISD::UMUL";
28648 case X86ISD::OR: return "X86ISD::OR";
28649 case X86ISD::XOR: return "X86ISD::XOR";
28650 case X86ISD::AND: return "X86ISD::AND";
28651 case X86ISD::BEXTR: return "X86ISD::BEXTR";
28652 case X86ISD::BZHI: return "X86ISD::BZHI";
28653 case X86ISD::MUL_IMM: return "X86ISD::MUL_IMM";
28654 case X86ISD::MOVMSK: return "X86ISD::MOVMSK";
28655 case X86ISD::PTEST: return "X86ISD::PTEST";
28656 case X86ISD::TESTP: return "X86ISD::TESTP";
28657 case X86ISD::KORTEST: return "X86ISD::KORTEST";
28658 case X86ISD::KTEST: return "X86ISD::KTEST";
28659 case X86ISD::KADD: return "X86ISD::KADD";
28660 case X86ISD::KSHIFTL: return "X86ISD::KSHIFTL";
28661 case X86ISD::KSHIFTR: return "X86ISD::KSHIFTR";
28662 case X86ISD::PACKSS: return "X86ISD::PACKSS";
28663 case X86ISD::PACKUS: return "X86ISD::PACKUS";
28664 case X86ISD::PALIGNR: return "X86ISD::PALIGNR";
28665 case X86ISD::VALIGN: return "X86ISD::VALIGN";
28666 case X86ISD::VSHLD: return "X86ISD::VSHLD";
28667 case X86ISD::VSHRD: return "X86ISD::VSHRD";
28668 case X86ISD::VSHLDV: return "X86ISD::VSHLDV";
28669 case X86ISD::VSHRDV: return "X86ISD::VSHRDV";
28670 case X86ISD::PSHUFD: return "X86ISD::PSHUFD";
28671 case X86ISD::PSHUFHW: return "X86ISD::PSHUFHW";
28672 case X86ISD::PSHUFLW: return "X86ISD::PSHUFLW";
28673 case X86ISD::SHUFP: return "X86ISD::SHUFP";
28674 case X86ISD::SHUF128: return "X86ISD::SHUF128";
28675 case X86ISD::MOVLHPS: return "X86ISD::MOVLHPS";
28676 case X86ISD::MOVHLPS: return "X86ISD::MOVHLPS";
28677 case X86ISD::MOVDDUP: return "X86ISD::MOVDDUP";
28678 case X86ISD::MOVSHDUP: return "X86ISD::MOVSHDUP";
28679 case X86ISD::MOVSLDUP: return "X86ISD::MOVSLDUP";
28680 case X86ISD::MOVSD: return "X86ISD::MOVSD";
28681 case X86ISD::MOVSS: return "X86ISD::MOVSS";
28682 case X86ISD::UNPCKL: return "X86ISD::UNPCKL";
28683 case X86ISD::UNPCKH: return "X86ISD::UNPCKH";
28684 case X86ISD::VBROADCAST: return "X86ISD::VBROADCAST";
28685 case X86ISD::VBROADCAST_LOAD: return "X86ISD::VBROADCAST_LOAD";
28686 case X86ISD::VBROADCASTM: return "X86ISD::VBROADCASTM";
28687 case X86ISD::SUBV_BROADCAST: return "X86ISD::SUBV_BROADCAST";
28688 case X86ISD::VPERMILPV: return "X86ISD::VPERMILPV";
28689 case X86ISD::VPERMILPI: return "X86ISD::VPERMILPI";
28690 case X86ISD::VPERM2X128: return "X86ISD::VPERM2X128";
28691 case X86ISD::VPERMV: return "X86ISD::VPERMV";
28692 case X86ISD::VPERMV3: return "X86ISD::VPERMV3";
28693 case X86ISD::VPERMI: return "X86ISD::VPERMI";
28694 case X86ISD::VPTERNLOG: return "X86ISD::VPTERNLOG";
28695 case X86ISD::VFIXUPIMM: return "X86ISD::VFIXUPIMM";
28696 case X86ISD::VFIXUPIMM_SAE: return "X86ISD::VFIXUPIMM_SAE";
28697 case X86ISD::VFIXUPIMMS: return "X86ISD::VFIXUPIMMS";
28698 case X86ISD::VFIXUPIMMS_SAE: return "X86ISD::VFIXUPIMMS_SAE";
28699 case X86ISD::VRANGE: return "X86ISD::VRANGE";
28700 case X86ISD::VRANGE_SAE: return "X86ISD::VRANGE_SAE";
28701 case X86ISD::VRANGES: return "X86ISD::VRANGES";
28702 case X86ISD::VRANGES_SAE: return "X86ISD::VRANGES_SAE";
28703 case X86ISD::PMULUDQ: return "X86ISD::PMULUDQ";
28704 case X86ISD::PMULDQ: return "X86ISD::PMULDQ";
28705 case X86ISD::PSADBW: return "X86ISD::PSADBW";
28706 case X86ISD::DBPSADBW: return "X86ISD::DBPSADBW";
28707 case X86ISD::VASTART_SAVE_XMM_REGS: return "X86ISD::VASTART_SAVE_XMM_REGS";
28708 case X86ISD::VAARG_64: return "X86ISD::VAARG_64";
28709 case X86ISD::WIN_ALLOCA: return "X86ISD::WIN_ALLOCA";
28710 case X86ISD::MEMBARRIER: return "X86ISD::MEMBARRIER";
28711 case X86ISD::MFENCE: return "X86ISD::MFENCE";
28712 case X86ISD::SEG_ALLOCA: return "X86ISD::SEG_ALLOCA";
28713 case X86ISD::SAHF: return "X86ISD::SAHF";
28714 case X86ISD::RDRAND: return "X86ISD::RDRAND";
28715 case X86ISD::RDSEED: return "X86ISD::RDSEED";
28716 case X86ISD::RDPKRU: return "X86ISD::RDPKRU";
28717 case X86ISD::WRPKRU: return "X86ISD::WRPKRU";
28718 case X86ISD::VPMADDUBSW: return "X86ISD::VPMADDUBSW";
28719 case X86ISD::VPMADDWD: return "X86ISD::VPMADDWD";
28720 case X86ISD::VPSHA: return "X86ISD::VPSHA";
28721 case X86ISD::VPSHL: return "X86ISD::VPSHL";
28722 case X86ISD::VPCOM: return "X86ISD::VPCOM";
28723 case X86ISD::VPCOMU: return "X86ISD::VPCOMU";
28724 case X86ISD::VPERMIL2: return "X86ISD::VPERMIL2";
28725 case X86ISD::FMSUB: return "X86ISD::FMSUB";
28726 case X86ISD::FNMADD: return "X86ISD::FNMADD";
28727 case X86ISD::FNMSUB: return "X86ISD::FNMSUB";
28728 case X86ISD::FMADDSUB: return "X86ISD::FMADDSUB";
28729 case X86ISD::FMSUBADD: return "X86ISD::FMSUBADD";
28730 case X86ISD::FMADD_RND: return "X86ISD::FMADD_RND";
28731 case X86ISD::FNMADD_RND: return "X86ISD::FNMADD_RND";
28732 case X86ISD::FMSUB_RND: return "X86ISD::FMSUB_RND";
28733 case X86ISD::FNMSUB_RND: return "X86ISD::FNMSUB_RND";
28734 case X86ISD::FMADDSUB_RND: return "X86ISD::FMADDSUB_RND";
28735 case X86ISD::FMSUBADD_RND: return "X86ISD::FMSUBADD_RND";
28736 case X86ISD::VPMADD52H: return "X86ISD::VPMADD52H";
28737 case X86ISD::VPMADD52L: return "X86ISD::VPMADD52L";
28738 case X86ISD::VRNDSCALE: return "X86ISD::VRNDSCALE";
28739 case X86ISD::VRNDSCALE_SAE: return "X86ISD::VRNDSCALE_SAE";
28740 case X86ISD::VRNDSCALES: return "X86ISD::VRNDSCALES";
28741 case X86ISD::VRNDSCALES_SAE: return "X86ISD::VRNDSCALES_SAE";
28742 case X86ISD::VREDUCE: return "X86ISD::VREDUCE";
28743 case X86ISD::VREDUCE_SAE: return "X86ISD::VREDUCE_SAE";
28744 case X86ISD::VREDUCES: return "X86ISD::VREDUCES";
28745 case X86ISD::VREDUCES_SAE: return "X86ISD::VREDUCES_SAE";
28746 case X86ISD::VGETMANT: return "X86ISD::VGETMANT";
28747 case X86ISD::VGETMANT_SAE: return "X86ISD::VGETMANT_SAE";
28748 case X86ISD::VGETMANTS: return "X86ISD::VGETMANTS";
28749 case X86ISD::VGETMANTS_SAE: return "X86ISD::VGETMANTS_SAE";
28750 case X86ISD::PCMPESTR: return "X86ISD::PCMPESTR";
28751 case X86ISD::PCMPISTR: return "X86ISD::PCMPISTR";
28752 case X86ISD::XTEST: return "X86ISD::XTEST";
28753 case X86ISD::COMPRESS: return "X86ISD::COMPRESS";
28754 case X86ISD::EXPAND: return "X86ISD::EXPAND";
28755 case X86ISD::SELECTS: return "X86ISD::SELECTS";
28756 case X86ISD::ADDSUB: return "X86ISD::ADDSUB";
28757 case X86ISD::RCP14: return "X86ISD::RCP14";
28758 case X86ISD::RCP14S: return "X86ISD::RCP14S";
28759 case X86ISD::RCP28: return "X86ISD::RCP28";
28760 case X86ISD::RCP28_SAE: return "X86ISD::RCP28_SAE";
28761 case X86ISD::RCP28S: return "X86ISD::RCP28S";
28762 case X86ISD::RCP28S_SAE: return "X86ISD::RCP28S_SAE";
28763 case X86ISD::EXP2: return "X86ISD::EXP2";
28764 case X86ISD::EXP2_SAE: return "X86ISD::EXP2_SAE";
28765 case X86ISD::RSQRT14: return "X86ISD::RSQRT14";
28766 case X86ISD::RSQRT14S: return "X86ISD::RSQRT14S";
28767 case X86ISD::RSQRT28: return "X86ISD::RSQRT28";
28768 case X86ISD::RSQRT28_SAE: return "X86ISD::RSQRT28_SAE";
28769 case X86ISD::RSQRT28S: return "X86ISD::RSQRT28S";
28770 case X86ISD::RSQRT28S_SAE: return "X86ISD::RSQRT28S_SAE";
28771 case X86ISD::FADD_RND: return "X86ISD::FADD_RND";
28772 case X86ISD::FADDS: return "X86ISD::FADDS";
28773 case X86ISD::FADDS_RND: return "X86ISD::FADDS_RND";
28774 case X86ISD::FSUB_RND: return "X86ISD::FSUB_RND";
28775 case X86ISD::FSUBS: return "X86ISD::FSUBS";
28776 case X86ISD::FSUBS_RND: return "X86ISD::FSUBS_RND";
28777 case X86ISD::FMUL_RND: return "X86ISD::FMUL_RND";
28778 case X86ISD::FMULS: return "X86ISD::FMULS";
28779 case X86ISD::FMULS_RND: return "X86ISD::FMULS_RND";
28780 case X86ISD::FDIV_RND: return "X86ISD::FDIV_RND";
28781 case X86ISD::FDIVS: return "X86ISD::FDIVS";
28782 case X86ISD::FDIVS_RND: return "X86ISD::FDIVS_RND";
28783 case X86ISD::FSQRT_RND: return "X86ISD::FSQRT_RND";
28784 case X86ISD::FSQRTS: return "X86ISD::FSQRTS";
28785 case X86ISD::FSQRTS_RND: return "X86ISD::FSQRTS_RND";
28786 case X86ISD::FGETEXP: return "X86ISD::FGETEXP";
28787 case X86ISD::FGETEXP_SAE: return "X86ISD::FGETEXP_SAE";
28788 case X86ISD::FGETEXPS: return "X86ISD::FGETEXPS";
28789 case X86ISD::FGETEXPS_SAE: return "X86ISD::FGETEXPS_SAE";
28790 case X86ISD::SCALEF: return "X86ISD::SCALEF";
28791 case X86ISD::SCALEF_RND: return "X86ISD::SCALEF_RND";
28792 case X86ISD::SCALEFS: return "X86ISD::SCALEFS";
28793 case X86ISD::SCALEFS_RND: return "X86ISD::SCALEFS_RND";
28794 case X86ISD::AVG: return "X86ISD::AVG";
28795 case X86ISD::MULHRS: return "X86ISD::MULHRS";
28796 case X86ISD::SINT_TO_FP_RND: return "X86ISD::SINT_TO_FP_RND";
28797 case X86ISD::UINT_TO_FP_RND: return "X86ISD::UINT_TO_FP_RND";
28798 case X86ISD::CVTTP2SI: return "X86ISD::CVTTP2SI";
28799 case X86ISD::CVTTP2UI: return "X86ISD::CVTTP2UI";
28800 case X86ISD::MCVTTP2SI: return "X86ISD::MCVTTP2SI";
28801 case X86ISD::MCVTTP2UI: return "X86ISD::MCVTTP2UI";
28802 case X86ISD::CVTTP2SI_SAE: return "X86ISD::CVTTP2SI_SAE";
28803 case X86ISD::CVTTP2UI_SAE: return "X86ISD::CVTTP2UI_SAE";
28804 case X86ISD::CVTTS2SI: return "X86ISD::CVTTS2SI";
28805 case X86ISD::CVTTS2UI: return "X86ISD::CVTTS2UI";
28806 case X86ISD::CVTTS2SI_SAE: return "X86ISD::CVTTS2SI_SAE";
28807 case X86ISD::CVTTS2UI_SAE: return "X86ISD::CVTTS2UI_SAE";
28808 case X86ISD::CVTSI2P: return "X86ISD::CVTSI2P";
28809 case X86ISD::CVTUI2P: return "X86ISD::CVTUI2P";
28810 case X86ISD::MCVTSI2P: return "X86ISD::MCVTSI2P";
28811 case X86ISD::MCVTUI2P: return "X86ISD::MCVTUI2P";
28812 case X86ISD::VFPCLASS: return "X86ISD::VFPCLASS";
28813 case X86ISD::VFPCLASSS: return "X86ISD::VFPCLASSS";
28814 case X86ISD::MULTISHIFT: return "X86ISD::MULTISHIFT";
28815 case X86ISD::SCALAR_SINT_TO_FP: return "X86ISD::SCALAR_SINT_TO_FP";
28816 case X86ISD::SCALAR_SINT_TO_FP_RND: return "X86ISD::SCALAR_SINT_TO_FP_RND";
28817 case X86ISD::SCALAR_UINT_TO_FP: return "X86ISD::SCALAR_UINT_TO_FP";
28818 case X86ISD::SCALAR_UINT_TO_FP_RND: return "X86ISD::SCALAR_UINT_TO_FP_RND";
28819 case X86ISD::CVTPS2PH: return "X86ISD::CVTPS2PH";
28820 case X86ISD::MCVTPS2PH: return "X86ISD::MCVTPS2PH";
28821 case X86ISD::CVTPH2PS: return "X86ISD::CVTPH2PS";
28822 case X86ISD::CVTPH2PS_SAE: return "X86ISD::CVTPH2PS_SAE";
28823 case X86ISD::CVTP2SI: return "X86ISD::CVTP2SI";
28824 case X86ISD::CVTP2UI: return "X86ISD::CVTP2UI";
28825 case X86ISD::MCVTP2SI: return "X86ISD::MCVTP2SI";
28826 case X86ISD::MCVTP2UI: return "X86ISD::MCVTP2UI";
28827 case X86ISD::CVTP2SI_RND: return "X86ISD::CVTP2SI_RND";
28828 case X86ISD::CVTP2UI_RND: return "X86ISD::CVTP2UI_RND";
28829 case X86ISD::CVTS2SI: return "X86ISD::CVTS2SI";
28830 case X86ISD::CVTS2UI: return "X86ISD::CVTS2UI";
28831 case X86ISD::CVTS2SI_RND: return "X86ISD::CVTS2SI_RND";
28832 case X86ISD::CVTS2UI_RND: return "X86ISD::CVTS2UI_RND";
28833 case X86ISD::CVTNE2PS2BF16: return "X86ISD::CVTNE2PS2BF16";
28834 case X86ISD::CVTNEPS2BF16: return "X86ISD::CVTNEPS2BF16";
28835 case X86ISD::MCVTNEPS2BF16: return "X86ISD::MCVTNEPS2BF16";
28836 case X86ISD::DPBF16PS: return "X86ISD::DPBF16PS";
28837 case X86ISD::LWPINS: return "X86ISD::LWPINS";
28838 case X86ISD::MGATHER: return "X86ISD::MGATHER";
28839 case X86ISD::MSCATTER: return "X86ISD::MSCATTER";
28840 case X86ISD::VPDPBUSD: return "X86ISD::VPDPBUSD";
28841 case X86ISD::VPDPBUSDS: return "X86ISD::VPDPBUSDS";
28842 case X86ISD::VPDPWSSD: return "X86ISD::VPDPWSSD";
28843 case X86ISD::VPDPWSSDS: return "X86ISD::VPDPWSSDS";
28844 case X86ISD::VPSHUFBITQMB: return "X86ISD::VPSHUFBITQMB";
28845 case X86ISD::GF2P8MULB: return "X86ISD::GF2P8MULB";
28846 case X86ISD::GF2P8AFFINEQB: return "X86ISD::GF2P8AFFINEQB";
28847 case X86ISD::GF2P8AFFINEINVQB: return "X86ISD::GF2P8AFFINEINVQB";
28848 case X86ISD::NT_CALL: return "X86ISD::NT_CALL";
28849 case X86ISD::NT_BRIND: return "X86ISD::NT_BRIND";
28850 case X86ISD::UMWAIT: return "X86ISD::UMWAIT";
28851 case X86ISD::TPAUSE: return "X86ISD::TPAUSE";
28852 case X86ISD::ENQCMD: return "X86ISD::ENQCMD";
28853 case X86ISD::ENQCMDS: return "X86ISD::ENQCMDS";
28854 case X86ISD::VP2INTERSECT: return "X86ISD::VP2INTERSECT";
28855 }
28856 return nullptr;
28857}
28858
28859/// Return true if the addressing mode represented by AM is legal for this
28860/// target, for a load/store of the specified type.
28861bool X86TargetLowering::isLegalAddressingMode(const DataLayout &DL,
28862 const AddrMode &AM, Type *Ty,
28863 unsigned AS,
28864 Instruction *I) const {
28865 // X86 supports extremely general addressing modes.
28866 CodeModel::Model M = getTargetMachine().getCodeModel();
28867
28868 // X86 allows a sign-extended 32-bit immediate field as a displacement.
28869 if (!X86::isOffsetSuitableForCodeModel(AM.BaseOffs, M, AM.BaseGV != nullptr))
28870 return false;
28871
28872 if (AM.BaseGV) {
28873 unsigned GVFlags = Subtarget.classifyGlobalReference(AM.BaseGV);
28874
28875 // If a reference to this global requires an extra load, we can't fold it.
28876 if (isGlobalStubReference(GVFlags))
28877 return false;
28878
28879 // If BaseGV requires a register for the PIC base, we cannot also have a
28880 // BaseReg specified.
28881 if (AM.HasBaseReg && isGlobalRelativeToPICBase(GVFlags))
28882 return false;
28883
28884 // If lower 4G is not available, then we must use rip-relative addressing.
28885 if ((M != CodeModel::Small || isPositionIndependent()) &&
28886 Subtarget.is64Bit() && (AM.BaseOffs || AM.Scale > 1))
28887 return false;
28888 }
28889
28890 switch (AM.Scale) {
28891 case 0:
28892 case 1:
28893 case 2:
28894 case 4:
28895 case 8:
28896 // These scales always work.
28897 break;
28898 case 3:
28899 case 5:
28900 case 9:
28901 // These scales are formed with basereg+scalereg. Only accept if there is
28902 // no basereg yet.
28903 if (AM.HasBaseReg)
28904 return false;
28905 break;
28906 default: // Other stuff never works.
28907 return false;
28908 }
28909
28910 return true;
28911}
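// Editorial note (not part of the original source): a minimal sketch of the
// address forms accepted above, using the AddrMode fields this hook receives.
// Scales 1/2/4/8 map directly onto x86 SIB addressing; 3/5/9 are accepted
// only when no base register is present because they are formed as
// index + index*2/4/8 (e.g. with LEA).
//
//   TargetLoweringBase::AddrMode AM;
//   AM.BaseGV = nullptr;
//   AM.BaseOffs = 16;     // sign-extended 32-bit displacement
//   AM.HasBaseReg = true;
//   AM.Scale = 4;         // base + index*4 + 16 -> legal on x86
//
// Passing AM.Scale = 3 together with AM.HasBaseReg = true would be rejected
// by the switch above.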
28912
28913bool X86TargetLowering::isVectorShiftByScalarCheap(Type *Ty) const {
28914 unsigned Bits = Ty->getScalarSizeInBits();
28915
28916 // 8-bit shifts are always expensive, but versions with a scalar amount aren't
28917 // noticeably cheaper than those without.
28918 if (Bits == 8)
28919 return false;
28920
28921 // XOP has v16i8/v8i16/v4i32/v2i64 variable vector shifts.
28922 if (Subtarget.hasXOP() && Ty->getPrimitiveSizeInBits() == 128 &&
28923 (Bits == 8 || Bits == 16 || Bits == 32 || Bits == 64))
28924 return false;
28925
28926 // AVX2 has vpsllv[dq] instructions (and other shifts) that make variable
28927 // shifts just as cheap as scalar ones.
28928 if (Subtarget.hasAVX2() && (Bits == 32 || Bits == 64))
28929 return false;
28930
28931 // AVX512BW has shifts such as vpsllvw.
28932 if (Subtarget.hasBWI() && Bits == 16)
28933 return false;
28934
28935 // Otherwise, it's significantly cheaper to shift by a scalar amount than by a
28936 // fully general vector.
28937 return true;
28938}
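// Editorial note (not part of the original source): the distinction drawn
// above is between shifting every lane by the same scalar amount (which x86
// can do with a single PSLLW/PSLLD/PSLLQ taking the count in an XMM register)
// and shifting each lane by its own amount, which needs VPSLLV* (AVX2),
// AVX-512BW word shifts, XOP shifts, or scalarization. Returning true tells
// callers that the uniform-scalar form is the cheaper one to aim for.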
28939
28940bool X86TargetLowering::isBinOp(unsigned Opcode) const {
28941 switch (Opcode) {
28942 // These are non-commutative binops.
28943 // TODO: Add more X86ISD opcodes once we have test coverage.
28944 case X86ISD::ANDNP:
28945 case X86ISD::PCMPGT:
28946 case X86ISD::FMAX:
28947 case X86ISD::FMIN:
28948 case X86ISD::FANDN:
28949 return true;
28950 }
28951
28952 return TargetLoweringBase::isBinOp(Opcode);
28953}
28954
28955bool X86TargetLowering::isCommutativeBinOp(unsigned Opcode) const {
28956 switch (Opcode) {
28957 // TODO: Add more X86ISD opcodes once we have test coverage.
28958 case X86ISD::PCMPEQ:
28959 case X86ISD::PMULDQ:
28960 case X86ISD::PMULUDQ:
28961 case X86ISD::FMAXC:
28962 case X86ISD::FMINC:
28963 case X86ISD::FAND:
28964 case X86ISD::FOR:
28965 case X86ISD::FXOR:
28966 return true;
28967 }
28968
28969 return TargetLoweringBase::isCommutativeBinOp(Opcode);
28970}
28971
28972bool X86TargetLowering::isTruncateFree(Type *Ty1, Type *Ty2) const {
28973 if (!Ty1->isIntegerTy() || !Ty2->isIntegerTy())
28974 return false;
28975 unsigned NumBits1 = Ty1->getPrimitiveSizeInBits();
28976 unsigned NumBits2 = Ty2->getPrimitiveSizeInBits();
28977 return NumBits1 > NumBits2;
28978}
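// Editorial note (not part of the original source): an illustration of why
// any narrowing integer truncation is free on x86 -- the narrower value is
// just the low subregister of the wider one, e.g.
//   truncate i64 %x to i32  ==>  use %eax instead of %rax, no instruction.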
28979
28980bool X86TargetLowering::allowTruncateForTailCall(Type *Ty1, Type *Ty2) const {
28981 if (!Ty1->isIntegerTy() || !Ty2->isIntegerTy())
28982 return false;
28983
28984 if (!isTypeLegal(EVT::getEVT(Ty1)))
28985 return false;
28986
28987 assert(Ty1->getPrimitiveSizeInBits() <= 64 && "i128 is probably not a noop");
28988
28989 // Assuming the caller doesn't have a zeroext or signext return parameter,
28990 // truncation all the way down to i1 is valid.
28991 return true;
28992}
28993
28994bool X86TargetLowering::isLegalICmpImmediate(int64_t Imm) const {
28995 return isInt<32>(Imm);
28996}
28997
28998bool X86TargetLowering::isLegalAddImmediate(int64_t Imm) const {
28999 // Can also use sub to handle negated immediates.
29000 return isInt<32>(Imm);
29001}
29002
29003bool X86TargetLowering::isLegalStoreImmediate(int64_t Imm) const {
29004 return isInt<32>(Imm);
29005}
29006
29007bool X86TargetLowering::isTruncateFree(EVT VT1, EVT VT2) const {
29008 if (!VT1.isInteger() || !VT2.isInteger())
29009 return false;
29010 unsigned NumBits1 = VT1.getSizeInBits();
29011 unsigned NumBits2 = VT2.getSizeInBits();
29012 return NumBits1 > NumBits2;
29013}
29014
29015bool X86TargetLowering::isZExtFree(Type *Ty1, Type *Ty2) const {
29016 // x86-64 implicitly zero-extends 32-bit results in 64-bit registers.
29017 return Ty1->isIntegerTy(32) && Ty2->isIntegerTy(64) && Subtarget.is64Bit();
29018}
29019
29020bool X86TargetLowering::isZExtFree(EVT VT1, EVT VT2) const {
29021 // x86-64 implicitly zero-extends 32-bit results in 64-bit registers.
29022 return VT1 == MVT::i32 && VT2 == MVT::i64 && Subtarget.is64Bit();
29023}
29024
29025bool X86TargetLowering::isZExtFree(SDValue Val, EVT VT2) const {
29026 EVT VT1 = Val.getValueType();
29027 if (isZExtFree(VT1, VT2))
29028 return true;
29029
29030 if (Val.getOpcode() != ISD::LOAD)
29031 return false;
29032
29033 if (!VT1.isSimple() || !VT1.isInteger() ||
29034 !VT2.isSimple() || !VT2.isInteger())
29035 return false;
29036
29037 switch (VT1.getSimpleVT().SimpleTy) {
29038 default: break;
29039 case MVT::i8:
29040 case MVT::i16:
29041 case MVT::i32:
29042 // X86 has 8, 16, and 32-bit zero-extending loads.
29043 return true;
29044 }
29045
29046 return false;
29047}
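// Editorial note (not part of the original source): an example of the load
// case handled above -- an i32 load that later feeds a zero-extension to i64
// is free on x86-64 because
//   movl (%rdi), %eax
// already clears bits 63:32 of %rax, so no separate zext instruction is
// emitted.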
29048
29049bool X86TargetLowering::isVectorLoadExtDesirable(SDValue ExtVal) const {
29050 EVT SrcVT = ExtVal.getOperand(0).getValueType();
29051
29052 // There is no extending load for vXi1.
29053 if (SrcVT.getScalarType() == MVT::i1)
29054 return false;
29055
29056 return true;
29057}
29058
29059bool
29060X86TargetLowering::isFMAFasterThanFMulAndFAdd(EVT VT) const {
29061 if (!Subtarget.hasAnyFMA())
29062 return false;
29063
29064 VT = VT.getScalarType();
29065
29066 if (!VT.isSimple())
29067 return false;
29068
29069 switch (VT.getSimpleVT().SimpleTy) {
29070 case MVT::f32:
29071 case MVT::f64:
29072 return true;
29073 default:
29074 break;
29075 }
29076
29077 return false;
29078}
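// Editorial note (not part of the original source): returning true here lets
// the DAG combiner contract, subject to the usual fast-math/contract rules,
// patterns such as
//   (fadd (fmul a, b), c)  -->  (fma a, b, c)
// for f32/f64 scalars and vectors when an FMA/FMA4 unit is available.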
29079
29080bool X86TargetLowering::isNarrowingProfitable(EVT VT1, EVT VT2) const {
29081 // i16 instructions are longer (0x66 prefix) and potentially slower.
29082 return !(VT1 == MVT::i32 && VT2 == MVT::i16);
29083}
29084
29085/// Targets can use this to indicate that they only support *some*
29086/// VECTOR_SHUFFLE operations, those with specific masks.
29087/// By default, if a target supports the VECTOR_SHUFFLE node, all mask values
29088/// are assumed to be legal.
29089bool X86TargetLowering::isShuffleMaskLegal(ArrayRef<int> M, EVT VT) const {
29090 if (!VT.isSimple())
29091 return false;
29092
29093 // Not for i1 vectors
29094 if (VT.getSimpleVT().getScalarType() == MVT::i1)
29095 return false;
29096
29097 // Very little shuffling can be done for 64-bit vectors right now.
29098 if (VT.getSimpleVT().getSizeInBits() == 64)
29099 return false;
29100
29101 // We only care that the types being shuffled are legal. The lowering can
29102 // handle any possible shuffle mask that results.
29103 return isTypeLegal(VT.getSimpleVT());
29104}
29105
29106bool X86TargetLowering::isVectorClearMaskLegal(ArrayRef<int> Mask,
29107 EVT VT) const {
29108 // Don't convert an 'and' into a shuffle that we don't directly support.
29109 // vpblendw and vpshufb for 256-bit vectors are not available on AVX1.
29110 if (!Subtarget.hasAVX2())
29111 if (VT == MVT::v32i8 || VT == MVT::v16i16)
29112 return false;
29113
29114 // Just delegate to the generic legality, clear masks aren't special.
29115 return isShuffleMaskLegal(Mask, VT);
29116}
29117
29118bool X86TargetLowering::areJTsAllowed(const Function *Fn) const {
29119 // If the subtarget is using retpolines, we must not generate jump tables.
29120 if (Subtarget.useRetpolineIndirectBranches())
29121 return false;
29122
29123 // Otherwise, fall back on the generic logic.
29124 return TargetLowering::areJTsAllowed(Fn);
29125}
29126
29127//===----------------------------------------------------------------------===//
29128// X86 Scheduler Hooks
29129//===----------------------------------------------------------------------===//
29130
29131/// Utility function to emit xbegin specifying the start of an RTM region.
29132static MachineBasicBlock *emitXBegin(MachineInstr &MI, MachineBasicBlock *MBB,
29133 const TargetInstrInfo *TII) {
29134 DebugLoc DL = MI.getDebugLoc();
29135
29136 const BasicBlock *BB = MBB->getBasicBlock();
29137 MachineFunction::iterator I = ++MBB->getIterator();
29138
29139 // For the v = xbegin(), we generate
29140 //
29141 // thisMBB:
29142 // xbegin fallMBB
29143 //
29144 // mainMBB:
29145 // s0 = -1
29146 //
29147 // fallBB:
29148 // eax = # XABORT_DEF
29149 // s1 = eax
29150 //
29151 // sinkMBB:
29152 // v = phi(s0/mainBB, s1/fallBB)
29153
29154 MachineBasicBlock *thisMBB = MBB;
29155 MachineFunction *MF = MBB->getParent();
29156 MachineBasicBlock *mainMBB = MF->CreateMachineBasicBlock(BB);
29157 MachineBasicBlock *fallMBB = MF->CreateMachineBasicBlock(BB);
29158 MachineBasicBlock *sinkMBB = MF->CreateMachineBasicBlock(BB);
29159 MF->insert(I, mainMBB);
29160 MF->insert(I, fallMBB);
29161 MF->insert(I, sinkMBB);
29162
29163 // Transfer the remainder of BB and its successor edges to sinkMBB.
29164 sinkMBB->splice(sinkMBB->begin(), MBB,
29165 std::next(MachineBasicBlock::iterator(MI)), MBB->end());
29166 sinkMBB->transferSuccessorsAndUpdatePHIs(MBB);
29167
29168 MachineRegisterInfo &MRI = MF->getRegInfo();
29169 Register DstReg = MI.getOperand(0).getReg();
29170 const TargetRegisterClass *RC = MRI.getRegClass(DstReg);
29171 Register mainDstReg = MRI.createVirtualRegister(RC);
29172 Register fallDstReg = MRI.createVirtualRegister(RC);
29173
29174 // thisMBB:
29175 // xbegin fallMBB
29176 // # fallthrough to mainMBB
29177 // # on abort, branch to fallMBB
29178 BuildMI(thisMBB, DL, TII->get(X86::XBEGIN_4)).addMBB(fallMBB);
29179 thisMBB->addSuccessor(mainMBB);
29180 thisMBB->addSuccessor(fallMBB);
29181
29182 // mainMBB:
29183 // mainDstReg := -1
29184 BuildMI(mainMBB, DL, TII->get(X86::MOV32ri), mainDstReg).addImm(-1);
29185 BuildMI(mainMBB, DL, TII->get(X86::JMP_1)).addMBB(sinkMBB);
29186 mainMBB->addSuccessor(sinkMBB);
29187
29188 // fallMBB:
29189 // ; pseudo instruction to model hardware's definition from XABORT
29190 // EAX := XABORT_DEF
29191 // fallDstReg := EAX
29192 BuildMI(fallMBB, DL, TII->get(X86::XABORT_DEF));
29193 BuildMI(fallMBB, DL, TII->get(TargetOpcode::COPY), fallDstReg)
29194 .addReg(X86::EAX);
29195 fallMBB->addSuccessor(sinkMBB);
29196
29197 // sinkMBB:
29198 // DstReg := phi(mainDstReg/mainBB, fallDstReg/fallBB)
29199 BuildMI(*sinkMBB, sinkMBB->begin(), DL, TII->get(X86::PHI), DstReg)
29200 .addReg(mainDstReg).addMBB(mainMBB)
29201 .addReg(fallDstReg).addMBB(fallMBB);
29202
29203 MI.eraseFromParent();
29204 return sinkMBB;
29205}
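// Editorial note (not part of the original source): the user-level shape of
// the control flow built above, assuming the RTM intrinsics from
// <immintrin.h>:
//
//   unsigned status = _xbegin();
//   if (status == _XBEGIN_STARTED) {
//     /* transactional region, ends with _xend() */
//   } else {
//     /* 'status' holds the abort code modeled by XABORT_DEF / EAX above */
//   }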
29206
29207
29208
29209MachineBasicBlock *
29210X86TargetLowering::EmitVAARG64WithCustomInserter(MachineInstr &MI,
29211 MachineBasicBlock *MBB) const {
29212 // Emit va_arg instruction on X86-64.
29213
29214 // Operands to this pseudo-instruction:
29215 // 0 ) Output : destination address (reg)
29216 // 1-5) Input : va_list address (addr, i64mem)
29217 // 6 ) ArgSize : Size (in bytes) of vararg type
29218 // 7 ) ArgMode : 0=overflow only, 1=use gp_offset, 2=use fp_offset
29219 // 8 ) Align : Alignment of type
29220 // 9 ) EFLAGS (implicit-def)
29221
29222 assert(MI.getNumOperands() == 10 && "VAARG_64 should have 10 operands!");
29223 static_assert(X86::AddrNumOperands == 5,
29224 "VAARG_64 assumes 5 address operands");
29225
29226 Register DestReg = MI.getOperand(0).getReg();
29227 MachineOperand &Base = MI.getOperand(1);
29228 MachineOperand &Scale = MI.getOperand(2);
29229 MachineOperand &Index = MI.getOperand(3);
29230 MachineOperand &Disp = MI.getOperand(4);
29231 MachineOperand &Segment = MI.getOperand(5);
29232 unsigned ArgSize = MI.getOperand(6).getImm();
29233 unsigned ArgMode = MI.getOperand(7).getImm();
29234 unsigned Align = MI.getOperand(8).getImm();
29235
29236 MachineFunction *MF = MBB->getParent();
29237
29238 // Memory Reference
29239 assert(MI.hasOneMemOperand() && "Expected VAARG_64 to have one memoperand");
29240
29241 MachineMemOperand *OldMMO = MI.memoperands().front();
29242
29243 // Clone the MMO into two separate MMOs for loading and storing
29244 MachineMemOperand *LoadOnlyMMO = MF->getMachineMemOperand(
29245 OldMMO, OldMMO->getFlags() & ~MachineMemOperand::MOStore);
29246 MachineMemOperand *StoreOnlyMMO = MF->getMachineMemOperand(
29247 OldMMO, OldMMO->getFlags() & ~MachineMemOperand::MOLoad);
29248
29249 // Machine Information
29250 const TargetInstrInfo *TII = Subtarget.getInstrInfo();
29251 MachineRegisterInfo &MRI = MBB->getParent()->getRegInfo();
29252 const TargetRegisterClass *AddrRegClass = getRegClassFor(MVT::i64);
29253 const TargetRegisterClass *OffsetRegClass = getRegClassFor(MVT::i32);
29254 DebugLoc DL = MI.getDebugLoc();
29255
29256 // struct va_list {
29257 // i32 gp_offset
29258 // i32 fp_offset
29259 // i64 overflow_area (address)
29260 // i64 reg_save_area (address)
29261 // }
29262 // sizeof(va_list) = 24
29263 // alignment(va_list) = 8
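// Editorial note (not part of the original source): for reference, the
// corresponding C declaration from the System V x86-64 ABI (the layout that
// the displacements 0/4/8/16 used below index into):
//
//   typedef struct {
//     unsigned int gp_offset;
//     unsigned int fp_offset;
//     void *overflow_arg_area;
//     void *reg_save_area;
//   } __va_list_tag;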
29264
29265 unsigned TotalNumIntRegs = 6;
29266 unsigned TotalNumXMMRegs = 8;
29267 bool UseGPOffset = (ArgMode == 1);
29268 bool UseFPOffset = (ArgMode == 2);
29269 unsigned MaxOffset = TotalNumIntRegs * 8 +
29270 (UseFPOffset ? TotalNumXMMRegs * 16 : 0);
29271
29272 /* Align ArgSize to a multiple of 8 */
29273 unsigned ArgSizeA8 = (ArgSize + 7) & ~7;
29274 bool NeedsAlign = (Align > 8);
29275
29276 MachineBasicBlock *thisMBB = MBB;
29277 MachineBasicBlock *overflowMBB;
29278 MachineBasicBlock *offsetMBB;
29279 MachineBasicBlock *endMBB;
29280
29281 unsigned OffsetDestReg = 0; // Argument address computed by offsetMBB
29282 unsigned OverflowDestReg = 0; // Argument address computed by overflowMBB
29283 unsigned OffsetReg = 0;
29284
29285 if (!UseGPOffset && !UseFPOffset) {
29286 // If we only pull from the overflow region, we don't create a branch.
29287 // We don't need to alter control flow.
29288 OffsetDestReg = 0; // unused
29289 OverflowDestReg = DestReg;
29290
29291 offsetMBB = nullptr;
29292 overflowMBB = thisMBB;
29293 endMBB = thisMBB;
29294 } else {
29295 // First emit code to check if gp_offset (or fp_offset) is below the bound.
29296 // If so, pull the argument from reg_save_area. (branch to offsetMBB)
29297 // If not, pull from overflow_area. (branch to overflowMBB)
29298 //
29299 // thisMBB
29300 // | .
29301 // | .
29302 // offsetMBB overflowMBB
29303 // | .
29304 // | .
29305 // endMBB
29306
29307 // Registers for the PHI in endMBB
29308 OffsetDestReg = MRI.createVirtualRegister(AddrRegClass);
29309 OverflowDestReg = MRI.createVirtualRegister(AddrRegClass);
29310
29311 const BasicBlock *LLVM_BB = MBB->getBasicBlock();
29312 overflowMBB = MF->CreateMachineBasicBlock(LLVM_BB);
29313 offsetMBB = MF->CreateMachineBasicBlock(LLVM_BB);
29314 endMBB = MF->CreateMachineBasicBlock(LLVM_BB);
29315
29316 MachineFunction::iterator MBBIter = ++MBB->getIterator();
29317
29318 // Insert the new basic blocks
29319 MF->insert(MBBIter, offsetMBB);
29320 MF->insert(MBBIter, overflowMBB);
29321 MF->insert(MBBIter, endMBB);
29322
29323 // Transfer the remainder of MBB and its successor edges to endMBB.
29324 endMBB->splice(endMBB->begin(), thisMBB,
29325 std::next(MachineBasicBlock::iterator(MI)), thisMBB->end());
29326 endMBB->transferSuccessorsAndUpdatePHIs(thisMBB);
29327
29328 // Make offsetMBB and overflowMBB successors of thisMBB
29329 thisMBB->addSuccessor(offsetMBB);
29330 thisMBB->addSuccessor(overflowMBB);
29331
29332 // endMBB is a successor of both offsetMBB and overflowMBB
29333 offsetMBB->addSuccessor(endMBB);
29334 overflowMBB->addSuccessor(endMBB);
29335
29336 // Load the offset value into a register
29337 OffsetReg = MRI.createVirtualRegister(OffsetRegClass);
29338 BuildMI(thisMBB, DL, TII->get(X86::MOV32rm), OffsetReg)
29339 .add(Base)
29340 .add(Scale)
29341 .add(Index)
29342 .addDisp(Disp, UseFPOffset ? 4 : 0)
29343 .add(Segment)
29344 .setMemRefs(LoadOnlyMMO);
29345
29346 // Check if there is enough room left to pull this argument.
29347 BuildMI(thisMBB, DL, TII->get(X86::CMP32ri))
29348 .addReg(OffsetReg)
29349 .addImm(MaxOffset + 8 - ArgSizeA8);
29350
29351 // Branch to "overflowMBB" if offset >= max
29352 // Fall through to "offsetMBB" otherwise
29353 BuildMI(thisMBB, DL, TII->get(X86::JCC_1))
29354 .addMBB(overflowMBB).addImm(X86::COND_AE);
29355 }
29356
29357 // In offsetMBB, emit code to use the reg_save_area.
29358 if (offsetMBB) {
29359 assert(OffsetReg != 0);
29360
29361 // Read the reg_save_area address.
29362 Register RegSaveReg = MRI.createVirtualRegister(AddrRegClass);
29363 BuildMI(offsetMBB, DL, TII->get(X86::MOV64rm), RegSaveReg)
29364 .add(Base)
29365 .add(Scale)
29366 .add(Index)
29367 .addDisp(Disp, 16)
29368 .add(Segment)
29369 .setMemRefs(LoadOnlyMMO);
29370
29371 // Zero-extend the offset
29372 Register OffsetReg64 = MRI.createVirtualRegister(AddrRegClass);
29373 BuildMI(offsetMBB, DL, TII->get(X86::SUBREG_TO_REG), OffsetReg64)
29374 .addImm(0)
29375 .addReg(OffsetReg)
29376 .addImm(X86::sub_32bit);
29377
29378 // Add the offset to the reg_save_area to get the final address.
29379 BuildMI(offsetMBB, DL, TII->get(X86::ADD64rr), OffsetDestReg)
29380 .addReg(OffsetReg64)
29381 .addReg(RegSaveReg);
29382
29383 // Compute the offset for the next argument
29384 Register NextOffsetReg = MRI.createVirtualRegister(OffsetRegClass);
29385 BuildMI(offsetMBB, DL, TII->get(X86::ADD32ri), NextOffsetReg)
29386 .addReg(OffsetReg)
29387 .addImm(UseFPOffset ? 16 : 8);
29388
29389 // Store it back into the va_list.
29390 BuildMI(offsetMBB, DL, TII->get(X86::MOV32mr))
29391 .add(Base)
29392 .add(Scale)
29393 .add(Index)
29394 .addDisp(Disp, UseFPOffset ? 4 : 0)
29395 .add(Segment)
29396 .addReg(NextOffsetReg)
29397 .setMemRefs(StoreOnlyMMO);
29398
29399 // Jump to endMBB
29400 BuildMI(offsetMBB, DL, TII->get(X86::JMP_1))
29401 .addMBB(endMBB);
29402 }
29403
29404 //
29405 // Emit code to use overflow area
29406 //
29407
29408 // Load the overflow_area address into a register.
29409 Register OverflowAddrReg = MRI.createVirtualRegister(AddrRegClass);
29410 BuildMI(overflowMBB, DL, TII->get(X86::MOV64rm), OverflowAddrReg)
29411 .add(Base)
29412 .add(Scale)
29413 .add(Index)
29414 .addDisp(Disp, 8)
29415 .add(Segment)
29416 .setMemRefs(LoadOnlyMMO);
29417
29418 // If we need to align it, do so. Otherwise, just copy the address
29419 // to OverflowDestReg.
29420 if (NeedsAlign) {
29421 // Align the overflow address
29422 assert(isPowerOf2_32(Align) && "Alignment must be a power of 2");
29423 Register TmpReg = MRI.createVirtualRegister(AddrRegClass);
29424
29425 // aligned_addr = (addr + (align-1)) & ~(align-1)
29426 BuildMI(overflowMBB, DL, TII->get(X86::ADD64ri32), TmpReg)
29427 .addReg(OverflowAddrReg)
29428 .addImm(Align-1);
29429
29430 BuildMI(overflowMBB, DL, TII->get(X86::AND64ri32), OverflowDestReg)
29431 .addReg(TmpReg)
29432 .addImm(~(uint64_t)(Align-1));
29433 } else {
29434 BuildMI(overflowMBB, DL, TII->get(TargetOpcode::COPY), OverflowDestReg)
29435 .addReg(OverflowAddrReg);
29436 }
29437
29438 // Compute the next overflow address after this argument.
29439 // (the overflow address should be kept 8-byte aligned)
29440 Register NextAddrReg = MRI.createVirtualRegister(AddrRegClass);
29441 BuildMI(overflowMBB, DL, TII->get(X86::ADD64ri32), NextAddrReg)
29442 .addReg(OverflowDestReg)
29443 .addImm(ArgSizeA8);
29444
29445 // Store the new overflow address.
29446 BuildMI(overflowMBB, DL, TII->get(X86::MOV64mr))
29447 .add(Base)
29448 .add(Scale)
29449 .add(Index)
29450 .addDisp(Disp, 8)
29451 .add(Segment)
29452 .addReg(NextAddrReg)
29453 .setMemRefs(StoreOnlyMMO);
29454
29455 // If we branched, emit the PHI to the front of endMBB.
29456 if (offsetMBB) {
29457 BuildMI(*endMBB, endMBB->begin(), DL,
29458 TII->get(X86::PHI), DestReg)
29459 .addReg(OffsetDestReg).addMBB(offsetMBB)
29460 .addReg(OverflowDestReg).addMBB(overflowMBB);
29461 }
29462
29463 // Erase the pseudo instruction
29464 MI.eraseFromParent();
29465
29466 return endMBB;
29467}
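// A minimal standalone sketch (illustrative only, not part of the original
// source; the helper names are invented) of the overflow-area arithmetic the
// function above emits when the argument needs extra alignment: round the
// overflow pointer up to a power-of-two alignment, then advance it by the
// argument size already rounded up to 8 bytes.
namespace va_arg_overflow_sketch {
// aligned_addr = (addr + (align - 1)) & ~(align - 1)   (ADD64ri32 + AND64ri32)
inline unsigned long long alignUp(unsigned long long Addr,
                                  unsigned long long Align) {
  return (Addr + (Align - 1)) & ~(Align - 1);
}
// The stored-back overflow pointer advances by ArgSizeA8 (ADD64ri32 + MOV64mr).
inline unsigned long long nextOverflowAddr(unsigned long long Addr,
                                           unsigned long long ArgSizeA8,
                                           unsigned long long Align) {
  return alignUp(Addr, Align) + ArgSizeA8;
}
} // namespace va_arg_overflow_sketch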
29468
29469MachineBasicBlock *X86TargetLowering::EmitVAStartSaveXMMRegsWithCustomInserter(
29470 MachineInstr &MI, MachineBasicBlock *MBB) const {
29471 // Emit code to save XMM registers to the stack. The ABI says that the
29472 // number of registers to save is given in %al, so it's theoretically
29473 // possible to do an indirect jump trick to avoid saving all of them;
29474 // however, this code takes a simpler approach and just executes all
29475 // of the stores if %al is non-zero. It's less code, it's probably
29476 // easier on the hardware branch predictor, and stores aren't all that
29477 // expensive anyway.
29478
29479 // Create the new basic blocks. One block contains all the XMM stores,
29480 // and one block is the final destination regardless of whether any
29481 // stores were performed.
29482 const BasicBlock *LLVM_BB = MBB->getBasicBlock();
29483 MachineFunction *F = MBB->getParent();
29484 MachineFunction::iterator MBBIter = ++MBB->getIterator();
29485 MachineBasicBlock *XMMSaveMBB = F->CreateMachineBasicBlock(LLVM_BB);
29486 MachineBasicBlock *EndMBB = F->CreateMachineBasicBlock(LLVM_BB);
29487 F->insert(MBBIter, XMMSaveMBB);
29488 F->insert(MBBIter, EndMBB);
29489
29490 // Transfer the remainder of MBB and its successor edges to EndMBB.
29491 EndMBB->splice(EndMBB->begin(), MBB,
29492 std::next(MachineBasicBlock::iterator(MI)), MBB->end());
29493 EndMBB->transferSuccessorsAndUpdatePHIs(MBB);
29494
29495 // The original block will now fall through to the XMM save block.
29496 MBB->addSuccessor(XMMSaveMBB);
29497 // The XMMSaveMBB will fall through to the end block.
29498 XMMSaveMBB->addSuccessor(EndMBB);
29499
29500 // Now add the instructions.
29501 const TargetInstrInfo *TII = Subtarget.getInstrInfo();
29502 DebugLoc DL = MI.getDebugLoc();
29503
29504 Register CountReg = MI.getOperand(0).getReg();
29505 int64_t RegSaveFrameIndex = MI.getOperand(1).getImm();
29506 int64_t VarArgsFPOffset = MI.getOperand(2).getImm();
29507
29508 if (!Subtarget.isCallingConvWin64(F->getFunction().getCallingConv())) {
29509 // If %al is 0, branch around the XMM save block.
29510 BuildMI(MBB, DL, TII->get(X86::TEST8rr)).addReg(CountReg).addReg(CountReg);
29511 BuildMI(MBB, DL, TII->get(X86::JCC_1)).addMBB(EndMBB).addImm(X86::COND_E);
29512 MBB->addSuccessor(EndMBB);
29513 }
29514
29515 // Make sure the last operand is EFLAGS, which gets clobbered by the branch
29516 // that was just emitted, but clearly shouldn't be "saved".
29517 assert((MI.getNumOperands() <= 3 ||
29518 !MI.getOperand(MI.getNumOperands() - 1).isReg() ||
29519 MI.getOperand(MI.getNumOperands() - 1).getReg() == X86::EFLAGS) &&
29520 "Expected last argument to be EFLAGS");
29521 unsigned MOVOpc = Subtarget.hasAVX() ? X86::VMOVAPSmr : X86::MOVAPSmr;
29522 // In the XMM save block, save all the XMM argument registers.
29523 for (int i = 3, e = MI.getNumOperands() - 1; i != e; ++i) {
29524 int64_t Offset = (i - 3) * 16 + VarArgsFPOffset;
29525 MachineMemOperand *MMO = F->getMachineMemOperand(
29526 MachinePointerInfo::getFixedStack(*F, RegSaveFrameIndex, Offset),
29527 MachineMemOperand::MOStore,
29528 /*Size=*/16, /*Align=*/16);
29529 BuildMI(XMMSaveMBB, DL, TII->get(MOVOpc))
29530 .addFrameIndex(RegSaveFrameIndex)
29531 .addImm(/*Scale=*/1)
29532 .addReg(/*IndexReg=*/0)
29533 .addImm(/*Disp=*/Offset)
29534 .addReg(/*Segment=*/0)
29535 .addReg(MI.getOperand(i).getReg())
29536 .addMemOperand(MMO);
29537 }
29538
29539 MI.eraseFromParent(); // The pseudo instruction is gone now.
29540
29541 return EndMBB;
29542}
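// Illustrative sketch (not from the original source; the helper name is
// invented) of the save-slot offset used in the loop above: XMM argument
// operands start at index 3, each register takes a 16-byte slot, and the
// area begins VarArgsFPOffset bytes into the register-save frame object.
namespace xmm_save_offset_sketch {
inline long long xmmSlotOffset(int OperandIdx, long long VarArgsFPOffset) {
  return (OperandIdx - 3) * 16LL + VarArgsFPOffset; // mirrors (i - 3) * 16 + VarArgsFPOffset
}
} // namespace xmm_save_offset_sketch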
29543
29544// The EFLAGS operand of SelectItr might be missing a kill marker
29545// because there were multiple uses of EFLAGS, and ISel didn't know
29546// which to mark. Figure out whether SelectItr should have had a
29547// kill marker, and set it if it should. Returns the correct kill
29548// marker value.
29549static bool checkAndUpdateEFLAGSKill(MachineBasicBlock::iterator SelectItr,
29550 MachineBasicBlock* BB,
29551 const TargetRegisterInfo* TRI) {
29552 // Scan forward through BB for a use/def of EFLAGS.
29553 MachineBasicBlock::iterator miI(std::next(SelectItr));
29554 for (MachineBasicBlock::iterator miE = BB->end(); miI != miE; ++miI) {
29555 const MachineInstr& mi = *miI;
29556 if (mi.readsRegister(X86::EFLAGS))
29557 return false;
29558 if (mi.definesRegister(X86::EFLAGS))
29559 break; // Should have kill-flag - update below.
29560 }
29561
29562 // If we hit the end of the block, check whether EFLAGS is live into a
29563 // successor.
29564 if (miI == BB->end()) {
29565 for (MachineBasicBlock::succ_iterator sItr = BB->succ_begin(),
29566 sEnd = BB->succ_end();
29567 sItr != sEnd; ++sItr) {
29568 MachineBasicBlock* succ = *sItr;
29569 if (succ->isLiveIn(X86::EFLAGS))
29570 return false;
29571 }
29572 }
29573
29574 // We found a def, or hit the end of the basic block and EFLAGS wasn't live
29575 // out. SelectMI should have a kill flag on EFLAGS.
29576 SelectItr->addRegisterKilled(X86::EFLAGS, TRI);
29577 return true;
29578}
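// A simplified standalone model (illustrative only; the types and names are
// invented) of the scan checkAndUpdateEFLAGSKill performs: walking forward
// from the select, a later read of EFLAGS means no kill flag; a clobbering
// def, or falling off the end of a block whose successors do not have EFLAGS
// live-in, means the select does kill EFLAGS.
namespace eflags_kill_sketch {
struct Inst { bool ReadsEFLAGS; bool DefsEFLAGS; };
struct Block { const Inst *End; bool SuccHasEFLAGSLiveIn; };
inline bool selectKillsEFLAGS(const Inst *AfterSelect, const Block &B) {
  for (const Inst *I = AfterSelect; I != B.End; ++I) {
    if (I->ReadsEFLAGS)
      return false;                // the flags are still needed later
    if (I->DefsEFLAGS)
      return true;                 // overwritten before any further read
  }
  return !B.SuccHasEFLAGSLiveIn;   // dead at block end unless live into a successor
}
} // namespace eflags_kill_sketch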
29579
29580// Return true if it is OK for this CMOV pseudo-opcode to be cascaded
29581// together with other CMOV pseudo-opcodes into a single basic-block with
29582// conditional jump around it.
29583static bool isCMOVPseudo(MachineInstr &MI) {
29584 switch (MI.getOpcode()) {
29585 case X86::CMOV_FR32:
29586 case X86::CMOV_FR32X:
29587 case X86::CMOV_FR64:
29588 case X86::CMOV_FR64X:
29589 case X86::CMOV_GR8:
29590 case X86::CMOV_GR16:
29591 case X86::CMOV_GR32:
29592 case X86::CMOV_RFP32:
29593 case X86::CMOV_RFP64:
29594 case X86::CMOV_RFP80:
29595 case X86::CMOV_VR128:
29596 case X86::CMOV_VR128X:
29597 case X86::CMOV_VR256:
29598 case X86::CMOV_VR256X:
29599 case X86::CMOV_VR512:
29600 case X86::CMOV_VK2:
29601 case X86::CMOV_VK4:
29602 case X86::CMOV_VK8:
29603 case X86::CMOV_VK16:
29604 case X86::CMOV_VK32:
29605 case X86::CMOV_VK64:
29606 return true;
29607
29608 default:
29609 return false;
29610 }
29611}
29612
29613 // Helper function which inserts PHI functions into SinkMBB:
29614 // %Result(i) = phi [ %FalseValue(i), FalseMBB ], [ %TrueValue(i), TrueMBB ],
29615 // where %FalseValue(i) and %TrueValue(i) are taken from the consecutive CMOVs
29616 // in the [MIItBegin, MIItEnd) range. It returns the MachineInstrBuilder for
29617 // the last PHI function inserted.
29618static MachineInstrBuilder createPHIsForCMOVsInSinkBB(
29619 MachineBasicBlock::iterator MIItBegin, MachineBasicBlock::iterator MIItEnd,
29620 MachineBasicBlock *TrueMBB, MachineBasicBlock *FalseMBB,
29621 MachineBasicBlock *SinkMBB) {
29622 MachineFunction *MF = TrueMBB->getParent();
29623 const TargetInstrInfo *TII = MF->getSubtarget().getInstrInfo();
29624 DebugLoc DL = MIItBegin->getDebugLoc();
29625
29626 X86::CondCode CC = X86::CondCode(MIItBegin->getOperand(3).getImm());
29627 X86::CondCode OppCC = X86::GetOppositeBranchCondition(CC);
29628
29629 MachineBasicBlock::iterator SinkInsertionPoint = SinkMBB->begin();
29630
29631 // As we are creating the PHIs, we have to be careful if there is more than
29632 // one. Later CMOVs may reference the results of earlier CMOVs, but later
29633 // PHIs have to reference the individual true/false inputs from earlier PHIs.
29634 // That also means that PHI construction must work forward from earlier to
29635 // later, and that the code must maintain a mapping from an earlier PHI's
29636 // destination register to the registers that went into that PHI.
29637 DenseMap<unsigned, std::pair<unsigned, unsigned>> RegRewriteTable;
29638 MachineInstrBuilder MIB;
29639
29640 for (MachineBasicBlock::iterator MIIt = MIItBegin; MIIt != MIItEnd; ++MIIt) {
29641 Register DestReg = MIIt->getOperand(0).getReg();
29642 Register Op1Reg = MIIt->getOperand(1).getReg();
29643 Register Op2Reg = MIIt->getOperand(2).getReg();
29644
29645 // If this CMOV we are generating is the opposite condition from
29646 // the jump we generated, then we have to swap the operands for the
29647 // PHI that is going to be generated.
29648 if (MIIt->getOperand(3).getImm() == OppCC)
29649 std::swap(Op1Reg, Op2Reg);
29650
29651 if (RegRewriteTable.find(Op1Reg) != RegRewriteTable.end())
29652 Op1Reg = RegRewriteTable[Op1Reg].first;
29653
29654 if (RegRewriteTable.find(Op2Reg) != RegRewriteTable.end())
29655 Op2Reg = RegRewriteTable[Op2Reg].second;
29656
29657 MIB = BuildMI(*SinkMBB, SinkInsertionPoint, DL, TII->get(X86::PHI), DestReg)
29658 .addReg(Op1Reg)
29659 .addMBB(FalseMBB)
29660 .addReg(Op2Reg)
29661 .addMBB(TrueMBB);
29662
29663 // Add this PHI to the rewrite table.
29664 RegRewriteTable[DestReg] = std::make_pair(Op1Reg, Op2Reg);
29665 }
29666
29667 return MIB;
29668}
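// An illustrative standalone sketch (not part of the original source; the
// types and names are invented) of the operand renaming above: when a later
// CMOV uses the result of an earlier CMOV in the chain, the generated PHI
// must instead use the corresponding true/false input recorded for that
// earlier PHI. A linear scan over the PHIs created so far stands in for the
// RegRewriteTable DenseMap, and the OppCC operand swap is omitted for brevity.
namespace cmov_phi_rewrite_sketch {
struct Cmov { unsigned Dest, FalseReg, TrueReg; };
struct PhiInputs { unsigned FromFalseMBB, FromTrueMBB; };
inline PhiInputs rewriteInputs(const Cmov *Chain, const PhiInputs *Created,
                               unsigned I) {
  PhiInputs In = {Chain[I].FalseReg, Chain[I].TrueReg};
  for (unsigned J = 0; J < I; ++J) {
    if (In.FromFalseMBB == Chain[J].Dest)
      In.FromFalseMBB = Created[J].FromFalseMBB; // earlier PHI's false-edge input
    if (In.FromTrueMBB == Chain[J].Dest)
      In.FromTrueMBB = Created[J].FromTrueMBB;   // earlier PHI's true-edge input
  }
  return In;
}
} // namespace cmov_phi_rewrite_sketch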
29669
29670 // Lower cascaded selects in the form of (SecondCMOV (FirstCMOV F, T, cc1), T, cc2).
29671MachineBasicBlock *
29672X86TargetLowering::EmitLoweredCascadedSelect(MachineInstr &FirstCMOV,
29673 MachineInstr &SecondCascadedCMOV,
29674 MachineBasicBlock *ThisMBB) const {
29675 const TargetInstrInfo *TII = Subtarget.getInstrInfo();
29676 DebugLoc DL = FirstCMOV.getDebugLoc();
29677
29678 // We lower cascaded CMOVs such as
29679 //
29680 // (SecondCascadedCMOV (FirstCMOV F, T, cc1), T, cc2)
29681 //
29682 // to two successive branches.
29683 //
29684 // Without this, we would add a PHI between the two jumps, which ends up
29685 // creating a few copies all around. For instance, for
29686 //
29687 // (sitofp (zext (fcmp une)))
29688 //
29689 // we would generate:
29690 //
29691 // ucomiss %xmm1, %xmm0
29692 // movss <1.0f>, %xmm0
29693 // movaps %xmm0, %xmm1
29694 // jne .LBB5_2
29695 // xorps %xmm1, %xmm1
29696 // .LBB5_2:
29697 // jp .LBB5_4
29698 // movaps %xmm1, %xmm0
29699 // .LBB5_4:
29700 // retq
29701 //
29702 // because this custom-inserter would have generated:
29703 //
29704 // A
29705 // | \
29706 // | B
29707 // | /
29708 // C
29709 // | \
29710 // | D
29711 // | /
29712 // E
29713 //
29714 // A: X = ...; Y = ...
29715 // B: empty
29716 // C: Z = PHI [X, A], [Y, B]
29717 // D: empty
29718 // E: PHI [X, C], [Z, D]
29719 //
29720 // If we lower both CMOVs in a single step, we can instead generate:
29721 //
29722 // A
29723 // | \
29724 // | C
29725 // | /|
29726 // |/ |
29727 // | |
29728 // | D
29729 // | /
29730 // E
29731 //
29732 // A: X = ...; Y = ...
29733 // D: empty
29734 // E: PHI [X, A], [X, C], [Y, D]
29735 //
29736 // Which, in our sitofp/fcmp example, gives us something like:
29737 //
29738 // ucomiss %xmm1, %xmm0
29739 // movss <1.0f>, %xmm0
29740 // jne .LBB5_4
29741 // jp .LBB5_4
29742 // xorps %xmm0, %xmm0
29743 // .LBB5_4:
29744 // retq
29745 //
29746
29747 // We lower cascaded CMOV into two successive branches to the same block.
29748 // EFLAGS is used by both, so mark it as live in the second.
29749 const BasicBlock *LLVM_BB = ThisMBB->getBasicBlock();
29750 MachineFunction *F = ThisMBB->getParent();
29751 MachineBasicBlock *FirstInsertedMBB = F->CreateMachineBasicBlock(LLVM_BB);
29752 MachineBasicBlock *SecondInsertedMBB = F->CreateMachineBasicBlock(LLVM_BB);
29753 MachineBasicBlock *SinkMBB = F->CreateMachineBasicBlock(LLVM_BB);
29754
29755 MachineFunction::iterator It = ++ThisMBB->getIterator();
29756 F->insert(It, FirstInsertedMBB);
29757 F->insert(It, SecondInsertedMBB);
29758 F->insert(It, SinkMBB);
29759
29760 // For a cascaded CMOV, we lower it to two successive branches to
29761 // the same block (SinkMBB). EFLAGS is used by both, so mark it as live in
29762 // the FirstInsertedMBB.
29763 FirstInsertedMBB->addLiveIn(X86::EFLAGS);
29764
29765 // If the EFLAGS register isn't dead in the terminator, then claim that it's
29766 // live into the sink and copy blocks.
29767 const TargetRegisterInfo *TRI = Subtarget.getRegisterInfo();
29768 if (!SecondCascadedCMOV.killsRegister(X86::EFLAGS) &&
29769 !checkAndUpdateEFLAGSKill(SecondCascadedCMOV, ThisMBB, TRI)) {
29770 SecondInsertedMBB->addLiveIn(X86::EFLAGS);
29771 SinkMBB->addLiveIn(X86::EFLAGS);
29772 }
29773
29774 // Transfer the remainder of ThisMBB and its successor edges to SinkMBB.
29775 SinkMBB->splice(SinkMBB->begin(), ThisMBB,
29776 std::next(MachineBasicBlock::iterator(FirstCMOV)),
29777 ThisMBB->end());
29778 SinkMBB->transferSuccessorsAndUpdatePHIs(ThisMBB);
29779
29780 // Fallthrough block for ThisMBB.
29781 ThisMBB->addSuccessor(FirstInsertedMBB);
29782 // The true block target of the first branch is always SinkMBB.
29783 ThisMBB->addSuccessor(SinkMBB);
29784 // Fallthrough block for FirstInsertedMBB.
29785 FirstInsertedMBB->addSuccessor(SecondInsertedMBB);
29786 // The true block for the branch of FirstInsertedMBB.
29787 FirstInsertedMBB->addSuccessor(SinkMBB);
29788 // This is fallthrough.
29789 SecondInsertedMBB->addSuccessor(SinkMBB);
29790
29791 // Create the conditional branch instructions.
29792 X86::CondCode FirstCC = X86::CondCode(FirstCMOV.getOperand(3).getImm());
29793 BuildMI(ThisMBB, DL, TII->get(X86::JCC_1)).addMBB(SinkMBB).addImm(FirstCC);
29794
29795 X86::CondCode SecondCC =
29796 X86::CondCode(SecondCascadedCMOV.getOperand(3).getImm());
29797 BuildMI(FirstInsertedMBB, DL, TII->get(X86::JCC_1)).addMBB(SinkMBB).addImm(SecondCC);
29798
29799 // SinkMBB:
29800 // %Result = phi [ %FalseValue, SecondInsertedMBB ], [ %TrueValue, ThisMBB ]
29801 Register DestReg = FirstCMOV.getOperand(0).getReg();
29802 Register Op1Reg = FirstCMOV.getOperand(1).getReg();
29803 Register Op2Reg = FirstCMOV.getOperand(2).getReg();
29804 MachineInstrBuilder MIB =
29805 BuildMI(*SinkMBB, SinkMBB->begin(), DL, TII->get(X86::PHI), DestReg)
29806 .addReg(Op1Reg)
29807 .addMBB(SecondInsertedMBB)
29808 .addReg(Op2Reg)
29809 .addMBB(ThisMBB);
29810
29811 // SecondInsertedMBB provides the same incoming value as
29812 // FirstInsertedMBB (the True operand of the SELECT_CC/CMOV nodes).
29813 MIB.addReg(FirstCMOV.getOperand(2).getReg()).addMBB(FirstInsertedMBB);
29814 // Copy the PHI result to the register defined by the second CMOV.
29815 BuildMI(*SinkMBB, std::next(MachineBasicBlock::iterator(MIB.getInstr())), DL,
29816 TII->get(TargetOpcode::COPY),
29817 SecondCascadedCMOV.getOperand(0).getReg())
29818 .addReg(FirstCMOV.getOperand(0).getReg());
29819
29820 // Now remove the CMOVs.
29821 FirstCMOV.eraseFromParent();
29822 SecondCascadedCMOV.eraseFromParent();
29823
29824 return SinkMBB;
29825}
29826
29827MachineBasicBlock *
29828X86TargetLowering::EmitLoweredSelect(MachineInstr &MI,
29829 MachineBasicBlock *ThisMBB) const {
29830 const TargetInstrInfo *TII = Subtarget.getInstrInfo();
29831 DebugLoc DL = MI.getDebugLoc();
29832
29833 // To "insert" a SELECT_CC instruction, we actually have to insert the
29834 // diamond control-flow pattern. The incoming instruction knows the
29835 // destination vreg to set, the condition code register to branch on, the
29836 // true/false values to select between and a branch opcode to use.
29837
29838 // ThisMBB:
29839 // ...
29840 // TrueVal = ...
29841 // cmpTY ccX, r1, r2
29842 // bCC copy1MBB
29843 // fallthrough --> FalseMBB
29844
29845 // This code lowers all pseudo-CMOV instructions. Generally it lowers these
29846 // as described above, by inserting a BB, and then making a PHI at the join
29847 // point to select the true and false operands of the CMOV in the PHI.
29848 //
29849 // The code also handles two different cases of multiple CMOV opcodes
29850 // in a row.
29851 //
29852 // Case 1:
29853 // In this case, there are multiple CMOVs in a row, all of which are based on
29854 // the same condition setting (or the exact opposite condition setting).
29855 // In this case we can lower all the CMOVs using a single inserted BB, and
29856 // then make a number of PHIs at the join point to model the CMOVs. The only
29857 // trickiness here is that in a case like:
29858 //
29859 // t2 = CMOV cond1 t1, f1
29860 // t3 = CMOV cond1 t2, f2
29861 //
29862 // when rewriting this into PHIs, we have to perform some renaming on the
29863 // temps since you cannot have a PHI operand refer to a PHI result earlier
29864 // in the same block. The "simple" but wrong lowering would be:
29865 //
29866 // t2 = PHI t1(BB1), f1(BB2)
29867 // t3 = PHI t2(BB1), f2(BB2)
29868 //
29869 // but clearly t2 is not defined in BB1, so that is incorrect. The proper
29870 // renaming is to note that on the path through BB1, t2 is really just a
29871 // copy of t1, and do that renaming, properly generating:
29872 //
29873 // t2 = PHI t1(BB1), f1(BB2)
29874 // t3 = PHI t1(BB1), f2(BB2)
29875 //
29876 // Case 2:
29877 // CMOV ((CMOV F, T, cc1), T, cc2) is checked here and handled by a separate
29878 // function - EmitLoweredCascadedSelect.
29879
29880 X86::CondCode CC = X86::CondCode(MI.getOperand(3).getImm());
29881 X86::CondCode OppCC = X86::GetOppositeBranchCondition(CC);
29882 MachineInstr *LastCMOV = &MI;
29883 MachineBasicBlock::iterator NextMIIt = MachineBasicBlock::iterator(MI);
29884
29885 // Check for case 1, where there are multiple CMOVs with the same condition
29886 // first. Of the two cases of multiple CMOV lowerings, case 1 reduces the
29887 // number of jumps the most.
29888
29889 if (isCMOVPseudo(MI)) {
29890 // See if we have a string of CMOVs with the same condition. Skip over
29891 // intervening debug insts.
29892 while (NextMIIt != ThisMBB->end() && isCMOVPseudo(*NextMIIt) &&
29893 (NextMIIt->getOperand(3).getImm() == CC ||
29894 NextMIIt->getOperand(3).getImm() == OppCC)) {
29895 LastCMOV = &*NextMIIt;
29896 ++NextMIIt;
29897 NextMIIt = skipDebugInstructionsForward(NextMIIt, ThisMBB->end());
29898 }
29899 }
29900
29901 // Check for case 2, but only if we didn't already find case 1,
29902 // as indicated by LastCMOV still pointing at MI.
29903 if (LastCMOV == &MI && NextMIIt != ThisMBB->end() &&
29904 NextMIIt->getOpcode() == MI.getOpcode() &&
29905 NextMIIt->getOperand(2).getReg() == MI.getOperand(2).getReg() &&
29906 NextMIIt->getOperand(1).getReg() == MI.getOperand(0).getReg() &&
29907 NextMIIt->getOperand(1).isKill()) {
29908 return EmitLoweredCascadedSelect(MI, *NextMIIt, ThisMBB);
29909 }
29910
29911 const BasicBlock *LLVM_BB = ThisMBB->getBasicBlock();
29912 MachineFunction *F = ThisMBB->getParent();
29913 MachineBasicBlock *FalseMBB = F->CreateMachineBasicBlock(LLVM_BB);
29914 MachineBasicBlock *SinkMBB = F->CreateMachineBasicBlock(LLVM_BB);
29915
29916 MachineFunction::iterator It = ++ThisMBB->getIterator();
29917 F->insert(It, FalseMBB);
29918 F->insert(It, SinkMBB);
29919
29920 // If the EFLAGS register isn't dead in the terminator, then claim that it's
29921 // live into the sink and copy blocks.
29922 const TargetRegisterInfo *TRI = Subtarget.getRegisterInfo();
29923 if (!LastCMOV->killsRegister(X86::EFLAGS) &&
29924 !checkAndUpdateEFLAGSKill(LastCMOV, ThisMBB, TRI)) {
29925 FalseMBB->addLiveIn(X86::EFLAGS);
29926 SinkMBB->addLiveIn(X86::EFLAGS);
29927 }
29928
29929 // Transfer any debug instructions inside the CMOV sequence to the sunk block.
29930 auto DbgEnd = MachineBasicBlock::iterator(LastCMOV);
29931 auto DbgIt = MachineBasicBlock::iterator(MI);
29932 while (DbgIt != DbgEnd) {
29933 auto Next = std::next(DbgIt);
29934 if (DbgIt->isDebugInstr())
29935 SinkMBB->push_back(DbgIt->removeFromParent());
29936 DbgIt = Next;
29937 }
29938
29939 // Transfer the remainder of ThisMBB and its successor edges to SinkMBB.
29940 SinkMBB->splice(SinkMBB->end(), ThisMBB,
29941 std::next(MachineBasicBlock::iterator(LastCMOV)),
29942 ThisMBB->end());
29943 SinkMBB->transferSuccessorsAndUpdatePHIs(ThisMBB);
29944
29945 // Fallthrough block for ThisMBB.
29946 ThisMBB->addSuccessor(FalseMBB);
29947 // The true block target of the first (or only) branch is always a SinkMBB.
29948 ThisMBB->addSuccessor(SinkMBB);
29949 // Fallthrough block for FalseMBB.
29950 FalseMBB->addSuccessor(SinkMBB);
29951
29952 // Create the conditional branch instruction.
29953 BuildMI(ThisMBB, DL, TII->get(X86::JCC_1)).addMBB(SinkMBB).addImm(CC);
29954
29955 // SinkMBB:
29956 // %Result = phi [ %FalseValue, FalseMBB ], [ %TrueValue, ThisMBB ]
29957 // ...
29958 MachineBasicBlock::iterator MIItBegin = MachineBasicBlock::iterator(MI);
29959 MachineBasicBlock::iterator MIItEnd =
29960 std::next(MachineBasicBlock::iterator(LastCMOV));
29961 createPHIsForCMOVsInSinkBB(MIItBegin, MIItEnd, ThisMBB, FalseMBB, SinkMBB);
29962
29963 // Now remove the CMOV(s).
29964 ThisMBB->erase(MIItBegin, MIItEnd);
29965
29966 return SinkMBB;
29967}
29968
29969MachineBasicBlock *
29970X86TargetLowering::EmitLoweredSegAlloca(MachineInstr &MI,
29971 MachineBasicBlock *BB) const {
29972 MachineFunction *MF = BB->getParent();
29973 const TargetInstrInfo *TII = Subtarget.getInstrInfo();
29974 DebugLoc DL = MI.getDebugLoc();
29975 const BasicBlock *LLVM_BB = BB->getBasicBlock();
29976
29977 assert(MF->shouldSplitStack());
29978
29979 const bool Is64Bit = Subtarget.is64Bit();
29980 const bool IsLP64 = Subtarget.isTarget64BitLP64();
29981
29982 const unsigned TlsReg = Is64Bit ? X86::FS : X86::GS;
29983 const unsigned TlsOffset = IsLP64 ? 0x70 : Is64Bit ? 0x40 : 0x30;
29984
29985 // BB:
29986 // ... [Till the alloca]
29987 // If stacklet is not large enough, jump to mallocMBB
29988 //
29989 // bumpMBB:
29990 // Allocate by subtracting from RSP
29991 // Jump to continueMBB
29992 //
29993 // mallocMBB:
29994 // Allocate by call to runtime
29995 //
29996 // continueMBB:
29997 // ...
29998 // [rest of original BB]
29999 //
30000
30001 MachineBasicBlock *mallocMBB = MF->CreateMachineBasicBlock(LLVM_BB);
30002 MachineBasicBlock *bumpMBB = MF->CreateMachineBasicBlock(LLVM_BB);
30003 MachineBasicBlock *continueMBB = MF->CreateMachineBasicBlock(LLVM_BB);
30004
30005 MachineRegisterInfo &MRI = MF->getRegInfo();
30006 const TargetRegisterClass *AddrRegClass =
30007 getRegClassFor(getPointerTy(MF->getDataLayout()));
30008
30009 unsigned mallocPtrVReg = MRI.createVirtualRegister(AddrRegClass),
30010 bumpSPPtrVReg = MRI.createVirtualRegister(AddrRegClass),
30011 tmpSPVReg = MRI.createVirtualRegister(AddrRegClass),
30012 SPLimitVReg = MRI.createVirtualRegister(AddrRegClass),
30013 sizeVReg = MI.getOperand(1).getReg(),
30014 physSPReg =
30015 IsLP64 || Subtarget.isTargetNaCl64() ? X86::RSP : X86::ESP;
30016
30017 MachineFunction::iterator MBBIter = ++BB->getIterator();
30018
30019 MF->insert(MBBIter, bumpMBB);
30020 MF->insert(MBBIter, mallocMBB);
30021 MF->insert(MBBIter, continueMBB);
30022
30023 continueMBB->splice(continueMBB->begin(), BB,
30024 std::next(MachineBasicBlock::iterator(MI)), BB->end());
30025 continueMBB->transferSuccessorsAndUpdatePHIs(BB);
30026
30027 // Add code to the main basic block to check if the stack limit has been hit,
30028 // and if so, jump to mallocMBB, otherwise to bumpMBB.
30029 BuildMI(BB, DL, TII->get(TargetOpcode::COPY), tmpSPVReg).addReg(physSPReg);
30030 BuildMI(BB, DL, TII->get(IsLP64 ? X86::SUB64rr:X86::SUB32rr), SPLimitVReg)
30031 .addReg(tmpSPVReg).addReg(sizeVReg);
30032 BuildMI(BB, DL, TII->get(IsLP64 ? X86::CMP64mr:X86::CMP32mr))
30033 .addReg(0).addImm(1).addReg(0).addImm(TlsOffset).addReg(TlsReg)
30034 .addReg(SPLimitVReg);
30035 BuildMI(BB, DL, TII->get(X86::JCC_1)).addMBB(mallocMBB).addImm(X86::COND_G);
30036
30037 // bumpMBB simply decreases the stack pointer, since we know the current
30038 // stacklet has enough space.
30039 BuildMI(bumpMBB, DL, TII->get(TargetOpcode::COPY), physSPReg)
30040 .addReg(SPLimitVReg);
30041 BuildMI(bumpMBB, DL, TII->get(TargetOpcode::COPY), bumpSPPtrVReg)
30042 .addReg(SPLimitVReg);
30043 BuildMI(bumpMBB, DL, TII->get(X86::JMP_1)).addMBB(continueMBB);
30044
30045 // Calls into a routine in libgcc to allocate more space from the heap.
30046 const uint32_t *RegMask =
30047 Subtarget.getRegisterInfo()->getCallPreservedMask(*MF, CallingConv::C);
30048 if (IsLP64) {
30049 BuildMI(mallocMBB, DL, TII->get(X86::MOV64rr), X86::RDI)
30050 .addReg(sizeVReg);
30051 BuildMI(mallocMBB, DL, TII->get(X86::CALL64pcrel32))
30052 .addExternalSymbol("__morestack_allocate_stack_space")
30053 .addRegMask(RegMask)
30054 .addReg(X86::RDI, RegState::Implicit)
30055 .addReg(X86::RAX, RegState::ImplicitDefine);
30056 } else if (Is64Bit) {
30057 BuildMI(mallocMBB, DL, TII->get(X86::MOV32rr), X86::EDI)
30058 .addReg(sizeVReg);
30059 BuildMI(mallocMBB, DL, TII->get(X86::CALL64pcrel32))
30060 .addExternalSymbol("__morestack_allocate_stack_space")
30061 .addRegMask(RegMask)
30062 .addReg(X86::EDI, RegState::Implicit)
30063 .addReg(X86::EAX, RegState::ImplicitDefine);
30064 } else {
30065 BuildMI(mallocMBB, DL, TII->get(X86::SUB32ri), physSPReg).addReg(physSPReg)
30066 .addImm(12);
30067 BuildMI(mallocMBB, DL, TII->get(X86::PUSH32r)).addReg(sizeVReg);
30068 BuildMI(mallocMBB, DL, TII->get(X86::CALLpcrel32))
30069 .addExternalSymbol("__morestack_allocate_stack_space")
30070 .addRegMask(RegMask)
30071 .addReg(X86::EAX, RegState::ImplicitDefine);
30072 }
30073
30074 if (!Is64Bit)
30075 BuildMI(mallocMBB, DL, TII->get(X86::ADD32ri), physSPReg).addReg(physSPReg)
30076 .addImm(16);
30077
30078 BuildMI(mallocMBB, DL, TII->get(TargetOpcode::COPY), mallocPtrVReg)
30079 .addReg(IsLP64 ? X86::RAX : X86::EAX);
30080 BuildMI(mallocMBB, DL, TII->get(X86::JMP_1)).addMBB(continueMBB);
30081
30082 // Set up the CFG correctly.
30083 BB->addSuccessor(bumpMBB);
30084 BB->addSuccessor(mallocMBB);
30085 mallocMBB->addSuccessor(continueMBB);
30086 bumpMBB->addSuccessor(continueMBB);
30087
30088 // Take care of the PHI nodes.
30089 BuildMI(*continueMBB, continueMBB->begin(), DL, TII->get(X86::PHI),
30090 MI.getOperand(0).getReg())
30091 .addReg(mallocPtrVReg)
30092 .addMBB(mallocMBB)
30093 .addReg(bumpSPPtrVReg)
30094 .addMBB(bumpMBB);
30095
30096 // Delete the original pseudo instruction.
30097 MI.eraseFromParent();
30098
30099 // And we're done.
30100 return continueMBB;
30101}
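// A minimal sketch (illustrative only; the names are invented) of the
// split-stack test emitted above: the candidate stack pointer after the
// allocation is compared against the per-thread stacklet limit held in TLS
// (%fs:0x70 on LP64, %fs:0x40 on x32, %gs:0x30 on 32-bit); if the limit is
// greater under a signed compare (CMP mem,reg followed by JG), the runtime
// path through __morestack_allocate_stack_space is taken instead of simply
// bumping the stack pointer.
namespace seg_alloca_sketch {
inline bool needsRuntimeAllocation(long long SP, long long Size,
                                   long long TlsStackLimit) {
  long long NewSP = SP - Size;    // SUB64rr / SUB32rr above
  return TlsStackLimit > NewSP;   // CMP [tls:offset], NewSP ; JG mallocMBB
}
} // namespace seg_alloca_sketch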
30102
30103MachineBasicBlock *
30104X86TargetLowering::EmitLoweredCatchRet(MachineInstr &MI,
30105 MachineBasicBlock *BB) const {
30106 MachineFunction *MF = BB->getParent();
30107 const TargetInstrInfo &TII = *Subtarget.getInstrInfo();
30108 MachineBasicBlock *TargetMBB = MI.getOperand(0).getMBB();
30109 DebugLoc DL = MI.getDebugLoc();
30110
30111 assert(!isAsynchronousEHPersonality(
30112 classifyEHPersonality(MF->getFunction().getPersonalityFn())) &&
30113 "SEH does not use catchret!");
30114
30115 // Only 32-bit EH needs to worry about manually restoring stack pointers.
30116 if (!Subtarget.is32Bit())
30117 return BB;
30118
30119 // C++ EH creates a new target block to hold the restore code, and wires up
30120 // the new block to the return destination with a normal JMP_4.
30121 MachineBasicBlock *RestoreMBB =
30122 MF->CreateMachineBasicBlock(BB->getBasicBlock());
30123 assert(BB->succ_size() == 1);
30124 MF->insert(std::next(BB->getIterator()), RestoreMBB);
30125 RestoreMBB->transferSuccessorsAndUpdatePHIs(BB);
30126 BB->addSuccessor(RestoreMBB);
30127 MI.getOperand(0).setMBB(RestoreMBB);
30128
30129 auto RestoreMBBI = RestoreMBB->begin();
30130 BuildMI(*RestoreMBB, RestoreMBBI, DL, TII.get(X86::EH_RESTORE));
30131 BuildMI(*RestoreMBB, RestoreMBBI, DL, TII.get(X86::JMP_4)).addMBB(TargetMBB);
30132 return BB;
30133}
30134
30135MachineBasicBlock *
30136X86TargetLowering::EmitLoweredCatchPad(MachineInstr &MI,
30137 MachineBasicBlock *BB) const {
30138 MachineFunction *MF = BB->getParent();
30139 const Constant *PerFn = MF->getFunction().getPersonalityFn();
30140 bool IsSEH = isAsynchronousEHPersonality(classifyEHPersonality(PerFn));
30141 // Only 32-bit SEH requires special handling for catchpad.
30142 if (IsSEH && Subtarget.is32Bit()) {
30143 const TargetInstrInfo &TII = *Subtarget.getInstrInfo();
30144 DebugLoc DL = MI.getDebugLoc();
30145 BuildMI(*BB, MI, DL, TII.get(X86::EH_RESTORE));
30146 }
30147 MI.eraseFromParent();
30148 return BB;
30149}
30150
30151MachineBasicBlock *
30152X86TargetLowering::EmitLoweredTLSAddr(MachineInstr &MI,
30153 MachineBasicBlock *BB) const {
30154 // So, here we replace TLSADDR with the sequence:
30155 // adjust_stackdown -> TLSADDR -> adjust_stackup.
30156 // We need this because TLSADDR is lowered into calls
30157 // inside MC; therefore, without the two markers, shrink-wrapping
30158 // may push the prologue/epilogue past them.
30159 const TargetInstrInfo &TII = *Subtarget.getInstrInfo();
30160 DebugLoc DL = MI.getDebugLoc();
30161 MachineFunction &MF = *BB->getParent();
30162
30163 // Emit CALLSEQ_START right before the instruction.
30164 unsigned AdjStackDown = TII.getCallFrameSetupOpcode();
30165 MachineInstrBuilder CallseqStart =
30166 BuildMI(MF, DL, TII.get(AdjStackDown)).addImm(0).addImm(0).addImm(0);
30167 BB->insert(MachineBasicBlock::iterator(MI), CallseqStart);
30168
30169 // Emit CALLSEQ_END right after the instruction.
30170 // We don't call erase from parent because we want to keep the
30171 // original instruction around.
30172 unsigned AdjStackUp = TII.getCallFrameDestroyOpcode();
30173 MachineInstrBuilder CallseqEnd =
30174 BuildMI(MF, DL, TII.get(AdjStackUp)).addImm(0).addImm(0);
30175 BB->insertAfter(MachineBasicBlock::iterator(MI), CallseqEnd);
30176
30177 return BB;
30178}
30179
30180MachineBasicBlock *
30181X86TargetLowering::EmitLoweredTLSCall(MachineInstr &MI,
30182 MachineBasicBlock *BB) const {
30183 // This is pretty easy. We're taking the value that we received from
30184 // our load from the relocation, sticking it in either RDI (x86-64)
30185 // or EAX and doing an indirect call. The return value will then
30186 // be in the normal return register.
30187 MachineFunction *F = BB->getParent();
30188 const X86InstrInfo *TII = Subtarget.getInstrInfo();
30189 DebugLoc DL = MI.getDebugLoc();
30190
30191 assert(Subtarget.isTargetDarwin() && "Darwin only instr emitted?");
30192 assert(MI.getOperand(3).isGlobal() && "This should be a global");
30193
30194 // Get a register mask for the lowered call.
30195 // FIXME: The 32-bit calls have non-standard calling conventions. Use a
30196 // proper register mask.
30197 const uint32_t *RegMask =
30198 Subtarget.is64Bit() ?
30199 Subtarget.getRegisterInfo()->getDarwinTLSCallPreservedMask() :
30200 Subtarget.getRegisterInfo()->getCallPreservedMask(*F, CallingConv::C);
30201 if (Subtarget.is64Bit()) {
30202 MachineInstrBuilder MIB =
30203 BuildMI(*BB, MI, DL, TII->get(X86::MOV64rm), X86::RDI)
30204 .addReg(X86::RIP)
30205 .addImm(0)
30206 .addReg(0)
30207 .addGlobalAddress(MI.getOperand(3).getGlobal(), 0,
30208 MI.getOperand(3).getTargetFlags())
30209 .addReg(0);
30210 MIB = BuildMI(*BB, MI, DL, TII->get(X86::CALL64m));
30211 addDirectMem(MIB, X86::RDI);
30212 MIB.addReg(X86::RAX, RegState::ImplicitDefine).addRegMask(RegMask);
30213 } else if (!isPositionIndependent()) {
30214 MachineInstrBuilder MIB =
30215 BuildMI(*BB, MI, DL, TII->get(X86::MOV32rm), X86::EAX)
30216 .addReg(0)
30217 .addImm(0)
30218 .addReg(0)
30219 .addGlobalAddress(MI.getOperand(3).getGlobal(), 0,
30220 MI.getOperand(3).getTargetFlags())
30221 .addReg(0);
30222 MIB = BuildMI(*BB, MI, DL, TII->get(X86::CALL32m));
30223 addDirectMem(MIB, X86::EAX);
30224 MIB.addReg(X86::EAX, RegState::ImplicitDefine).addRegMask(RegMask);
30225 } else {
30226 MachineInstrBuilder MIB =
30227 BuildMI(*BB, MI, DL, TII->get(X86::MOV32rm), X86::EAX)
30228 .addReg(TII->getGlobalBaseReg(F))
30229 .addImm(0)
30230 .addReg(0)
30231 .addGlobalAddress(MI.getOperand(3).getGlobal(), 0,
30232 MI.getOperand(3).getTargetFlags())
30233 .addReg(0);
30234 MIB = BuildMI(*BB, MI, DL, TII->get(X86::CALL32m));
30235 addDirectMem(MIB, X86::EAX);
30236 MIB.addReg(X86::EAX, RegState::ImplicitDefine).addRegMask(RegMask);
30237 }
30238
30239 MI.eraseFromParent(); // The pseudo instruction is gone now.
30240 return BB;
30241}
30242
30243static unsigned getOpcodeForRetpoline(unsigned RPOpc) {
30244 switch (RPOpc) {
30245 case X86::RETPOLINE_CALL32:
30246 return X86::CALLpcrel32;
30247 case X86::RETPOLINE_CALL64:
30248 return X86::CALL64pcrel32;
30249 case X86::RETPOLINE_TCRETURN32:
30250 return X86::TCRETURNdi;
30251 case X86::RETPOLINE_TCRETURN64:
30252 return X86::TCRETURNdi64;
30253 }
30254 llvm_unreachable("not retpoline opcode")::llvm::llvm_unreachable_internal("not retpoline opcode", "/build/llvm-toolchain-snapshot-10~svn374877/lib/Target/X86/X86ISelLowering.cpp"
, 30254)
;
30255}
30256
30257static const char *getRetpolineSymbol(const X86Subtarget &Subtarget,
30258 unsigned Reg) {
30259 if (Subtarget.useRetpolineExternalThunk()) {
30260 // When using an external thunk for retpolines, we pick names that match the
30261 // names GCC happens to use as well. This helps simplify the implementation
30262 // of the thunks for kernels where they have no easy ability to create
30263 // aliases and are doing non-trivial configuration of the thunk's body. For
30264 // example, the Linux kernel will do boot-time hot patching of the thunk
30265 // bodies and cannot easily export aliases of these to loaded modules.
30266 //
30267 // Note that at any point in the future, we may need to change the semantics
30268 // of how we implement retpolines and at that time will likely change the
30269 // name of the called thunk. Essentially, there is no hard guarantee that
30270 // LLVM will generate calls to specific thunks, we merely make a best-effort
30271 // attempt to help out kernels and other systems where duplicating the
30272 // thunks is costly.
30273 switch (Reg) {
30274 case X86::EAX:
30275 assert(!Subtarget.is64Bit() && "Should not be using a 32-bit thunk!");
30276 return "__x86_indirect_thunk_eax";
30277 case X86::ECX:
30278 assert(!Subtarget.is64Bit() && "Should not be using a 32-bit thunk!");
30279 return "__x86_indirect_thunk_ecx";
30280 case X86::EDX:
30281 assert(!Subtarget.is64Bit() && "Should not be using a 32-bit thunk!");
30282 return "__x86_indirect_thunk_edx";
30283 case X86::EDI:
30284 assert(!Subtarget.is64Bit() && "Should not be using a 32-bit thunk!");
30285 return "__x86_indirect_thunk_edi";
30286 case X86::R11:
30287 assert(Subtarget.is64Bit() && "Should not be using a 64-bit thunk!");
30288 return "__x86_indirect_thunk_r11";
30289 }
30290 llvm_unreachable("unexpected reg for retpoline");
30291 }
30292
30293 // When targeting an internal COMDAT thunk use an LLVM-specific name.
30294 switch (Reg) {
30295 case X86::EAX:
30296 assert(!Subtarget.is64Bit() && "Should not be using a 32-bit thunk!");
30297 return "__llvm_retpoline_eax";
30298 case X86::ECX:
30299 assert(!Subtarget.is64Bit() && "Should not be using a 32-bit thunk!");
30300 return "__llvm_retpoline_ecx";
30301 case X86::EDX:
30302 assert(!Subtarget.is64Bit() && "Should not be using a 32-bit thunk!");
30303 return "__llvm_retpoline_edx";
30304 case X86::EDI:
30305 assert(!Subtarget.is64Bit() && "Should not be using a 32-bit thunk!");
30306 return "__llvm_retpoline_edi";
30307 case X86::R11:
30308 assert(Subtarget.is64Bit() && "Should not be using a 64-bit thunk!");
30309 return "__llvm_retpoline_r11";
30310 }
30311 llvm_unreachable("unexpected reg for retpoline");
30312}
30313
30314MachineBasicBlock *
30315X86TargetLowering::EmitLoweredRetpoline(MachineInstr &MI,
30316 MachineBasicBlock *BB) const {
30317 // Copy the virtual register into the R11 physical register and
30318 // call the retpoline thunk.
30319 DebugLoc DL = MI.getDebugLoc();
30320 const X86InstrInfo *TII = Subtarget.getInstrInfo();
30321 Register CalleeVReg = MI.getOperand(0).getReg();
30322 unsigned Opc = getOpcodeForRetpoline(MI.getOpcode());
30323
30324 // Find an available scratch register to hold the callee. On 64-bit, we can
30325 // just use R11, but we scan for uses anyway to ensure we don't generate
30326 // incorrect code. On 32-bit, we use one of EAX, ECX, or EDX that isn't
30327 // already a register use operand to the call to hold the callee. If none
30328 // are available, use EDI instead. EDI is chosen because EBX is the PIC base
30329 // register and ESI is the base pointer to realigned stack frames with VLAs.
30330 SmallVector<unsigned, 3> AvailableRegs;
30331 if (Subtarget.is64Bit())
30332 AvailableRegs.push_back(X86::R11);
30333 else
30334 AvailableRegs.append({X86::EAX, X86::ECX, X86::EDX, X86::EDI});
30335
30336 // Zero out any registers that are already used.
30337 for (const auto &MO : MI.operands()) {
30338 if (MO.isReg() && MO.isUse())
30339 for (unsigned &Reg : AvailableRegs)
30340 if (Reg == MO.getReg())
30341 Reg = 0;
30342 }
30343
30344 // Choose the first remaining non-zero available register.
30345 unsigned AvailableReg = 0;
30346 for (unsigned MaybeReg : AvailableRegs) {
30347 if (MaybeReg) {
30348 AvailableReg = MaybeReg;
30349 break;
30350 }
30351 }
30352 if (!AvailableReg)
30353 report_fatal_error("calling convention incompatible with retpoline, no "
30354 "available registers");
30355
30356 const char *Symbol = getRetpolineSymbol(Subtarget, AvailableReg);
30357
30358 BuildMI(*BB, MI, DL, TII->get(TargetOpcode::COPY), AvailableReg)
30359 .addReg(CalleeVReg);
30360 MI.getOperand(0).ChangeToES(Symbol);
30361 MI.setDesc(TII->get(Opc));
30362 MachineInstrBuilder(*BB->getParent(), &MI)
30363 .addReg(AvailableReg, RegState::Implicit | RegState::Kill);
30364 return BB;
30365}
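// An illustrative standalone sketch (not part of the original source; the
// names are invented) of the scratch-register choice above: candidates that
// already appear as register uses of the call are zeroed out, and the first
// surviving candidate is taken; if none survive, the lowering reports a
// fatal error.
namespace retpoline_scratch_sketch {
inline unsigned pickScratchReg(unsigned *Candidates, unsigned NumCandidates,
                               const unsigned *UsedRegs, unsigned NumUsed) {
  for (unsigned U = 0; U < NumUsed; ++U)
    for (unsigned C = 0; C < NumCandidates; ++C)
      if (Candidates[C] == UsedRegs[U])
        Candidates[C] = 0;          // drop registers the call already uses
  for (unsigned C = 0; C < NumCandidates; ++C)
    if (Candidates[C])
      return Candidates[C];         // first remaining non-zero candidate
  return 0;                         // caller must report a fatal error
}
} // namespace retpoline_scratch_sketch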
30366
30367/// SetJmp implies future control flow change upon calling the corresponding
30368/// LongJmp.
30369/// Instead of using the 'return' instruction, the long jump fixes the stack and
30370/// performs an indirect branch. To do so it uses the registers that were stored
30371/// in the jump buffer (when calling SetJmp).
30372 /// If the shadow stack is enabled, we need to fix it as well, because some
30373 /// return addresses will be skipped.
30374/// The function will save the SSP for future fixing in the function
30375/// emitLongJmpShadowStackFix.
30376/// \sa emitLongJmpShadowStackFix
30377/// \param [in] MI The temporary Machine Instruction for the builtin.
30378/// \param [in] MBB The Machine Basic Block that will be modified.
30379void X86TargetLowering::emitSetJmpShadowStackFix(MachineInstr &MI,
30380 MachineBasicBlock *MBB) const {
30381 DebugLoc DL = MI.getDebugLoc();
30382 MachineFunction *MF = MBB->getParent();
30383 const TargetInstrInfo *TII = Subtarget.getInstrInfo();
30384 MachineRegisterInfo &MRI = MF->getRegInfo();
30385 MachineInstrBuilder MIB;
30386
30387 // Memory Reference.
30388 SmallVector<MachineMemOperand *, 2> MMOs(MI.memoperands_begin(),
30389 MI.memoperands_end());
30390
30391 // Initialize a register with zero.
30392 MVT PVT = getPointerTy(MF->getDataLayout());
30393 const TargetRegisterClass *PtrRC = getRegClassFor(PVT);
30394 Register ZReg = MRI.createVirtualRegister(PtrRC);
30395 unsigned XorRROpc = (PVT == MVT::i64) ? X86::XOR64rr : X86::XOR32rr;
30396 BuildMI(*MBB, MI, DL, TII->get(XorRROpc))
30397 .addDef(ZReg)
30398 .addReg(ZReg, RegState::Undef)
30399 .addReg(ZReg, RegState::Undef);
30400
30401 // Read the current SSP Register value to the zeroed register.
30402 Register SSPCopyReg = MRI.createVirtualRegister(PtrRC);
30403 unsigned RdsspOpc = (PVT == MVT::i64) ? X86::RDSSPQ : X86::RDSSPD;
30404 BuildMI(*MBB, MI, DL, TII->get(RdsspOpc), SSPCopyReg).addReg(ZReg);
30405
30406 // Write the SSP register value to slot 3 of the input memory buffer.
30407 unsigned PtrStoreOpc = (PVT == MVT::i64) ? X86::MOV64mr : X86::MOV32mr;
30408 MIB = BuildMI(*MBB, MI, DL, TII->get(PtrStoreOpc));
30409 const int64_t SSPOffset = 3 * PVT.getStoreSize();
30410 const unsigned MemOpndSlot = 1;
30411 for (unsigned i = 0; i < X86::AddrNumOperands; ++i) {
30412 if (i == X86::AddrDisp)
30413 MIB.addDisp(MI.getOperand(MemOpndSlot + i), SSPOffset);
30414 else
30415 MIB.add(MI.getOperand(MemOpndSlot + i));
30416 }
30417 MIB.addReg(SSPCopyReg);
30418 MIB.setMemRefs(MMOs);
30419}
30420
30421MachineBasicBlock *
30422X86TargetLowering::emitEHSjLjSetJmp(MachineInstr &MI,
30423 MachineBasicBlock *MBB) const {
30424 DebugLoc DL = MI.getDebugLoc();
30425 MachineFunction *MF = MBB->getParent();
30426 const TargetInstrInfo *TII = Subtarget.getInstrInfo();
30427 const TargetRegisterInfo *TRI = Subtarget.getRegisterInfo();
30428 MachineRegisterInfo &MRI = MF->getRegInfo();
30429
30430 const BasicBlock *BB = MBB->getBasicBlock();
30431 MachineFunction::iterator I = ++MBB->getIterator();
30432
30433 // Memory Reference
30434 SmallVector<MachineMemOperand *, 2> MMOs(MI.memoperands_begin(),
30435 MI.memoperands_end());
30436
30437 unsigned DstReg;
30438 unsigned MemOpndSlot = 0;
30439
30440 unsigned CurOp = 0;
30441
30442 DstReg = MI.getOperand(CurOp++).getReg();
30443 const TargetRegisterClass *RC = MRI.getRegClass(DstReg);
30444 assert(TRI->isTypeLegalForClass(*RC, MVT::i32) && "Invalid destination!");
30445 (void)TRI;
30446 Register mainDstReg = MRI.createVirtualRegister(RC);
30447 Register restoreDstReg = MRI.createVirtualRegister(RC);
30448
30449 MemOpndSlot = CurOp;
30450
30451 MVT PVT = getPointerTy(MF->getDataLayout());
30452 assert((PVT == MVT::i64 || PVT == MVT::i32) &&
30453 "Invalid Pointer Size!");
30454
30455 // For v = setjmp(buf), we generate
30456 //
30457 // thisMBB:
30458 // buf[LabelOffset] = restoreMBB <-- takes address of restoreMBB
30459 // SjLjSetup restoreMBB
30460 //
30461 // mainMBB:
30462 // v_main = 0
30463 //
30464 // sinkMBB:
30465 // v = phi(main, restore)
30466 //
30467 // restoreMBB:
30468 // if base pointer being used, load it from frame
30469 // v_restore = 1
30470
30471 MachineBasicBlock *thisMBB = MBB;
30472 MachineBasicBlock *mainMBB = MF->CreateMachineBasicBlock(BB);
30473 MachineBasicBlock *sinkMBB = MF->CreateMachineBasicBlock(BB);
30474 MachineBasicBlock *restoreMBB = MF->CreateMachineBasicBlock(BB);
30475 MF->insert(I, mainMBB);
30476 MF->insert(I, sinkMBB);
30477 MF->push_back(restoreMBB);
30478 restoreMBB->setHasAddressTaken();
30479
30480 MachineInstrBuilder MIB;
30481
30482 // Transfer the remainder of BB and its successor edges to sinkMBB.
30483 sinkMBB->splice(sinkMBB->begin(), MBB,
30484 std::next(MachineBasicBlock::iterator(MI)), MBB->end());
30485 sinkMBB->transferSuccessorsAndUpdatePHIs(MBB);
30486
30487 // thisMBB:
30488 unsigned PtrStoreOpc = 0;
30489 unsigned LabelReg = 0;
30490 const int64_t LabelOffset = 1 * PVT.getStoreSize();
30491 bool UseImmLabel = (MF->getTarget().getCodeModel() == CodeModel::Small) &&
30492 !isPositionIndependent();
30493
30494 // Prepare IP either in reg or imm.
30495 if (!UseImmLabel) {
30496 PtrStoreOpc = (PVT == MVT::i64) ? X86::MOV64mr : X86::MOV32mr;
30497 const TargetRegisterClass *PtrRC = getRegClassFor(PVT);
30498 LabelReg = MRI.createVirtualRegister(PtrRC);
30499 if (Subtarget.is64Bit()) {
30500 MIB = BuildMI(*thisMBB, MI, DL, TII->get(X86::LEA64r), LabelReg)
30501 .addReg(X86::RIP)
30502 .addImm(0)
30503 .addReg(0)
30504 .addMBB(restoreMBB)
30505 .addReg(0);
30506 } else {
30507 const X86InstrInfo *XII = static_cast<const X86InstrInfo*>(TII);
30508 MIB = BuildMI(*thisMBB, MI, DL, TII->get(X86::LEA32r), LabelReg)
30509 .addReg(XII->getGlobalBaseReg(MF))
30510 .addImm(0)
30511 .addReg(0)
30512 .addMBB(restoreMBB, Subtarget.classifyBlockAddressReference())
30513 .addReg(0);
30514 }
30515 } else
30516 PtrStoreOpc = (PVT == MVT::i64) ? X86::MOV64mi32 : X86::MOV32mi;
30517 // Store IP
30518 MIB = BuildMI(*thisMBB, MI, DL, TII->get(PtrStoreOpc));
30519 for (unsigned i = 0; i < X86::AddrNumOperands; ++i) {
30520 if (i == X86::AddrDisp)
30521 MIB.addDisp(MI.getOperand(MemOpndSlot + i), LabelOffset);
30522 else
30523 MIB.add(MI.getOperand(MemOpndSlot + i));
30524 }
30525 if (!UseImmLabel)
30526 MIB.addReg(LabelReg);
30527 else
30528 MIB.addMBB(restoreMBB);
30529 MIB.setMemRefs(MMOs);
30530
30531 if (MF->getMMI().getModule()->getModuleFlag("cf-protection-return")) {
30532 emitSetJmpShadowStackFix(MI, thisMBB);
30533 }
30534
30535 // Setup
30536 MIB = BuildMI(*thisMBB, MI, DL, TII->get(X86::EH_SjLj_Setup))
30537 .addMBB(restoreMBB);
30538
30539 const X86RegisterInfo *RegInfo = Subtarget.getRegisterInfo();
30540 MIB.addRegMask(RegInfo->getNoPreservedMask());
30541 thisMBB->addSuccessor(mainMBB);
30542 thisMBB->addSuccessor(restoreMBB);
30543
30544 // mainMBB:
30545 // EAX = 0
30546 BuildMI(mainMBB, DL, TII->get(X86::MOV32r0), mainDstReg);
30547 mainMBB->addSuccessor(sinkMBB);
30548
30549 // sinkMBB:
30550 BuildMI(*sinkMBB, sinkMBB->begin(), DL,
30551 TII->get(X86::PHI), DstReg)
30552 .addReg(mainDstReg).addMBB(mainMBB)
30553 .addReg(restoreDstReg).addMBB(restoreMBB);
30554
30555 // restoreMBB:
30556 if (RegInfo->hasBasePointer(*MF)) {
30557 const bool Uses64BitFramePtr =
30558 Subtarget.isTarget64BitLP64() || Subtarget.isTargetNaCl64();
30559 X86MachineFunctionInfo *X86FI = MF->getInfo<X86MachineFunctionInfo>();
30560 X86FI->setRestoreBasePointer(MF);
30561 Register FramePtr = RegInfo->getFrameRegister(*MF);
30562 Register BasePtr = RegInfo->getBaseRegister();
30563 unsigned Opm = Uses64BitFramePtr ? X86::MOV64rm : X86::MOV32rm;
30564 addRegOffset(BuildMI(restoreMBB, DL, TII->get(Opm), BasePtr),
30565 FramePtr, true, X86FI->getRestoreBasePointerOffset())
30566 .setMIFlag(MachineInstr::FrameSetup);
30567 }
30568 BuildMI(restoreMBB, DL, TII->get(X86::MOV32ri), restoreDstReg).addImm(1);
30569 BuildMI(restoreMBB, DL, TII->get(X86::JMP_1)).addMBB(sinkMBB);
30570 restoreMBB->addSuccessor(sinkMBB);
30571
30572 MI.eraseFromParent();
30573 return sinkMBB;
30574}
30575
30576/// Fix the shadow stack using the previously saved SSP pointer.
30577/// \sa emitSetJmpShadowStackFix
30578/// \param [in] MI The temporary Machine Instruction for the builtin.
30579/// \param [in] MBB The Machine Basic Block that will be modified.
30580/// \return The sink MBB that will perform the future indirect branch.
30581MachineBasicBlock *
30582X86TargetLowering::emitLongJmpShadowStackFix(MachineInstr &MI,
30583 MachineBasicBlock *MBB) const {
30584 DebugLoc DL = MI.getDebugLoc();
30585 MachineFunction *MF = MBB->getParent();
30586 const TargetInstrInfo *TII = Subtarget.getInstrInfo();
30587 MachineRegisterInfo &MRI = MF->getRegInfo();
30588
30589 // Memory Reference
30590 SmallVector<MachineMemOperand *, 2> MMOs(MI.memoperands_begin(),
30591 MI.memoperands_end());
30592
30593 MVT PVT = getPointerTy(MF->getDataLayout());
30594 const TargetRegisterClass *PtrRC = getRegClassFor(PVT);
30595
30596 // checkSspMBB:
30597 // xor vreg1, vreg1
30598 // rdssp vreg1
30599 // test vreg1, vreg1
30600 // je sinkMBB # Jump if Shadow Stack is not supported
30601 // fallMBB:
30602 // mov buf+24/12(%rip), vreg2
30603 // sub vreg1, vreg2
30604 // jbe sinkMBB # No need to fix the Shadow Stack
30605 // fixShadowMBB:
30606 // shr 3/2, vreg2
30607 // incssp vreg2 # fix the SSP according to the lower 8 bits
30608 // shr 8, vreg2
30609 // je sinkMBB
30610 // fixShadowLoopPrepareMBB:
30611 // shl vreg2
30612 // mov 128, vreg3
30613 // fixShadowLoopMBB:
30614 // incssp vreg3
30615 // dec vreg2
30616 // jne fixShadowLoopMBB # Iterate until you finish fixing
30617 // # the Shadow Stack
30618 // sinkMBB:
30619
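// Worked example (illustrative, not from the original source; assuming
// PVT == MVT::i64): if the saved SSP is 0x2450 bytes above the current SSP,
// the delta is 0x2450 >> 3 = 0x48A shadow-stack slots. The first incssp pops
// the low 8 bits, 0x8A slots; the remaining (0x48A >> 8) << 1 = 8 loop
// iterations of incssp 128 pop the other 8 * 128 = 0x400 slots.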
30620 MachineFunction::iterator I = ++MBB->getIterator();
30621 const BasicBlock *BB = MBB->getBasicBlock();
30622
30623 MachineBasicBlock *checkSspMBB = MF->CreateMachineBasicBlock(BB);
30624 MachineBasicBlock *fallMBB = MF->CreateMachineBasicBlock(BB);
30625 MachineBasicBlock *fixShadowMBB = MF->CreateMachineBasicBlock(BB);
30626 MachineBasicBlock *fixShadowLoopPrepareMBB = MF->CreateMachineBasicBlock(BB);
30627 MachineBasicBlock *fixShadowLoopMBB = MF->CreateMachineBasicBlock(BB);
30628 MachineBasicBlock *sinkMBB = MF->CreateMachineBasicBlock(BB);
30629 MF->insert(I, checkSspMBB);
30630 MF->insert(I, fallMBB);
30631 MF->insert(I, fixShadowMBB);
30632 MF->insert(I, fixShadowLoopPrepareMBB);
30633 MF->insert(I, fixShadowLoopMBB);
30634 MF->insert(I, sinkMBB);
30635
30636 // Transfer the remainder of BB and its successor edges to sinkMBB.
30637 sinkMBB->splice(sinkMBB->begin(), MBB, MachineBasicBlock::iterator(MI),
30638 MBB->end());
30639 sinkMBB->transferSuccessorsAndUpdatePHIs(MBB);
30640
30641 MBB->addSuccessor(checkSspMBB);
30642
30643 // Initialize a register with zero.
30644 Register ZReg = MRI.createVirtualRegister(PtrRC);
30645 unsigned XorRROpc = (PVT == MVT::i64) ? X86::XOR64rr : X86::XOR32rr;
30646 BuildMI(checkSspMBB, DL, TII->get(XorRROpc))
30647 .addDef(ZReg)
30648 .addReg(ZReg, RegState::Undef)
30649 .addReg(ZReg, RegState::Undef);
30650
30651 // Read the current SSP Register value to the zeroed register.
30652 Register SSPCopyReg = MRI.createVirtualRegister(PtrRC);
30653 unsigned RdsspOpc = (PVT == MVT::i64) ? X86::RDSSPQ : X86::RDSSPD;
30654 BuildMI(checkSspMBB, DL, TII->get(RdsspOpc), SSPCopyReg).addReg(ZReg);
30655
30656 // Check whether the value read from the SSP register is zero and jump
30657 // directly to the sink.
30658 unsigned TestRROpc = (PVT == MVT::i64) ? X86::TEST64rr : X86::TEST32rr;
30659 BuildMI(checkSspMBB, DL, TII->get(TestRROpc))
30660 .addReg(SSPCopyReg)
30661 .addReg(SSPCopyReg);
30662 BuildMI(checkSspMBB, DL, TII->get(X86::JCC_1)).addMBB(sinkMBB).addImm(X86::COND_E);
30663 checkSspMBB->addSuccessor(sinkMBB);
30664 checkSspMBB->addSuccessor(fallMBB);
30665
30666 // Reload the previously saved SSP register value.
30667 Register PrevSSPReg = MRI.createVirtualRegister(PtrRC);
30668 unsigned PtrLoadOpc = (PVT == MVT::i64) ? X86::MOV64rm : X86::MOV32rm;
30669 const int64_t SPPOffset = 3 * PVT.getStoreSize();
30670 MachineInstrBuilder MIB =
30671 BuildMI(fallMBB, DL, TII->get(PtrLoadOpc), PrevSSPReg);
30672 for (unsigned i = 0; i < X86::AddrNumOperands; ++i) {
30673 const MachineOperand &MO = MI.getOperand(i);
30674 if (i == X86::AddrDisp)
30675 MIB.addDisp(MO, SPPOffset);
30676 else if (MO.isReg()) // Don't add the whole operand, we don't want to
30677 // preserve kill flags.
30678 MIB.addReg(MO.getReg());
30679 else
30680 MIB.add(MO);
30681 }
30682 MIB.setMemRefs(MMOs);
30683
30684 // Subtract the current SSP from the previous SSP.
30685 Register SspSubReg = MRI.createVirtualRegister(PtrRC);
30686 unsigned SubRROpc = (PVT == MVT::i64) ? X86::SUB64rr : X86::SUB32rr;
30687 BuildMI(fallMBB, DL, TII->get(SubRROpc), SspSubReg)
30688 .addReg(PrevSSPReg)
30689 .addReg(SSPCopyReg);
30690
30691 // Jump to sink in case PrevSSPReg <= SSPCopyReg.
30692 BuildMI(fallMBB, DL, TII->get(X86::JCC_1)).addMBB(sinkMBB).addImm(X86::COND_BE);
30693 fallMBB->addSuccessor(sinkMBB);
30694 fallMBB->addSuccessor(fixShadowMBB);
30695
30696 // Shift right by 2/3 for 32/64 because incssp multiplies the argument by 4/8.
30697 unsigned ShrRIOpc = (PVT == MVT::i64) ? X86::SHR64ri : X86::SHR32ri;
30698 unsigned Offset = (PVT == MVT::i64) ? 3 : 2;
30699 Register SspFirstShrReg = MRI.createVirtualRegister(PtrRC);
30700 BuildMI(fixShadowMBB, DL, TII->get(ShrRIOpc), SspFirstShrReg)
30701 .addReg(SspSubReg)
30702 .addImm(Offset);
30703
30704 // Increase the SSP using only the lower 8 bits of the delta.
30705 unsigned IncsspOpc = (PVT == MVT::i64) ? X86::INCSSPQ : X86::INCSSPD;
30706 BuildMI(fixShadowMBB, DL, TII->get(IncsspOpc)).addReg(SspFirstShrReg);
30707
30708 // Shift out the lower 8 bits, which were just consumed by incssp.
30709 Register SspSecondShrReg = MRI.createVirtualRegister(PtrRC);
30710 BuildMI(fixShadowMBB, DL, TII->get(ShrRIOpc), SspSecondShrReg)
30711 .addReg(SspFirstShrReg)
30712 .addImm(8);
30713
30714 // Jump if the result of the shift is zero.
30715 BuildMI(fixShadowMBB, DL, TII->get(X86::JCC_1)).addMBB(sinkMBB).addImm(X86::COND_E);
30716 fixShadowMBB->addSuccessor(sinkMBB);
30717 fixShadowMBB->addSuccessor(fixShadowLoopPrepareMBB);
30718
30719 // Do a single shift left.
30720 unsigned ShlR1Opc = (PVT == MVT::i64) ? X86::SHL64r1 : X86::SHL32r1;
30721 Register SspAfterShlReg = MRI.createVirtualRegister(PtrRC);
30722 BuildMI(fixShadowLoopPrepareMBB, DL, TII->get(ShlR1Opc), SspAfterShlReg)
30723 .addReg(SspSecondShrReg);
30724
30725 // Save the value 128 to a register (will be used next with incssp).
30726 Register Value128InReg = MRI.createVirtualRegister(PtrRC);
30727 unsigned MovRIOpc = (PVT == MVT::i64) ? X86::MOV64ri32 : X86::MOV32ri;
30728 BuildMI(fixShadowLoopPrepareMBB, DL, TII->get(MovRIOpc), Value128InReg)
30729 .addImm(128);
30730 fixShadowLoopPrepareMBB->addSuccessor(fixShadowLoopMBB);
30731
30732 // Since incssp only looks at the lower 8 bits, we might need to do several
30733 // iterations of incssp until we finish fixing the shadow stack.
30734 Register DecReg = MRI.createVirtualRegister(PtrRC);
30735 Register CounterReg = MRI.createVirtualRegister(PtrRC);
30736 BuildMI(fixShadowLoopMBB, DL, TII->get(X86::PHI), CounterReg)
30737 .addReg(SspAfterShlReg)
30738 .addMBB(fixShadowLoopPrepareMBB)
30739 .addReg(DecReg)
30740 .addMBB(fixShadowLoopMBB);
30741
30742 // Every iteration we increase the SSP by 128.
30743 BuildMI(fixShadowLoopMBB, DL, TII->get(IncsspOpc)).addReg(Value128InReg);
30744
30745 // Every iteration we decrement the counter by 1.
30746 unsigned DecROpc = (PVT == MVT::i64) ? X86::DEC64r : X86::DEC32r;
30747 BuildMI(fixShadowLoopMBB, DL, TII->get(DecROpc), DecReg).addReg(CounterReg);
30748
30749 // Jump if the counter is not zero yet.
30750 BuildMI(fixShadowLoopMBB, DL, TII->get(X86::JCC_1)).addMBB(fixShadowLoopMBB).addImm(X86::COND_NE);
30751 fixShadowLoopMBB->addSuccessor(sinkMBB);
30752 fixShadowLoopMBB->addSuccessor(fixShadowLoopMBB);
30753
30754 return sinkMBB;
30755}
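Note: the shift/loop structure above advances the shadow stack pointer by exactly the number of slots between the current SSP and the previously saved one. A minimal standalone sketch of that arithmetic (illustrative only, assuming PVT == MVT::i64, 8-byte shadow-stack slots, and that incssp consumes the low 8 bits of its operand, as the comments above state):

#include <cstdint>

// Hypothetical helper, not part of the backend: counts how many slots the
// emitted incssp sequence advances for a given SSP delta.
static uint64_t shadowStackSlotsAdvanced(uint64_t PrevSSP, uint64_t CurSSP) {
  uint64_t DeltaBytes = PrevSSP - CurSSP; // sub vreg1, vreg2
  uint64_t Slots = DeltaBytes >> 3;       // shr 3: 8-byte slots on x86-64
  uint64_t Advanced = Slots & 0xFF;       // first incssp handles the low 8 bits
  uint64_t Rem = Slots >> 8;              // shr 8
  if (Rem) {
    uint64_t Counter = Rem << 1;          // shl: 2 * Rem loop iterations
    while (Counter--)
      Advanced += 128;                    // each incssp 128 advances 128 slots
  }
  return Advanced;                        // == Slots, so the SSP lands on PrevSSP
}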
30756
30757MachineBasicBlock *
30758X86TargetLowering::emitEHSjLjLongJmp(MachineInstr &MI,
30759 MachineBasicBlock *MBB) const {
30760 DebugLoc DL = MI.getDebugLoc();
30761 MachineFunction *MF = MBB->getParent();
30762 const TargetInstrInfo *TII = Subtarget.getInstrInfo();
30763 MachineRegisterInfo &MRI = MF->getRegInfo();
30764
30765 // Memory Reference
30766 SmallVector<MachineMemOperand *, 2> MMOs(MI.memoperands_begin(),
30767 MI.memoperands_end());
30768
30769 MVT PVT = getPointerTy(MF->getDataLayout());
30770 assert((PVT == MVT::i64 || PVT == MVT::i32) &&
30771 "Invalid Pointer Size!");
30772
30773 const TargetRegisterClass *RC =
30774 (PVT == MVT::i64) ? &X86::GR64RegClass : &X86::GR32RegClass;
30775 Register Tmp = MRI.createVirtualRegister(RC);
30776 // Since FP is only updated here but NOT referenced, it's treated as GPR.
30777 const X86RegisterInfo *RegInfo = Subtarget.getRegisterInfo();
30778 unsigned FP = (PVT == MVT::i64) ? X86::RBP : X86::EBP;
30779 Register SP = RegInfo->getStackRegister();
30780
30781 MachineInstrBuilder MIB;
30782
30783 const int64_t LabelOffset = 1 * PVT.getStoreSize();
30784 const int64_t SPOffset = 2 * PVT.getStoreSize();
30785
30786 unsigned PtrLoadOpc = (PVT == MVT::i64) ? X86::MOV64rm : X86::MOV32rm;
30787 unsigned IJmpOpc = (PVT == MVT::i64) ? X86::JMP64r : X86::JMP32r;
30788
30789 MachineBasicBlock *thisMBB = MBB;
30790
30791 // When CET and the shadow stack are enabled, we need to fix the Shadow Stack.
30792 if (MF->getMMI().getModule()->getModuleFlag("cf-protection-return")) {
30793 thisMBB = emitLongJmpShadowStackFix(MI, thisMBB);
30794 }
30795
30796 // Reload FP
30797 MIB = BuildMI(*thisMBB, MI, DL, TII->get(PtrLoadOpc), FP);
30798 for (unsigned i = 0; i < X86::AddrNumOperands; ++i) {
30799 const MachineOperand &MO = MI.getOperand(i);
30800 if (MO.isReg()) // Don't add the whole operand, we don't want to
30801 // preserve kill flags.
30802 MIB.addReg(MO.getReg());
30803 else
30804 MIB.add(MO);
30805 }
30806 MIB.setMemRefs(MMOs);
30807
30808 // Reload IP
30809 MIB = BuildMI(*thisMBB, MI, DL, TII->get(PtrLoadOpc), Tmp);
30810 for (unsigned i = 0; i < X86::AddrNumOperands; ++i) {
30811 const MachineOperand &MO = MI.getOperand(i);
30812 if (i == X86::AddrDisp)
30813 MIB.addDisp(MO, LabelOffset);
30814 else if (MO.isReg()) // Don't add the whole operand, we don't want to
30815 // preserve kill flags.
30816 MIB.addReg(MO.getReg());
30817 else
30818 MIB.add(MO);
30819 }
30820 MIB.setMemRefs(MMOs);
30821
30822 // Reload SP
30823 MIB = BuildMI(*thisMBB, MI, DL, TII->get(PtrLoadOpc), SP);
30824 for (unsigned i = 0; i < X86::AddrNumOperands; ++i) {
30825 if (i == X86::AddrDisp)
30826 MIB.addDisp(MI.getOperand(i), SPOffset);
30827 else
30828 MIB.add(MI.getOperand(i)); // We can preserve the kill flags here, it's
30829 // the last instruction of the expansion.
30830 }
30831 MIB.setMemRefs(MMOs);
30832
30833 // Jump
30834 BuildMI(*thisMBB, MI, DL, TII->get(IJmpOpc)).addReg(Tmp);
30835
30836 MI.eraseFromParent();
30837 return thisMBB;
30838}
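Note: the three reloads above address the setjmp buffer at fixed multiples of the pointer size. A sketch of the slot layout they imply (illustrative only; the field names are made up, but the offsets are the ones used here and in emitLongJmpShadowStackFix):

struct EHSjLjBuf {     // PS = pointer size, 4 or 8 bytes
  void *FramePtr;      // offset 0      - reloaded into EBP/RBP
  void *Label;         // offset 1 * PS - LabelOffset, reloaded into Tmp and jumped to
  void *StackPtr;      // offset 2 * PS - SPOffset, reloaded into ESP/RSP
  void *SavedSSP;      // offset 3 * PS - SPPOffset, read by the shadow stack fix
};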
30839
30840void X86TargetLowering::SetupEntryBlockForSjLj(MachineInstr &MI,
30841 MachineBasicBlock *MBB,
30842 MachineBasicBlock *DispatchBB,
30843 int FI) const {
30844 DebugLoc DL = MI.getDebugLoc();
30845 MachineFunction *MF = MBB->getParent();
30846 MachineRegisterInfo *MRI = &MF->getRegInfo();
30847 const X86InstrInfo *TII = Subtarget.getInstrInfo();
30848
30849 MVT PVT = getPointerTy(MF->getDataLayout());
30850 assert((PVT == MVT::i64 || PVT == MVT::i32) && "Invalid Pointer Size!");
30851
30852 unsigned Op = 0;
30853 unsigned VR = 0;
30854
30855 bool UseImmLabel = (MF->getTarget().getCodeModel() == CodeModel::Small) &&
30856 !isPositionIndependent();
30857
30858 if (UseImmLabel) {
30859 Op = (PVT == MVT::i64) ? X86::MOV64mi32 : X86::MOV32mi;
30860 } else {
30861 const TargetRegisterClass *TRC =
30862 (PVT == MVT::i64) ? &X86::GR64RegClass : &X86::GR32RegClass;
30863 VR = MRI->createVirtualRegister(TRC);
30864 Op = (PVT == MVT::i64) ? X86::MOV64mr : X86::MOV32mr;
30865
30866 if (Subtarget.is64Bit())
30867 BuildMI(*MBB, MI, DL, TII->get(X86::LEA64r), VR)
30868 .addReg(X86::RIP)
30869 .addImm(1)
30870 .addReg(0)
30871 .addMBB(DispatchBB)
30872 .addReg(0);
30873 else
30874 BuildMI(*MBB, MI, DL, TII->get(X86::LEA32r), VR)
30875 .addReg(0) /* TII->getGlobalBaseReg(MF) */
30876 .addImm(1)
30877 .addReg(0)
30878 .addMBB(DispatchBB, Subtarget.classifyBlockAddressReference())
30879 .addReg(0);
30880 }
30881
30882 MachineInstrBuilder MIB = BuildMI(*MBB, MI, DL, TII->get(Op));
30883 addFrameReference(MIB, FI, Subtarget.is64Bit() ? 56 : 36);
30884 if (UseImmLabel)
30885 MIB.addMBB(DispatchBB);
30886 else
30887 MIB.addReg(VR);
30888}
30889
30890MachineBasicBlock *
30891X86TargetLowering::EmitSjLjDispatchBlock(MachineInstr &MI,
30892 MachineBasicBlock *BB) const {
30893 DebugLoc DL = MI.getDebugLoc();
30894 MachineFunction *MF = BB->getParent();
30895 MachineRegisterInfo *MRI = &MF->getRegInfo();
30896 const X86InstrInfo *TII = Subtarget.getInstrInfo();
30897 int FI = MF->getFrameInfo().getFunctionContextIndex();
30898
30899 // Get a mapping of the call site numbers to all of the landing pads they're
30900 // associated with.
30901 DenseMap<unsigned, SmallVector<MachineBasicBlock *, 2>> CallSiteNumToLPad;
30902 unsigned MaxCSNum = 0;
30903 for (auto &MBB : *MF) {
30904 if (!MBB.isEHPad())
30905 continue;
30906
30907 MCSymbol *Sym = nullptr;
30908 for (const auto &MI : MBB) {
30909 if (MI.isDebugInstr())
30910 continue;
30911
30912 assert(MI.isEHLabel() && "expected EH_LABEL");
30913 Sym = MI.getOperand(0).getMCSymbol();
30914 break;
30915 }
30916
30917 if (!MF->hasCallSiteLandingPad(Sym))
30918 continue;
30919
30920 for (unsigned CSI : MF->getCallSiteLandingPad(Sym)) {
30921 CallSiteNumToLPad[CSI].push_back(&MBB);
30922 MaxCSNum = std::max(MaxCSNum, CSI);
30923 }
30924 }
30925
30926 // Get an ordered list of the machine basic blocks for the jump table.
30927 std::vector<MachineBasicBlock *> LPadList;
30928 SmallPtrSet<MachineBasicBlock *, 32> InvokeBBs;
30929 LPadList.reserve(CallSiteNumToLPad.size());
30930
30931 for (unsigned CSI = 1; CSI <= MaxCSNum; ++CSI) {
30932 for (auto &LP : CallSiteNumToLPad[CSI]) {
30933 LPadList.push_back(LP);
30934 InvokeBBs.insert(LP->pred_begin(), LP->pred_end());
30935 }
30936 }
30937
30938 assert(!LPadList.empty() &&
30939 "No landing pad destinations for the dispatch jump table!");
30940
30941 // Create the MBBs for the dispatch code.
30942
30943 // Shove the dispatch's address into the return slot in the function context.
30944 MachineBasicBlock *DispatchBB = MF->CreateMachineBasicBlock();
30945 DispatchBB->setIsEHPad(true);
30946
30947 MachineBasicBlock *TrapBB = MF->CreateMachineBasicBlock();
30948 BuildMI(TrapBB, DL, TII->get(X86::TRAP));
30949 DispatchBB->addSuccessor(TrapBB);
30950
30951 MachineBasicBlock *DispContBB = MF->CreateMachineBasicBlock();
30952 DispatchBB->addSuccessor(DispContBB);
30953
30954 // Insert MBBs.
30955 MF->push_back(DispatchBB);
30956 MF->push_back(DispContBB);
30957 MF->push_back(TrapBB);
30958
30959 // Insert code into the entry block that creates and registers the function
30960 // context.
30961 SetupEntryBlockForSjLj(MI, BB, DispatchBB, FI);
30962
30963 // Create the jump table and associated information
30964 unsigned JTE = getJumpTableEncoding();
30965 MachineJumpTableInfo *JTI = MF->getOrCreateJumpTableInfo(JTE);
30966 unsigned MJTI = JTI->createJumpTableIndex(LPadList);
30967
30968 const X86RegisterInfo &RI = TII->getRegisterInfo();
30969 // Add a register mask with no preserved registers. This results in all
30970 // registers being marked as clobbered.
30971 if (RI.hasBasePointer(*MF)) {
30972 const bool FPIs64Bit =
30973 Subtarget.isTarget64BitLP64() || Subtarget.isTargetNaCl64();
30974 X86MachineFunctionInfo *MFI = MF->getInfo<X86MachineFunctionInfo>();
30975 MFI->setRestoreBasePointer(MF);
30976
30977 Register FP = RI.getFrameRegister(*MF);
30978 Register BP = RI.getBaseRegister();
30979 unsigned Op = FPIs64Bit ? X86::MOV64rm : X86::MOV32rm;
30980 addRegOffset(BuildMI(DispatchBB, DL, TII->get(Op), BP), FP, true,
30981 MFI->getRestoreBasePointerOffset())
30982 .addRegMask(RI.getNoPreservedMask());
30983 } else {
30984 BuildMI(DispatchBB, DL, TII->get(X86::NOOP))
30985 .addRegMask(RI.getNoPreservedMask());
30986 }
30987
30988 // IReg is used as an index in a memory operand and therefore can't be SP
30989 Register IReg = MRI->createVirtualRegister(&X86::GR32_NOSPRegClass);
30990 addFrameReference(BuildMI(DispatchBB, DL, TII->get(X86::MOV32rm), IReg), FI,
30991 Subtarget.is64Bit() ? 8 : 4);
30992 BuildMI(DispatchBB, DL, TII->get(X86::CMP32ri))
30993 .addReg(IReg)
30994 .addImm(LPadList.size());
30995 BuildMI(DispatchBB, DL, TII->get(X86::JCC_1)).addMBB(TrapBB).addImm(X86::COND_AE);
30996
30997 if (Subtarget.is64Bit()) {
30998 Register BReg = MRI->createVirtualRegister(&X86::GR64RegClass);
30999 Register IReg64 = MRI->createVirtualRegister(&X86::GR64_NOSPRegClass);
31000
31001 // leaq .LJTI0_0(%rip), BReg
31002 BuildMI(DispContBB, DL, TII->get(X86::LEA64r), BReg)
31003 .addReg(X86::RIP)
31004 .addImm(1)
31005 .addReg(0)
31006 .addJumpTableIndex(MJTI)
31007 .addReg(0);
31008 // movzx IReg64, IReg
31009 BuildMI(DispContBB, DL, TII->get(TargetOpcode::SUBREG_TO_REG), IReg64)
31010 .addImm(0)
31011 .addReg(IReg)
31012 .addImm(X86::sub_32bit);
31013
31014 switch (JTE) {
31015 case MachineJumpTableInfo::EK_BlockAddress:
31016 // jmpq *(BReg,IReg64,8)
31017 BuildMI(DispContBB, DL, TII->get(X86::JMP64m))
31018 .addReg(BReg)
31019 .addImm(8)
31020 .addReg(IReg64)
31021 .addImm(0)
31022 .addReg(0);
31023 break;
31024 case MachineJumpTableInfo::EK_LabelDifference32: {
31025 Register OReg = MRI->createVirtualRegister(&X86::GR32RegClass);
31026 Register OReg64 = MRI->createVirtualRegister(&X86::GR64RegClass);
31027 Register TReg = MRI->createVirtualRegister(&X86::GR64RegClass);
31028
31029 // movl (BReg,IReg64,4), OReg
31030 BuildMI(DispContBB, DL, TII->get(X86::MOV32rm), OReg)
31031 .addReg(BReg)
31032 .addImm(4)
31033 .addReg(IReg64)
31034 .addImm(0)
31035 .addReg(0);
31036 // movsx OReg64, OReg
31037 BuildMI(DispContBB, DL, TII->get(X86::MOVSX64rr32), OReg64).addReg(OReg);
31038 // addq BReg, OReg64, TReg
31039 BuildMI(DispContBB, DL, TII->get(X86::ADD64rr), TReg)
31040 .addReg(OReg64)
31041 .addReg(BReg);
31042 // jmpq *TReg
31043 BuildMI(DispContBB, DL, TII->get(X86::JMP64r)).addReg(TReg);
31044 break;
31045 }
31046 default:
31047 llvm_unreachable("Unexpected jump table encoding");
31048 }
31049 } else {
31050 // jmpl *.LJTI0_0(,IReg,4)
31051 BuildMI(DispContBB, DL, TII->get(X86::JMP32m))
31052 .addReg(0)
31053 .addImm(4)
31054 .addReg(IReg)
31055 .addJumpTableIndex(MJTI)
31056 .addReg(0);
31057 }
31058
31059 // Add the jump table entries as successors to the MBB.
31060 SmallPtrSet<MachineBasicBlock *, 8> SeenMBBs;
31061 for (auto &LP : LPadList)
31062 if (SeenMBBs.insert(LP).second)
31063 DispContBB->addSuccessor(LP);
31064
31065 // N.B. the order the invoke BBs are processed in doesn't matter here.
31066 SmallVector<MachineBasicBlock *, 64> MBBLPads;
31067 const MCPhysReg *SavedRegs = MF->getRegInfo().getCalleeSavedRegs();
31068 for (MachineBasicBlock *MBB : InvokeBBs) {
31069 // Remove the landing pad successor from the invoke block and replace it
31070 // with the new dispatch block.
31071 // Keep a copy of Successors since it's modified inside the loop.
31072 SmallVector<MachineBasicBlock *, 8> Successors(MBB->succ_rbegin(),
31073 MBB->succ_rend());
31074 // FIXME: Avoid quadratic complexity.
31075 for (auto MBBS : Successors) {
31076 if (MBBS->isEHPad()) {
31077 MBB->removeSuccessor(MBBS);
31078 MBBLPads.push_back(MBBS);
31079 }
31080 }
31081
31082 MBB->addSuccessor(DispatchBB);
31083
31084 // Find the invoke call and mark all of the callee-saved registers as
31085 // 'implicit defined' so that they're spilled. This prevents later passes
31086 // from moving instructions to before the EH block, where they would never
31087 // be executed.
31088 for (auto &II : reverse(*MBB)) {
31089 if (!II.isCall())
31090 continue;
31091
31092 DenseMap<unsigned, bool> DefRegs;
31093 for (auto &MOp : II.operands())
31094 if (MOp.isReg())
31095 DefRegs[MOp.getReg()] = true;
31096
31097 MachineInstrBuilder MIB(*MF, &II);
31098 for (unsigned RegIdx = 0; SavedRegs[RegIdx]; ++RegIdx) {
31099 unsigned Reg = SavedRegs[RegIdx];
31100 if (!DefRegs[Reg])
31101 MIB.addReg(Reg, RegState::ImplicitDefine | RegState::Dead);
31102 }
31103
31104 break;
31105 }
31106 }
31107
31108 // Mark all former landing pads as non-landing pads. The dispatch is the only
31109 // landing pad now.
31110 for (auto &LP : MBBLPads)
31111 LP->setIsEHPad(false);
31112
31113 // The instruction is gone now.
31114 MI.eraseFromParent();
31115 return BB;
31116}
31117
31118MachineBasicBlock *
31119X86TargetLowering::EmitInstrWithCustomInserter(MachineInstr &MI,
31120 MachineBasicBlock *BB) const {
31121 MachineFunction *MF = BB->getParent();
31122 const TargetInstrInfo *TII = Subtarget.getInstrInfo();
31123 DebugLoc DL = MI.getDebugLoc();
31124
31125 switch (MI.getOpcode()) {
31126 default: llvm_unreachable("Unexpected instr type to insert");
31127 case X86::TLS_addr32:
31128 case X86::TLS_addr64:
31129 case X86::TLS_base_addr32:
31130 case X86::TLS_base_addr64:
31131 return EmitLoweredTLSAddr(MI, BB);
31132 case X86::RETPOLINE_CALL32:
31133 case X86::RETPOLINE_CALL64:
31134 case X86::RETPOLINE_TCRETURN32:
31135 case X86::RETPOLINE_TCRETURN64:
31136 return EmitLoweredRetpoline(MI, BB);
31137 case X86::CATCHRET:
31138 return EmitLoweredCatchRet(MI, BB);
31139 case X86::CATCHPAD:
31140 return EmitLoweredCatchPad(MI, BB);
31141 case X86::SEG_ALLOCA_32:
31142 case X86::SEG_ALLOCA_64:
31143 return EmitLoweredSegAlloca(MI, BB);
31144 case X86::TLSCall_32:
31145 case X86::TLSCall_64:
31146 return EmitLoweredTLSCall(MI, BB);
31147 case X86::CMOV_FR32:
31148 case X86::CMOV_FR32X:
31149 case X86::CMOV_FR64:
31150 case X86::CMOV_FR64X:
31151 case X86::CMOV_GR8:
31152 case X86::CMOV_GR16:
31153 case X86::CMOV_GR32:
31154 case X86::CMOV_RFP32:
31155 case X86::CMOV_RFP64:
31156 case X86::CMOV_RFP80:
31157 case X86::CMOV_VR128:
31158 case X86::CMOV_VR128X:
31159 case X86::CMOV_VR256:
31160 case X86::CMOV_VR256X:
31161 case X86::CMOV_VR512:
31162 case X86::CMOV_VK2:
31163 case X86::CMOV_VK4:
31164 case X86::CMOV_VK8:
31165 case X86::CMOV_VK16:
31166 case X86::CMOV_VK32:
31167 case X86::CMOV_VK64:
31168 return EmitLoweredSelect(MI, BB);
31169
31170 case X86::RDFLAGS32:
31171 case X86::RDFLAGS64: {
31172 unsigned PushF =
31173 MI.getOpcode() == X86::RDFLAGS32 ? X86::PUSHF32 : X86::PUSHF64;
31174 unsigned Pop = MI.getOpcode() == X86::RDFLAGS32 ? X86::POP32r : X86::POP64r;
31175 MachineInstr *Push = BuildMI(*BB, MI, DL, TII->get(PushF));
31176 // Permit reads of the EFLAGS and DF registers without them being defined.
31177 // This intrinsic exists to read external processor state in flags, such as
31178 // the trap flag, interrupt flag, and direction flag, none of which are
31179 // modeled by the backend.
31180 assert(Push->getOperand(2).getReg() == X86::EFLAGS &&
31181 "Unexpected register in operand!");
31182 Push->getOperand(2).setIsUndef();
31183 assert(Push->getOperand(3).getReg() == X86::DF &&
31184 "Unexpected register in operand!");
31185 Push->getOperand(3).setIsUndef();
31186 BuildMI(*BB, MI, DL, TII->get(Pop), MI.getOperand(0).getReg());
31187
31188 MI.eraseFromParent(); // The pseudo is gone now.
31189 return BB;
31190 }
31191
31192 case X86::WRFLAGS32:
31193 case X86::WRFLAGS64: {
31194 unsigned Push =
31195 MI.getOpcode() == X86::WRFLAGS32 ? X86::PUSH32r : X86::PUSH64r;
31196 unsigned PopF =
31197 MI.getOpcode() == X86::WRFLAGS32 ? X86::POPF32 : X86::POPF64;
31198 BuildMI(*BB, MI, DL, TII->get(Push)).addReg(MI.getOperand(0).getReg());
31199 BuildMI(*BB, MI, DL, TII->get(PopF));
31200
31201 MI.eraseFromParent(); // The pseudo is gone now.
31202 return BB;
31203 }
31204
31205 case X86::FP32_TO_INT16_IN_MEM:
31206 case X86::FP32_TO_INT32_IN_MEM:
31207 case X86::FP32_TO_INT64_IN_MEM:
31208 case X86::FP64_TO_INT16_IN_MEM:
31209 case X86::FP64_TO_INT32_IN_MEM:
31210 case X86::FP64_TO_INT64_IN_MEM:
31211 case X86::FP80_TO_INT16_IN_MEM:
31212 case X86::FP80_TO_INT32_IN_MEM:
31213 case X86::FP80_TO_INT64_IN_MEM: {
31214 // Change the floating point control register to use "round towards zero"
31215 // mode when truncating to an integer value.
31216 int OrigCWFrameIdx = MF->getFrameInfo().CreateStackObject(2, 2, false);
31217 addFrameReference(BuildMI(*BB, MI, DL,
31218 TII->get(X86::FNSTCW16m)), OrigCWFrameIdx);
31219
31220 // Load the old value of the control word...
31221 Register OldCW = MF->getRegInfo().createVirtualRegister(&X86::GR32RegClass);
31222 addFrameReference(BuildMI(*BB, MI, DL, TII->get(X86::MOVZX32rm16), OldCW),
31223 OrigCWFrameIdx);
31224
31225 // OR 0b11 into bits 10 and 11. 0b11 is the encoding for round toward zero.
31226 Register NewCW = MF->getRegInfo().createVirtualRegister(&X86::GR32RegClass);
31227 BuildMI(*BB, MI, DL, TII->get(X86::OR32ri), NewCW)
31228 .addReg(OldCW, RegState::Kill).addImm(0xC00);
31229
31230 // Extract to 16 bits.
31231 Register NewCW16 =
31232 MF->getRegInfo().createVirtualRegister(&X86::GR16RegClass);
31233 BuildMI(*BB, MI, DL, TII->get(TargetOpcode::COPY), NewCW16)
31234 .addReg(NewCW, RegState::Kill, X86::sub_16bit);
31235
31236 // Prepare memory for FLDCW.
31237 int NewCWFrameIdx = MF->getFrameInfo().CreateStackObject(2, 2, false);
31238 addFrameReference(BuildMI(*BB, MI, DL, TII->get(X86::MOV16mr)),
31239 NewCWFrameIdx)
31240 .addReg(NewCW16, RegState::Kill);
31241
31242 // Reload the modified control word now...
31243 addFrameReference(BuildMI(*BB, MI, DL,
31244 TII->get(X86::FLDCW16m)), NewCWFrameIdx);
31245
31246 // Get the X86 opcode to use.
31247 unsigned Opc;
31248 switch (MI.getOpcode()) {
31249 default: llvm_unreachable("illegal opcode!");
31250 case X86::FP32_TO_INT16_IN_MEM: Opc = X86::IST_Fp16m32; break;
31251 case X86::FP32_TO_INT32_IN_MEM: Opc = X86::IST_Fp32m32; break;
31252 case X86::FP32_TO_INT64_IN_MEM: Opc = X86::IST_Fp64m32; break;
31253 case X86::FP64_TO_INT16_IN_MEM: Opc = X86::IST_Fp16m64; break;
31254 case X86::FP64_TO_INT32_IN_MEM: Opc = X86::IST_Fp32m64; break;
31255 case X86::FP64_TO_INT64_IN_MEM: Opc = X86::IST_Fp64m64; break;
31256 case X86::FP80_TO_INT16_IN_MEM: Opc = X86::IST_Fp16m80; break;
31257 case X86::FP80_TO_INT32_IN_MEM: Opc = X86::IST_Fp32m80; break;
31258 case X86::FP80_TO_INT64_IN_MEM: Opc = X86::IST_Fp64m80; break;
31259 }
31260
31261 X86AddressMode AM = getAddressFromInstr(&MI, 0);
31262 addFullAddress(BuildMI(*BB, MI, DL, TII->get(Opc)), AM)
31263 .addReg(MI.getOperand(X86::AddrNumOperands).getReg());
31264
31265 // Reload the original control word now.
31266 addFrameReference(BuildMI(*BB, MI, DL,
31267 TII->get(X86::FLDCW16m)), OrigCWFrameIdx);
31268
31269 MI.eraseFromParent(); // The pseudo instruction is gone now.
31270 return BB;
31271 }
31272
31273 // xbegin
31274 case X86::XBEGIN:
31275 return emitXBegin(MI, BB, Subtarget.getInstrInfo());
31276
31277 case X86::VASTART_SAVE_XMM_REGS:
31278 return EmitVAStartSaveXMMRegsWithCustomInserter(MI, BB);
31279
31280 case X86::VAARG_64:
31281 return EmitVAARG64WithCustomInserter(MI, BB);
31282
31283 case X86::EH_SjLj_SetJmp32:
31284 case X86::EH_SjLj_SetJmp64:
31285 return emitEHSjLjSetJmp(MI, BB);
31286
31287 case X86::EH_SjLj_LongJmp32:
31288 case X86::EH_SjLj_LongJmp64:
31289 return emitEHSjLjLongJmp(MI, BB);
31290
31291 case X86::Int_eh_sjlj_setup_dispatch:
31292 return EmitSjLjDispatchBlock(MI, BB);
31293
31294 case TargetOpcode::STATEPOINT:
31295 // As an implementation detail, STATEPOINT shares the STACKMAP format at
31296 // this point in the process. We diverge later.
31297 return emitPatchPoint(MI, BB);
31298
31299 case TargetOpcode::STACKMAP:
31300 case TargetOpcode::PATCHPOINT:
31301 return emitPatchPoint(MI, BB);
31302
31303 case TargetOpcode::PATCHABLE_EVENT_CALL:
31304 return emitXRayCustomEvent(MI, BB);
31305
31306 case TargetOpcode::PATCHABLE_TYPED_EVENT_CALL:
31307 return emitXRayTypedEvent(MI, BB);
31308
31309 case X86::LCMPXCHG8B: {
31310 const X86RegisterInfo *TRI = Subtarget.getRegisterInfo();
31311 // In addition to the four E[ABCD] registers implied by its encoding,
31312 // CMPXCHG8B requires a memory operand. If the current architecture is
31313 // i686 and the current function needs a base pointer
31314 // - which is ESI for i686 - the register allocator would not be able to
31315 // allocate registers for an address of the form X(%reg, %reg, Y):
31316 // there would never be enough unreserved registers during regalloc
31317 // (without the need for a base ptr the only option would be X(%edi, %esi, Y)).
31318 // We give the register allocator a hand by precomputing the address in
31319 // a new vreg using LEA.
31320
31321 // If it is not i686 or there is no base pointer - nothing to do here.
31322 if (!Subtarget.is32Bit() || !TRI->hasBasePointer(*MF))
31323 return BB;
31324
31325 // Even though this code does not necessarily need the base pointer to
31326 // be ESI, we check for that. The reason: if this assert fails, something
31327 // has changed in the compiler's base pointer handling, which most
31328 // probably has to be addressed here as well.
31329 assert(TRI->getBaseRegister() == X86::ESI &&
31330 "LCMPXCHG8B custom insertion for i686 is written with X86::ESI as a "
31331 "base pointer in mind");
31332
31333 MachineRegisterInfo &MRI = MF->getRegInfo();
31334 MVT SPTy = getPointerTy(MF->getDataLayout());
31335 const TargetRegisterClass *AddrRegClass = getRegClassFor(SPTy);
31336 Register computedAddrVReg = MRI.createVirtualRegister(AddrRegClass);
31337
31338 X86AddressMode AM = getAddressFromInstr(&MI, 0);
31339 // Regalloc does not need any help when the memory operand of CMPXCHG8B
31340 // does not use an index register.
31341 if (AM.IndexReg == X86::NoRegister)
31342 return BB;
31343
31344 // After X86TargetLowering::ReplaceNodeResults CMPXCHG8B is glued to its
31345 // four operand definitions that are E[ABCD] registers. We skip them and
31346 // then insert the LEA.
31347 MachineBasicBlock::reverse_iterator RMBBI(MI.getReverseIterator());
31348 while (RMBBI != BB->rend() && (RMBBI->definesRegister(X86::EAX) ||
31349 RMBBI->definesRegister(X86::EBX) ||
31350 RMBBI->definesRegister(X86::ECX) ||
31351 RMBBI->definesRegister(X86::EDX))) {
31352 ++RMBBI;
31353 }
31354 MachineBasicBlock::iterator MBBI(RMBBI);
31355 addFullAddress(
31356 BuildMI(*BB, *MBBI, DL, TII->get(X86::LEA32r), computedAddrVReg), AM);
31357
31358 setDirectAddressInInstr(&MI, 0, computedAddrVReg);
31359
31360 return BB;
31361 }
31362 case X86::LCMPXCHG16B:
31363 return BB;
31364 case X86::LCMPXCHG8B_SAVE_EBX:
31365 case X86::LCMPXCHG16B_SAVE_RBX: {
31366 unsigned BasePtr =
31367 MI.getOpcode() == X86::LCMPXCHG8B_SAVE_EBX ? X86::EBX : X86::RBX;
31368 if (!BB->isLiveIn(BasePtr))
31369 BB->addLiveIn(BasePtr);
31370 return BB;
31371 }
31372 }
31373}
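Note on the FP*_TO_INT*_IN_MEM cases above: OR-ing 0xC00 into the x87 control word sets the rounding-control field (bits 10 and 11) to 0b11, i.e. round toward zero, so the subsequent IST_Fp* store truncates. A one-line sketch (illustrative only):

#include <cstdint>

static uint16_t roundTowardZeroControlWord(uint16_t OldCW) {
  return OldCW | 0xC00;   // e.g. the common default 0x037F becomes 0x0F7F
}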
31374
31375//===----------------------------------------------------------------------===//
31376// X86 Optimization Hooks
31377//===----------------------------------------------------------------------===//
31378
31379bool
31380X86TargetLowering::targetShrinkDemandedConstant(SDValue Op,
31381 const APInt &Demanded,
31382 TargetLoweringOpt &TLO) const {
31383 // Only optimize Ands to prevent shrinking a constant that could be
31384 // matched by movzx.
31385 if (Op.getOpcode() != ISD::AND)
31386 return false;
31387
31388 EVT VT = Op.getValueType();
31389
31390 // Ignore vectors.
31391 if (VT.isVector())
31392 return false;
31393
31394 unsigned Size = VT.getSizeInBits();
31395
31396 // Make sure the RHS really is a constant.
31397 ConstantSDNode *C = dyn_cast<ConstantSDNode>(Op.getOperand(1));
31398 if (!C)
31399 return false;
31400
31401 const APInt &Mask = C->getAPIntValue();
31402
31403 // Clear all non-demanded bits initially.
31404 APInt ShrunkMask = Mask & Demanded;
31405
31406 // Find the width of the shrunk mask.
31407 unsigned Width = ShrunkMask.getActiveBits();
31408
31409 // If the mask is all 0s there's nothing to do here.
31410 if (Width == 0)
31411 return false;
31412
31413 // Find the next power of 2 width, rounding up to a byte.
31414 Width = PowerOf2Ceil(std::max(Width, 8U));
31415 // Truncate the width to size to handle illegal types.
31416 Width = std::min(Width, Size);
31417
31418 // Calculate a possible zero extend mask for this constant.
31419 APInt ZeroExtendMask = APInt::getLowBitsSet(Size, Width);
31420
31421 // If we aren't changing the mask, just return true to keep it and prevent
31422 // the caller from optimizing.
31423 if (ZeroExtendMask == Mask)
31424 return true;
31425
31426 // Make sure the new mask can be represented by a combination of mask bits
31427 // and non-demanded bits.
31428 if (!ZeroExtendMask.isSubsetOf(Mask | ~Demanded))
31429 return false;
31430
31431 // Replace the constant with the zero extend mask.
31432 SDLoc DL(Op);
31433 SDValue NewC = TLO.DAG.getConstant(ZeroExtendMask, DL, VT);
31434 SDValue NewOp = TLO.DAG.getNode(ISD::AND, DL, VT, Op.getOperand(0), NewC);
31435 return TLO.CombineTo(Op, NewOp);
31436}
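A worked example of the widening performed above (illustrative only, not part of this file): for a 32-bit AND with mask 0xFF0 where only the bits 0xFF0 are demanded, the constant is widened to 0xFFFF, which the backend can then match as a movzwl.

#include "llvm/ADT/APInt.h"
#include "llvm/Support/MathExtras.h"
#include <algorithm>

static llvm::APInt widenAndMask(const llvm::APInt &Mask,
                                const llvm::APInt &Demanded) {
  unsigned Size = Mask.getBitWidth();                        // 32
  llvm::APInt Shrunk = Mask & Demanded;                      // 0xFF0
  unsigned Width = Shrunk.getActiveBits();                   // 12
  Width = (unsigned)llvm::PowerOf2Ceil(std::max(Width, 8u)); // 16
  Width = std::min(Width, Size);
  // The real routine also checks the result is a subset of Mask | ~Demanded
  // before replacing the constant.
  return llvm::APInt::getLowBitsSet(Size, Width);            // 0xFFFF
}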
31437
31438void X86TargetLowering::computeKnownBitsForTargetNode(const SDValue Op,
31439 KnownBits &Known,
31440 const APInt &DemandedElts,
31441 const SelectionDAG &DAG,
31442 unsigned Depth) const {
31443 unsigned BitWidth = Known.getBitWidth();
31444 unsigned Opc = Op.getOpcode();
31445 EVT VT = Op.getValueType();
31446 assert((Opc >= ISD::BUILTIN_OP_END ||
31447 Opc == ISD::INTRINSIC_WO_CHAIN ||
31448 Opc == ISD::INTRINSIC_W_CHAIN ||
31449 Opc == ISD::INTRINSIC_VOID) &&
31450 "Should use MaskedValueIsZero if you don't know whether Op"
31451 " is a target node!");
31452
31453 Known.resetAll();
31454 switch (Opc) {
31455 default: break;
31456 case X86ISD::SETCC:
31457 Known.Zero.setBitsFrom(1);
31458 break;
31459 case X86ISD::MOVMSK: {
31460 unsigned NumLoBits = Op.getOperand(0).getValueType().getVectorNumElements();
31461 Known.Zero.setBitsFrom(NumLoBits);
31462 break;
31463 }
31464 case X86ISD::PEXTRB:
31465 case X86ISD::PEXTRW: {
31466 SDValue Src = Op.getOperand(0);
31467 EVT SrcVT = Src.getValueType();
31468 APInt DemandedElt = APInt::getOneBitSet(SrcVT.getVectorNumElements(),
31469 Op.getConstantOperandVal(1));
31470 Known = DAG.computeKnownBits(Src, DemandedElt, Depth + 1);
31471 Known = Known.zextOrTrunc(BitWidth, false);
31472 Known.Zero.setBitsFrom(SrcVT.getScalarSizeInBits());
31473 break;
31474 }
31475 case X86ISD::VSRAI:
31476 case X86ISD::VSHLI:
31477 case X86ISD::VSRLI: {
31478 if (auto *ShiftImm = dyn_cast<ConstantSDNode>(Op.getOperand(1))) {
31479 if (ShiftImm->getAPIntValue().uge(VT.getScalarSizeInBits())) {
31480 Known.setAllZero();
31481 break;
31482 }
31483
31484 Known = DAG.computeKnownBits(Op.getOperand(0), DemandedElts, Depth + 1);
31485 unsigned ShAmt = ShiftImm->getZExtValue();
31486 if (Opc == X86ISD::VSHLI) {
31487 Known.Zero <<= ShAmt;
31488 Known.One <<= ShAmt;
31489 // Low bits are known zero.
31490 Known.Zero.setLowBits(ShAmt);
31491 } else if (Opc == X86ISD::VSRLI) {
31492 Known.Zero.lshrInPlace(ShAmt);
31493 Known.One.lshrInPlace(ShAmt);
31494 // High bits are known zero.
31495 Known.Zero.setHighBits(ShAmt);
31496 } else {
31497 Known.Zero.ashrInPlace(ShAmt);
31498 Known.One.ashrInPlace(ShAmt);
31499 }
31500 }
31501 break;
31502 }
31503 case X86ISD::PACKUS: {
31504 // PACKUS is just a truncation if the upper half is zero.
31505 APInt DemandedLHS, DemandedRHS;
31506 getPackDemandedElts(VT, DemandedElts, DemandedLHS, DemandedRHS);
31507
31508 Known.One = APInt::getAllOnesValue(BitWidth * 2);
31509 Known.Zero = APInt::getAllOnesValue(BitWidth * 2);
31510
31511 KnownBits Known2;
31512 if (!!DemandedLHS) {
31513 Known2 = DAG.computeKnownBits(Op.getOperand(0), DemandedLHS, Depth + 1);
31514 Known.One &= Known2.One;
31515 Known.Zero &= Known2.Zero;
31516 }
31517 if (!!DemandedRHS) {
31518 Known2 = DAG.computeKnownBits(Op.getOperand(1), DemandedRHS, Depth + 1);
31519 Known.One &= Known2.One;
31520 Known.Zero &= Known2.Zero;
31521 }
31522
31523 if (Known.countMinLeadingZeros() < BitWidth)
31524 Known.resetAll();
31525 Known = Known.trunc(BitWidth);
31526 break;
31527 }
31528 case X86ISD::ANDNP: {
31529 KnownBits Known2;
31530 Known = DAG.computeKnownBits(Op.getOperand(1), DemandedElts, Depth + 1);
31531 Known2 = DAG.computeKnownBits(Op.getOperand(0), DemandedElts, Depth + 1);
31532
31533 // ANDNP = (~X & Y);
31534 Known.One &= Known2.Zero;
31535 Known.Zero |= Known2.One;
31536 break;
31537 }
31538 case X86ISD::FOR: {
31539 KnownBits Known2;
31540 Known = DAG.computeKnownBits(Op.getOperand(1), DemandedElts, Depth + 1);
31541 Known2 = DAG.computeKnownBits(Op.getOperand(0), DemandedElts, Depth + 1);
31542
31543 // Output known-0 bits are only known if clear in both the LHS & RHS.
31544 Known.Zero &= Known2.Zero;
31545 // Output known-1 are known to be set if set in either the LHS | RHS.
31546 Known.One |= Known2.One;
31547 break;
31548 }
31549 case X86ISD::PSADBW: {
31550 assert(VT.getScalarType() == MVT::i64 &&
31551 Op.getOperand(0).getValueType().getScalarType() == MVT::i8 &&
31552 "Unexpected PSADBW types");
31553
31554 // PSADBW - fills low 16 bits and zeros upper 48 bits of each i64 result.
31555 Known.Zero.setBitsFrom(16);
31556 break;
31557 }
31558 case X86ISD::CMOV: {
31559 Known = DAG.computeKnownBits(Op.getOperand(1), Depth + 1);
31560 // If we don't know any bits, early out.
31561 if (Known.isUnknown())
31562 break;
31563 KnownBits Known2 = DAG.computeKnownBits(Op.getOperand(0), Depth + 1);
31564
31565 // Only known if known in both the LHS and RHS.
31566 Known.One &= Known2.One;
31567 Known.Zero &= Known2.Zero;
31568 break;
31569 }
31570 }
31571
31572 // Handle target shuffles.
31573 // TODO - use resolveTargetShuffleInputs once we can limit recursive depth.
31574 if (isTargetShuffle(Opc)) {
31575 bool IsUnary;
31576 SmallVector<int, 64> Mask;
31577 SmallVector<SDValue, 2> Ops;
31578 if (getTargetShuffleMask(Op.getNode(), VT.getSimpleVT(), true, Ops, Mask,
31579 IsUnary)) {
31580 unsigned NumOps = Ops.size();
31581 unsigned NumElts = VT.getVectorNumElements();
31582 if (Mask.size() == NumElts) {
31583 SmallVector<APInt, 2> DemandedOps(NumOps, APInt(NumElts, 0));
31584 Known.Zero.setAllBits(); Known.One.setAllBits();
31585 for (unsigned i = 0; i != NumElts; ++i) {
31586 if (!DemandedElts[i])
31587 continue;
31588 int M = Mask[i];
31589 if (M == SM_SentinelUndef) {
31590 // For UNDEF elements, we don't know anything about the common state
31591 // of the shuffle result.
31592 Known.resetAll();
31593 break;
31594 } else if (M == SM_SentinelZero) {
31595 Known.One.clearAllBits();
31596 continue;
31597 }
31598 assert(0 <= M && (unsigned)M < (NumOps * NumElts) &&
31599 "Shuffle index out of range");
31600
31601 unsigned OpIdx = (unsigned)M / NumElts;
31602 unsigned EltIdx = (unsigned)M % NumElts;
31603 if (Ops[OpIdx].getValueType() != VT) {
31604 // TODO - handle target shuffle ops with different value types.
31605 Known.resetAll();
31606 break;
31607 }
31608 DemandedOps[OpIdx].setBit(EltIdx);
31609 }
31610 // Known bits are the values that are shared by every demanded element.
31611 for (unsigned i = 0; i != NumOps && !Known.isUnknown(); ++i) {
31612 if (!DemandedOps[i])
31613 continue;
31614 KnownBits Known2 =
31615 DAG.computeKnownBits(Ops[i], DemandedOps[i], Depth + 1);
31616 Known.One &= Known2.One;
31617 Known.Zero &= Known2.Zero;
31618 }
31619 }
31620 }
31621 }
31622}
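A small concrete instance of the MOVMSK case above (illustrative only): for X86ISD::MOVMSK of a v4f32 input, the i32 result carries one mask bit per vector element, so bits 4 and up are known zero.

#include "llvm/Support/KnownBits.h"

static llvm::KnownBits knownBitsForMovmskV4f32() {
  llvm::KnownBits Known(32); // BitWidth of the scalar result
  Known.Zero.setBitsFrom(4); // NumLoBits = 4 vector elements
  return Known;              // bits [31:4] known zero, bits [3:0] unknown
}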
31623
31624unsigned X86TargetLowering::ComputeNumSignBitsForTargetNode(
31625 SDValue Op, const APInt &DemandedElts, const SelectionDAG &DAG,
31626 unsigned Depth) const {
31627 EVT VT = Op.getValueType();
31628 unsigned VTBits = VT.getScalarSizeInBits();
31629 unsigned Opcode = Op.getOpcode();
31630 switch (Opcode) {
31631 case X86ISD::SETCC_CARRY:
31632 // SETCC_CARRY sets the dest to ~0 for true or 0 for false.
31633 return VTBits;
31634
31635 case X86ISD::VTRUNC: {
31636 // TODO: Add DemandedElts support.
31637 SDValue Src = Op.getOperand(0);
31638 unsigned NumSrcBits = Src.getScalarValueSizeInBits();
31639 assert(VTBits < NumSrcBits && "Illegal truncation input type");
31640 unsigned Tmp = DAG.ComputeNumSignBits(Src, Depth + 1);
31641 if (Tmp > (NumSrcBits - VTBits))
31642 return Tmp - (NumSrcBits - VTBits);
31643 return 1;
31644 }
31645
31646 case X86ISD::PACKSS: {
31647 // PACKSS is just a truncation if the sign bits extend to the packed size.
31648 APInt DemandedLHS, DemandedRHS;
31649 getPackDemandedElts(Op.getValueType(), DemandedElts, DemandedLHS,
31650 DemandedRHS);
31651
31652 unsigned SrcBits = Op.getOperand(0).getScalarValueSizeInBits();
31653 unsigned Tmp0 = SrcBits, Tmp1 = SrcBits;
31654 if (!!DemandedLHS)
31655 Tmp0 = DAG.ComputeNumSignBits(Op.getOperand(0), DemandedLHS, Depth + 1);
31656 if (!!DemandedRHS)
31657 Tmp1 = DAG.ComputeNumSignBits(Op.getOperand(1), DemandedRHS, Depth + 1);
31658 unsigned Tmp = std::min(Tmp0, Tmp1);
31659 if (Tmp > (SrcBits - VTBits))
31660 return Tmp - (SrcBits - VTBits);
31661 return 1;
31662 }
31663
31664 case X86ISD::VSHLI: {
31665 SDValue Src = Op.getOperand(0);
31666 const APInt &ShiftVal = Op.getConstantOperandAPInt(1);
31667 if (ShiftVal.uge(VTBits))
31668 return VTBits; // Shifted all bits out --> zero.
31669 unsigned Tmp = DAG.ComputeNumSignBits(Src, DemandedElts, Depth + 1);
31670 if (ShiftVal.uge(Tmp))
31671 return 1; // Shifted all sign bits out --> unknown.
31672 return Tmp - ShiftVal.getZExtValue();
31673 }
31674
31675 case X86ISD::VSRAI: {
31676 SDValue Src = Op.getOperand(0);
31677 APInt ShiftVal = Op.getConstantOperandAPInt(1);
31678 if (ShiftVal.uge(VTBits - 1))
31679 return VTBits; // Sign splat.
31680 unsigned Tmp = DAG.ComputeNumSignBits(Src, DemandedElts, Depth + 1);
31681 ShiftVal += Tmp;
31682 return ShiftVal.uge(VTBits) ? VTBits : ShiftVal.getZExtValue();
31683 }
31684
31685 case X86ISD::PCMPGT:
31686 case X86ISD::PCMPEQ:
31687 case X86ISD::CMPP:
31688 case X86ISD::VPCOM:
31689 case X86ISD::VPCOMU:
31690 // Vector compares return zero/all-bits result values.
31691 return VTBits;
31692
31693 case X86ISD::ANDNP: {
31694 unsigned Tmp0 =
31695 DAG.ComputeNumSignBits(Op.getOperand(0), DemandedElts, Depth + 1);
31696 if (Tmp0 == 1) return 1; // Early out.
31697 unsigned Tmp1 =
31698 DAG.ComputeNumSignBits(Op.getOperand(1), DemandedElts, Depth + 1);
31699 return std::min(Tmp0, Tmp1);
31700 }
31701
31702 case X86ISD::CMOV: {
31703 unsigned Tmp0 = DAG.ComputeNumSignBits(Op.getOperand(0), Depth+1);
31704 if (Tmp0 == 1) return 1; // Early out.
31705 unsigned Tmp1 = DAG.ComputeNumSignBits(Op.getOperand(1), Depth+1);
31706 return std::min(Tmp0, Tmp1);
31707 }
31708 }
31709
31710 // Handle target shuffles.
31711 // TODO - use resolveTargetShuffleInputs once we can limit recursive depth.
31712 if (isTargetShuffle(Opcode)) {
31713 bool IsUnary;
31714 SmallVector<int, 64> Mask;
31715 SmallVector<SDValue, 2> Ops;
31716 if (getTargetShuffleMask(Op.getNode(), VT.getSimpleVT(), true, Ops, Mask,
31717 IsUnary)) {
31718 unsigned NumOps = Ops.size();
31719 unsigned NumElts = VT.getVectorNumElements();
31720 if (Mask.size() == NumElts) {
31721 SmallVector<APInt, 2> DemandedOps(NumOps, APInt(NumElts, 0));
31722 for (unsigned i = 0; i != NumElts; ++i) {
31723 if (!DemandedElts[i])
31724 continue;
31725 int M = Mask[i];
31726 if (M == SM_SentinelUndef) {
31727 // For UNDEF elements, we don't know anything about the common state
31728 // of the shuffle result.
31729 return 1;
31730 } else if (M == SM_SentinelZero) {
31731 // Zero = all sign bits.
31732 continue;
31733 }
31734 assert(0 <= M && (unsigned)M < (NumOps * NumElts) &&
31735 "Shuffle index out of range");
31736
31737 unsigned OpIdx = (unsigned)M / NumElts;
31738 unsigned EltIdx = (unsigned)M % NumElts;
31739 if (Ops[OpIdx].getValueType() != VT) {
31740 // TODO - handle target shuffle ops with different value types.
31741 return 1;
31742 }
31743 DemandedOps[OpIdx].setBit(EltIdx);
31744 }
31745 unsigned Tmp0 = VTBits;
31746 for (unsigned i = 0; i != NumOps && Tmp0 > 1; ++i) {
31747 if (!DemandedOps[i])
31748 continue;
31749 unsigned Tmp1 =
31750 DAG.ComputeNumSignBits(Ops[i], DemandedOps[i], Depth + 1);
31751 Tmp0 = std::min(Tmp0, Tmp1);
31752 }
31753 return Tmp0;
31754 }
31755 }
31756 }
31757
31758 // Fallback case.
31759 return 1;
31760}
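The VSRAI case above reduces to simple arithmetic: an arithmetic right shift by a constant adds that many copies of the sign bit. A sketch (illustrative only) of the resulting count:

#include <algorithm>
#include <cstdint>

static unsigned numSignBitsAfterVSRAI(unsigned SrcSignBits, uint64_t ShiftAmt,
                                      unsigned VTBits) {
  if (ShiftAmt >= VTBits - 1)
    return VTBits; // sign splat: every bit is a copy of the sign bit
  return (unsigned)std::min<uint64_t>(VTBits, SrcSignBits + ShiftAmt);
}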
31761
31762SDValue X86TargetLowering::unwrapAddress(SDValue N) const {
31763 if (N->getOpcode() == X86ISD::Wrapper || N->getOpcode() == X86ISD::WrapperRIP)
31764 return N->getOperand(0);
31765 return N;
31766}
31767
31768// Attempt to match a combined shuffle mask against supported unary shuffle
31769// instructions.
31770// TODO: Investigate sharing more of this with shuffle lowering.
31771static bool matchUnaryShuffle(MVT MaskVT, ArrayRef<int> Mask,
31772 bool AllowFloatDomain, bool AllowIntDomain,
31773 SDValue &V1, const SDLoc &DL, SelectionDAG &DAG,
31774 const X86Subtarget &Subtarget, unsigned &Shuffle,
31775 MVT &SrcVT, MVT &DstVT) {
31776 unsigned NumMaskElts = Mask.size();
31777 unsigned MaskEltSize = MaskVT.getScalarSizeInBits();
31778
31779 // Match against a VZEXT_MOVL vXi32 zero-extending instruction.
31780 if (MaskEltSize == 32 && isUndefOrEqual(Mask[0], 0) &&
31781 isUndefOrZero(Mask[1]) && isUndefInRange(Mask, 2, NumMaskElts - 2)) {
31782 Shuffle = X86ISD::VZEXT_MOVL;
31783 SrcVT = DstVT = !Subtarget.hasSSE2() ? MVT::v4f32 : MaskVT;
31784 return true;
31785 }
31786
31787 // Match against an ANY/ZERO_EXTEND_VECTOR_INREG instruction.
31788 // TODO: Add 512-bit vector support (split AVX512F and AVX512BW).
31789 if (AllowIntDomain && ((MaskVT.is128BitVector() && Subtarget.hasSSE41()) ||
31790 (MaskVT.is256BitVector() && Subtarget.hasInt256()))) {
31791 unsigned MaxScale = 64 / MaskEltSize;
31792 for (unsigned Scale = 2; Scale <= MaxScale; Scale *= 2) {
31793 bool MatchAny = true;
31794 bool MatchZero = true;
31795 unsigned NumDstElts = NumMaskElts / Scale;
31796 for (unsigned i = 0; i != NumDstElts && (MatchAny || MatchZero); ++i) {
31797 if (!isUndefOrEqual(Mask[i * Scale], (int)i)) {
31798 MatchAny = MatchZero = false;
31799 break;
31800 }
31801 MatchAny &= isUndefInRange(Mask, (i * Scale) + 1, Scale - 1);
31802 MatchZero &= isUndefOrZeroInRange(Mask, (i * Scale) + 1, Scale - 1);
31803 }
31804 if (MatchAny || MatchZero) {
31805 assert(MatchZero && "Failed to match zext but matched aext?");
31806 unsigned SrcSize = std::max(128u, NumDstElts * MaskEltSize);
31807 MVT ScalarTy = MaskVT.isInteger() ? MaskVT.getScalarType() :
31808 MVT::getIntegerVT(MaskEltSize);
31809 SrcVT = MVT::getVectorVT(ScalarTy, SrcSize / MaskEltSize);
31810
31811 if (SrcVT.getSizeInBits() != MaskVT.getSizeInBits())
31812 V1 = extractSubVector(V1, 0, DAG, DL, SrcSize);
31813
31814 Shuffle = unsigned(MatchAny ? ISD::ANY_EXTEND : ISD::ZERO_EXTEND);
31815 if (SrcVT.getVectorNumElements() != NumDstElts)
31816 Shuffle = getOpcode_EXTEND_VECTOR_INREG(Shuffle);
31817
31818 DstVT = MVT::getIntegerVT(Scale * MaskEltSize);
31819 DstVT = MVT::getVectorVT(DstVT, NumDstElts);
31820 return true;
31821 }
31822 }
31823 }
31824
31825 // Match against a VZEXT_MOVL instruction, SSE1 only supports 32-bits (MOVSS).
31826 if (((MaskEltSize == 32) || (MaskEltSize == 64 && Subtarget.hasSSE2())) &&
31827 isUndefOrEqual(Mask[0], 0) &&
31828 isUndefOrZeroInRange(Mask, 1, NumMaskElts - 1)) {
31829 Shuffle = X86ISD::VZEXT_MOVL;
31830 SrcVT = DstVT = !Subtarget.hasSSE2() ? MVT::v4f32 : MaskVT;
31831 return true;
31832 }
31833
31834 // Check if we have SSE3, which will let us use MOVDDUP etc. These
31835 // instructions are no slower than UNPCKLPD but have the option to
31836 // fold the input operand even from an unaligned memory load.
31837 if (MaskVT.is128BitVector() && Subtarget.hasSSE3() && AllowFloatDomain) {
31838 if (isTargetShuffleEquivalent(Mask, {0, 0})) {
31839 Shuffle = X86ISD::MOVDDUP;
31840 SrcVT = DstVT = MVT::v2f64;
31841 return true;
31842 }
31843 if (isTargetShuffleEquivalent(Mask, {0, 0, 2, 2})) {
31844 Shuffle = X86ISD::MOVSLDUP;
31845 SrcVT = DstVT = MVT::v4f32;
31846 return true;
31847 }
31848 if (isTargetShuffleEquivalent(Mask, {1, 1, 3, 3})) {
31849 Shuffle = X86ISD::MOVSHDUP;
31850 SrcVT = DstVT = MVT::v4f32;
31851 return true;
31852 }
31853 }
31854
31855 if (MaskVT.is256BitVector() && AllowFloatDomain) {
31856 assert(Subtarget.hasAVX() && "AVX required for 256-bit vector shuffles");
31857 if (isTargetShuffleEquivalent(Mask, {0, 0, 2, 2})) {
31858 Shuffle = X86ISD::MOVDDUP;
31859 SrcVT = DstVT = MVT::v4f64;
31860 return true;
31861 }
31862 if (isTargetShuffleEquivalent(Mask, {0, 0, 2, 2, 4, 4, 6, 6})) {
31863 Shuffle = X86ISD::MOVSLDUP;
31864 SrcVT = DstVT = MVT::v8f32;
31865 return true;
31866 }
31867 if (isTargetShuffleEquivalent(Mask, {1, 1, 3, 3, 5, 5, 7, 7})) {
31868 Shuffle = X86ISD::MOVSHDUP;
31869 SrcVT = DstVT = MVT::v8f32;
31870 return true;
31871 }
31872 }
31873
31874 if (MaskVT.is512BitVector() && AllowFloatDomain) {
31875    assert(Subtarget.hasAVX512() &&
31876           "AVX512 required for 512-bit vector shuffles");
31877 if (isTargetShuffleEquivalent(Mask, {0, 0, 2, 2, 4, 4, 6, 6})) {
31878 Shuffle = X86ISD::MOVDDUP;
31879 SrcVT = DstVT = MVT::v8f64;
31880 return true;
31881 }
31882 if (isTargetShuffleEquivalent(
31883 Mask, {0, 0, 2, 2, 4, 4, 6, 6, 8, 8, 10, 10, 12, 12, 14, 14})) {
31884 Shuffle = X86ISD::MOVSLDUP;
31885 SrcVT = DstVT = MVT::v16f32;
31886 return true;
31887 }
31888 if (isTargetShuffleEquivalent(
31889 Mask, {1, 1, 3, 3, 5, 5, 7, 7, 9, 9, 11, 11, 13, 13, 15, 15})) {
31890 Shuffle = X86ISD::MOVSHDUP;
31891 SrcVT = DstVT = MVT::v16f32;
31892 return true;
31893 }
31894 }
31895
31896 return false;
31897}
31898
31899// Attempt to match a combined shuffle mask against supported unary immediate
31900// permute instructions.
31901// TODO: Investigate sharing more of this with shuffle lowering.
31902static bool matchUnaryPermuteShuffle(MVT MaskVT, ArrayRef<int> Mask,
31903 const APInt &Zeroable,
31904 bool AllowFloatDomain, bool AllowIntDomain,
31905 const X86Subtarget &Subtarget,
31906 unsigned &Shuffle, MVT &ShuffleVT,
31907 unsigned &PermuteImm) {
31908 unsigned NumMaskElts = Mask.size();
31909 unsigned InputSizeInBits = MaskVT.getSizeInBits();
31910 unsigned MaskScalarSizeInBits = InputSizeInBits / NumMaskElts;
31911 MVT MaskEltVT = MVT::getIntegerVT(MaskScalarSizeInBits);
31912
31913 bool ContainsZeros =
31914 llvm::any_of(Mask, [](int M) { return M == SM_SentinelZero; });
31915
31916  // Handle VPERMI/VPERMILPD vXi64/vXf64 patterns.
31917 if (!ContainsZeros && MaskScalarSizeInBits == 64) {
31918 // Check for lane crossing permutes.
31919 if (is128BitLaneCrossingShuffleMask(MaskEltVT, Mask)) {
31920 // PERMPD/PERMQ permutes within a 256-bit vector (AVX2+).
31921 if (Subtarget.hasAVX2() && MaskVT.is256BitVector()) {
31922 Shuffle = X86ISD::VPERMI;
31923 ShuffleVT = (AllowFloatDomain ? MVT::v4f64 : MVT::v4i64);
31924 PermuteImm = getV4X86ShuffleImm(Mask);
31925 return true;
31926 }
31927 if (Subtarget.hasAVX512() && MaskVT.is512BitVector()) {
31928 SmallVector<int, 4> RepeatedMask;
31929 if (is256BitLaneRepeatedShuffleMask(MVT::v8f64, Mask, RepeatedMask)) {
31930 Shuffle = X86ISD::VPERMI;
31931 ShuffleVT = (AllowFloatDomain ? MVT::v8f64 : MVT::v8i64);
31932 PermuteImm = getV4X86ShuffleImm(RepeatedMask);
31933 return true;
31934 }
31935 }
31936 } else if (AllowFloatDomain && Subtarget.hasAVX()) {
31937 // VPERMILPD can permute with a non-repeating shuffle.
31938 Shuffle = X86ISD::VPERMILPI;
31939 ShuffleVT = MVT::getVectorVT(MVT::f64, Mask.size());
31940 PermuteImm = 0;
31941 for (int i = 0, e = Mask.size(); i != e; ++i) {
31942 int M = Mask[i];
31943 if (M == SM_SentinelUndef)
31944 continue;
31945        assert(((M / 2) == (i / 2)) && "Out of range shuffle mask index");
31946 PermuteImm |= (M & 1) << i;
31947 }
31948 return true;
31949 }
31950 }
31951
31952 // Handle PSHUFD/VPERMILPI vXi32/vXf32 repeated patterns.
31953  // AVX introduced the VPERMILPD/VPERMILPS float permutes; before then we
31954  // had to use 2-input SHUFPD/SHUFPS shuffles (not handled here).
31955 if ((MaskScalarSizeInBits == 64 || MaskScalarSizeInBits == 32) &&
31956 !ContainsZeros && (AllowIntDomain || Subtarget.hasAVX())) {
31957 SmallVector<int, 4> RepeatedMask;
31958 if (is128BitLaneRepeatedShuffleMask(MaskEltVT, Mask, RepeatedMask)) {
31959 // Narrow the repeated mask to create 32-bit element permutes.
31960 SmallVector<int, 4> WordMask = RepeatedMask;
31961 if (MaskScalarSizeInBits == 64)
31962 scaleShuffleMask<int>(2, RepeatedMask, WordMask);
31963
31964 Shuffle = (AllowIntDomain ? X86ISD::PSHUFD : X86ISD::VPERMILPI);
31965 ShuffleVT = (AllowIntDomain ? MVT::i32 : MVT::f32);
31966 ShuffleVT = MVT::getVectorVT(ShuffleVT, InputSizeInBits / 32);
31967 PermuteImm = getV4X86ShuffleImm(WordMask);
31968 return true;
31969 }
31970 }
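  // Worked example (illustrative): a repeated v4i32 mask {2, 3, 0, 1} gives
  // PermuteImm = 2 | (3 << 2) | (0 << 4) | (1 << 6) = 0x4E, i.e. pshufd $0x4e.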
31971
31972 // Handle PSHUFLW/PSHUFHW vXi16 repeated patterns.
31973 if (!ContainsZeros && AllowIntDomain && MaskScalarSizeInBits == 16) {
31974 SmallVector<int, 4> RepeatedMask;
31975 if (is128BitLaneRepeatedShuffleMask(MaskEltVT, Mask, RepeatedMask)) {
31976 ArrayRef<int> LoMask(RepeatedMask.data() + 0, 4);
31977 ArrayRef<int> HiMask(RepeatedMask.data() + 4, 4);
31978
31979 // PSHUFLW: permute lower 4 elements only.
31980 if (isUndefOrInRange(LoMask, 0, 4) &&
31981 isSequentialOrUndefInRange(HiMask, 0, 4, 4)) {
31982 Shuffle = X86ISD::PSHUFLW;
31983 ShuffleVT = MVT::getVectorVT(MVT::i16, InputSizeInBits / 16);
31984 PermuteImm = getV4X86ShuffleImm(LoMask);
31985 return true;
31986 }
31987
31988 // PSHUFHW: permute upper 4 elements only.
31989 if (isUndefOrInRange(HiMask, 4, 8) &&
31990 isSequentialOrUndefInRange(LoMask, 0, 4, 0)) {
31991 // Offset the HiMask so that we can create the shuffle immediate.
31992 int OffsetHiMask[4];
31993 for (int i = 0; i != 4; ++i)
31994 OffsetHiMask[i] = (HiMask[i] < 0 ? HiMask[i] : HiMask[i] - 4);
31995
31996 Shuffle = X86ISD::PSHUFHW;
31997 ShuffleVT = MVT::getVectorVT(MVT::i16, InputSizeInBits / 16);
31998 PermuteImm = getV4X86ShuffleImm(OffsetHiMask);
31999 return true;
32000 }
32001 }
32002 }
32003
32004 // Attempt to match against byte/bit shifts.
32005 // FIXME: Add 512-bit support.
32006 if (AllowIntDomain && ((MaskVT.is128BitVector() && Subtarget.hasSSE2()) ||
32007 (MaskVT.is256BitVector() && Subtarget.hasAVX2()))) {
32008 int ShiftAmt = matchShuffleAsShift(ShuffleVT, Shuffle, MaskScalarSizeInBits,
32009 Mask, 0, Zeroable, Subtarget);
32010 if (0 < ShiftAmt) {
32011 PermuteImm = (unsigned)ShiftAmt;
32012 return true;
32013 }
32014 }
32015
32016 return false;
32017}
32018
32019// Attempt to match a combined unary shuffle mask against supported binary
32020// shuffle instructions.
32021// TODO: Investigate sharing more of this with shuffle lowering.
32022static bool matchBinaryShuffle(MVT MaskVT, ArrayRef<int> Mask,
32023 bool AllowFloatDomain, bool AllowIntDomain,
32024 SDValue &V1, SDValue &V2, const SDLoc &DL,
32025 SelectionDAG &DAG, const X86Subtarget &Subtarget,
32026 unsigned &Shuffle, MVT &SrcVT, MVT &DstVT,
32027 bool IsUnary) {
32028 unsigned EltSizeInBits = MaskVT.getScalarSizeInBits();
32029
32030 if (MaskVT.is128BitVector()) {
32031 if (isTargetShuffleEquivalent(Mask, {0, 0}) && AllowFloatDomain) {
32032 V2 = V1;
32033 V1 = (SM_SentinelUndef == Mask[0] ? DAG.getUNDEF(MVT::v4f32) : V1);
32034 Shuffle = Subtarget.hasSSE2() ? X86ISD::UNPCKL : X86ISD::MOVLHPS;
32035 SrcVT = DstVT = Subtarget.hasSSE2() ? MVT::v2f64 : MVT::v4f32;
32036 return true;
32037 }
32038 if (isTargetShuffleEquivalent(Mask, {1, 1}) && AllowFloatDomain) {
32039 V2 = V1;
32040 Shuffle = Subtarget.hasSSE2() ? X86ISD::UNPCKH : X86ISD::MOVHLPS;
32041 SrcVT = DstVT = Subtarget.hasSSE2() ? MVT::v2f64 : MVT::v4f32;
32042 return true;
32043 }
32044 if (isTargetShuffleEquivalent(Mask, {0, 3}) && Subtarget.hasSSE2() &&
32045 (AllowFloatDomain || !Subtarget.hasSSE41())) {
32046 std::swap(V1, V2);
32047 Shuffle = X86ISD::MOVSD;
32048 SrcVT = DstVT = MVT::v2f64;
32049 return true;
32050 }
32051 if (isTargetShuffleEquivalent(Mask, {4, 1, 2, 3}) &&
32052 (AllowFloatDomain || !Subtarget.hasSSE41())) {
32053 Shuffle = X86ISD::MOVSS;
32054 SrcVT = DstVT = MVT::v4f32;
32055 return true;
32056 }
32057 }
32058
32059  // Attempt to match against either a unary or binary PACKSS/PACKUS shuffle.
32060 if (((MaskVT == MVT::v8i16 || MaskVT == MVT::v16i8) && Subtarget.hasSSE2()) ||
32061 ((MaskVT == MVT::v16i16 || MaskVT == MVT::v32i8) && Subtarget.hasInt256()) ||
32062 ((MaskVT == MVT::v32i16 || MaskVT == MVT::v64i8) && Subtarget.hasBWI())) {
32063 if (matchVectorShuffleWithPACK(MaskVT, SrcVT, V1, V2, Shuffle, Mask, DAG,
32064 Subtarget)) {
32065 DstVT = MaskVT;
32066 return true;
32067 }
32068 }
32069
32070 // Attempt to match against either a unary or binary UNPCKL/UNPCKH shuffle.
32071 if ((MaskVT == MVT::v4f32 && Subtarget.hasSSE1()) ||
32072 (MaskVT.is128BitVector() && Subtarget.hasSSE2()) ||
32073 (MaskVT.is256BitVector() && 32 <= EltSizeInBits && Subtarget.hasAVX()) ||
32074 (MaskVT.is256BitVector() && Subtarget.hasAVX2()) ||
32075 (MaskVT.is512BitVector() && Subtarget.hasAVX512())) {
32076 if (matchVectorShuffleWithUNPCK(MaskVT, V1, V2, Shuffle, IsUnary, Mask, DL,
32077 DAG, Subtarget)) {
32078 SrcVT = DstVT = MaskVT;
32079 if (MaskVT.is256BitVector() && !Subtarget.hasAVX2())
32080 SrcVT = DstVT = (32 == EltSizeInBits ? MVT::v8f32 : MVT::v4f64);
32081 return true;
32082 }
32083 }
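  // Worked example (illustrative): a v4f32 mask {0, 4, 1, 5} interleaves the
  // low halves of V1 and V2 and is matched as X86ISD::UNPCKL (unpcklps).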
32084
32085 return false;
32086}
32087
32088static bool matchBinaryPermuteShuffle(
32089 MVT MaskVT, ArrayRef<int> Mask, const APInt &Zeroable,
32090 bool AllowFloatDomain, bool AllowIntDomain, SDValue &V1, SDValue &V2,
32091 const SDLoc &DL, SelectionDAG &DAG, const X86Subtarget &Subtarget,
32092 unsigned &Shuffle, MVT &ShuffleVT, unsigned &PermuteImm) {
32093 unsigned NumMaskElts = Mask.size();
32094 unsigned EltSizeInBits = MaskVT.getScalarSizeInBits();
32095
32096 // Attempt to match against PALIGNR byte rotate.
32097 if (AllowIntDomain && ((MaskVT.is128BitVector() && Subtarget.hasSSSE3()) ||
32098 (MaskVT.is256BitVector() && Subtarget.hasAVX2()))) {
32099 int ByteRotation = matchShuffleAsByteRotate(MaskVT, V1, V2, Mask);
32100 if (0 < ByteRotation) {
32101 Shuffle = X86ISD::PALIGNR;
32102 ShuffleVT = MVT::getVectorVT(MVT::i8, MaskVT.getSizeInBits() / 8);
32103 PermuteImm = ByteRotation;
32104 return true;
32105 }
32106 }
32107
32108 // Attempt to combine to X86ISD::BLENDI.
32109 if ((NumMaskElts <= 8 && ((Subtarget.hasSSE41() && MaskVT.is128BitVector()) ||
32110 (Subtarget.hasAVX() && MaskVT.is256BitVector()))) ||
32111 (MaskVT == MVT::v16i16 && Subtarget.hasAVX2())) {
32112 uint64_t BlendMask = 0;
32113 bool ForceV1Zero = false, ForceV2Zero = false;
32114 SmallVector<int, 8> TargetMask(Mask.begin(), Mask.end());
32115 if (matchVectorShuffleAsBlend(V1, V2, TargetMask, Zeroable, ForceV1Zero,
32116 ForceV2Zero, BlendMask)) {
32117 if (MaskVT == MVT::v16i16) {
32118 // We can only use v16i16 PBLENDW if the lanes are repeated.
32119 SmallVector<int, 8> RepeatedMask;
32120 if (isRepeatedTargetShuffleMask(128, MaskVT, TargetMask,
32121 RepeatedMask)) {
32122          assert(RepeatedMask.size() == 8 &&
32123                 "Repeated mask size doesn't match!");
32124 PermuteImm = 0;
32125 for (int i = 0; i < 8; ++i)
32126 if (RepeatedMask[i] >= 8)
32127 PermuteImm |= 1 << i;
32128 V1 = ForceV1Zero ? getZeroVector(MaskVT, Subtarget, DAG, DL) : V1;
32129 V2 = ForceV2Zero ? getZeroVector(MaskVT, Subtarget, DAG, DL) : V2;
32130 Shuffle = X86ISD::BLENDI;
32131 ShuffleVT = MaskVT;
32132 return true;
32133 }
32134 } else {
32135 V1 = ForceV1Zero ? getZeroVector(MaskVT, Subtarget, DAG, DL) : V1;
32136 V2 = ForceV2Zero ? getZeroVector(MaskVT, Subtarget, DAG, DL) : V2;
32137 PermuteImm = (unsigned)BlendMask;
32138 Shuffle = X86ISD::BLENDI;
32139 ShuffleVT = MaskVT;
32140 return true;
32141 }
32142 }
32143 }
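  // Worked example (illustrative): a v8i16 mask {0, 9, 2, 11, 4, 13, 6, 15}
  // takes V2 in lanes 1, 3, 5 and 7, so the blend match yields
  // PermuteImm = 0xAA, i.e. pblendw $0xaa.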
32144
32145 // Attempt to combine to INSERTPS, but only if it has elements that need to
32146 // be set to zero.
32147 if (AllowFloatDomain && EltSizeInBits == 32 && Subtarget.hasSSE41() &&
32148 MaskVT.is128BitVector() &&
32149 llvm::any_of(Mask, [](int M) { return M == SM_SentinelZero; }) &&
32150 matchShuffleAsInsertPS(V1, V2, PermuteImm, Zeroable, Mask, DAG)) {
32151 Shuffle = X86ISD::INSERTPS;
32152 ShuffleVT = MVT::v4f32;
32153 return true;
32154 }
32155
32156 // Attempt to combine to SHUFPD.
32157 if (AllowFloatDomain && EltSizeInBits == 64 &&
32158 ((MaskVT.is128BitVector() && Subtarget.hasSSE2()) ||
32159 (MaskVT.is256BitVector() && Subtarget.hasAVX()) ||
32160 (MaskVT.is512BitVector() && Subtarget.hasAVX512()))) {
32161 bool ForceV1Zero = false, ForceV2Zero = false;
32162 if (matchShuffleWithSHUFPD(MaskVT, V1, V2, ForceV1Zero, ForceV2Zero,
32163 PermuteImm, Mask, Zeroable)) {
32164 V1 = ForceV1Zero ? getZeroVector(MaskVT, Subtarget, DAG, DL) : V1;
32165 V2 = ForceV2Zero ? getZeroVector(MaskVT, Subtarget, DAG, DL) : V2;
32166 Shuffle = X86ISD::SHUFP;
32167 ShuffleVT = MVT::getVectorVT(MVT::f64, MaskVT.getSizeInBits() / 64);
32168 return true;
32169 }
32170 }
32171
32172 // Attempt to combine to SHUFPS.
32173 if (AllowFloatDomain && EltSizeInBits == 32 &&
32174 ((MaskVT.is128BitVector() && Subtarget.hasSSE1()) ||
32175 (MaskVT.is256BitVector() && Subtarget.hasAVX()) ||
32176 (MaskVT.is512BitVector() && Subtarget.hasAVX512()))) {
32177 SmallVector<int, 4> RepeatedMask;
32178 if (isRepeatedTargetShuffleMask(128, MaskVT, Mask, RepeatedMask)) {
32179      // Match each half of the repeated mask to determine if it's just
32180      // referencing one of the vectors, is zeroable or is entirely undef.
32181 auto MatchHalf = [&](unsigned Offset, int &S0, int &S1) {
32182 int M0 = RepeatedMask[Offset];
32183 int M1 = RepeatedMask[Offset + 1];
32184
32185 if (isUndefInRange(RepeatedMask, Offset, 2)) {
32186 return DAG.getUNDEF(MaskVT);
32187 } else if (isUndefOrZeroInRange(RepeatedMask, Offset, 2)) {
32188 S0 = (SM_SentinelUndef == M0 ? -1 : 0);
32189 S1 = (SM_SentinelUndef == M1 ? -1 : 1);
32190 return getZeroVector(MaskVT, Subtarget, DAG, DL);
32191 } else if (isUndefOrInRange(M0, 0, 4) && isUndefOrInRange(M1, 0, 4)) {
32192 S0 = (SM_SentinelUndef == M0 ? -1 : M0 & 3);
32193 S1 = (SM_SentinelUndef == M1 ? -1 : M1 & 3);
32194 return V1;
32195 } else if (isUndefOrInRange(M0, 4, 8) && isUndefOrInRange(M1, 4, 8)) {
32196 S0 = (SM_SentinelUndef == M0 ? -1 : M0 & 3);
32197 S1 = (SM_SentinelUndef == M1 ? -1 : M1 & 3);
32198 return V2;
32199 }
32200
32201 return SDValue();
32202 };
32203
32204 int ShufMask[4] = {-1, -1, -1, -1};
32205 SDValue Lo = MatchHalf(0, ShufMask[0], ShufMask[1]);
32206 SDValue Hi = MatchHalf(2, ShufMask[2], ShufMask[3]);
32207
32208 if (Lo && Hi) {
32209 V1 = Lo;
32210 V2 = Hi;
32211 Shuffle = X86ISD::SHUFP;
32212 ShuffleVT = MVT::getVectorVT(MVT::f32, MaskVT.getSizeInBits() / 32);
32213 PermuteImm = getV4X86ShuffleImm(ShufMask);
32214 return true;
32215 }
32216 }
32217 }
32218
32219 // Attempt to combine to INSERTPS more generally if X86ISD::SHUFP failed.
32220 if (AllowFloatDomain && EltSizeInBits == 32 && Subtarget.hasSSE41() &&
32221 MaskVT.is128BitVector() &&
32222 matchShuffleAsInsertPS(V1, V2, PermuteImm, Zeroable, Mask, DAG)) {
32223 Shuffle = X86ISD::INSERTPS;
32224 ShuffleVT = MVT::v4f32;
32225 return true;
32226 }
32227
32228 return false;
32229}
32230
32231static SDValue combineX86ShuffleChainWithExtract(
32232 ArrayRef<SDValue> Inputs, SDValue Root, ArrayRef<int> BaseMask, int Depth,
32233 bool HasVariableMask, bool AllowVariableMask, SelectionDAG &DAG,
32234 const X86Subtarget &Subtarget);
32235
32236/// Combine an arbitrary chain of shuffles into a single instruction if
32237/// possible.
32238///
32239/// This is the leaf of the recursive combine below. When we have found some
32240/// chain of single-use x86 shuffle instructions and accumulated the combined
32241/// shuffle mask represented by them, this will try to pattern match that mask
32242/// into either a single instruction if there is a special purpose instruction
32243/// for this operation, or into a PSHUFB instruction which is a fully general
32244/// instruction but should only be used to replace chains over a certain depth.
32245static SDValue combineX86ShuffleChain(ArrayRef<SDValue> Inputs, SDValue Root,
32246 ArrayRef<int> BaseMask, int Depth,
32247 bool HasVariableMask,
32248 bool AllowVariableMask, SelectionDAG &DAG,
32249 const X86Subtarget &Subtarget) {
32250  assert(!BaseMask.empty() && "Cannot combine an empty shuffle mask!");
32251  assert((Inputs.size() == 1 || Inputs.size() == 2) &&
32252         "Unexpected number of shuffle inputs!");
32253
32254 // Find the inputs that enter the chain. Note that multiple uses are OK
32255 // here, we're not going to remove the operands we find.
32256 bool UnaryShuffle = (Inputs.size() == 1);
32257 SDValue V1 = peekThroughBitcasts(Inputs[0]);
32258 SDValue V2 = (UnaryShuffle ? DAG.getUNDEF(V1.getValueType())
32259 : peekThroughBitcasts(Inputs[1]));
32260
32261 MVT VT1 = V1.getSimpleValueType();
32262 MVT VT2 = V2.getSimpleValueType();
32263 MVT RootVT = Root.getSimpleValueType();
32264  assert(VT1.getSizeInBits() == RootVT.getSizeInBits() &&
32265         VT2.getSizeInBits() == RootVT.getSizeInBits() &&
32266         "Vector size mismatch");
32267
32268 SDLoc DL(Root);
32269 SDValue Res;
32270
32271 unsigned NumBaseMaskElts = BaseMask.size();
32272 if (NumBaseMaskElts == 1) {
32273    assert(BaseMask[0] == 0 && "Invalid shuffle index found!");
32274 return DAG.getBitcast(RootVT, V1);
32275 }
32276
32277 unsigned RootSizeInBits = RootVT.getSizeInBits();
32278 unsigned NumRootElts = RootVT.getVectorNumElements();
32279 unsigned BaseMaskEltSizeInBits = RootSizeInBits / NumBaseMaskElts;
32280 bool FloatDomain = VT1.isFloatingPoint() || VT2.isFloatingPoint() ||
32281 (RootVT.isFloatingPoint() && Depth >= 1) ||
32282 (RootVT.is256BitVector() && !Subtarget.hasAVX2());
32283
32284  // Don't combine if we are an AVX512/EVEX target and the mask element size
32285 // is different from the root element size - this would prevent writemasks
32286 // from being reused.
32287 // TODO - this currently prevents all lane shuffles from occurring.
32288 // TODO - check for writemasks usage instead of always preventing combining.
32289 // TODO - attempt to narrow Mask back to writemask size.
32290 bool IsEVEXShuffle =
32291 RootSizeInBits == 512 || (Subtarget.hasVLX() && RootSizeInBits >= 128);
32292
32293 // Attempt to match a subvector broadcast.
32294 // shuffle(insert_subvector(undef, sub, 0), undef, 0, 0, 0, 0)
32295 if (UnaryShuffle &&
32296 (BaseMaskEltSizeInBits == 128 || BaseMaskEltSizeInBits == 256)) {
32297 SmallVector<int, 64> BroadcastMask(NumBaseMaskElts, 0);
32298 if (isTargetShuffleEquivalent(BaseMask, BroadcastMask)) {
32299 SDValue Src = Inputs[0];
32300 if (Src.getOpcode() == ISD::INSERT_SUBVECTOR &&
32301 Src.getOperand(0).isUndef() &&
32302 Src.getOperand(1).getValueSizeInBits() == BaseMaskEltSizeInBits &&
32303 MayFoldLoad(Src.getOperand(1)) && isNullConstant(Src.getOperand(2))) {
32304 return DAG.getBitcast(RootVT, DAG.getNode(X86ISD::SUBV_BROADCAST, DL,
32305 Src.getValueType(),
32306 Src.getOperand(1)));
32307 }
32308 }
32309 }
32310
32311 // TODO - handle 128/256-bit lane shuffles of 512-bit vectors.
32312
32313 // Handle 128-bit lane shuffles of 256-bit vectors.
32314 // If we have AVX2, prefer to use VPERMQ/VPERMPD for unary shuffles unless
32315 // we need to use the zeroing feature.
32316 // TODO - this should support binary shuffles.
32317 if (UnaryShuffle && RootVT.is256BitVector() && NumBaseMaskElts == 2 &&
32318 !(Subtarget.hasAVX2() && BaseMask[0] >= -1 && BaseMask[1] >= -1) &&
32319 !isSequentialOrUndefOrZeroInRange(BaseMask, 0, 2, 0)) {
32320 if (Depth == 0 && Root.getOpcode() == X86ISD::VPERM2X128)
32321 return SDValue(); // Nothing to do!
32322 MVT ShuffleVT = (FloatDomain ? MVT::v4f64 : MVT::v4i64);
32323 unsigned PermMask = 0;
32324 PermMask |= ((BaseMask[0] < 0 ? 0x8 : (BaseMask[0] & 1)) << 0);
32325 PermMask |= ((BaseMask[1] < 0 ? 0x8 : (BaseMask[1] & 1)) << 4);
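    // e.g. (illustrative) BaseMask {1, SM_SentinelZero} encodes as
    // PermMask = 0x81: the upper 128-bit lane of V1 goes to the lower result
    // lane and the upper result lane is zeroed.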
32326
32327 Res = DAG.getBitcast(ShuffleVT, V1);
32328 Res = DAG.getNode(X86ISD::VPERM2X128, DL, ShuffleVT, Res,
32329 DAG.getUNDEF(ShuffleVT),
32330 DAG.getTargetConstant(PermMask, DL, MVT::i8));
32331 return DAG.getBitcast(RootVT, Res);
32332 }
32333
32334 // For masks that have been widened to 128-bit elements or more,
32335 // narrow back down to 64-bit elements.
32336 SmallVector<int, 64> Mask;
32337 if (BaseMaskEltSizeInBits > 64) {
32338    assert((BaseMaskEltSizeInBits % 64) == 0 && "Illegal mask size");
32339 int MaskScale = BaseMaskEltSizeInBits / 64;
32340 scaleShuffleMask<int>(MaskScale, BaseMask, Mask);
32341 } else {
32342 Mask = SmallVector<int, 64>(BaseMask.begin(), BaseMask.end());
32343 }
32344
32345 unsigned NumMaskElts = Mask.size();
32346 unsigned MaskEltSizeInBits = RootSizeInBits / NumMaskElts;
32347
32348 // Determine the effective mask value type.
32349 FloatDomain &= (32 <= MaskEltSizeInBits);
32350 MVT MaskVT = FloatDomain ? MVT::getFloatingPointVT(MaskEltSizeInBits)
32351 : MVT::getIntegerVT(MaskEltSizeInBits);
32352 MaskVT = MVT::getVectorVT(MaskVT, NumMaskElts);
32353
32354 // Only allow legal mask types.
32355 if (!DAG.getTargetLoweringInfo().isTypeLegal(MaskVT))
32356 return SDValue();
32357
32358 // Attempt to match the mask against known shuffle patterns.
32359 MVT ShuffleSrcVT, ShuffleVT;
32360 unsigned Shuffle, PermuteImm;
32361
32362 // Which shuffle domains are permitted?
32363 // Permit domain crossing at higher combine depths.
32364 // TODO: Should we indicate which domain is preferred if both are allowed?
32365 bool AllowFloatDomain = FloatDomain || (Depth >= 3);
32366 bool AllowIntDomain = (!FloatDomain || (Depth >= 3)) && Subtarget.hasSSE2() &&
32367 (!MaskVT.is256BitVector() || Subtarget.hasAVX2());
32368
32369 // Determine zeroable mask elements.
32370 APInt Zeroable(NumMaskElts, 0);
32371 for (unsigned i = 0; i != NumMaskElts; ++i)
32372 if (isUndefOrZero(Mask[i]))
32373 Zeroable.setBit(i);
32374
32375 if (UnaryShuffle) {
32376 // If we are shuffling a X86ISD::VZEXT_LOAD then we can use the load
32377 // directly if we don't shuffle the lower element and we shuffle the upper
32378 // (zero) elements within themselves.
32379 if (V1.getOpcode() == X86ISD::VZEXT_LOAD &&
32380 (cast<MemIntrinsicSDNode>(V1)->getMemoryVT().getScalarSizeInBits() %
32381 MaskEltSizeInBits) == 0) {
32382 unsigned Scale =
32383 cast<MemIntrinsicSDNode>(V1)->getMemoryVT().getScalarSizeInBits() /
32384 MaskEltSizeInBits;
32385 ArrayRef<int> HiMask(Mask.data() + Scale, NumMaskElts - Scale);
32386 if (isSequentialOrUndefInRange(Mask, 0, Scale, 0) &&
32387 isUndefOrZeroOrInRange(HiMask, Scale, NumMaskElts)) {
32388 return DAG.getBitcast(RootVT, V1);
32389 }
32390 }
32391
32392 // Attempt to match against broadcast-from-vector.
32393 // Limit AVX1 to cases where we're loading+broadcasting a scalar element.
32394 if ((Subtarget.hasAVX2() || (Subtarget.hasAVX() && 32 <= MaskEltSizeInBits))
32395 && (!IsEVEXShuffle || NumRootElts == NumMaskElts)) {
32396 SmallVector<int, 64> BroadcastMask(NumMaskElts, 0);
32397 if (isTargetShuffleEquivalent(Mask, BroadcastMask)) {
32398 if (V1.getValueType() == MaskVT &&
32399 V1.getOpcode() == ISD::SCALAR_TO_VECTOR &&
32400 MayFoldLoad(V1.getOperand(0))) {
32401 if (Depth == 0 && Root.getOpcode() == X86ISD::VBROADCAST)
32402 return SDValue(); // Nothing to do!
32403 Res = V1.getOperand(0);
32404 Res = DAG.getNode(X86ISD::VBROADCAST, DL, MaskVT, Res);
32405 return DAG.getBitcast(RootVT, Res);
32406 }
32407 if (Subtarget.hasAVX2()) {
32408 if (Depth == 0 && Root.getOpcode() == X86ISD::VBROADCAST)
32409 return SDValue(); // Nothing to do!
32410 Res = DAG.getBitcast(MaskVT, V1);
32411 Res = DAG.getNode(X86ISD::VBROADCAST, DL, MaskVT, Res);
32412 return DAG.getBitcast(RootVT, Res);
32413 }
32414 }
32415 }
32416
32417 SDValue NewV1 = V1; // Save operand in case early exit happens.
32418 if (matchUnaryShuffle(MaskVT, Mask, AllowFloatDomain, AllowIntDomain, NewV1,
32419 DL, DAG, Subtarget, Shuffle, ShuffleSrcVT,
32420 ShuffleVT) &&
32421 (!IsEVEXShuffle || (NumRootElts == ShuffleVT.getVectorNumElements()))) {
32422 if (Depth == 0 && Root.getOpcode() == Shuffle)
32423 return SDValue(); // Nothing to do!
32424 Res = DAG.getBitcast(ShuffleSrcVT, NewV1);
32425 Res = DAG.getNode(Shuffle, DL, ShuffleVT, Res);
32426 return DAG.getBitcast(RootVT, Res);
32427 }
32428
32429 if (matchUnaryPermuteShuffle(MaskVT, Mask, Zeroable, AllowFloatDomain,
32430 AllowIntDomain, Subtarget, Shuffle, ShuffleVT,
32431 PermuteImm) &&
32432 (!IsEVEXShuffle || (NumRootElts == ShuffleVT.getVectorNumElements()))) {
32433 if (Depth == 0 && Root.getOpcode() == Shuffle)
32434 return SDValue(); // Nothing to do!
32435 Res = DAG.getBitcast(ShuffleVT, V1);
32436 Res = DAG.getNode(Shuffle, DL, ShuffleVT, Res,
32437 DAG.getTargetConstant(PermuteImm, DL, MVT::i8));
32438 return DAG.getBitcast(RootVT, Res);
32439 }
32440 }
32441
32442 SDValue NewV1 = V1; // Save operands in case early exit happens.
32443 SDValue NewV2 = V2;
32444 if (matchBinaryShuffle(MaskVT, Mask, AllowFloatDomain, AllowIntDomain, NewV1,
32445 NewV2, DL, DAG, Subtarget, Shuffle, ShuffleSrcVT,
32446 ShuffleVT, UnaryShuffle) &&
32447 (!IsEVEXShuffle || (NumRootElts == ShuffleVT.getVectorNumElements()))) {
32448 if (Depth == 0 && Root.getOpcode() == Shuffle)
32449 return SDValue(); // Nothing to do!
32450 NewV1 = DAG.getBitcast(ShuffleSrcVT, NewV1);
32451 NewV2 = DAG.getBitcast(ShuffleSrcVT, NewV2);
32452 Res = DAG.getNode(Shuffle, DL, ShuffleVT, NewV1, NewV2);
32453 return DAG.getBitcast(RootVT, Res);
32454 }
32455
32456 NewV1 = V1; // Save operands in case early exit happens.
32457 NewV2 = V2;
32458 if (matchBinaryPermuteShuffle(
32459 MaskVT, Mask, Zeroable, AllowFloatDomain, AllowIntDomain, NewV1,
32460 NewV2, DL, DAG, Subtarget, Shuffle, ShuffleVT, PermuteImm) &&
32461 (!IsEVEXShuffle || (NumRootElts == ShuffleVT.getVectorNumElements()))) {
32462 if (Depth == 0 && Root.getOpcode() == Shuffle)
32463 return SDValue(); // Nothing to do!
32464 NewV1 = DAG.getBitcast(ShuffleVT, NewV1);
32465 NewV2 = DAG.getBitcast(ShuffleVT, NewV2);
32466 Res = DAG.getNode(Shuffle, DL, ShuffleVT, NewV1, NewV2,
32467 DAG.getTargetConstant(PermuteImm, DL, MVT::i8));
32468 return DAG.getBitcast(RootVT, Res);
32469 }
32470
32471 // Typically from here on, we need an integer version of MaskVT.
32472 MVT IntMaskVT = MVT::getIntegerVT(MaskEltSizeInBits);
32473 IntMaskVT = MVT::getVectorVT(IntMaskVT, NumMaskElts);
32474
32475 // Annoyingly, SSE4A instructions don't map into the above match helpers.
32476 if (Subtarget.hasSSE4A() && AllowIntDomain && RootSizeInBits == 128) {
32477 uint64_t BitLen, BitIdx;
32478 if (matchShuffleAsEXTRQ(IntMaskVT, V1, V2, Mask, BitLen, BitIdx,
32479 Zeroable)) {
32480 if (Depth == 0 && Root.getOpcode() == X86ISD::EXTRQI)
32481 return SDValue(); // Nothing to do!
32482 V1 = DAG.getBitcast(IntMaskVT, V1);
32483 Res = DAG.getNode(X86ISD::EXTRQI, DL, IntMaskVT, V1,
32484 DAG.getTargetConstant(BitLen, DL, MVT::i8),
32485 DAG.getTargetConstant(BitIdx, DL, MVT::i8));
32486 return DAG.getBitcast(RootVT, Res);
32487 }
32488
32489 if (matchShuffleAsINSERTQ(IntMaskVT, V1, V2, Mask, BitLen, BitIdx)) {
32490 if (Depth == 0 && Root.getOpcode() == X86ISD::INSERTQI)
32491 return SDValue(); // Nothing to do!
32492 V1 = DAG.getBitcast(IntMaskVT, V1);
32493 V2 = DAG.getBitcast(IntMaskVT, V2);
32494 Res = DAG.getNode(X86ISD::INSERTQI, DL, IntMaskVT, V1, V2,
32495 DAG.getTargetConstant(BitLen, DL, MVT::i8),
32496 DAG.getTargetConstant(BitIdx, DL, MVT::i8));
32497 return DAG.getBitcast(RootVT, Res);
32498 }
32499 }
32500
32501 // Don't try to re-form single instruction chains under any circumstances now
32502 // that we've done encoding canonicalization for them.
32503 if (Depth < 1)
32504 return SDValue();
32505
32506 // Depth threshold above which we can efficiently use variable mask shuffles.
32507 int VariableShuffleDepth = Subtarget.hasFastVariableShuffle() ? 1 : 2;
32508 AllowVariableMask &= (Depth >= VariableShuffleDepth) || HasVariableMask;
32509
32510 bool MaskContainsZeros =
32511 any_of(Mask, [](int M) { return M == SM_SentinelZero; });
32512
32513 if (is128BitLaneCrossingShuffleMask(MaskVT, Mask)) {
32514 // If we have a single input lane-crossing shuffle then lower to VPERMV.
32515 if (UnaryShuffle && AllowVariableMask && !MaskContainsZeros &&
32516 ((Subtarget.hasAVX2() &&
32517 (MaskVT == MVT::v8f32 || MaskVT == MVT::v8i32)) ||
32518 (Subtarget.hasAVX512() &&
32519 (MaskVT == MVT::v8f64 || MaskVT == MVT::v8i64 ||
32520 MaskVT == MVT::v16f32 || MaskVT == MVT::v16i32)) ||
32521 (Subtarget.hasBWI() && MaskVT == MVT::v32i16) ||
32522 (Subtarget.hasBWI() && Subtarget.hasVLX() && MaskVT == MVT::v16i16) ||
32523 (Subtarget.hasVBMI() && MaskVT == MVT::v64i8) ||
32524 (Subtarget.hasVBMI() && Subtarget.hasVLX() && MaskVT == MVT::v32i8))) {
32525 SDValue VPermMask = getConstVector(Mask, IntMaskVT, DAG, DL, true);
32526 Res = DAG.getBitcast(MaskVT, V1);
32527 Res = DAG.getNode(X86ISD::VPERMV, DL, MaskVT, VPermMask, Res);
32528 return DAG.getBitcast(RootVT, Res);
32529 }
32530
32531 // Lower a unary+zero lane-crossing shuffle as VPERMV3 with a zero
32532 // vector as the second source.
32533 if (UnaryShuffle && AllowVariableMask &&
32534 ((Subtarget.hasAVX512() &&
32535 (MaskVT == MVT::v8f64 || MaskVT == MVT::v8i64 ||
32536 MaskVT == MVT::v16f32 || MaskVT == MVT::v16i32)) ||
32537 (Subtarget.hasVLX() &&
32538 (MaskVT == MVT::v4f64 || MaskVT == MVT::v4i64 ||
32539 MaskVT == MVT::v8f32 || MaskVT == MVT::v8i32)) ||
32540 (Subtarget.hasBWI() && MaskVT == MVT::v32i16) ||
32541 (Subtarget.hasBWI() && Subtarget.hasVLX() && MaskVT == MVT::v16i16) ||
32542 (Subtarget.hasVBMI() && MaskVT == MVT::v64i8) ||
32543 (Subtarget.hasVBMI() && Subtarget.hasVLX() && MaskVT == MVT::v32i8))) {
32544 // Adjust shuffle mask - replace SM_SentinelZero with second source index.
32545 for (unsigned i = 0; i != NumMaskElts; ++i)
32546 if (Mask[i] == SM_SentinelZero)
32547 Mask[i] = NumMaskElts + i;
32548
32549 SDValue VPermMask = getConstVector(Mask, IntMaskVT, DAG, DL, true);
32550 Res = DAG.getBitcast(MaskVT, V1);
32551 SDValue Zero = getZeroVector(MaskVT, Subtarget, DAG, DL);
32552 Res = DAG.getNode(X86ISD::VPERMV3, DL, MaskVT, Res, VPermMask, Zero);
32553 return DAG.getBitcast(RootVT, Res);
32554 }
32555
32556 // If that failed and either input is extracted then try to combine as a
32557 // shuffle with the larger type.
32558 if (SDValue WideShuffle = combineX86ShuffleChainWithExtract(
32559 Inputs, Root, BaseMask, Depth, HasVariableMask, AllowVariableMask,
32560 DAG, Subtarget))
32561 return WideShuffle;
32562
32563 // If we have a dual input lane-crossing shuffle then lower to VPERMV3.
32564 if (AllowVariableMask && !MaskContainsZeros &&
32565 ((Subtarget.hasAVX512() &&
32566 (MaskVT == MVT::v8f64 || MaskVT == MVT::v8i64 ||
32567 MaskVT == MVT::v16f32 || MaskVT == MVT::v16i32)) ||
32568 (Subtarget.hasVLX() &&
32569 (MaskVT == MVT::v4f64 || MaskVT == MVT::v4i64 ||
32570 MaskVT == MVT::v8f32 || MaskVT == MVT::v8i32)) ||
32571 (Subtarget.hasBWI() && MaskVT == MVT::v32i16) ||
32572 (Subtarget.hasBWI() && Subtarget.hasVLX() && MaskVT == MVT::v16i16) ||
32573 (Subtarget.hasVBMI() && MaskVT == MVT::v64i8) ||
32574 (Subtarget.hasVBMI() && Subtarget.hasVLX() && MaskVT == MVT::v32i8))) {
32575 SDValue VPermMask = getConstVector(Mask, IntMaskVT, DAG, DL, true);
32576 V1 = DAG.getBitcast(MaskVT, V1);
32577 V2 = DAG.getBitcast(MaskVT, V2);
32578 Res = DAG.getNode(X86ISD::VPERMV3, DL, MaskVT, V1, VPermMask, V2);
32579 return DAG.getBitcast(RootVT, Res);
32580 }
32581 return SDValue();
32582 }
32583
32584 // See if we can combine a single input shuffle with zeros to a bit-mask,
32585 // which is much simpler than any shuffle.
32586 if (UnaryShuffle && MaskContainsZeros && AllowVariableMask &&
32587 isSequentialOrUndefOrZeroInRange(Mask, 0, NumMaskElts, 0) &&
32588 DAG.getTargetLoweringInfo().isTypeLegal(MaskVT)) {
32589 APInt Zero = APInt::getNullValue(MaskEltSizeInBits);
32590 APInt AllOnes = APInt::getAllOnesValue(MaskEltSizeInBits);
32591 APInt UndefElts(NumMaskElts, 0);
32592 SmallVector<APInt, 64> EltBits(NumMaskElts, Zero);
32593 for (unsigned i = 0; i != NumMaskElts; ++i) {
32594 int M = Mask[i];
32595 if (M == SM_SentinelUndef) {
32596 UndefElts.setBit(i);
32597 continue;
32598 }
32599 if (M == SM_SentinelZero)
32600 continue;
32601 EltBits[i] = AllOnes;
32602 }
32603 SDValue BitMask = getConstVector(EltBits, UndefElts, MaskVT, DAG, DL);
32604 Res = DAG.getBitcast(MaskVT, V1);
32605 unsigned AndOpcode =
32606 FloatDomain ? unsigned(X86ISD::FAND) : unsigned(ISD::AND);
32607 Res = DAG.getNode(AndOpcode, DL, MaskVT, Res, BitMask);
32608 return DAG.getBitcast(RootVT, Res);
32609 }
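  // Worked example (illustrative): a v4i32 mask {0, Z, 2, Z} (Z = zero)
  // becomes an AND with the constant vector {-1, 0, -1, 0}.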
32610
32611 // If we have a single input shuffle with different shuffle patterns in the
32612  // 128-bit lanes, use a variable mask with VPERMILPS.
32613  // TODO: Combine other mask types at higher depths.
32614 if (UnaryShuffle && AllowVariableMask && !MaskContainsZeros &&
32615 ((MaskVT == MVT::v8f32 && Subtarget.hasAVX()) ||
32616 (MaskVT == MVT::v16f32 && Subtarget.hasAVX512()))) {
32617 SmallVector<SDValue, 16> VPermIdx;
32618 for (int M : Mask) {
32619 SDValue Idx =
32620 M < 0 ? DAG.getUNDEF(MVT::i32) : DAG.getConstant(M % 4, DL, MVT::i32);
32621 VPermIdx.push_back(Idx);
32622 }
32623 SDValue VPermMask = DAG.getBuildVector(IntMaskVT, DL, VPermIdx);
32624 Res = DAG.getBitcast(MaskVT, V1);
32625 Res = DAG.getNode(X86ISD::VPERMILPV, DL, MaskVT, Res, VPermMask);
32626 return DAG.getBitcast(RootVT, Res);
32627 }
32628
32629 // With XOP, binary shuffles of 128/256-bit floating point vectors can combine
32630 // to VPERMIL2PD/VPERMIL2PS.
32631 if (AllowVariableMask && Subtarget.hasXOP() &&
32632 (MaskVT == MVT::v2f64 || MaskVT == MVT::v4f64 || MaskVT == MVT::v4f32 ||
32633 MaskVT == MVT::v8f32)) {
32634 // VPERMIL2 Operation.
32635 // Bits[3] - Match Bit.
32636 // Bits[2:1] - (Per Lane) PD Shuffle Mask.
32637 // Bits[2:0] - (Per Lane) PS Shuffle Mask.
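    // e.g. (illustrative) for a v4f32 shuffle, mask element 6 (element 2 of
    // V2) becomes selector index 6 below, and SM_SentinelZero becomes index 8
    // with the match-bit immediate M2ZImm set to 2.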
32638 unsigned NumLanes = MaskVT.getSizeInBits() / 128;
32639 unsigned NumEltsPerLane = NumMaskElts / NumLanes;
32640 SmallVector<int, 8> VPerm2Idx;
32641 unsigned M2ZImm = 0;
32642 for (int M : Mask) {
32643 if (M == SM_SentinelUndef) {
32644 VPerm2Idx.push_back(-1);
32645 continue;
32646 }
32647 if (M == SM_SentinelZero) {
32648 M2ZImm = 2;
32649 VPerm2Idx.push_back(8);
32650 continue;
32651 }
32652 int Index = (M % NumEltsPerLane) + ((M / NumMaskElts) * NumEltsPerLane);
32653 Index = (MaskVT.getScalarSizeInBits() == 64 ? Index << 1 : Index);
32654 VPerm2Idx.push_back(Index);
32655 }
32656 V1 = DAG.getBitcast(MaskVT, V1);
32657 V2 = DAG.getBitcast(MaskVT, V2);
32658 SDValue VPerm2MaskOp = getConstVector(VPerm2Idx, IntMaskVT, DAG, DL, true);
32659 Res = DAG.getNode(X86ISD::VPERMIL2, DL, MaskVT, V1, V2, VPerm2MaskOp,
32660 DAG.getTargetConstant(M2ZImm, DL, MVT::i8));
32661 return DAG.getBitcast(RootVT, Res);
32662 }
32663
32664 // If we have 3 or more shuffle instructions or a chain involving a variable
32665 // mask, we can replace them with a single PSHUFB instruction profitably.
32666  // Intel's manuals suggest only using PSHUFB if doing so replaces 5
32667  // instructions, but in practice PSHUFB tends to be *very* fast so we're
32668  // more aggressive.
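  // Worked example (illustrative): a v4i32 mask {1, 0, 3, 2} on a 128-bit
  // vector gives Ratio = 4 and the PSHUFB byte mask
  // {4,5,6,7, 0,1,2,3, 12,13,14,15, 8,9,10,11}.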
32669 if (UnaryShuffle && AllowVariableMask &&
32670 ((RootVT.is128BitVector() && Subtarget.hasSSSE3()) ||
32671 (RootVT.is256BitVector() && Subtarget.hasAVX2()) ||
32672 (RootVT.is512BitVector() && Subtarget.hasBWI()))) {
32673 SmallVector<SDValue, 16> PSHUFBMask;
32674 int NumBytes = RootVT.getSizeInBits() / 8;
32675 int Ratio = NumBytes / NumMaskElts;
32676 for (int i = 0; i < NumBytes; ++i) {
32677 int M = Mask[i / Ratio];
32678 if (M == SM_SentinelUndef) {
32679 PSHUFBMask.push_back(DAG.getUNDEF(MVT::i8));
32680 continue;
32681 }
32682 if (M == SM_SentinelZero) {
32683 PSHUFBMask.push_back(DAG.getConstant(255, DL, MVT::i8));
32684 continue;
32685 }
32686 M = Ratio * M + i % Ratio;
32687      assert((M / 16) == (i / 16) && "Lane crossing detected");
32688 PSHUFBMask.push_back(DAG.getConstant(M, DL, MVT::i8));
32689 }
32690 MVT ByteVT = MVT::getVectorVT(MVT::i8, NumBytes);
32691 Res = DAG.getBitcast(ByteVT, V1);
32692 SDValue PSHUFBMaskOp = DAG.getBuildVector(ByteVT, DL, PSHUFBMask);
32693 Res = DAG.getNode(X86ISD::PSHUFB, DL, ByteVT, Res, PSHUFBMaskOp);
32694 return DAG.getBitcast(RootVT, Res);
32695 }
32696
32697 // With XOP, if we have a 128-bit binary input shuffle we can always combine
32698 // to VPPERM. We match the depth requirement of PSHUFB - VPPERM is never
32699 // slower than PSHUFB on targets that support both.
32700 if (AllowVariableMask && RootVT.is128BitVector() && Subtarget.hasXOP()) {
32701 // VPPERM Mask Operation
32702 // Bits[4:0] - Byte Index (0 - 31)
32703 // Bits[7:5] - Permute Operation (0 - Source byte, 4 - ZERO)
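    // e.g. (illustrative) byte j of V2 is encoded as selector 16 + j, and a
    // zeroed byte is encoded as 128 (permute operation 4).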
32704 SmallVector<SDValue, 16> VPPERMMask;
32705 int NumBytes = 16;
32706 int Ratio = NumBytes / NumMaskElts;
32707 for (int i = 0; i < NumBytes; ++i) {
32708 int M = Mask[i / Ratio];
32709 if (M == SM_SentinelUndef) {
32710 VPPERMMask.push_back(DAG.getUNDEF(MVT::i8));
32711 continue;
32712 }
32713 if (M == SM_SentinelZero) {
32714 VPPERMMask.push_back(DAG.getConstant(128, DL, MVT::i8));
32715 continue;
32716 }
32717 M = Ratio * M + i % Ratio;
32718 VPPERMMask.push_back(DAG.getConstant(M, DL, MVT::i8));
32719 }
32720 MVT ByteVT = MVT::v16i8;
32721 V1 = DAG.getBitcast(ByteVT, V1);
32722 V2 = DAG.getBitcast(ByteVT, V2);
32723 SDValue VPPERMMaskOp = DAG.getBuildVector(ByteVT, DL, VPPERMMask);
32724 Res = DAG.getNode(X86ISD::VPPERM, DL, ByteVT, V1, V2, VPPERMMaskOp);
32725 return DAG.getBitcast(RootVT, Res);
32726 }
32727
32728 // If that failed and either input is extracted then try to combine as a
32729 // shuffle with the larger type.
32730 if (SDValue WideShuffle = combineX86ShuffleChainWithExtract(
32731 Inputs, Root, BaseMask, Depth, HasVariableMask, AllowVariableMask,
32732 DAG, Subtarget))
32733 return WideShuffle;
32734
32735 // If we have a dual input shuffle then lower to VPERMV3.
32736 if (!UnaryShuffle && AllowVariableMask && !MaskContainsZeros &&
32737 ((Subtarget.hasAVX512() &&
32738 (MaskVT == MVT::v8f64 || MaskVT == MVT::v8i64 ||
32739 MaskVT == MVT::v16f32 || MaskVT == MVT::v16i32)) ||
32740 (Subtarget.hasVLX() &&
32741 (MaskVT == MVT::v2f64 || MaskVT == MVT::v2i64 || MaskVT == MVT::v4f64 ||
32742 MaskVT == MVT::v4i64 || MaskVT == MVT::v4f32 || MaskVT == MVT::v4i32 ||
32743 MaskVT == MVT::v8f32 || MaskVT == MVT::v8i32)) ||
32744 (Subtarget.hasBWI() && MaskVT == MVT::v32i16) ||
32745 (Subtarget.hasBWI() && Subtarget.hasVLX() &&
32746 (MaskVT == MVT::v8i16 || MaskVT == MVT::v16i16)) ||
32747 (Subtarget.hasVBMI() && MaskVT == MVT::v64i8) ||
32748 (Subtarget.hasVBMI() && Subtarget.hasVLX() &&
32749 (MaskVT == MVT::v16i8 || MaskVT == MVT::v32i8)))) {
32750 SDValue VPermMask = getConstVector(Mask, IntMaskVT, DAG, DL, true);
32751 V1 = DAG.getBitcast(MaskVT, V1);
32752 V2 = DAG.getBitcast(MaskVT, V2);
32753 Res = DAG.getNode(X86ISD::VPERMV3, DL, MaskVT, V1, VPermMask, V2);
32754 return DAG.getBitcast(RootVT, Res);
32755 }
32756
32757 // Failed to find any combines.
32758 return SDValue();
32759}
32760
32761// Combine an arbitrary chain of shuffles + extract_subvectors into a single
32762// instruction if possible.
32763//
32764// Wrapper for combineX86ShuffleChain that extends the shuffle mask to a larger
32765// type size to attempt to combine:
32766// shuffle(extract_subvector(x,c1),extract_subvector(y,c2),m1)
32767// -->
32768// extract_subvector(shuffle(x,y,m2),0)
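//
// Worked example (illustrative): with x and y of type v8f32, both inputs
// extracting their upper v4f32 halves and m1 = {0, 1, 4, 5}, the mask widens
// to m2 = {4, 5, 12, 13, -1, -1, -1, -1} over x and y, and the combined wide
// shuffle is then narrowed again with extract_subvector(..., 0).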
32769static SDValue combineX86ShuffleChainWithExtract(
32770 ArrayRef<SDValue> Inputs, SDValue Root, ArrayRef<int> BaseMask, int Depth,
32771 bool HasVariableMask, bool AllowVariableMask, SelectionDAG &DAG,
32772 const X86Subtarget &Subtarget) {
32773 unsigned NumMaskElts = BaseMask.size();
32774 unsigned NumInputs = Inputs.size();
32775 if (NumInputs == 0)
32776 return SDValue();
32777
32778 SmallVector<SDValue, 4> WideInputs(Inputs.begin(), Inputs.end());
32779 SmallVector<unsigned, 4> Offsets(NumInputs, 0);
32780
32781 // Peek through subvectors.
32782 // TODO: Support inter-mixed EXTRACT_SUBVECTORs + BITCASTs?
32783 unsigned WideSizeInBits = WideInputs[0].getValueSizeInBits();
32784 for (unsigned i = 0; i != NumInputs; ++i) {
32785 SDValue &Src = WideInputs[i];
32786 unsigned &Offset = Offsets[i];
32787 Src = peekThroughBitcasts(Src);
32788 EVT BaseVT = Src.getValueType();
32789 while (Src.getOpcode() == ISD::EXTRACT_SUBVECTOR &&
32790 isa<ConstantSDNode>(Src.getOperand(1))) {
32791 Offset += Src.getConstantOperandVal(1);
32792 Src = Src.getOperand(0);
32793 }
32794 WideSizeInBits = std::max(WideSizeInBits, Src.getValueSizeInBits());
32795    assert((Offset % BaseVT.getVectorNumElements()) == 0 &&
32796           "Unexpected subvector extraction");
32797 Offset /= BaseVT.getVectorNumElements();
32798 Offset *= NumMaskElts;
32799 }
32800
32801  // Bail if we're always extracting from the lowest subvectors;
32802  // combineX86ShuffleChain should match this for the current width.
32803 if (llvm::all_of(Offsets, [](unsigned Offset) { return Offset == 0; }))
32804 return SDValue();
32805
32806 EVT RootVT = Root.getValueType();
32807 unsigned RootSizeInBits = RootVT.getSizeInBits();
32808 unsigned Scale = WideSizeInBits / RootSizeInBits;
32809  assert((WideSizeInBits % RootSizeInBits) == 0 &&
32810         "Unexpected subvector extraction");
32811
32812 // If the src vector types aren't the same, see if we can extend
32813 // them to match each other.
32814 // TODO: Support different scalar types?
32815 EVT WideSVT = WideInputs[0].getValueType().getScalarType();
32816 if (llvm::any_of(WideInputs, [&WideSVT, &DAG](SDValue Op) {
32817 return !DAG.getTargetLoweringInfo().isTypeLegal(Op.getValueType()) ||
32818 Op.getValueType().getScalarType() != WideSVT;
32819 }))
32820 return SDValue();
32821
32822 for (SDValue &NewInput : WideInputs) {
32823    assert((WideSizeInBits % NewInput.getValueSizeInBits()) == 0 &&
32824           "Shuffle vector size mismatch");
32825 if (WideSizeInBits > NewInput.getValueSizeInBits())
32826 NewInput = widenSubVector(NewInput, false, Subtarget, DAG,
32827 SDLoc(NewInput), WideSizeInBits);
32828    assert(WideSizeInBits == NewInput.getValueSizeInBits() &&
32829           "Unexpected subvector extraction");
32830 }
32831
32832 // Create new mask for larger type.
32833 for (unsigned i = 1; i != NumInputs; ++i)
32834 Offsets[i] += i * Scale * NumMaskElts;
32835
32836 SmallVector<int, 64> WideMask(BaseMask.begin(), BaseMask.end());
32837 for (int &M : WideMask) {
32838 if (M < 0)
32839 continue;
32840 M = (M % NumMaskElts) + Offsets[M / NumMaskElts];
32841 }
32842 WideMask.append((Scale - 1) * NumMaskElts, SM_SentinelUndef);
32843
32844 // Remove unused/repeated shuffle source ops.
32845 resolveTargetShuffleInputsAndMask(WideInputs, WideMask);
32846  assert(!WideInputs.empty() && "Shuffle with no inputs detected");
32847
32848 if (WideInputs.size() > 2)
32849 return SDValue();
32850
32851 // Increase depth for every upper subvector we've peeked through.
32852 Depth += count_if(Offsets, [](unsigned Offset) { return Offset > 0; });
32853
32854 // Attempt to combine wider chain.
32855 // TODO: Can we use a better Root?
32856 SDValue WideRoot = WideInputs[0];
32857 if (SDValue WideShuffle = combineX86ShuffleChain(
32858 WideInputs, WideRoot, WideMask, Depth, HasVariableMask,
32859 AllowVariableMask, DAG, Subtarget)) {
32860 WideShuffle =
32861 extractSubVector(WideShuffle, 0, DAG, SDLoc(Root), RootSizeInBits);
32862 return DAG.getBitcast(RootVT, WideShuffle);
32863 }
32864 return SDValue();
32865}
32866
32867// Attempt to constant fold all of the constant source ops.
32868// Returns the folded constant if the entire shuffle is folded to a constant.
32869// TODO: Extend this to merge multiple constant Ops and update the mask.
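// e.g. (illustrative) a unary shuffle with mask {3, 2, 1, 0} of a constant
// v4i32 {0, 1, 2, 3} folds directly to the constant vector {3, 2, 1, 0}.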
32870static SDValue combineX86ShufflesConstants(ArrayRef<SDValue> Ops,
32871 ArrayRef<int> Mask, SDValue Root,
32872 bool HasVariableMask,
32873 SelectionDAG &DAG,
32874 const X86Subtarget &Subtarget) {
32875 MVT VT = Root.getSimpleValueType();
32876
32877 unsigned SizeInBits = VT.getSizeInBits();
32878 unsigned NumMaskElts = Mask.size();
32879 unsigned MaskSizeInBits = SizeInBits / NumMaskElts;
32880 unsigned NumOps = Ops.size();
32881
32882 // Extract constant bits from each source op.
32883 bool OneUseConstantOp = false;
32884 SmallVector<APInt, 16> UndefEltsOps(NumOps);
32885 SmallVector<SmallVector<APInt, 16>, 16> RawBitsOps(NumOps);
32886 for (unsigned i = 0; i != NumOps; ++i) {
32887 SDValue SrcOp = Ops[i];
32888 OneUseConstantOp |= SrcOp.hasOneUse();
32889 if (!getTargetConstantBitsFromNode(SrcOp, MaskSizeInBits, UndefEltsOps[i],
32890 RawBitsOps[i]))
32891 return SDValue();
32892 }
32893
32894  // Only fold if at least one of the constants is only used once or the
32895  // combined shuffle has included a variable mask shuffle; this is to
32896  // avoid constant pool bloat.
32897 if (!OneUseConstantOp && !HasVariableMask)
32898 return SDValue();
32899
32900 // Shuffle the constant bits according to the mask.
32901 APInt UndefElts(NumMaskElts, 0);
32902 APInt ZeroElts(NumMaskElts, 0);
32903 APInt ConstantElts(NumMaskElts, 0);
32904 SmallVector<APInt, 8> ConstantBitData(NumMaskElts,
32905 APInt::getNullValue(MaskSizeInBits));
32906 for (unsigned i = 0; i != NumMaskElts; ++i) {
32907 int M = Mask[i];
32908 if (M == SM_SentinelUndef) {
32909 UndefElts.setBit(i);
32910 continue;
32911 } else if (M == SM_SentinelZero) {
32912 ZeroElts.setBit(i);
32913 continue;
32914 }
32915    assert(0 <= M && M < (int)(NumMaskElts * NumOps));
32916
32917 unsigned SrcOpIdx = (unsigned)M / NumMaskElts;
32918 unsigned SrcMaskIdx = (unsigned)M % NumMaskElts;
32919
32920 auto &SrcUndefElts = UndefEltsOps[SrcOpIdx];
32921 if (SrcUndefElts[SrcMaskIdx]) {
32922 UndefElts.setBit(i);
32923 continue;
32924 }
32925
32926 auto &SrcEltBits = RawBitsOps[SrcOpIdx];
32927 APInt &Bits = SrcEltBits[SrcMaskIdx];
32928 if (!Bits) {
32929 ZeroElts.setBit(i);
32930 continue;
32931 }
32932
32933 ConstantElts.setBit(i);
32934 ConstantBitData[i] = Bits;
32935 }
32936  assert((UndefElts | ZeroElts | ConstantElts).isAllOnesValue());
32937
32938 // Create the constant data.
32939 MVT MaskSVT;
32940 if (VT.isFloatingPoint() && (MaskSizeInBits == 32 || MaskSizeInBits == 64))
32941 MaskSVT = MVT::getFloatingPointVT(MaskSizeInBits);
32942 else
32943 MaskSVT = MVT::getIntegerVT(MaskSizeInBits);
32944
32945 MVT MaskVT = MVT::getVectorVT(MaskSVT, NumMaskElts);
32946
32947 SDLoc DL(Root);
32948 SDValue CstOp = getConstVector(ConstantBitData, UndefElts, MaskVT, DAG, DL);
32949 return DAG.getBitcast(VT, CstOp);
32950}
32951
32952/// Fully generic combining of x86 shuffle instructions.
32953///
32954/// This should be the last combine run over the x86 shuffle instructions. Once
32955/// they have been fully optimized, this will recursively consider all chains
32956/// of single-use shuffle instructions, build a generic model of the cumulative
32957/// shuffle operation, and check for simpler instructions which implement this
32958/// operation. We use this primarily for two purposes:
32959///
32960/// 1) Collapse generic shuffles to specialized single instructions when
32961/// equivalent. In most cases, this is just an encoding size win, but
32962/// sometimes we will collapse multiple generic shuffles into a single
32963/// special-purpose shuffle.
32964/// 2) Look for sequences of shuffle instructions with 3 or more total
32965/// instructions, and replace them with the slightly more expensive SSSE3
32966/// PSHUFB instruction if available. We do this as the last combining step
32967/// to ensure we avoid using PSHUFB if we can implement the shuffle with
32968/// a suitable short sequence of other instructions. The PSHUFB will either
32969/// use a register or have to read from memory and so is slightly (but only
32970/// slightly) more expensive than the other shuffle instructions.
32971///
32972/// Because this is inherently a quadratic operation (for each shuffle in
32973/// a chain, we recurse up the chain), the depth is limited to 8 instructions.
32974/// This should never be an issue in practice as the shuffle lowering doesn't
32975/// produce sequences of more than 8 instructions.
32976///
32977/// FIXME: We will currently miss some cases where the redundant shuffling
32978/// would simplify under the threshold for PSHUFB formation because of
32979/// combine-ordering. To fix this, we should do the redundant instruction
32980/// combining in this recursive walk.
32981static SDValue combineX86ShufflesRecursively(
32982 ArrayRef<SDValue> SrcOps, int SrcOpIndex, SDValue Root,
32983 ArrayRef<int> RootMask, ArrayRef<const SDNode *> SrcNodes, unsigned Depth,
32984 bool HasVariableMask, bool AllowVariableMask, SelectionDAG &DAG,
32985 const X86Subtarget &Subtarget) {
32986 // Bound the depth of our recursive combine because this is ultimately
32987 // quadratic in nature.
32988 const unsigned MaxRecursionDepth = 8;
32989 if (Depth >= MaxRecursionDepth)
Step 1: Assuming 'Depth' is < 'MaxRecursionDepth'
Step 2: Taking false branch
32990 return SDValue();
32991
32992 // Directly rip through bitcasts to find the underlying operand.
32993 SDValue Op = SrcOps[SrcOpIndex];
32994 Op = peekThroughOneUseBitcasts(Op);
32995
32996 MVT VT = Op.getSimpleValueType();
32997 if (!VT.isVector())
Step 3: Calling 'MVT::isVector'
Step 7: Returning from 'MVT::isVector'
Step 8: Taking false branch
32998 return SDValue(); // Bail if we hit a non-vector.
32999
33000  assert(Root.getSimpleValueType().isVector() &&
33001         "Shuffles operate on vector types!");
Step 9: '?' condition is true
33002  assert(VT.getSizeInBits() == Root.getSimpleValueType().getSizeInBits() &&
33003         "Can only combine shuffles of the same vector register size.");
Step 10: Assuming the condition is true
Step 11: '?' condition is true
33004
33005 // Extract target shuffle mask and resolve sentinels and inputs.
33006 SmallVector<int, 64> OpMask;
33007 SmallVector<SDValue, 2> OpInputs;
33008 if (!getTargetShuffleInputs(Op, OpInputs, OpMask, DAG, Depth))
[12] Calling 'getTargetShuffleInputs'
[44] Returning from 'getTargetShuffleInputs'
[45] Taking false branch
33009 return SDValue();
33010
33011 // Add the inputs to the Ops list, avoiding duplicates.
33012 SmallVector<SDValue, 16> Ops(SrcOps.begin(), SrcOps.end());
33013
33014 auto AddOp = [&Ops](SDValue Input, int InsertionPoint) -> int {
33015 // Attempt to find an existing match.
33016 SDValue InputBC = peekThroughBitcasts(Input);
33017 for (int i = 0, e = Ops.size(); i < e; ++i)
33018 if (InputBC == peekThroughBitcasts(Ops[i]))
33019 return i;
33020 // Match failed - should we replace an existing Op?
33021 if (InsertionPoint >= 0) {
33022 Ops[InsertionPoint] = Input;
33023 return InsertionPoint;
33024 }
33025 // Add to the end of the Ops list.
33026 Ops.push_back(Input);
33027 return Ops.size() - 1;
33028 };
33029
33030 SmallVector<int, 2> OpInputIdx;
33031 for (SDValue OpInput : OpInputs)
[46] Assuming '__begin1' is equal to '__end1'
33032 OpInputIdx.push_back(AddOp(OpInput, OpInputIdx.empty() ? SrcOpIndex : -1));
33033
33034 assert(((RootMask.size() > OpMask.size() &&
[47] Assuming the condition is true
[48] Calling 'SmallVectorBase::size'
[50] Returning from 'SmallVectorBase::size'
[51] Division by zero
33035          RootMask.size() % OpMask.size() == 0) ||
33036         (OpMask.size() > RootMask.size() &&
33037          OpMask.size() % RootMask.size() == 0) ||
33038         OpMask.size() == RootMask.size()) &&
33039        "The smaller number of elements must divide the larger.");
33040
33041 // This function can be performance-critical, so we rely on the power-of-2
33042 // knowledge that we have about the mask sizes to replace div/rem ops with
33043 // bit-masks and shifts.
33044 assert(isPowerOf2_32(RootMask.size()) && "Non-power-of-2 shuffle mask sizes");
33045 assert(isPowerOf2_32(OpMask.size()) && "Non-power-of-2 shuffle mask sizes");
33046 unsigned RootMaskSizeLog2 = countTrailingZeros(RootMask.size());
33047 unsigned OpMaskSizeLog2 = countTrailingZeros(OpMask.size());
33048
33049 unsigned MaskWidth = std::max<unsigned>(OpMask.size(), RootMask.size());
33050 unsigned RootRatio = std::max<unsigned>(1, OpMask.size() >> RootMaskSizeLog2);
33051 unsigned OpRatio = std::max<unsigned>(1, RootMask.size() >> OpMaskSizeLog2);
33052 assert((RootRatio == 1 || OpRatio == 1) &&
33053        "Must not have a ratio for both incoming and op masks!");
33054
33055 assert(isPowerOf2_32(MaskWidth) && "Non-power-of-2 shuffle mask sizes");
33056 assert(isPowerOf2_32(RootRatio) && "Non-power-of-2 shuffle mask sizes");
33057 assert(isPowerOf2_32(OpRatio) && "Non-power-of-2 shuffle mask sizes");
33058 unsigned RootRatioLog2 = countTrailingZeros(RootRatio);
33059 unsigned OpRatioLog2 = countTrailingZeros(OpRatio);
33060
33061 SmallVector<int, 64> Mask(MaskWidth, SM_SentinelUndef);
33062
33063 // Merge this shuffle operation's mask into our accumulated mask. Note that
33064 // this shuffle's mask will be the first applied to the input, followed by the
33065 // root mask to get us all the way to the root value arrangement. The reason
33066 // for this order is that we are recursing up the operation chain.
33067 for (unsigned i = 0; i < MaskWidth; ++i) {
33068 unsigned RootIdx = i >> RootRatioLog2;
33069 if (RootMask[RootIdx] < 0) {
33070 // This is a zero or undef lane, we're done.
33071 Mask[i] = RootMask[RootIdx];
33072 continue;
33073 }
33074
33075 unsigned RootMaskedIdx =
33076 RootRatio == 1
33077 ? RootMask[RootIdx]
33078 : (RootMask[RootIdx] << RootRatioLog2) + (i & (RootRatio - 1));
33079
33080 // Just insert the scaled root mask value if it references an input other
33081 // than the SrcOp we're currently inserting.
33082 if ((RootMaskedIdx < (SrcOpIndex * MaskWidth)) ||
33083 (((SrcOpIndex + 1) * MaskWidth) <= RootMaskedIdx)) {
33084 Mask[i] = RootMaskedIdx;
33085 continue;
33086 }
33087
33088 RootMaskedIdx = RootMaskedIdx & (MaskWidth - 1);
33089 unsigned OpIdx = RootMaskedIdx >> OpRatioLog2;
33090 if (OpMask[OpIdx] < 0) {
33091 // The incoming lanes are zero or undef, it doesn't matter which ones we
33092 // are using.
33093 Mask[i] = OpMask[OpIdx];
33094 continue;
33095 }
33096
33097 // Ok, we have non-zero lanes, map them through to one of the Op's inputs.
33098 unsigned OpMaskedIdx =
33099 OpRatio == 1
33100 ? OpMask[OpIdx]
33101 : (OpMask[OpIdx] << OpRatioLog2) + (RootMaskedIdx & (OpRatio - 1));
33102
33103 OpMaskedIdx = OpMaskedIdx & (MaskWidth - 1);
33104 int InputIdx = OpMask[OpIdx] / (int)OpMask.size();
33105 assert(0 <= OpInputIdx[InputIdx] && "Unknown target shuffle input");
33106 OpMaskedIdx += OpInputIdx[InputIdx] * MaskWidth;
33107
33108 Mask[i] = OpMaskedIdx;
33109 }
33110
33111 // Remove unused/repeated shuffle source ops.
33112 resolveTargetShuffleInputsAndMask(Ops, Mask);
33113
33114 // Handle the all undef/zero cases early.
33115 if (all_of(Mask, [](int Idx) { return Idx == SM_SentinelUndef; }))
33116 return DAG.getUNDEF(Root.getValueType());
33117
33118 // TODO - should we handle the mixed zero/undef case as well? Just returning
33119 // a zero mask will lose information on undef elements possibly reducing
33120 // future combine possibilities.
33121 if (all_of(Mask, [](int Idx) { return Idx < 0; }))
33122 return getZeroVector(Root.getSimpleValueType(), Subtarget, DAG,
33123 SDLoc(Root));
33124
33125 assert(!Ops.empty() && "Shuffle with no inputs detected");
33126 HasVariableMask |= isTargetShuffleVariableMask(Op.getOpcode());
33127
33128 // Update the list of shuffle nodes that have been combined so far.
33129 SmallVector<const SDNode *, 16> CombinedNodes(SrcNodes.begin(),
33130 SrcNodes.end());
33131 CombinedNodes.push_back(Op.getNode());
33132
33133 // See if we can recurse into each shuffle source op (if it's a target
33134 // shuffle). The source op should only be generally combined if it either has
33135 // a single use (i.e. current Op) or all its users have already been combined;
33136 // if not, then we can still combine but should prevent generation of variable
33137 // shuffles to avoid constant pool bloat.
33138 // Don't recurse if we already have more source ops than we can combine in
33139 // the remaining recursion depth.
33140 if (Ops.size() < (MaxRecursionDepth - Depth)) {
33141 for (int i = 0, e = Ops.size(); i < e; ++i) {
33142 bool AllowVar = false;
33143 if (Ops[i].getNode()->hasOneUse() ||
33144 SDNode::areOnlyUsersOf(CombinedNodes, Ops[i].getNode()))
33145 AllowVar = AllowVariableMask;
33146 if (SDValue Res = combineX86ShufflesRecursively(
33147 Ops, i, Root, Mask, CombinedNodes, Depth + 1, HasVariableMask,
33148 AllowVar, DAG, Subtarget))
33149 return Res;
33150 }
33151 }
33152
33153 // Attempt to constant fold all of the constant source ops.
33154 if (SDValue Cst = combineX86ShufflesConstants(
33155 Ops, Mask, Root, HasVariableMask, DAG, Subtarget))
33156 return Cst;
33157
33158 // We can only combine unary and binary shuffle mask cases.
33159 if (Ops.size() <= 2) {
33160 // Minor canonicalization of the accumulated shuffle mask to make it easier
33161 // to match below. All this does is detect masks with sequential pairs of
33162 // elements, and shrink them to the half-width mask. It does this in a loop
33163 // so it will reduce the size of the mask to the minimal width mask which
33164 // performs an equivalent shuffle.
33165 SmallVector<int, 64> WidenedMask;
33166 while (Mask.size() > 1 && canWidenShuffleElements(Mask, WidenedMask)) {
33167 Mask = std::move(WidenedMask);
33168 }
33169
33170 // Canonicalization of binary shuffle masks to improve pattern matching by
33171 // commuting the inputs.
33172 if (Ops.size() == 2 && canonicalizeShuffleMaskWithCommute(Mask)) {
33173 ShuffleVectorSDNode::commuteMask(Mask);
33174 std::swap(Ops[0], Ops[1]);
33175 }
33176
33177 // Finally, try to combine into a single shuffle instruction.
33178 return combineX86ShuffleChain(Ops, Root, Mask, Depth, HasVariableMask,
33179 AllowVariableMask, DAG, Subtarget);
33180 }
33181
33182 // If that failed and any input is extracted then try to combine as a
33183 // shuffle with the larger type.
33184 return combineX86ShuffleChainWithExtract(Ops, Root, Mask, Depth,
33185 HasVariableMask, AllowVariableMask,
33186 DAG, Subtarget);
33187}
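The shift-and-mask bookkeeping above (lines 33046-33059 and the merge loop that follows) is the heart of this routine, and it leans on the assert at line 33034: RootMask.size() % OpMask.size() is only well defined when the extracted op mask is non-empty, which is exactly the division the analyzer flags on this path. The standalone sketch below replays the same merge arithmetic on plain integer vectors. It is illustrative only (the helper name and the single-operand simplification are not LLVM code), and it spells out the non-empty, power-of-two preconditions.

#include <algorithm>
#include <cassert>
#include <cstdio>
#include <vector>

// Single-operand sketch of the mask merge in combineX86ShufflesRecursively.
// Both masks must be non-empty powers of two in size; an empty op mask is
// exactly what would make the modulo at line 33034 divide by zero.
static std::vector<int> mergeMasks(const std::vector<int> &RootMask,
                                   const std::vector<int> &OpMask) {
  assert(!RootMask.empty() && !OpMask.empty() && "masks must be non-empty");
  unsigned RootSizeLog2 = __builtin_ctz((unsigned)RootMask.size());
  unsigned OpSizeLog2 = __builtin_ctz((unsigned)OpMask.size());
  unsigned MaskWidth = std::max<unsigned>(RootMask.size(), OpMask.size());
  // For a power of two N: x / N == x >> log2(N) and x % N == x & (N - 1).
  unsigned RootRatio = std::max<unsigned>(1, OpMask.size() >> RootSizeLog2);
  unsigned OpRatio = std::max<unsigned>(1, RootMask.size() >> OpSizeLog2);
  unsigned RootRatioLog2 = __builtin_ctz(RootRatio);
  unsigned OpRatioLog2 = __builtin_ctz(OpRatio);

  std::vector<int> Mask(MaskWidth);
  for (unsigned i = 0; i != MaskWidth; ++i) {
    int RootIdx = i >> RootRatioLog2;                         // i / RootRatio
    int RootMaskedIdx = (RootMask[RootIdx] << RootRatioLog2) +
                        (int)(i & (RootRatio - 1));           // i % RootRatio
    int OpIdx = RootMaskedIdx >> OpRatioLog2;                 // / OpRatio
    Mask[i] = (OpMask[OpIdx] << OpRatioLog2) +
              (RootMaskedIdx & ((int)OpRatio - 1));           // % OpRatio
  }
  return Mask;
}

int main() {
  // A 2-element root mask (swap 64-bit halves) on top of a 4-element op mask
  // (swap 32-bit elements within each half) merges into a full reverse.
  for (int M : mergeMasks({1, 0}, {1, 0, 3, 2}))
    std::printf("%d ", M);                                    // prints: 3 2 1 0
  std::printf("\n");
  return 0;
}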
33188
33189/// Helper entry wrapper to combineX86ShufflesRecursively.
33190static SDValue combineX86ShufflesRecursively(SDValue Op, SelectionDAG &DAG,
33191 const X86Subtarget &Subtarget) {
33192 return combineX86ShufflesRecursively({Op}, 0, Op, {0}, {}, /*Depth*/ 0,
33193 /*HasVarMask*/ false,
33194 /*AllowVarMask*/ true, DAG, Subtarget);
33195}
33196
33197/// Get the PSHUF-style mask from PSHUF node.
33198///
33199 /// This is a very minor wrapper around getTargetShuffleMask to ease forming v4
33200/// PSHUF-style masks that can be reused with such instructions.
33201static SmallVector<int, 4> getPSHUFShuffleMask(SDValue N) {
33202 MVT VT = N.getSimpleValueType();
33203 SmallVector<int, 4> Mask;
33204 SmallVector<SDValue, 2> Ops;
33205 bool IsUnary;
33206 bool HaveMask =
33207 getTargetShuffleMask(N.getNode(), VT, false, Ops, Mask, IsUnary);
33208 (void)HaveMask;
33209 assert(HaveMask);
33210
33211 // If we have more than 128-bits, only the low 128-bits of shuffle mask
33212 // matter. Check that the upper masks are repeats and remove them.
33213 if (VT.getSizeInBits() > 128) {
33214 int LaneElts = 128 / VT.getScalarSizeInBits();
33215#ifndef NDEBUG
33216 for (int i = 1, NumLanes = VT.getSizeInBits() / 128; i < NumLanes; ++i)
33217 for (int j = 0; j < LaneElts; ++j)
33218 assert(Mask[j] == Mask[i * LaneElts + j] - (LaneElts * i) &&
33219        "Mask doesn't repeat in high 128-bit lanes!");
33220#endif
33221 Mask.resize(LaneElts);
33222 }
33223
33224 switch (N.getOpcode()) {
33225 case X86ISD::PSHUFD:
33226 return Mask;
33227 case X86ISD::PSHUFLW:
33228 Mask.resize(4);
33229 return Mask;
33230 case X86ISD::PSHUFHW:
33231 Mask.erase(Mask.begin(), Mask.begin() + 4);
33232 for (int &M : Mask)
33233 M -= 4;
33234 return Mask;
33235 default:
33236 llvm_unreachable("No valid shuffle instruction found!");
33237 }
33238}
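The 4-element masks returned here are later re-packed into a PSHUF immediate (see the getV4X86ShuffleImm8ForMask call at line 33341). As a reference for that encoding, here is a small standalone sketch of the usual two-bits-per-lane imm8 layout used by PSHUFD/PSHUFLW/PSHUFHW; the helper names are made up for illustration.

#include <array>
#include <cassert>
#include <cstdio>

// Pack a 4-element shuffle mask into a PSHUF-style immediate: result element
// i is selected by bits [2*i+1 : 2*i] of the imm8.
static unsigned encodePSHUFImm(const std::array<int, 4> &Mask) {
  unsigned Imm = 0;
  for (int i = 0; i != 4; ++i) {
    assert(Mask[i] >= 0 && Mask[i] < 4 && "PSHUF masks only address 4 lanes");
    Imm |= unsigned(Mask[i]) << (2 * i);
  }
  return Imm;
}

static std::array<int, 4> decodePSHUFImm(unsigned Imm) {
  return {int(Imm & 3), int((Imm >> 2) & 3), int((Imm >> 4) & 3),
          int((Imm >> 6) & 3)};
}

int main() {
  // The "swap dwords" mask {2, 3, 0, 1} used later in combineTargetShuffle
  // encodes to 0x4E.
  unsigned Imm = encodePSHUFImm({2, 3, 0, 1});
  std::printf("imm = 0x%02X\n", Imm);                     // imm = 0x4E
  std::array<int, 4> M = decodePSHUFImm(Imm);
  std::printf("%d %d %d %d\n", M[0], M[1], M[2], M[3]);   // 2 3 0 1
  return 0;
}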
33239
33240/// Search for a combinable shuffle across a chain ending in pshufd.
33241///
33242/// We walk up the chain and look for a combinable shuffle, skipping over
33243/// shuffles that we could hoist this shuffle's transformation past without
33244/// altering anything.
33245static SDValue
33246combineRedundantDWordShuffle(SDValue N, MutableArrayRef<int> Mask,
33247 SelectionDAG &DAG) {
33248 assert(N.getOpcode() == X86ISD::PSHUFD &&
33249        "Called with something other than an x86 128-bit half shuffle!");
33250 SDLoc DL(N);
33251
33252 // Walk up a single-use chain looking for a combinable shuffle. Keep a stack
33253 // of the shuffles in the chain so that we can form a fresh chain to replace
33254 // this one.
33255 SmallVector<SDValue, 8> Chain;
33256 SDValue V = N.getOperand(0);
33257 for (; V.hasOneUse(); V = V.getOperand(0)) {
33258 switch (V.getOpcode()) {
33259 default:
33260 return SDValue(); // Nothing combined!
33261
33262 case ISD::BITCAST:
33263 // Skip bitcasts as we always know the type for the target specific
33264 // instructions.
33265 continue;
33266
33267 case X86ISD::PSHUFD:
33268 // Found another dword shuffle.
33269 break;
33270
33271 case X86ISD::PSHUFLW:
33272 // Check that the low words (being shuffled) are the identity in the
33273 // dword shuffle, and the high words are self-contained.
33274 if (Mask[0] != 0 || Mask[1] != 1 ||
33275 !(Mask[2] >= 2 && Mask[2] < 4 && Mask[3] >= 2 && Mask[3] < 4))
33276 return SDValue();
33277
33278 Chain.push_back(V);
33279 continue;
33280
33281 case X86ISD::PSHUFHW:
33282 // Check that the high words (being shuffled) are the identity in the
33283 // dword shuffle, and the low words are self-contained.
33284 if (Mask[2] != 2 || Mask[3] != 3 ||
33285 !(Mask[0] >= 0 && Mask[0] < 2 && Mask[1] >= 0 && Mask[1] < 2))
33286 return SDValue();
33287
33288 Chain.push_back(V);
33289 continue;
33290
33291 case X86ISD::UNPCKL:
33292 case X86ISD::UNPCKH:
33293 // For either i8 -> i16 or i16 -> i32 unpacks, we can combine a dword
33294 // shuffle into a preceding word shuffle.
33295 if (V.getSimpleValueType().getVectorElementType() != MVT::i8 &&
33296 V.getSimpleValueType().getVectorElementType() != MVT::i16)
33297 return SDValue();
33298
33299 // Search for a half-shuffle which we can combine with.
33300 unsigned CombineOp =
33301 V.getOpcode() == X86ISD::UNPCKL ? X86ISD::PSHUFLW : X86ISD::PSHUFHW;
33302 if (V.getOperand(0) != V.getOperand(1) ||
33303 !V->isOnlyUserOf(V.getOperand(0).getNode()))
33304 return SDValue();
33305 Chain.push_back(V);
33306 V = V.getOperand(0);
33307 do {
33308 switch (V.getOpcode()) {
33309 default:
33310 return SDValue(); // Nothing to combine.
33311
33312 case X86ISD::PSHUFLW:
33313 case X86ISD::PSHUFHW:
33314 if (V.getOpcode() == CombineOp)
33315 break;
33316
33317 Chain.push_back(V);
33318
33319 LLVM_FALLTHROUGH;
33320 case ISD::BITCAST:
33321 V = V.getOperand(0);
33322 continue;
33323 }
33324 break;
33325 } while (V.hasOneUse());
33326 break;
33327 }
33328 // Break out of the loop if we break out of the switch.
33329 break;
33330 }
33331
33332 if (!V.hasOneUse())
33333 // We fell out of the loop without finding a viable combining instruction.
33334 return SDValue();
33335
33336 // Merge this node's mask and our incoming mask.
33337 SmallVector<int, 4> VMask = getPSHUFShuffleMask(V);
33338 for (int &M : Mask)
33339 M = VMask[M];
33340 V = DAG.getNode(V.getOpcode(), DL, V.getValueType(), V.getOperand(0),
33341 getV4X86ShuffleImm8ForMask(Mask, DL, DAG));
33342
33343 // Rebuild the chain around this new shuffle.
33344 while (!Chain.empty()) {
33345 SDValue W = Chain.pop_back_val();
33346
33347 if (V.getValueType() != W.getOperand(0).getValueType())
33348 V = DAG.getBitcast(W.getOperand(0).getValueType(), V);
33349
33350 switch (W.getOpcode()) {
33351 default:
33352 llvm_unreachable("Only PSHUF and UNPCK instructions get here!");
33353
33354 case X86ISD::UNPCKL:
33355 case X86ISD::UNPCKH:
33356 V = DAG.getNode(W.getOpcode(), DL, W.getValueType(), V, V);
33357 break;
33358
33359 case X86ISD::PSHUFD:
33360 case X86ISD::PSHUFLW:
33361 case X86ISD::PSHUFHW:
33362 V = DAG.getNode(W.getOpcode(), DL, W.getValueType(), V, W.getOperand(1));
33363 break;
33364 }
33365 }
33366 if (V.getValueType() != N.getValueType())
33367 V = DAG.getBitcast(N.getValueType(), V);
33368
33369 // Return the new chain to replace N.
33370 return V;
33371}
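The merge at line 33339 (M = VMask[M]) is ordinary shuffle-mask composition: the outer mask indexes into the result of the inner shuffle, so the combined mask is Inner[Outer[i]]. A minimal standalone sketch of that composition, with illustrative names only:

#include <array>
#include <cstdio>

// Compose two 4-element masks: Outer reads from the result of Inner, so the
// single combined shuffle reads element Inner[Outer[i]] of the original input.
static std::array<int, 4> composeMasks(const std::array<int, 4> &Outer,
                                       const std::array<int, 4> &Inner) {
  std::array<int, 4> Combined;
  for (int i = 0; i != 4; ++i)
    Combined[i] = Inner[Outer[i]];
  return Combined;
}

int main() {
  // An inner rotate-left-by-one followed by an outer rotate-right-by-one
  // composes to the identity mask.
  std::array<int, 4> RotL = {1, 2, 3, 0};
  std::array<int, 4> RotR = {3, 0, 1, 2};
  std::array<int, 4> C = composeMasks(RotR, RotL);
  std::printf("%d %d %d %d\n", C[0], C[1], C[2], C[3]);   // 0 1 2 3
  return 0;
}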
33372
33373/// Try to combine x86 target specific shuffles.
33374static SDValue combineTargetShuffle(SDValue N, SelectionDAG &DAG,
33375 TargetLowering::DAGCombinerInfo &DCI,
33376 const X86Subtarget &Subtarget) {
33377 SDLoc DL(N);
33378 MVT VT = N.getSimpleValueType();
33379 SmallVector<int, 4> Mask;
33380 unsigned Opcode = N.getOpcode();
33381
33382 // Combine binary shuffle of 2 similar 'Horizontal' instructions into a
33383 // single instruction.
33384 if (VT.getScalarSizeInBits() == 64 &&
33385 (Opcode == X86ISD::MOVSD || Opcode == X86ISD::UNPCKH ||
33386 Opcode == X86ISD::UNPCKL)) {
33387 auto BC0 = peekThroughBitcasts(N.getOperand(0));
33388 auto BC1 = peekThroughBitcasts(N.getOperand(1));
33389 EVT VT0 = BC0.getValueType();
33390 EVT VT1 = BC1.getValueType();
33391 unsigned Opcode0 = BC0.getOpcode();
33392 unsigned Opcode1 = BC1.getOpcode();
33393 if (Opcode0 == Opcode1 && VT0 == VT1 &&
33394 (Opcode0 == X86ISD::FHADD || Opcode0 == X86ISD::HADD ||
33395 Opcode0 == X86ISD::FHSUB || Opcode0 == X86ISD::HSUB ||
33396 Opcode0 == X86ISD::PACKSS || Opcode0 == X86ISD::PACKUS)) {
33397 SDValue Lo, Hi;
33398 if (Opcode == X86ISD::MOVSD) {
33399 Lo = BC1.getOperand(0);
33400 Hi = BC0.getOperand(1);
33401 } else {
33402 Lo = BC0.getOperand(Opcode == X86ISD::UNPCKH ? 1 : 0);
33403 Hi = BC1.getOperand(Opcode == X86ISD::UNPCKH ? 1 : 0);
33404 }
33405 SDValue Horiz = DAG.getNode(Opcode0, DL, VT0, Lo, Hi);
33406 return DAG.getBitcast(VT, Horiz);
33407 }
33408 }
33409
33410 switch (Opcode) {
33411 case X86ISD::VBROADCAST: {
33412 SDValue Src = N.getOperand(0);
33413 SDValue BC = peekThroughBitcasts(Src);
33414 EVT SrcVT = Src.getValueType();
33415 EVT BCVT = BC.getValueType();
33416
33417 // If broadcasting from another shuffle, attempt to simplify it.
33418 // TODO - we really need a general SimplifyDemandedVectorElts mechanism.
33419 if (isTargetShuffle(BC.getOpcode()) &&
33420 VT.getScalarSizeInBits() % BCVT.getScalarSizeInBits() == 0) {
33421 unsigned Scale = VT.getScalarSizeInBits() / BCVT.getScalarSizeInBits();
33422 SmallVector<int, 16> DemandedMask(BCVT.getVectorNumElements(),
33423 SM_SentinelUndef);
33424 for (unsigned i = 0; i != Scale; ++i)
33425 DemandedMask[i] = i;
33426 if (SDValue Res = combineX86ShufflesRecursively(
33427 {BC}, 0, BC, DemandedMask, {}, /*Depth*/ 0,
33428 /*HasVarMask*/ false, /*AllowVarMask*/ true, DAG, Subtarget))
33429 return DAG.getNode(X86ISD::VBROADCAST, DL, VT,
33430 DAG.getBitcast(SrcVT, Res));
33431 }
33432
33433 // broadcast(bitcast(src)) -> bitcast(broadcast(src))
33434 // 32-bit targets have to bitcast i64 to f64, so better to bitcast upward.
33435 if (Src.getOpcode() == ISD::BITCAST &&
33436 SrcVT.getScalarSizeInBits() == BCVT.getScalarSizeInBits()) {
33437 EVT NewVT = EVT::getVectorVT(*DAG.getContext(), BCVT.getScalarType(),
33438 VT.getVectorNumElements());
33439 return DAG.getBitcast(VT, DAG.getNode(X86ISD::VBROADCAST, DL, NewVT, BC));
33440 }
33441
33442 // Reduce broadcast source vector to lowest 128-bits.
33443 if (SrcVT.getSizeInBits() > 128)
33444 return DAG.getNode(X86ISD::VBROADCAST, DL, VT,
33445 extract128BitVector(Src, 0, DAG, DL));
33446
33447 // broadcast(scalar_to_vector(x)) -> broadcast(x).
33448 if (Src.getOpcode() == ISD::SCALAR_TO_VECTOR)
33449 return DAG.getNode(X86ISD::VBROADCAST, DL, VT, Src.getOperand(0));
33450
33451 // Share broadcast with the longest vector and extract low subvector (free).
33452 for (SDNode *User : Src->uses())
33453 if (User != N.getNode() && User->getOpcode() == X86ISD::VBROADCAST &&
33454 User->getValueSizeInBits(0) > VT.getSizeInBits()) {
33455 return extractSubVector(SDValue(User, 0), 0, DAG, DL,
33456 VT.getSizeInBits());
33457 }
33458
33459 // vbroadcast(scalarload X) -> vbroadcast_load X
33460 // For float loads, extract other uses of the scalar from the broadcast.
33461 if (!SrcVT.isVector() && (Src.hasOneUse() || VT.isFloatingPoint()) &&
33462 ISD::isNormalLoad(Src.getNode())) {
33463 LoadSDNode *LN = cast<LoadSDNode>(Src);
33464 SDVTList Tys = DAG.getVTList(VT, MVT::Other);
33465 SDValue Ops[] = { LN->getChain(), LN->getBasePtr() };
33466 SDValue BcastLd =
33467 DAG.getMemIntrinsicNode(X86ISD::VBROADCAST_LOAD, DL, Tys, Ops,
33468 LN->getMemoryVT(), LN->getMemOperand());
33469 // If the load value is used only by N, replace it via CombineTo N.
33470 bool NoReplaceExtract = Src.hasOneUse();
33471 DCI.CombineTo(N.getNode(), BcastLd);
33472 if (NoReplaceExtract) {
33473 DAG.ReplaceAllUsesOfValueWith(SDValue(LN, 1), BcastLd.getValue(1));
33474 DCI.recursivelyDeleteUnusedNodes(LN);
33475 } else {
33476 SDValue Scl = DAG.getNode(ISD::EXTRACT_VECTOR_ELT, DL, SrcVT, BcastLd,
33477 DAG.getIntPtrConstant(0, DL));
33478 DCI.CombineTo(LN, Scl, BcastLd.getValue(1));
33479 }
33480 return N; // Return N so it doesn't get rechecked!
33481 }
33482
33483 return SDValue();
33484 }
33485 case X86ISD::BLENDI: {
33486 SDValue N0 = N.getOperand(0);
33487 SDValue N1 = N.getOperand(1);
33488
33489 // blend(bitcast(x),bitcast(y)) -> bitcast(blend(x,y)) to narrower types.
33490 // TODO: Handle MVT::v16i16 repeated blend mask.
33491 if (N0.getOpcode() == ISD::BITCAST && N1.getOpcode() == ISD::BITCAST &&
33492 N0.getOperand(0).getValueType() == N1.getOperand(0).getValueType()) {
33493 MVT SrcVT = N0.getOperand(0).getSimpleValueType();
33494 if ((VT.getScalarSizeInBits() % SrcVT.getScalarSizeInBits()) == 0 &&
33495 SrcVT.getScalarSizeInBits() >= 32) {
33496 unsigned BlendMask = N.getConstantOperandVal(2);
33497 unsigned Size = VT.getVectorNumElements();
33498 unsigned Scale = VT.getScalarSizeInBits() / SrcVT.getScalarSizeInBits();
33499 BlendMask = scaleVectorShuffleBlendMask(BlendMask, Size, Scale);
33500 return DAG.getBitcast(
33501 VT, DAG.getNode(X86ISD::BLENDI, DL, SrcVT, N0.getOperand(0),
33502 N1.getOperand(0),
33503 DAG.getTargetConstant(BlendMask, DL, MVT::i8)));
33504 }
33505 }
33506 return SDValue();
33507 }
33508 case X86ISD::VPERMI: {
33509 // vpermi(bitcast(x)) -> bitcast(vpermi(x)) for same number of elements.
33510 // TODO: Remove when we have preferred domains in combineX86ShuffleChain.
33511 SDValue N0 = N.getOperand(0);
33512 SDValue N1 = N.getOperand(1);
33513 unsigned EltSizeInBits = VT.getScalarSizeInBits();
33514 if (N0.getOpcode() == ISD::BITCAST &&
33515 N0.getOperand(0).getScalarValueSizeInBits() == EltSizeInBits) {
33516 SDValue Src = N0.getOperand(0);
33517 EVT SrcVT = Src.getValueType();
33518 SDValue Res = DAG.getNode(X86ISD::VPERMI, DL, SrcVT, Src, N1);
33519 return DAG.getBitcast(VT, Res);
33520 }
33521 return SDValue();
33522 }
33523 case X86ISD::PSHUFD:
33524 case X86ISD::PSHUFLW:
33525 case X86ISD::PSHUFHW:
33526 Mask = getPSHUFShuffleMask(N);
33527 assert(Mask.size() == 4);
33528 break;
33529 case X86ISD::MOVSD:
33530 case X86ISD::MOVSS: {
33531 SDValue N0 = N.getOperand(0);
33532 SDValue N1 = N.getOperand(1);
33533
33534 // Canonicalize scalar FPOps:
33535 // MOVS*(N0, OP(N0, N1)) --> MOVS*(N0, SCALAR_TO_VECTOR(OP(N0[0], N1[0])))
33536 // If commutable, allow OP(N1[0], N0[0]).
33537 unsigned Opcode1 = N1.getOpcode();
33538 if (Opcode1 == ISD::FADD || Opcode1 == ISD::FMUL || Opcode1 == ISD::FSUB ||
33539 Opcode1 == ISD::FDIV) {
33540 SDValue N10 = N1.getOperand(0);
33541 SDValue N11 = N1.getOperand(1);
33542 if (N10 == N0 ||
33543 (N11 == N0 && (Opcode1 == ISD::FADD || Opcode1 == ISD::FMUL))) {
33544 if (N10 != N0)
33545 std::swap(N10, N11);
33546 MVT SVT = VT.getVectorElementType();
33547 SDValue ZeroIdx = DAG.getIntPtrConstant(0, DL);
33548 N10 = DAG.getNode(ISD::EXTRACT_VECTOR_ELT, DL, SVT, N10, ZeroIdx);
33549 N11 = DAG.getNode(ISD::EXTRACT_VECTOR_ELT, DL, SVT, N11, ZeroIdx);
33550 SDValue Scl = DAG.getNode(Opcode1, DL, SVT, N10, N11);
33551 SDValue SclVec = DAG.getNode(ISD::SCALAR_TO_VECTOR, DL, VT, Scl);
33552 return DAG.getNode(Opcode, DL, VT, N0, SclVec);
33553 }
33554 }
33555
33556 return SDValue();
33557 }
33558 case X86ISD::INSERTPS: {
33559 assert(VT == MVT::v4f32 && "INSERTPS ValueType must be MVT::v4f32");
33560 SDValue Op0 = N.getOperand(0);
33561 SDValue Op1 = N.getOperand(1);
33562 SDValue Op2 = N.getOperand(2);
33563 unsigned InsertPSMask = cast<ConstantSDNode>(Op2)->getZExtValue();
33564 unsigned SrcIdx = (InsertPSMask >> 6) & 0x3;
33565 unsigned DstIdx = (InsertPSMask >> 4) & 0x3;
33566 unsigned ZeroMask = InsertPSMask & 0xF;
33567
33568 // If we zero out all elements from Op0 then we don't need to reference it.
33569 if (((ZeroMask | (1u << DstIdx)) == 0xF) && !Op0.isUndef())
33570 return DAG.getNode(X86ISD::INSERTPS, DL, VT, DAG.getUNDEF(VT), Op1,
33571 DAG.getTargetConstant(InsertPSMask, DL, MVT::i8));
33572
33573 // If we zero out the element from Op1 then we don't need to reference it.
33574 if ((ZeroMask & (1u << DstIdx)) && !Op1.isUndef())
33575 return DAG.getNode(X86ISD::INSERTPS, DL, VT, Op0, DAG.getUNDEF(VT),
33576 DAG.getTargetConstant(InsertPSMask, DL, MVT::i8));
33577
33578 // Attempt to merge insertps Op1 with an inner target shuffle node.
33579 SmallVector<int, 8> TargetMask1;
33580 SmallVector<SDValue, 2> Ops1;
33581 APInt KnownUndef1, KnownZero1;
33582 if (getTargetShuffleAndZeroables(Op1, TargetMask1, Ops1, KnownUndef1,
33583 KnownZero1)) {
33584 if (KnownUndef1[SrcIdx] || KnownZero1[SrcIdx]) {
33585 // Zero/UNDEF insertion - zero out element and remove dependency.
33586 InsertPSMask |= (1u << DstIdx);
33587 return DAG.getNode(X86ISD::INSERTPS, DL, VT, Op0, DAG.getUNDEF(VT),
33588 DAG.getTargetConstant(InsertPSMask, DL, MVT::i8));
33589 }
33590 // Update insertps mask srcidx and reference the source input directly.
33591 int M = TargetMask1[SrcIdx];
33592 assert(0 <= M && M < 8 && "Shuffle index out of range");
33593 InsertPSMask = (InsertPSMask & 0x3f) | ((M & 0x3) << 6);
33594 Op1 = Ops1[M < 4 ? 0 : 1];
33595 return DAG.getNode(X86ISD::INSERTPS, DL, VT, Op0, Op1,
33596 DAG.getTargetConstant(InsertPSMask, DL, MVT::i8));
33597 }
33598
33599 // Attempt to merge insertps Op0 with an inner target shuffle node.
33600 SmallVector<int, 8> TargetMask0;
33601 SmallVector<SDValue, 2> Ops0;
33602 APInt KnownUndef0, KnownZero0;
33603 if (getTargetShuffleAndZeroables(Op0, TargetMask0, Ops0, KnownUndef0,
33604 KnownZero0)) {
33605 bool Updated = false;
33606 bool UseInput00 = false;
33607 bool UseInput01 = false;
33608 for (int i = 0; i != 4; ++i) {
33609 if ((InsertPSMask & (1u << i)) || (i == (int)DstIdx)) {
33610 // No change if element is already zero or the inserted element.
33611 continue;
33612 } else if (KnownUndef0[i] || KnownZero0[i]) {
33613 // If the target mask is undef/zero then we must zero the element.
33614 InsertPSMask |= (1u << i);
33615 Updated = true;
33616 continue;
33617 }
33618
33619 // The input vector element must be inline.
33620 int M = TargetMask0[i];
33621 if (M != i && M != (i + 4))
33622 return SDValue();
33623
33624 // Determine which inputs of the target shuffle we're using.
33625 UseInput00 |= (0 <= M && M < 4);
33626 UseInput01 |= (4 <= M);
33627 }
33628
33629 // If we're not using both inputs of the target shuffle then use the
33630 // referenced input directly.
33631 if (UseInput00 && !UseInput01) {
33632 Updated = true;
33633 Op0 = Ops0[0];
33634 } else if (!UseInput00 && UseInput01) {
33635 Updated = true;
33636 Op0 = Ops0[1];
33637 }
33638
33639 if (Updated)
33640 return DAG.getNode(X86ISD::INSERTPS, DL, VT, Op0, Op1,
33641 DAG.getTargetConstant(InsertPSMask, DL, MVT::i8));
33642 }
33643
33644 // If we're inserting an element from a vbroadcast load, fold the
33645 // load into the X86insertps instruction. We need to convert the scalar
33646 // load to a vector and clear the source lane of the INSERTPS control.
33647 if (Op1.getOpcode() == X86ISD::VBROADCAST_LOAD && Op1.hasOneUse()) {
33648 auto *MemIntr = cast<MemIntrinsicSDNode>(Op1);
33649 if (MemIntr->getMemoryVT().getScalarSizeInBits() == 32) {
33650 SDValue Load = DAG.getLoad(MVT::f32, DL, MemIntr->getChain(),
33651 MemIntr->getBasePtr(),
33652 MemIntr->getMemOperand());
33653 SDValue Insert = DAG.getNode(X86ISD::INSERTPS, DL, VT, Op0,
33654 DAG.getNode(ISD::SCALAR_TO_VECTOR, DL, VT,
33655 Load),
33656 DAG.getTargetConstant(InsertPSMask & 0x3f, DL, MVT::i8));
33657 DAG.ReplaceAllUsesOfValueWith(SDValue(MemIntr, 1), Load.getValue(1));
33658 return Insert;
33659 }
33660 }
33661
33662 return SDValue();
33663 }
33664 default:
33665 return SDValue();
33666 }
33667
33668 // Nuke no-op shuffles that show up after combining.
33669 if (isNoopShuffleMask(Mask))
33670 return N.getOperand(0);
33671
33672 // Look for simplifications involving one or two shuffle instructions.
33673 SDValue V = N.getOperand(0);
33674 switch (N.getOpcode()) {
33675 default:
33676 break;
33677 case X86ISD::PSHUFLW:
33678 case X86ISD::PSHUFHW:
33679 assert(VT.getVectorElementType() == MVT::i16 && "Bad word shuffle type!");
33680
33681 // See if this reduces to a PSHUFD which is no more expensive and can
33682 // combine with more operations. Note that it has to at least flip the
33683 // dwords as otherwise it would have been removed as a no-op.
33684 if (makeArrayRef(Mask).equals({2, 3, 0, 1})) {
33685 int DMask[] = {0, 1, 2, 3};
33686 int DOffset = N.getOpcode() == X86ISD::PSHUFLW ? 0 : 2;
33687 DMask[DOffset + 0] = DOffset + 1;
33688 DMask[DOffset + 1] = DOffset + 0;
33689 MVT DVT = MVT::getVectorVT(MVT::i32, VT.getVectorNumElements() / 2);
33690 V = DAG.getBitcast(DVT, V);
33691 V = DAG.getNode(X86ISD::PSHUFD, DL, DVT, V,
33692 getV4X86ShuffleImm8ForMask(DMask, DL, DAG));
33693 return DAG.getBitcast(VT, V);
33694 }
33695
33696 // Look for shuffle patterns which can be implemented as a single unpack.
33697 // FIXME: This doesn't handle the location of the PSHUFD generically, and
33698 // only works when we have a PSHUFD followed by two half-shuffles.
33699 if (Mask[0] == Mask[1] && Mask[2] == Mask[3] &&
33700 (V.getOpcode() == X86ISD::PSHUFLW ||
33701 V.getOpcode() == X86ISD::PSHUFHW) &&
33702 V.getOpcode() != N.getOpcode() &&
33703 V.hasOneUse()) {
33704 SDValue D = peekThroughOneUseBitcasts(V.getOperand(0));
33705 if (D.getOpcode() == X86ISD::PSHUFD && D.hasOneUse()) {
33706 SmallVector<int, 4> VMask = getPSHUFShuffleMask(V);
33707 SmallVector<int, 4> DMask = getPSHUFShuffleMask(D);
33708 int NOffset = N.getOpcode() == X86ISD::PSHUFLW ? 0 : 4;
33709 int VOffset = V.getOpcode() == X86ISD::PSHUFLW ? 0 : 4;
33710 int WordMask[8];
33711 for (int i = 0; i < 4; ++i) {
33712 WordMask[i + NOffset] = Mask[i] + NOffset;
33713 WordMask[i + VOffset] = VMask[i] + VOffset;
33714 }
33715 // Map the word mask through the DWord mask.
33716 int MappedMask[8];
33717 for (int i = 0; i < 8; ++i)
33718 MappedMask[i] = 2 * DMask[WordMask[i] / 2] + WordMask[i] % 2;
33719 if (makeArrayRef(MappedMask).equals({0, 0, 1, 1, 2, 2, 3, 3}) ||
33720 makeArrayRef(MappedMask).equals({4, 4, 5, 5, 6, 6, 7, 7})) {
33721 // We can replace all three shuffles with an unpack.
33722 V = DAG.getBitcast(VT, D.getOperand(0));
33723 return DAG.getNode(MappedMask[0] == 0 ? X86ISD::UNPCKL
33724 : X86ISD::UNPCKH,
33725 DL, VT, V, V);
33726 }
33727 }
33728 }
33729
33730 break;
33731
33732 case X86ISD::PSHUFD:
33733 if (SDValue NewN = combineRedundantDWordShuffle(N, Mask, DAG))
33734 return NewN;
33735
33736 break;
33737 }
33738
33739 return SDValue();
33740}
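For the INSERTPS case above, the control byte is decoded at lines 33564-33566 into a source lane, a destination lane, and a zero mask. The scalar model below shows how those fields act for the register form of the instruction; the helper name is hypothetical and the sketch is for illustration only.

#include <array>
#include <cstdio>

// Scalar model of the INSERTPS control byte: bits [7:6] select the source
// element of Op1, bits [5:4] the destination slot in Op0, and bits [3:0] are
// a zero mask applied to the result after the insert.
static std::array<float, 4> insertps(std::array<float, 4> Op0,
                                     const std::array<float, 4> &Op1,
                                     unsigned Imm) {
  unsigned SrcIdx = (Imm >> 6) & 0x3;
  unsigned DstIdx = (Imm >> 4) & 0x3;
  unsigned ZeroMask = Imm & 0xF;
  Op0[DstIdx] = Op1[SrcIdx];
  for (unsigned i = 0; i != 4; ++i)
    if (ZeroMask & (1u << i))
      Op0[i] = 0.0f;
  return Op0;
}

int main() {
  // Insert element 2 of Op1 into lane 1 of Op0 and zero lane 3: imm = 0x98.
  std::array<float, 4> R = insertps({1, 2, 3, 4}, {10, 20, 30, 40}, 0x98);
  std::printf("%g %g %g %g\n", R[0], R[1], R[2], R[3]);   // 1 30 3 0
  return 0;
}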
33741
33742/// Checks if the shuffle mask takes subsequent elements
33743/// alternately from two vectors.
33744/// For example <0, 5, 2, 7> or <8, 1, 10, 3, 12, 5, 14, 7> are both correct.
33745static bool isAddSubOrSubAddMask(ArrayRef<int> Mask, bool &Op0Even) {
33746
33747 int ParitySrc[2] = {-1, -1};
33748 unsigned Size = Mask.size();
33749 for (unsigned i = 0; i != Size; ++i) {
33750 int M = Mask[i];
33751 if (M < 0)
33752 continue;
33753
33754 // Make sure we are using the matching element from the input.
33755 if ((M % Size) != i)
33756 return false;
33757
33758 // Make sure we use the same input for all elements of the same parity.
33759 int Src = M / Size;
33760 if (ParitySrc[i % 2] >= 0 && ParitySrc[i % 2] != Src)
33761 return false;
33762 ParitySrc[i % 2] = Src;
33763 }
33764
33765 // Make sure each input is used.
33766 if (ParitySrc[0] < 0 || ParitySrc[1] < 0 || ParitySrc[0] == ParitySrc[1])
33767 return false;
33768
33769 Op0Even = ParitySrc[0] == 0;
33770 return true;
33771}
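To make the parity rule concrete, the sketch below restates the same check outside of SelectionDAG and runs it on a few masks, including the <0, 5, 2, 7> example from the comment. It is an illustrative copy with a made-up name, not the routine above.

#include <cstdio>
#include <vector>

// Mirror of the check above: every used lane must read its own index from one
// of the two inputs, even lanes must all use one input, odd lanes the other.
static bool acceptsAddSubMask(const std::vector<int> &Mask, bool &Op0Even) {
  int ParitySrc[2] = {-1, -1};
  int Size = (int)Mask.size();
  for (int i = 0; i != Size; ++i) {
    int M = Mask[i];
    if (M < 0)
      continue;                      // undef lane, ignore
    if ((M % Size) != i)
      return false;                  // must read the matching lane
    int Src = M / Size;
    if (ParitySrc[i % 2] >= 0 && ParitySrc[i % 2] != Src)
      return false;                  // parity class must be consistent
    ParitySrc[i % 2] = Src;
  }
  if (ParitySrc[0] < 0 || ParitySrc[1] < 0 || ParitySrc[0] == ParitySrc[1])
    return false;                    // both inputs must be used
  Op0Even = ParitySrc[0] == 0;
  return true;
}

int main() {
  bool Op0Even;
  std::printf("%d\n", acceptsAddSubMask({0, 5, 2, 7}, Op0Even)); // 1, Op0Even = true
  std::printf("%d\n", acceptsAddSubMask({4, 1, 6, 3}, Op0Even)); // 1, Op0Even = false
  std::printf("%d\n", acceptsAddSubMask({0, 1, 2, 3}, Op0Even)); // 0, only one input used
  return 0;
}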
33772
33773/// Returns true iff the shuffle node \p N can be replaced with ADDSUB(SUBADD)
33774/// operation. If true is returned then the operands of ADDSUB(SUBADD) operation
33775/// are written to the parameters \p Opnd0 and \p Opnd1.
33776///
33777 /// We combine shuffles to ADDSUB(SUBADD) directly on the abstract vector shuffle
33778 /// nodes, since that is easier to match generically. We also insert dummy vector
33779 /// shuffle nodes for the operands which explicitly discard the lanes which are
33780 /// unused by this operation, so that the fact that they're unused flows through
33781 /// the rest of the combiner.
33782static bool isAddSubOrSubAdd(SDNode *N, const X86Subtarget &Subtarget,
33783 SelectionDAG &DAG, SDValue &Opnd0, SDValue &Opnd1,
33784 bool &IsSubAdd) {
33785
33786 EVT VT = N->getValueType(0);
33787 const TargetLowering &TLI = DAG.getTargetLoweringInfo();
33788 if (!Subtarget.hasSSE3() || !TLI.isTypeLegal(VT) ||
33789 !VT.getSimpleVT().isFloatingPoint())
33790 return false;
33791
33792 // We only handle target-independent shuffles.
33793 // FIXME: It would be easy and harmless to use the target shuffle mask
33794 // extraction tool to support more.
33795 if (N->getOpcode() != ISD::VECTOR_SHUFFLE)
33796 return false;
33797
33798 SDValue V1 = N->getOperand(0);
33799 SDValue V2 = N->getOperand(1);
33800
33801 // Make sure we have an FADD and an FSUB.
33802 if ((V1.getOpcode() != ISD::FADD && V1.getOpcode() != ISD::FSUB) ||
33803 (V2.getOpcode() != ISD::FADD && V2.getOpcode() != ISD::FSUB) ||
33804 V1.getOpcode() == V2.getOpcode())
33805 return false;
33806
33807 // If there are other uses of these operations we can't fold them.
33808 if (!V1->hasOneUse() || !V2->hasOneUse())
33809 return false;
33810
33811 // Ensure that both operations have the same operands. Note that we can
33812 // commute the FADD operands.
33813 SDValue LHS, RHS;
33814 if (V1.getOpcode() == ISD::FSUB) {
33815 LHS = V1->getOperand(0); RHS = V1->getOperand(1);
33816 if ((V2->getOperand(0) != LHS || V2->getOperand(1) != RHS) &&
33817 (V2->getOperand(0) != RHS || V2->getOperand(1) != LHS))
33818 return false;
33819 } else {
33820 assert(V2.getOpcode() == ISD::FSUB && "Unexpected opcode");
33821 LHS = V2->getOperand(0); RHS = V2->getOperand(1);
33822 if ((V1->getOperand(0) != LHS || V1->getOperand(1) != RHS) &&
33823 (V1->getOperand(0) != RHS || V1->getOperand(1) != LHS))
33824 return false;
33825 }
33826
33827 ArrayRef<int> Mask = cast<ShuffleVectorSDNode>(N)->getMask();
33828 bool Op0Even;
33829 if (!isAddSubOrSubAddMask(Mask, Op0Even))
33830 return false;
33831
33832 // It's a subadd if the vector in the even parity is an FADD.
33833 IsSubAdd = Op0Even ? V1->getOpcode() == ISD::FADD
33834 : V2->getOpcode() == ISD::FADD;
33835
33836 Opnd0 = LHS;
33837 Opnd1 = RHS;
33838 return true;
33839}
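For reference, the node being formed has simple scalar semantics: ADDSUB (as in ADDSUBPS) subtracts in the even lanes and adds in the odd lanes, and IsSubAdd selects the mirrored pattern when the even-parity source is the FADD. A minimal sketch with a made-up helper name:

#include <array>
#include <cstdio>

// Scalar model: ADDSUB does sub in even lanes and add in odd lanes;
// SUBADD (IsSubAdd == true) is the mirror image.
static std::array<float, 4> addsub(const std::array<float, 4> &A,
                                   const std::array<float, 4> &B,
                                   bool IsSubAdd) {
  std::array<float, 4> R;
  for (int i = 0; i != 4; ++i) {
    bool DoAdd = (i % 2 == 0) ? IsSubAdd : !IsSubAdd;
    R[i] = DoAdd ? A[i] + B[i] : A[i] - B[i];
  }
  return R;
}

int main() {
  std::array<float, 4> R = addsub({1, 2, 3, 4}, {10, 20, 30, 40},
                                  /*IsSubAdd=*/false);
  std::printf("%g %g %g %g\n", R[0], R[1], R[2], R[3]);   // -9 22 -27 44
  return 0;
}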
33840
33841/// Combine shuffle of two fma nodes into FMAddSub or FMSubAdd.
33842static SDValue combineShuffleToFMAddSub(SDNode *N,
33843 const X86Subtarget &Subtarget,
33844 SelectionDAG &DAG) {
33845 // We only handle target-independent shuffles.
33846 // FIXME: It would be easy and harmless to use the target shuffle mask
33847 // extraction tool to support more.
33848 if (N->getOpcode() != ISD::VECTOR_SHUFFLE)
33849 return SDValue();
33850
33851 MVT VT = N->getSimpleValueType(0);
33852 const TargetLowering &TLI = DAG.getTargetLoweringInfo();
33853 if (!Subtarget.hasAnyFMA() || !TLI.isTypeLegal(VT))
33854 return SDValue();
33855
33856 // We're trying to match (shuffle fma(a, b, c), X86Fmsub(a, b, c)).
33857 SDValue Op0 = N->getOperand(0);
33858 SDValue Op1 = N->getOperand(1);
33859 SDValue FMAdd = Op0, FMSub = Op1;
33860 if (FMSub.getOpcode() != X86ISD::FMSUB)
33861 std::swap(FMAdd, FMSub);
33862
33863 if (FMAdd.getOpcode() != ISD::FMA || FMSub.getOpcode() != X86ISD::FMSUB ||
33864 FMAdd.getOperand(0) != FMSub.getOperand(0) || !FMAdd.hasOneUse() ||
33865 FMAdd.getOperand(1) != FMSub.getOperand(1) || !FMSub.hasOneUse() ||
33866 FMAdd.getOperand(2) != FMSub.getOperand(2))
33867 return SDValue();
33868
33869 // Check for correct shuffle mask.
33870 ArrayRef<int> Mask = cast<ShuffleVectorSDNode>(N)->getMask();
33871 bool Op0Even;
33872 if (!isAddSubOrSubAddMask(Mask, Op0Even))
33873 return SDValue();
33874
33875 // FMAddSub takes zeroth operand from FMSub node.
33876 SDLoc DL(N);
33877 bool IsSubAdd = Op0Even ? Op0 == FMAdd : Op1 == FMAdd;
33878 unsigned Opcode = IsSubAdd ? X86ISD::FMSUBADD : X86ISD::FMADDSUB;
33879 return DAG.getNode(Opcode, DL, VT, FMAdd.getOperand(0), FMAdd.getOperand(1),
33880 FMAdd.getOperand(2));
33881}
33882
33883/// Try to combine a shuffle into a target-specific add-sub or
33884/// mul-add-sub node.
33885static SDValue combineShuffleToAddSubOrFMAddSub(SDNode *N,
33886 const X86Subtarget &Subtarget,
33887 SelectionDAG &DAG) {
33888 if (SDValue V = combineShuffleToFMAddSub(N, Subtarget, DAG))
33889 return V;
33890
33891 SDValue Opnd0, Opnd1;
33892 bool IsSubAdd;
33893 if (!isAddSubOrSubAdd(N, Subtarget, DAG, Opnd0, Opnd1, IsSubAdd))
33894 return SDValue();
33895
33896 MVT VT = N->getSimpleValueType(0);
33897 SDLoc DL(N);
33898
33899 // Try to generate X86ISD::FMADDSUB node here.
33900 SDValue Opnd2;
33901 if (isFMAddSubOrFMSubAdd(Subtarget, DAG, Opnd0, Opnd1, Opnd2, 2)) {
33902 unsigned Opc = IsSubAdd ? X86ISD::FMSUBADD : X86ISD::FMADDSUB;
33903 return DAG.getNode(Opc, DL, VT, Opnd0, Opnd1, Opnd2);
33904 }
33905
33906 if (IsSubAdd)
33907 return SDValue();
33908
33909 // Do not generate X86ISD::ADDSUB node for 512-bit types even though
33910 // the ADDSUB idiom has been successfully recognized. There are no known
33911 // X86 targets with 512-bit ADDSUB instructions!
33912 if (VT.is512BitVector())
33913 return SDValue();
33914
33915 return DAG.getNode(X86ISD::ADDSUB, DL, VT, Opnd0, Opnd1);
33916}
33917
33918// We are looking for a shuffle where both sources are concatenated with undef
33919// and have a width that is half of the output's width. AVX2 has VPERMD/Q, so
33920// if we can express this as a single-source shuffle, that's preferable.
33921static SDValue combineShuffleOfConcatUndef(SDNode *N, SelectionDAG &DAG,
33922 const X86Subtarget &Subtarget) {
33923 if (!Subtarget.hasAVX2() || !isa<ShuffleVectorSDNode>(N))
33924 return SDValue();
33925
33926 EVT VT = N->getValueType(0);
33927
33928 // We only care about shuffles of 128/256-bit vectors of 32/64-bit values.
33929 if (!VT.is128BitVector() && !VT.is256BitVector())
33930 return SDValue();
33931
33932 if (VT.getVectorElementType() != MVT::i32 &&
33933 VT.getVectorElementType() != MVT::i64 &&
33934 VT.getVectorElementType() != MVT::f32 &&
33935 VT.getVectorElementType() != MVT::f64)
33936 return SDValue();
33937
33938 SDValue N0 = N->getOperand(0);
33939 SDValue N1 = N->getOperand(1);
33940
33941 // Check that both sources are concats with undef.
33942 if (N0.getOpcode() != ISD::CONCAT_VECTORS ||
33943 N1.getOpcode() != ISD::CONCAT_VECTORS || N0.getNumOperands() != 2 ||
33944 N1.getNumOperands() != 2 || !N0.getOperand(1).isUndef() ||
33945 !N1.getOperand(1).isUndef())
33946 return SDValue();
33947
33948 // Construct the new shuffle mask. Elements from the first source retain their
33949 // index, but elements from the second source no longer need to skip an undef.
33950 SmallVector<int, 8> Mask;
33951 int NumElts = VT.getVectorNumElements();
33952
33953 ShuffleVectorSDNode *SVOp = cast<ShuffleVectorSDNode>(N);
33954 for (int Elt : SVOp->getMask())
33955 Mask.push_back(Elt < NumElts ? Elt : (Elt - NumElts / 2));
33956
33957 SDLoc DL(N);
33958 SDValue Concat = DAG.getNode(ISD::CONCAT_VECTORS, DL, VT, N0.getOperand(0),
33959 N1.getOperand(0));
33960 return DAG.getVectorShuffle(VT, DL, Concat, DAG.getUNDEF(VT), Mask);
33961}
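The mask rewrite above only has to shift second-source indices down by NumElts / 2, because once t1 and t2 are concatenated the second operand's defined half begins right after the first operand's defined half. A small sketch of just that remapping, with a hypothetical helper name:

#include <cstdio>
#include <vector>

// Both shuffle sources are concat(tN, undef), so any index into the second
// source (>= NumElts) moves down by NumElts / 2 once t1 and t2 are fused into
// a single concat; undef (-1) entries pass through unchanged.
static std::vector<int> remapConcatUndefMask(const std::vector<int> &Mask) {
  int NumElts = (int)Mask.size();
  std::vector<int> NewMask;
  for (int Elt : Mask)
    NewMask.push_back(Elt < NumElts ? Elt : Elt - NumElts / 2);
  return NewMask;
}

int main() {
  // v4i32 interleave of the two defined halves: <0, 4, 1, 5> -> <0, 2, 1, 3>.
  std::vector<int> M = remapConcatUndefMask({0, 4, 1, 5});
  std::printf("%d %d %d %d\n", M[0], M[1], M[2], M[3]);   // 0 2 1 3
  return 0;
}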
33962
33963/// Eliminate a redundant shuffle of a horizontal math op.
33964static SDValue foldShuffleOfHorizOp(SDNode *N, SelectionDAG &DAG) {
33965 unsigned Opcode = N->getOpcode();
33966 if (Opcode != X86ISD::MOVDDUP && Opcode != X86ISD::VBROADCAST)
33967 if (Opcode != ISD::VECTOR_SHUFFLE || !N->getOperand(1).isUndef())
33968 return SDValue();
33969
33970 // For a broadcast, peek through an extract element of index 0 to find the
33971 // horizontal op: broadcast (ext_vec_elt HOp, 0)
33972 EVT VT = N->getValueType(0);
33973 if (Opcode == X86ISD::VBROADCAST) {
33974 SDValue SrcOp = N->getOperand(0);
33975 if (SrcOp.getOpcode() == ISD::EXTRACT_VECTOR_ELT &&
33976 SrcOp.getValueType() == MVT::f64 &&
33977 SrcOp.getOperand(0).getValueType() == VT &&
33978 isNullConstant(SrcOp.getOperand(1)))
33979 N = SrcOp.getNode();
33980 }
33981
33982 SDValue HOp = N->getOperand(0);
33983 if (HOp.getOpcode() != X86ISD::HADD && HOp.getOpcode() != X86ISD::FHADD &&
33984 HOp.getOpcode() != X86ISD::HSUB && HOp.getOpcode() != X86ISD::FHSUB)
33985 return SDValue();
33986
33987 // 128-bit horizontal math instructions are defined to operate on adjacent
33988 // lanes of each operand as:
33989 // v4X32: A[0] + A[1] , A[2] + A[3] , B[0] + B[1] , B[2] + B[3]
33990 // ...similarly for v2f64 and v8i16.
33991 if (!HOp.getOperand(0).isUndef() && !HOp.getOperand(1).isUndef() &&
33992 HOp.getOperand(0) != HOp.getOperand(1))
33993 return SDValue();
33994
33995 // The shuffle that we are eliminating may have allowed the horizontal op to
33996 // have an undemanded (undefined) operand. Duplicate the other (defined)
33997 // operand to ensure that the results are defined across all lanes without the
33998 // shuffle.
33999 auto updateHOp = [](SDValue HorizOp, SelectionDAG &DAG) {
34000 SDValue X;
34001 if (HorizOp.getOperand(0).isUndef()) {
34002 assert(!HorizOp.getOperand(1).isUndef() && "Not expecting foldable h-op");
34003 X = HorizOp.getOperand(1);
34004 } else if (HorizOp.getOperand(1).isUndef()) {
34005 assert(!HorizOp.getOperand(0).isUndef() && "Not expecting foldable h-op");
34006 X = HorizOp.getOperand(0);
34007 } else {
34008 return HorizOp;
34009 }
34010 return DAG.getNode(HorizOp.getOpcode(), SDLoc(HorizOp),
34011 HorizOp.getValueType(), X, X);
34012 };
34013
34014 // When the operands of a horizontal math op are identical, the low half of
34015 // the result is the same as the high half. If a target shuffle is also
34016 // replicating low and high halves (and without changing the type/length of
34017 // the vector), we don't need the shuffle.
34018 if (Opcode == X86ISD::MOVDDUP || Opcode == X86ISD::VBROADCAST) {
34019 if (HOp.getScalarValueSizeInBits() == 64 && HOp.getValueType() == VT) {
34020 // movddup (hadd X, X) --> hadd X, X
34021 // broadcast (extract_vec_elt (hadd X, X), 0) --> hadd X, X
34022 assert((HOp.getValueType() == MVT::v2f64 ||
34023         HOp.getValueType() == MVT::v4f64) && "Unexpected type for h-op");
34024 return updateHOp(HOp, DAG);
34025 }
34026 return SDValue();
34027 }
34028
34029 // shuffle (hadd X, X), undef, [low half...high half] --> hadd X, X
34030 ArrayRef<int> Mask = cast<ShuffleVectorSDNode>(N)->getMask();
34031 // TODO: Other mask possibilities like {1,1} and {1,0} could be added here,
34032 // but this should be tied to whatever horizontal op matching and shuffle
34033 // canonicalization are producing.
34034 if (HOp.getValueSizeInBits() == 128 &&
34035 (isTargetShuffleEquivalent(Mask, {0, 0}) ||
34036 isTargetShuffleEquivalent(Mask, {0, 1, 0, 1}) ||
34037 isTargetShuffleEquivalent(Mask, {0, 1, 2, 3, 0, 1, 2, 3})))
34038 return updateHOp(HOp, DAG);
34039
34040 if (HOp.getValueSizeInBits() == 256 &&
34041 (isTargetShuffleEquivalent(Mask, {0, 0, 2, 2}) ||
34042 isTargetShuffleEquivalent(Mask, {0, 1, 0, 1, 4, 5, 4, 5}) ||
34043 isTargetShuffleEquivalent(
34044 Mask, {0, 1, 2, 3, 0, 1, 2, 3, 8, 9, 10, 11, 8, 9, 10, 11})))
34045 return updateHOp(HOp, DAG);
34046
34047 return SDValue();
34048}
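The lane layout described in the comment at lines 33987-33990 is easy to model with scalar adds: when both operands are the same vector, the low and high halves of the result coincide, which is why the duplicating shuffles above can be dropped. A minimal sketch assuming HADDPS-style semantics:

#include <array>
#include <cstdio>

// Scalar model of a 128-bit horizontal add: the low half of the result comes
// from adjacent pairs of A, the high half from adjacent pairs of B.
static std::array<float, 4> hadd(const std::array<float, 4> &A,
                                 const std::array<float, 4> &B) {
  return {A[0] + A[1], A[2] + A[3], B[0] + B[1], B[2] + B[3]};
}

int main() {
  // With identical operands both halves are equal, so a shuffle that merely
  // replicates the low half into the high half is redundant.
  std::array<float, 4> X = {1, 2, 3, 4};
  std::array<float, 4> R = hadd(X, X);
  std::printf("%g %g %g %g\n", R[0], R[1], R[2], R[3]);   // 3 7 3 7
  return 0;
}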
34049
34050/// If we have a shuffle of AVX/AVX512 (256/512 bit) vectors that only uses the
34051/// low half of each source vector and does not set any high half elements in
34052/// the destination vector, narrow the shuffle to half its original size.
34053static SDValue narrowShuffle(ShuffleVectorSDNode *Shuf, SelectionDAG &DAG) {
34054 if (!Shuf->getValueType(0).isSimple())
34055 return SDValue();
34056 MVT VT = Shuf->getSimpleValueType(0);
34057 if (!VT.is256BitVector() && !VT.is512BitVector())
34058 return SDValue();
34059
34060 // See if we can ignore all of the high elements of the shuffle.
34061 ArrayRef<int> Mask = Shuf->getMask();
34062 if (!isUndefUpperHalf(Mask))
34063 return SDValue();
34064
34065 // Check if the shuffle mask accesses only the low half of each input vector
34066 // (half-index output is 0 or 2).
34067 int HalfIdx1, HalfIdx2;
34068 SmallVector<int, 8> HalfMask(Mask.size() / 2);
34069 if (!getHalfShuffleMask(Mask, HalfMask, HalfIdx1, HalfIdx2) ||
34070 (HalfIdx1 % 2 == 1) || (HalfIdx2 % 2 == 1))
34071 return SDValue();
34072
34073 // Create a half-width shuffle to replace the unnecessarily wide shuffle.
34074 // The trick is knowing that all of the insert/extract are actually free
34075 // subregister (zmm<->ymm or ymm<->xmm) ops. That leaves us with a shuffle
34076 // of narrow inputs into a narrow output, and that is always cheaper than
34077 // the wide shuffle that we started with.
34078 return getShuffleHalfVectors(SDLoc(Shuf), Shuf->getOperand(0),
34079 Shuf->getOperand(1), HalfMask, HalfIdx1,
34080 HalfIdx2, false, DAG, /*UseConcat*/true);
34081}
34082
34083static SDValue combineShuffle(SDNode *N, SelectionDAG &DAG,
34084 TargetLowering::DAGCombinerInfo &DCI,
34085 const X86Subtarget &Subtarget) {
34086 if (auto *Shuf = dyn_cast<ShuffleVectorSDNode>(N))
34087 if (SDValue V = narrowShuffle(Shuf, DAG))
34088 return V;
34089
34090 // If we have legalized the vector types, look for blends of FADD and FSUB
34091 // nodes that we can fuse into an ADDSUB, FMADDSUB, or FMSUBADD node.
34092 SDLoc dl(N);
34093 EVT VT = N->getValueType(0);
34094 const TargetLowering &TLI = DAG.getTargetLoweringInfo();
34095 if (TLI.isTypeLegal(VT)) {
34096 if (SDValue AddSub = combineShuffleToAddSubOrFMAddSub(N, Subtarget, DAG))
34097 return AddSub;
34098
34099 if (SDValue HAddSub = foldShuffleOfHorizOp(N, DAG))
34100 return HAddSub;
34101 }
34102
34103 // Attempt to combine into a vector load/broadcast.
34104 if (SDValue LD = combineToConsecutiveLoads(VT, N, dl, DAG, Subtarget, true))
34105 return LD;
34106
34107 // For AVX2, we sometimes want to combine
34108 // (vector_shuffle <mask> (concat_vectors t1, undef)
34109 // (concat_vectors t2, undef))
34110 // Into:
34111 // (vector_shuffle <mask> (concat_vectors t1, t2), undef)
34112 // Since the latter can be efficiently lowered with VPERMD/VPERMQ
34113 if (SDValue ShufConcat = combineShuffleOfConcatUndef(N, DAG, Subtarget))
34114 return ShufConcat;
34115
34116 if (isTargetShuffle(N->getOpcode())) {
34117 SDValue Op(N, 0);
34118 if (SDValue Shuffle = combineTargetShuffle(Op, DAG, DCI, Subtarget))
34119 return Shuffle;
34120
34121 // Try recursively combining arbitrary sequences of x86 shuffle
34122 // instructions into higher-order shuffles. We do this after combining
34123 // specific PSHUF instruction sequences into their minimal form so that we
34124 // can evaluate how many specialized shuffle instructions are involved in
34125 // a particular chain.
34126 if (SDValue Res = combineX86ShufflesRecursively(Op, DAG, Subtarget))
34127 return Res;
34128
34129 // Simplify source operands based on shuffle mask.
34130 // TODO - merge this into combineX86ShufflesRecursively.
34131 APInt KnownUndef, KnownZero;
34132 APInt DemandedElts = APInt::getAllOnesValue(VT.getVectorNumElements());
34133 if (TLI.SimplifyDemandedVectorElts(Op, DemandedElts, KnownUndef, KnownZero, DCI))
34134 return SDValue(N, 0);
34135 }
34136
34137 // Look for a v2i64/v2f64 VZEXT_MOVL of a node that already produces zeros
34138 // in the upper 64 bits.
34140 // TODO: Can we generalize this using computeKnownBits?
34140 if (N->getOpcode() == X86ISD::VZEXT_MOVL &&
34141 (VT == MVT::v2f64 || VT == MVT::v2i64) &&
34142 N->getOperand(0).getOpcode() == ISD::BITCAST &&
34143 (N->getOperand(0).getOperand(0).getValueType() == MVT::v4f32 ||
34144 N->getOperand(0).getOperand(0).getValueType() == MVT::v4i32)) {
34145 SDValue In = N->getOperand(0).getOperand(0);
34146 switch (In.getOpcode()) {
34147 default:
34148 break;
34149 case X86ISD::CVTP2SI: case X86ISD::CVTP2UI:
34150 case X86ISD::MCVTP2SI: case X86ISD::MCVTP2UI:
34151 case X86ISD::CVTTP2SI: case X86ISD::CVTTP2UI:
34152 case X86ISD::MCVTTP2SI: case X86ISD::MCVTTP2UI:
34153 case X86ISD::CVTSI2P: case X86ISD::CVTUI2P:
34154 case X86ISD::MCVTSI2P: case X86ISD::MCVTUI2P:
34155 case X86ISD::VFPROUND: case X86ISD::VMFPROUND:
34156 if (In.getOperand(0).getValueType() == MVT::v2f64 ||
34157 In.getOperand(0).getValueType() == MVT::v2i64)
34158 return N->getOperand(0); // return the bitcast
34159 break;
34160 }
34161 }
34162
34163 // Pull subvector inserts into undef through VZEXT_MOVL by making it an
34164 // insert into a zero vector. This helps get VZEXT_MOVL closer to
34165 // scalar_to_vectors where 256/512 are canonicalized to an insert and a
34166 // 128-bit scalar_to_vector. This reduces the number of isel patterns.
34167 if (N->getOpcode() == X86ISD::VZEXT_MOVL && !DCI.isBeforeLegalizeOps() &&
34168 N->getOperand(0).getOpcode() == ISD::INSERT_SUBVECTOR &&
34169 N->getOperand(0).hasOneUse() &&
34170 N->getOperand(0).getOperand(0).isUndef() &&
34171 isNullConstant(N->getOperand(0).getOperand(2))) {
34172 SDValue In = N->getOperand(0).getOperand(1);
34173 SDValue Movl = DAG.getNode(X86ISD::VZEXT_MOVL, dl, In.getValueType(), In);
34174 return DAG.getNode(ISD::INSERT_SUBVECTOR, dl, VT,
34175 getZeroVector(VT.getSimpleVT(), Subtarget, DAG, dl),
34176 Movl, N->getOperand(0).getOperand(2));
34177 }
34178
34179 // If this is a vzmovl of a full vector load, replace it with a vzload, unless
34180 // the load is volatile.
34181 if (N->getOpcode() == X86ISD::VZEXT_MOVL && N->getOperand(0).hasOneUse() &&
34182 ISD::isNormalLoad(N->getOperand(0).getNode())) {
34183 LoadSDNode *LN = cast<LoadSDNode>(N->getOperand(0));
34184 if (LN->isSimple()) {
34185 SDVTList Tys = DAG.getVTList(VT, MVT::Other);
34186 SDValue Ops[] = { LN->getChain(), LN->getBasePtr() };
34187 SDValue VZLoad =
34188 DAG.getMemIntrinsicNode(X86ISD::VZEXT_LOAD, dl, Tys, Ops,
34189 VT.getVectorElementType(),
34190 LN->getPointerInfo(),
34191 LN->getAlignment(),
34192 MachineMemOperand::MOLoad);
34193 DAG.ReplaceAllUsesOfValueWith(SDValue(LN, 1), VZLoad.getValue(1));
34194 return VZLoad;
34195 }
34196 }
34197
34198 return SDValue();
34199}
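
Several of the VZEXT_MOVL combines above rely only on the node's semantics: it keeps lane 0 of its input and zeroes every other lane. A minimal value-level model of that behaviour (hypothetical, using std::array as a stand-in for the vector types):

#include <array>
#include <cstddef>

template <typename T, std::size_t N>
static std::array<T, N> vzextMovl(const std::array<T, N> &In) {
  std::array<T, N> Out{}; // value-initialization zeroes all lanes
  Out[0] = In[0];         // only the low element survives
  return Out;
}
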
34200
34201bool X86TargetLowering::SimplifyDemandedVectorEltsForTargetNode(
34202 SDValue Op, const APInt &DemandedElts, APInt &KnownUndef, APInt &KnownZero,
34203 TargetLoweringOpt &TLO, unsigned Depth) const {
34204 int NumElts = DemandedElts.getBitWidth();
34205 unsigned Opc = Op.getOpcode();
34206 EVT VT = Op.getValueType();
34207
34208 // Handle special case opcodes.
34209 switch (Opc) {
34210 case X86ISD::PMULDQ:
34211 case X86ISD::PMULUDQ: {
34212 APInt LHSUndef, LHSZero;
34213 APInt RHSUndef, RHSZero;
34214 SDValue LHS = Op.getOperand(0);
34215 SDValue RHS = Op.getOperand(1);
34216 if (SimplifyDemandedVectorElts(LHS, DemandedElts, LHSUndef, LHSZero, TLO,
34217 Depth + 1))
34218 return true;
34219 if (SimplifyDemandedVectorElts(RHS, DemandedElts, RHSUndef, RHSZero, TLO,
34220 Depth + 1))
34221 return true;
34222 // Multiply by zero.
34223 KnownZero = LHSZero | RHSZero;
34224 break;
34225 }
34226 case X86ISD::VSHL:
34227 case X86ISD::VSRL:
34228 case X86ISD::VSRA: {
34229 // We only need the bottom 64-bits of the (128-bit) shift amount.
34230 SDValue Amt = Op.getOperand(1);
34231 MVT AmtVT = Amt.getSimpleValueType();
34232 assert(AmtVT.is128BitVector() && "Unexpected value type");
34233
34234 // If we reuse the shift amount just for sse shift amounts then we know that
34235 // only the bottom 64-bits are ever used.
34236 bool AssumeSingleUse = llvm::all_of(Amt->uses(), [&Amt](SDNode *Use) {
34237 unsigned UseOpc = Use->getOpcode();
34238 return (UseOpc == X86ISD::VSHL || UseOpc == X86ISD::VSRL ||
34239 UseOpc == X86ISD::VSRA) &&
34240 Use->getOperand(0) != Amt;
34241 });
34242
34243 APInt AmtUndef, AmtZero;
34244 unsigned NumAmtElts = AmtVT.getVectorNumElements();
34245 APInt AmtElts = APInt::getLowBitsSet(NumAmtElts, NumAmtElts / 2);
34246 if (SimplifyDemandedVectorElts(Amt, AmtElts, AmtUndef, AmtZero, TLO,
34247 Depth + 1, AssumeSingleUse))
34248 return true;
34249 LLVM_FALLTHROUGH;
34250 }
34251 case X86ISD::VSHLI:
34252 case X86ISD::VSRLI:
34253 case X86ISD::VSRAI: {
34254 SDValue Src = Op.getOperand(0);
34255 APInt SrcUndef;
34256 if (SimplifyDemandedVectorElts(Src, DemandedElts, SrcUndef, KnownZero, TLO,
34257 Depth + 1))
34258 return true;
34259 // TODO convert SrcUndef to KnownUndef.
34260 break;
34261 }
34262 case X86ISD::KSHIFTL: {
34263 SDValue Src = Op.getOperand(0);
34264 auto *Amt = cast<ConstantSDNode>(Op.getOperand(1));
34265 assert(Amt->getAPIntValue().ult(NumElts) && "Out of range shift amount");
34266 unsigned ShiftAmt = Amt->getZExtValue();
34267
34268 if (ShiftAmt == 0)
34269 return TLO.CombineTo(Op, Src);
34270
34271 // If this is ((X >>u C1) << ShAmt), see if we can simplify this into a
34272 // single shift. We can do this if the bottom bits (which are shifted
34273 // out) are never demanded.
34274 if (Src.getOpcode() == X86ISD::KSHIFTR) {
34275 if (!DemandedElts.intersects(APInt::getLowBitsSet(NumElts, ShiftAmt))) {
34276 unsigned C1 = Src.getConstantOperandVal(1);
34277 unsigned NewOpc = X86ISD::KSHIFTL;
34278 int Diff = ShiftAmt - C1;
34279 if (Diff < 0) {
34280 Diff = -Diff;
34281 NewOpc = X86ISD::KSHIFTR;
34282 }
34283
34284 SDLoc dl(Op);
34285 SDValue NewSA = TLO.DAG.getTargetConstant(Diff, dl, MVT::i8);
34286 return TLO.CombineTo(
34287 Op, TLO.DAG.getNode(NewOpc, dl, VT, Src.getOperand(0), NewSA));
34288 }
34289 }
34290
34291 APInt DemandedSrc = DemandedElts.lshr(ShiftAmt);
34292 if (SimplifyDemandedVectorElts(Src, DemandedSrc, KnownUndef, KnownZero, TLO,
34293 Depth + 1))
34294 return true;
34295
34296 KnownUndef <<= ShiftAmt;
34297 KnownZero <<= ShiftAmt;
34298 KnownZero.setLowBits(ShiftAmt);
34299 break;
34300 }
34301 case X86ISD::KSHIFTR: {
34302 SDValue Src = Op.getOperand(0);
34303 auto *Amt = cast<ConstantSDNode>(Op.getOperand(1));
34304 assert(Amt->getAPIntValue().ult(NumElts) && "Out of range shift amount");
34305 unsigned ShiftAmt = Amt->getZExtValue();
34306
34307 if (ShiftAmt == 0)
34308 return TLO.CombineTo(Op, Src);
34309
34310 // If this is ((X << C1) >>u ShAmt), see if we can simplify this into a
34311 // single shift. We can do this if the top bits (which are shifted
34312 // out) are never demanded.
34313 if (Src.getOpcode() == X86ISD::KSHIFTL) {
34314 if (!DemandedElts.intersects(APInt::getHighBitsSet(NumElts, ShiftAmt))) {
34315 unsigned C1 = Src.getConstantOperandVal(1);
34316 unsigned NewOpc = X86ISD::KSHIFTR;
34317 int Diff = ShiftAmt - C1;
34318 if (Diff < 0) {
34319 Diff = -Diff;
34320 NewOpc = X86ISD::KSHIFTL;
34321 }
34322
34323 SDLoc dl(Op);
34324 SDValue NewSA = TLO.DAG.getTargetConstant(Diff, dl, MVT::i8);
34325 return TLO.CombineTo(
34326 Op, TLO.DAG.getNode(NewOpc, dl, VT, Src.getOperand(0), NewSA));
34327 }
34328 }
34329
34330 APInt DemandedSrc = DemandedElts.shl(ShiftAmt);
34331 if (SimplifyDemandedVectorElts(Src, DemandedSrc, KnownUndef, KnownZero, TLO,
34332 Depth + 1))
34333 return true;
34334
34335 KnownUndef.lshrInPlace(ShiftAmt);
34336 KnownZero.lshrInPlace(ShiftAmt);
34337 KnownZero.setHighBits(ShiftAmt);
34338 break;
34339 }
34340 case X86ISD::CVTSI2P:
34341 case X86ISD::CVTUI2P: {
34342 SDValue Src = Op.getOperand(0);
34343 MVT SrcVT = Src.getSimpleValueType();
34344 APInt SrcUndef, SrcZero;
34345 APInt SrcElts = DemandedElts.zextOrTrunc(SrcVT.getVectorNumElements());
34346 if (SimplifyDemandedVectorElts(Src, SrcElts, SrcUndef, SrcZero, TLO,
34347 Depth + 1))
34348 return true;
34349 break;
34350 }
34351 case X86ISD::PACKSS:
34352 case X86ISD::PACKUS: {
34353 SDValue N0 = Op.getOperand(0);
34354 SDValue N1 = Op.getOperand(1);
34355
34356 APInt DemandedLHS, DemandedRHS;
34357 getPackDemandedElts(VT, DemandedElts, DemandedLHS, DemandedRHS);
34358
34359 APInt SrcUndef, SrcZero;
34360 if (SimplifyDemandedVectorElts(N0, DemandedLHS, SrcUndef, SrcZero, TLO,
34361 Depth + 1))
34362 return true;
34363 if (SimplifyDemandedVectorElts(N1, DemandedRHS, SrcUndef, SrcZero, TLO,
34364 Depth + 1))
34365 return true;
34366
34367 // Aggressively peek through ops to get at the demanded elts.
34368 // TODO - we should do this for all target/faux shuffles ops.
34369 if (!DemandedElts.isAllOnesValue()) {
34370 APInt DemandedSrcBits =
34371 APInt::getAllOnesValue(N0.getScalarValueSizeInBits());
34372 SDValue NewN0 = SimplifyMultipleUseDemandedBits(
34373 N0, DemandedSrcBits, DemandedLHS, TLO.DAG, Depth + 1);
34374 SDValue NewN1 = SimplifyMultipleUseDemandedBits(
34375 N1, DemandedSrcBits, DemandedRHS, TLO.DAG, Depth + 1);
34376 if (NewN0 || NewN1) {
34377 NewN0 = NewN0 ? NewN0 : N0;
34378 NewN1 = NewN1 ? NewN1 : N1;
34379 return TLO.CombineTo(Op,
34380 TLO.DAG.getNode(Opc, SDLoc(Op), VT, NewN0, NewN1));
34381 }
34382 }
34383 break;
34384 }
34385 case X86ISD::HADD:
34386 case X86ISD::HSUB:
34387 case X86ISD::FHADD:
34388 case X86ISD::FHSUB: {
34389 APInt DemandedLHS, DemandedRHS;
34390 getHorizDemandedElts(VT, DemandedElts, DemandedLHS, DemandedRHS);
34391
34392 APInt LHSUndef, LHSZero;
34393 if (SimplifyDemandedVectorElts(Op.getOperand(0), DemandedLHS, LHSUndef,
34394 LHSZero, TLO, Depth + 1))
34395 return true;
34396 APInt RHSUndef, RHSZero;
34397 if (SimplifyDemandedVectorElts(Op.getOperand(1), DemandedRHS, RHSUndef,
34398 RHSZero, TLO, Depth + 1))
34399 return true;
34400 break;
34401 }
34402 case X86ISD::VTRUNC:
34403 case X86ISD::VTRUNCS:
34404 case X86ISD::VTRUNCUS: {
34405 SDValue Src = Op.getOperand(0);
34406 MVT SrcVT = Src.getSimpleValueType();
34407 APInt DemandedSrc = DemandedElts.zextOrTrunc(SrcVT.getVectorNumElements());
34408 APInt SrcUndef, SrcZero;
34409 if (SimplifyDemandedVectorElts(Src, DemandedSrc, SrcUndef, SrcZero, TLO,
34410 Depth + 1))
34411 return true;
34412 KnownZero = SrcZero.zextOrTrunc(NumElts);
34413 KnownUndef = SrcUndef.zextOrTrunc(NumElts);
34414 break;
34415 }
34416 case X86ISD::BLENDV: {
34417 APInt SelUndef, SelZero;
34418 if (SimplifyDemandedVectorElts(Op.getOperand(0), DemandedElts, SelUndef,
34419 SelZero, TLO, Depth + 1))
34420 return true;
34421
34422 // TODO: Use SelZero to adjust LHS/RHS DemandedElts.
34423 APInt LHSUndef, LHSZero;
34424 if (SimplifyDemandedVectorElts(Op.getOperand(1), DemandedElts, LHSUndef,
34425 LHSZero, TLO, Depth + 1))
34426 return true;
34427
34428 APInt RHSUndef, RHSZero;
34429 if (SimplifyDemandedVectorElts(Op.getOperand(2), DemandedElts, RHSUndef,
34430 RHSZero, TLO, Depth + 1))
34431 return true;
34432
34433 KnownZero = LHSZero & RHSZero;
34434 KnownUndef = LHSUndef & RHSUndef;
34435 break;
34436 }
34437 case X86ISD::VBROADCAST: {
34438 SDValue Src = Op.getOperand(0);
34439 MVT SrcVT = Src.getSimpleValueType();
34440 if (!SrcVT.isVector())
34441 return false;
34442 // Don't bother broadcasting if we just need the 0'th element.
34443 if (DemandedElts == 1) {
34444 if (Src.getValueType() != VT)
34445 Src = widenSubVector(VT.getSimpleVT(), Src, false, Subtarget, TLO.DAG,
34446 SDLoc(Op));
34447 return TLO.CombineTo(Op, Src);
34448 }
34449 APInt SrcUndef, SrcZero;
34450 APInt SrcElts = APInt::getOneBitSet(SrcVT.getVectorNumElements(), 0);
34451 if (SimplifyDemandedVectorElts(Src, SrcElts, SrcUndef, SrcZero, TLO,
34452 Depth + 1))
34453 return true;
34454 break;
34455 }
34456 case X86ISD::VPERMV: {
34457 SDValue Mask = Op.getOperand(0);
34458 APInt MaskUndef, MaskZero;
34459 if (SimplifyDemandedVectorElts(Mask, DemandedElts, MaskUndef, MaskZero, TLO,
34460 Depth + 1))
34461 return true;
34462 break;
34463 }
34464 case X86ISD::PSHUFB:
34465 case X86ISD::VPERMV3:
34466 case X86ISD::VPERMILPV: {
34467 SDValue Mask = Op.getOperand(1);
34468 APInt MaskUndef, MaskZero;
34469 if (SimplifyDemandedVectorElts(Mask, DemandedElts, MaskUndef, MaskZero, TLO,
34470 Depth + 1))
34471 return true;
34472 break;
34473 }
34474 case X86ISD::VPPERM:
34475 case X86ISD::VPERMIL2: {
34476 SDValue Mask = Op.getOperand(2);
34477 APInt MaskUndef, MaskZero;
34478 if (SimplifyDemandedVectorElts(Mask, DemandedElts, MaskUndef, MaskZero, TLO,
34479 Depth + 1))
34480 return true;
34481 break;
34482 }
34483 }
34484
34485 // For 256/512-bit ops that are 128/256-bit ops glued together, if we do not
34486 // demand any of the high elements, then narrow the op to 128/256-bits: e.g.
34487 // (op ymm0, ymm1) --> insert undef, (op xmm0, xmm1), 0
34488 if ((VT.is256BitVector() || VT.is512BitVector()) &&
34489 DemandedElts.lshr(NumElts / 2) == 0) {
34490 unsigned SizeInBits = VT.getSizeInBits();
34491 unsigned ExtSizeInBits = SizeInBits / 2;
34492
34493 // See if 512-bit ops only use the bottom 128-bits.
34494 if (VT.is512BitVector() && DemandedElts.lshr(NumElts / 4) == 0)
34495 ExtSizeInBits = SizeInBits / 4;
34496
34497 switch (Opc) {
34498 // Zero upper elements.
34499 case X86ISD::VZEXT_MOVL: {
34500 SDLoc DL(Op);
34501 SDValue Ext0 =
34502 extractSubVector(Op.getOperand(0), 0, TLO.DAG, DL, ExtSizeInBits);
34503 SDValue ExtOp =
34504 TLO.DAG.getNode(Opc, DL, Ext0.getValueType(), Ext0);
34505 SDValue UndefVec = TLO.DAG.getUNDEF(VT);
34506 SDValue Insert =
34507 insertSubVector(UndefVec, ExtOp, 0, TLO.DAG, DL, ExtSizeInBits);
34508 return TLO.CombineTo(Op, Insert);
34509 }
34510 // Subvector broadcast.
34511 case X86ISD::SUBV_BROADCAST: {
34512 SDLoc DL(Op);
34513 SDValue Src = Op.getOperand(0);
34514 if (Src.getValueSizeInBits() > ExtSizeInBits)
34515 Src = extractSubVector(Src, 0, TLO.DAG, DL, ExtSizeInBits);
34516 else if (Src.getValueSizeInBits() < ExtSizeInBits) {
34517 MVT SrcSVT = Src.getSimpleValueType().getScalarType();
34518 MVT SrcVT =
34519 MVT::getVectorVT(SrcSVT, ExtSizeInBits / SrcSVT.getSizeInBits());
34520 Src = TLO.DAG.getNode(X86ISD::SUBV_BROADCAST, DL, SrcVT, Src);
34521 }
34522 return TLO.CombineTo(Op, insertSubVector(TLO.DAG.getUNDEF(VT), Src, 0,
34523 TLO.DAG, DL, ExtSizeInBits));
34524 }
34525 // Byte shifts by immediate.
34526 case X86ISD::VSHLDQ:
34527 case X86ISD::VSRLDQ:
34528 // Shift by uniform.
34529 case X86ISD::VSHL:
34530 case X86ISD::VSRL:
34531 case X86ISD::VSRA:
34532 // Shift by immediate.
34533 case X86ISD::VSHLI:
34534 case X86ISD::VSRLI:
34535 case X86ISD::VSRAI: {
34536 SDLoc DL(Op);
34537 SDValue Ext0 =
34538 extractSubVector(Op.getOperand(0), 0, TLO.DAG, DL, ExtSizeInBits);
34539 SDValue ExtOp =
34540 TLO.DAG.getNode(Opc, DL, Ext0.getValueType(), Ext0, Op.getOperand(1));
34541 SDValue UndefVec = TLO.DAG.getUNDEF(VT);
34542 SDValue Insert =
34543 insertSubVector(UndefVec, ExtOp, 0, TLO.DAG, DL, ExtSizeInBits);
34544 return TLO.CombineTo(Op, Insert);
34545 }
34546 case X86ISD::VPERMI: {
34547 // Simplify PERMPD/PERMQ to extract_subvector.
34548 // TODO: This should be done in shuffle combining.
34549 if (VT == MVT::v4f64 || VT == MVT::v4i64) {
34550 SmallVector<int, 4> Mask;
34551 DecodeVPERMMask(NumElts, Op.getConstantOperandVal(1), Mask);
34552 if (isUndefOrEqual(Mask[0], 2) && isUndefOrEqual(Mask[1], 3)) {
34553 SDLoc DL(Op);
34554 SDValue Ext = extractSubVector(Op.getOperand(0), 2, TLO.DAG, DL, 128);
34555 SDValue UndefVec = TLO.DAG.getUNDEF(VT);
34556 SDValue Insert = insertSubVector(UndefVec, Ext, 0, TLO.DAG, DL, 128);
34557 return TLO.CombineTo(Op, Insert);
34558 }
34559 }
34560 break;
34561 }
34562 // Target Shuffles.
34563 case X86ISD::PSHUFB:
34564 case X86ISD::UNPCKL:
34565 case X86ISD::UNPCKH:
34566 // Saturated Packs.
34567 case X86ISD::PACKSS:
34568 case X86ISD::PACKUS:
34569 // Horizontal Ops.
34570 case X86ISD::HADD:
34571 case X86ISD::HSUB:
34572 case X86ISD::FHADD:
34573 case X86ISD::FHSUB: {
34574 SDLoc DL(Op);
34575 MVT ExtVT = VT.getSimpleVT();
34576 ExtVT = MVT::getVectorVT(ExtVT.getScalarType(),
34577 ExtSizeInBits / ExtVT.getScalarSizeInBits());
34578 SDValue Ext0 =
34579 extractSubVector(Op.getOperand(0), 0, TLO.DAG, DL, ExtSizeInBits);
34580 SDValue Ext1 =
34581 extractSubVector(Op.getOperand(1), 0, TLO.DAG, DL, ExtSizeInBits);
34582 SDValue ExtOp = TLO.DAG.getNode(Opc, DL, ExtVT, Ext0, Ext1);
34583 SDValue UndefVec = TLO.DAG.getUNDEF(VT);
34584 SDValue Insert =
34585 insertSubVector(UndefVec, ExtOp, 0, TLO.DAG, DL, ExtSizeInBits);
34586 return TLO.CombineTo(Op, Insert);
34587 }
34588 }
34589 }
34590
34591 // Get target/faux shuffle mask.
34592 APInt OpUndef, OpZero;
34593 SmallVector<int, 64> OpMask;
34594 SmallVector<SDValue, 2> OpInputs;
34595 if (!getTargetShuffleInputs(Op, DemandedElts, OpInputs, OpMask, OpUndef,
34596 OpZero, TLO.DAG, Depth, false))
34597 return false;
34598
34599 // Shuffle inputs must be the same size as the result.
34600 if (OpMask.size() != (unsigned)NumElts ||
34601 llvm::any_of(OpInputs, [VT](SDValue V) {
34602 return VT.getSizeInBits() != V.getValueSizeInBits() ||
34603 !V.getValueType().isVector();
34604 }))
34605 return false;
34606
34607 KnownZero = OpZero;
34608 KnownUndef = OpUndef;
34609
34610 // Check if shuffle mask can be simplified to undef/zero/identity.
34611 int NumSrcs = OpInputs.size();
34612 for (int i = 0; i != NumElts; ++i)
34613 if (!DemandedElts[i])
34614 OpMask[i] = SM_SentinelUndef;
34615
34616 if (isUndefInRange(OpMask, 0, NumElts)) {
34617 KnownUndef.setAllBits();
34618 return TLO.CombineTo(Op, TLO.DAG.getUNDEF(VT));
34619 }
34620 if (isUndefOrZeroInRange(OpMask, 0, NumElts)) {
34621 KnownZero.setAllBits();
34622 return TLO.CombineTo(
34623 Op, getZeroVector(VT.getSimpleVT(), Subtarget, TLO.DAG, SDLoc(Op)));
34624 }
34625 for (int Src = 0; Src != NumSrcs; ++Src)
34626 if (isSequentialOrUndefInRange(OpMask, 0, NumElts, Src * NumElts))
34627 return TLO.CombineTo(Op, TLO.DAG.getBitcast(VT, OpInputs[Src]));
34628
34629 // Attempt to simplify inputs.
34630 for (int Src = 0; Src != NumSrcs; ++Src) {
34631 // TODO: Support inputs of different types.
34632 if (OpInputs[Src].getValueType() != VT)
34633 continue;
34634
34635 int Lo = Src * NumElts;
34636 APInt SrcElts = APInt::getNullValue(NumElts);
34637 for (int i = 0; i != NumElts; ++i)
34638 if (DemandedElts[i]) {
34639 int M = OpMask[i] - Lo;
34640 if (0 <= M && M < NumElts)
34641 SrcElts.setBit(M);
34642 }
34643
34644 // TODO - Propagate input undef/zero elts.
34645 APInt SrcUndef, SrcZero;
34646 if (SimplifyDemandedVectorElts(OpInputs[Src], SrcElts, SrcUndef, SrcZero,
34647 TLO, Depth + 1))
34648 return true;
34649 }
34650
34651 return false;
34652}
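
The KSHIFTL/KSHIFTR cases above both collapse a pair of opposing shifts into a single shift of |ShiftAmt - C1|, keeping the outer direction when the difference is non-negative and flipping it otherwise (valid only because the elements shifted out are not demanded). The arithmetic in isolation, as a hypothetical sketch with plain integers standing in for the mask-shift nodes:

#include <utility>

// Returns {shift-left?, amount} for folding "shift one way by C1, then the
// other way by ShAmt" into one shift; OuterIsLeft describes the outer op.
static std::pair<bool, unsigned>
foldOpposingShifts(bool OuterIsLeft, unsigned ShAmt, unsigned C1) {
  int Diff = static_cast<int>(ShAmt) - static_cast<int>(C1);
  if (Diff >= 0)
    return {OuterIsLeft, static_cast<unsigned>(Diff)};   // keep outer direction
  return {!OuterIsLeft, static_cast<unsigned>(-Diff)};   // flip to inner direction
}
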
34653
34654bool X86TargetLowering::SimplifyDemandedBitsForTargetNode(
34655 SDValue Op, const APInt &OriginalDemandedBits,
34656 const APInt &OriginalDemandedElts, KnownBits &Known, TargetLoweringOpt &TLO,
34657 unsigned Depth) const {
34658 EVT VT = Op.getValueType();
34659 unsigned BitWidth = OriginalDemandedBits.getBitWidth();
34660 unsigned Opc = Op.getOpcode();
34661 switch(Opc) {
34662 case X86ISD::PMULDQ:
34663 case X86ISD::PMULUDQ: {
34664 // PMULDQ/PMULUDQ only uses lower 32 bits from each vector element.
34665 KnownBits KnownOp;
34666 SDValue LHS = Op.getOperand(0);
34667 SDValue RHS = Op.getOperand(1);
34668 // FIXME: Can we bound this better?
34669 APInt DemandedMask = APInt::getLowBitsSet(64, 32);
34670 if (SimplifyDemandedBits(LHS, DemandedMask, OriginalDemandedElts, KnownOp,
34671 TLO, Depth + 1))
34672 return true;
34673 if (SimplifyDemandedBits(RHS, DemandedMask, OriginalDemandedElts, KnownOp,
34674 TLO, Depth + 1))
34675 return true;
34676
34677 // Aggressively peek through ops to get at the demanded low bits.
34678 SDValue DemandedLHS = SimplifyMultipleUseDemandedBits(
34679 LHS, DemandedMask, OriginalDemandedElts, TLO.DAG, Depth + 1);
34680 SDValue DemandedRHS = SimplifyMultipleUseDemandedBits(
34681 RHS, DemandedMask, OriginalDemandedElts, TLO.DAG, Depth + 1);
34682 if (DemandedLHS || DemandedRHS) {
34683 DemandedLHS = DemandedLHS ? DemandedLHS : LHS;
34684 DemandedRHS = DemandedRHS ? DemandedRHS : RHS;
34685 return TLO.CombineTo(
34686 Op, TLO.DAG.getNode(Opc, SDLoc(Op), VT, DemandedLHS, DemandedRHS));
34687 }
34688 break;
34689 }
34690 case X86ISD::VSHLI: {
34691 SDValue Op0 = Op.getOperand(0);
34692 SDValue Op1 = Op.getOperand(1);
34693
34694 if (auto *ShiftImm = dyn_cast<ConstantSDNode>(Op1)) {
34695 if (ShiftImm->getAPIntValue().uge(BitWidth))
34696 break;
34697
34698 unsigned ShAmt = ShiftImm->getZExtValue();
34699 APInt DemandedMask = OriginalDemandedBits.lshr(ShAmt);
34700
34701 // If this is ((X >>u C1) << ShAmt), see if we can simplify this into a
34702 // single shift. We can do this if the bottom bits (which are shifted
34703 // out) are never demanded.
34704 if (Op0.getOpcode() == X86ISD::VSRLI &&
34705 OriginalDemandedBits.countTrailingZeros() >= ShAmt) {
34706 if (auto *Shift2Imm = dyn_cast<ConstantSDNode>(Op0.getOperand(1))) {
34707 if (Shift2Imm->getAPIntValue().ult(BitWidth)) {
34708 int Diff = ShAmt - Shift2Imm->getZExtValue();
34709 if (Diff == 0)
34710 return TLO.CombineTo(Op, Op0.getOperand(0));
34711
34712 unsigned NewOpc = Diff < 0 ? X86ISD::VSRLI : X86ISD::VSHLI;
34713 SDValue NewShift = TLO.DAG.getNode(
34714 NewOpc, SDLoc(Op), VT, Op0.getOperand(0),
34715 TLO.DAG.getTargetConstant(std::abs(Diff), SDLoc(Op), MVT::i8));
34716 return TLO.CombineTo(Op, NewShift);
34717 }
34718 }
34719 }
34720
34721 if (SimplifyDemandedBits(Op0, DemandedMask, OriginalDemandedElts, Known,
34722 TLO, Depth + 1))
34723 return true;
34724
34725 assert(!Known.hasConflict() && "Bits known to be one AND zero?");
34726 Known.Zero <<= ShAmt;
34727 Known.One <<= ShAmt;
34728
34729 // Low bits known zero.
34730 Known.Zero.setLowBits(ShAmt);
34731 }
34732 break;
34733 }
34734 case X86ISD::VSRLI: {
34735 if (auto *ShiftImm = dyn_cast<ConstantSDNode>(Op.getOperand(1))) {
34736 if (ShiftImm->getAPIntValue().uge(BitWidth))
34737 break;
34738
34739 unsigned ShAmt = ShiftImm->getZExtValue();
34740 APInt DemandedMask = OriginalDemandedBits << ShAmt;
34741
34742 if (SimplifyDemandedBits(Op.getOperand(0), DemandedMask,
34743 OriginalDemandedElts, Known, TLO, Depth + 1))
34744 return true;
34745
34746 assert(!Known.hasConflict() && "Bits known to be one AND zero?");
34747 Known.Zero.lshrInPlace(ShAmt);
34748 Known.One.lshrInPlace(ShAmt);
34749
34750 // High bits known zero.
34751 Known.Zero.setHighBits(ShAmt);
34752 }
34753 break;
34754 }
34755 case X86ISD::VSRAI: {
34756 SDValue Op0 = Op.getOperand(0);
34757 SDValue Op1 = Op.getOperand(1);
34758
34759 if (auto *ShiftImm = dyn_cast<ConstantSDNode>(Op1)) {
34760 if (ShiftImm->getAPIntValue().uge(BitWidth))
34761 break;
34762
34763 unsigned ShAmt = ShiftImm->getZExtValue();
34764 APInt DemandedMask = OriginalDemandedBits << ShAmt;
34765
34766 // If we just want the sign bit then we don't need to shift it.
34767 if (OriginalDemandedBits.isSignMask())
34768 return TLO.CombineTo(Op, Op0);
34769
34770 // fold (VSRAI (VSHLI X, C1), C1) --> X iff NumSignBits(X) > C1
34771 if (Op0.getOpcode() == X86ISD::VSHLI && Op1 == Op0.getOperand(1)) {
34772 SDValue Op00 = Op0.getOperand(0);
34773 unsigned NumSignBits =
34774 TLO.DAG.ComputeNumSignBits(Op00, OriginalDemandedElts);
34775 if (ShAmt < NumSignBits)
34776 return TLO.CombineTo(Op, Op00);
34777 }
34778
34779 // If any of the demanded bits are produced by the sign extension, we also
34780 // demand the input sign bit.
34781 if (OriginalDemandedBits.countLeadingZeros() < ShAmt)
34782 DemandedMask.setSignBit();
34783
34784 if (SimplifyDemandedBits(Op0, DemandedMask, OriginalDemandedElts, Known,
34785 TLO, Depth + 1))
34786 return true;
34787
34788 assert(!Known.hasConflict() && "Bits known to be one AND zero?");
34789 Known.Zero.lshrInPlace(ShAmt);
34790 Known.One.lshrInPlace(ShAmt);
34791
34792 // If the input sign bit is known to be zero, or if none of the top bits
34793 // are demanded, turn this into an unsigned shift right.
34794 if (Known.Zero[BitWidth - ShAmt - 1] ||
34795 OriginalDemandedBits.countLeadingZeros() >= ShAmt)
34796 return TLO.CombineTo(
34797 Op, TLO.DAG.getNode(X86ISD::VSRLI, SDLoc(Op), VT, Op0, Op1));
34798
34799 // High bits are known one.
34800 if (Known.One[BitWidth - ShAmt - 1])
34801 Known.One.setHighBits(ShAmt);
34802 }
34803 break;
34804 }
34805 case X86ISD::PEXTRB:
34806 case X86ISD::PEXTRW: {
34807 SDValue Vec = Op.getOperand(0);
34808 auto *CIdx = dyn_cast<ConstantSDNode>(Op.getOperand(1));
34809 MVT VecVT = Vec.getSimpleValueType();
34810 unsigned NumVecElts = VecVT.getVectorNumElements();
34811
34812 if (CIdx && CIdx->getAPIntValue().ult(NumVecElts)) {
34813 unsigned Idx = CIdx->getZExtValue();
34814 unsigned VecBitWidth = VecVT.getScalarSizeInBits();
34815
34816 // If we demand no bits from the vector then we must have demanded
34817 // bits from the implicit zext - simplify to zero.
34818 APInt DemandedVecBits = OriginalDemandedBits.trunc(VecBitWidth);
34819 if (DemandedVecBits == 0)
34820 return TLO.CombineTo(Op, TLO.DAG.getConstant(0, SDLoc(Op), VT));
34821
34822 APInt KnownUndef, KnownZero;
34823 APInt DemandedVecElts = APInt::getOneBitSet(NumVecElts, Idx);
34824 if (SimplifyDemandedVectorElts(Vec, DemandedVecElts, KnownUndef,
34825 KnownZero, TLO, Depth + 1))
34826 return true;
34827
34828 KnownBits KnownVec;
34829 if (SimplifyDemandedBits(Vec, DemandedVecBits, DemandedVecElts,
34830 KnownVec, TLO, Depth + 1))
34831 return true;
34832
34833 if (SDValue V = SimplifyMultipleUseDemandedBits(
34834 Vec, DemandedVecBits, DemandedVecElts, TLO.DAG, Depth + 1))
34835 return TLO.CombineTo(
34836 Op, TLO.DAG.getNode(Opc, SDLoc(Op), VT, V, Op.getOperand(1)));
34837
34838 Known = KnownVec.zext(BitWidth, true);
34839 return false;
34840 }
34841 break;
34842 }
34843 case X86ISD::PINSRB:
34844 case X86ISD::PINSRW: {
34845 SDValue Vec = Op.getOperand(0);
34846 SDValue Scl = Op.getOperand(1);
34847 auto *CIdx = dyn_cast<ConstantSDNode>(Op.getOperand(2));
34848 MVT VecVT = Vec.getSimpleValueType();
34849
34850 if (CIdx && CIdx->getAPIntValue().ult(VecVT.getVectorNumElements())) {
34851 unsigned Idx = CIdx->getZExtValue();
34852 if (!OriginalDemandedElts[Idx])
34853 return TLO.CombineTo(Op, Vec);
34854
34855 KnownBits KnownVec;
34856 APInt DemandedVecElts(OriginalDemandedElts);
34857 DemandedVecElts.clearBit(Idx);
34858 if (SimplifyDemandedBits(Vec, OriginalDemandedBits, DemandedVecElts,
34859 KnownVec, TLO, Depth + 1))
34860 return true;
34861
34862 KnownBits KnownScl;
34863 unsigned NumSclBits = Scl.getScalarValueSizeInBits();
34864 APInt DemandedSclBits = OriginalDemandedBits.zext(NumSclBits);
34865 if (SimplifyDemandedBits(Scl, DemandedSclBits, KnownScl, TLO, Depth + 1))
34866 return true;
34867
34868 KnownScl = KnownScl.trunc(VecVT.getScalarSizeInBits());
34869 Known.One = KnownVec.One & KnownScl.One;
34870 Known.Zero = KnownVec.Zero & KnownScl.Zero;
34871 return false;
34872 }
34873 break;
34874 }
34875 case X86ISD::PACKSS:
34876 // PACKSS saturates to MIN/MAX integer values. So if we just want the
34877 // sign bit then we can just ask for the source operands sign bit.
34878 // TODO - add known bits handling.
34879 if (OriginalDemandedBits.isSignMask()) {
34880 APInt DemandedLHS, DemandedRHS;
34881 getPackDemandedElts(VT, OriginalDemandedElts, DemandedLHS, DemandedRHS);
34882
34883 KnownBits KnownLHS, KnownRHS;
34884 APInt SignMask = APInt::getSignMask(BitWidth * 2);
34885 if (SimplifyDemandedBits(Op.getOperand(0), SignMask, DemandedLHS,
34886 KnownLHS, TLO, Depth + 1))
34887 return true;
34888 if (SimplifyDemandedBits(Op.getOperand(1), SignMask, DemandedRHS,
34889 KnownRHS, TLO, Depth + 1))
34890 return true;
34891 }
34892 // TODO - add general PACKSS/PACKUS SimplifyDemandedBits support.
34893 break;
34894 case X86ISD::PCMPGT:
34895 // icmp sgt(0, R) == ashr(R, BitWidth-1).
34896 // iff we only need the sign bit then we can use R directly.
34897 if (OriginalDemandedBits.isSignMask() &&
34898 ISD::isBuildVectorAllZeros(Op.getOperand(0).getNode()))
34899 return TLO.CombineTo(Op, Op.getOperand(1));
34900 break;
34901 case X86ISD::MOVMSK: {
34902 SDValue Src = Op.getOperand(0);
34903 MVT SrcVT = Src.getSimpleValueType();
34904 unsigned SrcBits = SrcVT.getScalarSizeInBits();
34905 unsigned NumElts = SrcVT.getVectorNumElements();
34906
34907 // If we don't need the sign bits at all just return zero.
34908 if (OriginalDemandedBits.countTrailingZeros() >= NumElts)
34909 return TLO.CombineTo(Op, TLO.DAG.getConstant(0, SDLoc(Op), VT));
34910
34911 // Only demand the vector elements of the sign bits we need.
34912 APInt KnownUndef, KnownZero;
34913 APInt DemandedElts = OriginalDemandedBits.zextOrTrunc(NumElts);
34914 if (SimplifyDemandedVectorElts(Src, DemandedElts, KnownUndef, KnownZero,
34915 TLO, Depth + 1))
34916 return true;
34917
34918 Known.Zero = KnownZero.zextOrSelf(BitWidth);
34919 Known.Zero.setHighBits(BitWidth - NumElts);
34920
34921 // MOVMSK only uses the MSB from each vector element.
34922 KnownBits KnownSrc;
34923 if (SimplifyDemandedBits(Src, APInt::getSignMask(SrcBits), DemandedElts,
34924 KnownSrc, TLO, Depth + 1))
34925 return true;
34926
34927 if (KnownSrc.One[SrcBits - 1])
34928 Known.One.setLowBits(NumElts);
34929 else if (KnownSrc.Zero[SrcBits - 1])
34930 Known.Zero.setLowBits(NumElts);
34931 return false;
34932 }
34933 }
34934
34935 return TargetLowering::SimplifyDemandedBitsForTargetNode(
34936 Op, OriginalDemandedBits, OriginalDemandedElts, Known, TLO, Depth);
34937}
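
The PCMPGT fold above uses the identity that, per lane, "0 > R" in signed arithmetic is exactly the sign bit of R, so when only the sign bit of the result is demanded the compare can be replaced by R itself. A scalar check of the identity (hypothetical sketch, assuming 32-bit lanes):

#include <cstdint>

static bool compareMatchesSignBit(int32_t R) {
  bool CmpLane = 0 > R;                                 // one lane of pcmpgt(0, R)
  bool SignBit = (static_cast<uint32_t>(R) >> 31) != 0; // ashr(R, 31) as a bool
  return CmpLane == SignBit;                            // holds for every R
}
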
34938
34939SDValue X86TargetLowering::SimplifyMultipleUseDemandedBitsForTargetNode(
34940 SDValue Op, const APInt &DemandedBits, const APInt &DemandedElts,
34941 SelectionDAG &DAG, unsigned Depth) const {
34942 int NumElts = DemandedElts.getBitWidth();
34943 unsigned Opc = Op.getOpcode();
34944 EVT VT = Op.getValueType();
34945
34946 switch (Opc) {
34947 case X86ISD::PINSRB:
34948 case X86ISD::PINSRW: {
34949 // If we don't demand the inserted element, return the base vector.
34950 SDValue Vec = Op.getOperand(0);
34951 auto *CIdx = dyn_cast<ConstantSDNode>(Op.getOperand(2));
34952 MVT VecVT = Vec.getSimpleValueType();
34953 if (CIdx && CIdx->getAPIntValue().ult(VecVT.getVectorNumElements()) &&
34954 !DemandedElts[CIdx->getZExtValue()])
34955 return Vec;
34956 break;
34957 }
34958 }
34959
34960 APInt ShuffleUndef, ShuffleZero;
34961 SmallVector<int, 16> ShuffleMask;
34962 SmallVector<SDValue, 2> ShuffleOps;
34963 if (getTargetShuffleInputs(Op, DemandedElts, ShuffleOps, ShuffleMask,
34964 ShuffleUndef, ShuffleZero, DAG, Depth, false)) {
34965 // If all the demanded elts are from one operand and are inline,
34966 // then we can use the operand directly.
34967 int NumOps = ShuffleOps.size();
34968 if (ShuffleMask.size() == (unsigned)NumElts &&
34969 llvm::all_of(ShuffleOps, [VT](SDValue V) {
34970 return VT.getSizeInBits() == V.getValueSizeInBits();
34971 })) {
34972
34973 if (DemandedElts.isSubsetOf(ShuffleUndef))
34974 return DAG.getUNDEF(VT);
34975 if (DemandedElts.isSubsetOf(ShuffleUndef | ShuffleZero))
34976 return getZeroVector(VT.getSimpleVT(), Subtarget, DAG, SDLoc(Op));
34977
34978 // Bitmask that indicates which ops have only been accessed 'inline'.
34979 APInt IdentityOp = APInt::getAllOnesValue(NumOps);
34980 for (int i = 0; i != NumElts; ++i) {
34981 int M = ShuffleMask[i];
34982 if (!DemandedElts[i] || ShuffleUndef[i])
34983 continue;
34984 int Op = M / NumElts;
34985 int Index = M % NumElts;
34986 if (M < 0 || Index != i) {
34987 IdentityOp.clearAllBits();
34988 break;
34989 }
34990 IdentityOp &= APInt::getOneBitSet(NumOps, Op);
34991 if (IdentityOp == 0)
34992 break;
34993 }
34994 assert((IdentityOp == 0 || IdentityOp.countPopulation() == 1) &&
34995        "Multiple identity shuffles detected");
34996
34997 if (IdentityOp != 0)
34998 return DAG.getBitcast(VT, ShuffleOps[IdentityOp.countTrailingZeros()]);
34999 }
35000 }
35001
35002 return TargetLowering::SimplifyMultipleUseDemandedBitsForTargetNode(
35003 Op, DemandedBits, DemandedElts, DAG, Depth);
35004}
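
The IdentityOp loop above boils down to: every defined, demanded lane must read element i of one and the same operand, otherwise no single operand can stand in for the whole shuffle. Ignoring the undef/zero bookkeeping, a plain-integer version of that scan (hypothetical helper; -1 marks undef lanes, every defined lane treated as demanded, NumOps assumed <= 32):

#include <vector>

// Returns the index of the operand the mask is an identity of, or -1.
static int findIdentityOperand(const std::vector<int> &Mask, unsigned NumOps) {
  int NumElts = static_cast<int>(Mask.size());
  unsigned Candidates = (NumOps >= 32) ? ~0u : ((1u << NumOps) - 1);
  for (int i = 0; i != NumElts; ++i) {
    int M = Mask[i];
    if (M < 0)
      continue;                 // undef lanes constrain nothing
    if (M % NumElts != i)
      return -1;                // lane moved: not an identity of any operand
    Candidates &= 1u << (M / NumElts);
    if (Candidates == 0)
      return -1;                // in-place lanes come from different operands
  }
  for (unsigned Op = 0; Op != NumOps; ++Op)
    if (Candidates & (1u << Op))
      return static_cast<int>(Op);
  return -1;
}
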
35005
35006/// Check if a vector extract from a target-specific shuffle of a load can be
35007/// folded into a single element load.
35008/// Similar handling for VECTOR_SHUFFLE is performed by DAGCombiner, but
35009/// shuffles have been custom lowered so we need to handle those here.
35010static SDValue
35011XFormVExtractWithShuffleIntoLoad(SDNode *N, SelectionDAG &DAG,
35012 TargetLowering::DAGCombinerInfo &DCI) {
35013 if (DCI.isBeforeLegalizeOps())
35014 return SDValue();
35015
35016 SDValue InVec = N->getOperand(0);
35017 SDValue EltNo = N->getOperand(1);
35018 EVT EltVT = N->getValueType(0);
35019
35020 if (!isa<ConstantSDNode>(EltNo))
35021 return SDValue();
35022
35023 EVT OriginalVT = InVec.getValueType();
35024 unsigned NumOriginalElts = OriginalVT.getVectorNumElements();
35025
35026 // Peek through bitcasts, don't duplicate a load with other uses.
35027 InVec = peekThroughOneUseBitcasts(InVec);
35028
35029 EVT CurrentVT = InVec.getValueType();
35030 if (!CurrentVT.isVector())
35031 return SDValue();
35032
35033 unsigned NumCurrentElts = CurrentVT.getVectorNumElements();
35034 if ((NumOriginalElts % NumCurrentElts) != 0)
35035 return SDValue();
35036
35037 if (!isTargetShuffle(InVec.getOpcode()))
35038 return SDValue();
35039
35040 // Don't duplicate a load with other uses.
35041 if (!InVec.hasOneUse())
35042 return SDValue();
35043
35044 SmallVector<int, 16> ShuffleMask;
35045 SmallVector<SDValue, 2> ShuffleOps;
35046 bool UnaryShuffle;
35047 if (!getTargetShuffleMask(InVec.getNode(), CurrentVT.getSimpleVT(), true,
35048 ShuffleOps, ShuffleMask, UnaryShuffle))
35049 return SDValue();
35050
35051 unsigned Scale = NumOriginalElts / NumCurrentElts;
35052 if (Scale > 1) {
35053 SmallVector<int, 16> ScaledMask;
35054 scaleShuffleMask<int>(Scale, ShuffleMask, ScaledMask);
35055 ShuffleMask = std::move(ScaledMask);
35056 }
35057 assert(ShuffleMask.size() == NumOriginalElts && "Shuffle mask size mismatch");
35058
35059 // Select the input vector, guarding against out of range extract vector.
35060 int Elt = cast<ConstantSDNode>(EltNo)->getZExtValue();
35061 int Idx = (Elt > (int)NumOriginalElts) ? SM_SentinelUndef : ShuffleMask[Elt];
35062
35063 if (Idx == SM_SentinelZero)
35064 return EltVT.isInteger() ? DAG.getConstant(0, SDLoc(N), EltVT)
35065 : DAG.getConstantFP(+0.0, SDLoc(N), EltVT);
35066 if (Idx == SM_SentinelUndef)
35067 return DAG.getUNDEF(EltVT);
35068
35069 // Bail if any mask element is SM_SentinelZero - getVectorShuffle below
35070 // won't handle it.
35071 if (llvm::any_of(ShuffleMask, [](int M) { return M == SM_SentinelZero; }))
35072 return SDValue();
35073
35074 assert(0 <= Idx && Idx < (int)(2 * NumOriginalElts) &&
35075        "Shuffle index out of range");
35076 SDValue LdNode = (Idx < (int)NumOriginalElts) ? ShuffleOps[0] : ShuffleOps[1];
35077
35078 // If inputs to shuffle are the same for both ops, then allow 2 uses
35079 unsigned AllowedUses =
35080 (ShuffleOps.size() > 1 && ShuffleOps[0] == ShuffleOps[1]) ? 2 : 1;
35081
35082 if (LdNode.getOpcode() == ISD::BITCAST) {
35083 // Don't duplicate a load with other uses.
35084 if (!LdNode.getNode()->hasNUsesOfValue(AllowedUses, 0))
35085 return SDValue();
35086
35087 AllowedUses = 1; // only allow 1 load use if we have a bitcast
35088 LdNode = LdNode.getOperand(0);
35089 }
35090
35091 if (!ISD::isNormalLoad(LdNode.getNode()))
35092 return SDValue();
35093
35094 LoadSDNode *LN0 = cast<LoadSDNode>(LdNode);
35095
35096 if (!LN0 || !LN0->hasNUsesOfValue(AllowedUses, 0) || !LN0->isSimple())
35097 return SDValue();
35098
35099 // If there's a bitcast before the shuffle, check if the load type and
35100 // alignment are valid.
35101 unsigned Align = LN0->getAlignment();
35102 const TargetLowering &TLI = DAG.getTargetLoweringInfo();
35103 unsigned NewAlign = DAG.getDataLayout().getABITypeAlignment(
35104 EltVT.getTypeForEVT(*DAG.getContext()));
35105
35106 if (NewAlign > Align || !TLI.isOperationLegalOrCustom(ISD::LOAD, EltVT))
35107 return SDValue();
35108
35109 // All checks match so transform back to vector_shuffle so that DAG combiner
35110 // can finish the job
35111 SDLoc dl(N);
35112
35113 // Create shuffle node taking into account the case that it's a unary shuffle.
35114 SDValue Shuffle = UnaryShuffle ? DAG.getUNDEF(OriginalVT)
35115 : DAG.getBitcast(OriginalVT, ShuffleOps[1]);
35116 Shuffle = DAG.getVectorShuffle(OriginalVT, dl,
35117 DAG.getBitcast(OriginalVT, ShuffleOps[0]),
35118 Shuffle, ShuffleMask);
35119 return DAG.getNode(ISD::EXTRACT_VECTOR_ELT, dl, N->getValueType(0), Shuffle,
35120 EltNo);
35121}
35122
35123// Helper to peek through bitops/setcc to determine size of source vector.
35124// Allows combineBitcastvxi1 to determine what size vector generated a <X x i1>.
35125static bool checkBitcastSrcVectorSize(SDValue Src, unsigned Size) {
35126 switch (Src.getOpcode()) {
35127 case ISD::SETCC:
35128 return Src.getOperand(0).getValueSizeInBits() == Size;
35129 case ISD::AND:
35130 case ISD::XOR:
35131 case ISD::OR:
35132 return checkBitcastSrcVectorSize(Src.getOperand(0), Size) &&
35133 checkBitcastSrcVectorSize(Src.getOperand(1), Size);
35134 }
35135 return false;
35136}
35137
35138// Helper to push sign extension of vXi1 SETCC result through bitops.
35139static SDValue signExtendBitcastSrcVector(SelectionDAG &DAG, EVT SExtVT,
35140 SDValue Src, const SDLoc &DL) {
35141 switch (Src.getOpcode()) {
35142 case ISD::SETCC:
35143 return DAG.getNode(ISD::SIGN_EXTEND, DL, SExtVT, Src);
35144 case ISD::AND:
35145 case ISD::XOR:
35146 case ISD::OR:
35147 return DAG.getNode(
35148 Src.getOpcode(), DL, SExtVT,
35149 signExtendBitcastSrcVector(DAG, SExtVT, Src.getOperand(0), DL),
35150 signExtendBitcastSrcVector(DAG, SExtVT, Src.getOperand(1), DL));
35151 }
35152 llvm_unreachable("Unexpected node type for vXi1 sign extension");
35153}
35154
35155// Try to match patterns such as
35156// (i16 bitcast (v16i1 x))
35157// ->
35158// (i16 movmsk (16i8 sext (v16i1 x)))
35159// before the illegal vector is scalarized on subtargets that don't have legal
35160// vxi1 types.
35161static SDValue combineBitcastvxi1(SelectionDAG &DAG, EVT VT, SDValue Src,
35162 const SDLoc &DL,
35163 const X86Subtarget &Subtarget) {
35164 EVT SrcVT = Src.getValueType();
35165 if (!SrcVT.isSimple() || SrcVT.getScalarType() != MVT::i1)
35166 return SDValue();
35167
35168 // If the input is a truncate from v16i8 or v32i8 go ahead and use a
35169 // movmskb even with avx512. This will be better than truncating to vXi1 and
35170 // using a kmov. This can especially help KNL if the input is a v16i8/v32i8
35171 // vpcmpeqb/vpcmpgtb.
35172 bool IsTruncated = Src.getOpcode() == ISD::TRUNCATE && Src.hasOneUse() &&
35173 (Src.getOperand(0).getValueType() == MVT::v16i8 ||
35174 Src.getOperand(0).getValueType() == MVT::v32i8 ||
35175 Src.getOperand(0).getValueType() == MVT::v64i8);
35176
35177 // With AVX512 vxi1 types are legal and we prefer using k-regs.
35178 // MOVMSK is supported in SSE2 or later.
35179 if (!Subtarget.hasSSE2() || (Subtarget.hasAVX512() && !IsTruncated))
35180 return SDValue();
35181
35182 // There are MOVMSK flavors for types v16i8, v32i8, v4f32, v8f32, v2f64 and
35183 // v4f64. So all legal 128-bit and 256-bit vectors are covered except for
35184 // v8i16 and v16i16.
35185 // For these two cases, we can shuffle the upper element bytes to a
35186 // consecutive sequence at the start of the vector and treat the results as
35187 // v16i8 or v32i8, and for v16i8 this is the preferable solution. However,
35188 // for v16i16 this is not the case, because the shuffle is expensive, so we
35189 // avoid sign-extending to this type entirely.
35190 // For example, t0 := (v8i16 sext(v8i1 x)) needs to be shuffled as:
35191 // (v16i8 shuffle <0,2,4,6,8,10,12,14,u,u,...,u> (v16i8 bitcast t0), undef)
35192 MVT SExtVT;
35193 bool PropagateSExt = false;
35194 switch (SrcVT.getSimpleVT().SimpleTy) {
35195 default:
35196 return SDValue();
35197 case MVT::v2i1:
35198 SExtVT = MVT::v2i64;
35199 break;
35200 case MVT::v4i1:
35201 SExtVT = MVT::v4i32;
35202 // For cases such as (i4 bitcast (v4i1 setcc v4i64 v1, v2))
35203 // sign-extend to a 256-bit operation to avoid truncation.
35204 if (Subtarget.hasAVX() && checkBitcastSrcVectorSize(Src, 256)) {
35205 SExtVT = MVT::v4i64;
35206 PropagateSExt = true;
35207 }
35208 break;
35209 case MVT::v8i1:
35210 SExtVT = MVT::v8i16;
35211 // For cases such as (i8 bitcast (v8i1 setcc v8i32 v1, v2)),
35212 // sign-extend to a 256-bit operation to match the compare.
35213 // If the setcc operand is 128-bit, prefer sign-extending to 128-bit over
35214 // 256-bit because the shuffle is cheaper than sign extending the result of
35215 // the compare.
35216 if (Subtarget.hasAVX() && (checkBitcastSrcVectorSize(Src, 256) ||
35217 checkBitcastSrcVectorSize(Src, 512))) {
35218 SExtVT = MVT::v8i32;
35219 PropagateSExt = true;
35220 }
35221 break;
35222 case MVT::v16i1:
35223 SExtVT = MVT::v16i8;
35224 // For the case (i16 bitcast (v16i1 setcc v16i16 v1, v2)),
35225 // it is not profitable to sign-extend to 256-bit because this will
35226 // require an extra cross-lane shuffle which is more expensive than
35227 // truncating the result of the compare to 128-bits.
35228 break;
35229 case MVT::v32i1:
35230 SExtVT = MVT::v32i8;
35231 break;
35232 case MVT::v64i1:
35233 // If we have AVX512F but not AVX512BW, and the input is truncated from
35234 // v64i8 (checked earlier), split the input and make two pmovmskbs.
35235 if (Subtarget.hasAVX512() && !Subtarget.hasBWI()) {
35236 SExtVT = MVT::v64i8;
35237 break;
35238 }
35239 return SDValue();
35240 };
35241
35242 SDValue V = PropagateSExt ? signExtendBitcastSrcVector(DAG, SExtVT, Src, DL)
35243 : DAG.getNode(ISD::SIGN_EXTEND, DL, SExtVT, Src);
35244
35245 if (SExtVT == MVT::v16i8 || SExtVT == MVT::v32i8 || SExtVT == MVT::v64i8) {
35246 V = getPMOVMSKB(DL, V, DAG, Subtarget);
35247 } else {
35248 if (SExtVT == MVT::v8i16)
35249 V = DAG.getNode(X86ISD::PACKSS, DL, MVT::v16i8, V,
35250 DAG.getUNDEF(MVT::v8i16));
35251 V = DAG.getNode(X86ISD::MOVMSK, DL, MVT::i32, V);
35252 }
35253
35254 EVT IntVT =
35255 EVT::getIntegerVT(*DAG.getContext(), SrcVT.getVectorNumElements());
35256 V = DAG.getZExtOrTrunc(V, DL, IntVT);
35257 return DAG.getBitcast(VT, V);
35258}
35259
35260// Convert a vXi1 constant build vector to the same width scalar integer.
35261static SDValue combinevXi1ConstantToInteger(SDValue Op, SelectionDAG &DAG) {
35262 EVT SrcVT = Op.getValueType();
35263 assert(SrcVT.getVectorElementType() == MVT::i1 &&
35264        "Expected a vXi1 vector");
35265 assert(ISD::isBuildVectorOfConstantSDNodes(Op.getNode()) &&
35266        "Expected a constant build vector");
35267
35268 APInt Imm(SrcVT.getVectorNumElements(), 0);
35269 for (unsigned Idx = 0, e = Op.getNumOperands(); Idx < e; ++Idx) {
35270 SDValue In = Op.getOperand(Idx);
35271 if (!In.isUndef() && (cast<ConstantSDNode>(In)->getZExtValue() & 0x1))
35272 Imm.setBit(Idx);
35273 }
35274 EVT IntVT = EVT::getIntegerVT(*DAG.getContext(), Imm.getBitWidth());
35275 return DAG.getConstant(Imm, SDLoc(Op), IntVT);
35276}
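
combinevXi1ConstantToInteger packs element Idx of the constant mask into bit Idx of an equally wide scalar, treating undef operands as zero. The same packing over a plain container (hypothetical sketch; std::vector<bool> stands in for the constant build vector, result capped at 64 lanes):

#include <cstddef>
#include <cstdint>
#include <vector>

static uint64_t packI1Constants(const std::vector<bool> &Bits) {
  uint64_t Imm = 0;
  for (std::size_t Idx = 0; Idx != Bits.size() && Idx != 64; ++Idx)
    if (Bits[Idx])
      Imm |= uint64_t{1} << Idx;
  return Imm;
}
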
35277
35278static SDValue combineCastedMaskArithmetic(SDNode *N, SelectionDAG &DAG,
35279 TargetLowering::DAGCombinerInfo &DCI,
35280 const X86Subtarget &Subtarget) {
35281 assert(N->getOpcode() == ISD::BITCAST && "Expected a bitcast");
35282
35283 if (!DCI.isBeforeLegalizeOps())
35284 return SDValue();
35285
35286 // Only do this if we have k-registers.
35287 if (!Subtarget.hasAVX512())
35288 return SDValue();
35289
35290 EVT DstVT = N->getValueType(0);
35291 SDValue Op = N->getOperand(0);
35292 EVT SrcVT = Op.getValueType();
35293
35294 if (!Op.hasOneUse())
35295 return SDValue();
35296
35297 // Look for logic ops.
35298 if (Op.getOpcode() != ISD::AND &&
35299 Op.getOpcode() != ISD::OR &&
35300 Op.getOpcode() != ISD::XOR)
35301 return SDValue();
35302
35303 // Make sure we have a bitcast between mask registers and a scalar type.
35304 if (!(SrcVT.isVector() && SrcVT.getVectorElementType() == MVT::i1 &&
35305 DstVT.isScalarInteger()) &&
35306 !(DstVT.isVector() && DstVT.getVectorElementType() == MVT::i1 &&
35307 SrcVT.isScalarInteger()))
35308 return SDValue();
35309
35310 SDValue LHS = Op.getOperand(0);
35311 SDValue RHS = Op.getOperand(1);
35312
35313 if (LHS.hasOneUse() && LHS.getOpcode() == ISD::BITCAST &&
35314 LHS.getOperand(0).getValueType() == DstVT)
35315 return DAG.getNode(Op.getOpcode(), SDLoc(N), DstVT, LHS.getOperand(0),
35316 DAG.getBitcast(DstVT, RHS));
35317
35318 if (RHS.hasOneUse() && RHS.getOpcode() == ISD::BITCAST &&
35319 RHS.getOperand(0).getValueType() == DstVT)
35320 return DAG.getNode(Op.getOpcode(), SDLoc(N), DstVT,
35321 DAG.getBitcast(DstVT, LHS), RHS.getOperand(0));
35322
35323 // If the RHS is a vXi1 build vector, this is a good reason to flip too.
35324 // Most of these have to move a constant from the scalar domain anyway.
35325 if (ISD::isBuildVectorOfConstantSDNodes(RHS.getNode())) {
35326 RHS = combinevXi1ConstantToInteger(RHS, DAG);
35327 return DAG.getNode(Op.getOpcode(), SDLoc(N), DstVT,
35328 DAG.getBitcast(DstVT, LHS), RHS);
35329 }
35330
35331 return SDValue();
35332}
35333
35334static SDValue createMMXBuildVector(BuildVectorSDNode *BV, SelectionDAG &DAG,
35335 const X86Subtarget &Subtarget) {
35336 SDLoc DL(BV);
35337 unsigned NumElts = BV->getNumOperands();
35338 SDValue Splat = BV->getSplatValue();
35339
35340 // Build MMX element from integer GPR or SSE float values.
35341 auto CreateMMXElement = [&](SDValue V) {
35342 if (V.isUndef())
35343 return DAG.getUNDEF(MVT::x86mmx);
35344 if (V.getValueType().isFloatingPoint()) {
35345 if (Subtarget.hasSSE1() && !isa<ConstantFPSDNode>(V)) {
35346 V = DAG.getNode(ISD::SCALAR_TO_VECTOR, DL, MVT::v4f32, V);
35347 V = DAG.getBitcast(MVT::v2i64, V);
35348 return DAG.getNode(X86ISD::MOVDQ2Q, DL, MVT::x86mmx, V);
35349 }
35350 V = DAG.getBitcast(MVT::i32, V);
35351 } else {
35352 V = DAG.getAnyExtOrTrunc(V, DL, MVT::i32);
35353 }
35354 return DAG.getNode(X86ISD::MMX_MOVW2D, DL, MVT::x86mmx, V);
35355 };
35356
35357 // Convert build vector ops to MMX data in the bottom elements.
35358 SmallVector<SDValue, 8> Ops;
35359
35360 // Broadcast - use (PUNPCKL+)PSHUFW to broadcast single element.
35361 if (Splat) {
35362 if (Splat.isUndef())
35363 return DAG.getUNDEF(MVT::x86mmx);
35364
35365 Splat = CreateMMXElement(Splat);
35366
35367 if (Subtarget.hasSSE1()) {
35368 // Unpack v8i8 to splat i8 elements to lowest 16-bits.
35369 if (NumElts == 8)
35370 Splat = DAG.getNode(
35371 ISD::INTRINSIC_WO_CHAIN, DL, MVT::x86mmx,
35372 DAG.getConstant(Intrinsic::x86_mmx_punpcklbw, DL, MVT::i32), Splat,
35373 Splat);
35374
35375 // Use PSHUFW to repeat 16-bit elements.
35376 unsigned ShufMask = (NumElts > 2 ? 0 : 0x44);
35377 return DAG.getNode(
35378 ISD::INTRINSIC_WO_CHAIN, DL, MVT::x86mmx,
35379 DAG.getTargetConstant(Intrinsic::x86_sse_pshuf_w, DL, MVT::i32),
35380 Splat, DAG.getTargetConstant(ShufMask, DL, MVT::i8));
35381 }
35382 Ops.append(NumElts, Splat);
35383 } else {
35384 for (unsigned i = 0; i != NumElts; ++i)
35385 Ops.push_back(CreateMMXElement(BV->getOperand(i)));
35386 }
35387
35388 // Use tree of PUNPCKLs to build up general MMX vector.
35389 while (Ops.size() > 1) {
35390 unsigned NumOps = Ops.size();
35391 unsigned IntrinOp =
35392 (NumOps == 2 ? Intrinsic::x86_mmx_punpckldq
35393 : (NumOps == 4 ? Intrinsic::x86_mmx_punpcklwd
35394 : Intrinsic::x86_mmx_punpcklbw));
35395 SDValue Intrin = DAG.getConstant(IntrinOp, DL, MVT::i32);
35396 for (unsigned i = 0; i != NumOps; i += 2)
35397 Ops[i / 2] = DAG.getNode(ISD::INTRINSIC_WO_CHAIN, DL, MVT::x86mmx, Intrin,
35398 Ops[i], Ops[i + 1]);
35399 Ops.resize(NumOps / 2);
35400 }
35401
35402 return Ops[0];
35403}
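
The PUNPCKL tree at the end of createMMXBuildVector is a standard pairwise reduction: each pass combines adjacent elements and halves the worklist until a single value is left. The skeleton of that loop, as a hypothetical generic sketch assuming a non-empty, power-of-two element count (2, 4 or 8 in the MMX case):

#include <cstddef>
#include <vector>

template <typename T, typename BinaryOp>
static T pairwiseReduce(std::vector<T> Ops, BinaryOp Combine) {
  while (Ops.size() > 1) {
    // Combine adjacent pairs in place, then shrink the worklist by half.
    for (std::size_t i = 0; i + 1 < Ops.size(); i += 2)
      Ops[i / 2] = Combine(Ops[i], Ops[i + 1]);
    Ops.resize(Ops.size() / 2);
  }
  return Ops.front();
}
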
35404
35405static SDValue combineBitcast(SDNode *N, SelectionDAG &DAG,
35406 TargetLowering::DAGCombinerInfo &DCI,
35407 const X86Subtarget &Subtarget) {
35408 SDValue N0 = N->getOperand(0);
35409 EVT VT = N->getValueType(0);
35410 EVT SrcVT = N0.getValueType();
35411
35412 // Try to match patterns such as
35413 // (i16 bitcast (v16i1 x))
35414 // ->
35415 // (i16 movmsk (16i8 sext (v16i1 x)))
35416 // before the setcc result is scalarized on subtargets that don't have legal
35417 // vxi1 types.
35418 if (DCI.isBeforeLegalize()) {
35419 SDLoc dl(N);
35420 if (SDValue V = combineBitcastvxi1(DAG, VT, N0, dl, Subtarget))
35421 return V;
35422
35423 // Recognize the IR pattern for the movmsk intrinsic under SSE1 before type
35424 // legalization destroys the v4i32 type.
35425 if (Subtarget.hasSSE1() && !Subtarget.hasSSE2() && SrcVT == MVT::v4i1 &&
35426 VT.isScalarInteger() && N0.getOpcode() == ISD::SETCC &&
35427 N0.getOperand(0).getValueType() == MVT::v4i32 &&
35428 ISD::isBuildVectorAllZeros(N0.getOperand(1).getNode()) &&
35429 cast<CondCodeSDNode>(N0.getOperand(2))->get() == ISD::SETLT) {
35430 SDValue N00 = N0.getOperand(0);
35431 // Only do this if we can avoid scalarizing the input.
35432 if (ISD::isNormalLoad(N00.getNode()) ||
35433 (N00.getOpcode() == ISD::BITCAST &&
35434 N00.getOperand(0).getValueType() == MVT::v4f32)) {
35435 SDValue V = DAG.getNode(X86ISD::MOVMSK, dl, MVT::i32,
35436 DAG.getBitcast(MVT::v4f32, N00));
35437 return DAG.getZExtOrTrunc(V, dl, VT);
35438 }
35439 }
35440
35441 // If this is a bitcast between a MVT::v4i1/v2i1 and an illegal integer
35442 // type, widen both sides to avoid a trip through memory.
35443 if ((VT == MVT::v4i1 || VT == MVT::v2i1) && SrcVT.isScalarInteger() &&
35444 Subtarget.hasAVX512()) {
35445 N0 = DAG.getNode(ISD::ANY_EXTEND, dl, MVT::i8, N0);
35446 N0 = DAG.getBitcast(MVT::v8i1, N0);
35447 return DAG.getNode(ISD::EXTRACT_SUBVECTOR, dl, VT, N0,
35448 DAG.getIntPtrConstant(0, dl));
35449 }
35450
35451 // If this is a bitcast between a MVT::v4i1/v2i1 and an illegal integer
35452 // type, widen both sides to avoid a trip through memory.
35453 if ((SrcVT == MVT::v4i1 || SrcVT == MVT::v2i1) && VT.isScalarInteger() &&
35454 Subtarget.hasAVX512()) {
35455 // Use zeros for the widening if we already have some zeroes. This can
35456 // allow SimplifyDemandedBits to remove scalar ANDs that may be down
35457 // stream of this.
35458 // FIXME: It might make sense to detect a concat_vectors with a mix of
35459 // zeroes and undef and turn it into insert_subvector for i1 vectors as
35460 // a separate combine. What we can't do is canonicalize the operands of
35461 // such a concat or we'll get into a loop with SimplifyDemandedBits.
35462 if (N0.getOpcode() == ISD::CONCAT_VECTORS) {
35463 SDValue LastOp = N0.getOperand(N0.getNumOperands() - 1);
35464 if (ISD::isBuildVectorAllZeros(LastOp.getNode())) {
35465 SrcVT = LastOp.getValueType();
35466 unsigned NumConcats = 8 / SrcVT.getVectorNumElements();
35467 SmallVector<SDValue, 4> Ops(N0->op_begin(), N0->op_end());
35468 Ops.resize(NumConcats, DAG.getConstant(0, dl, SrcVT));
35469 N0 = DAG.getNode(ISD::CONCAT_VECTORS, dl, MVT::v8i1, Ops);
35470 N0 = DAG.getBitcast(MVT::i8, N0);
35471 return DAG.getNode(ISD::TRUNCATE, dl, VT, N0);
35472 }
35473 }
35474
35475 unsigned NumConcats = 8 / SrcVT.getVectorNumElements();
35476 SmallVector<SDValue, 4> Ops(NumConcats, DAG.getUNDEF(SrcVT));
35477 Ops[0] = N0;
35478 N0 = DAG.getNode(ISD::CONCAT_VECTORS, dl, MVT::v8i1, Ops);
35479 N0 = DAG.getBitcast(MVT::i8, N0);
35480 return DAG.getNode(ISD::TRUNCATE, dl, VT, N0);
35481 }
35482 }
35483
35484 // Look for (i8 (bitcast (v8i1 (extract_subvector (v16i1 X), 0)))) and
35485 // replace with (i8 (trunc (i16 (bitcast (v16i1 X))))). This can occur
35486 // due to insert_subvector legalization on KNL. By promoting the copy to i16
35487 // we can help with known bits propagation from the vXi1 domain to the
35488 // scalar domain.
35489 if (VT == MVT::i8 && SrcVT == MVT::v8i1 && Subtarget.hasAVX512() &&
35490 !Subtarget.hasDQI() && N0.getOpcode() == ISD::EXTRACT_SUBVECTOR &&
35491 N0.getOperand(0).getValueType() == MVT::v16i1 &&
35492 isNullConstant(N0.getOperand(1)))
35493 return DAG.getNode(ISD::TRUNCATE, SDLoc(N), VT,
35494 DAG.getBitcast(MVT::i16, N0.getOperand(0)));
35495
35496 // Combine (bitcast (vbroadcast_load)) -> (vbroadcast_load). The memory VT
35497 // determines the number of bits loaded. Remaining bits are zero.
35498 if (N0.getOpcode() == X86ISD::VBROADCAST_LOAD && N0.hasOneUse() &&
35499 VT.getScalarSizeInBits() == SrcVT.getScalarSizeInBits()) {
35500 auto *BCast = cast<MemIntrinsicSDNode>(N0);
35501 SDVTList Tys = DAG.getVTList(VT, MVT::Other);
35502 SDValue Ops[] = { BCast->getChain(), BCast->getBasePtr() };
35503 SDValue ResNode =
35504 DAG.getMemIntrinsicNode(X86ISD::VBROADCAST_LOAD, SDLoc(N), Tys, Ops,
35505 VT.getVectorElementType(),
35506 BCast->getMemOperand());
35507 DAG.ReplaceAllUsesOfValueWith(SDValue(BCast, 1), ResNode.getValue(1));
35508 return ResNode;
35509 }
35510
35511 // Since MMX types are special and don't usually play with other vector types,
35512 // it's better to handle them early to be sure we emit efficient code by
35513 // avoiding store-load conversions.
35514 if (VT == MVT::x86mmx) {
35515 // Detect MMX constant vectors.
35516 APInt UndefElts;
35517 SmallVector<APInt, 1> EltBits;
35518 if (getTargetConstantBitsFromNode(N0, 64, UndefElts, EltBits)) {
35519 SDLoc DL(N0);
35520 // Handle zero-extension of i32 with MOVD.
35521 if (EltBits[0].countLeadingZeros() >= 32)
35522 return DAG.getNode(X86ISD::MMX_MOVW2D, DL, VT,
35523 DAG.getConstant(EltBits[0].trunc(32), DL, MVT::i32));
35524 // Else, bitcast to a double.
35525 // TODO - investigate supporting sext 32-bit immediates on x86_64.
35526 APFloat F64(APFloat::IEEEdouble(), EltBits[0]);
35527 return DAG.getBitcast(VT, DAG.getConstantFP(F64, DL, MVT::f64));
35528 }
35529
35530 // Detect bitcasts to x86mmx low word.
35531 if (N0.getOpcode() == ISD::BUILD_VECTOR &&
35532 (SrcVT == MVT::v2i32 || SrcVT == MVT::v4i16 || SrcVT == MVT::v8i8) &&
35533 N0.getOperand(0).getValueType() == SrcVT.getScalarType()) {
35534 bool LowUndef = true, AllUndefOrZero = true;
35535 for (unsigned i = 1, e = SrcVT.getVectorNumElements(); i != e; ++i) {
35536 SDValue Op = N0.getOperand(i);
35537 LowUndef &= Op.isUndef() || (i >= e/2);
35538 AllUndefOrZero &= (Op.isUndef() || isNullConstant(Op));
35539 }
35540 if (AllUndefOrZero) {
35541 SDValue N00 = N0.getOperand(0);
35542 SDLoc dl(N00);
35543 N00 = LowUndef ? DAG.getAnyExtOrTrunc(N00, dl, MVT::i32)
35544 : DAG.getZExtOrTrunc(N00, dl, MVT::i32);
35545 return DAG.getNode(X86ISD::MMX_MOVW2D, dl, VT, N00);
35546 }
35547 }
35548
35549 // Detect bitcasts of 64-bit build vectors and convert to a
35550 // MMX UNPCK/PSHUFW which takes MMX type inputs with the value in the
35551 // lowest element.
35552 if (N0.getOpcode() == ISD::BUILD_VECTOR &&
35553 (SrcVT == MVT::v2f32 || SrcVT == MVT::v2i32 || SrcVT == MVT::v4i16 ||
35554 SrcVT == MVT::v8i8))
35555 return createMMXBuildVector(cast<BuildVectorSDNode>(N0), DAG, Subtarget);
35556
35557 // Detect bitcasts between element or subvector extraction to x86mmx.
35558 if ((N0.getOpcode() == ISD::EXTRACT_VECTOR_ELT ||
35559 N0.getOpcode() == ISD::EXTRACT_SUBVECTOR) &&
35560 isNullConstant(N0.getOperand(1))) {
35561 SDValue N00 = N0.getOperand(0);
35562 if (N00.getValueType().is128BitVector())
35563 return DAG.getNode(X86ISD::MOVDQ2Q, SDLoc(N00), VT,
35564 DAG.getBitcast(MVT::v2i64, N00));
35565 }
35566
35567 // Detect bitcasts from FP_TO_SINT to x86mmx.
35568 if (SrcVT == MVT::v2i32 && N0.getOpcode() == ISD::FP_TO_SINT) {
35569 SDLoc DL(N0);
35570 SDValue Res = DAG.getNode(ISD::CONCAT_VECTORS, DL, MVT::v4i32, N0,
35571 DAG.getUNDEF(MVT::v2i32));
35572 return DAG.getNode(X86ISD::MOVDQ2Q, DL, VT,
35573 DAG.getBitcast(MVT::v2i64, Res));
35574 }
35575 }
35576
35577 // Try to remove a bitcast of constant vXi1 vector. We have to legalize
35578 // most of these to scalar anyway.
35579 if (Subtarget.hasAVX512() && VT.isScalarInteger() &&
35580 SrcVT.isVector() && SrcVT.getVectorElementType() == MVT::i1 &&
35581 ISD::isBuildVectorOfConstantSDNodes(N0.getNode())) {
35582 return combinevXi1ConstantToInteger(N0, DAG);
35583 }
35584
35585 if (Subtarget.hasAVX512() && SrcVT.isScalarInteger() &&
35586 VT.isVector() && VT.getVectorElementType() == MVT::i1 &&
35587 isa<ConstantSDNode>(N0)) {
35588 auto *C = cast<ConstantSDNode>(N0);
35589 if (C->isAllOnesValue())
35590 return DAG.getConstant(1, SDLoc(N0), VT);
35591 if (C->isNullValue())
35592 return DAG.getConstant(0, SDLoc(N0), VT);
35593 }
35594
35595 // Try to remove bitcasts from input and output of mask arithmetic to
35596 // remove GPR<->K-register crossings.
35597 if (SDValue V = combineCastedMaskArithmetic(N, DAG, DCI, Subtarget))
35598 return V;
35599
35600 // Convert a bitcasted integer logic operation that has one bitcasted
35601 // floating-point operand into a floating-point logic operation. This may
35602 // create a load of a constant, but that is cheaper than materializing the
35603 // constant in an integer register and transferring it to an SSE register or
35604 // transferring the SSE operand to integer register and back.
35605 unsigned FPOpcode;
35606 switch (N0.getOpcode()) {
35607 case ISD::AND: FPOpcode = X86ISD::FAND; break;
35608 case ISD::OR: FPOpcode = X86ISD::FOR; break;
35609 case ISD::XOR: FPOpcode = X86ISD::FXOR; break;
35610 default: return SDValue();
35611 }
35612
35613 if (!((Subtarget.hasSSE1() && VT == MVT::f32) ||
35614 (Subtarget.hasSSE2() && VT == MVT::f64)))
35615 return SDValue();
35616
35617 SDValue LogicOp0 = N0.getOperand(0);
35618 SDValue LogicOp1 = N0.getOperand(1);
35619 SDLoc DL0(N0);
35620
35621 // bitcast(logic(bitcast(X), Y)) --> logic'(X, bitcast(Y))
35622 if (N0.hasOneUse() && LogicOp0.getOpcode() == ISD::BITCAST &&
35623 LogicOp0.hasOneUse() && LogicOp0.getOperand(0).getValueType() == VT &&
35624 !isa<ConstantSDNode>(LogicOp0.getOperand(0))) {
35625 SDValue CastedOp1 = DAG.getBitcast(VT, LogicOp1);
35626 return DAG.getNode(FPOpcode, DL0, VT, LogicOp0.getOperand(0), CastedOp1);
35627 }
35628 // bitcast(logic(X, bitcast(Y))) --> logic'(bitcast(X), Y)
35629 if (N0.hasOneUse() && LogicOp1.getOpcode() == ISD::BITCAST &&
35630 LogicOp1.hasOneUse() && LogicOp1.getOperand(0).getValueType() == VT &&
35631 !isa<ConstantSDNode>(LogicOp1.getOperand(0))) {
35632 SDValue CastedOp0 = DAG.getBitcast(VT, LogicOp0);
35633 return DAG.getNode(FPOpcode, DL0, VT, LogicOp1.getOperand(0), CastedOp0);
35634 }
35635
35636 return SDValue();
35637}
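
The last combine above hinges on bitwise logic commuting with a bit-preserving cast, so an AND/OR/XOR can stay in the FP domain and avoid the XMM<->GPR round trip. A minimal standalone C++ sketch of that equivalence (illustrative only, not code from this file; bitsOf/floatOf are ad-hoc helpers standing in for ISD::BITCAST):

#include <cassert>
#include <cmath>
#include <cstdint>
#include <cstring>

// Ad-hoc scalar stand-ins for ISD::BITCAST between f32 and i32.
static uint32_t bitsOf(float F) { uint32_t I; std::memcpy(&I, &F, sizeof(I)); return I; }
static float floatOf(uint32_t I) { float F; std::memcpy(&F, &I, sizeof(F)); return F; }

int main() {
  float X = -12.5f;
  uint32_t SignClear = 0x7fffffffu;

  // bitcast(and(bitcast(X), C)) computed on the integer side...
  float ViaInteger = floatOf(bitsOf(X) & SignClear);

  // ...equals what the FP-domain logic op produces (here recognisable as
  // fabs), so the operation can stay in an SSE register.
  assert(ViaInteger == std::fabs(X));
  return 0;
}
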
35638
35639// Given an ABS node, detect the following pattern:
35640// (ABS (SUB (ZERO_EXTEND a), (ZERO_EXTEND b))).
35641// This is useful as it is the input into a SAD pattern.
35642static bool detectZextAbsDiff(const SDValue &Abs, SDValue &Op0, SDValue &Op1) {
35643 SDValue AbsOp1 = Abs->getOperand(0);
35644 if (AbsOp1.getOpcode() != ISD::SUB)
35645 return false;
35646
35647 Op0 = AbsOp1.getOperand(0);
35648 Op1 = AbsOp1.getOperand(1);
35649
35650 // Check if the operands of the sub are zero-extended from vectors of i8.
35651 if (Op0.getOpcode() != ISD::ZERO_EXTEND ||
35652 Op0.getOperand(0).getValueType().getVectorElementType() != MVT::i8 ||
35653 Op1.getOpcode() != ISD::ZERO_EXTEND ||
35654 Op1.getOperand(0).getValueType().getVectorElementType() != MVT::i8)
35655 return false;
35656
35657 return true;
35658}
35659
35660// Given two zexts of <k x i8> to <k x i32>, create a PSADBW of the inputs
35661// to these zexts.
35662static SDValue createPSADBW(SelectionDAG &DAG, const SDValue &Zext0,
35663 const SDValue &Zext1, const SDLoc &DL,
35664 const X86Subtarget &Subtarget) {
35665 // Find the appropriate width for the PSADBW.
35666 EVT InVT = Zext0.getOperand(0).getValueType();
35667 unsigned RegSize = std::max(128u, InVT.getSizeInBits());
35668
35669 // "Zero-extend" the i8 vectors. This is not a per-element zext, rather we
35670 // fill in the missing vector elements with 0.
35671 unsigned NumConcat = RegSize / InVT.getSizeInBits();
35672 SmallVector<SDValue, 16> Ops(NumConcat, DAG.getConstant(0, DL, InVT));
35673 Ops[0] = Zext0.getOperand(0);
35674 MVT ExtendedVT = MVT::getVectorVT(MVT::i8, RegSize / 8);
35675 SDValue SadOp0 = DAG.getNode(ISD::CONCAT_VECTORS, DL, ExtendedVT, Ops);
35676 Ops[0] = Zext1.getOperand(0);
35677 SDValue SadOp1 = DAG.getNode(ISD::CONCAT_VECTORS, DL, ExtendedVT, Ops);
35678
35679 // Actually build the SAD, split as 128/256/512 bits for SSE/AVX2/AVX512BW.
35680 auto PSADBWBuilder = [](SelectionDAG &DAG, const SDLoc &DL,
35681 ArrayRef<SDValue> Ops) {
35682 MVT VT = MVT::getVectorVT(MVT::i64, Ops[0].getValueSizeInBits() / 64);
35683 return DAG.getNode(X86ISD::PSADBW, DL, VT, Ops);
35684 };
35685 MVT SadVT = MVT::getVectorVT(MVT::i64, RegSize / 64);
35686 return SplitOpsAndApply(DAG, Subtarget, DL, SadVT, { SadOp0, SadOp1 },
35687 PSADBWBuilder);
35688}
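
As a reminder of what the generated node computes, each 64-bit PSADBW lane is the sum of absolute differences of eight unsigned bytes. A rough scalar model of one lane (an illustration, not this file's DAG code):

#include <array>
#include <cassert>
#include <cstdint>
#include <cstdlib>

// One 64-bit PSADBW lane: the sum of absolute differences of eight bytes.
static uint64_t psadbwLane(const std::array<uint8_t, 8> &A,
                           const std::array<uint8_t, 8> &B) {
  uint64_t Sum = 0;
  for (int i = 0; i != 8; ++i)
    Sum += static_cast<uint64_t>(std::abs(int(A[i]) - int(B[i])));
  return Sum;
}

int main() {
  std::array<uint8_t, 8> A = {10, 0, 255, 3, 4, 5, 6, 7};
  std::array<uint8_t, 8> B = {7, 2, 0, 3, 9, 5, 1, 7};
  assert(psadbwLane(A, B) == 270);  // 3 + 2 + 255 + 0 + 5 + 0 + 5 + 0
  return 0;
}
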
35689
35690// Attempt to replace a min/max v8i16/v16i8 horizontal reduction with
35691// PHMINPOSUW.
35692static SDValue combineHorizontalMinMaxResult(SDNode *Extract, SelectionDAG &DAG,
35693 const X86Subtarget &Subtarget) {
35694 // Bail without SSE41.
35695 if (!Subtarget.hasSSE41())
35696 return SDValue();
35697
35698 EVT ExtractVT = Extract->getValueType(0);
35699 if (ExtractVT != MVT::i16 && ExtractVT != MVT::i8)
35700 return SDValue();
35701
35702 // Check for SMAX/SMIN/UMAX/UMIN horizontal reduction patterns.
35703 ISD::NodeType BinOp;
35704 SDValue Src = DAG.matchBinOpReduction(
35705 Extract, BinOp, {ISD::SMAX, ISD::SMIN, ISD::UMAX, ISD::UMIN}, true);
35706 if (!Src)
35707 return SDValue();
35708
35709 EVT SrcVT = Src.getValueType();
35710 EVT SrcSVT = SrcVT.getScalarType();
35711 if (SrcSVT != ExtractVT || (SrcVT.getSizeInBits() % 128) != 0)
35712 return SDValue();
35713
35714 SDLoc DL(Extract);
35715 SDValue MinPos = Src;
35716
35717 // First, reduce the source down to 128-bit, applying BinOp to lo/hi.
35718 while (SrcVT.getSizeInBits() > 128) {
35719 unsigned NumElts = SrcVT.getVectorNumElements();
35720 unsigned NumSubElts = NumElts / 2;
35721 SrcVT = EVT::getVectorVT(*DAG.getContext(), SrcSVT, NumSubElts);
35722 unsigned SubSizeInBits = SrcVT.getSizeInBits();
35723 SDValue Lo = extractSubVector(MinPos, 0, DAG, DL, SubSizeInBits);
35724 SDValue Hi = extractSubVector(MinPos, NumSubElts, DAG, DL, SubSizeInBits);
35725 MinPos = DAG.getNode(BinOp, DL, SrcVT, Lo, Hi);
35726 }
35727 assert(((SrcVT == MVT::v8i16 && ExtractVT == MVT::i16) ||
35728 (SrcVT == MVT::v16i8 && ExtractVT == MVT::i8)) &&
35729 "Unexpected value type");
35730
35731 // PHMINPOSUW applies to UMIN(v8i16), for SMIN/SMAX/UMAX we must apply a mask
35732 // to flip the value accordingly.
35733 SDValue Mask;
35734 unsigned MaskEltsBits = ExtractVT.getSizeInBits();
35735 if (BinOp == ISD::SMAX)
35736 Mask = DAG.getConstant(APInt::getSignedMaxValue(MaskEltsBits), DL, SrcVT);
35737 else if (BinOp == ISD::SMIN)
35738 Mask = DAG.getConstant(APInt::getSignedMinValue(MaskEltsBits), DL, SrcVT);
35739 else if (BinOp == ISD::UMAX)
35740 Mask = DAG.getConstant(APInt::getAllOnesValue(MaskEltsBits), DL, SrcVT);
35741
35742 if (Mask)
35743 MinPos = DAG.getNode(ISD::XOR, DL, SrcVT, Mask, MinPos);
35744
35745 // For v16i8 cases we need to perform UMIN on pairs of byte elements,
35746 // shuffling each upper element down and inserting zeros. This means that the
35747 // v16i8 UMIN will leave the upper element as zero, performing zero-extension
35748 // ready for the PHMINPOS.
35749 if (ExtractVT == MVT::i8) {
35750 SDValue Upper = DAG.getVectorShuffle(
35751 SrcVT, DL, MinPos, DAG.getConstant(0, DL, MVT::v16i8),
35752 {1, 16, 3, 16, 5, 16, 7, 16, 9, 16, 11, 16, 13, 16, 15, 16});
35753 MinPos = DAG.getNode(ISD::UMIN, DL, SrcVT, MinPos, Upper);
35754 }
35755
35756 // Perform the PHMINPOS on a v8i16 vector.
35757 MinPos = DAG.getBitcast(MVT::v8i16, MinPos);
35758 MinPos = DAG.getNode(X86ISD::PHMINPOS, DL, MVT::v8i16, MinPos);
35759 MinPos = DAG.getBitcast(SrcVT, MinPos);
35760
35761 if (Mask)
35762 MinPos = DAG.getNode(ISD::XOR, DL, SrcVT, Mask, MinPos);
35763
35764 return DAG.getNode(ISD::EXTRACT_VECTOR_ELT, DL, ExtractVT, MinPos,
35765 DAG.getIntPtrConstant(0, DL));
35766}
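
The mask logic above leans on XOR identities that map SMIN/SMAX/UMAX onto the one reduction PHMINPOSUW actually provides (unsigned 16-bit minimum). A small scalar check of two of those identities, assuming two's-complement behaviour on the signed cast (illustrative only, not this file's code):

#include <algorithm>
#include <cassert>
#include <cstdint>

int main() {
  uint16_t A = 0x8001, B = 0x7ffe;

  // UMAX via UMIN of complements (mask = all ones): umax(a,b) == ~umin(~a,~b).
  uint16_t UMax = static_cast<uint16_t>(
      ~std::min<uint16_t>(static_cast<uint16_t>(~A), static_cast<uint16_t>(~B)));
  assert(UMax == std::max(A, B));

  // SMIN via UMIN after XOR with the signed-min bit pattern (0x8000), which
  // maps signed order onto unsigned order (two's complement assumed):
  //   smin(a,b) == umin(a ^ 0x8000, b ^ 0x8000) ^ 0x8000.
  int16_t SA = -5, SB = 7;
  uint16_t UA = static_cast<uint16_t>(SA) ^ 0x8000u;
  uint16_t UB = static_cast<uint16_t>(SB) ^ 0x8000u;
  int16_t SMin = static_cast<int16_t>(std::min(UA, UB) ^ 0x8000u);
  assert(SMin == std::min(SA, SB));
  return 0;
}
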
35767
35768// Attempt to replace an all_of/any_of/parity style horizontal reduction with a MOVMSK.
35769static SDValue combineHorizontalPredicateResult(SDNode *Extract,
35770 SelectionDAG &DAG,
35771 const X86Subtarget &Subtarget) {
35772 // Bail without SSE2.
35773 if (!Subtarget.hasSSE2())
35774 return SDValue();
35775
35776 EVT ExtractVT = Extract->getValueType(0);
35777 unsigned BitWidth = ExtractVT.getSizeInBits();
35778 if (ExtractVT != MVT::i64 && ExtractVT != MVT::i32 && ExtractVT != MVT::i16 &&
35779 ExtractVT != MVT::i8 && ExtractVT != MVT::i1)
35780 return SDValue();
35781
35782 // Check for OR(any_of)/AND(all_of)/XOR(parity) horizontal reduction patterns.
35783 ISD::NodeType BinOp;
35784 SDValue Match = DAG.matchBinOpReduction(Extract, BinOp, {ISD::OR, ISD::AND});
35785 if (!Match && ExtractVT == MVT::i1)
35786 Match = DAG.matchBinOpReduction(Extract, BinOp, {ISD::XOR});
35787 if (!Match)
35788 return SDValue();
35789
35790 // EXTRACT_VECTOR_ELT can require implicit extension of the vector element
35791 // which we can't support here for now.
35792 if (Match.getScalarValueSizeInBits() != BitWidth)
35793 return SDValue();
35794
35795 SDValue Movmsk;
35796 SDLoc DL(Extract);
35797 EVT MatchVT = Match.getValueType();
35798 unsigned NumElts = MatchVT.getVectorNumElements();
35799 unsigned MaxElts = Subtarget.hasInt256() ? 32 : 16;
35800 const TargetLowering &TLI = DAG.getTargetLoweringInfo();
35801
35802 if (ExtractVT == MVT::i1) {
35803 // Special case for (pre-legalization) vXi1 reductions.
35804 if (NumElts > 64 || !isPowerOf2_32(NumElts))
35805 return SDValue();
35806 if (TLI.isTypeLegal(MatchVT)) {
35807 // If this is a legal AVX512 predicate type then we can just bitcast.
35808 EVT MovmskVT = EVT::getIntegerVT(*DAG.getContext(), NumElts);
35809 Movmsk = DAG.getBitcast(MovmskVT, Match);
35810 } else {
35811 // Use combineBitcastvxi1 to create the MOVMSK.
35812 while (NumElts > MaxElts) {
35813 SDValue Lo, Hi;
35814 std::tie(Lo, Hi) = DAG.SplitVector(Match, DL);
35815 Match = DAG.getNode(BinOp, DL, Lo.getValueType(), Lo, Hi);
35816 NumElts /= 2;
35817 }
35818 EVT MovmskVT = EVT::getIntegerVT(*DAG.getContext(), NumElts);
35819 Movmsk = combineBitcastvxi1(DAG, MovmskVT, Match, DL, Subtarget);
35820 }
35821 if (!Movmsk)
35822 return SDValue();
35823 Movmsk = DAG.getZExtOrTrunc(Movmsk, DL, NumElts > 32 ? MVT::i64 : MVT::i32);
35824 } else {
35825 // Bail with AVX512VL (which uses predicate registers).
35826 if (Subtarget.hasVLX())
35827 return SDValue();
35828
35829 unsigned MatchSizeInBits = Match.getValueSizeInBits();
35830 if (!(MatchSizeInBits == 128 ||
35831 (MatchSizeInBits == 256 && Subtarget.hasAVX())))
35832 return SDValue();
35833
35834 // Make sure this isn't a vector of 1 element. The perf win from using
35835 // MOVMSK diminishes with fewer elements in the reduction, but it is
35836 // generally better to get the comparison over to the GPRs as soon as
35837 // possible to reduce the number of vector ops.
35838 if (Match.getValueType().getVectorNumElements() < 2)
35839 return SDValue();
35840
35841 // Check that we are extracting a reduction of all sign bits.
35842 if (DAG.ComputeNumSignBits(Match) != BitWidth)
35843 return SDValue();
35844
35845 if (MatchSizeInBits == 256 && BitWidth < 32 && !Subtarget.hasInt256()) {
35846 SDValue Lo, Hi;
35847 std::tie(Lo, Hi) = DAG.SplitVector(Match, DL);
35848 Match = DAG.getNode(BinOp, DL, Lo.getValueType(), Lo, Hi);
35849 MatchSizeInBits = Match.getValueSizeInBits();
35850 }
35851
35852 // For 32/64 bit comparisons use MOVMSKPS/MOVMSKPD, else PMOVMSKB.
35853 MVT MaskSrcVT;
35854 if (64 == BitWidth || 32 == BitWidth)
35855 MaskSrcVT = MVT::getVectorVT(MVT::getFloatingPointVT(BitWidth),
35856 MatchSizeInBits / BitWidth);
35857 else
35858 MaskSrcVT = MVT::getVectorVT(MVT::i8, MatchSizeInBits / 8);
35859
35860 SDValue BitcastLogicOp = DAG.getBitcast(MaskSrcVT, Match);
35861 Movmsk = getPMOVMSKB(DL, BitcastLogicOp, DAG, Subtarget);
35862 NumElts = MaskSrcVT.getVectorNumElements();
35863 }
35864 assert((NumElts <= 32 || NumElts == 64) &&
35865 "Not expecting more than 64 elements");
35866
35867 MVT CmpVT = NumElts == 64 ? MVT::i64 : MVT::i32;
35868 if (BinOp == ISD::XOR) {
35869 // parity -> (AND (CTPOP(MOVMSK X)), 1)
35870 SDValue Mask = DAG.getConstant(1, DL, CmpVT);
35871 SDValue Result = DAG.getNode(ISD::CTPOP, DL, CmpVT, Movmsk);
35872 Result = DAG.getNode(ISD::AND, DL, CmpVT, Result, Mask);
35873 return DAG.getZExtOrTrunc(Result, DL, ExtractVT);
35874 }
35875
35876 SDValue CmpC;
35877 ISD::CondCode CondCode;
35878 if (BinOp == ISD::OR) {
35879 // any_of -> MOVMSK != 0
35880 CmpC = DAG.getConstant(0, DL, CmpVT);
35881 CondCode = ISD::CondCode::SETNE;
35882 } else {
35883 // all_of -> MOVMSK == ((1 << NumElts) - 1)
35884 CmpC = DAG.getConstant(APInt::getLowBitsSet(CmpVT.getSizeInBits(), NumElts),
35885 DL, CmpVT);
35886 CondCode = ISD::CondCode::SETEQ;
35887 }
35888
35889 // The setcc produces an i8 of 0/1, so extend that to the result width and
35890 // negate to get the final 0/-1 mask value.
35891 EVT SetccVT =
35892 TLI.getSetCCResultType(DAG.getDataLayout(), *DAG.getContext(), CmpVT);
35893 SDValue Setcc = DAG.getSetCC(DL, SetccVT, Movmsk, CmpC, CondCode);
35894 SDValue Zext = DAG.getZExtOrTrunc(Setcc, DL, ExtractVT);
35895 SDValue Zero = DAG.getConstant(0, DL, ExtractVT);
35896 return DAG.getNode(ISD::SUB, DL, ExtractVT, Zero, Zext);
35897}
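
Once MOVMSK has packed one bit per element into a scalar, the any_of/all_of/parity reductions collapse to the plain integer comparisons this combine emits. A standalone model of those three checks (not this file's code; the helper names are made up for illustration):

#include <bitset>
#include <cassert>
#include <cstdint>

// Scalar stand-ins for the three reductions once MOVMSK has packed one bit
// per vector element into Msk (NumElts <= 32 assumed here).
static bool anyOf(uint32_t Msk) { return Msk != 0; }
static bool allOf(uint32_t Msk, unsigned NumElts) {
  uint32_t Full = NumElts == 32 ? 0xffffffffu : ((1u << NumElts) - 1);
  return Msk == Full;
}
static bool parityOf(uint32_t Msk) { return std::bitset<32>(Msk).count() & 1; }

int main() {
  uint32_t Msk = 0b00100110;  // an 8-element compare where lanes 1, 2, 5 hit
  assert(anyOf(Msk));         // OR reduction:  MOVMSK != 0
  assert(!allOf(Msk, 8));     // AND reduction: MOVMSK == (1 << NumElts) - 1
  assert(parityOf(Msk));      // XOR reduction: CTPOP(MOVMSK) & 1
  return 0;
}
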
35898
35899static SDValue combineBasicSADPattern(SDNode *Extract, SelectionDAG &DAG,
35900 const X86Subtarget &Subtarget) {
35901 // PSADBW is only supported on SSE2 and up.
35902 if (!Subtarget.hasSSE2())
35903 return SDValue();
35904
35906 // Verify the type we're extracting from is an integer type wider than i16.
35906 EVT VT = Extract->getOperand(0).getValueType();
35907 if (!VT.isSimple() || !(VT.getVectorElementType().getSizeInBits() > 16))
35908 return SDValue();
35909
35910 unsigned RegSize = 128;
35911 if (Subtarget.useBWIRegs())
35912 RegSize = 512;
35913 else if (Subtarget.hasAVX())
35914 RegSize = 256;
35915
35916 // We handle up to v16i* for SSE2 / v32i* for AVX / v64i* for AVX512.
35917 // TODO: We should be able to handle larger vectors by splitting them before
35918 // feeding them into several SADs, and then reducing over those.
35919 if (RegSize / VT.getVectorNumElements() < 8)
35920 return SDValue();
35921
35922 // Match shuffle + add pyramid.
35923 ISD::NodeType BinOp;
35924 SDValue Root = DAG.matchBinOpReduction(Extract, BinOp, {ISD::ADD});
35925
35926 // The operand is expected to be zero-extended from i8
35927 // (verified in detectZextAbsDiff).
35928 // In order to convert to i64 and above, an additional any/zero/sign
35929 // extend is expected.
35930 // The zero extend from 32 bits has no mathematical effect on the result.
35931 // The sign extend is also effectively a zero extend
35932 // (it extends the sign bit, which is zero).
35933 // So it is correct to skip the sign/zero extend instruction.
35934 if (Root && (Root.getOpcode() == ISD::SIGN_EXTEND ||
35935 Root.getOpcode() == ISD::ZERO_EXTEND ||
35936 Root.getOpcode() == ISD::ANY_EXTEND))
35937 Root = Root.getOperand(0);
35938
35939 // If there was a match, we want Root to be a select that is the root of an
35940 // abs-diff pattern.
35941 if (!Root || Root.getOpcode() != ISD::ABS)
35942 return SDValue();
35943
35944 // Check whether we have an abs-diff pattern feeding into the select.
35945 SDValue Zext0, Zext1;
35946 if (!detectZextAbsDiff(Root, Zext0, Zext1))
35947 return SDValue();
35948
35949 // Create the SAD instruction.
35950 SDLoc DL(Extract);
35951 SDValue SAD = createPSADBW(DAG, Zext0, Zext1, DL, Subtarget);
35952
35953 // If the original vector was wider than 8 elements, sum over the results
35954 // in the SAD vector.
35955 unsigned Stages = Log2_32(VT.getVectorNumElements());
35956 MVT SadVT = SAD.getSimpleValueType();
35957 if (Stages > 3) {
35958 unsigned SadElems = SadVT.getVectorNumElements();
35959
35960 for(unsigned i = Stages - 3; i > 0; --i) {
35961 SmallVector<int, 16> Mask(SadElems, -1);
35962 for(unsigned j = 0, MaskEnd = 1 << (i - 1); j < MaskEnd; ++j)
35963 Mask[j] = MaskEnd + j;
35964
35965 SDValue Shuffle =
35966 DAG.getVectorShuffle(SadVT, DL, SAD, DAG.getUNDEF(SadVT), Mask);
35967 SAD = DAG.getNode(ISD::ADD, DL, SadVT, SAD, Shuffle);
35968 }
35969 }
35970
35971 MVT Type = Extract->getSimpleValueType(0);
35972 unsigned TypeSizeInBits = Type.getSizeInBits();
35973 // Return the lowest TypeSizeInBits bits.
35974 MVT ResVT = MVT::getVectorVT(Type, SadVT.getSizeInBits() / TypeSizeInBits);
35975 SAD = DAG.getBitcast(ResVT, SAD);
35976 return DAG.getNode(ISD::EXTRACT_VECTOR_ELT, DL, Type, SAD,
35977 Extract->getOperand(1));
35978}
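
The shuffle+add pyramid above repeatedly folds the upper lanes of the SAD vector onto the lower lanes, so after log2 steps lane 0 holds the total. A scalar sketch of that reduction shape (illustrative only):

#include <cassert>
#include <cstdint>
#include <vector>

// Each step adds the upper half of the lane vector onto the lower half; after
// log2(#lanes) steps lane 0 carries the total, mirroring the shuffle+add tree.
static uint64_t reduceLanes(std::vector<uint64_t> Lanes) {
  for (size_t Half = Lanes.size() / 2; Half >= 1; Half /= 2)
    for (size_t i = 0; i != Half; ++i)
      Lanes[i] += Lanes[i + Half];
  return Lanes[0];
}

int main() {
  assert(reduceLanes({1, 2, 3, 4, 5, 6, 7, 8}) == 36);
  return 0;
}
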
35979
35980// Attempt to peek through a target shuffle and extract the scalar from the
35981// source.
35982static SDValue combineExtractWithShuffle(SDNode *N, SelectionDAG &DAG,
35983 TargetLowering::DAGCombinerInfo &DCI,
35984 const X86Subtarget &Subtarget) {
35985 if (DCI.isBeforeLegalizeOps())
35986 return SDValue();
35987
35988 SDLoc dl(N);
35989 SDValue Src = N->getOperand(0);
35990 SDValue Idx = N->getOperand(1);
35991
35992 EVT VT = N->getValueType(0);
35993 EVT SrcVT = Src.getValueType();
35994 EVT SrcSVT = SrcVT.getVectorElementType();
35995 unsigned NumSrcElts = SrcVT.getVectorNumElements();
35996
35997 // Don't attempt this for boolean mask vectors or unknown extraction indices.
35998 if (SrcSVT == MVT::i1 || !isa<ConstantSDNode>(Idx))
35999 return SDValue();
36000
36001 SDValue SrcBC = peekThroughBitcasts(Src);
36002
36003 // Handle extract(broadcast(scalar_value)); it doesn't matter what the index is.
36004 if (X86ISD::VBROADCAST == SrcBC.getOpcode()) {
36005 SDValue SrcOp = SrcBC.getOperand(0);
36006 if (SrcOp.getValueSizeInBits() == VT.getSizeInBits())
36007 return DAG.getBitcast(VT, SrcOp);
36008 }
36009
36010 // If we're extracting a single element from a broadcast load and there are
36011 // no other users, just create a single load.
36012 if (SrcBC.getOpcode() == X86ISD::VBROADCAST_LOAD && SrcBC.hasOneUse()) {
36013 auto *MemIntr = cast<MemIntrinsicSDNode>(SrcBC);
36014 unsigned SrcBCWidth = SrcBC.getScalarValueSizeInBits();
36015 if (MemIntr->getMemoryVT().getSizeInBits() == SrcBCWidth &&
36016 VT.getSizeInBits() == SrcBCWidth) {
36017 SDValue Load = DAG.getLoad(VT, dl, MemIntr->getChain(),
36018 MemIntr->getBasePtr(),
36019 MemIntr->getPointerInfo(),
36020 MemIntr->getAlignment(),
36021 MemIntr->getMemOperand()->getFlags());
36022 DAG.ReplaceAllUsesOfValueWith(SDValue(MemIntr, 1), Load.getValue(1));
36023 return Load;
36024 }
36025 }
36026
36027 // Handle extract(truncate(x)) for 0'th index.
36028 // TODO: Treat this as a faux shuffle?
36029 // TODO: When can we use this for general indices?
36030 if (ISD::TRUNCATE == Src.getOpcode() && SrcVT.is128BitVector() &&
36031 isNullConstant(Idx)) {
36032 Src = extract128BitVector(Src.getOperand(0), 0, DAG, dl);
36033 Src = DAG.getBitcast(SrcVT, Src);
36034 return DAG.getNode(N->getOpcode(), dl, VT, Src, Idx);
36035 }
36036
36037 // Resolve the target shuffle inputs and mask.
36038 SmallVector<int, 16> Mask;
36039 SmallVector<SDValue, 2> Ops;
36040 if (!getTargetShuffleInputs(SrcBC, Ops, Mask, DAG))
36041 return SDValue();
36042
36043 // Attempt to narrow/widen the shuffle mask to the correct size.
36044 if (Mask.size() != NumSrcElts) {
36045 if ((NumSrcElts % Mask.size()) == 0) {
36046 SmallVector<int, 16> ScaledMask;
36047 int Scale = NumSrcElts / Mask.size();
36048 scaleShuffleMask<int>(Scale, Mask, ScaledMask);
36049 Mask = std::move(ScaledMask);
36050 } else if ((Mask.size() % NumSrcElts) == 0) {
36051 // Simplify Mask based on the demanded element.
36052 int ExtractIdx = (int)N->getConstantOperandVal(1);
36053 int Scale = Mask.size() / NumSrcElts;
36054 int Lo = Scale * ExtractIdx;
36055 int Hi = Scale * (ExtractIdx + 1);
36056 for (int i = 0, e = (int)Mask.size(); i != e; ++i)
36057 if (i < Lo || Hi <= i)
36058 Mask[i] = SM_SentinelUndef;
36059
36060 SmallVector<int, 16> WidenedMask;
36061 while (Mask.size() > NumSrcElts &&
36062 canWidenShuffleElements(Mask, WidenedMask))
36063 Mask = std::move(WidenedMask);
36064 // TODO - investigate support for wider shuffle masks with known upper
36065 // undef/zero elements for implicit zero-extension.
36066 }
36067 }
36068
36069 // Check if narrowing/widening failed.
36070 if (Mask.size() != NumSrcElts)
36071 return SDValue();
36072
36073 int SrcIdx = Mask[N->getConstantOperandVal(1)];
36074
36075 // If the shuffle source element is undef/zero then we can just accept it.
36076 if (SrcIdx == SM_SentinelUndef)
36077 return DAG.getUNDEF(VT);
36078
36079 if (SrcIdx == SM_SentinelZero)
36080 return VT.isFloatingPoint() ? DAG.getConstantFP(0.0, dl, VT)
36081 : DAG.getConstant(0, dl, VT);
36082
36083 SDValue SrcOp = Ops[SrcIdx / Mask.size()];
36084 SrcIdx = SrcIdx % Mask.size();
36085
36086 // We can only extract other elements from 128-bit vectors and in certain
36087 // circumstances, depending on SSE-level.
36088 // TODO: Investigate using extract_subvector for larger vectors.
36089 // TODO: Investigate float/double extraction if it will be just stored.
36090 if ((SrcVT == MVT::v4i32 || SrcVT == MVT::v2i64) &&
36091 ((SrcIdx == 0 && Subtarget.hasSSE2()) || Subtarget.hasSSE41())) {
36092 assert(SrcSVT == VT && "Unexpected extraction type");
36093 SrcOp = DAG.getBitcast(SrcVT, SrcOp);
36094 return DAG.getNode(ISD::EXTRACT_VECTOR_ELT, dl, SrcSVT, SrcOp,
36095 DAG.getIntPtrConstant(SrcIdx, dl));
36096 }
36097
36098 if ((SrcVT == MVT::v8i16 && Subtarget.hasSSE2()) ||
36099 (SrcVT == MVT::v16i8 && Subtarget.hasSSE41())) {
36100 assert(VT.getSizeInBits() >= SrcSVT.getSizeInBits() &&
36101 "Unexpected extraction type");
36102 unsigned OpCode = (SrcVT == MVT::v8i16 ? X86ISD::PEXTRW : X86ISD::PEXTRB);
36103 SrcOp = DAG.getBitcast(SrcVT, SrcOp);
36104 SDValue ExtOp = DAG.getNode(OpCode, dl, MVT::i32, SrcOp,
36105 DAG.getIntPtrConstant(SrcIdx, dl));
36106 return DAG.getZExtOrTrunc(ExtOp, dl, VT);
36107 }
36108
36109 return SDValue();
36110}
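
The index bookkeeping above boils down to: extracting element Idx from shuffle(Op0, Op1, Mask) is extracting element SrcIdx % NumElts from Ops[SrcIdx / NumElts], with SrcIdx = Mask[Idx]. A tiny standalone model of that remapping (not this file's code):

#include <cassert>
#include <vector>

int main() {
  const unsigned NumElts = 4;
  std::vector<int> Op0 = {10, 11, 12, 13}, Op1 = {20, 21, 22, 23};
  std::vector<int> Mask = {6, 1, 4, 3};  // mask indices 4..7 refer to Op1

  unsigned Idx = 0;                      // element we want to extract
  int SrcIdx = Mask[Idx];                // = 6
  const std::vector<int> &SrcOp = (SrcIdx / NumElts) ? Op1 : Op0;
  // shuffle(Op0, Op1, Mask)[0] would be element 6 of the concatenation, i.e.
  // Op1[2]; the combine extracts it directly from that shuffle source.
  assert(SrcOp[SrcIdx % NumElts] == 22);
  return 0;
}
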
36111
36112/// Extracting a scalar FP value from vector element 0 is free, so extract each
36113/// operand first, then perform the math as a scalar op.
36114static SDValue scalarizeExtEltFP(SDNode *ExtElt, SelectionDAG &DAG) {
36115 assert(ExtElt->getOpcode() == ISD::EXTRACT_VECTOR_ELT && "Expected extract");
36116 SDValue Vec = ExtElt->getOperand(0);
36117 SDValue Index = ExtElt->getOperand(1);
36118 EVT VT = ExtElt->getValueType(0);
36119 EVT VecVT = Vec.getValueType();
36120
36121 // TODO: If this is a unary/expensive/expand op, allow extraction from a
36122 // non-zero element because the shuffle+scalar op will be cheaper?
36123 if (!Vec.hasOneUse() || !isNullConstant(Index) || VecVT.getScalarType() != VT)
36124 return SDValue();
36125
36126 // Vector FP compares don't fit the pattern of FP math ops (propagate, not
36127 // extract, the condition code), so deal with those as a special-case.
36128 if (Vec.getOpcode() == ISD::SETCC && VT == MVT::i1) {
36129 EVT OpVT = Vec.getOperand(0).getValueType().getScalarType();
36130 if (OpVT != MVT::f32 && OpVT != MVT::f64)
36131 return SDValue();
36132
36133 // extract (setcc X, Y, CC), 0 --> setcc (extract X, 0), (extract Y, 0), CC
36134 SDLoc DL(ExtElt);
36135 SDValue Ext0 = DAG.getNode(ISD::EXTRACT_VECTOR_ELT, DL, OpVT,
36136 Vec.getOperand(0), Index);
36137 SDValue Ext1 = DAG.getNode(ISD::EXTRACT_VECTOR_ELT, DL, OpVT,
36138 Vec.getOperand(1), Index);
36139 return DAG.getNode(Vec.getOpcode(), DL, VT, Ext0, Ext1, Vec.getOperand(2));
36140 }
36141
36142 if (VT != MVT::f32 && VT != MVT::f64)
36143 return SDValue();
36144
36145 // Vector FP selects don't fit the pattern of FP math ops (because the
36146 // condition has a different type and we have to change the opcode), so deal
36147 // with those here.
36148 // FIXME: This is restricted to pre type legalization by ensuring the setcc
36149 // has i1 elements. If we loosen this we need to convert vector bool to a
36150 // scalar bool.
36151 if (Vec.getOpcode() == ISD::VSELECT &&
36152 Vec.getOperand(0).getOpcode() == ISD::SETCC &&
36153 Vec.getOperand(0).getValueType().getScalarType() == MVT::i1 &&
36154 Vec.getOperand(0).getOperand(0).getValueType() == VecVT) {
36155 // ext (sel Cond, X, Y), 0 --> sel (ext Cond, 0), (ext X, 0), (ext Y, 0)
36156 SDLoc DL(ExtElt);
36157 SDValue Ext0 = DAG.getNode(ISD::EXTRACT_VECTOR_ELT, DL,
36158 Vec.getOperand(0).getValueType().getScalarType(),
36159 Vec.getOperand(0), Index);
36160 SDValue Ext1 = DAG.getNode(ISD::EXTRACT_VECTOR_ELT, DL, VT,
36161 Vec.getOperand(1), Index);
36162 SDValue Ext2 = DAG.getNode(ISD::EXTRACT_VECTOR_ELT, DL, VT,
36163 Vec.getOperand(2), Index);
36164 return DAG.getNode(ISD::SELECT, DL, VT, Ext0, Ext1, Ext2);
36165 }
36166
36167 // TODO: This switch could include FNEG and the x86-specific FP logic ops
36168 // (FAND, FANDN, FOR, FXOR). But that may require enhancements to avoid
36169 // missed load folding and fma+fneg combining.
36170 switch (Vec.getOpcode()) {
36171 case ISD::FMA: // Begin 3 operands
36172 case ISD::FMAD:
36173 case ISD::FADD: // Begin 2 operands
36174 case ISD::FSUB:
36175 case ISD::FMUL:
36176 case ISD::FDIV:
36177 case ISD::FREM:
36178 case ISD::FCOPYSIGN:
36179 case ISD::FMINNUM:
36180 case ISD::FMAXNUM:
36181 case ISD::FMINNUM_IEEE:
36182 case ISD::FMAXNUM_IEEE:
36183 case ISD::FMAXIMUM:
36184 case ISD::FMINIMUM:
36185 case X86ISD::FMAX:
36186 case X86ISD::FMIN:
36187 case ISD::FABS: // Begin 1 operand
36188 case ISD::FSQRT:
36189 case ISD::FRINT:
36190 case ISD::FCEIL:
36191 case ISD::FTRUNC:
36192 case ISD::FNEARBYINT:
36193 case ISD::FROUND:
36194 case ISD::FFLOOR:
36195 case X86ISD::FRCP:
36196 case X86ISD::FRSQRT: {
36197 // extract (fp X, Y, ...), 0 --> fp (extract X, 0), (extract Y, 0), ...
36198 SDLoc DL(ExtElt);
36199 SmallVector<SDValue, 4> ExtOps;
36200 for (SDValue Op : Vec->ops())
36201 ExtOps.push_back(DAG.getNode(ISD::EXTRACT_VECTOR_ELT, DL, VT, Op, Index));
36202 return DAG.getNode(Vec.getOpcode(), DL, VT, ExtOps);
36203 }
36204 default:
36205 return SDValue();
36206 }
36207 llvm_unreachable("All opcodes should return within switch");
36208}
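
The scalarization is justified because lane-wise FP math commutes with extracting lane 0: extract(op(X, Y), 0) == op(extract(X, 0), extract(Y, 0)), and extracting element 0 is free on x86. A minimal model over a 4-element array (illustrative only):

#include <array>
#include <cassert>

using V4 = std::array<double, 4>;

// Reference lane-wise add; any of the opcodes listed above behaves the same
// way with respect to lane 0.
static V4 vadd(const V4 &X, const V4 &Y) {
  V4 R = {};
  for (int i = 0; i != 4; ++i)
    R[i] = X[i] + Y[i];
  return R;
}

int main() {
  V4 X = {1.5, 2.0, 3.0, 4.0}, Y = {0.25, 1.0, 1.0, 1.0};
  // extract(fadd(X, Y), 0) == fadd(extract(X, 0), extract(Y, 0))
  assert(vadd(X, Y)[0] == X[0] + Y[0]);
  return 0;
}
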
36209
36210/// Try to convert a vector reduction sequence composed of binops and shuffles
36211/// into horizontal ops.
36212static SDValue combineReductionToHorizontal(SDNode *ExtElt, SelectionDAG &DAG,
36213 const X86Subtarget &Subtarget) {
36214 assert(ExtElt->getOpcode() == ISD::EXTRACT_VECTOR_ELT && "Unexpected caller");
36215
36216 ISD::NodeType Opc;
36217 SDValue Rdx =
36218 DAG.matchBinOpReduction(ExtElt, Opc, {ISD::ADD, ISD::FADD}, true);
36219 if (!Rdx)
36220 return SDValue();
36221
36222 SDValue Index = ExtElt->getOperand(1);
36223 assert(isNullConstant(Index) &&
36224 "Reduction doesn't end in an extract from index 0");
36225
36226 EVT VT = ExtElt->getValueType(0);
36227 EVT VecVT = Rdx.getValueType();
36228 if (VecVT.getScalarType() != VT)
36229 return SDValue();
36230
36231 SDLoc DL(ExtElt);
36232
36233 // vXi8 reduction - sub 128-bit vector.
36234 if (VecVT == MVT::v4i8 || VecVT == MVT::v8i8) {
36235 if (VecVT == MVT::v4i8) {
36236 // Pad with zero.
36237 if (Subtarget.hasSSE41()) {
36238 Rdx = DAG.getBitcast(MVT::i32, Rdx);
36239 Rdx = DAG.getNode(ISD::INSERT_VECTOR_ELT, DL, MVT::v4i32,
36240 DAG.getConstant(0, DL, MVT::v4i32), Rdx,
36241 DAG.getIntPtrConstant(0, DL));
36242 Rdx = DAG.getBitcast(MVT::v16i8, Rdx);
36243 } else {
36244 Rdx = DAG.getNode(ISD::CONCAT_VECTORS, DL, MVT::v8i8, Rdx,
36245 DAG.getConstant(0, DL, VecVT));
36246 }
36247 }
36248 if (Rdx.getValueType() == MVT::v8i8) {
36249 // Pad with undef.
36250 Rdx = DAG.getNode(ISD::CONCAT_VECTORS, DL, MVT::v16i8, Rdx,
36251 DAG.getUNDEF(MVT::v8i8));
36252 }
36253 Rdx = DAG.getNode(X86ISD::PSADBW, DL, MVT::v2i64, Rdx,
36254 DAG.getConstant(0, DL, MVT::v16i8));
36255 Rdx = DAG.getBitcast(MVT::v16i8, Rdx);
36256 return DAG.getNode(ISD::EXTRACT_VECTOR_ELT, DL, VT, Rdx, Index);
36257 }
36258
36259 // Must be a >=128-bit vector with pow2 elements.
36260 if ((VecVT.getSizeInBits() % 128) != 0 ||
36261 !isPowerOf2_32(VecVT.getVectorNumElements()))
36262 return SDValue();
36263
36264 // vXi8 reduction - sum lo/hi halves then use PSADBW.
36265 if (VT == MVT::i8) {
36266 while (Rdx.getValueSizeInBits() > 128) {
36267 unsigned HalfSize = VecVT.getSizeInBits() / 2;
36268 unsigned HalfElts = VecVT.getVectorNumElements() / 2;
36269 SDValue Lo = extractSubVector(Rdx, 0, DAG, DL, HalfSize);
36270 SDValue Hi = extractSubVector(Rdx, HalfElts, DAG, DL, HalfSize);
36271 Rdx = DAG.getNode(ISD::ADD, DL, Lo.getValueType(), Lo, Hi);
36272 VecVT = Rdx.getValueType();
36273 }
36274 assert(VecVT == MVT::v16i8 && "v16i8 reduction expected");
36275
36276 SDValue Hi = DAG.getVectorShuffle(
36277 MVT::v16i8, DL, Rdx, Rdx,
36278 {8, 9, 10, 11, 12, 13, 14, 15, -1, -1, -1, -1, -1, -1, -1, -1});
36279 Rdx = DAG.getNode(ISD::ADD, DL, MVT::v16i8, Rdx, Hi);
36280 Rdx = DAG.getNode(X86ISD::PSADBW, DL, MVT::v2i64, Rdx,
36281 getZeroVector(MVT::v16i8, Subtarget, DAG, DL));
36282 Rdx = DAG.getBitcast(MVT::v16i8, Rdx);
36283 return DAG.getNode(ISD::EXTRACT_VECTOR_ELT, DL, VT, Rdx, Index);
36284 }
36285
36286 // Only use (F)HADD opcodes if they aren't microcoded or when minimizing code size.
36287 bool OptForSize = DAG.getMachineFunction().getFunction().hasOptSize();
36288 if (!Subtarget.hasFastHorizontalOps() && !OptForSize)
36289 return SDValue();
36290
36291 unsigned HorizOpcode = Opc == ISD::ADD ? X86ISD::HADD : X86ISD::FHADD;
36292
36293 // 256-bit horizontal instructions operate on 128-bit chunks rather than
36294 // across the whole vector, so we need an extract + hop preliminary stage.
36295 // This is the only step where the operands of the hop are not the same value.
36296 // TODO: We could extend this to handle 512-bit or even longer vectors.
36297 if (((VecVT == MVT::v16i16 || VecVT == MVT::v8i32) && Subtarget.hasSSSE3()) ||
36298 ((VecVT == MVT::v8f32 || VecVT == MVT::v4f64) && Subtarget.hasSSE3())) {
36299 unsigned NumElts = VecVT.getVectorNumElements();
36300 SDValue Hi = extract128BitVector(Rdx, NumElts / 2, DAG, DL);
36301 SDValue Lo = extract128BitVector(Rdx, 0, DAG, DL);
36302 Rdx = DAG.getNode(HorizOpcode, DL, Lo.getValueType(), Hi, Lo);
36303 VecVT = Rdx.getValueType();
36304 }
36305 if (!((VecVT == MVT::v8i16 || VecVT == MVT::v4i32) && Subtarget.hasSSSE3()) &&
36306 !((VecVT == MVT::v4f32 || VecVT == MVT::v2f64) && Subtarget.hasSSE3()))
36307 return SDValue();
36308
36309 // extract (add (shuf X), X), 0 --> extract (hadd X, X), 0
36310 unsigned ReductionSteps = Log2_32(VecVT.getVectorNumElements());
36311 for (unsigned i = 0; i != ReductionSteps; ++i)
36312 Rdx = DAG.getNode(HorizOpcode, DL, VecVT, Rdx, Rdx);
36313
36314 return DAG.getNode(ISD::EXTRACT_VECTOR_ELT, DL, VT, Rdx, Index);
36315}
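
The final loop relies on (F)HADD interleaving pairwise sums of its two operands, so feeding the same value into both sides log2(NumElts) times leaves the full sum in element 0 (the 256-bit pre-step above deals with HADD working per 128-bit chunk). A scalar sketch of a 4-element HADD reduction (not this file's code):

#include <array>
#include <cassert>

using V4 = std::array<int, 4>;

// Scalar model of a 4-element HADD: interleaved pairwise sums of both inputs.
static V4 hadd(const V4 &A, const V4 &B) {
  V4 R = {A[0] + A[1], A[2] + A[3], B[0] + B[1], B[2] + B[3]};
  return R;
}

int main() {
  V4 X = {1, 2, 3, 4};
  V4 R = X;
  for (int Step = 0; Step != 2; ++Step)  // Log2_32(4) reduction steps
    R = hadd(R, R);
  assert(R[0] == 1 + 2 + 3 + 4);  // extracting element 0 gives the full sum
  return 0;
}
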
36316
36317/// Detect vector gather/scatter index generation and convert it from being a
36318/// bunch of shuffles and extracts into a somewhat faster sequence.
36319/// For i686, the best sequence is apparently storing the value and loading
36320/// scalars back, while for x64 we should use 64-bit extracts and shifts.
36321static SDValue combineExtractVectorElt(SDNode *N, SelectionDAG &DAG,
36322 TargetLowering::DAGCombinerInfo &DCI,
36323 const X86Subtarget &Subtarget) {
36324 if (SDValue NewOp = combineExtractWithShuffle(N, DAG, DCI, Subtarget))
36325 return NewOp;
36326
36327 SDValue InputVector = N->getOperand(0);
36328 SDValue EltIdx = N->getOperand(1);
36329 auto *CIdx = dyn_cast<ConstantSDNode>(EltIdx);
36330
36331 EVT SrcVT = InputVector.getValueType();
36332 EVT VT = N->getValueType(0);
36333 SDLoc dl(InputVector);
36334 bool IsPextr = N->getOpcode() != ISD::EXTRACT_VECTOR_ELT;
36335
36336 if (CIdx && CIdx->getAPIntValue().uge(SrcVT.getVectorNumElements()))
36337 return IsPextr ? DAG.getConstant(0, dl, VT) : DAG.getUNDEF(VT);
36338
36339 // Integer Constant Folding.
36340 if (CIdx && VT.isInteger()) {
36341 APInt UndefVecElts;
36342 SmallVector<APInt, 16> EltBits;
36343 unsigned VecEltBitWidth = SrcVT.getScalarSizeInBits();
36344 if (getTargetConstantBitsFromNode(InputVector, VecEltBitWidth, UndefVecElts,
36345 EltBits, true, false)) {
36346 uint64_t Idx = CIdx->getZExtValue();
36347 if (UndefVecElts[Idx])
36348 return IsPextr ? DAG.getConstant(0, dl, VT) : DAG.getUNDEF(VT);
36349 return DAG.getConstant(EltBits[Idx].zextOrSelf(VT.getScalarSizeInBits()),
36350 dl, VT);
36351 }
36352 }
36353
36354 if (IsPextr) {
36355 const TargetLowering &TLI = DAG.getTargetLoweringInfo();
36356 if (TLI.SimplifyDemandedBits(
36357 SDValue(N, 0), APInt::getAllOnesValue(VT.getSizeInBits()), DCI))
36358 return SDValue(N, 0);
36359
36360 // PEXTR*(PINSR*(v, s, c), c) -> s (with implicit zext handling).
36361 if ((InputVector.getOpcode() == X86ISD::PINSRB ||
36362 InputVector.getOpcode() == X86ISD::PINSRW) &&
36363 InputVector.getOperand(2) == EltIdx) {
36364 assert(SrcVT == InputVector.getOperand(0).getValueType() &&
36365 "Vector type mismatch");
36366 SDValue Scl = InputVector.getOperand(1);
36367 Scl = DAG.getNode(ISD::TRUNCATE, dl, SrcVT.getScalarType(), Scl);
36368 return DAG.getZExtOrTrunc(Scl, dl, VT);
36369 }
36370
36371 // TODO - Remove this once we can handle the implicit zero-extension of
36372 // X86ISD::PEXTRW/X86ISD::PEXTRB in XFormVExtractWithShuffleIntoLoad,
36373 // combineHorizontalPredicateResult and combineBasicSADPattern.
36374 return SDValue();
36375 }
36376
36377 if (SDValue NewOp = XFormVExtractWithShuffleIntoLoad(N, DAG, DCI))
36378 return NewOp;
36379
36380 // Detect mmx extraction of all bits as a i64. It works better as a bitcast.
36381 if (InputVector.getOpcode() == ISD::BITCAST && InputVector.hasOneUse() &&
36382 VT == MVT::i64 && SrcVT == MVT::v1i64 && isNullConstant(EltIdx)) {
36383 SDValue MMXSrc = InputVector.getOperand(0);
36384
36385 // The bitcast source is a direct mmx result.
36386 if (MMXSrc.getValueType() == MVT::x86mmx)
36387 return DAG.getBitcast(VT, InputVector);
36388 }
36389
36390 // Detect mmx to i32 conversion through a v2i32 elt extract.
36391 if (InputVector.getOpcode() == ISD::BITCAST && InputVector.hasOneUse() &&
36392 VT == MVT::i32 && SrcVT == MVT::v2i32 && isNullConstant(EltIdx)) {
36393 SDValue MMXSrc = InputVector.getOperand(0);
36394
36395 // The bitcast source is a direct mmx result.
36396 if (MMXSrc.getValueType() == MVT::x86mmx)
36397 return DAG.getNode(X86ISD::MMX_MOVD2W, dl, MVT::i32, MMXSrc);
36398 }
36399
36400 // Check whether this extract is the root of a sum of absolute differences
36401 // pattern. This has to be done here because we really want it to happen
36402 // pre-legalization.
36403 if (SDValue SAD = combineBasicSADPattern(N, DAG, Subtarget))
36404 return SAD;
36405
36406 // Attempt to replace an all_of/any_of horizontal reduction with a MOVMSK.
36407 if (SDValue Cmp = combineHorizontalPredicateResult(N, DAG, Subtarget))
36408 return Cmp;
36409
36410 // Attempt to replace min/max v8i16/v16i8 reductions with PHMINPOSUW.
36411 if (SDValue MinMax = combineHorizontalMinMaxResult(N, DAG, Subtarget))
36412 return MinMax;
36413
36414 if (SDValue V = combineReductionToHorizontal(N, DAG, Subtarget))
36415 return V;
36416
36417 if (SDValue V = scalarizeExtEltFP(N, DAG))
36418 return V;
36419
36420 // Attempt to extract a i1 element by using MOVMSK to extract the signbits
36421 // and then testing the relevant element.
36422 if (CIdx && SrcVT.getScalarType() == MVT::i1) {
36423 SmallVector<SDNode *, 16> BoolExtracts;
36424 auto IsBoolExtract = [&BoolExtracts](SDNode *Use) {
36425 if (Use->getOpcode() == ISD::EXTRACT_VECTOR_ELT &&
36426 isa<ConstantSDNode>(Use->getOperand(1)) &&
36427 Use->getValueType(0) == MVT::i1) {
36428 BoolExtracts.push_back(Use);
36429 return true;
36430 }
36431 return false;
36432 };
36433 if (all_of(InputVector->uses(), IsBoolExtract) &&
36434 BoolExtracts.size() > 1) {
36435 unsigned NumSrcElts = SrcVT.getVectorNumElements();
36436 EVT BCVT = EVT::getIntegerVT(*DAG.getContext(), NumSrcElts);
36437 if (SDValue BC =
36438 combineBitcastvxi1(DAG, BCVT, InputVector, dl, Subtarget)) {
36439 for (SDNode *Use : BoolExtracts) {
36440 // extractelement vXi1 X, MaskIdx --> ((movmsk X) & Mask) == Mask
36441 unsigned MaskIdx = Use->getConstantOperandVal(1);
36442 APInt MaskBit = APInt::getOneBitSet(NumSrcElts, MaskIdx);
36443 SDValue Mask = DAG.getConstant(MaskBit, dl, BCVT);
36444 SDValue Res = DAG.getNode(ISD::AND, dl, BCVT, BC, Mask);
36445 Res = DAG.getSetCC(dl, MVT::i1, Res, Mask, ISD::SETEQ);
36446 DCI.CombineTo(Use, Res);
36447 }
36448 return SDValue(N, 0);
36449 }
36450 }
36451 }
36452
36453 return SDValue();
36454}
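
The last combine rewrites every bool extract of the same vXi1 vector as a bit test on one shared MOVMSK value: ((Msk & (1 << Idx)) == (1 << Idx)). A standalone scalar model (illustrative only):

#include <cassert>
#include <cstdint>

int main() {
  uint32_t Msk = 0b00100110;  // MOVMSK of a hypothetical 8 x i1 vector
  unsigned MaskIdx = 2;
  uint32_t MaskBit = 1u << MaskIdx;
  // extractelement vXi1 X, MaskIdx --> ((movmsk X) & MaskBit) == MaskBit
  assert((Msk & MaskBit) == MaskBit);         // element 2 was true
  assert(!((Msk & (1u << 0)) == (1u << 0)));  // element 0 was false
  return 0;
}
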
36455
36456/// If a vector select has an operand that is -1 or 0, try to simplify the
36457/// select to a bitwise logic operation.
36458/// TODO: Move to DAGCombiner, possibly using TargetLowering::hasAndNot()?
36459static SDValue
36460combineVSelectWithAllOnesOrZeros(SDNode *N, SelectionDAG &DAG,
36461 TargetLowering::DAGCombinerInfo &DCI,
36462 const X86Subtarget &Subtarget) {
36463 SDValue Cond = N->getOperand(0);
36464 SDValue LHS = N->getOperand(1);
36465 SDValue RHS = N->getOperand(2);
36466 EVT VT = LHS.getValueType();
36467 EVT CondVT = Cond.getValueType();
36468 SDLoc DL(N);
36469 const TargetLowering &TLI = DAG.getTargetLoweringInfo();
36470
36471 if (N->getOpcode() != ISD::VSELECT)
36472 return SDValue();
36473
36474 assert(CondVT.isVector() && "Vector select expects a vector selector!");
36475
36476 // Check if the first operand is all zeros and Cond type is vXi1.
36477 // This situation only applies to avx512.
36478 // TODO: Use isNullOrNullSplat() to distinguish constants with undefs?
36479 // TODO: Can we assert that both operands are not zeros (because that should
36480 // get simplified at node creation time)?
36481 bool TValIsAllZeros = ISD::isBuildVectorAllZeros(LHS.getNode());
36482 bool FValIsAllZeros = ISD::isBuildVectorAllZeros(RHS.getNode());
36483
36484 // If both inputs are 0/undef, create a complete zero vector.
36485 // FIXME: As noted above this should be handled by DAGCombiner/getNode.
36486 if (TValIsAllZeros && FValIsAllZeros) {
36487 if (VT.isFloatingPoint())
36488 return DAG.getConstantFP(0.0, DL, VT);
36489 return DAG.getConstant(0, DL, VT);
36490 }
36491
36492 if (TValIsAllZeros && !FValIsAllZeros && Subtarget.hasAVX512() &&
36493 Cond.hasOneUse() && CondVT.getVectorElementType() == MVT::i1) {
36494 // Invert the cond to not(cond): xor(op, allones) = not(op).
36495 SDValue CondNew = DAG.getNOT(DL, Cond, CondVT);
36496 // Vselect cond, op1, op2 = Vselect not(cond), op2, op1
36497 return DAG.getSelect(DL, VT, CondNew, RHS, LHS);
36498 }
36499
36500 // To use the condition operand as a bitwise mask, it must have elements that
36501 // are the same size as the select elements. I.e., the condition operand must
36502 // have already been promoted from the IR select condition type <N x i1>.
36503 // Don't check if the types themselves are equal because that excludes
36504 // vector floating-point selects.
36505 if (CondVT.getScalarSizeInBits() != VT.getScalarSizeInBits())
36506 return SDValue();
36507
36508 // Try to invert the condition if true value is not all 1s and false value is
36509 // not all 0s. Only do this if the condition has one use.
36510 bool TValIsAllOnes = ISD::isBuildVectorAllOnes(LHS.getNode());
36511 if (!TValIsAllOnes && !FValIsAllZeros && Cond.hasOneUse() &&
36512 // Check if the selector will be produced by CMPP*/PCMP*.
36513 Cond.getOpcode() == ISD::SETCC &&
36514 // Check if SETCC has already been promoted.
36515 TLI.getSetCCResultType(DAG.getDataLayout(), *DAG.getContext(), VT) ==
36516 CondVT) {
36517 bool FValIsAllOnes = ISD::isBuildVectorAllOnes(RHS.getNode());
36518
36519 if (TValIsAllZeros || FValIsAllOnes) {
36520 SDValue CC = Cond.getOperand(2);
36521 ISD::CondCode NewCC =
36522 ISD::getSetCCInverse(cast<CondCodeSDNode>(CC)->get(),
36523 Cond.getOperand(0).getValueType().isInteger());
36524 Cond = DAG.getSetCC(DL, CondVT, Cond.getOperand(0), Cond.getOperand(1),
36525 NewCC);
36526 std::swap(LHS, RHS);
36527 TValIsAllOnes = FValIsAllOnes;
36528 FValIsAllZeros = TValIsAllZeros;
36529 }
36530 }
36531
36532 // Cond value must be 'sign splat' to be converted to a logical op.
36533 if (DAG.ComputeNumSignBits(Cond) != CondVT.getScalarSizeInBits())
36534 return SDValue();
36535
36536 // vselect Cond, 111..., 000... -> Cond
36537 if (TValIsAllOnes && FValIsAllZeros)
36538 return DAG.getBitcast(VT, Cond);
36539
36540 if (!DCI.isBeforeLegalize() && !TLI.isTypeLegal(CondVT))
36541 return SDValue();
36542
36543 // vselect Cond, 111..., X -> or Cond, X
36544 if (TValIsAllOnes) {
36545 SDValue CastRHS = DAG.getBitcast(CondVT, RHS);
36546 SDValue Or = DAG.getNode(ISD::OR, DL, CondVT, Cond, CastRHS);
36547 return DAG.getBitcast(VT, Or);
36548 }
36549
36550 // vselect Cond, X, 000... -> and Cond, X
36551 if (FValIsAllZeros) {
36552 SDValue CastLHS = DAG.getBitcast(CondVT, LHS);
36553 SDValue And = DAG.getNode(ISD::AND, DL, CondVT, Cond, CastLHS);
36554 return DAG.getBitcast(VT, And);
36555 }
36556
36557 // vselect Cond, 000..., X -> andn Cond, X
36558 if (TValIsAllZeros) {
36559 MVT AndNVT = MVT::getVectorVT(MVT::i64, CondVT.getSizeInBits() / 64);
36560 SDValue CastCond = DAG.getBitcast(AndNVT, Cond);
36561 SDValue CastRHS = DAG.getBitcast(AndNVT, RHS);
36562 SDValue AndN = DAG.getNode(X86ISD::ANDNP, DL, AndNVT, CastCond, CastRHS);
36563 return DAG.getBitcast(VT, AndN);
36564 }
36565
36566 return SDValue();
36567}
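
With a sign-splat condition (each lane all ones or all zeros), a blend degenerates into the bitwise ops this combine emits. A one-lane reference model of those three rewrites (not this file's code):

#include <cassert>
#include <cstdint>

// Reference per-lane vselect with a sign-splat condition (Cond is either all
// ones or all zeros for the lane).
static uint32_t blend(uint32_t Cond, uint32_t T, uint32_t F) {
  return (Cond & T) | (~Cond & F);
}

int main() {
  const uint32_t AllOnes = 0xffffffffu, Zero = 0;
  uint32_t X = 0x12345678u;
  const uint32_t Conds[] = {AllOnes, Zero};
  for (uint32_t Cond : Conds) {
    assert(blend(Cond, AllOnes, X) == (Cond | X));  // vselect C, 111..., X -> or
    assert(blend(Cond, X, Zero) == (Cond & X));     // vselect C, X, 000... -> and
    assert(blend(Cond, Zero, X) == (~Cond & X));    // vselect C, 000..., X -> andn
  }
  return 0;
}
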
36568
36569/// If both arms of a vector select are concatenated vectors, split the select,
36570/// and concatenate the result to eliminate a wide (256-bit) vector instruction:
36571/// vselect Cond, (concat T0, T1), (concat F0, F1) -->
36572/// concat (vselect (split Cond), T0, F0), (vselect (split Cond), T1, F1)
36573static SDValue narrowVectorSelect(SDNode *N, SelectionDAG &DAG,
36574 const X86Subtarget &Subtarget) {
36575 unsigned Opcode = N->getOpcode();
36576 if (Opcode != X86ISD::BLENDV && Opcode != ISD::VSELECT)
36577 return SDValue();
36578
36579 // TODO: Split 512-bit vectors too?
36580 EVT VT = N->getValueType(0);
36581 if (!VT.is256BitVector())
36582 return SDValue();
36583
36584 // TODO: Split as long as any 2 of the 3 operands are concatenated?
36585 SDValue Cond = N->getOperand(0);
36586 SDValue TVal = N->getOperand(1);
36587 SDValue FVal = N->getOperand(2);
36588 SmallVector<SDValue, 4> CatOpsT, CatOpsF;
36589 if (!TVal.hasOneUse() || !FVal.hasOneUse() ||
36590 !collectConcatOps(TVal.getNode(), CatOpsT) ||
36591 !collectConcatOps(FVal.getNode(), CatOpsF))
36592 return SDValue();
36593
36594 auto makeBlend = [Opcode](SelectionDAG &DAG, const SDLoc &DL,
36595 ArrayRef<SDValue> Ops) {
36596 return DAG.getNode(Opcode, DL, Ops[1].getValueType(), Ops);
36597 };
36598 return SplitOpsAndApply(DAG, Subtarget, SDLoc(N), VT, { Cond, TVal, FVal },
36599 makeBlend, /*CheckBWI*/ false);
36600}
36601
36602static SDValue combineSelectOfTwoConstants(SDNode *N, SelectionDAG &DAG) {
36603 SDValue Cond = N->getOperand(0);
36604 SDValue LHS = N->getOperand(1);
36605 SDValue RHS = N->getOperand(2);
36606 SDLoc DL(N);
36607
36608 auto *TrueC = dyn_cast<ConstantSDNode>(LHS);
36609 auto *FalseC = dyn_cast<ConstantSDNode>(RHS);
36610 if (!TrueC || !FalseC)
36611 return SDValue();
36612
36613 // Don't do this for crazy integer types.
36614 EVT VT = N->getValueType(0);
36615 if (!DAG.getTargetLoweringInfo().isTypeLegal(VT))
36616 return SDValue();
36617
36618 // We're going to use the condition bit in math or logic ops. We could allow
36619 // this with a wider condition value (post-legalization it becomes an i8),
36620 // but if nothing is creating selects that late, it doesn't matter.
36621 if (Cond.getValueType() != MVT::i1)
36622 return SDValue();
36623
36624 // A power-of-2 multiply is just a shift. LEA also cheaply handles multiply by
36625 // 3, 5, or 9 with i32/i64, so those get transformed too.
36626 // TODO: For constants that overflow or do not differ by power-of-2 or small
36627 // multiplier, convert to 'and' + 'add'.
36628 const APInt &TrueVal = TrueC->getAPIntValue();
36629 const APInt &FalseVal = FalseC->getAPIntValue();
36630 bool OV;
36631 APInt Diff = TrueVal.ssub_ov(FalseVal, OV);
36632 if (OV)
36633 return SDValue();
36634
36635 APInt AbsDiff = Diff.abs();
36636 if (AbsDiff.isPowerOf2() ||
36637 ((VT == MVT::i32 || VT == MVT::i64) &&
36638 (AbsDiff == 3 || AbsDiff == 5 || AbsDiff == 9))) {
36639
36640 // We need a positive multiplier constant for shift/LEA codegen. The 'not'
36641 // of the condition can usually be folded into a compare predicate, but even
36642 // without that, the sequence should be cheaper than a CMOV alternative.
36643 if (TrueVal.slt(FalseVal)) {
36644 Cond = DAG.getNOT(DL, Cond, MVT::i1);
36645 std::swap(TrueC, FalseC);
36646 }
36647
36648 // select Cond, TC, FC --> (zext(Cond) * (TC - FC)) + FC
36649 SDValue R = DAG.getNode(ISD::ZERO_EXTEND, DL, VT, Cond);
36650
36651 // Multiply condition by the difference if non-one.
36652 if (!AbsDiff.isOneValue())
36653 R = DAG.getNode(ISD::MUL, DL, VT, R, DAG.getConstant(AbsDiff, DL, VT));
36654
36655 // Add the base if non-zero.
36656 if (!FalseC->isNullValue())
36657 R = DAG.getNode(ISD::ADD, DL, VT, R, SDValue(FalseC, 0));
36658
36659 return R;
36660 }
36661
36662 return SDValue();
36663}
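// A compile-time check of the identity used above, assuming the swap has
// already made the difference positive (the constants 9 and 2 are arbitrary
// illustration values, not taken from this file):
//   select Cond, TC, FC == zext(Cond) * (TC - FC) + FC
constexpr int selectViaMath(bool Cond, int TC, int FC) {
  return static_cast<int>(Cond) * (TC - FC) + FC;
}
static_assert(selectViaMath(true, 9, 2) == 9, "Cond ? 9 : 2 with Cond = true");
static_assert(selectViaMath(false, 9, 2) == 2, "Cond ? 9 : 2 with Cond = false");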
36664
36665/// If this is a *dynamic* select (non-constant condition) and we can match
36666/// this node with one of the variable blend instructions, restructure the
36667/// condition so that blends can use the high (sign) bit of each element.
36668/// This function will also call SimplifyDemandedBits on already-created
36669/// BLENDV nodes to perform additional simplifications.
36670static SDValue combineVSelectToBLENDV(SDNode *N, SelectionDAG &DAG,
36671 TargetLowering::DAGCombinerInfo &DCI,
36672 const X86Subtarget &Subtarget) {
36673 SDValue Cond = N->getOperand(0);
36674 if ((N->getOpcode() != ISD::VSELECT &&
36675 N->getOpcode() != X86ISD::BLENDV) ||
36676 ISD::isBuildVectorOfConstantSDNodes(Cond.getNode()))
36677 return SDValue();
36678
36679 // Don't optimize before the condition has been transformed to a legal type
36680 // and don't ever optimize vector selects that map to AVX512 mask-registers.
36681 unsigned BitWidth = Cond.getScalarValueSizeInBits();
36682 if (BitWidth < 8 || BitWidth > 64)
36683 return SDValue();
36684
36685 // We can only handle the cases where VSELECT is directly legal on the
36686 // subtarget. We custom lower VSELECT nodes with constant conditions and
36687 // this makes it hard to see whether a dynamic VSELECT will correctly
36688 // lower, so we both check the operation's status and explicitly handle the
36689 // cases where a *dynamic* blend will fail even though a constant-condition
36690 // blend could be custom lowered.
36691 // FIXME: We should find a better way to handle this class of problems.
36692 // Potentially, we should combine constant-condition vselect nodes
36693 // pre-legalization into shuffles and not mark as many types as custom
36694 // lowered.
36695 const TargetLowering &TLI = DAG.getTargetLoweringInfo();
36696 EVT VT = N->getValueType(0);
36697 if (!TLI.isOperationLegalOrCustom(ISD::VSELECT, VT))
36698 return SDValue();
36699 // FIXME: We don't support i16-element blends currently. We could and
36700 // should support them by making *all* the bits in the condition be set
36701 // rather than just the high bit and using an i8-element blend.
36702 if (VT.getVectorElementType() == MVT::i16)
36703 return SDValue();
36704 // Dynamic blending was only available from SSE4.1 onward.
36705 if (VT.is128BitVector() && !Subtarget.hasSSE41())
36706 return SDValue();
36707 // Byte blends are only available in AVX2
36708 if (VT == MVT::v32i8 && !Subtarget.hasAVX2())
36709 return SDValue();
36710 // There are no 512-bit blend instructions that use sign bits.
36711 if (VT.is512BitVector())
36712 return SDValue();
36713
36714 // TODO: Add other opcodes eventually lowered into BLEND.
36715 for (SDNode::use_iterator UI = Cond->use_begin(), UE = Cond->use_end();
36716 UI != UE; ++UI)
36717 if ((UI->getOpcode() != ISD::VSELECT &&
36718 UI->getOpcode() != X86ISD::BLENDV) ||
36719 UI.getOperandNo() != 0)
36720 return SDValue();
36721
36722 APInt DemandedMask(APInt::getSignMask(BitWidth));
36723 KnownBits Known;
36724 TargetLowering::TargetLoweringOpt TLO(DAG, !DCI.isBeforeLegalize(),
36725 !DCI.isBeforeLegalizeOps());
36726 if (!TLI.SimplifyDemandedBits(Cond, DemandedMask, Known, TLO, 0, true))
36727 return SDValue();
36728
36729 // If we changed the computation somewhere in the DAG, this change will
36730 // affect all users of Cond. Update all the nodes so that we do not use
36731 // the generic VSELECT anymore. Otherwise, we may perform wrong
36732 // optimizations as we messed with the actual expectation for the vector
36733 // boolean values.
36734 for (SDNode *U : Cond->uses()) {
36735 if (U->getOpcode() == X86ISD::BLENDV)
36736 continue;
36737
36738 SDValue SB = DAG.getNode(X86ISD::BLENDV, SDLoc(U), U->getValueType(0),
36739 Cond, U->getOperand(1), U->getOperand(2));
36740 DAG.ReplaceAllUsesOfValueWith(SDValue(U, 0), SB);
36741 DCI.AddToWorklist(U);
36742 }
36743 DCI.CommitTargetLoweringOpt(TLO);
36744 return SDValue(N, 0);
36745}
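// The rewrite above relies on the variable blend instructions looking only at
// the most significant bit of each condition element; a per-lane scalar model
// (hypothetical helper, for illustration only):
static int blendvLane(unsigned CondElt, int TrueElt, int FalseElt) {
  // Only the sign bit of the condition element matters; every other bit is
  // ignored, which is what the SimplifyDemandedBits call above exploits.
  return (CondElt & 0x80000000u) ? TrueElt : FalseElt;
}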
36746
36747/// Do target-specific dag combines on SELECT and VSELECT nodes.
36748static SDValue combineSelect(SDNode *N, SelectionDAG &DAG,
36749 TargetLowering::DAGCombinerInfo &DCI,
36750 const X86Subtarget &Subtarget) {
36751 SDLoc DL(N);
36752 SDValue Cond = N->getOperand(0);
36753 SDValue LHS = N->getOperand(1);
36754 SDValue RHS = N->getOperand(2);
36755
36756 // Try simplification again because we use this function to optimize
36757 // BLENDV nodes that are not handled by the generic combiner.
36758 if (SDValue V = DAG.simplifySelect(Cond, LHS, RHS))
36759 return V;
36760
36761 EVT VT = LHS.getValueType();
36762 EVT CondVT = Cond.getValueType();
36763 const TargetLowering &TLI = DAG.getTargetLoweringInfo();
36764
36765 // Convert vselects with constant condition into shuffles.
36766 if (ISD::isBuildVectorOfConstantSDNodes(Cond.getNode()) &&
36767 DCI.isBeforeLegalizeOps()) {
36768 SmallVector<int, 64> Mask;
36769 if (createShuffleMaskFromVSELECT(Mask, Cond))
36770 return DAG.getVectorShuffle(VT, DL, LHS, RHS, Mask);
36771 }
36772
36773 // If we have SSE[12] support, try to form min/max nodes. SSE min/max
36774 // instructions match the semantics of the common C idiom x<y?x:y but not
36775 // x<=y?x:y, because of how they handle negative zero (which can be
36776 // ignored in unsafe-math mode).
36777 // We also try to create v2f32 min/max nodes, which we later widen to v4f32.
36778 if (Cond.getOpcode() == ISD::SETCC && VT.isFloatingPoint() &&
36779 VT != MVT::f80 && VT != MVT::f128 &&
36780 (TLI.isTypeLegal(VT) || VT == MVT::v2f32) &&
36781 (Subtarget.hasSSE2() ||
36782 (Subtarget.hasSSE1() && VT.getScalarType() == MVT::f32))) {
36783 ISD::CondCode CC = cast<CondCodeSDNode>(Cond.getOperand(2))->get();
36784
36785 unsigned Opcode = 0;
36786 // Check for x CC y ? x : y.
36787 if (DAG.isEqualTo(LHS, Cond.getOperand(0)) &&
36788 DAG.isEqualTo(RHS, Cond.getOperand(1))) {
36789 switch (CC) {
36790 default: break;
36791 case ISD::SETULT:
36792 // Converting this to a min would handle NaNs incorrectly, and swapping
36793 // the operands would cause it to handle comparisons between positive
36794 // and negative zero incorrectly.
36795 if (!DAG.isKnownNeverNaN(LHS) || !DAG.isKnownNeverNaN(RHS)) {
36796 if (!DAG.getTarget().Options.UnsafeFPMath &&
36797 !(DAG.isKnownNeverZeroFloat(LHS) ||
36798 DAG.isKnownNeverZeroFloat(RHS)))
36799 break;
36800 std::swap(LHS, RHS);
36801 }
36802 Opcode = X86ISD::FMIN;
36803 break;
36804 case ISD::SETOLE:
36805 // Converting this to a min would handle comparisons between positive
36806 // and negative zero incorrectly.
36807 if (!DAG.getTarget().Options.UnsafeFPMath &&
36808 !DAG.isKnownNeverZeroFloat(LHS) && !DAG.isKnownNeverZeroFloat(RHS))
36809 break;
36810 Opcode = X86ISD::FMIN;
36811 break;
36812 case ISD::SETULE:
36813 // Converting this to a min would handle both negative zeros and NaNs
36814 // incorrectly, but we can swap the operands to fix both.
36815 std::swap(LHS, RHS);
36816 LLVM_FALLTHROUGH;
36817 case ISD::SETOLT:
36818 case ISD::SETLT:
36819 case ISD::SETLE:
36820 Opcode = X86ISD::FMIN;
36821 break;
36822
36823 case ISD::SETOGE:
36824 // Converting this to a max would handle comparisons between positive
36825 // and negative zero incorrectly.
36826 if (!DAG.getTarget().Options.UnsafeFPMath &&
36827 !DAG.isKnownNeverZeroFloat(LHS) && !DAG.isKnownNeverZeroFloat(RHS))
36828 break;
36829 Opcode = X86ISD::FMAX;
36830 break;
36831 case ISD::SETUGT:
36832 // Converting this to a max would handle NaNs incorrectly, and swapping
36833 // the operands would cause it to handle comparisons between positive
36834 // and negative zero incorrectly.
36835 if (!DAG.isKnownNeverNaN(LHS) || !DAG.isKnownNeverNaN(RHS)) {
36836 if (!DAG.getTarget().Options.UnsafeFPMath &&
36837 !(DAG.isKnownNeverZeroFloat(LHS) ||
36838 DAG.isKnownNeverZeroFloat(RHS)))
36839 break;
36840 std::swap(LHS, RHS);
36841 }
36842 Opcode = X86ISD::FMAX;
36843 break;
36844 case ISD::SETUGE:
36845 // Converting this to a max would handle both negative zeros and NaNs
36846 // incorrectly, but we can swap the operands to fix both.
36847 std::swap(LHS, RHS);
36848 LLVM_FALLTHROUGH;
36849 case ISD::SETOGT:
36850 case ISD::SETGT:
36851 case ISD::SETGE:
36852 Opcode = X86ISD::FMAX;
36853 break;
36854 }
36855 // Check for x CC y ? y : x -- a min/max with reversed arms.
36856 } else if (DAG.isEqualTo(LHS, Cond.getOperand(1)) &&
36857 DAG.isEqualTo(RHS, Cond.getOperand(0))) {
36858 switch (CC) {
36859 default: break;
36860 case ISD::SETOGE:
36861 // Converting this to a min would handle comparisons between positive
36862 // and negative zero incorrectly, and swapping the operands would
36863 // cause it to handle NaNs incorrectly.
36864 if (!DAG.getTarget().Options.UnsafeFPMath &&
36865 !(DAG.isKnownNeverZeroFloat(LHS) ||
36866 DAG.isKnownNeverZeroFloat(RHS))) {
36867 if (!DAG.isKnownNeverNaN(LHS) || !DAG.isKnownNeverNaN(RHS))
36868 break;
36869 std::swap(LHS, RHS);
36870 }
36871 Opcode = X86ISD::FMIN;
36872 break;
36873 case ISD::SETUGT:
36874 // Converting this to a min would handle NaNs incorrectly.
36875 if (!DAG.getTarget().Options.UnsafeFPMath &&
36876 (!DAG.isKnownNeverNaN(LHS) || !DAG.isKnownNeverNaN(RHS)))
36877 break;
36878 Opcode = X86ISD::FMIN;
36879 break;
36880 case ISD::SETUGE:
36881 // Converting this to a min would handle both negative zeros and NaNs
36882 // incorrectly, but we can swap the operands to fix both.
36883 std::swap(LHS, RHS);
36884 LLVM_FALLTHROUGH;
36885 case ISD::SETOGT:
36886 case ISD::SETGT:
36887 case ISD::SETGE:
36888 Opcode = X86ISD::FMIN;
36889 break;
36890
36891 case ISD::SETULT:
36892 // Converting this to a max would handle NaNs incorrectly.
36893 if (!DAG.isKnownNeverNaN(LHS) || !DAG.isKnownNeverNaN(RHS))
36894 break;
36895 Opcode = X86ISD::FMAX;
36896 break;
36897 case ISD::SETOLE:
36898 // Converting this to a max would handle comparisons between positive
36899 // and negative zero incorrectly, and swapping the operands would
36900 // cause it to handle NaNs incorrectly.
36901 if (!DAG.getTarget().Options.UnsafeFPMath &&
36902 !DAG.isKnownNeverZeroFloat(LHS) &&
36903 !DAG.isKnownNeverZeroFloat(RHS)) {
36904 if (!DAG.isKnownNeverNaN(LHS) || !DAG.isKnownNeverNaN(RHS))
36905 break;
36906 std::swap(LHS, RHS);
36907 }
36908 Opcode = X86ISD::FMAX;
36909 break;
36910 case ISD::SETULE:
36911 // Converting this to a max would handle both negative zeros and NaNs
36912 // incorrectly, but we can swap the operands to fix both.
36913 std::swap(LHS, RHS);
36914 LLVM_FALLTHROUGH;
36915 case ISD::SETOLT:
36916 case ISD::SETLT:
36917 case ISD::SETLE:
36918 Opcode = X86ISD::FMAX;
36919 break;
36920 }
36921 }
36922
36923 if (Opcode)
36924 return DAG.getNode(Opcode, DL, N->getValueType(0), LHS, RHS);
36925 }
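  // A scalar model of the x86 FP min used above (illustrative only):
  //   float x86MinModel(float A, float B) { return A < B ? A : B; }
  // The second operand wins whenever the less-than test is false (NaNs, equal
  // values, and the +0.0 / -0.0 tie), so "x < y ? x : y" maps onto FMIN
  // directly, while e.g. "x <= y ? x : y" differs for x = +0.0f, y = -0.0f
  // (the select yields +0.0f, the min model yields -0.0f) unless a zero
  // operand can be ruled out, which is exactly what the checks above guard
  // against. FMAX is the mirror image.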
36926
36927 // Some mask scalar intrinsics rely on checking if only one bit is set
36928 // and implement it in C code like this:
36929 // A[0] = (U & 1) ? A[0] : W[0];
36930 // This creates some redundant instructions that break pattern matching.
36931 // fold (select (setcc (and X, 1), 0, seteq), Y, Z) -> (select (and X, 1), Z, Y)
36932 if (Subtarget.hasAVX512() && N->getOpcode() == ISD::SELECT &&
36933 Cond.getOpcode() == ISD::SETCC && (VT == MVT::f32 || VT == MVT::f64)) {
36934 ISD::CondCode CC = cast<CondCodeSDNode>(Cond.getOperand(2))->get();
36935 SDValue AndNode = Cond.getOperand(0);
36936 if (AndNode.getOpcode() == ISD::AND && CC == ISD::SETEQ &&
36937 isNullConstant(Cond.getOperand(1)) &&
36938 isOneConstant(AndNode.getOperand(1))) {
36939 // LHS and RHS swapped due to
36940 // setcc outputting 1 when AND resulted in 0 and vice versa.
36941 AndNode = DAG.getZExtOrTrunc(AndNode, DL, MVT::i8);
36942 return DAG.getNode(ISD::SELECT, DL, VT, AndNode, RHS, LHS);
36943 }
36944 }
36945
36946 // v16i8 (select v16i1, v16i8, v16i8) does not have a proper
36947 // lowering on KNL. In this case we convert it to
36948 // v16i8 (select v16i8, v16i8, v16i8) and use AVX instruction.
36949 // The same situation applies to all vectors of i8 and i16 without BWI.
36950 // Make sure we extend these even before type legalization gets a chance to
36951 // split wide vectors.
36952 // Since SKX these selects have a proper lowering.
36953 if (Subtarget.hasAVX512() && !Subtarget.hasBWI() && CondVT.isVector() &&
36954 CondVT.getVectorElementType() == MVT::i1 &&
36955 (VT.getVectorElementType() == MVT::i8 ||
36956 VT.getVectorElementType() == MVT::i16)) {
36957 Cond = DAG.getNode(ISD::SIGN_EXTEND, DL, VT, Cond);
36958 return DAG.getNode(N->getOpcode(), DL, VT, Cond, LHS, RHS);
36959 }
36960
36961 // AVX512 - Extend select with zero to merge with target shuffle.
36962 // select(mask, extract_subvector(shuffle(x)), zero) -->
36963 // extract_subvector(select(insert_subvector(mask), shuffle(x), zero))
36964 // TODO - support non target shuffles as well.
36965 if (Subtarget.hasAVX512() && CondVT.isVector() &&
36966 CondVT.getVectorElementType() == MVT::i1) {
36967 auto SelectableOp = [&TLI](SDValue Op) {
36968 return Op.getOpcode() == ISD::EXTRACT_SUBVECTOR &&
36969 isTargetShuffle(Op.getOperand(0).getOpcode()) &&
36970 isNullConstant(Op.getOperand(1)) &&
36971 TLI.isTypeLegal(Op.getOperand(0).getValueType()) &&
36972 Op.hasOneUse() && Op.getOperand(0).hasOneUse();
36973 };
36974
36975 bool SelectableLHS = SelectableOp(LHS);
36976 bool SelectableRHS = SelectableOp(RHS);
36977 bool ZeroLHS = ISD::isBuildVectorAllZeros(LHS.getNode());
36978 bool ZeroRHS = ISD::isBuildVectorAllZeros(RHS.getNode());
36979
36980 if ((SelectableLHS && ZeroRHS) || (SelectableRHS && ZeroLHS)) {
36981 EVT SrcVT = SelectableLHS ? LHS.getOperand(0).getValueType()
36982 : RHS.getOperand(0).getValueType();
36983 unsigned NumSrcElts = SrcVT.getVectorNumElements();
36984 EVT SrcCondVT = EVT::getVectorVT(*DAG.getContext(), MVT::i1, NumSrcElts);
36985 LHS = insertSubVector(DAG.getUNDEF(SrcVT), LHS, 0, DAG, DL,
36986 VT.getSizeInBits());
36987 RHS = insertSubVector(DAG.getUNDEF(SrcVT), RHS, 0, DAG, DL,
36988 VT.getSizeInBits());
36989 Cond = DAG.getNode(ISD::INSERT_SUBVECTOR, DL, SrcCondVT,
36990 DAG.getUNDEF(SrcCondVT), Cond,
36991 DAG.getIntPtrConstant(0, DL));
36992 SDValue Res = DAG.getSelect(DL, SrcVT, Cond, LHS, RHS);
36993 return extractSubVector(Res, 0, DAG, DL, VT.getSizeInBits());
36994 }
36995 }
36996
36997 if (SDValue V = combineSelectOfTwoConstants(N, DAG))
36998 return V;
36999
37000 // Canonicalize max and min:
37001 // (x > y) ? x : y -> (x >= y) ? x : y
37002 // (x < y) ? x : y -> (x <= y) ? x : y
37003 // This allows use of COND_S / COND_NS (see TranslateX86CC) which eliminates
37004 // the need for an extra compare
37005 // against zero. e.g.
37006 // (x - y) > 0 ? (x - y) : 0 -> (x - y) >= 0 ? (x - y) : 0
37007 // subl %esi, %edi
37008 // testl %edi, %edi
37009 // movl $0, %eax
37010 // cmovgl %edi, %eax
37011 // =>
37012 // xorl %eax, %eax
37013 // subl %esi, %edi
37014 // cmovsl %eax, %edi
37015 if (N->getOpcode() == ISD::SELECT && Cond.getOpcode() == ISD::SETCC &&
37016 Cond.hasOneUse() &&
37017 DAG.isEqualTo(LHS, Cond.getOperand(0)) &&
37018 DAG.isEqualTo(RHS, Cond.getOperand(1))) {
37019 ISD::CondCode CC = cast<CondCodeSDNode>(Cond.getOperand(2))->get();
37020 switch (CC) {
37021 default: break;
37022 case ISD::SETLT:
37023 case ISD::SETGT: {
37024 ISD::CondCode NewCC = (CC == ISD::SETLT) ? ISD::SETLE : ISD::SETGE;
37025 Cond = DAG.getSetCC(SDLoc(Cond), Cond.getValueType(),
37026 Cond.getOperand(0), Cond.getOperand(1), NewCC);
37027 return DAG.getSelect(DL, VT, Cond, LHS, RHS);
37028 }
37029 }
37030 }
37031
37032 // Match VSELECTs into subs with unsigned saturation.
37033 if (N->getOpcode() == ISD::VSELECT && Cond.getOpcode() == ISD::SETCC &&
37034 // psubus is available in SSE2 for i8 and i16 vectors.
37035 Subtarget.hasSSE2() && VT.getVectorNumElements() >= 2 &&
37036 isPowerOf2_32(VT.getVectorNumElements()) &&
37037 (VT.getVectorElementType() == MVT::i8 ||
37038 VT.getVectorElementType() == MVT::i16)) {
37039 ISD::CondCode CC = cast<CondCodeSDNode>(Cond.getOperand(2))->get();
37040
37041 // Check if one of the arms of the VSELECT is a zero vector. If it's on the
37042 // left side invert the predicate to simplify logic below.
37043 SDValue Other;
37044 if (ISD::isBuildVectorAllZeros(LHS.getNode())) {
37045 Other = RHS;
37046 CC = ISD::getSetCCInverse(CC, true);
37047 } else if (ISD::isBuildVectorAllZeros(RHS.getNode())) {
37048 Other = LHS;
37049 }
37050
37051 if (Other.getNode() && Other->getNumOperands() == 2 &&
37052 Other->getOperand(0) == Cond.getOperand(0)) {
37053 SDValue OpLHS = Other->getOperand(0), OpRHS = Other->getOperand(1);
37054 SDValue CondRHS = Cond->getOperand(1);
37055
37056 // Look for a general sub with unsigned saturation first.
37057 // x >= y ? x-y : 0 --> subus x, y
37058 // x > y ? x-y : 0 --> subus x, y
37059 if ((CC == ISD::SETUGE || CC == ISD::SETUGT) &&
37060 Other->getOpcode() == ISD::SUB && OpRHS == CondRHS)
37061 return DAG.getNode(ISD::USUBSAT, DL, VT, OpLHS, OpRHS);
37062
37063 if (auto *OpRHSBV = dyn_cast<BuildVectorSDNode>(OpRHS)) {
37064 if (isa<BuildVectorSDNode>(CondRHS)) {
37065 // If the RHS is a constant we have to reverse the const
37066 // canonicalization.
37067 // x > C-1 ? x+-C : 0 --> subus x, C
37068 auto MatchUSUBSAT = [](ConstantSDNode *Op, ConstantSDNode *Cond) {
37069 return (!Op && !Cond) ||
37070 (Op && Cond &&
37071 Cond->getAPIntValue() == (-Op->getAPIntValue() - 1));
37072 };
37073 if (CC == ISD::SETUGT && Other->getOpcode() == ISD::ADD &&
37074 ISD::matchBinaryPredicate(OpRHS, CondRHS, MatchUSUBSAT,
37075 /*AllowUndefs*/ true)) {
37076 OpRHS = DAG.getNode(ISD::SUB, DL, VT, DAG.getConstant(0, DL, VT),
37077 OpRHS);
37078 return DAG.getNode(ISD::USUBSAT, DL, VT, OpLHS, OpRHS);
37079 }
37080
37081 // Another special case: If C was a sign bit, the sub has been
37082 // canonicalized into a xor.
37083 // FIXME: Would it be better to use computeKnownBits to determine
37084 // whether it's safe to decanonicalize the xor?
37085 // x s< 0 ? x^C : 0 --> subus x, C
37086 if (auto *OpRHSConst = OpRHSBV->getConstantSplatNode()) {
37087 if (CC == ISD::SETLT && Other.getOpcode() == ISD::XOR &&
37088 ISD::isBuildVectorAllZeros(CondRHS.getNode()) &&
37089 OpRHSConst->getAPIntValue().isSignMask()) {
37090 // Note that we have to rebuild the RHS constant here to ensure we
37091 // don't rely on particular values of undef lanes.
37092 OpRHS = DAG.getConstant(OpRHSConst->getAPIntValue(), DL, VT);
37093 return DAG.getNode(ISD::USUBSAT, DL, VT, OpLHS, OpRHS);
37094 }
37095 }
37096 }
37097 }
37098 }
37099 }
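  // Scalar model of the shape just matched (illustrative only):
  //   uint8_t subus8(uint8_t X, uint8_t Y) { return X >= Y ? uint8_t(X - Y) : uint8_t(0); }
  // i.e. "x >= y ? x - y : 0" is per-lane unsigned saturating subtraction,
  // which is exactly what ISD::USUBSAT (psubus) computes.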
37100
37101 // Match VSELECTs into add with unsigned saturation.
37102 if (N->getOpcode() == ISD::VSELECT && Cond.getOpcode() == ISD::SETCC &&
37103 // paddus is available in SSE2 for i8 and i16 vectors.
37104 Subtarget.hasSSE2() && VT.getVectorNumElements() >= 2 &&
37105 isPowerOf2_32(VT.getVectorNumElements()) &&
37106 (VT.getVectorElementType() == MVT::i8 ||
37107 VT.getVectorElementType() == MVT::i16)) {
37108 ISD::CondCode CC = cast<CondCodeSDNode>(Cond.getOperand(2))->get();
37109
37110 SDValue CondLHS = Cond->getOperand(0);
37111 SDValue CondRHS = Cond->getOperand(1);
37112
37113 // Check if one of the arms of the VSELECT is a vector with all bits set.
37114 // If it's on the left side invert the predicate to simplify logic below.
37115 SDValue Other;
37116 if (ISD::isBuildVectorAllOnes(LHS.getNode())) {
37117 Other = RHS;
37118 CC = ISD::getSetCCInverse(CC, true);
37119 } else if (ISD::isBuildVectorAllOnes(RHS.getNode())) {
37120 Other = LHS;
37121 }
37122
37123 if (Other.getNode() && Other.getOpcode() == ISD::ADD) {
37124 SDValue OpLHS = Other.getOperand(0), OpRHS = Other.getOperand(1);
37125
37126 // Canonicalize condition operands.
37127 if (CC == ISD::SETUGE) {
37128 std::swap(CondLHS, CondRHS);
37129 CC = ISD::SETULE;
37130 }
37131
37132 // We can test against either of the addition operands.
37133 // x <= x+y ? x+y : ~0 --> addus x, y
37134 // x+y >= x ? x+y : ~0 --> addus x, y
37135 if (CC == ISD::SETULE && Other == CondRHS &&
37136 (OpLHS == CondLHS || OpRHS == CondLHS))
37137 return DAG.getNode(ISD::UADDSAT, DL, VT, OpLHS, OpRHS);
37138
37139 if (isa<BuildVectorSDNode>(OpRHS) && isa<BuildVectorSDNode>(CondRHS) &&
37140 CondLHS == OpLHS) {
37141 // If the RHS is a constant we have to reverse the const
37142 // canonicalization.
37143 // x > ~C ? x+C : ~0 --> addus x, C
37144 auto MatchUADDSAT = [](ConstantSDNode *Op, ConstantSDNode *Cond) {
37145 return Cond->getAPIntValue() == ~Op->getAPIntValue();
37146 };
37147 if (CC == ISD::SETULE &&
37148 ISD::matchBinaryPredicate(OpRHS, CondRHS, MatchUADDSAT))
37149 return DAG.getNode(ISD::UADDSAT, DL, VT, OpLHS, OpRHS);
37150 }
37151 }
37152 }
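  // And the addition counterpart (illustrative only):
  //   uint8_t addus8(uint8_t X, uint8_t Y) { uint8_t S = uint8_t(X + Y); return S >= X ? S : uint8_t(0xFF); }
  // The wrapped sum is kept only when it did not overflow (S >= X); otherwise
  // the lane saturates to all-ones, matching ISD::UADDSAT (paddus).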
37153
37154 // Early exit check
37155 if (!TLI.isTypeLegal(VT))
37156 return SDValue();
37157
37158 if (SDValue V = combineVSelectWithAllOnesOrZeros(N, DAG, DCI, Subtarget))
37159 return V;
37160
37161 if (SDValue V = combineVSelectToBLENDV(N, DAG, DCI, Subtarget))
37162 return V;
37163
37164 if (SDValue V = narrowVectorSelect(N, DAG, Subtarget))
37165 return V;
37166
37167 // select(~Cond, X, Y) -> select(Cond, Y, X)
37168 if (CondVT.getScalarType() != MVT::i1)
37169 if (SDValue CondNot = IsNOT(Cond, DAG))
37170 return DAG.getNode(N->getOpcode(), DL, VT,
37171 DAG.getBitcast(CondVT, CondNot), RHS, LHS);
37172
37173 // Custom action for SELECT MMX
37174 if (VT == MVT::x86mmx) {
37175 LHS = DAG.getBitcast(MVT::i64, LHS);
37176 RHS = DAG.getBitcast(MVT::i64, RHS);
37177 SDValue newSelect = DAG.getNode(ISD::SELECT, DL, MVT::i64, Cond, LHS, RHS);
37178 return DAG.getBitcast(VT, newSelect);
37179 }
37180
37181 return SDValue();
37182}
37183
37184/// Combine:
37185/// (brcond/cmov/setcc .., (cmp (atomic_load_add x, 1), 0), COND_S)
37186/// to:
37187/// (brcond/cmov/setcc .., (LADD x, 1), COND_LE)
37188/// i.e., reusing the EFLAGS produced by the LOCKed instruction.
37189/// Note that this is only legal for some op/cc combinations.
37190static SDValue combineSetCCAtomicArith(SDValue Cmp, X86::CondCode &CC,
37191 SelectionDAG &DAG,
37192 const X86Subtarget &Subtarget) {
37193 // This combine only operates on CMP-like nodes.
37194 if (!(Cmp.getOpcode() == X86ISD::CMP ||
37195 (Cmp.getOpcode() == X86ISD::SUB && !Cmp->hasAnyUseOfValue(0))))
37196 return SDValue();
37197
37198 // Can't replace the cmp if it has more uses than the one we're looking at.
37199 // FIXME: We would like to be able to handle this, but would need to make sure
37200 // all uses were updated.
37201 if (!Cmp.hasOneUse())
37202 return SDValue();
37203
37204 // This only applies to variations of the common case:
37205 // (icmp slt x, 0) -> (icmp sle (add x, 1), 0)
37206 // (icmp sge x, 0) -> (icmp sgt (add x, 1), 0)
37207 // (icmp sle x, 0) -> (icmp slt (sub x, 1), 0)
37208 // (icmp sgt x, 0) -> (icmp sge (sub x, 1), 0)
37209 // Using the proper condcodes (see below), overflow is checked for.
37210
37211 // FIXME: We can generalize both constraints:
37212 // - XOR/OR/AND (if they were made to survive AtomicExpand)
37213 // - LHS != 1
37214 // if the result is compared.
37215
37216 SDValue CmpLHS = Cmp.getOperand(0);
37217 SDValue CmpRHS = Cmp.getOperand(1);
37218
37219 if (!CmpLHS.hasOneUse())
37220 return SDValue();
37221
37222 unsigned Opc = CmpLHS.getOpcode();
37223 if (Opc != ISD::ATOMIC_LOAD_ADD && Opc != ISD::ATOMIC_LOAD_SUB)
37224 return SDValue();
37225
37226 SDValue OpRHS = CmpLHS.getOperand(2);
37227 auto *OpRHSC = dyn_cast<ConstantSDNode>(OpRHS);
37228 if (!OpRHSC)
37229 return SDValue();
37230
37231 APInt Addend = OpRHSC->getAPIntValue();
37232 if (Opc == ISD::ATOMIC_LOAD_SUB)
37233 Addend = -Addend;
37234
37235 auto *CmpRHSC = dyn_cast<ConstantSDNode>(CmpRHS);
37236 if (!CmpRHSC)
37237 return SDValue();
37238
37239 APInt Comparison = CmpRHSC->getAPIntValue();
37240
37241 // If the addend is the negation of the comparison value, then we can do
37242 // a full comparison by emitting the atomic arithmetic as a locked sub.
37243 if (Comparison == -Addend) {
37244 // The CC is fine, but we need to rewrite the LHS of the comparison as an
37245 // atomic sub.
37246 auto *AN = cast<AtomicSDNode>(CmpLHS.getNode());
37247 auto AtomicSub = DAG.getAtomic(
37248 ISD::ATOMIC_LOAD_SUB, SDLoc(CmpLHS), CmpLHS.getValueType(),
37249 /*Chain*/ CmpLHS.getOperand(0), /*LHS*/ CmpLHS.getOperand(1),
37250 /*RHS*/ DAG.getConstant(-Addend, SDLoc(CmpRHS), CmpRHS.getValueType()),
37251 AN->getMemOperand());
37252 auto LockOp = lowerAtomicArithWithLOCK(AtomicSub, DAG, Subtarget);
37253 DAG.ReplaceAllUsesOfValueWith(CmpLHS.getValue(0),
37254 DAG.getUNDEF(CmpLHS.getValueType()));
37255 DAG.ReplaceAllUsesOfValueWith(CmpLHS.getValue(1), LockOp.getValue(1));
37256 return LockOp;
37257 }
37258
37259 // We can handle comparisons with zero in a number of cases by manipulating
37260 // the CC used.
37261 if (!Comparison.isNullValue())
37262 return SDValue();
37263
37264 if (CC == X86::COND_S && Addend == 1)
37265 CC = X86::COND_LE;
37266 else if (CC == X86::COND_NS && Addend == 1)
37267 CC = X86::COND_G;
37268 else if (CC == X86::COND_G && Addend == -1)
37269 CC = X86::COND_GE;
37270 else if (CC == X86::COND_LE && Addend == -1)
37271 CC = X86::COND_L;
37272 else
37273 return SDValue();
37274
37275 SDValue LockOp = lowerAtomicArithWithLOCK(CmpLHS, DAG, Subtarget);
37276 DAG.ReplaceAllUsesOfValueWith(CmpLHS.getValue(0),
37277 DAG.getUNDEF(CmpLHS.getValueType()));
37278 DAG.ReplaceAllUsesOfValueWith(CmpLHS.getValue(1), LockOp.getValue(1));
37279 return LockOp;
37280}
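// Source-level shape this combine typically arises from (a hedged sketch;
// whether it actually fires depends on the surrounding DAG and the one-use
// checks above):
//   bool wasNegativeBeforeIncrement(std::atomic<int> &Counter) {
//     return Counter.fetch_add(1, std::memory_order_seq_cst) < 0;
//   }
// The old value is compared against 0 with COND_S; the combine answers that
// from the flags of the LOCKed add itself by switching to COND_LE on the new
// value (the Addend == 1 case above), so no separate test of the loaded value
// is needed.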
37281
37282// Check whether a boolean test is testing a boolean value generated by
37283// X86ISD::SETCC. If so, return the operand of that SETCC and proper condition
37284// code.
37285//
37286// Simplify the following patterns:
37287// (Op (CMP (SETCC Cond EFLAGS) 1) EQ) or
37288// (Op (CMP (SETCC Cond EFLAGS) 0) NEQ)
37289// to (Op EFLAGS Cond)
37290//
37291// (Op (CMP (SETCC Cond EFLAGS) 0) EQ) or
37292// (Op (CMP (SETCC Cond EFLAGS) 1) NEQ)
37293// to (Op EFLAGS !Cond)
37294//
37295// where Op could be BRCOND or CMOV.
37296//
37297static SDValue checkBoolTestSetCCCombine(SDValue Cmp, X86::CondCode &CC) {
37298 // This combine only operates on CMP-like nodes.
37299 if (!(Cmp.getOpcode() == X86ISD::CMP ||
37300 (Cmp.getOpcode() == X86ISD::SUB && !Cmp->hasAnyUseOfValue(0))))
37301 return SDValue();
37302
37303 // Quit if not used as a boolean value.
37304 if (CC != X86::COND_E && CC != X86::COND_NE)
37305 return SDValue();
37306
37307 // Check CMP operands. One of them should be 0 or 1 and the other should be
37308 // a SetCC or extended from it.
37309 SDValue Op1 = Cmp.getOperand(0);
37310 SDValue Op2 = Cmp.getOperand(1);
37311
37312 SDValue SetCC;
37313 const ConstantSDNode* C = nullptr;
37314 bool needOppositeCond = (CC == X86::COND_E);
37315 bool checkAgainstTrue = false; // Is it a comparison against 1?
37316
37317 if ((C = dyn_cast<ConstantSDNode>(Op1)))
37318 SetCC = Op2;
37319 else if ((C = dyn_cast<ConstantSDNode>(Op2)))
37320 SetCC = Op1;
37321 else // Quit if neither operand is a constant.
37322 return SDValue();
37323
37324 if (C->getZExtValue() == 1) {
37325 needOppositeCond = !needOppositeCond;
37326 checkAgainstTrue = true;
37327 } else if (C->getZExtValue() != 0)
37328 // Quit if the constant is neither 0 nor 1.
37329 return SDValue();
37330
37331 bool truncatedToBoolWithAnd = false;
37332 // Skip (zext $x), (trunc $x), or (and $x, 1) node.
37333 while (SetCC.getOpcode() == ISD::ZERO_EXTEND ||
37334 SetCC.getOpcode() == ISD::TRUNCATE ||
37335 SetCC.getOpcode() == ISD::AND) {
37336 if (SetCC.getOpcode() == ISD::AND) {
37337 int OpIdx = -1;
37338 if (isOneConstant(SetCC.getOperand(0)))
37339 OpIdx = 1;
37340 if (isOneConstant(SetCC.getOperand(1)))
37341 OpIdx = 0;
37342 if (OpIdx < 0)
37343 break;
37344 SetCC = SetCC.getOperand(OpIdx);
37345 truncatedToBoolWithAnd = true;
37346 } else
37347 SetCC = SetCC.getOperand(0);
37348 }
37349
37350 switch (SetCC.getOpcode()) {
37351 case X86ISD::SETCC_CARRY:
37352 // Since SETCC_CARRY gives output based on R = CF ? ~0 : 0, it's unsafe to
37353 // simplify it if the result of SETCC_CARRY is not canonicalized to 0 or 1,
37354 // i.e. it's a comparison against true but the result of SETCC_CARRY is not
37355 // truncated to i1 using 'and'.
37356 if (checkAgainstTrue && !truncatedToBoolWithAnd)
37357 break;
37358 assert(X86::CondCode(SetCC.getConstantOperandVal(0)) == X86::COND_B &&
37359 "Invalid use of SETCC_CARRY!");
37360 LLVM_FALLTHROUGH;
37361 case X86ISD::SETCC:
37362 // Set the condition code or opposite one if necessary.
37363 CC = X86::CondCode(SetCC.getConstantOperandVal(0));
37364 if (needOppositeCond)
37365 CC = X86::GetOppositeBranchCondition(CC);
37366 return SetCC.getOperand(1);
37367 case X86ISD::CMOV: {
37368 // Check whether the false/true values are canonical, i.e. 0 or 1.
37369 ConstantSDNode *FVal = dyn_cast<ConstantSDNode>(SetCC.getOperand(0));
37370 ConstantSDNode *TVal = dyn_cast<ConstantSDNode>(SetCC.getOperand(1));
37371 // Quit if true value is not a constant.
37372 if (!TVal)
37373 return SDValue();
37374 // Quit if false value is not a constant.
37375 if (!FVal) {
37376 SDValue Op = SetCC.getOperand(0);
37377 // Skip 'zext' or 'trunc' node.
37378 if (Op.getOpcode() == ISD::ZERO_EXTEND ||
37379 Op.getOpcode() == ISD::TRUNCATE)
37380 Op = Op.getOperand(0);
37381 // A special case for rdrand/rdseed, where 0 is set if false cond is
37382 // found.
37383 if ((Op.getOpcode() != X86ISD::RDRAND &&
37384 Op.getOpcode() != X86ISD::RDSEED) || Op.getResNo() != 0)
37385 return SDValue();
37386 }
37387 // Quit if false value is not the constant 0 or 1.
37388 bool FValIsFalse = true;
37389 if (FVal && FVal->getZExtValue() != 0) {
37390 if (FVal->getZExtValue() != 1)
37391 return SDValue();
37392 // If FVal is 1, opposite cond is needed.
37393 needOppositeCond = !needOppositeCond;
37394 FValIsFalse = false;
37395 }
37396 // Quit if TVal is not the constant opposite of FVal.
37397 if (FValIsFalse && TVal->getZExtValue() != 1)
37398 return SDValue();
37399 if (!FValIsFalse && TVal->getZExtValue() != 0)
37400 return SDValue();
37401 CC = X86::CondCode(SetCC.getConstantOperandVal(2));
37402 if (needOppositeCond)
37403 CC = X86::GetOppositeBranchCondition(CC);
37404 return SetCC.getOperand(3);
37405 }
37406 }
37407
37408 return SDValue();
37409}
37410
37411/// Check whether Cond is an AND/OR of SETCCs off of the same EFLAGS.
37412/// Match:
37413/// (X86or (X86setcc) (X86setcc))
37414/// (X86cmp (and (X86setcc) (X86setcc)), 0)
37415static bool checkBoolTestAndOrSetCCCombine(SDValue Cond, X86::CondCode &CC0,
37416 X86::CondCode &CC1, SDValue &Flags,
37417 bool &isAnd) {
37418 if (Cond->getOpcode() == X86ISD::CMP) {
37419 if (!isNullConstant(Cond->getOperand(1)))
37420 return false;
37421
37422 Cond = Cond->getOperand(0);
37423 }
37424
37425 isAnd = false;
37426
37427 SDValue SetCC0, SetCC1;
37428 switch (Cond->getOpcode()) {
37429 default: return false;
37430 case ISD::AND:
37431 case X86ISD::AND:
37432 isAnd = true;
37433 LLVM_FALLTHROUGH;
37434 case ISD::OR:
37435 case X86ISD::OR:
37436 SetCC0 = Cond->getOperand(0);
37437 SetCC1 = Cond->getOperand(1);
37438 break;
37439 };
37440
37441 // Make sure we have SETCC nodes, using the same flags value.
37442 if (SetCC0.getOpcode() != X86ISD::SETCC ||
37443 SetCC1.getOpcode() != X86ISD::SETCC ||
37444 SetCC0->getOperand(1) != SetCC1->getOperand(1))
37445 return false;
37446
37447 CC0 = (X86::CondCode)SetCC0->getConstantOperandVal(0);
37448 CC1 = (X86::CondCode)SetCC1->getConstantOperandVal(0);
37449 Flags = SetCC0->getOperand(1);
37450 return true;
37451}
37452
37453// When legalizing carry, we create carries via add X, -1
37454// If that comes from an actual carry, via setcc, we use the
37455// carry directly.
37456static SDValue combineCarryThroughADD(SDValue EFLAGS, SelectionDAG &DAG) {
37457 if (EFLAGS.getOpcode() == X86ISD::ADD) {
37458 if (isAllOnesConstant(EFLAGS.getOperand(1))) {
37459 SDValue Carry = EFLAGS.getOperand(0);
37460 while (Carry.getOpcode() == ISD::TRUNCATE ||
37461 Carry.getOpcode() == ISD::ZERO_EXTEND ||
37462 Carry.getOpcode() == ISD::SIGN_EXTEND ||
37463 Carry.getOpcode() == ISD::ANY_EXTEND ||
37464 (Carry.getOpcode() == ISD::AND &&
37465 isOneConstant(Carry.getOperand(1))))
37466 Carry = Carry.getOperand(0);
37467 if (Carry.getOpcode() == X86ISD::SETCC ||
37468 Carry.getOpcode() == X86ISD::SETCC_CARRY) {
37469 // TODO: Merge this code with equivalent in combineAddOrSubToADCOrSBB?
37470 uint64_t CarryCC = Carry.getConstantOperandVal(0);
37471 SDValue CarryOp1 = Carry.getOperand(1);
37472 if (CarryCC == X86::COND_B)
37473 return CarryOp1;
37474 if (CarryCC == X86::COND_A) {
37475 // Try to convert COND_A into COND_B in an attempt to facilitate
37476 // materializing "setb reg".
37477 //
37478 // Do not flip "e > c", where "c" is a constant, because Cmp
37479 // instruction cannot take an immediate as its first operand.
37480 //
37481 if (CarryOp1.getOpcode() == X86ISD::SUB &&
37482 CarryOp1.getNode()->hasOneUse() &&
37483 CarryOp1.getValueType().isInteger() &&
37484 !isa<ConstantSDNode>(CarryOp1.getOperand(1))) {
37485 SDValue SubCommute =
37486 DAG.getNode(X86ISD::SUB, SDLoc(CarryOp1), CarryOp1->getVTList(),
37487 CarryOp1.getOperand(1), CarryOp1.getOperand(0));
37488 return SDValue(SubCommute.getNode(), CarryOp1.getResNo());
37489 }
37490 }
37491 // If this is a check of the z flag of an add with 1, switch to the
37492 // C flag.
37493 if (CarryCC == X86::COND_E &&
37494 CarryOp1.getOpcode() == X86ISD::ADD &&
37495 isOneConstant(CarryOp1.getOperand(1)))
37496 return CarryOp1;
37497 }
37498 }
37499 }
37500
37501 return SDValue();
37502}
37503
37504/// Optimize an EFLAGS definition used according to the condition code \p CC
37505/// into a simpler EFLAGS value, potentially returning a new \p CC and replacing
37506/// uses of chain values.
37507static SDValue combineSetCCEFLAGS(SDValue EFLAGS, X86::CondCode &CC,
37508 SelectionDAG &DAG,
37509 const X86Subtarget &Subtarget) {
37510 if (CC == X86::COND_B)
37511 if (SDValue Flags = combineCarryThroughADD(EFLAGS, DAG))
37512 return Flags;
37513
37514 if (SDValue R = checkBoolTestSetCCCombine(EFLAGS, CC))
37515 return R;
37516 return combineSetCCAtomicArith(EFLAGS, CC, DAG, Subtarget);
37517}
37518
37519/// Optimize X86ISD::CMOV [LHS, RHS, CONDCODE (e.g. X86::COND_NE), CONDVAL]
37520static SDValue combineCMov(SDNode *N, SelectionDAG &DAG,
37521 TargetLowering::DAGCombinerInfo &DCI,
37522 const X86Subtarget &Subtarget) {
37523 SDLoc DL(N);
37524
37525 SDValue FalseOp = N->getOperand(0);
37526 SDValue TrueOp = N->getOperand(1);
37527 X86::CondCode CC = (X86::CondCode)N->getConstantOperandVal(2);
37528 SDValue Cond = N->getOperand(3);
37529
37530 // cmov X, X, ?, ? --> X
37531 if (TrueOp == FalseOp)
37532 return TrueOp;
37533
37534 // Try to simplify the EFLAGS and condition code operands.
37535 // We can't always do this as FCMOV only supports a subset of X86 cond.
37536 if (SDValue Flags = combineSetCCEFLAGS(Cond, CC, DAG, Subtarget)) {
37537 if (FalseOp.getValueType() != MVT::f80 || hasFPCMov(CC)) {
37538 SDValue Ops[] = {FalseOp, TrueOp, DAG.getTargetConstant(CC, DL, MVT::i8),
37539 Flags};
37540 return DAG.getNode(X86ISD::CMOV, DL, N->getValueType(0), Ops);
37541 }
37542 }
37543
37544 // If this is a select between two integer constants, try to do some
37545 // optimizations. Note that the operands are ordered the opposite of SELECT
37546 // operands.
37547 if (ConstantSDNode *TrueC = dyn_cast<ConstantSDNode>(TrueOp)) {
37548 if (ConstantSDNode *FalseC = dyn_cast<ConstantSDNode>(FalseOp)) {
37549 // Canonicalize the TrueC/FalseC values so that TrueC (the true value) is
37550 // larger than FalseC (the false value).
37551 if (TrueC->getAPIntValue().ult(FalseC->getAPIntValue())) {
37552 CC = X86::GetOppositeBranchCondition(CC);
37553 std::swap(TrueC, FalseC);
37554 std::swap(TrueOp, FalseOp);
37555 }
37556
37557 // Optimize C ? 8 : 0 -> zext(setcc(C)) << 3. Likewise for any pow2/0.
37558 // This is efficient for any integer data type (including i8/i16) and
37559 // shift amount.
37560 if (FalseC->getAPIntValue() == 0 && TrueC->getAPIntValue().isPowerOf2()) {
37561 Cond = getSETCC(CC, Cond, DL, DAG);
37562
37563 // Zero extend the condition if needed.
37564 Cond = DAG.getNode(ISD::ZERO_EXTEND, DL, TrueC->getValueType(0), Cond);
37565
37566 unsigned ShAmt = TrueC->getAPIntValue().logBase2();
37567 Cond = DAG.getNode(ISD::SHL, DL, Cond.getValueType(), Cond,
37568 DAG.getConstant(ShAmt, DL, MVT::i8));
37569 return Cond;
37570 }
37571
37572 // Optimize Cond ? cst+1 : cst -> zext(setcc(C)) + cst. This is efficient
37573 // for any integer data type, including i8/i16.
37574 if (FalseC->getAPIntValue()+1 == TrueC->getAPIntValue()) {
37575 Cond = getSETCC(CC, Cond, DL, DAG);
37576
37577 // Zero extend the condition if needed.
37578 Cond = DAG.getNode(ISD::ZERO_EXTEND, DL,
37579 FalseC->getValueType(0), Cond);
37580 Cond = DAG.getNode(ISD::ADD, DL, Cond.getValueType(), Cond,
37581 SDValue(FalseC, 0));
37582 return Cond;
37583 }
37584
37585 // Optimize cases that will turn into an LEA instruction. This requires
37586 // an i32 or i64 and an efficient multiplier (1, 2, 3, 4, 5, 8, 9).
37587 if (N->getValueType(0) == MVT::i32 || N->getValueType(0) == MVT::i64) {
37588 APInt Diff = TrueC->getAPIntValue() - FalseC->getAPIntValue();
37589 assert(Diff.getBitWidth() == N->getValueType(0).getSizeInBits() &&
37590 "Implicit constant truncation");
37591
37592 bool isFastMultiplier = false;
37593 if (Diff.ult(10)) {
37594 switch (Diff.getZExtValue()) {
37595 default: break;
37596 case 1: // result = add base, cond
37597 case 2: // result = lea base( , cond*2)
37598 case 3: // result = lea base(cond, cond*2)
37599 case 4: // result = lea base( , cond*4)
37600 case 5: // result = lea base(cond, cond*4)
37601 case 8: // result = lea base( , cond*8)
37602 case 9: // result = lea base(cond, cond*8)
37603 isFastMultiplier = true;
37604 break;
37605 }
37606 }
37607
37608 if (isFastMultiplier) {
37609 Cond = getSETCC(CC, Cond, DL ,DAG);
37610 // Zero extend the condition if needed.
37611 Cond = DAG.getNode(ISD::ZERO_EXTEND, DL, FalseC->getValueType(0),
37612 Cond);
37613 // Scale the condition by the difference.
37614 if (Diff != 1)
37615 Cond = DAG.getNode(ISD::MUL, DL, Cond.getValueType(), Cond,
37616 DAG.getConstant(Diff, DL, Cond.getValueType()));
37617
37618 // Add the base if non-zero.
37619 if (FalseC->getAPIntValue() != 0)
37620 Cond = DAG.getNode(ISD::ADD, DL, Cond.getValueType(), Cond,
37621 SDValue(FalseC, 0));
37622 return Cond;
37623 }
37624 }
37625 }
37626 }
37627
37628 // Handle these cases:
37629 // (select (x != c), e, c) -> (select (x != c), e, x),
37630 // (select (x == c), c, e) -> (select (x == c), x, e)
37631 // where the c is an integer constant, and the "select" is the combination
37632 // of CMOV and CMP.
37633 //
37634 // The rationale for this change is that a conditional-move from a constant
37635 // needs two instructions; however, a conditional-move from a register needs
37636 // only one instruction.
37637 //
37638 // CAVEAT: By replacing a constant with a symbolic value, it may obscure
37639 // some instruction-combining opportunities. This opt needs to be
37640 // postponed as late as possible.
37641 //
37642 if (!DCI.isBeforeLegalize() && !DCI.isBeforeLegalizeOps()) {
37643 // the DCI.xxxx conditions are provided to postpone the optimization as
37644 // late as possible.
37645
37646 ConstantSDNode *CmpAgainst = nullptr;
37647 if ((Cond.getOpcode() == X86ISD::CMP || Cond.getOpcode() == X86ISD::SUB) &&
37648 (CmpAgainst = dyn_cast<ConstantSDNode>(Cond.getOperand(1))) &&
37649 !isa<ConstantSDNode>(Cond.getOperand(0))) {
37650
37651 if (CC == X86::COND_NE &&
37652 CmpAgainst == dyn_cast<ConstantSDNode>(FalseOp)) {
37653 CC = X86::GetOppositeBranchCondition(CC);
37654 std::swap(TrueOp, FalseOp);
37655 }
37656
37657 if (CC == X86::COND_E &&
37658 CmpAgainst == dyn_cast<ConstantSDNode>(TrueOp)) {
37659 SDValue Ops[] = {FalseOp, Cond.getOperand(0),
37660 DAG.getTargetConstant(CC, DL, MVT::i8), Cond};
37661 return DAG.getNode(X86ISD::CMOV, DL, N->getValueType(0), Ops);
37662 }
37663 }
37664 }
37665
37666 // Fold and/or of setcc's to double CMOV:
37667 // (CMOV F, T, ((cc1 | cc2) != 0)) -> (CMOV (CMOV F, T, cc1), T, cc2)
37668 // (CMOV F, T, ((cc1 & cc2) != 0)) -> (CMOV (CMOV T, F, !cc1), F, !cc2)
37669 //
37670 // This combine lets us generate:
37671 // cmovcc1 (jcc1 if we don't have CMOV)
37672 // cmovcc2 (same)
37673 // instead of:
37674 // setcc1
37675 // setcc2
37676 // and/or
37677 // cmovne (jne if we don't have CMOV)
37678 // When we can't use the CMOV instruction, it might increase branch
37679 // mispredicts.
37680 // When we can use CMOV, or when there is no mispredict, this improves
37681 // throughput and reduces register pressure.
37682 //
37683 if (CC == X86::COND_NE) {
37684 SDValue Flags;
37685 X86::CondCode CC0, CC1;
37686 bool isAndSetCC;
37687 if (checkBoolTestAndOrSetCCCombine(Cond, CC0, CC1, Flags, isAndSetCC)) {
37688 if (isAndSetCC) {
37689 std::swap(FalseOp, TrueOp);
37690 CC0 = X86::GetOppositeBranchCondition(CC0);
37691 CC1 = X86::GetOppositeBranchCondition(CC1);
37692 }
37693
37694 SDValue LOps[] = {FalseOp, TrueOp,
37695 DAG.getTargetConstant(CC0, DL, MVT::i8), Flags};
37696 SDValue LCMOV = DAG.getNode(X86ISD::CMOV, DL, N->getValueType(0), LOps);
37697 SDValue Ops[] = {LCMOV, TrueOp, DAG.getTargetConstant(CC1, DL, MVT::i8),
37698 Flags};
37699 SDValue CMOV = DAG.getNode(X86ISD::CMOV, DL, N->getValueType(0), Ops);
37700 return CMOV;
37701 }
37702 }
37703
37704 // Fold (CMOV C1, (ADD (CTTZ X), C2), (X != 0)) ->
37705 // (ADD (CMOV C1-C2, (CTTZ X), (X != 0)), C2)
37706 // Or (CMOV (ADD (CTTZ X), C2), C1, (X == 0)) ->
37707 // (ADD (CMOV (CTTZ X), C1-C2, (X == 0)), C2)
37708 if ((CC == X86::COND_NE || CC == X86::COND_E) &&
37709 Cond.getOpcode() == X86ISD::CMP && isNullConstant(Cond.getOperand(1))) {
37710 SDValue Add = TrueOp;
37711 SDValue Const = FalseOp;
37712 // Canonicalize the condition code for easier matching and output.
37713 if (CC == X86::COND_E)
37714 std::swap(Add, Const);
37715
37716 // We might have replaced the constant in the cmov with the LHS of the
37717 // compare. If so change it to the RHS of the compare.
37718 if (Const == Cond.getOperand(0))
37719 Const = Cond.getOperand(1);
37720
37721 // Ok, now make sure that Add is (add (cttz X), C2) and Const is a constant.
37722 if (isa<ConstantSDNode>(Const) && Add.getOpcode() == ISD::ADD &&
37723 Add.hasOneUse() && isa<ConstantSDNode>(Add.getOperand(1)) &&
37724 (Add.getOperand(0).getOpcode() == ISD::CTTZ_ZERO_UNDEF ||
37725 Add.getOperand(0).getOpcode() == ISD::CTTZ) &&
37726 Add.getOperand(0).getOperand(0) == Cond.getOperand(0)) {
37727 EVT VT = N->getValueType(0);
37728 // This should constant fold.
37729 SDValue Diff = DAG.getNode(ISD::SUB, DL, VT, Const, Add.getOperand(1));
37730 SDValue CMov =
37731 DAG.getNode(X86ISD::CMOV, DL, VT, Diff, Add.getOperand(0),
37732 DAG.getTargetConstant(X86::COND_NE, DL, MVT::i8), Cond);
37733 return DAG.getNode(ISD::ADD, DL, VT, CMov, Add.getOperand(1));
37734 }
37735 }
37736
37737 return SDValue();
37738}
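// Compile-time check of the "C ? Pow2 : 0 --> zext(setcc(C)) << log2(Pow2)"
// rewrite above (the constant 8 / shift 3 pair is just an example):
constexpr unsigned selectPow2Or0(bool C) {
  return static_cast<unsigned>(C) << 3; // equivalent to C ? 8u : 0u
}
static_assert(selectPow2Or0(true) == 8u, "true selects the power of two");
static_assert(selectPow2Or0(false) == 0u, "false selects zero");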
37739
37740/// Different mul shrinking modes.
37741enum ShrinkMode { MULS8, MULU8, MULS16, MULU16 };
37742
37743static bool canReduceVMulWidth(SDNode *N, SelectionDAG &DAG, ShrinkMode &Mode) {
37744 EVT VT = N->getOperand(0).getValueType();
37745 if (VT.getScalarSizeInBits() != 32)
37746 return false;
37747
37748 assert(N->getNumOperands() == 2 && "NumOperands of Mul are 2");
37749 unsigned SignBits[2] = {1, 1};
37750 bool IsPositive[2] = {false, false};
37751 for (unsigned i = 0; i < 2; i++) {
37752 SDValue Opd = N->getOperand(i);
37753
37754 SignBits[i] = DAG.ComputeNumSignBits(Opd);
37755 IsPositive[i] = DAG.SignBitIsZero(Opd);
37756 }
37757
37758 bool AllPositive = IsPositive[0] && IsPositive[1];
37759 unsigned MinSignBits = std::min(SignBits[0], SignBits[1]);
37760 // When ranges are from -128 ~ 127, use MULS8 mode.
37761 if (MinSignBits >= 25)
37762 Mode = MULS8;
37763 // When ranges are from 0 ~ 255, use MULU8 mode.
37764 else if (AllPositive && MinSignBits >= 24)
37765 Mode = MULU8;
37766 // When ranges are from -32768 ~ 32767, use MULS16 mode.
37767 else if (MinSignBits >= 17)
37768 Mode = MULS16;
37769 // When ranges are from 0 ~ 65535, use MULU16 mode.
37770 else if (AllPositive && MinSignBits >= 16)
37771 Mode = MULU16;
37772 else
37773 return false;
37774 return true;
37775}
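// The sign-bit thresholds above are just range facts about the 32-bit
// elements; for a scalar they can be written directly (hypothetical helpers,
// shown only to spell out the arithmetic):
//   >= 25 sign bits  <=>  sign-extending the low 8 bits reproduces the value
//   >= 24 sign bits with a known-zero sign bit  <=>  the value is in [0, 255]
// and the 16-bit modes follow the same pattern with 17 and 16 sign bits.
static bool fitsSigned8(int32_t V) { return int32_t(int8_t(V)) == V; }
static bool fitsUnsigned8(int32_t V) { return (V & ~int32_t(0xFF)) == 0; }
static bool fitsSigned16(int32_t V) { return int32_t(int16_t(V)) == V; }
static bool fitsUnsigned16(int32_t V) { return (V & ~int32_t(0xFFFF)) == 0; }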
37776
37777/// When the operands of vector mul are extended from smaller size values,
37778/// like i8 and i16, the type of mul may be shrunk to generate more
37779/// efficient code. Two typical patterns are handled:
37780/// Pattern1:
37781/// %2 = sext/zext <N x i8> %1 to <N x i32>
37782/// %4 = sext/zext <N x i8> %3 to <N x i32>
37783/// or %4 = build_vector <N x i32> %C1, ..., %CN (%C1..%CN are constants)
37784/// %5 = mul <N x i32> %2, %4
37785///
37786/// Pattern2:
37787/// %2 = zext/sext <N x i16> %1 to <N x i32>
37788/// %4 = zext/sext <N x i16> %3 to <N x i32>
37789/// or %4 = build_vector <N x i32> %C1, ..., %CN (%C1..%CN are constants)
37790/// %5 = mul <N x i32> %2, %4
37791///
37792/// There are four mul shrinking modes:
37793/// If %2 == sext32(trunc8(%2)), i.e., the scalar value range of %2 is
37794/// -128 to 127, and the scalar value range of %4 is also -128 to 127,
37795/// generate pmullw+sext32 for it (MULS8 mode).
37796/// If %2 == zext32(trunc8(%2)), i.e., the scalar value range of %2 is
37797/// 0 to 255, and the scalar value range of %4 is also 0 to 255,
37798/// generate pmullw+zext32 for it (MULU8 mode).
37799/// If %2 == sext32(trunc16(%2)), i.e., the scalar value range of %2 is
37800/// -32768 to 32767, and the scalar value range of %4 is also -32768 to 32767,
37801/// generate pmullw+pmulhw for it (MULS16 mode).
37802/// If %2 == zext32(trunc16(%2)), i.e., the scalar value range of %2 is
37803/// 0 to 65535, and the scalar value range of %4 is also 0 to 65535,
37804/// generate pmullw+pmulhuw for it (MULU16 mode).
37805static SDValue reduceVMULWidth(SDNode *N, SelectionDAG &DAG,
37806 const X86Subtarget &Subtarget) {
37807 // Check for legality
37808 // pmullw/pmulhw require SSE2; they are not available with SSE1 alone.
37809 if (!Subtarget.hasSSE2())
37810 return SDValue();
37811
37812 // Check for profitability
37813 // pmulld is supported since SSE41. It is better to use pmulld
37814 // instead of pmullw+pmulhw, except for subtargets where pmulld is slower than
37815 // the expansion.
37816 bool OptForMinSize = DAG.getMachineFunction().getFunction().hasMinSize();
37817 if (Subtarget.hasSSE41() && (OptForMinSize || !Subtarget.isPMULLDSlow()))
37818 return SDValue();
37819
37820 ShrinkMode Mode;
37821 if (!canReduceVMulWidth(N, DAG, Mode))
37822 return SDValue();
37823
37824 SDLoc DL(N);
37825 SDValue N0 = N->getOperand(0);
37826 SDValue N1 = N->getOperand(1);
37827 EVT VT = N->getOperand(0).getValueType();
37828 unsigned NumElts = VT.getVectorNumElements();
37829 if ((NumElts % 2) != 0)
37830 return SDValue();
37831
37832 EVT ReducedVT = EVT::getVectorVT(*DAG.getContext(), MVT::i16, NumElts);
37833
37834 // Shrink the operands of mul.
37835 SDValue NewN0 = DAG.getNode(ISD::TRUNCATE, DL, ReducedVT, N0);
37836 SDValue NewN1 = DAG.getNode(ISD::TRUNCATE, DL, ReducedVT, N1);
37837
37838 // Generate the lower part of mul: pmullw. For MULU8/MULS8, only the
37839 // lower part is needed.
37840 SDValue MulLo = DAG.getNode(ISD::MUL, DL, ReducedVT, NewN0, NewN1);
37841 if (Mode == MULU8 || Mode == MULS8)
37842 return DAG.getNode((Mode == MULU8) ? ISD::ZERO_EXTEND : ISD::SIGN_EXTEND,
37843 DL, VT, MulLo);
37844
37845 MVT ResVT = MVT::getVectorVT(MVT::i32, NumElts / 2);
37846 // Generate the higher part of mul: pmulhw/pmulhuw. For MULU16/MULS16,
37847 // the higher part is also needed.
37848 SDValue MulHi = DAG.getNode(Mode == MULS16 ? ISD::MULHS : ISD::MULHU, DL,
37849 ReducedVT, NewN0, NewN1);
37850
37851 // Repack the lower part and higher part result of mul into a wider
37852 // result.
37853 // Generate shuffle functioning as punpcklwd.
37854 SmallVector<int, 16> ShuffleMask(NumElts);
37855 for (unsigned i = 0, e = NumElts / 2; i < e; i++) {
37856 ShuffleMask[2 * i] = i;
37857 ShuffleMask[2 * i + 1] = i + NumElts;
37858 }
37859 SDValue ResLo =
37860 DAG.getVectorShuffle(ReducedVT, DL, MulLo, MulHi, ShuffleMask);
37861 ResLo = DAG.getBitcast(ResVT, ResLo);
37862 // Generate shuffle functioning as punpckhwd.
37863 for (unsigned i = 0, e = NumElts / 2; i < e; i++) {
37864 ShuffleMask[2 * i] = i + NumElts / 2;
37865 ShuffleMask[2 * i + 1] = i + NumElts * 3 / 2;
37866 }
37867 SDValue ResHi =
37868 DAG.getVectorShuffle(ReducedVT, DL, MulLo, MulHi, ShuffleMask);
37869 ResHi = DAG.getBitcast(ResVT, ResHi);
37870 return DAG.getNode(ISD::CONCAT_VECTORS, DL, VT, ResLo, ResHi);
37871}
37872
37873static SDValue combineMulSpecial(uint64_t MulAmt, SDNode *N, SelectionDAG &DAG,
37874 EVT VT, const SDLoc &DL) {
37875
37876 auto combineMulShlAddOrSub = [&](int Mult, int Shift, bool isAdd) {
37877 SDValue Result = DAG.getNode(X86ISD::MUL_IMM, DL, VT, N->getOperand(0),
37878 DAG.getConstant(Mult, DL, VT));
37879 Result = DAG.getNode(ISD::SHL, DL, VT, Result,
37880 DAG.getConstant(Shift, DL, MVT::i8));
37881 Result = DAG.getNode(isAdd ? ISD::ADD : ISD::SUB, DL, VT, Result,
37882 N->getOperand(0));
37883 return Result;
37884 };
37885
37886 auto combineMulMulAddOrSub = [&](int Mul1, int Mul2, bool isAdd) {
37887 SDValue Result = DAG.getNode(X86ISD::MUL_IMM, DL, VT, N->getOperand(0),
37888 DAG.getConstant(Mul1, DL, VT));
37889 Result = DAG.getNode(X86ISD::MUL_IMM, DL, VT, Result,
37890 DAG.getConstant(Mul2, DL, VT));
37891 Result = DAG.getNode(isAdd ? ISD::ADD : ISD::SUB, DL, VT, Result,
37892 N->getOperand(0));
37893 return Result;
37894 };
37895
37896 switch (MulAmt) {
37897 default:
37898 break;
37899 case 11:
37900 // mul x, 11 => add ((shl (mul x, 5), 1), x)
37901 return combineMulShlAddOrSub(5, 1, /*isAdd*/ true);
37902 case 21:
37903 // mul x, 21 => add ((shl (mul x, 5), 2), x)
37904 return combineMulShlAddOrSub(5, 2, /*isAdd*/ true);
37905 case 41:
37906 // mul x, 41 => add ((shl (mul x, 5), 3), x)
37907 return combineMulShlAddOrSub(5, 3, /*isAdd*/ true);
37908 case 22:
37909 // mul x, 22 => add (add ((shl (mul x, 5), 2), x), x)
37910 return DAG.getNode(ISD::ADD, DL, VT, N->getOperand(0),
37911 combineMulShlAddOrSub(5, 2, /*isAdd*/ true));
37912 case 19:
37913 // mul x, 19 => add ((shl (mul x, 9), 1), x)
37914 return combineMulShlAddOrSub(9, 1, /*isAdd*/ true);
37915 case 37:
37916 // mul x, 37 => add ((shl (mul x, 9), 2), x)
37917 return combineMulShlAddOrSub(9, 2, /*isAdd*/ true);
37918 case 73:
37919 // mul x, 73 => add ((shl (mul x, 9), 3), x)
37920 return combineMulShlAddOrSub(9, 3, /*isAdd*/ true);
37921 case 13:
37922 // mul x, 13 => add ((shl (mul x, 3), 2), x)
37923 return combineMulShlAddOrSub(3, 2, /*isAdd*/ true);
37924 case 23:
37925 // mul x, 23 => sub ((shl (mul x, 3), 3), x)
37926 return combineMulShlAddOrSub(3, 3, /*isAdd*/ false);
37927 case 26:
37928 // mul x, 26 => add ((mul (mul x, 5), 5), x)
37929 return combineMulMulAddOrSub(5, 5, /*isAdd*/ true);
37930 case 28:
37931 // mul x, 28 => add ((mul (mul x, 9), 3), x)
37932 return combineMulMulAddOrSub(9, 3, /*isAdd*/ true);
37933 case 29:
37934 // mul x, 29 => add (add ((mul (mul x, 9), 3), x), x)
37935 return DAG.getNode(ISD::ADD, DL, VT, N->getOperand(0),
37936 combineMulMulAddOrSub(9, 3, /*isAdd*/ true));
37937 }
37938
37939 // Another trick. If this is a power of 2 plus 2/4/8, we can use a shift followed
37940 // by a single LEA.
37941 // First check if this is a sum of two powers of 2 because that's easy. Then
37942 // count how many trailing zeros precede the first set bit.
37943 // TODO: We can do this even without LEA at a cost of two shifts and an add.
37944 if (isPowerOf2_64(MulAmt & (MulAmt - 1))) {
37945 unsigned ScaleShift = countTrailingZeros(MulAmt);
37946 if (ScaleShift >= 1 && ScaleShift < 4) {
37947 unsigned ShiftAmt = Log2_64((MulAmt & (MulAmt - 1)));
37948 SDValue Shift1 = DAG.getNode(ISD::SHL, DL, VT, N->getOperand(0),
37949 DAG.getConstant(ShiftAmt, DL, MVT::i8));
37950 SDValue Shift2 = DAG.getNode(ISD::SHL, DL, VT, N->getOperand(0),
37951 DAG.getConstant(ScaleShift, DL, MVT::i8));
37952 return DAG.getNode(ISD::ADD, DL, VT, Shift1, Shift2);
37953 }
37954 }
37955
37956 return SDValue();
37957}
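// A few of the decompositions above, checked at compile time (the constexpr
// helpers are illustrative only):
constexpr unsigned mul11(unsigned X) { return ((X * 5) << 1) + X; }  // 11 = 5*2 + 1
constexpr unsigned mul23(unsigned X) { return ((X * 3) << 3) - X; }  // 23 = 3*8 - 1
constexpr unsigned mul29(unsigned X) { return (X * 9) * 3 + X + X; } // 29 = 9*3 + 1 + 1
static_assert(mul11(7) == 7 * 11, "mul x, 11 via lea+shl+add");
static_assert(mul23(7) == 7 * 23, "mul x, 23 via lea+shl+sub");
static_assert(mul29(7) == 7 * 29, "mul x, 29 via lea+lea+add+add");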
37958
37959// If the upper 17 bits of each element are zero then we can use PMADDWD,
37960// which is always at least as quick as PMULLD, except on KNL.
37961static SDValue combineMulToPMADDWD(SDNode *N, SelectionDAG &DAG,
37962 const X86Subtarget &Subtarget) {
37963 if (!Subtarget.hasSSE2())
37964 return SDValue();
37965
37966 if (Subtarget.isPMADDWDSlow())
37967 return SDValue();
37968
37969 EVT VT = N->getValueType(0);
37970
37971 // Only support vXi32 vectors.
37972 if (!VT.isVector() || VT.getVectorElementType() != MVT::i32)
37973 return SDValue();
37974
37975 // Make sure the vXi16 type is legal. This covers the AVX512 without BWI case.
37976 // Also allow v2i32 if it will be widened.
37977 MVT WVT = MVT::getVectorVT(MVT::i16, 2 * VT.getVectorNumElements());
37978 if (VT != MVT::v2i32 && !DAG.getTargetLoweringInfo().isTypeLegal(WVT))
37979 return SDValue();
37980
37981 SDValue N0 = N->getOperand(0);
37982 SDValue N1 = N->getOperand(1);
37983
37984 // If we are zero extending two steps without SSE4.1, it's better to reduce
37985 // the vmul width instead.
37986 if (!Subtarget.hasSSE41() &&
37987 (N0.getOpcode() == ISD::ZERO_EXTEND &&
37988 N0.getOperand(0).getScalarValueSizeInBits() <= 8) &&
37989 (N1.getOpcode() == ISD::ZERO_EXTEND &&
37990 N1.getOperand(0).getScalarValueSizeInBits() <= 8))
37991 return SDValue();
37992
37993 APInt Mask17 = APInt::getHighBitsSet(32, 17);
37994 if (!DAG.MaskedValueIsZero(N1, Mask17) ||
37995 !DAG.MaskedValueIsZero(N0, Mask17))
37996 return SDValue();
37997
37998 // Use SplitOpsAndApply to handle AVX splitting.
37999 auto PMADDWDBuilder = [](SelectionDAG &DAG, const SDLoc &DL,
38000 ArrayRef<SDValue> Ops) {
38001 MVT OpVT = MVT::getVectorVT(MVT::i32, Ops[0].getValueSizeInBits() / 32);
38002 return DAG.getNode(X86ISD::VPMADDWD, DL, OpVT, Ops);
38003 };
38004 return SplitOpsAndApply(DAG, Subtarget, SDLoc(N), VT,
38005 { DAG.getBitcast(WVT, N0), DAG.getBitcast(WVT, N1) },
38006 PMADDWDBuilder);
38007}
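// Why "upper 17 bits zero" is sufficient: each 32-bit element is then a
// non-negative value below (1 << 15), so its high 16-bit half is zero and its
// low half is non-negative as a signed i16. One result lane of PMADDWD,
// modelled on scalars (hypothetical helper; products widened to 64 bits only
// to keep the model free of signed overflow):
static int32_t pmaddwdLane(uint32_t A, uint32_t B) {
  int32_t ALo = int16_t(A & 0xFFFF), AHi = int16_t(A >> 16);
  int32_t BLo = int16_t(B & 0xFFFF), BHi = int16_t(B >> 16);
  return int32_t(int64_t(ALo) * BLo + int64_t(AHi) * BHi);
}
// Under the precondition the high products are zero, so the lane equals the
// exact 32-bit product A * B that the original vXi32 multiply would compute.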
38008
38009static SDValue combineMulToPMULDQ(SDNode *N, SelectionDAG &DAG,
38010 const X86Subtarget &Subtarget) {
38011 if (!Subtarget.hasSSE2())
38012 return SDValue();
38013
38014 EVT VT = N->getValueType(0);
38015
38016 // Only support vXi64 vectors.
38017 if (!VT.isVector() || VT.getVectorElementType() != MVT::i64 ||
38018 VT.getVectorNumElements() < 2 ||
38019 !isPowerOf2_32(VT.getVectorNumElements()))
38020 return SDValue();
38021
38022 SDValue N0 = N->getOperand(0);
38023 SDValue N1 = N->getOperand(1);
38024
38025 // PMULDQ returns the 64-bit result of the signed multiplication of the lower
38026 // 32-bits. We can lower with this if the sign bits stretch that far.
38027 if (Subtarget.hasSSE41() && DAG.ComputeNumSignBits(N0) > 32 &&
38028 DAG.ComputeNumSignBits(N1) > 32) {
38029 auto PMULDQBuilder = [](SelectionDAG &DAG, const SDLoc &DL,
38030 ArrayRef<SDValue> Ops) {
38031 return DAG.getNode(X86ISD::PMULDQ, DL, Ops[0].getValueType(), Ops);
38032 };
38033 return SplitOpsAndApply(DAG, Subtarget, SDLoc(N), VT, { N0, N1 },
38034 PMULDQBuilder, /*CheckBWI*/false);
38035 }
38036
38037 // If the upper bits are zero we can use a single pmuludq.
38038 APInt Mask = APInt::getHighBitsSet(64, 32);
38039 if (DAG.MaskedValueIsZero(N0, Mask) && DAG.MaskedValueIsZero(N1, Mask)) {
38040 auto PMULUDQBuilder = [](SelectionDAG &DAG, const SDLoc &DL,
38041 ArrayRef<SDValue> Ops) {
38042 return DAG.getNode(X86ISD::PMULUDQ, DL, Ops[0].getValueType(), Ops);
38043 };
38044 return SplitOpsAndApply(DAG, Subtarget, SDLoc(N), VT, { N0, N1 },
38045 PMULUDQBuilder, /*CheckBWI*/false);
38046 }
38047
38048 return SDValue();
38049}
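
[Editor's sketch] The PMULUDQ path above is justified by a simple fact: when the upper 32 bits of both 64-bit lanes are zero, multiplying only the low 32-bit halves as unsigned already yields the full 64-bit product. A standalone scalar model, not LLVM code:

#include <cassert>
#include <cstdint>

// One lane of PMULUDQ: 64-bit product of the low 32 bits of each operand.
static uint64_t pmuludqLane(uint64_t A, uint64_t B) {
  return uint64_t(uint32_t(A)) * uint64_t(uint32_t(B));
}

int main() {
  uint64_t A = 0x12345678, B = 0x0FEDCBA9; // high 32 bits known zero
  assert(pmuludqLane(A, B) == A * B);
  return 0;
}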
38050
38051/// Optimize a single multiply with constant into two operations in order to
38052/// implement it with two cheaper instructions, e.g. LEA + SHL, LEA + LEA.
38053static SDValue combineMul(SDNode *N, SelectionDAG &DAG,
38054 TargetLowering::DAGCombinerInfo &DCI,
38055 const X86Subtarget &Subtarget) {
38056 EVT VT = N->getValueType(0);
38057
38058 if (SDValue V = combineMulToPMADDWD(N, DAG, Subtarget))
38059 return V;
38060
38061 if (SDValue V = combineMulToPMULDQ(N, DAG, Subtarget))
38062 return V;
38063
38064 if (DCI.isBeforeLegalize() && VT.isVector())
38065 return reduceVMULWidth(N, DAG, Subtarget);
38066
38067 if (!MulConstantOptimization)
38068 return SDValue();
38069 // An imul is usually smaller than the alternative sequence.
38070 if (DAG.getMachineFunction().getFunction().hasMinSize())
38071 return SDValue();
38072
38073 if (DCI.isBeforeLegalize() || DCI.isCalledByLegalizer())
38074 return SDValue();
38075
38076 if (VT != MVT::i64 && VT != MVT::i32)
38077 return SDValue();
38078
38079 ConstantSDNode *C = dyn_cast<ConstantSDNode>(N->getOperand(1));
38080 if (!C)
38081 return SDValue();
38082 if (isPowerOf2_64(C->getZExtValue()))
38083 return SDValue();
38084
38085 int64_t SignMulAmt = C->getSExtValue();
38086 assert(SignMulAmt != INT64_MIN && "Int min should have been handled!");
38087 uint64_t AbsMulAmt = SignMulAmt < 0 ? -SignMulAmt : SignMulAmt;
38088
38089 SDLoc DL(N);
38090 if (AbsMulAmt == 3 || AbsMulAmt == 5 || AbsMulAmt == 9) {
38091 SDValue NewMul = DAG.getNode(X86ISD::MUL_IMM, DL, VT, N->getOperand(0),
38092 DAG.getConstant(AbsMulAmt, DL, VT));
38093 if (SignMulAmt < 0)
38094 NewMul = DAG.getNode(ISD::SUB, DL, VT, DAG.getConstant(0, DL, VT),
38095 NewMul);
38096
38097 return NewMul;
38098 }
38099
38100 uint64_t MulAmt1 = 0;
38101 uint64_t MulAmt2 = 0;
38102 if ((AbsMulAmt % 9) == 0) {
38103 MulAmt1 = 9;
38104 MulAmt2 = AbsMulAmt / 9;
38105 } else if ((AbsMulAmt % 5) == 0) {
38106 MulAmt1 = 5;
38107 MulAmt2 = AbsMulAmt / 5;
38108 } else if ((AbsMulAmt % 3) == 0) {
38109 MulAmt1 = 3;
38110 MulAmt2 = AbsMulAmt / 3;
38111 }
38112
38113 SDValue NewMul;
38114 // For negative multiply amounts, only allow MulAmt2 to be a power of 2.
38115 if (MulAmt2 &&
38116 (isPowerOf2_64(MulAmt2) ||
38117 (SignMulAmt >= 0 && (MulAmt2 == 3 || MulAmt2 == 5 || MulAmt2 == 9)))) {
38118
38119 if (isPowerOf2_64(MulAmt2) &&
38120 !(SignMulAmt >= 0 && N->hasOneUse() &&
38121 N->use_begin()->getOpcode() == ISD::ADD))
38122 // If the second multiplier is pow2, issue it first. We want the multiply by
38123 // 3, 5, or 9 to be folded into the addressing mode unless the lone use
38124 // is an add. Only do this for positive multiply amounts since the
38125 // negate would prevent it from being used as an address mode anyway.
38126 std::swap(MulAmt1, MulAmt2);
38127
38128 if (isPowerOf2_64(MulAmt1))
38129 NewMul = DAG.getNode(ISD::SHL, DL, VT, N->getOperand(0),
38130 DAG.getConstant(Log2_64(MulAmt1), DL, MVT::i8));
38131 else
38132 NewMul = DAG.getNode(X86ISD::MUL_IMM, DL, VT, N->getOperand(0),
38133 DAG.getConstant(MulAmt1, DL, VT));
38134
38135 if (isPowerOf2_64(MulAmt2))
38136 NewMul = DAG.getNode(ISD::SHL, DL, VT, NewMul,
38137 DAG.getConstant(Log2_64(MulAmt2), DL, MVT::i8));
38138 else
38139 NewMul = DAG.getNode(X86ISD::MUL_IMM, DL, VT, NewMul,
38140 DAG.getConstant(MulAmt2, DL, VT));
38141
38142 // Negate the result.
38143 if (SignMulAmt < 0)
38144 NewMul = DAG.getNode(ISD::SUB, DL, VT, DAG.getConstant(0, DL, VT),
38145 NewMul);
38146 } else if (!Subtarget.slowLEA())
38147 NewMul = combineMulSpecial(C->getZExtValue(), N, DAG, VT, DL);
38148
38149 if (!NewMul) {
38150 assert(C->getZExtValue() != 0 &&
38151        C->getZExtValue() != (VT == MVT::i64 ? UINT64_MAX : UINT32_MAX) &&
38152        "Both cases that could cause potential overflows should have "
38153        "already been handled.");
38154 if (isPowerOf2_64(AbsMulAmt - 1)) {
38155 // (mul x, 2^N + 1) => (add (shl x, N), x)
38156 NewMul = DAG.getNode(
38157 ISD::ADD, DL, VT, N->getOperand(0),
38158 DAG.getNode(ISD::SHL, DL, VT, N->getOperand(0),
38159 DAG.getConstant(Log2_64(AbsMulAmt - 1), DL,
38160 MVT::i8)));
38161 // To negate, subtract the number from zero
38162 if (SignMulAmt < 0)
38163 NewMul = DAG.getNode(ISD::SUB, DL, VT,
38164 DAG.getConstant(0, DL, VT), NewMul);
38165 } else if (isPowerOf2_64(AbsMulAmt + 1)) {
38166 // (mul x, 2^N - 1) => (sub (shl x, N), x)
38167 NewMul = DAG.getNode(ISD::SHL, DL, VT, N->getOperand(0),
38168 DAG.getConstant(Log2_64(AbsMulAmt + 1),
38169 DL, MVT::i8));
38170 // To negate, reverse the operands of the subtract.
38171 if (SignMulAmt < 0)
38172 NewMul = DAG.getNode(ISD::SUB, DL, VT, N->getOperand(0), NewMul);
38173 else
38174 NewMul = DAG.getNode(ISD::SUB, DL, VT, NewMul, N->getOperand(0));
38175 } else if (SignMulAmt >= 0 && isPowerOf2_64(AbsMulAmt - 2)) {
38176 // (mul x, 2^N + 2) => (add (add (shl x, N), x), x)
38177 NewMul = DAG.getNode(ISD::SHL, DL, VT, N->getOperand(0),
38178 DAG.getConstant(Log2_64(AbsMulAmt - 2),
38179 DL, MVT::i8));
38180 NewMul = DAG.getNode(ISD::ADD, DL, VT, NewMul, N->getOperand(0));
38181 NewMul = DAG.getNode(ISD::ADD, DL, VT, NewMul, N->getOperand(0));
38182 } else if (SignMulAmt >= 0 && isPowerOf2_64(AbsMulAmt + 2)) {
38183 // (mul x, 2^N - 2) => (sub (sub (shl x, N), x), x)
38184 NewMul = DAG.getNode(ISD::SHL, DL, VT, N->getOperand(0),
38185 DAG.getConstant(Log2_64(AbsMulAmt + 2),
38186 DL, MVT::i8));
38187 NewMul = DAG.getNode(ISD::SUB, DL, VT, NewMul, N->getOperand(0));
38188 NewMul = DAG.getNode(ISD::SUB, DL, VT, NewMul, N->getOperand(0));
38189 }
38190 }
38191
38192 return NewMul;
38193}
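
[Editor's sketch] The fallback branches at the end of combineMul rely on algebraic identities for constants one or two away from a power of 2. A standalone check of those identities (illustrative only; any power of 2 works):

#include <cassert>
#include <cstdint>

int main() {
  const unsigned N = 4; // 2^4 == 16
  for (uint64_t X = 0; X != 1024; ++X) {
    assert(X * 17 == (X << N) + X);       // 2^N + 1: add (shl x, N), x
    assert(X * 15 == (X << N) - X);       // 2^N - 1: sub (shl x, N), x
    assert(X * 18 == ((X << N) + X) + X); // 2^N + 2: shift plus two adds
    assert(X * 14 == ((X << N) - X) - X); // 2^N - 2: shift plus two subs
  }
  return 0;
}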
38194
38195static SDValue combineShiftLeft(SDNode *N, SelectionDAG &DAG) {
38196 SDValue N0 = N->getOperand(0);
38197 SDValue N1 = N->getOperand(1);
38198 ConstantSDNode *N1C = dyn_cast<ConstantSDNode>(N1);
38199 EVT VT = N0.getValueType();
38200
38201 // fold (shl (and (setcc_c), c1), c2) -> (and setcc_c, (c1 << c2))
38202 // since the result of setcc_c is all zero's or all ones.
38203 if (VT.isInteger() && !VT.isVector() &&
38204 N1C && N0.getOpcode() == ISD::AND &&
38205 N0.getOperand(1).getOpcode() == ISD::Constant) {
38206 SDValue N00 = N0.getOperand(0);
38207 APInt Mask = N0.getConstantOperandAPInt(1);
38208 Mask <<= N1C->getAPIntValue();
38209 bool MaskOK = false;
38210 // We can handle cases concerning bit-widening nodes containing setcc_c if
38211 // we carefully interrogate the mask to make sure we are semantics
38212 // preserving.
38213 // The transform is not safe if the result of C1 << C2 exceeds the bitwidth
38214 // of the underlying setcc_c operation if the setcc_c was zero extended.
38215 // Consider the following example:
38216 // zext(setcc_c) -> i32 0x0000FFFF
38217 // c1 -> i32 0x0000FFFF
38218 // c2 -> i32 0x00000001
38219 // (shl (and (setcc_c), c1), c2) -> i32 0x0001FFFE
38220 // (and setcc_c, (c1 << c2)) -> i32 0x0000FFFE
38221 if (N00.getOpcode() == X86ISD::SETCC_CARRY) {
38222 MaskOK = true;
38223 } else if (N00.getOpcode() == ISD::SIGN_EXTEND &&
38224 N00.getOperand(0).getOpcode() == X86ISD::SETCC_CARRY) {
38225 MaskOK = true;
38226 } else if ((N00.getOpcode() == ISD::ZERO_EXTEND ||
38227 N00.getOpcode() == ISD::ANY_EXTEND) &&
38228 N00.getOperand(0).getOpcode() == X86ISD::SETCC_CARRY) {
38229 MaskOK = Mask.isIntN(N00.getOperand(0).getValueSizeInBits());
38230 }
38231 if (MaskOK && Mask != 0) {
38232 SDLoc DL(N);
38233 return DAG.getNode(ISD::AND, DL, VT, N00, DAG.getConstant(Mask, DL, VT));
38234 }
38235 }
38236
38237 // Hardware support for vector shifts is sparse, which makes us scalarize the
38238 // vector operations in many cases. Also, on Sandy Bridge ADD is faster than
38239 // SHL.
38240 // (shl V, 1) -> add V,V
38241 if (auto *N1BV = dyn_cast<BuildVectorSDNode>(N1))
38242 if (auto *N1SplatC = N1BV->getConstantSplatNode()) {
38243 assert(N0.getValueType().isVector() && "Invalid vector shift type");
38244 // We shift all of the values by one. In many cases we do not have
38245 // hardware support for this operation. This is better expressed as an ADD
38246 // of two values.
38247 if (N1SplatC->getAPIntValue() == 1)
38248 return DAG.getNode(ISD::ADD, SDLoc(N), VT, N0, N0);
38249 }
38250
38251 return SDValue();
38252}
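
[Editor's sketch] The (shl (and setcc_c, c1), c2) fold above hinges on setcc_c being all zeros or all ones; for such values, masking then shifting equals masking with the pre-shifted constant (the surrounding code additionally guards the zero-extended case spelled out in the comment). A standalone illustration:

#include <cassert>
#include <cstdint>

int main() {
  const uint32_t C1 = 0x0000FFFF;
  const unsigned C2 = 4;
  for (uint32_t SetccC : {0u, ~0u}) // setcc_c is all zeros or all ones
    assert(((SetccC & C1) << C2) == (SetccC & (C1 << C2)));
  return 0;
}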
38253
38254static SDValue combineShiftRightArithmetic(SDNode *N, SelectionDAG &DAG) {
38255 SDValue N0 = N->getOperand(0);
38256 SDValue N1 = N->getOperand(1);
38257 EVT VT = N0.getValueType();
38258 unsigned Size = VT.getSizeInBits();
38259
38260 // fold (ashr (shl, a, [56,48,32,24,16]), SarConst)
38261 // into (shl, (sext (a), [56,48,32,24,16] - SarConst)) or
38262 // into (lshr, (sext (a), SarConst - [56,48,32,24,16]))
38263 // depending on sign of (SarConst - [56,48,32,24,16])
38264
38265 // sexts in X86 are MOVs. The MOVs have the same code size
38266 // as the above SHIFTs (only a SHIFT by 1 has lower code size).
38267 // However, the MOVs have two advantages over a SHIFT:
38268 // 1. MOVs can write to a register that differs from the source.
38269 // 2. MOVs accept memory operands.
38270
38271 if (VT.isVector() || N1.getOpcode() != ISD::Constant ||
38272 N0.getOpcode() != ISD::SHL || !N0.hasOneUse() ||
38273 N0.getOperand(1).getOpcode() != ISD::Constant)
38274 return SDValue();
38275
38276 SDValue N00 = N0.getOperand(0);
38277 SDValue N01 = N0.getOperand(1);
38278 APInt ShlConst = (cast<ConstantSDNode>(N01))->getAPIntValue();
38279 APInt SarConst = (cast<ConstantSDNode>(N1))->getAPIntValue();
38280 EVT CVT = N1.getValueType();
38281
38282 if (SarConst.isNegative())
38283 return SDValue();
38284
38285 for (MVT SVT : { MVT::i8, MVT::i16, MVT::i32 }) {
38286 unsigned ShiftSize = SVT.getSizeInBits();
38287 // skipping types without corresponding sext/zext and
38288 // ShlConst that is not one of [56,48,32,24,16]
38289 if (ShiftSize >= Size || ShlConst != Size - ShiftSize)
38290 continue;
38291 SDLoc DL(N);
38292 SDValue NN =
38293 DAG.getNode(ISD::SIGN_EXTEND_INREG, DL, VT, N00, DAG.getValueType(SVT));
38294 SarConst = SarConst - (Size - ShiftSize);
38295 if (SarConst == 0)
38296 return NN;
38297 else if (SarConst.isNegative())
38298 return DAG.getNode(ISD::SHL, DL, VT, NN,
38299 DAG.getConstant(-SarConst, DL, CVT));
38300 else
38301 return DAG.getNode(ISD::SRA, DL, VT, NN,
38302 DAG.getConstant(SarConst, DL, CVT));
38303 }
38304 return SDValue();
38305}
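
[Editor's sketch] combineShiftRightArithmetic rewrites ashr(shl(x, Size - ShiftSize), SarConst) as a sign_extend_inreg to ShiftSize bits followed by a smaller shift. A scalar spot-check for the 32-within-64-bit case; the shl is done in unsigned arithmetic purely to avoid signed-overflow UB in this model, and the uint64-to-int64 conversion assumes the usual two's-complement wrap:

#include <cassert>
#include <cstdint>

int main() {
  for (int64_t X : {int64_t(0), int64_t(1), int64_t(-1), int64_t(0x12345678),
                    int64_t(-0x12345678), int64_t(0x7FFFFFFF)}) {
    int64_t Shl = int64_t(uint64_t(X) << 32);   // shl x, 32
    int64_t ViaShifts = Shl >> 40;              // ashr ..., 40
    int64_t ViaSext = int64_t(int32_t(X)) >> 8; // sra (sext_inreg x, i32), 8
    assert(ViaShifts == ViaSext);
  }
  return 0;
}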
38306
38307static SDValue combineShiftRightLogical(SDNode *N, SelectionDAG &DAG,
38308 TargetLowering::DAGCombinerInfo &DCI) {
38309 SDValue N0 = N->getOperand(0);
38310 SDValue N1 = N->getOperand(1);
38311 EVT VT = N0.getValueType();
38312
38313 // Only do this on the last DAG combine as it can interfere with other
38314 // combines.
38315 if (!DCI.isAfterLegalizeDAG())
38316 return SDValue();
38317
38318 // Try to improve a sequence of srl (and X, C1), C2 by inverting the order.
38319 // TODO: This is a generic DAG combine that became an x86-only combine to
38320 // avoid shortcomings in other folds such as bswap, bit-test ('bt'), and
38321 // and-not ('andn').
38322 if (N0.getOpcode() != ISD::AND || !N0.hasOneUse())
38323 return SDValue();
38324
38325 auto *ShiftC = dyn_cast<ConstantSDNode>(N1);
38326 auto *AndC = dyn_cast<ConstantSDNode>(N0.getOperand(1));
38327 if (!ShiftC || !AndC)
38328 return SDValue();
38329
38330 // If we can shrink the constant mask below 8 bits or 32 bits, then this
38331 // transform should reduce code size. It may also enable secondary transforms
38332 // from improved known-bits analysis or instruction selection.
38333 APInt MaskVal = AndC->getAPIntValue();
38334
38335 // If this can be matched by a zero extend, don't optimize.
38336 if (MaskVal.isMask()) {
38337 unsigned TO = MaskVal.countTrailingOnes();
38338 if (TO >= 8 && isPowerOf2_32(TO))
38339 return SDValue();
38340 }
38341
38342 APInt NewMaskVal = MaskVal.lshr(ShiftC->getAPIntValue());
38343 unsigned OldMaskSize = MaskVal.getMinSignedBits();
38344 unsigned NewMaskSize = NewMaskVal.getMinSignedBits();
38345 if ((OldMaskSize > 8 && NewMaskSize <= 8) ||
38346 (OldMaskSize > 32 && NewMaskSize <= 32)) {
38347 // srl (and X, AndC), ShiftC --> and (srl X, ShiftC), (AndC >> ShiftC)
38348 SDLoc DL(N);
38349 SDValue NewMask = DAG.getConstant(NewMaskVal, DL, VT);
38350 SDValue NewShift = DAG.getNode(ISD::SRL, DL, VT, N0.getOperand(0), N1);
38351 return DAG.getNode(ISD::AND, DL, VT, NewShift, NewMask);
38352 }
38353 return SDValue();
38354}
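
[Editor's sketch] The srl (and X, C1), C2 reordering is a plain identity: masking then shifting equals shifting then masking with C1 >> C2, and the smaller mask is what lets the immediate shrink below 8 or 32 bits. A standalone check:

#include <cassert>
#include <cstdint>

int main() {
  const uint32_t AndC = 0xFF00;
  const unsigned ShiftC = 8;
  for (uint32_t X = 0; X < 1000000; X += 1237)
    assert(((X & AndC) >> ShiftC) == ((X >> ShiftC) & (AndC >> ShiftC)));
  return 0;
}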
38355
38356static SDValue combineVectorPack(SDNode *N, SelectionDAG &DAG,
38357 TargetLowering::DAGCombinerInfo &DCI,
38358 const X86Subtarget &Subtarget) {
38359 unsigned Opcode = N->getOpcode();
38360 assert((X86ISD::PACKSS == Opcode || X86ISD::PACKUS == Opcode) &&
38361        "Unexpected shift opcode");
38362
38363 EVT VT = N->getValueType(0);
38364 SDValue N0 = N->getOperand(0);
38365 SDValue N1 = N->getOperand(1);
38366 unsigned DstBitsPerElt = VT.getScalarSizeInBits();
38367 unsigned SrcBitsPerElt = 2 * DstBitsPerElt;
38368 assert(N0.getScalarValueSizeInBits() == SrcBitsPerElt &&
38369        N1.getScalarValueSizeInBits() == SrcBitsPerElt &&
38370        "Unexpected PACKSS/PACKUS input type");
38371
38372 bool IsSigned = (X86ISD::PACKSS == Opcode);
38373
38374 // Constant Folding.
38375 APInt UndefElts0, UndefElts1;
38376 SmallVector<APInt, 32> EltBits0, EltBits1;
38377 if ((N0.isUndef() || N->isOnlyUserOf(N0.getNode())) &&
38378 (N1.isUndef() || N->isOnlyUserOf(N1.getNode())) &&
38379 getTargetConstantBitsFromNode(N0, SrcBitsPerElt, UndefElts0, EltBits0) &&
38380 getTargetConstantBitsFromNode(N1, SrcBitsPerElt, UndefElts1, EltBits1)) {
38381 unsigned NumLanes = VT.getSizeInBits() / 128;
38382 unsigned NumDstElts = VT.getVectorNumElements();
38383 unsigned NumSrcElts = NumDstElts / 2;
38384 unsigned NumDstEltsPerLane = NumDstElts / NumLanes;
38385 unsigned NumSrcEltsPerLane = NumSrcElts / NumLanes;
38386
38387 APInt Undefs(NumDstElts, 0);
38388 SmallVector<APInt, 32> Bits(NumDstElts, APInt::getNullValue(DstBitsPerElt));
38389 for (unsigned Lane = 0; Lane != NumLanes; ++Lane) {
38390 for (unsigned Elt = 0; Elt != NumDstEltsPerLane; ++Elt) {
38391 unsigned SrcIdx = Lane * NumSrcEltsPerLane + Elt % NumSrcEltsPerLane;
38392 auto &UndefElts = (Elt >= NumSrcEltsPerLane ? UndefElts1 : UndefElts0);
38393 auto &EltBits = (Elt >= NumSrcEltsPerLane ? EltBits1 : EltBits0);
38394
38395 if (UndefElts[SrcIdx]) {
38396 Undefs.setBit(Lane * NumDstEltsPerLane + Elt);
38397 continue;
38398 }
38399
38400 APInt &Val = EltBits[SrcIdx];
38401 if (IsSigned) {
38402 // PACKSS: Truncate signed value with signed saturation.
38403 // Source values less than dst minint are saturated to minint.
38404 // Source values greater than dst maxint are saturated to maxint.
38405 if (Val.isSignedIntN(DstBitsPerElt))
38406 Val = Val.trunc(DstBitsPerElt);
38407 else if (Val.isNegative())
38408 Val = APInt::getSignedMinValue(DstBitsPerElt);
38409 else
38410 Val = APInt::getSignedMaxValue(DstBitsPerElt);
38411 } else {
38412 // PACKUS: Truncate signed value with unsigned saturation.
38413 // Source values less than zero are saturated to zero.
38414 // Source values greater than dst maxuint are saturated to maxuint.
38415 if (Val.isIntN(DstBitsPerElt))
38416 Val = Val.trunc(DstBitsPerElt);
38417 else if (Val.isNegative())
38418 Val = APInt::getNullValue(DstBitsPerElt);
38419 else
38420 Val = APInt::getAllOnesValue(DstBitsPerElt);
38421 }
38422 Bits[Lane * NumDstEltsPerLane + Elt] = Val;
38423 }
38424 }
38425
38426 return getConstVector(Bits, Undefs, VT.getSimpleVT(), DAG, SDLoc(N));
38427 }
38428
38429 // Try to combine a PACKUSWB/PACKSSWB implemented truncate with a regular
38430 // truncate to create a larger truncate.
38431 if (Subtarget.hasAVX512() &&
38432 N0.getOpcode() == ISD::TRUNCATE && N1.isUndef() && VT == MVT::v16i8 &&
38433 N0.getOperand(0).getValueType() == MVT::v8i32) {
38434 if ((IsSigned && DAG.ComputeNumSignBits(N0) > 8) ||
38435 (!IsSigned &&
38436 DAG.MaskedValueIsZero(N0, APInt::getHighBitsSet(16, 8)))) {
38437 if (Subtarget.hasVLX())
38438 return DAG.getNode(X86ISD::VTRUNC, SDLoc(N), VT, N0.getOperand(0));
38439
38440 // Widen input to v16i32 so we can truncate that.
38441 SDLoc dl(N);
38442 SDValue Concat = DAG.getNode(ISD::CONCAT_VECTORS, dl, MVT::v16i32,
38443 N0.getOperand(0), DAG.getUNDEF(MVT::v8i32));
38444 return DAG.getNode(ISD::TRUNCATE, SDLoc(N), VT, Concat);
38445 }
38446 }
38447
38448 // Attempt to combine as shuffle.
38449 SDValue Op(N, 0);
38450 if (SDValue Res = combineX86ShufflesRecursively(Op, DAG, Subtarget))
38451 return Res;
38452
38453 return SDValue();
38454}
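
[Editor's sketch] The constant-folding loop above applies the PACKSS/PACKUS saturation rules per element. A scalar model of the two rules for a 16-to-8-bit pack, standalone and illustrative only:

#include <algorithm>
#include <cassert>
#include <cstdint>

static int8_t packss16to8(int16_t V) {  // signed saturation to [-128, 127]
  return int8_t(std::min<int16_t>(std::max<int16_t>(V, -128), 127));
}

static uint8_t packus16to8(int16_t V) { // unsigned saturation of a signed source
  return uint8_t(std::min<int16_t>(std::max<int16_t>(V, 0), 255));
}

int main() {
  assert(packss16to8(1000) == 127 && packss16to8(-1000) == -128);
  assert(packss16to8(42) == 42);
  assert(packus16to8(1000) == 255 && packus16to8(-7) == 0);
  assert(packus16to8(200) == 200);
  return 0;
}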
38455
38456static SDValue combineVectorShiftVar(SDNode *N, SelectionDAG &DAG,
38457 TargetLowering::DAGCombinerInfo &DCI,
38458 const X86Subtarget &Subtarget) {
38459 assert((X86ISD::VSHL == N->getOpcode() || X86ISD::VSRA == N->getOpcode() ||
38460         X86ISD::VSRL == N->getOpcode()) &&
38461        "Unexpected shift opcode");
38462 EVT VT = N->getValueType(0);
38463 SDValue N0 = N->getOperand(0);
38464 SDValue N1 = N->getOperand(1);
38465
38466 // Shift zero -> zero.
38467 if (ISD::isBuildVectorAllZeros(N0.getNode()))
38468 return DAG.getConstant(0, SDLoc(N), VT);
38469
38470 // Detect constant shift amounts.
38471 APInt UndefElts;
38472 SmallVector<APInt, 32> EltBits;
38473 if (getTargetConstantBitsFromNode(N1, 64, UndefElts, EltBits, true, false)) {
38474 unsigned X86Opc = getTargetVShiftUniformOpcode(N->getOpcode(), false);
38475 return getTargetVShiftByConstNode(X86Opc, SDLoc(N), VT.getSimpleVT(), N0,
38476 EltBits[0].getZExtValue(), DAG);
38477 }
38478
38479 APInt KnownUndef, KnownZero;
38480 const TargetLowering &TLI = DAG.getTargetLoweringInfo();
38481 APInt DemandedElts = APInt::getAllOnesValue(VT.getVectorNumElements());
38482 if (TLI.SimplifyDemandedVectorElts(SDValue(N, 0), DemandedElts, KnownUndef,
38483 KnownZero, DCI))
38484 return SDValue(N, 0);
38485
38486 return SDValue();
38487}
38488
38489static SDValue combineVectorShiftImm(SDNode *N, SelectionDAG &DAG,
38490 TargetLowering::DAGCombinerInfo &DCI,
38491 const X86Subtarget &Subtarget) {
38492 unsigned Opcode = N->getOpcode();
38493 assert((X86ISD::VSHLI == Opcode || X86ISD::VSRAI == Opcode ||
38494         X86ISD::VSRLI == Opcode) &&
38495        "Unexpected shift opcode");
38496 bool LogicalShift = X86ISD::VSHLI == Opcode || X86ISD::VSRLI == Opcode;
38497 EVT VT = N->getValueType(0);
38498 SDValue N0 = N->getOperand(0);
38499 SDValue N1 = N->getOperand(1);
38500 unsigned NumBitsPerElt = VT.getScalarSizeInBits();
38501 assert(VT == N0.getValueType() && (NumBitsPerElt % 8) == 0 &&
38502        "Unexpected value type");
38503 assert(N1.getValueType() == MVT::i8 && "Unexpected shift amount type");
38504
38505 // Out of range logical bit shifts are guaranteed to be zero.
38506 // Out of range arithmetic bit shifts splat the sign bit.
38507 unsigned ShiftVal = cast<ConstantSDNode>(N1)->getZExtValue();
38508 if (ShiftVal >= NumBitsPerElt) {
38509 if (LogicalShift)
38510 return DAG.getConstant(0, SDLoc(N), VT);
38511 else
38512 ShiftVal = NumBitsPerElt - 1;
38513 }
38514
38515 // Shift N0 by zero -> N0.
38516 if (!ShiftVal)
38517 return N0;
38518
38519 // Shift zero -> zero.
38520 if (ISD::isBuildVectorAllZeros(N0.getNode()))
38521 return DAG.getConstant(0, SDLoc(N), VT);
38522
38523 // Fold (VSRAI (VSRAI X, C1), C2) --> (VSRAI X, (C1 + C2)) with (C1 + C2)
38524 // clamped to (NumBitsPerElt - 1).
38525 if (Opcode == X86ISD::VSRAI && N0.getOpcode() == X86ISD::VSRAI) {
38526 unsigned ShiftVal2 = cast<ConstantSDNode>(N0.getOperand(1))->getZExtValue();
38527 unsigned NewShiftVal = ShiftVal + ShiftVal2;
38528 if (NewShiftVal >= NumBitsPerElt)
38529 NewShiftVal = NumBitsPerElt - 1;
38530 return DAG.getNode(X86ISD::VSRAI, SDLoc(N), VT, N0.getOperand(0),
38531 DAG.getTargetConstant(NewShiftVal, SDLoc(N), MVT::i8));
38532 }
38533
38534 // We can decode 'whole byte' logical bit shifts as shuffles.
38535 if (LogicalShift && (ShiftVal % 8) == 0) {
38536 SDValue Op(N, 0);
38537 if (SDValue Res = combineX86ShufflesRecursively(Op, DAG, Subtarget))
38538 return Res;
38539 }
38540
38541 // Constant Folding.
38542 APInt UndefElts;
38543 SmallVector<APInt, 32> EltBits;
38544 if (N->isOnlyUserOf(N0.getNode()) &&
38545 getTargetConstantBitsFromNode(N0, NumBitsPerElt, UndefElts, EltBits)) {
38546 assert(EltBits.size() == VT.getVectorNumElements() &&
38547        "Unexpected shift value type");
38548 for (APInt &Elt : EltBits) {
38549 if (X86ISD::VSHLI == Opcode)
38550 Elt <<= ShiftVal;
38551 else if (X86ISD::VSRAI == Opcode)
38552 Elt.ashrInPlace(ShiftVal);
38553 else
38554 Elt.lshrInPlace(ShiftVal);
38555 }
38556 return getConstVector(EltBits, UndefElts, VT.getSimpleVT(), DAG, SDLoc(N));
38557 }
38558
38559 const TargetLowering &TLI = DAG.getTargetLoweringInfo();
38560 if (TLI.SimplifyDemandedBits(SDValue(N, 0),
38561 APInt::getAllOnesValue(NumBitsPerElt), DCI))
38562 return SDValue(N, 0);
38563
38564 return SDValue();
38565}
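
[Editor's sketch] Two facts used above: an out-of-range arithmetic shift behaves like a shift by NumBitsPerElt - 1 (it splats the sign bit), and stacked VSRAI amounts add but clamp at NumBitsPerElt - 1. A scalar model for 32-bit elements; the clamp is made explicit because shifting by 32 or more is UB in C++:

#include <cassert>
#include <cstdint>

static int32_t vsrai32(int32_t V, unsigned Amt) {
  return V >> (Amt >= 32 ? 31 : Amt); // out-of-range SRA splats the sign bit
}

int main() {
  for (int32_t V : {5, -5, 0x40000000, -0x40000000}) {
    // (VSRAI (VSRAI X, C1), C2) == (VSRAI X, min(C1 + C2, 31))
    assert(vsrai32(vsrai32(V, 20), 20) == vsrai32(V, 31));
    assert(vsrai32(vsrai32(V, 3), 4) == vsrai32(V, 7));
  }
  return 0;
}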
38566
38567static SDValue combineVectorInsert(SDNode *N, SelectionDAG &DAG,
38568 TargetLowering::DAGCombinerInfo &DCI,
38569 const X86Subtarget &Subtarget) {
38570 EVT VT = N->getValueType(0);
38571 assert(((N->getOpcode() == X86ISD::PINSRB && VT == MVT::v16i8) ||
38572         (N->getOpcode() == X86ISD::PINSRW && VT == MVT::v8i16)) &&
38573        "Unexpected vector insertion");
38574
38575 unsigned NumBitsPerElt = VT.getScalarSizeInBits();
38576 const TargetLowering &TLI = DAG.getTargetLoweringInfo();
38577 if (TLI.SimplifyDemandedBits(SDValue(N, 0),
38578 APInt::getAllOnesValue(NumBitsPerElt), DCI))
38579 return SDValue(N, 0);
38580
38581 // Attempt to combine PINSRB/PINSRW patterns to a shuffle.
38582 SDValue Op(N, 0);
38583 if (SDValue Res = combineX86ShufflesRecursively(Op, DAG, Subtarget))
38584 return Res;
38585
38586 return SDValue();
38587}
38588
38589/// Recognize the distinctive (AND (setcc ...) (setcc ..)) where both setccs
38590/// reference the same FP CMP, and rewrite for CMPEQSS and friends. Likewise for
38591/// OR -> CMPNEQSS.
38592static SDValue combineCompareEqual(SDNode *N, SelectionDAG &DAG,
38593 TargetLowering::DAGCombinerInfo &DCI,
38594 const X86Subtarget &Subtarget) {
38595 unsigned opcode;
38596
38597 // SSE1 supports CMP{eq|ne}SS, and SSE2 added CMP{eq|ne}SD, but
38598 // we're requiring SSE2 for both.
38599 if (Subtarget.hasSSE2() && isAndOrOfSetCCs(SDValue(N, 0U), opcode)) {
38600 SDValue N0 = N->getOperand(0);
38601 SDValue N1 = N->getOperand(1);
38602 SDValue CMP0 = N0.getOperand(1);
38603 SDValue CMP1 = N1.getOperand(1);
38604 SDLoc DL(N);
38605
38606 // The SETCCs should both refer to the same CMP.
38607 if (CMP0.getOpcode() != X86ISD::CMP || CMP0 != CMP1)
38608 return SDValue();
38609
38610 SDValue CMP00 = CMP0->getOperand(0);
38611 SDValue CMP01 = CMP0->getOperand(1);
38612 EVT VT = CMP00.getValueType();
38613
38614 if (VT == MVT::f32 || VT == MVT::f64) {
38615 bool ExpectingFlags = false;
38616 // Check for any users that want flags:
38617 for (SDNode::use_iterator UI = N->use_begin(), UE = N->use_end();
38618 !ExpectingFlags && UI != UE; ++UI)
38619 switch (UI->getOpcode()) {
38620 default:
38621 case ISD::BR_CC:
38622 case ISD::BRCOND:
38623 case ISD::SELECT:
38624 ExpectingFlags = true;
38625 break;
38626 case ISD::CopyToReg:
38627 case ISD::SIGN_EXTEND:
38628 case ISD::ZERO_EXTEND:
38629 case ISD::ANY_EXTEND:
38630 break;
38631 }
38632
38633 if (!ExpectingFlags) {
38634 enum X86::CondCode cc0 = (enum X86::CondCode)N0.getConstantOperandVal(0);
38635 enum X86::CondCode cc1 = (enum X86::CondCode)N1.getConstantOperandVal(0);
38636
38637 if (cc1 == X86::COND_E || cc1 == X86::COND_NE) {
38638 X86::CondCode tmp = cc0;
38639 cc0 = cc1;
38640 cc1 = tmp;
38641 }
38642
38643 if ((cc0 == X86::COND_E && cc1 == X86::COND_NP) ||
38644 (cc0 == X86::COND_NE && cc1 == X86::COND_P)) {
38645 // FIXME: need symbolic constants for these magic numbers.
38646 // See X86ATTInstPrinter.cpp:printSSECC().
38647 unsigned x86cc = (cc0 == X86::COND_E) ? 0 : 4;
38648 if (Subtarget.hasAVX512()) {
38649 SDValue FSetCC =
38650 DAG.getNode(X86ISD::FSETCCM, DL, MVT::v1i1, CMP00, CMP01,
38651 DAG.getTargetConstant(x86cc, DL, MVT::i8));
38652 // Need to fill with zeros to ensure the bitcast will produce zeroes
38653 // for the upper bits. An EXTRACT_ELEMENT here wouldn't guarantee that.
38654 SDValue Ins = DAG.getNode(ISD::INSERT_SUBVECTOR, DL, MVT::v16i1,
38655 DAG.getConstant(0, DL, MVT::v16i1),
38656 FSetCC, DAG.getIntPtrConstant(0, DL));
38657 return DAG.getZExtOrTrunc(DAG.getBitcast(MVT::i16, Ins), DL,
38658 N->getSimpleValueType(0));
38659 }
38660 SDValue OnesOrZeroesF =
38661 DAG.getNode(X86ISD::FSETCC, DL, CMP00.getValueType(), CMP00,
38662 CMP01, DAG.getTargetConstant(x86cc, DL, MVT::i8));
38663
38664 bool is64BitFP = (CMP00.getValueType() == MVT::f64);
38665 MVT IntVT = is64BitFP ? MVT::i64 : MVT::i32;
38666
38667 if (is64BitFP && !Subtarget.is64Bit()) {
38668 // On a 32-bit target, we cannot bitcast the 64-bit float to a
38669 // 64-bit integer, since that's not a legal type. Since
38670 // OnesOrZeroesF is all ones or all zeroes, we don't need all the
38671 // bits, but can do this little dance to extract the lowest 32 bits
38672 // and work with those going forward.
38673 SDValue Vector64 = DAG.getNode(ISD::SCALAR_TO_VECTOR, DL, MVT::v2f64,
38674 OnesOrZeroesF);
38675 SDValue Vector32 = DAG.getBitcast(MVT::v4f32, Vector64);
38676 OnesOrZeroesF = DAG.getNode(ISD::EXTRACT_VECTOR_ELT, DL, MVT::f32,
38677 Vector32, DAG.getIntPtrConstant(0, DL));
38678 IntVT = MVT::i32;
38679 }
38680
38681 SDValue OnesOrZeroesI = DAG.getBitcast(IntVT, OnesOrZeroesF);
38682 SDValue ANDed = DAG.getNode(ISD::AND, DL, IntVT, OnesOrZeroesI,
38683 DAG.getConstant(1, DL, IntVT));
38684 SDValue OneBitOfTruth = DAG.getNode(ISD::TRUNCATE, DL, MVT::i8,
38685 ANDed);
38686 return OneBitOfTruth;
38687 }
38688 }
38689 }
38690 }
38691 return SDValue();
38692}
38693
38694/// Try to fold: (and (xor X, -1), Y) -> (andnp X, Y).
38695static SDValue combineANDXORWithAllOnesIntoANDNP(SDNode *N, SelectionDAG &DAG) {
38696 assert(N->getOpcode() == ISD::AND);
38697
38698 MVT VT = N->getSimpleValueType(0);
38699 if (!VT.is128BitVector() && !VT.is256BitVector() && !VT.is512BitVector())
38700 return SDValue();
38701
38702 SDValue X, Y;
38703 SDValue N0 = N->getOperand(0);
38704 SDValue N1 = N->getOperand(1);
38705
38706 if (SDValue Not = IsNOT(N0, DAG)) {
38707 X = Not;
38708 Y = N1;
38709 } else if (SDValue Not = IsNOT(N1, DAG)) {
38710 X = Not;
38711 Y = N0;
38712 } else
38713 return SDValue();
38714
38715 X = DAG.getBitcast(VT, X);
38716 Y = DAG.getBitcast(VT, Y);
38717 return DAG.getNode(X86ISD::ANDNP, SDLoc(N), VT, X, Y);
38718}
38719
38720 // On AVX/AVX2 the type v8i1 is legalized to v8i16, which is an XMM-sized
38721 // register. In most cases we actually compare or select YMM-sized registers
38722// and mixing the two types creates horrible code. This method optimizes
38723// some of the transition sequences.
38724// Even with AVX-512 this is still useful for removing casts around logical
38725// operations on vXi1 mask types.
38726static SDValue PromoteMaskArithmetic(SDNode *N, SelectionDAG &DAG,
38727 const X86Subtarget &Subtarget) {
38728 EVT VT = N->getValueType(0);
38729 assert(VT.isVector() && "Expected vector type");
38730
38731 assert((N->getOpcode() == ISD::ANY_EXTEND ||
38732         N->getOpcode() == ISD::ZERO_EXTEND ||
38733         N->getOpcode() == ISD::SIGN_EXTEND) && "Invalid Node");
38734
38735 SDValue Narrow = N->getOperand(0);
38736 EVT NarrowVT = Narrow.getValueType();
38737
38738 if (Narrow->getOpcode() != ISD::XOR &&
38739 Narrow->getOpcode() != ISD::AND &&
38740 Narrow->getOpcode() != ISD::OR)
38741 return SDValue();
38742
38743 SDValue N0 = Narrow->getOperand(0);
38744 SDValue N1 = Narrow->getOperand(1);
38745 SDLoc DL(Narrow);
38746
38747 // The Left side has to be a trunc.
38748 if (N0.getOpcode() != ISD::TRUNCATE)
38749 return SDValue();
38750
38751 // The type of the truncated inputs.
38752 if (N0.getOperand(0).getValueType() != VT)
38753 return SDValue();
38754
38755 // The right side has to be a 'trunc' or a constant vector.
38756 bool RHSTrunc = N1.getOpcode() == ISD::TRUNCATE &&
38757 N1.getOperand(0).getValueType() == VT;
38758 if (!RHSTrunc &&
38759 !ISD::isBuildVectorOfConstantSDNodes(N1.getNode()))
38760 return SDValue();
38761
38762 const TargetLowering &TLI = DAG.getTargetLoweringInfo();
38763
38764 if (!TLI.isOperationLegalOrPromote(Narrow->getOpcode(), VT))
38765 return SDValue();
38766
38767 // Set N0 and N1 to hold the inputs to the new wide operation.
38768 N0 = N0.getOperand(0);
38769 if (RHSTrunc)
38770 N1 = N1.getOperand(0);
38771 else
38772 N1 = DAG.getNode(ISD::ZERO_EXTEND, DL, VT, N1);
38773
38774 // Generate the wide operation.
38775 SDValue Op = DAG.getNode(Narrow->getOpcode(), DL, VT, N0, N1);
38776 unsigned Opcode = N->getOpcode();
38777 switch (Opcode) {
38778 default: llvm_unreachable("Unexpected opcode");
38779 case ISD::ANY_EXTEND:
38780 return Op;
38781 case ISD::ZERO_EXTEND:
38782 return DAG.getZeroExtendInReg(Op, DL, NarrowVT.getScalarType());
38783 case ISD::SIGN_EXTEND:
38784 return DAG.getNode(ISD::SIGN_EXTEND_INREG, DL, VT,
38785 Op, DAG.getValueType(NarrowVT));
38786 }
38787}
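
[Editor's sketch] Widening the AND/OR/XOR as done above is safe because bitwise logic commutes with truncation: trunc(a) op trunc(b) == trunc(a op b), so the operation can be performed at the wide type and narrowed or re-extended afterwards. A standalone check:

#include <cassert>
#include <cstdint>

int main() {
  uint32_t A = 0x12345678, B = 0x9ABCDEF0;
  assert((uint16_t(A) & uint16_t(B)) == uint16_t(A & B));
  assert((uint16_t(A) | uint16_t(B)) == uint16_t(A | B));
  assert((uint16_t(A) ^ uint16_t(B)) == uint16_t(A ^ B));
  return 0;
}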
38788
38789/// If both input operands of a logic op are being cast from floating point
38790/// types, try to convert this into a floating point logic node to avoid
38791/// unnecessary moves from SSE to integer registers.
38792static SDValue convertIntLogicToFPLogic(SDNode *N, SelectionDAG &DAG,
38793 const X86Subtarget &Subtarget) {
38794 EVT VT = N->getValueType(0);
38795 SDValue N0 = N->getOperand(0);
38796 SDValue N1 = N->getOperand(1);
38797 SDLoc DL(N);
38798
38799 if (N0.getOpcode() != ISD::BITCAST || N1.getOpcode() != ISD::BITCAST)
38800 return SDValue();
38801
38802 SDValue N00 = N0.getOperand(0);
38803 SDValue N10 = N1.getOperand(0);
38804 EVT N00Type = N00.getValueType();
38805 EVT N10Type = N10.getValueType();
38806
38807 // Ensure that both types are the same and are legal scalar fp types.
38808 if (N00Type != N10Type ||
38809 !((Subtarget.hasSSE1() && N00Type == MVT::f32) ||
38810 (Subtarget.hasSSE2() && N00Type == MVT::f64)))
38811 return SDValue();
38812
38813 unsigned FPOpcode;
38814 switch (N->getOpcode()) {
38815 default: llvm_unreachable("Unexpected input node for FP logic conversion");
38816 case ISD::AND: FPOpcode = X86ISD::FAND; break;
38817 case ISD::OR: FPOpcode = X86ISD::FOR; break;
38818 case ISD::XOR: FPOpcode = X86ISD::FXOR; break;
38819 }
38820
38821 SDValue FPLogic = DAG.getNode(FPOpcode, DL, N00Type, N00, N10);
38822 return DAG.getBitcast(VT, FPLogic);
38823}
38824
38825 /// If this is a zero/all-bits result that is bitwise-anded with a low-bits
38826 /// mask (Mask == 1 for the x86 lowering of a SETCC + ZEXT), replace the 'and'
38827 /// with a shift-right to eliminate loading the vector constant mask value.
38828static SDValue combineAndMaskToShift(SDNode *N, SelectionDAG &DAG,
38829 const X86Subtarget &Subtarget) {
38830 SDValue Op0 = peekThroughBitcasts(N->getOperand(0));
38831 SDValue Op1 = peekThroughBitcasts(N->getOperand(1));
38832 EVT VT0 = Op0.getValueType();
38833 EVT VT1 = Op1.getValueType();
38834
38835 if (VT0 != VT1 || !VT0.isSimple() || !VT0.isInteger())
38836 return SDValue();
38837
38838 APInt SplatVal;
38839 if (!ISD::isConstantSplatVector(Op1.getNode(), SplatVal) ||
38840 !SplatVal.isMask())
38841 return SDValue();
38842
38843 // Don't prevent creation of ANDN.
38844 if (isBitwiseNot(Op0))
38845 return SDValue();
38846
38847 if (!SupportedVectorShiftWithImm(VT0.getSimpleVT(), Subtarget, ISD::SRL))
38848 return SDValue();
38849
38850 unsigned EltBitWidth = VT0.getScalarSizeInBits();
38851 if (EltBitWidth != DAG.ComputeNumSignBits(Op0))
38852 return SDValue();
38853
38854 SDLoc DL(N);
38855 unsigned ShiftVal = SplatVal.countTrailingOnes();
38856 SDValue ShAmt = DAG.getTargetConstant(EltBitWidth - ShiftVal, DL, MVT::i8);
38857 SDValue Shift = DAG.getNode(X86ISD::VSRLI, DL, VT0, Op0, ShAmt);
38858 return DAG.getBitcast(N->getValueType(0), Shift);
38859}
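
[Editor's sketch] combineAndMaskToShift is valid because the left operand is known to be 0 or all ones in every element (ComputeNumSignBits equals the element width); for such values, AND with a k-bit mask equals a logical shift right by (width - k). A scalar model for one 32-bit element:

#include <cassert>
#include <cstdint>

int main() {
  const unsigned EltBits = 32, TrailingOnes = 5;
  const uint32_t Mask = (1u << TrailingOnes) - 1;
  for (uint32_t V : {0u, ~0u}) // all-zeros or all-ones element
    assert((V & Mask) == (V >> (EltBits - TrailingOnes)));
  return 0;
}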
38860
38861// Get the index node from the lowered DAG of a GEP IR instruction with one
38862// indexing dimension.
38863static SDValue getIndexFromUnindexedLoad(LoadSDNode *Ld) {
38864 if (Ld->isIndexed())
38865 return SDValue();
38866
38867 SDValue Base = Ld->getBasePtr();
38868
38869 if (Base.getOpcode() != ISD::ADD)
38870 return SDValue();
38871
38872 SDValue ShiftedIndex = Base.getOperand(0);
38873
38874 if (ShiftedIndex.getOpcode() != ISD::SHL)
38875 return SDValue();
38876
38877 return ShiftedIndex.getOperand(0);
38878
38879}
38880
38881static bool hasBZHI(const X86Subtarget &Subtarget, MVT VT) {
38882 if (Subtarget.hasBMI2() && VT.isScalarInteger()) {
38883 switch (VT.getSizeInBits()) {
38884 default: return false;
38885 case 64: return Subtarget.is64Bit() ? true : false;
38886 case 32: return true;
38887 }
38888 }
38889 return false;
38890}
38891
38892 // This function recognizes cases where the X86 bzhi instruction can replace an
38893 // 'and-load' sequence.
38894 // When an integer value is loaded from an array of constants defined as
38895 // follows:
38896 //
38897 // int array[SIZE] = {0x0, 0x1, 0x3, 0x7, 0xF ..., 2^(SIZE-1) - 1}
38898 //
38899 // and the result is then bitwise-and'ed with another input, the whole sequence
38900 // is equivalent to performing bzhi (zero high bits) on that input, using the
38901 // same index as the load.
38902static SDValue combineAndLoadToBZHI(SDNode *Node, SelectionDAG &DAG,
38903 const X86Subtarget &Subtarget) {
38904 MVT VT = Node->getSimpleValueType(0);
38905 SDLoc dl(Node);
38906
38907 // Check if subtarget has BZHI instruction for the node's type
38908 if (!hasBZHI(Subtarget, VT))
38909 return SDValue();
38910
38911 // Try matching the pattern for both operands.
38912 for (unsigned i = 0; i < 2; i++) {
38913 SDValue N = Node->getOperand(i);
38914 LoadSDNode *Ld = dyn_cast<LoadSDNode>(N.getNode());
38915
38916 // Bail out if the operand is not a load instruction.
38917 if (!Ld)
38918 return SDValue();
38919
38920 const Value *MemOp = Ld->getMemOperand()->getValue();
38921
38922 if (!MemOp)
38923 return SDValue();
38924
38925 if (const GetElementPtrInst *GEP = dyn_cast<GetElementPtrInst>(MemOp)) {
38926 if (GlobalVariable *GV = dyn_cast<GlobalVariable>(GEP->getOperand(0))) {
38927 if (GV->isConstant() && GV->hasDefinitiveInitializer()) {
38928
38929 Constant *Init = GV->getInitializer();
38930 Type *Ty = Init->getType();
38931 if (!isa<ConstantDataArray>(Init) ||
38932 !Ty->getArrayElementType()->isIntegerTy() ||
38933 Ty->getArrayElementType()->getScalarSizeInBits() !=
38934 VT.getSizeInBits() ||
38935 Ty->getArrayNumElements() >
38936 Ty->getArrayElementType()->getScalarSizeInBits())
38937 continue;
38938
38939 // Check if the array's constant elements are suitable to our case.
38940 uint64_t ArrayElementCount = Init->getType()->getArrayNumElements();
38941 bool ConstantsMatch = true;
38942 for (uint64_t j = 0; j < ArrayElementCount; j++) {
38943 ConstantInt *Elem =
38944 dyn_cast<ConstantInt>(Init->getAggregateElement(j));
38945 if (Elem->getZExtValue() != (((uint64_t)1 << j) - 1)) {
38946 ConstantsMatch = false;
38947 break;
38948 }
38949 }
38950 if (!ConstantsMatch)
38951 continue;
38952
38953 // Do the transformation (For 32-bit type):
38954 // -> (and (load arr[idx]), inp)
38955 // <- (and (srl 0xFFFFFFFF, (sub 32, idx)))
38956 // that will be replaced with one bzhi instruction.
38957 SDValue Inp = (i == 0) ? Node->getOperand(1) : Node->getOperand(0);
38958 SDValue SizeC = DAG.getConstant(VT.getSizeInBits(), dl, MVT::i32);
38959
38960 // Get the Node which indexes into the array.
38961 SDValue Index = getIndexFromUnindexedLoad(Ld);
38962 if (!Index)
38963 return SDValue();
38964 Index = DAG.getZExtOrTrunc(Index, dl, MVT::i32);
38965
38966 SDValue Sub = DAG.getNode(ISD::SUB, dl, MVT::i32, SizeC, Index);
38967 Sub = DAG.getNode(ISD::TRUNCATE, dl, MVT::i8, Sub);
38968
38969 SDValue AllOnes = DAG.getAllOnesConstant(dl, VT);
38970 SDValue LShr = DAG.getNode(ISD::SRL, dl, VT, AllOnes, Sub);
38971
38972 return DAG.getNode(ISD::AND, dl, VT, Inp, LShr);
38973 }
38974 }
38975 }
38976 }
38977 return SDValue();
38978}
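
[Editor's sketch] The BZHI transform works because the matched table satisfies arr[j] == (1 << j) - 1, which is the all-ones value logically shifted right by (width - j); AND'ing the other input with it clears every bit at position >= j, exactly what bzhi does. A standalone model with the index restricted to [1, 31] so the shift stays defined in C++:

#include <cassert>
#include <cstdint>

static uint32_t bzhi32(uint32_t Src, uint32_t Idx) {
  return Src & (~0u >> (32 - Idx)); // zero all bits at positions >= Idx
}

int main() {
  uint32_t Arr[8];
  for (uint32_t J = 0; J != 8; ++J)
    Arr[J] = (1u << J) - 1;         // the constant table the combine matches

  uint32_t Inp = 0xDEADBEEF;
  for (uint32_t Idx = 1; Idx != 8; ++Idx)
    assert((Inp & Arr[Idx]) == bzhi32(Inp, Idx)); // and-load == bzhi
  return 0;
}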
38979
38980// Look for (and (ctpop X), 1) which is the IR form of __builtin_parity.
38981// Turn it into series of XORs and a setnp.
38982static SDValue combineParity(SDNode *N, SelectionDAG &DAG,
38983 const X86Subtarget &Subtarget) {
38984 EVT VT = N->getValueType(0);
38985
38986 // We only support 64-bit and 32-bit. 64-bit requires special handling
38987 // unless the 64-bit popcnt instruction is legal.
38988 if (VT != MVT::i32 && VT != MVT::i64)
38989 return SDValue();
38990
38991 const TargetLowering &TLI = DAG.getTargetLoweringInfo();
38992 if (TLI.isTypeLegal(VT) && TLI.isOperationLegal(ISD::CTPOP, VT))
38993 return SDValue();
38994
38995 SDValue N0 = N->getOperand(0);
38996 SDValue N1 = N->getOperand(1);
38997
38998 // LHS needs to be a single use CTPOP.
38999 if (N0.getOpcode() != ISD::CTPOP || !N0.hasOneUse())
39000 return SDValue();
39001
39002 // RHS needs to be 1.
39003 if (!isOneConstant(N1))
39004 return SDValue();
39005
39006 SDLoc DL(N);
39007 SDValue X = N0.getOperand(0);
39008
39009 // If this is 64-bit, it's always best to xor the two 32-bit pieces together
39010 // even if we have popcnt.
39011 if (VT == MVT::i64) {
39012 SDValue Hi = DAG.getNode(ISD::TRUNCATE, DL, MVT::i32,
39013 DAG.getNode(ISD::SRL, DL, VT, X,
39014 DAG.getConstant(32, DL, MVT::i8)));
39015 SDValue Lo = DAG.getNode(ISD::TRUNCATE, DL, MVT::i32, X);
39016 X = DAG.getNode(ISD::XOR, DL, MVT::i32, Lo, Hi);
39017 // Generate a 32-bit parity idiom. This will bring us back here if we need
39018 // to expand it too.
39019 SDValue Parity = DAG.getNode(ISD::AND, DL, MVT::i32,
39020 DAG.getNode(ISD::CTPOP, DL, MVT::i32, X),
39021 DAG.getConstant(1, DL, MVT::i32));
39022 return DAG.getNode(ISD::ZERO_EXTEND, DL, VT, Parity);
39023 }
39024 assert(VT == MVT::i32 && "Unexpected VT!");
39025
39026 // Xor the high and low 16-bits together using a 32-bit operation.
39027 SDValue Hi16 = DAG.getNode(ISD::SRL, DL, VT, X,
39028 DAG.getConstant(16, DL, MVT::i8));
39029 X = DAG.getNode(ISD::XOR, DL, VT, X, Hi16);
39030
39031 // Finally, xor the low 2 bytes together and use an 8-bit flag-setting xor.
39032 // This should allow an h-reg to be used to save a shift.
39033 // FIXME: We only get an h-reg in 32-bit mode.
39034 SDValue Hi = DAG.getNode(ISD::TRUNCATE, DL, MVT::i8,
39035 DAG.getNode(ISD::SRL, DL, VT, X,
39036 DAG.getConstant(8, DL, MVT::i8)));
39037 SDValue Lo = DAG.getNode(ISD::TRUNCATE, DL, MVT::i8, X);
39038 SDVTList VTs = DAG.getVTList(MVT::i8, MVT::i32);
39039 SDValue Flags = DAG.getNode(X86ISD::XOR, DL, VTs, Lo, Hi).getValue(1);
39040
39041 // Copy the inverse of the parity flag into a register with setcc.
39042 SDValue Setnp = getSETCC(X86::COND_NP, Flags, DL, DAG);
39043 // Zero extend to original type.
39044 return DAG.getNode(ISD::ZERO_EXTEND, DL, N->getValueType(0), Setnp);
39045}
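
[Editor's sketch] The lowering above keeps folding halves together with XOR because parity(x) == parity(hi ^ lo); once a single byte is left, the x86 parity flag and SETNP finish the job. A standalone check of the folding identity (uses the GCC/Clang __builtin_popcount intrinsics):

#include <cassert>
#include <cstdint>

static unsigned parity64(uint64_t X) { return __builtin_popcountll(X) & 1; }

int main() {
  for (uint64_t X : {uint64_t(0), uint64_t(1), uint64_t(0xFF),
                     uint64_t(0x123456789ABCDEF0), ~uint64_t(0)}) {
    uint32_t F32 = uint32_t(X) ^ uint32_t(X >> 32); // 64 -> 32
    uint32_t F16 = (F32 ^ (F32 >> 16)) & 0xFFFF;    // 32 -> 16
    uint32_t F8 = (F16 ^ (F16 >> 8)) & 0xFF;        // 16 -> 8
    assert(parity64(X) == unsigned(__builtin_popcount(F8) & 1));
  }
  return 0;
}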
39046
39047static SDValue combineAnd(SDNode *N, SelectionDAG &DAG,
39048 TargetLowering::DAGCombinerInfo &DCI,
39049 const X86Subtarget &Subtarget) {
39050 EVT VT = N->getValueType(0);
39051
39052 // If this is SSE1 only convert to FAND to avoid scalarization.
39053 if (Subtarget.hasSSE1() && !Subtarget.hasSSE2() && VT == MVT::v4i32) {
39054 return DAG.getBitcast(
39055 MVT::v4i32, DAG.getNode(X86ISD::FAND, SDLoc(N), MVT::v4f32,
39056 DAG.getBitcast(MVT::v4f32, N->getOperand(0)),
39057 DAG.getBitcast(MVT::v4f32, N->getOperand(1))));
39058 }
39059
39060 // Use a 32-bit and+zext if upper bits known zero.
39061 if (VT == MVT::i64 && Subtarget.is64Bit() &&
39062 !isa<ConstantSDNode>(N->getOperand(1))) {
39063 APInt HiMask = APInt::getHighBitsSet(64, 32);
39064 if (DAG.MaskedValueIsZero(N->getOperand(1), HiMask) ||
39065 DAG.MaskedValueIsZero(N->getOperand(0), HiMask)) {
39066 SDLoc dl(N);
39067 SDValue LHS = DAG.getNode(ISD::TRUNCATE, dl, MVT::i32, N->getOperand(0));
39068 SDValue RHS = DAG.getNode(ISD::TRUNCATE, dl, MVT::i32, N->getOperand(1));
39069 return DAG.getNode(ISD::ZERO_EXTEND, dl, MVT::i64,
39070 DAG.getNode(ISD::AND, dl, MVT::i32, LHS, RHS));
39071 }
39072 }
39073
39074 // This must be done before legalization has expanded the ctpop.
39075 if (SDValue V = combineParity(N, DAG, Subtarget))
39076 return V;
39077
39078 // Match all-of bool scalar reductions into a bitcast/movmsk + cmp.
39079 // TODO: Support multiple SrcOps.
39080 if (VT == MVT::i1) {
39081 SmallVector<SDValue, 2> SrcOps;
39082 if (matchBitOpReduction(SDValue(N, 0), ISD::AND, SrcOps) &&
39083 SrcOps.size() == 1) {
39084 SDLoc dl(N);
39085 unsigned NumElts = SrcOps[0].getValueType().getVectorNumElements();
39086 EVT MaskVT = EVT::getIntegerVT(*DAG.getContext(), NumElts);
39087 SDValue Mask = combineBitcastvxi1(DAG, MaskVT, SrcOps[0], dl, Subtarget);
39088 if (Mask) {
39089 APInt AllBits = APInt::getAllOnesValue(NumElts);
39090 return DAG.getSetCC(dl, MVT::i1, Mask,
39091 DAG.getConstant(AllBits, dl, MaskVT), ISD::SETEQ);
39092 }
39093 }
39094 }
39095
39096 if (DCI.isBeforeLegalizeOps())
39097 return SDValue();
39098
39099 if (SDValue R = combineCompareEqual(N, DAG, DCI, Subtarget))
39100 return R;
39101
39102 if (SDValue FPLogic = convertIntLogicToFPLogic(N, DAG, Subtarget))
39103 return FPLogic;
39104
39105 if (SDValue R = combineANDXORWithAllOnesIntoANDNP(N, DAG))
39106 return R;
39107
39108 if (SDValue ShiftRight = combineAndMaskToShift(N, DAG, Subtarget))
39109 return ShiftRight;
39110
39111 if (SDValue R = combineAndLoadToBZHI(N, DAG, Subtarget))
39112 return R;
39113
39114 // Attempt to recursively combine a bitmask AND with shuffles.
39115 if (VT.isVector() && (VT.getScalarSizeInBits() % 8) == 0) {
39116 SDValue Op(N, 0);
39117 if (SDValue Res = combineX86ShufflesRecursively(Op, DAG, Subtarget))
39118 return Res;
39119 }
39120
39121 // Attempt to combine a scalar bitmask AND with an extracted shuffle.
39122 if ((VT.getScalarSizeInBits() % 8) == 0 &&
39123 N->getOperand(0).getOpcode() == ISD::EXTRACT_VECTOR_ELT &&
39124 isa<ConstantSDNode>(N->getOperand(0).getOperand(1))) {
39125 SDValue BitMask = N->getOperand(1);
39126 SDValue SrcVec = N->getOperand(0).getOperand(0);
39127 EVT SrcVecVT = SrcVec.getValueType();
39128
39129 // Check that the constant bitmask masks whole bytes.
39130 APInt UndefElts;
39131 SmallVector<APInt, 64> EltBits;
39132 if (VT == SrcVecVT.getScalarType() &&
39133 N->getOperand(0)->isOnlyUserOf(SrcVec.getNode()) &&
39134 getTargetConstantBitsFromNode(BitMask, 8, UndefElts, EltBits) &&
39135 llvm::all_of(EltBits, [](APInt M) {
39136 return M.isNullValue() || M.isAllOnesValue();
39137 })) {
39138 unsigned NumElts = SrcVecVT.getVectorNumElements();
39139 unsigned Scale = SrcVecVT.getScalarSizeInBits() / 8;
39140 unsigned Idx = N->getOperand(0).getConstantOperandVal(1);
39141
39142 // Create a root shuffle mask from the byte mask and the extracted index.
39143 SmallVector<int, 16> ShuffleMask(NumElts * Scale, SM_SentinelUndef);
39144 for (unsigned i = 0; i != Scale; ++i) {
39145 if (UndefElts[i])
39146 continue;
39147 int VecIdx = Scale * Idx + i;
39148 ShuffleMask[VecIdx] =
39149 EltBits[i].isNullValue() ? SM_SentinelZero : VecIdx;
39150 }
39151
39152 if (SDValue Shuffle = combineX86ShufflesRecursively(
39153 {SrcVec}, 0, SrcVec, ShuffleMask, {}, /*Depth*/ 1,
39154 /*HasVarMask*/ false, /*AllowVarMask*/ true, DAG, Subtarget))
39155 return DAG.getNode(ISD::EXTRACT_VECTOR_ELT, SDLoc(N), VT, Shuffle,
39156 N->getOperand(0).getOperand(1));
39157 }
39158 }
39159
39160 return SDValue();
39161}
39162
39163// Canonicalize OR(AND(X,C),AND(Y,~C)) -> OR(AND(X,C),ANDNP(C,Y))
39164static SDValue canonicalizeBitSelect(SDNode *N, SelectionDAG &DAG,
39165 const X86Subtarget &Subtarget) {
39166 assert(N->getOpcode() == ISD::OR && "Unexpected Opcode");
39167
39168 MVT VT = N->getSimpleValueType(0);
39169 if (!VT.isVector() || (VT.getScalarSizeInBits() % 8) != 0)
39170 return SDValue();
39171
39172 SDValue N0 = peekThroughBitcasts(N->getOperand(0));
39173 SDValue N1 = peekThroughBitcasts(N->getOperand(1));
39174 if (N0.getOpcode() != ISD::AND || N1.getOpcode() != ISD::AND)
39175 return SDValue();
39176
39177 // On XOP we'll lower to PCMOV so accept one use. With AVX512, we can use
39178 // VPTERNLOG. Otherwise only do this if either mask has multiple uses already.
39179 bool UseVPTERNLOG = (Subtarget.hasAVX512() && VT.is512BitVector()) ||
39180 Subtarget.hasVLX();
39181 if (!(Subtarget.hasXOP() || UseVPTERNLOG ||
39182 !N0.getOperand(1).hasOneUse() || !N1.getOperand(1).hasOneUse()))
39183 return SDValue();
39184
39185 // Attempt to extract constant byte masks.
39186 APInt UndefElts0, UndefElts1;
39187 SmallVector<APInt, 32> EltBits0, EltBits1;
39188 if (!getTargetConstantBitsFromNode(N0.getOperand(1), 8, UndefElts0, EltBits0,
39189 false, false))
39190 return SDValue();
39191 if (!getTargetConstantBitsFromNode(N1.getOperand(1), 8, UndefElts1, EltBits1,
39192 false, false))
39193 return SDValue();
39194
39195 for (unsigned i = 0, e = EltBits0.size(); i != e; ++i) {
39196 // TODO - add UNDEF elts support.
39197 if (UndefElts0[i] || UndefElts1[i])
39198 return SDValue();
39199 if (EltBits0[i] != ~EltBits1[i])
39200 return SDValue();
39201 }
39202
39203 SDLoc DL(N);
39204 SDValue X = N->getOperand(0);
39205 SDValue Y =
39206 DAG.getNode(X86ISD::ANDNP, DL, VT, DAG.getBitcast(VT, N0.getOperand(1)),
39207 DAG.getBitcast(VT, N1.getOperand(0)));
39208 return DAG.getNode(ISD::OR, DL, VT, X, Y);
39209}
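
// A minimal standalone sketch (plain C++, independent of the SelectionDAG code
// above) of the scalar identity this canonicalization relies on: select bits of
// X where C is set and bits of Y where it is clear, with ANDNP(C,Y) == ~C & Y.
namespace bitselect_sketch {
constexpr unsigned andnp(unsigned C, unsigned Y) { return ~C & Y; }
constexpr unsigned bitSelect(unsigned X, unsigned Y, unsigned C) {
  return (X & C) | andnp(C, Y); // OR(AND(X,C), ANDNP(C,Y)) == OR(AND(X,C), AND(Y,~C))
}
static_assert(bitSelect(0xAAAAAAAAu, 0x55555555u, 0xFFFF0000u) == 0xAAAA5555u,
              "upper half taken from X, lower half from Y");
static_assert(bitSelect(0x12345678u, 0x9ABCDEF0u, 0x00000000u) == 0x9ABCDEF0u,
              "an all-zero mask selects Y");
}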
39210
39211// Try to match OR(AND(~MASK,X),AND(MASK,Y)) logic pattern.
39212static bool matchLogicBlend(SDNode *N, SDValue &X, SDValue &Y, SDValue &Mask) {
39213 if (N->getOpcode() != ISD::OR)
39214 return false;
39215
39216 SDValue N0 = N->getOperand(0);
39217 SDValue N1 = N->getOperand(1);
39218
39219 // Canonicalize AND to LHS.
39220 if (N1.getOpcode() == ISD::AND)
39221 std::swap(N0, N1);
39222
39223 // Attempt to match OR(AND(M,Y),ANDNP(M,X)).
39224 if (N0.getOpcode() != ISD::AND || N1.getOpcode() != X86ISD::ANDNP)
39225 return false;
39226
39227 Mask = N1.getOperand(0);
39228 X = N1.getOperand(1);
39229
39230 // Check to see if the mask appeared in both the AND and ANDNP.
39231 if (N0.getOperand(0) == Mask)
39232 Y = N0.getOperand(1);
39233 else if (N0.getOperand(1) == Mask)
39234 Y = N0.getOperand(0);
39235 else
39236 return false;
39237
39238 // TODO: Attempt to match against AND(XOR(-1,M),Y) as well; waiting for the
39239 // ANDNP combine allows other combines to happen that prevent matching.
39240 return true;
39241}
39242
39243// Try to match:
39244// (or (and (M, (sub 0, X)), (pandn M, X)))
39245// which is a special case of vselect:
39246// (vselect M, (sub 0, X), X)
39247// Per:
39248// http://graphics.stanford.edu/~seander/bithacks.html#ConditionalNegate
39249// We know that, if fNegate is 0 or 1:
39250// (fNegate ? -v : v) == ((v ^ -fNegate) + fNegate)
39251//
39252// Here, we have a mask, M (all 1s or 0), and, similarly, we know that:
39253// ((M & 1) ? -X : X) == ((X ^ -(M & 1)) + (M & 1))
39254// ( M ? -X : X) == ((X ^ M ) + (M & 1))
39255// This lets us transform our vselect to:
39256// (add (xor X, M), (and M, 1))
39257// And further to:
39258// (sub (xor X, M), M)
39259static SDValue combineLogicBlendIntoConditionalNegate(
39260 EVT VT, SDValue Mask, SDValue X, SDValue Y, const SDLoc &DL,
39261 SelectionDAG &DAG, const X86Subtarget &Subtarget) {
39262 EVT MaskVT = Mask.getValueType();
39263 assert(MaskVT.isInteger() &&
39264 DAG.ComputeNumSignBits(Mask) == MaskVT.getScalarSizeInBits() &&
39265 "Mask must be zero/all-bits");
39266
39267 if (X.getValueType() != MaskVT || Y.getValueType() != MaskVT)
39268 return SDValue();
39269 if (!DAG.getTargetLoweringInfo().isOperationLegal(ISD::SUB, MaskVT))
39270 return SDValue();
39271
39272 auto IsNegV = [](SDNode *N, SDValue V) {
39273 return N->getOpcode() == ISD::SUB && N->getOperand(1) == V &&
39274 ISD::isBuildVectorAllZeros(N->getOperand(0).getNode());
39275 };
39276
39277 SDValue V;
39278 if (IsNegV(Y.getNode(), X))
39279 V = X;
39280 else if (IsNegV(X.getNode(), Y))
39281 V = Y;
39282 else
39283 return SDValue();
39284
39285 SDValue SubOp1 = DAG.getNode(ISD::XOR, DL, MaskVT, V, Mask);
39286 SDValue SubOp2 = Mask;
39287
39288 // If the negate was on the false side of the select, then
39289 // the operands of the SUB need to be swapped. PR 27251.
39290 // This is because the pattern being matched above is
39291 // (vselect M, (sub 0, X), X) -> (sub (xor X, M), M)
39292 // but if the pattern matched was
39293 // (vselect M, X, (sub 0, X)), that is really a negation of the pattern
39294 // above, -(vselect M, (sub 0, X), X), and therefore the replacement
39295 // pattern also needs to be a negation of the replacement pattern above.
39296 // And -(sub X, Y) is just sub (Y, X), so swapping the operands of the
39297 // sub accomplishes the negation of the replacement pattern.
39298 if (V == Y)
39299 std::swap(SubOp1, SubOp2);
39300
39301 SDValue Res = DAG.getNode(ISD::SUB, DL, MaskVT, SubOp1, SubOp2);
39302 return DAG.getBitcast(VT, Res);
39303}
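
// A minimal standalone sketch (plain C++, not the DAG code above) of the scalar
// identity being used: with a per-lane mask M that is all-zeros or all-ones,
// (M ? -X : X) == (X ^ M) - M in two's complement arithmetic.
#include <cstdint>
namespace condneg_sketch {
constexpr std::uint32_t condNegate(std::uint32_t X, std::uint32_t M) {
  return (X ^ M) - M; // M == 0 -> X; M == ~0u -> (~X) + 1 == -X (mod 2^32)
}
static_assert(condNegate(5u, 0u) == 5u, "a zero mask leaves X unchanged");
static_assert(condNegate(5u, ~0u) == static_cast<std::uint32_t>(-5),
              "an all-ones mask negates X");
}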
39304
39305// Try to fold:
39306// (or (and (m, y), (pandn m, x)))
39307// into:
39308// (vselect m, x, y)
39309// As a special case, try to fold:
39310// (or (and (m, (sub 0, x)), (pandn m, x)))
39311// into:
39312// (sub (xor X, M), M)
39313static SDValue combineLogicBlendIntoPBLENDV(SDNode *N, SelectionDAG &DAG,
39314 const X86Subtarget &Subtarget) {
39315 assert(N->getOpcode() == ISD::OR && "Unexpected Opcode");
39316
39317 EVT VT = N->getValueType(0);
39318 if (!((VT.is128BitVector() && Subtarget.hasSSE2()) ||
39319 (VT.is256BitVector() && Subtarget.hasInt256())))
39320 return SDValue();
39321
39322 SDValue X, Y, Mask;
39323 if (!matchLogicBlend(N, X, Y, Mask))
39324 return SDValue();
39325
39326 // Validate that X, Y, and Mask are bitcasts, and see through them.
39327 Mask = peekThroughBitcasts(Mask);
39328 X = peekThroughBitcasts(X);
39329 Y = peekThroughBitcasts(Y);
39330
39331 EVT MaskVT = Mask.getValueType();
39332 unsigned EltBits = MaskVT.getScalarSizeInBits();
39333
39334 // TODO: Attempt to handle floating point cases as well?
39335 if (!MaskVT.isInteger() || DAG.ComputeNumSignBits(Mask) != EltBits)
39336 return SDValue();
39337
39338 SDLoc DL(N);
39339
39340 // Attempt to combine to conditional negate: (sub (xor X, M), M)
39341 if (SDValue Res = combineLogicBlendIntoConditionalNegate(VT, Mask, X, Y, DL,
39342 DAG, Subtarget))
39343 return Res;
39344
39345 // PBLENDVB is only available on SSE 4.1.
39346 if (!Subtarget.hasSSE41())
39347 return SDValue();
39348
39349 MVT BlendVT = VT.is256BitVector() ? MVT::v32i8 : MVT::v16i8;
39350
39351 X = DAG.getBitcast(BlendVT, X);
39352 Y = DAG.getBitcast(BlendVT, Y);
39353 Mask = DAG.getBitcast(BlendVT, Mask);
39354 Mask = DAG.getSelect(DL, BlendVT, Mask, Y, X);
39355 return DAG.getBitcast(VT, Mask);
39356}
39357
39358// Helper function for combineOrCmpEqZeroToCtlzSrl
39359// Transforms:
39360// seteq(cmp x, 0)
39361// into:
39362// srl(ctlz x), log2(bitsize(x))
39363// Input pattern is checked by caller.
39364static SDValue lowerX86CmpEqZeroToCtlzSrl(SDValue Op, EVT ExtTy,
39365 SelectionDAG &DAG) {
39366 SDValue Cmp = Op.getOperand(1);
39367 EVT VT = Cmp.getOperand(0).getValueType();
39368 unsigned Log2b = Log2_32(VT.getSizeInBits());
39369 SDLoc dl(Op);
39370 SDValue Clz = DAG.getNode(ISD::CTLZ, dl, VT, Cmp->getOperand(0));
39371 // The result of the shift is true or false, and on X86, the 32-bit
39372 // encoding of shr and lzcnt is more desirable.
39373 SDValue Trunc = DAG.getZExtOrTrunc(Clz, dl, MVT::i32);
39374 SDValue Scc = DAG.getNode(ISD::SRL, dl, MVT::i32, Trunc,
39375 DAG.getConstant(Log2b, dl, MVT::i8));
39376 return DAG.getZExtOrTrunc(Scc, dl, ExtTy);
39377}
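
// A minimal standalone sketch (plain C++) of the equivalence the helper above
// exploits: with lzcnt-style semantics where clz(0) == bitwidth, the test
// (x == 0) is exactly clz(x) >> log2(bitwidth). clz32 below is a hypothetical
// stand-in for the hardware lzcnt instruction.
#include <cstdint>
namespace ctlz_sketch {
constexpr unsigned clz32(std::uint32_t X) {
  unsigned N = 0;
  for (std::uint32_t Bit = 0x80000000u; Bit != 0 && (X & Bit) == 0; Bit >>= 1)
    ++N;
  return N; // clz32(0) == 32, matching lzcnt rather than bsr
}
constexpr unsigned isZeroViaClz(std::uint32_t X) { return clz32(X) >> 5; }
static_assert(isZeroViaClz(0u) == 1u, "clz(0) == 32 and 32 >> 5 == 1");
static_assert(isZeroViaClz(1u) == 0u, "clz(nonzero) <= 31, so the shift yields 0");
static_assert(isZeroViaClz(0x80000000u) == 0u, "top bit set -> clz == 0");
}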
39378
39379// Try to transform:
39380// zext(or(setcc(eq, (cmp x, 0)), setcc(eq, (cmp y, 0))))
39381// into:
39382 // srl(or(ctlz(x), ctlz(y)), log2(bitsize(x)))
39383// Will also attempt to match more generic cases, eg:
39384// zext(or(or(setcc(eq, cmp 0), setcc(eq, cmp 0)), setcc(eq, cmp 0)))
39385// Only applies if the target supports the FastLZCNT feature.
39386static SDValue combineOrCmpEqZeroToCtlzSrl(SDNode *N, SelectionDAG &DAG,
39387 TargetLowering::DAGCombinerInfo &DCI,
39388 const X86Subtarget &Subtarget) {
39389 if (DCI.isBeforeLegalize() || !Subtarget.getTargetLowering()->isCtlzFast())
39390 return SDValue();
39391
39392 auto isORCandidate = [](SDValue N) {
39393 return (N->getOpcode() == ISD::OR && N->hasOneUse());
39394 };
39395
39396 // Check that the zero extend is extending to 32 bits or more. The code generated by
39397 // srl(ctlz) for 16-bit or smaller variants of the pattern would require extra
39398 // instructions to clear the upper bits.
39399 if (!N->hasOneUse() || !N->getSimpleValueType(0).bitsGE(MVT::i32) ||
39400 !isORCandidate(N->getOperand(0)))
39401 return SDValue();
39402
39403 // Check the node matches: setcc(eq, cmp 0)
39404 auto isSetCCCandidate = [](SDValue N) {
39405 return N->getOpcode() == X86ISD::SETCC && N->hasOneUse() &&
39406 X86::CondCode(N->getConstantOperandVal(0)) == X86::COND_E &&
39407 N->getOperand(1).getOpcode() == X86ISD::CMP &&
39408 isNullConstant(N->getOperand(1).getOperand(1)) &&
39409 N->getOperand(1).getValueType().bitsGE(MVT::i32);
39410 };
39411
39412 SDNode *OR = N->getOperand(0).getNode();
39413 SDValue LHS = OR->getOperand(0);
39414 SDValue RHS = OR->getOperand(1);
39415
39416 // Save nodes matching or(or, setcc(eq, cmp 0)).
39417 SmallVector<SDNode *, 2> ORNodes;
39418 while (((isORCandidate(LHS) && isSetCCCandidate(RHS)) ||
39419 (isORCandidate(RHS) && isSetCCCandidate(LHS)))) {
39420 ORNodes.push_back(OR);
39421 OR = (LHS->getOpcode() == ISD::OR) ? LHS.getNode() : RHS.getNode();
39422 LHS = OR->getOperand(0);
39423 RHS = OR->getOperand(1);
39424 }
39425
39426 // The last OR node should match or(setcc(eq, cmp 0), setcc(eq, cmp 0)).
39427 if (!(isSetCCCandidate(LHS) && isSetCCCandidate(RHS)) ||
39428 !isORCandidate(SDValue(OR, 0)))
39429 return SDValue();
39430
39431 // We have an or(setcc(eq, cmp 0), setcc(eq, cmp 0)) pattern; try to lower it
39432 // to
39433 // or(srl(ctlz),srl(ctlz)).
39434 // The dag combiner can then fold it into:
39435 // srl(or(ctlz, ctlz)).
39436 EVT VT = OR->getValueType(0);
39437 SDValue NewLHS = lowerX86CmpEqZeroToCtlzSrl(LHS, VT, DAG);
39438 SDValue Ret, NewRHS;
39439 if (NewLHS && (NewRHS = lowerX86CmpEqZeroToCtlzSrl(RHS, VT, DAG)))
39440 Ret = DAG.getNode(ISD::OR, SDLoc(OR), VT, NewLHS, NewRHS);
39441
39442 if (!Ret)
39443 return SDValue();
39444
39445 // Try to lower nodes matching the or(or, setcc(eq, cmp 0)) pattern.
39446 while (ORNodes.size() > 0) {
39447 OR = ORNodes.pop_back_val();
39448 LHS = OR->getOperand(0);
39449 RHS = OR->getOperand(1);
39450 // Swap rhs with lhs to match or(setcc(eq, cmp, 0), or).
39451 if (RHS->getOpcode() == ISD::OR)
39452 std::swap(LHS, RHS);
39453 NewRHS = lowerX86CmpEqZeroToCtlzSrl(RHS, VT, DAG);
39454 if (!NewRHS)
39455 return SDValue();
39456 Ret = DAG.getNode(ISD::OR, SDLoc(OR), VT, Ret, NewRHS);
39457 }
39458
39459 if (Ret)
39460 Ret = DAG.getNode(ISD::ZERO_EXTEND, SDLoc(N), N->getValueType(0), Ret);
39461
39462 return Ret;
39463}
39464
39465static SDValue combineOr(SDNode *N, SelectionDAG &DAG,
39466 TargetLowering::DAGCombinerInfo &DCI,
39467 const X86Subtarget &Subtarget) {
39468 SDValue N0 = N->getOperand(0);
39469 SDValue N1 = N->getOperand(1);
39470 EVT VT = N->getValueType(0);
39471
39472 // If this is SSE1 only, convert to FOR to avoid scalarization.
39473 if (Subtarget.hasSSE1() && !Subtarget.hasSSE2() && VT == MVT::v4i32) {
39474 return DAG.getBitcast(MVT::v4i32,
39475 DAG.getNode(X86ISD::FOR, SDLoc(N), MVT::v4f32,
39476 DAG.getBitcast(MVT::v4f32, N0),
39477 DAG.getBitcast(MVT::v4f32, N1)));
39478 }
39479
39480 if (DCI.isBeforeLegalizeOps())
39481 return SDValue();
39482
39483 if (SDValue R = combineCompareEqual(N, DAG, DCI, Subtarget))
39484 return R;
39485
39486 if (SDValue FPLogic = convertIntLogicToFPLogic(N, DAG, Subtarget))
39487 return FPLogic;
39488
39489 if (SDValue R = canonicalizeBitSelect(N, DAG, Subtarget))
39490 return R;
39491
39492 if (SDValue R = combineLogicBlendIntoPBLENDV(N, DAG, Subtarget))
39493 return R;
39494
39495 // Attempt to recursively combine an OR of shuffles.
39496 if (VT.isVector() && (VT.getScalarSizeInBits() % 8) == 0) {
39497 SDValue Op(N, 0);
39498 if (SDValue Res = combineX86ShufflesRecursively(Op, DAG, Subtarget))
39499 return Res;
39500 }
39501
39502 if (VT != MVT::i16 && VT != MVT::i32 && VT != MVT::i64)
39503 return SDValue();
39504
39505 // fold (or (x << c) | (y >> (64 - c))) ==> (shld64 x, y, c)
39506 bool OptForSize = DAG.getMachineFunction().getFunction().hasOptSize();
39507 unsigned Bits = VT.getScalarSizeInBits();
39508
39509 // SHLD/SHRD instructions have lower register pressure, but on some
39510 // platforms they have higher latency than the equivalent
39511 // series of shifts/or that would otherwise be generated.
39512 // Don't fold (or (x << c) | (y >> (64 - c))) if SHLD/SHRD instructions
39513 // have higher latencies and we are not optimizing for size.
39514 if (!OptForSize && Subtarget.isSHLDSlow())
39515 return SDValue();
39516
39517 if (N0.getOpcode() == ISD::SRL && N1.getOpcode() == ISD::SHL)
39518 std::swap(N0, N1);
39519 if (N0.getOpcode() != ISD::SHL || N1.getOpcode() != ISD::SRL)
39520 return SDValue();
39521 if (!N0.hasOneUse() || !N1.hasOneUse())
39522 return SDValue();
39523
39524 SDValue ShAmt0 = N0.getOperand(1);
39525 if (ShAmt0.getValueType() != MVT::i8)
39526 return SDValue();
39527 SDValue ShAmt1 = N1.getOperand(1);
39528 if (ShAmt1.getValueType() != MVT::i8)
39529 return SDValue();
39530
39531 // Peek through any modulo shift masks.
39532 SDValue ShMsk0;
39533 if (ShAmt0.getOpcode() == ISD::AND &&
39534 isa<ConstantSDNode>(ShAmt0.getOperand(1)) &&
39535 ShAmt0.getConstantOperandAPInt(1) == (Bits - 1)) {
39536 ShMsk0 = ShAmt0;
39537 ShAmt0 = ShAmt0.getOperand(0);
39538 }
39539 SDValue ShMsk1;
39540 if (ShAmt1.getOpcode() == ISD::AND &&
39541 isa<ConstantSDNode>(ShAmt1.getOperand(1)) &&
39542 ShAmt1.getConstantOperandAPInt(1) == (Bits - 1)) {
39543 ShMsk1 = ShAmt1;
39544 ShAmt1 = ShAmt1.getOperand(0);
39545 }
39546
39547 if (ShAmt0.getOpcode() == ISD::TRUNCATE)
39548 ShAmt0 = ShAmt0.getOperand(0);
39549 if (ShAmt1.getOpcode() == ISD::TRUNCATE)
39550 ShAmt1 = ShAmt1.getOperand(0);
39551
39552 SDLoc DL(N);
39553 unsigned Opc = ISD::FSHL;
39554 SDValue Op0 = N0.getOperand(0);
39555 SDValue Op1 = N1.getOperand(0);
39556 if (ShAmt0.getOpcode() == ISD::SUB || ShAmt0.getOpcode() == ISD::XOR) {
39557 Opc = ISD::FSHR;
39558 std::swap(Op0, Op1);
39559 std::swap(ShAmt0, ShAmt1);
39560 std::swap(ShMsk0, ShMsk1);
39561 }
39562
39563 auto GetFunnelShift = [&DAG, &DL, VT, Opc](SDValue Op0, SDValue Op1,
39564 SDValue Amt) {
39565 if (Opc == ISD::FSHR)
39566 std::swap(Op0, Op1);
39567 return DAG.getNode(Opc, DL, VT, Op0, Op1,
39568 DAG.getNode(ISD::TRUNCATE, DL, MVT::i8, Amt));
39569 };
39570
39571 // OR( SHL( X, C ), SRL( Y, 32 - C ) ) -> FSHL( X, Y, C )
39572 // OR( SRL( X, C ), SHL( Y, 32 - C ) ) -> FSHR( Y, X, C )
39573 // OR( SHL( X, C ), SRL( SRL( Y, 1 ), XOR( C, 31 ) ) ) -> FSHL( X, Y, C )
39574 // OR( SRL( X, C ), SHL( SHL( Y, 1 ), XOR( C, 31 ) ) ) -> FSHR( Y, X, C )
39575 // OR( SHL( X, AND( C, 31 ) ), SRL( Y, AND( 0 - C, 31 ) ) ) -> FSHL( X, Y, C )
39576 // OR( SRL( X, AND( C, 31 ) ), SHL( Y, AND( 0 - C, 31 ) ) ) -> FSHR( Y, X, C )
39577 if (ShAmt1.getOpcode() == ISD::SUB) {
39578 SDValue Sum = ShAmt1.getOperand(0);
39579 if (auto *SumC = dyn_cast<ConstantSDNode>(Sum)) {
39580 SDValue ShAmt1Op1 = ShAmt1.getOperand(1);
39581 if (ShAmt1Op1.getOpcode() == ISD::AND &&
39582 isa<ConstantSDNode>(ShAmt1Op1.getOperand(1)) &&
39583 ShAmt1Op1.getConstantOperandAPInt(1) == (Bits - 1)) {
39584 ShMsk1 = ShAmt1Op1;
39585 ShAmt1Op1 = ShAmt1Op1.getOperand(0);
39586 }
39587 if (ShAmt1Op1.getOpcode() == ISD::TRUNCATE)
39588 ShAmt1Op1 = ShAmt1Op1.getOperand(0);
39589 if ((SumC->getAPIntValue() == Bits ||
39590 (SumC->getAPIntValue() == 0 && ShMsk1)) &&
39591 ShAmt1Op1 == ShAmt0)
39592 return GetFunnelShift(Op0, Op1, ShAmt0);
39593 }
39594 } else if (auto *ShAmt1C = dyn_cast<ConstantSDNode>(ShAmt1)) {
39595 auto *ShAmt0C = dyn_cast<ConstantSDNode>(ShAmt0);
39596 if (ShAmt0C && (ShAmt0C->getSExtValue() + ShAmt1C->getSExtValue()) == Bits)
39597 return GetFunnelShift(Op0, Op1, ShAmt0);
39598 } else if (ShAmt1.getOpcode() == ISD::XOR) {
39599 SDValue Mask = ShAmt1.getOperand(1);
39600 if (auto *MaskC = dyn_cast<ConstantSDNode>(Mask)) {
39601 unsigned InnerShift = (ISD::FSHL == Opc ? ISD::SRL : ISD::SHL);
39602 SDValue ShAmt1Op0 = ShAmt1.getOperand(0);
39603 if (ShAmt1Op0.getOpcode() == ISD::TRUNCATE)
39604 ShAmt1Op0 = ShAmt1Op0.getOperand(0);
39605 if (MaskC->getSExtValue() == (Bits - 1) &&
39606 (ShAmt1Op0 == ShAmt0 || ShAmt1Op0 == ShMsk0)) {
39607 if (Op1.getOpcode() == InnerShift &&
39608 isa<ConstantSDNode>(Op1.getOperand(1)) &&
39609 Op1.getConstantOperandAPInt(1) == 1) {
39610 return GetFunnelShift(Op0, Op1.getOperand(0), ShAmt0);
39611 }
39612 // Test for ADD( Y, Y ) as an equivalent to SHL( Y, 1 ).
39613 if (InnerShift == ISD::SHL && Op1.getOpcode() == ISD::ADD &&
39614 Op1.getOperand(0) == Op1.getOperand(1)) {
39615 return GetFunnelShift(Op0, Op1.getOperand(0), ShAmt0);
39616 }
39617 }
39618 }
39619 }
39620
39621 return SDValue();
39622}
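
// A minimal standalone sketch (plain C++) of the scalar pattern the combine
// above recognizes: for 0 < C < 32, OR(SHL(X,C), SRL(Y,32-C)) is the funnel
// shift FSHL(X, Y, C), i.e. the top 32 bits of the 64-bit value X:Y shifted
// left by C.
#include <cstdint>
namespace fshl_sketch {
constexpr std::uint32_t fshl32(std::uint32_t X, std::uint32_t Y, unsigned C) {
  return (X << C) | (Y >> (32 - C)); // valid for 0 < C < 32
}
static_assert(fshl32(0x12345678u, 0x9ABCDEF0u, 8) == 0x3456789Au,
              "the top 8 bits of Y are funneled into the low bits");
static_assert(fshl32(0xFFFFFFFFu, 0x00000000u, 4) == 0xFFFFFFF0u,
              "a zero low word degenerates to a plain shift of X");
}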
39623
39624/// Try to turn tests against the signbit in the form of:
39625/// XOR(TRUNCATE(SRL(X, size(X)-1)), 1)
39626/// into:
39627/// SETGT(X, -1)
39628static SDValue foldXorTruncShiftIntoCmp(SDNode *N, SelectionDAG &DAG) {
39629 // This is only worth doing if the output type is i8 or i1.
39630 EVT ResultType = N->getValueType(0);
39631 if (ResultType != MVT::i8 && ResultType != MVT::i1)
39632 return SDValue();
39633
39634 SDValue N0 = N->getOperand(0);
39635 SDValue N1 = N->getOperand(1);
39636
39637 // We should be performing an xor against a truncated shift.
39638 if (N0.getOpcode() != ISD::TRUNCATE || !N0.hasOneUse())
39639 return SDValue();
39640
39641 // Make sure we are performing an xor against one.
39642 if (!isOneConstant(N1))
39643 return SDValue();
39644
39645 // SetCC on x86 zero extends so only act on this if it's a logical shift.
39646 SDValue Shift = N0.getOperand(0);
39647 if (Shift.getOpcode() != ISD::SRL || !Shift.hasOneUse())
39648 return SDValue();
39649
39650 // Make sure we are truncating from one of i16, i32 or i64.
39651 EVT ShiftTy = Shift.getValueType();
39652 if (ShiftTy != MVT::i16 && ShiftTy != MVT::i32 && ShiftTy != MVT::i64)
39653 return SDValue();
39654
39655 // Make sure the shift amount extracts the sign bit.
39656 if (!isa<ConstantSDNode>(Shift.getOperand(1)) ||
39657 Shift.getConstantOperandAPInt(1) != (ShiftTy.getSizeInBits() - 1))
39658 return SDValue();
39659
39660 // Create a greater-than comparison against -1.
39661 // N.B. Using SETGE against 0 works but we want a canonical-looking
39662 // comparison; using SETGT matches up with what TranslateX86CC does.
39663 SDLoc DL(N);
39664 SDValue ShiftOp = Shift.getOperand(0);
39665 EVT ShiftOpTy = ShiftOp.getValueType();
39666 const TargetLowering &TLI = DAG.getTargetLoweringInfo();
39667 EVT SetCCResultType = TLI.getSetCCResultType(DAG.getDataLayout(),
39668 *DAG.getContext(), ResultType);
39669 SDValue Cond = DAG.getSetCC(DL, SetCCResultType, ShiftOp,
39670 DAG.getConstant(-1, DL, ShiftOpTy), ISD::SETGT);
39671 if (SetCCResultType != ResultType)
39672 Cond = DAG.getNode(ISD::ZERO_EXTEND, DL, ResultType, Cond);
39673 return Cond;
39674}
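
// A minimal standalone sketch (plain C++) of the i32 form of the fold above:
// XOR(TRUNCATE(SRL(X, 31)), 1) tests that the sign bit is clear, which is the
// same boolean as the canonical comparison X > -1.
#include <cstdint>
namespace signbit_sketch {
constexpr bool viaShift(std::int32_t X) {
  return ((static_cast<std::uint32_t>(X) >> 31) ^ 1u) != 0;
}
constexpr bool viaCompare(std::int32_t X) { return X > -1; }
static_assert(viaShift(0) == viaCompare(0), "zero is non-negative");
static_assert(viaShift(-1) == viaCompare(-1), "negative values give false");
static_assert(viaShift(42) == viaCompare(42), "positive values give true");
}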
39675
39676/// Turn vector tests of the signbit in the form of:
39677/// xor (sra X, elt_size(X)-1), -1
39678/// into:
39679/// pcmpgt X, -1
39680///
39681/// This should be called before type legalization because the pattern may not
39682/// persist after that.
39683static SDValue foldVectorXorShiftIntoCmp(SDNode *N, SelectionDAG &DAG,
39684 const X86Subtarget &Subtarget) {
39685 EVT VT = N->getValueType(0);
39686 if (!VT.isSimple())
39687 return SDValue();
39688
39689 switch (VT.getSimpleVT().SimpleTy) {
39690 default: return SDValue();
39691 case MVT::v16i8:
39692 case MVT::v8i16:
39693 case MVT::v4i32: if (!Subtarget.hasSSE2()) return SDValue(); break;
39694 case MVT::v2i64: if (!Subtarget.hasSSE42()) return SDValue(); break;
39695 case MVT::v32i8:
39696 case MVT::v16i16:
39697 case MVT::v8i32:
39698 case MVT::v4i64: if (!Subtarget.hasAVX2()) return SDValue(); break;
39699 }
39700
39701 // There must be a shift right algebraic before the xor, and the xor must be a
39702 // 'not' operation.
39703 SDValue Shift = N->getOperand(0);
39704 SDValue Ones = N->getOperand(1);
39705 if (Shift.getOpcode() != ISD::SRA || !Shift.hasOneUse() ||
39706 !ISD::isBuildVectorAllOnes(Ones.getNode()))
39707 return SDValue();
39708
39709 // The shift should be smearing the sign bit across each vector element.
39710 auto *ShiftAmt =
39711 isConstOrConstSplat(Shift.getOperand(1), /*AllowUndefs*/ true);
39712 if (!ShiftAmt ||
39713 ShiftAmt->getAPIntValue() != (Shift.getScalarValueSizeInBits() - 1))
39714 return SDValue();
39715
39716 // Create a greater-than comparison against -1. We don't use the more obvious
39717 // greater-than-or-equal-to-zero because SSE/AVX don't have that instruction.
39718 return DAG.getNode(X86ISD::PCMPGT, SDLoc(N), VT, Shift.getOperand(0), Ones);
39719}
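
// A minimal standalone sketch (plain C++) of the per-lane identity behind the
// vector fold above: NOT of a sign-smearing arithmetic shift is the
// all-ones/all-zeros mask for X > -1, which is what PCMPGT(X, -1) produces.
#include <cstdint>
namespace signmask_sketch {
constexpr std::uint32_t sraSmear(std::int32_t X) {
  return X < 0 ? 0xFFFFFFFFu : 0u; // sra(X, 31) spelled out portably
}
constexpr std::uint32_t notSmear(std::int32_t X) { return ~sraSmear(X); }
constexpr std::uint32_t cmpGtMinusOne(std::int32_t X) {
  return X > -1 ? 0xFFFFFFFFu : 0u;
}
static_assert(notSmear(5) == cmpGtMinusOne(5), "non-negative -> all-ones mask");
static_assert(notSmear(-7) == cmpGtMinusOne(-7), "negative -> all-zeros mask");
}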
39720
39721/// Detect patterns of truncation with unsigned saturation:
39722///
39723/// 1. (truncate (umin (x, unsigned_max_of_dest_type)) to dest_type).
39724/// Return the source value x to be truncated or SDValue() if the pattern was
39725/// not matched.
39726///
39727/// 2. (truncate (smin (smax (x, C1), C2)) to dest_type),
39728/// where C1 >= 0 and C2 is unsigned max of destination type.
39729///
39730/// (truncate (smax (smin (x, C2), C1)) to dest_type)
39731/// where C1 >= 0, C2 is unsigned max of destination type and C1 <= C2.
39732///
39733/// These two patterns are equivalent to:
39734/// (truncate (umin (smax(x, C1), unsigned_max_of_dest_type)) to dest_type)
39735/// So return the smax(x, C1) value to be truncated or SDValue() if the
39736/// pattern was not matched.
39737static SDValue detectUSatPattern(SDValue In, EVT VT, SelectionDAG &DAG,
39738 const SDLoc &DL) {
39739 EVT InVT = In.getValueType();
39740
39741 // Saturation with truncation. We truncate from InVT to VT.
39742 assert(InVT.getScalarSizeInBits() > VT.getScalarSizeInBits() &&
39743 "Unexpected types for truncate operation");
39744
39745 // Match min/max and return limit value as a parameter.
39746 auto MatchMinMax = [](SDValue V, unsigned Opcode, APInt &Limit) -> SDValue {
39747 if (V.getOpcode() == Opcode &&
39748 ISD::isConstantSplatVector(V.getOperand(1).getNode(), Limit))
39749 return V.getOperand(0);
39750 return SDValue();
39751 };
39752
39753 APInt C1, C2;
39754 if (SDValue UMin = MatchMinMax(In, ISD::UMIN, C2))
39755 // C2 should be equal to UINT32_MAX / UINT16_MAX / UINT8_MAX according to
39756 // the element size of the destination type.
39757 if (C2.isMask(VT.getScalarSizeInBits()))
39758 return UMin;
39759
39760 if (SDValue SMin = MatchMinMax(In, ISD::SMIN, C2))
39761 if (MatchMinMax(SMin, ISD::SMAX, C1))
39762 if (C1.isNonNegative() && C2.isMask(VT.getScalarSizeInBits()))
39763 return SMin;
39764
39765 if (SDValue SMax = MatchMinMax(In, ISD::SMAX, C1))
39766 if (SDValue SMin = MatchMinMax(SMax, ISD::SMIN, C2))
39767 if (C1.isNonNegative() && C2.isMask(VT.getScalarSizeInBits()) &&
39768 C2.uge(C1)) {
39769 return DAG.getNode(ISD::SMAX, DL, InVT, SMin, In.getOperand(1));
39770 }
39771
39772 return SDValue();
39773}
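
// A minimal standalone sketch (plain C++) of pattern 1 above in scalar form:
// truncating umin(x, unsigned_max_of_dest_type) is an unsigned-saturating
// truncation, shown here for u32 -> u8.
#include <cstdint>
namespace usat_sketch {
constexpr std::uint8_t truncUSat8(std::uint32_t X) {
  return static_cast<std::uint8_t>(X < 255u ? X : 255u); // umin(X, 0xFF), then trunc
}
static_assert(truncUSat8(7u) == 7u, "in-range values pass through");
static_assert(truncUSat8(300u) == 255u, "out-of-range values clamp to 0xFF");
}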
39774
39775/// Detect patterns of truncation with signed saturation:
39776/// (truncate (smin ((smax (x, signed_min_of_dest_type)),
39777/// signed_max_of_dest_type)) to dest_type)
39778/// or:
39779/// (truncate (smax ((smin (x, signed_max_of_dest_type)),
39780/// signed_min_of_dest_type)) to dest_type).
39781/// With MatchPackUS, the smax/smin range is [0, unsigned_max_of_dest_type].
39782/// Return the source value to be truncated or SDValue() if the pattern was not
39783/// matched.
39784static SDValue detectSSatPattern(SDValue In, EVT VT, bool MatchPackUS = false) {
39785 unsigned NumDstBits = VT.getScalarSizeInBits();
39786 unsigned NumSrcBits = In.getScalarValueSizeInBits();
39787 assert(NumSrcBits > NumDstBits && "Unexpected types for truncate operation");
39788
39789 auto MatchMinMax = [](SDValue V, unsigned Opcode,
39790 const APInt &Limit) -> SDValue {
39791 APInt C;
39792 if (V.getOpcode() == Opcode &&
39793 ISD::isConstantSplatVector(V.getOperand(1).getNode(), C) && C == Limit)
39794 return V.getOperand(0);
39795 return SDValue();
39796 };
39797
39798 APInt SignedMax, SignedMin;
39799 if (MatchPackUS) {
39800 SignedMax = APInt::getAllOnesValue(NumDstBits).zext(NumSrcBits);
39801 SignedMin = APInt(NumSrcBits, 0);
39802 } else {
39803 SignedMax = APInt::getSignedMaxValue(NumDstBits).sext(NumSrcBits);
39804 SignedMin = APInt::getSignedMinValue(NumDstBits).sext(NumSrcBits);
39805 }
39806
39807 if (SDValue SMin = MatchMinMax(In, ISD::SMIN, SignedMax))
39808 if (SDValue SMax = MatchMinMax(SMin, ISD::SMAX, SignedMin))
39809 return SMax;
39810
39811 if (SDValue SMax = MatchMinMax(In, ISD::SMAX, SignedMin))
39812 if (SDValue SMin = MatchMinMax(SMax, ISD::SMIN, SignedMax))
39813 return SMin;
39814
39815 return SDValue();
39816}
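
// A minimal standalone sketch (plain C++) of the signed-saturation pattern in
// scalar form: clamp to the destination type's signed range with smax/smin,
// then truncate, shown here for i32 -> i8.
#include <cstdint>
namespace ssat_sketch {
constexpr std::int8_t truncSSat8(std::int32_t X) {
  const std::int32_t Lo = -128, Hi = 127; // signed min/max of the destination type
  const std::int32_t Clamped = X < Lo ? Lo : (X > Hi ? Hi : X);
  return static_cast<std::int8_t>(Clamped);
}
static_assert(truncSSat8(100) == 100, "in-range values pass through");
static_assert(truncSSat8(1000) == 127, "large values clamp to the signed max");
static_assert(truncSSat8(-1000) == -128, "small values clamp to the signed min");
}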
39817
39818static SDValue combineTruncateWithSat(SDValue In, EVT VT, const SDLoc &DL,
39819 SelectionDAG &DAG,
39820 const X86Subtarget &Subtarget) {
39821 if (!Subtarget.hasSSE2() || !VT.isVector())
39822 return SDValue();
39823
39824 EVT SVT = VT.getVectorElementType();
39825 EVT InVT = In.getValueType();
39826 EVT InSVT = InVT.getVectorElementType();
39827
39828 // If we're clamping a signed 32-bit vector to 0-255 and the 32-bit vector is
39829 // split across two registers, we can use a packusdw+perm to clamp to 0-65535
39830 // and concatenate at the same time. Then we can use a final vpmovuswb to
39831 // clip to 0-255.
39832 if (Subtarget.hasBWI() && !Subtarget.useAVX512Regs() &&
39833 InVT == MVT::v16i32 && VT == MVT::v16i8) {
39834 if (auto USatVal = detectSSatPattern(In, VT, true)) {
39835 // Emit a VPACKUSDW+VPERMQ followed by a VPMOVUSWB.
39836 SDValue Mid = truncateVectorWithPACK(X86ISD::PACKUS, MVT::v16i16, USatVal,
39837 DL, DAG, Subtarget);
39838 assert(Mid && "Failed to pack!");
39839 return DAG.getNode(X86ISD::VTRUNCUS, DL, VT, Mid);
39840 }
39841 }
39842
39843 // vXi32 truncate instructions are available with AVX512F.
39844 // vXi16 truncate instructions are only available with AVX512BW.
39845 // For 256-bit or smaller vectors, we require VLX.
39846 // FIXME: We could widen truncates to 512 to remove the VLX restriction.
39847 // If the result type is 256 bits or larger and we have disabled 512-bit
39848 // registers, we should go ahead and use the pack instructions if possible.
39849 bool PreferAVX512 = ((Subtarget.hasAVX512() && InSVT == MVT::i32) ||
39850 (Subtarget.hasBWI() && InSVT == MVT::i16)) &&
39851 (InVT.getSizeInBits() > 128) &&
39852 (Subtarget.hasVLX() || InVT.getSizeInBits() > 256) &&
39853 !(!Subtarget.useAVX512Regs() && VT.getSizeInBits() >= 256);
39854
39855 if (isPowerOf2_32(VT.getVectorNumElements()) && !PreferAVX512 &&
39856 VT.getSizeInBits() >= 64 &&
39857 (SVT == MVT::i8 || SVT == MVT::i16) &&
39858 (InSVT == MVT::i16 || InSVT == MVT::i32)) {
39859 if (auto USatVal = detectSSatPattern(In, VT, true)) {
39860 // vXi32 -> vXi8 must be performed as PACKUSWB(PACKSSDW,PACKSSDW).
39861 // Only do this when the result is at least 64 bits or we'll be leaving
39862 // dangling PACKSSDW nodes.
39863 if (SVT == MVT::i8 && InSVT == MVT::i32) {
39864 EVT MidVT = EVT::getVectorVT(*DAG.getContext(), MVT::i16,
39865 VT.getVectorNumElements());
39866 SDValue Mid = truncateVectorWithPACK(X86ISD::PACKSS, MidVT, USatVal, DL,
39867 DAG, Subtarget);
39868 assert(Mid && "Failed to pack!");
39869 SDValue V = truncateVectorWithPACK(X86ISD::PACKUS, VT, Mid, DL, DAG,
39870 Subtarget);
39871 assert(V && "Failed to pack!");
39872 return V;
39873 } else if (SVT == MVT::i8 || Subtarget.hasSSE41())
39874 return truncateVectorWithPACK(X86ISD::PACKUS, VT, USatVal, DL, DAG,
39875 Subtarget);
39876 }
39877 if (auto SSatVal = detectSSatPattern(In, VT))
39878 return truncateVectorWithPACK(X86ISD::PACKSS, VT, SSatVal, DL, DAG,
39879 Subtarget);
39880 }
39881
39882 const TargetLowering &TLI = DAG.getTargetLoweringInfo();
39883 if (TLI.isTypeLegal(InVT) && InVT.isVector() && SVT != MVT::i1 &&
39884 Subtarget.hasAVX512() && (InSVT != MVT::i16 || Subtarget.hasBWI())) {
39885 unsigned TruncOpc;
39886 SDValue SatVal;
39887 if (auto SSatVal = detectSSatPattern(In, VT)) {
39888 SatVal = SSatVal;
39889 TruncOpc = X86ISD::VTRUNCS;
39890 } else if (auto USatVal = detectUSatPattern(In, VT, DAG, DL)) {
39891 SatVal = USatVal;
39892 TruncOpc = X86ISD::VTRUNCUS;
39893 }
39894 if (SatVal) {
39895 unsigned ResElts = VT.getVectorNumElements();
39896 // If the input type is less than 512 bits and we don't have VLX, we need
39897 // to widen to 512 bits.
39898 if (!Subtarget.hasVLX() && !InVT.is512BitVector()) {
39899 unsigned NumConcats = 512 / InVT.getSizeInBits();
39900 ResElts *= NumConcats;
39901 SmallVector<SDValue, 4> ConcatOps(NumConcats, DAG.getUNDEF(InVT));
39902 ConcatOps[0] = SatVal;
39903 InVT = EVT::getVectorVT(*DAG.getContext(), InSVT,
39904 NumConcats * InVT.getVectorNumElements());
39905 SatVal = DAG.getNode(ISD::CONCAT_VECTORS, DL, InVT, ConcatOps);
39906 }
39907 // Widen the result if it's narrower than 128 bits.
39908 if (ResElts * SVT.getSizeInBits() < 128)
39909 ResElts = 128 / SVT.getSizeInBits();
39910 EVT TruncVT = EVT::getVectorVT(*DAG.getContext(), SVT, ResElts);
39911 SDValue Res = DAG.getNode(TruncOpc, DL, TruncVT, SatVal);
39912 return DAG.getNode(ISD::EXTRACT_SUBVECTOR, DL, VT, Res,
39913 DAG.getIntPtrConstant(0, DL));
39914 }
39915 }
39916
39917 return SDValue();
39918}
39919
39920/// This function detects the AVG pattern between vectors of unsigned i8/i16,
39921/// which is c = (a + b + 1) / 2, and replace this operation with the efficient
39922/// X86ISD::AVG instruction.
39923static SDValue detectAVGPattern(SDValue In, EVT VT, SelectionDAG &DAG,
39924 const X86Subtarget &Subtarget,
39925 const SDLoc &DL) {
39926 if (!VT.isVector())
39927 return SDValue();
39928 EVT InVT = In.getValueType();
39929 unsigned NumElems = VT.getVectorNumElements();
39930
39931 EVT ScalarVT = VT.getVectorElementType();
39932 if (!((ScalarVT == MVT::i8 || ScalarVT == MVT::i16) &&
39933 NumElems >= 2 && isPowerOf2_32(NumElems)))
39934 return SDValue();
39935
39936 // InScalarVT is the intermediate type in the AVG pattern and it should be
39937 // wider than the original input type (i8/i16).
39938 EVT InScalarVT = InVT.getVectorElementType();
39939 if (InScalarVT.getSizeInBits() <= ScalarVT.getSizeInBits())
39940 return SDValue();
39941
39942 if (!Subtarget.hasSSE2())
39943 return SDValue();
39944
39945 // Detect the following pattern:
39946 //
39947 // %1 = zext <N x i8> %a to <N x i32>
39948 // %2 = zext <N x i8> %b to <N x i32>
39949 // %3 = add nuw nsw <N x i32> %1, <i32 1 x N>
39950 // %4 = add nuw nsw <N x i32> %3, %2
39951 // %5 = lshr <N x i32> %4, <i32 1 x N>
39952 // %6 = trunc <N x i32> %5 to <N x i8>
39953 //
39954 // In AVX512, the last instruction can also be a trunc store.
39955 if (In.getOpcode() != ISD::SRL)
39956 return SDValue();
39957
39958 // A lambda checking the given SDValue is a constant vector and each element
39959 // is in the range [Min, Max].
39960 auto IsConstVectorInRange = [](SDValue V, unsigned Min, unsigned Max) {
39961 BuildVectorSDNode *BV = dyn_cast<BuildVectorSDNode>(V);
39962 if (!BV || !BV->isConstant())
39963 return false;
39964 for (SDValue Op : V->ops()) {
39965 ConstantSDNode *C = dyn_cast<ConstantSDNode>(Op);
39966 if (!C)
39967 return false;
39968 const APInt &Val = C->getAPIntValue();
39969 if (Val.ult(Min) || Val.ugt(Max))
39970 return false;
39971 }
39972 return true;
39973 };
39974
39975 // Check if each element of the vector is right-shifted by one.
39976 auto LHS = In.getOperand(0);
39977 auto RHS = In.getOperand(1);
39978 if (!IsConstVectorInRange(RHS, 1, 1))
39979 return SDValue();
39980 if (LHS.getOpcode() != ISD::ADD)
39981 return SDValue();
39982
39983 // Detect a pattern of a + b + 1 where the order doesn't matter.
39984 SDValue Operands[3];
39985 Operands[0] = LHS.getOperand(0);
39986 Operands[1] = LHS.getOperand(1);
39987
39988 auto AVGBuilder = [](SelectionDAG &DAG, const SDLoc &DL,
39989 ArrayRef<SDValue> Ops) {
39990 return DAG.getNode(X86ISD::AVG, DL, Ops[0].getValueType(), Ops);
39991 };
39992
39993 // Take care of the case when one of the operands is a constant vector whose
39994 // element is in the range [1, 256].
39995 if (IsConstVectorInRange(Operands[1], 1, ScalarVT == MVT::i8 ? 256 : 65536) &&
39996 Operands[0].getOpcode() == ISD::ZERO_EXTEND &&
39997 Operands[0].getOperand(0).getValueType() == VT) {
39998 // The pattern is detected. Subtract one from the constant vector, then
39999 // demote it and emit X86ISD::AVG instruction.
40000 SDValue VecOnes = DAG.getConstant(1, DL, InVT);
40001 Operands[1] = DAG.getNode(ISD::SUB, DL, InVT, Operands[1], VecOnes);
40002 Operands[1] = DAG.getNode(ISD::TRUNCATE, DL, VT, Operands[1]);
40003 return SplitOpsAndApply(DAG, Subtarget, DL, VT,
40004 { Operands[0].getOperand(0), Operands[1] },
40005 AVGBuilder);
40006 }
40007
40008 // Matches 'add like' patterns: add(Op0,Op1) + zext(or(Op0,Op1)).
40009 // Match the or case only if it's 'add-like' - can be replaced by an add.
40010 auto FindAddLike = [&](SDValue V, SDValue &Op0, SDValue &Op1) {
40011 if (ISD::ADD == V.getOpcode()) {
40012 Op0 = V.getOperand(0);
40013 Op1 = V.getOperand(1);
40014 return true;
40015 }
40016 if (ISD::ZERO_EXTEND != V.getOpcode())
40017 return false;
40018 V = V.getOperand(0);
40019 if (V.getValueType() != VT || ISD::OR != V.getOpcode() ||
40020 !DAG.haveNoCommonBitsSet(V.getOperand(0), V.getOperand(1)))
40021 return false;
40022 Op0 = V.getOperand(0);
40023 Op1 = V.getOperand(1);
40024 return true;
40025 };
40026
40027 SDValue Op0, Op1;
40028 if (FindAddLike(Operands[0], Op0, Op1))
40029 std::swap(Operands[0], Operands[1]);
40030 else if (!FindAddLike(Operands[1], Op0, Op1))
40031 return SDValue();
40032 Operands[2] = Op0;
40033 Operands[1] = Op1;
40034
40035 // Now we have three operands of two additions. Check that one of them is a
40036 // constant vector with ones, and the other two can be promoted from i8/i16.
40037 for (int i = 0; i < 3; ++i) {
40038 if (!IsConstVectorInRange(Operands[i], 1, 1))
40039 continue;
40040 std::swap(Operands[i], Operands[2]);
40041
40042 // Check if Operands[0] and Operands[1] are results of type promotion.
40043 for (int j = 0; j < 2; ++j)
40044 if (Operands[j].getValueType() != VT) {
40045 if (Operands[j].getOpcode() != ISD::ZERO_EXTEND ||
40046 Operands[j].getOperand(0).getValueType() != VT)
40047 return SDValue();
40048 Operands[j] = Operands[j].getOperand(0);
40049 }
40050
40051 // The pattern is detected, emit X86ISD::AVG instruction(s).
40052 return SplitOpsAndApply(DAG, Subtarget, DL, VT, {Operands[0], Operands[1]},
40053 AVGBuilder);
40054 }
40055
40056 return SDValue();
40057}
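
// A minimal standalone sketch (plain C++) of the AVG computation above for u8
// lanes: widen, add, add 1, shift right by one, truncate. The wider
// intermediate type is what keeps the sum from overflowing.
#include <cstdint>
namespace avg_sketch {
constexpr std::uint8_t avgRoundUp(std::uint8_t A, std::uint8_t B) {
  return static_cast<std::uint8_t>(
      (static_cast<std::uint16_t>(A) + static_cast<std::uint16_t>(B) + 1u) >> 1);
}
static_assert(avgRoundUp(0u, 1u) == 1u, "halves round up, matching PAVGB");
static_assert(avgRoundUp(255u, 255u) == 255u, "no overflow thanks to the widening");
static_assert(avgRoundUp(10u, 20u) == 15u, "exact averages are unchanged");
}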
40058
40059static SDValue combineLoad(SDNode *N, SelectionDAG &DAG,
40060 TargetLowering::DAGCombinerInfo &DCI,
40061 const X86Subtarget &Subtarget) {
40062 LoadSDNode *Ld = cast<LoadSDNode>(N);
40063 EVT RegVT = Ld->getValueType(0);
40064 EVT MemVT = Ld->getMemoryVT();
40065 SDLoc dl(Ld);
40066 const TargetLowering &TLI = DAG.getTargetLoweringInfo();
40067
40068 // For chips with slow 32-byte unaligned loads, break the 32-byte operation
40069 // into two 16-byte operations. Also split non-temporal aligned loads on
40070 // pre-AVX2 targets as 32-byte loads will lower to regular temporal loads.
40071 ISD::LoadExtType Ext = Ld->getExtensionType();
40072 bool Fast;
40073 unsigned Alignment = Ld->getAlignment();
40074 if (RegVT.is256BitVector() && !DCI.isBeforeLegalizeOps() &&
40075 Ext == ISD::NON_EXTLOAD &&
40076 ((Ld->isNonTemporal() && !Subtarget.hasInt256() && Alignment >= 16) ||
40077 (TLI.allowsMemoryAccess(*DAG.getContext(), DAG.getDataLayout(), RegVT,
40078 *Ld->getMemOperand(), &Fast) &&
40079 !Fast))) {
40080 unsigned NumElems = RegVT.getVectorNumElements();
40081 if (NumElems < 2)
40082 return SDValue();
40083
40084 unsigned HalfAlign = 16;
40085 SDValue Ptr1 = Ld->getBasePtr();
40086 SDValue Ptr2 = DAG.getMemBasePlusOffset(Ptr1, HalfAlign, dl);
40087 EVT HalfVT = EVT::getVectorVT(*DAG.getContext(), MemVT.getScalarType(),
40088 NumElems / 2);
40089 SDValue Load1 =
40090 DAG.getLoad(HalfVT, dl, Ld->getChain(), Ptr1, Ld->getPointerInfo(),
40091 Alignment, Ld->getMemOperand()->getFlags());
40092 SDValue Load2 = DAG.getLoad(HalfVT, dl, Ld->getChain(), Ptr2,
40093 Ld->getPointerInfo().getWithOffset(HalfAlign),
40094 MinAlign(Alignment, HalfAlign),
40095 Ld->getMemOperand()->getFlags());
40096 SDValue TF = DAG.getNode(ISD::TokenFactor, dl, MVT::Other,
40097 Load1.getValue(1), Load2.getValue(1));
40098
40099 SDValue NewVec = DAG.getNode(ISD::CONCAT_VECTORS, dl, RegVT, Load1, Load2);
40100 return DCI.CombineTo(N, NewVec, TF, true);
40101 }
40102
40103 // Bool vector load - attempt to cast to an integer, as we have good
40104 // (vXiY *ext(vXi1 bitcast(iX))) handling.
40105 if (Ext == ISD::NON_EXTLOAD && !Subtarget.hasAVX512() && RegVT.isVector() &&
40106 RegVT.getScalarType() == MVT::i1 && DCI.isBeforeLegalize()) {
40107 unsigned NumElts = RegVT.getVectorNumElements();
40108 EVT IntVT = EVT::getIntegerVT(*DAG.getContext(), NumElts);
40109 if (TLI.isTypeLegal(IntVT)) {
40110 SDValue IntLoad = DAG.getLoad(IntVT, dl, Ld->getChain(), Ld->getBasePtr(),
40111 Ld->getPointerInfo(), Alignment,
40112 Ld->getMemOperand()->getFlags());
40113 SDValue BoolVec = DAG.getBitcast(RegVT, IntLoad);
40114 return DCI.CombineTo(N, BoolVec, IntLoad.getValue(1), true);
40115 }
40116 }
40117
40118 return SDValue();
40119}
40120
40121/// If V is a build vector of boolean constants and exactly one of those
40122/// constants is true, return the operand index of that true element.
40123/// Otherwise, return -1.
40124static int getOneTrueElt(SDValue V) {
40125 // This needs to be a build vector of booleans.
40126 // TODO: Checking for the i1 type matches the IR definition for the mask,
40127 // but the mask check could be loosened to i8 or other types. That might
40128 // also require checking more than 'allOnesValue'; eg, the x86 HW
40129 // instructions only require that the MSB is set for each mask element.
40130 // The ISD::MSTORE comments/definition do not specify how the mask operand
40131 // is formatted.
40132 auto *BV = dyn_cast<BuildVectorSDNode>(V);
40133 if (!BV || BV->getValueType(0).getVectorElementType() != MVT::i1)
40134 return -1;
40135
40136 int TrueIndex = -1;
40137 unsigned NumElts = BV->getValueType(0).getVectorNumElements();
40138 for (unsigned i = 0; i < NumElts; ++i) {
40139 const SDValue &Op = BV->getOperand(i);
40140 if (Op.isUndef())
40141 continue;
40142 auto *ConstNode = dyn_cast<ConstantSDNode>(Op);
40143 if (!ConstNode)
40144 return -1;
40145 if (ConstNode->getAPIntValue().isAllOnesValue()) {
40146 // If we already found a one, this is too many.
40147 if (TrueIndex >= 0)
40148 return -1;
40149 TrueIndex = i;
40150 }
40151 }
40152 return TrueIndex;
40153}
40154
40155/// Given a masked memory load/store operation, return true if it has one mask
40156/// bit set. If it has one mask bit set, then also return the memory address of
40157/// the scalar element to load/store, the vector index to insert/extract that
40158/// scalar element, and the alignment for the scalar memory access.
40159static bool getParamsForOneTrueMaskedElt(MaskedLoadStoreSDNode *MaskedOp,
40160 SelectionDAG &DAG, SDValue &Addr,
40161 SDValue &Index, unsigned &Alignment) {
40162 int TrueMaskElt = getOneTrueElt(MaskedOp->getMask());
40163 if (TrueMaskElt < 0)
40164 return false;
40165
40166 // Get the address of the one scalar element that is specified by the mask
40167 // using the appropriate offset from the base pointer.
40168 EVT EltVT = MaskedOp->getMemoryVT().getVectorElementType();
40169 Addr = MaskedOp->getBasePtr();
40170 if (TrueMaskElt != 0) {
40171 unsigned Offset = TrueMaskElt * EltVT.getStoreSize();
40172 Addr = DAG.getMemBasePlusOffset(Addr, Offset, SDLoc(MaskedOp));
40173 }
40174
40175 Index = DAG.getIntPtrConstant(TrueMaskElt, SDLoc(MaskedOp));
40176 Alignment = MinAlign(MaskedOp->getAlignment(), EltVT.getStoreSize());
40177 return true;
40178}
40179
40180/// If exactly one element of the mask is set for a non-extending masked load,
40181/// it is a scalar load and vector insert.
40182/// Note: It is expected that the degenerate cases of an all-zeros or all-ones
40183/// mask have already been optimized in IR, so we don't bother with those here.
40184static SDValue
40185reduceMaskedLoadToScalarLoad(MaskedLoadSDNode *ML, SelectionDAG &DAG,
40186 TargetLowering::DAGCombinerInfo &DCI) {
40187 // TODO: This is not x86-specific, so it could be lifted to DAGCombiner.
40188 // However, some target hooks may need to be added to know when the transform
40189 // is profitable. Endianness would also have to be considered.
40190
40191 SDValue Addr, VecIndex;
40192 unsigned Alignment;
40193 if (!getParamsForOneTrueMaskedElt(ML, DAG, Addr, VecIndex, Alignment))
40194 return SDValue();
40195
40196 // Load the one scalar element that is specified by the mask using the
40197 // appropriate offset from the base pointer.
40198 SDLoc DL(ML);
40199 EVT VT = ML->getValueType(0);
40200 EVT EltVT = VT.getVectorElementType();
40201 SDValue Load =
40202 DAG.getLoad(EltVT, DL, ML->getChain(), Addr, ML->getPointerInfo(),
40203 Alignment, ML->getMemOperand()->getFlags());
40204
40205 // Insert the loaded element into the appropriate place in the vector.
40206 SDValue Insert = DAG.getNode(ISD::INSERT_VECTOR_ELT, DL, VT,
40207 ML->getPassThru(), Load, VecIndex);
40208 return DCI.CombineTo(ML, Insert, Load.getValue(1), true);
40209}
40210
40211static SDValue
40212combineMaskedLoadConstantMask(MaskedLoadSDNode *ML, SelectionDAG &DAG,
40213 TargetLowering::DAGCombinerInfo &DCI) {
40214 if (!ISD::isBuildVectorOfConstantSDNodes(ML->getMask().getNode()))
40215 return SDValue();
40216
40217 SDLoc DL(ML);
40218 EVT VT = ML->getValueType(0);
40219
40220 // If we are loading the first and last elements of a vector, it is safe and
40221 // always faster to load the whole vector. Replace the masked load with a
40222 // vector load and select.
40223 unsigned NumElts = VT.getVectorNumElements();
40224 BuildVectorSDNode *MaskBV = cast<BuildVectorSDNode>(ML->getMask());
40225 bool LoadFirstElt = !isNullConstant(MaskBV->getOperand(0));
40226 bool LoadLastElt = !isNullConstant(MaskBV->getOperand(NumElts - 1));
40227 if (LoadFirstElt && LoadLastElt) {
40228 SDValue VecLd = DAG.getLoad(VT, DL, ML->getChain(), ML->getBasePtr(),
40229 ML->getMemOperand());
40230 SDValue Blend = DAG.getSelect(DL, VT, ML->getMask(), VecLd,
40231 ML->getPassThru());
40232 return DCI.CombineTo(ML, Blend, VecLd.getValue(1), true);
40233 }
40234
40235 // Convert a masked load with a constant mask into a masked load and a select.
40236 // This allows the select operation to use a faster kind of select instruction
40237 // (for example, vblendvps -> vblendps).
40238
40239 // Don't try this if the pass-through operand is already undefined. That would
40240 // cause an infinite loop because that's what we're about to create.
40241 if (ML->getPassThru().isUndef())
40242 return SDValue();
40243
40244 if (ISD::isBuildVectorAllZeros(ML->getPassThru().getNode()))
40245 return SDValue();
40246
40247 // The new masked load has an undef pass-through operand. The select uses the
40248 // original pass-through operand.
40249 SDValue NewML = DAG.getMaskedLoad(VT, DL, ML->getChain(), ML->getBasePtr(),
40250 ML->getMask(), DAG.getUNDEF(VT),
40251 ML->getMemoryVT(), ML->getMemOperand(),
40252 ML->getExtensionType());
40253 SDValue Blend = DAG.getSelect(DL, VT, ML->getMask(), NewML,
40254 ML->getPassThru());
40255
40256 return DCI.CombineTo(ML, Blend, NewML.getValue(1), true);
40257}
40258
40259static SDValue combineMaskedLoad(SDNode *N, SelectionDAG &DAG,
40260 TargetLowering::DAGCombinerInfo &DCI,
40261 const X86Subtarget &Subtarget) {
40262 MaskedLoadSDNode *Mld = cast<MaskedLoadSDNode>(N);
40263
40264 // TODO: Expanding load with constant mask may be optimized as well.
40265 if (Mld->isExpandingLoad())
40266 return SDValue();
40267
40268 if (Mld->getExtensionType() == ISD::NON_EXTLOAD) {
40269 if (SDValue ScalarLoad = reduceMaskedLoadToScalarLoad(Mld, DAG, DCI))
40270 return ScalarLoad;
40271 // TODO: Do some AVX512 subsets benefit from this transform?
40272 if (!Subtarget.hasAVX512())
40273 if (SDValue Blend = combineMaskedLoadConstantMask(Mld, DAG, DCI))
40274 return Blend;
40275 }
40276
40277 return SDValue();
40278}
40279
40280/// If exactly one element of the mask is set for a non-truncating masked store,
40281/// it is a vector extract and scalar store.
40282/// Note: It is expected that the degenerate cases of an all-zeros or all-ones
40283/// mask have already been optimized in IR, so we don't bother with those here.
40284static SDValue reduceMaskedStoreToScalarStore(MaskedStoreSDNode *MS,
40285 SelectionDAG &DAG) {
40286 // TODO: This is not x86-specific, so it could be lifted to DAGCombiner.
40287 // However, some target hooks may need to be added to know when the transform
40288 // is profitable. Endianness would also have to be considered.
40289
40290 SDValue Addr, VecIndex;
40291 unsigned Alignment;
40292 if (!getParamsForOneTrueMaskedElt(MS, DAG, Addr, VecIndex, Alignment))
40293 return SDValue();
40294
40295 // Extract the one scalar element that is actually being stored.
40296 SDLoc DL(MS);
40297 EVT VT = MS->getValue().getValueType();
40298 EVT EltVT = VT.getVectorElementType();
40299 SDValue Extract = DAG.getNode(ISD::EXTRACT_VECTOR_ELT, DL, EltVT,
40300 MS->getValue(), VecIndex);
40301
40302 // Store that element at the appropriate offset from the base pointer.
40303 return DAG.getStore(MS->getChain(), DL, Extract, Addr, MS->getPointerInfo(),
40304 Alignment, MS->getMemOperand()->getFlags());
40305}
40306
40307static SDValue combineMaskedStore(SDNode *N, SelectionDAG &DAG,
40308 TargetLowering::DAGCombinerInfo &DCI,
40309 const X86Subtarget &Subtarget) {
40310 MaskedStoreSDNode *Mst = cast<MaskedStoreSDNode>(N);
40311 if (Mst->isCompressingStore())
40312 return SDValue();
40313
40314 EVT VT = Mst->getValue().getValueType();
40315 SDLoc dl(Mst);
40316 const TargetLowering &TLI = DAG.getTargetLoweringInfo();
40317
40318 if (Mst->isTruncatingStore())
40319 return SDValue();
40320
40321 if (SDValue ScalarStore = reduceMaskedStoreToScalarStore(Mst, DAG))
40322 return ScalarStore;
40323
40324 // If the mask value has been legalized to a non-boolean vector, try to
40325 // simplify ops leading up to it. We only demand the MSB of each lane.
40326 SDValue Mask = Mst->getMask();
40327 if (Mask.getScalarValueSizeInBits() != 1) {
40328 APInt DemandedMask(APInt::getSignMask(VT.getScalarSizeInBits()));
40329 if (TLI.SimplifyDemandedBits(Mask, DemandedMask, DCI))
40330 return SDValue(N, 0);
40331 }
40332
40333 SDValue Value = Mst->getValue();
40334 if (Value.getOpcode() == ISD::TRUNCATE && Value.getNode()->hasOneUse() &&
40335 TLI.isTruncStoreLegal(Value.getOperand(0).getValueType(),
40336 Mst->getMemoryVT())) {
40337 return DAG.getMaskedStore(Mst->getChain(), SDLoc(N), Value.getOperand(0),
40338 Mst->getBasePtr(), Mask,
40339 Mst->getMemoryVT(), Mst->getMemOperand(), true);
40340 }
40341
40342 return SDValue();
40343}
40344
40345static SDValue combineStore(SDNode *N, SelectionDAG &DAG,
40346 TargetLowering::DAGCombinerInfo &DCI,
40347 const X86Subtarget &Subtarget) {
40348 StoreSDNode *St = cast<StoreSDNode>(N);
40349 EVT StVT = St->getMemoryVT();
40350 SDLoc dl(St);
40351 unsigned Alignment = St->getAlignment();
40352 SDValue StoredVal = St->getValue();
40353 EVT VT = StoredVal.getValueType();
40354 const TargetLowering &TLI = DAG.getTargetLoweringInfo();
40355
40356 // Convert a store of vXi1 into a store of iX and a bitcast.
40357 if (!Subtarget.hasAVX512() && VT == StVT && VT.isVector() &&
40358 VT.getVectorElementType() == MVT::i1) {
40359
40360 EVT NewVT = EVT::getIntegerVT(*DAG.getContext(), VT.getVectorNumElements());
40361 StoredVal = DAG.getBitcast(NewVT, StoredVal);
40362
40363 return DAG.getStore(St->getChain(), dl, StoredVal, St->getBasePtr(),
40364 St->getPointerInfo(), St->getAlignment(),
40365 St->getMemOperand()->getFlags());
40366 }
40367
40368 // If this is a store of a scalar_to_vector to v1i1, just use a scalar store.
40369 // This will avoid a copy to k-register.
40370 if (VT == MVT::v1i1 && VT == StVT && Subtarget.hasAVX512() &&
40371 StoredVal.getOpcode() == ISD::SCALAR_TO_VECTOR &&
40372 StoredVal.getOperand(0).getValueType() == MVT::i8) {
40373 return DAG.getStore(St->getChain(), dl, StoredVal.getOperand(0),
40374 St->getBasePtr(), St->getPointerInfo(),
40375 St->getAlignment(), St->getMemOperand()->getFlags());
40376 }
40377
40378 // Widen v2i1/v4i1 stores to v8i1.
40379 if ((VT == MVT::v2i1 || VT == MVT::v4i1) && VT == StVT &&
40380 Subtarget.hasAVX512()) {
40381 unsigned NumConcats = 8 / VT.getVectorNumElements();
40382 SmallVector<SDValue, 4> Ops(NumConcats, DAG.getUNDEF(VT));
40383 Ops[0] = StoredVal;
40384 StoredVal = DAG.getNode(ISD::CONCAT_VECTORS, dl, MVT::v8i1, Ops);
40385 return DAG.getStore(St->getChain(), dl, StoredVal, St->getBasePtr(),
40386 St->getPointerInfo(), St->getAlignment(),
40387 St->getMemOperand()->getFlags());
40388 }
40389
40390 // Turn vXi1 stores of constants into a scalar store.
40391 if ((VT == MVT::v8i1 || VT == MVT::v16i1 || VT == MVT::v32i1 ||
40392 VT == MVT::v64i1) && VT == StVT && TLI.isTypeLegal(VT) &&
40393 ISD::isBuildVectorOfConstantSDNodes(StoredVal.getNode())) {
40394 // If it's a v64i1 store without 64-bit support, we need two stores.
40395 if (VT == MVT::v64i1 && !Subtarget.is64Bit()) {
40396 SDValue Lo = DAG.getBuildVector(MVT::v32i1, dl,
40397 StoredVal->ops().slice(0, 32));
40398 Lo = combinevXi1ConstantToInteger(Lo, DAG);
40399 SDValue Hi = DAG.getBuildVector(MVT::v32i1, dl,
40400 StoredVal->ops().slice(32, 32));
40401 Hi = combinevXi1ConstantToInteger(Hi, DAG);
40402
40403 SDValue Ptr0 = St->getBasePtr();
40404 SDValue Ptr1 = DAG.getMemBasePlusOffset(Ptr0, 4, dl);
40405
40406 SDValue Ch0 =
40407 DAG.getStore(St->getChain(), dl, Lo, Ptr0, St->getPointerInfo(),
40408 Alignment, St->getMemOperand()->getFlags());
40409 SDValue Ch1 =
40410 DAG.getStore(St->getChain(), dl, Hi, Ptr1,
40411 St->getPointerInfo().getWithOffset(4),
40412 MinAlign(Alignment, 4U),
40413 St->getMemOperand()->getFlags());
40414 return DAG.getNode(ISD::TokenFactor, dl, MVT::Other, Ch0, Ch1);
40415 }
40416
40417 StoredVal = combinevXi1ConstantToInteger(StoredVal, DAG);
40418 return DAG.getStore(St->getChain(), dl, StoredVal, St->getBasePtr(),
40419 St->getPointerInfo(), St->getAlignment(),
40420 St->getMemOperand()->getFlags());
40421 }
40422
40423 // If we are saving a 32-byte vector and 32-byte stores are slow, such as on
40424 // Sandy Bridge, perform two 16-byte stores.
40425 bool Fast;
40426 if (VT.is256BitVector() && StVT == VT &&
40427 TLI.allowsMemoryAccess(*DAG.getContext(), DAG.getDataLayout(), VT,
40428 *St->getMemOperand(), &Fast) &&
40429 !Fast) {
40430 unsigned NumElems = VT.getVectorNumElements();
40431 if (NumElems < 2)
40432 return SDValue();
40433
40434 return splitVectorStore(St, DAG);
40435 }
40436
40437 // Split under-aligned vector non-temporal stores.
40438 if (St->isNonTemporal() && StVT == VT && Alignment < VT.getStoreSize()) {
40439 // ZMM/YMM nt-stores - either it can be stored as a series of shorter
40440 // vectors or the legalizer can scalarize it to use MOVNTI.
40441 if (VT.is256BitVector() || VT.is512BitVector()) {
40442 unsigned NumElems = VT.getVectorNumElements();
40443 if (NumElems < 2)
40444 return SDValue();
40445 return splitVectorStore(St, DAG);
40446 }
40447
40448 // XMM nt-stores - scalarize this to f64 nt-stores on SSE4A, else i32/i64
40449 // to use MOVNTI.
40450 if (VT.is128BitVector() && Subtarget.hasSSE2()) {
40451 MVT NTVT = Subtarget.hasSSE4A()
40452 ? MVT::v2f64
40453 : (TLI.isTypeLegal(MVT::i64) ? MVT::v2i64 : MVT::v4i32);
40454 return scalarizeVectorStore(St, NTVT, DAG);
40455 }
40456 }
40457
40458 // Try to optimize v16i16->v16i8 truncating stores when BWI is not
40459 // supported but AVX512F is, by extending to v16i32 and truncating.
40460 if (!St->isTruncatingStore() && VT == MVT::v16i8 && !Subtarget.hasBWI() &&
40461 St->getValue().getOpcode() == ISD::TRUNCATE &&
40462 St->getValue().getOperand(0).getValueType() == MVT::v16i16 &&
40463 TLI.isTruncStoreLegal(MVT::v16i32, MVT::v16i8) &&
40464 St->getValue().hasOneUse() && !DCI.isBeforeLegalizeOps()) {
40465 SDValue Ext = DAG.getNode(ISD::ANY_EXTEND, dl, MVT::v16i32, St->getValue());
40466 return DAG.getTruncStore(St->getChain(), dl, Ext, St->getBasePtr(),
40467 MVT::v16i8, St->getMemOperand());
40468 }
40469
40470 // Try to fold a VTRUNCUS or VTRUNCS into a truncating store.
40471 if (!St->isTruncatingStore() && StoredVal.hasOneUse() &&
40472 (StoredVal.getOpcode() == X86ISD::VTRUNCUS ||
40473 StoredVal.getOpcode() == X86ISD::VTRUNCS) &&
40474 TLI.isTruncStoreLegal(StoredVal.getOperand(0).getValueType(), VT)) {
40475 bool IsSigned = StoredVal.getOpcode() == X86ISD::VTRUNCS;
40476 return EmitTruncSStore(IsSigned, St->getChain(),
40477 dl, StoredVal.getOperand(0), St->getBasePtr(),
40478 VT, St->getMemOperand(), DAG);
40479 }
40480
40481 // Optimize trunc store (of multiple scalars) to shuffle and store.
40482 // First, pack all of the elements in one place. Next, store to memory
40483 // in fewer chunks.
40484 if (St->isTruncatingStore() && VT.isVector()) {
40485 // Check if we can detect an AVG pattern from the truncation. If yes,
40486 // replace the trunc store with a normal store of the result of an
40487 // X86ISD::AVG instruction.
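// (X86ISD::AVG maps to PAVGB/PAVGW, which compute the unsigned rounding
// average (a + b + 1) >> 1, so the widened add/shift can be dropped.)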
40488 if (DCI.isBeforeLegalize() || TLI.isTypeLegal(St->getMemoryVT()))
40489 if (SDValue Avg = detectAVGPattern(St->getValue(), St->getMemoryVT(), DAG,
40490 Subtarget, dl))
40491 return DAG.getStore(St->getChain(), dl, Avg, St->getBasePtr(),
40492 St->getPointerInfo(), St->getAlignment(),
40493 St->getMemOperand()->getFlags());
40494
40495 if (TLI.isTruncStoreLegal(VT, StVT)) {
40496 if (SDValue Val = detectSSatPattern(St->getValue(), St->getMemoryVT()))
40497 return EmitTruncSStore(true /* Signed saturation */, St->getChain(),
40498 dl, Val, St->getBasePtr(),
40499 St->getMemoryVT(), St->getMemOperand(), DAG);
40500 if (SDValue Val = detectUSatPattern(St->getValue(), St->getMemoryVT(),
40501 DAG, dl))
40502 return EmitTruncSStore(false /* Unsigned saturation */, St->getChain(),
40503 dl, Val, St->getBasePtr(),
40504 St->getMemoryVT(), St->getMemOperand(), DAG);
40505 }
40506
40507 return SDValue();
40508 }
40509
40510 // Turn load->store of MMX types into GPR load/stores. This avoids clobbering
40511 // the FP state in cases where an emms may be missing.
40512 // A preferable solution to the general problem is to figure out the right
40513 // places to insert EMMS. This qualifies as a quick hack.
40514
40515 // Similarly, turn load->store of i64 into double load/stores in 32-bit mode.
40516 if (VT.getSizeInBits() != 64)
40517 return SDValue();
40518
40519 const Function &F = DAG.getMachineFunction().getFunction();
40520 bool NoImplicitFloatOps = F.hasFnAttribute(Attribute::NoImplicitFloat);
40521 bool F64IsLegal =
40522 !Subtarget.useSoftFloat() && !NoImplicitFloatOps && Subtarget.hasSSE2();
40523 if ((VT == MVT::i64 && F64IsLegal && !Subtarget.is64Bit()) &&
40524 isa<LoadSDNode>(St->getValue()) &&
40525 cast<LoadSDNode>(St->getValue())->isSimple() &&
40526 St->getChain().hasOneUse() && St->isSimple()) {
40527 LoadSDNode *Ld = cast<LoadSDNode>(St->getValue().getNode());
40528 SmallVector<SDValue, 8> Ops;
40529
40530 if (!ISD::isNormalLoad(Ld))
40531 return SDValue();
40532
40533 // If this is not the MMX case, i.e. we are just turning i64 load/store
40534 // into f64 load/store, avoid the transformation if there are multiple
40535 // uses of the loaded value.
40536 if (!VT.isVector() && !Ld->hasNUsesOfValue(1, 0))
40537 return SDValue();
40538
40539 SDLoc LdDL(Ld);
40540 SDLoc StDL(N);
40541 // If we are a 64-bit capable x86, lower to a single movq load/store pair.
40542 // Otherwise, if it's legal to use f64 SSE instructions, use f64 load/store
40543 // pair instead.
40544 if (Subtarget.is64Bit() || F64IsLegal) {
40545 MVT LdVT = Subtarget.is64Bit() ? MVT::i64 : MVT::f64;
40546 SDValue NewLd = DAG.getLoad(LdVT, LdDL, Ld->getChain(), Ld->getBasePtr(),
40547 Ld->getMemOperand());
40548
40549 // Make sure new load is placed in same chain order.
40550 DAG.makeEquivalentMemoryOrdering(Ld, NewLd);
40551 return DAG.getStore(St->getChain(), StDL, NewLd, St->getBasePtr(),
40552 St->getMemOperand());
40553 }
40554
40555 // Otherwise, lower to two pairs of 32-bit loads / stores.
40556 SDValue LoAddr = Ld->getBasePtr();
40557 SDValue HiAddr = DAG.getMemBasePlusOffset(LoAddr, 4, LdDL);
40558
40559 SDValue LoLd = DAG.getLoad(MVT::i32, LdDL, Ld->getChain(), LoAddr,
40560 Ld->getPointerInfo(), Ld->getAlignment(),
40561 Ld->getMemOperand()->getFlags());
40562 SDValue HiLd = DAG.getLoad(MVT::i32, LdDL, Ld->getChain(), HiAddr,
40563 Ld->getPointerInfo().getWithOffset(4),
40564 MinAlign(Ld->getAlignment(), 4),
40565 Ld->getMemOperand()->getFlags());
40566 // Make sure new loads are placed in same chain order.
40567 DAG.makeEquivalentMemoryOrdering(Ld, LoLd);
40568 DAG.makeEquivalentMemoryOrdering(Ld, HiLd);
40569
40570 LoAddr = St->getBasePtr();
40571 HiAddr = DAG.getMemBasePlusOffset(LoAddr, 4, StDL);
40572
40573 SDValue LoSt =
40574 DAG.getStore(St->getChain(), StDL, LoLd, LoAddr, St->getPointerInfo(),
40575 St->getAlignment(), St->getMemOperand()->getFlags());
40576 SDValue HiSt = DAG.getStore(St->getChain(), StDL, HiLd, HiAddr,
40577 St->getPointerInfo().getWithOffset(4),
40578 MinAlign(St->getAlignment(), 4),
40579 St->getMemOperand()->getFlags());
40580 return DAG.getNode(ISD::TokenFactor, StDL, MVT::Other, LoSt, HiSt);
40581 }
40582
40583 // This is similar to the above case, but here we handle a scalar 64-bit
40584 // integer store that is extracted from a vector on a 32-bit target.
40585 // If we have SSE2, then we can treat it like a floating-point double
40586 // to get past legalization. The execution dependencies fixup pass will
40587 // choose the optimal machine instruction for the store if this really is
40588 // an integer or v2f32 rather than an f64.
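// e.g. (store (extract_elt (v2i64 X), i)) becomes
//      (store (extract_elt (bitcast X to v2f64), i)) on such targets.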
40589 if (VT == MVT::i64 && F64IsLegal && !Subtarget.is64Bit() &&
40590 St->getOperand(1).getOpcode() == ISD::EXTRACT_VECTOR_ELT) {
40591 SDValue OldExtract = St->getOperand(1);
40592 SDValue ExtOp0 = OldExtract.getOperand(0);
40593 unsigned VecSize = ExtOp0.getValueSizeInBits();
40594 EVT VecVT = EVT::getVectorVT(*DAG.getContext(), MVT::f64, VecSize / 64);
40595 SDValue BitCast = DAG.getBitcast(VecVT, ExtOp0);
40596 SDValue NewExtract = DAG.getNode(ISD::EXTRACT_VECTOR_ELT, dl, MVT::f64,
40597 BitCast, OldExtract.getOperand(1));
40598 return DAG.getStore(St->getChain(), dl, NewExtract, St->getBasePtr(),
40599 St->getPointerInfo(), St->getAlignment(),
40600 St->getMemOperand()->getFlags());
40601 }
40602
40603 return SDValue();
40604}
40605
40606/// Return 'true' if this vector operation is "horizontal"
40607/// and return the operands for the horizontal operation in LHS and RHS. A
40608/// horizontal operation performs the binary operation on successive elements
40609/// of its first operand, then on successive elements of its second operand,
40610/// returning the resulting values in a vector. For example, if
40611/// A = < float a0, float a1, float a2, float a3 >
40612/// and
40613/// B = < float b0, float b1, float b2, float b3 >
40614/// then the result of doing a horizontal operation on A and B is
40615/// A horizontal-op B = < a0 op a1, a2 op a3, b0 op b1, b2 op b3 >.
40616/// In short, LHS and RHS are inspected to see if LHS op RHS is of the form
40617/// A horizontal-op B, for some already available A and B, and if so then LHS is
40618/// set to A, RHS to B, and the routine returns 'true'.
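/// On x86 this corresponds to the HADD/HSUB family (e.g. HADDPS/HSUBPD and
/// their AVX forms), which combineFaddFsub below selects via X86ISD::FHADD
/// and X86ISD::FHSUB.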
40619static bool isHorizontalBinOp(SDValue &LHS, SDValue &RHS, SelectionDAG &DAG,
40620 const X86Subtarget &Subtarget,
40621 bool IsCommutative) {
40622 // If either operand is undef, bail out. The binop should be simplified.
40623 if (LHS.isUndef() || RHS.isUndef())
40624 return false;
40625
40626 // Look for the following pattern:
40627 // A = < float a0, float a1, float a2, float a3 >
40628 // B = < float b0, float b1, float b2, float b3 >
40629 // and
40630 // LHS = VECTOR_SHUFFLE A, B, <0, 2, 4, 6>
40631 // RHS = VECTOR_SHUFFLE A, B, <1, 3, 5, 7>
40632 // then LHS op RHS = < a0 op a1, a2 op a3, b0 op b1, b2 op b3 >
40633 // which is A horizontal-op B.
40634
40635 MVT VT = LHS.getSimpleValueType();
40636 assert((VT.is128BitVector() || VT.is256BitVector()) &&
40637        "Unsupported vector type for horizontal add/sub");
40638 unsigned NumElts = VT.getVectorNumElements();
40639
40640 // TODO - can we make a general helper method that does all of this for us?
40641 auto GetShuffle = [&](SDValue Op, SDValue &N0, SDValue &N1,
40642 SmallVectorImpl<int> &ShuffleMask) {
40643 if (Op.getOpcode() == ISD::VECTOR_SHUFFLE) {
40644 if (!Op.getOperand(0).isUndef())
40645 N0 = Op.getOperand(0);
40646 if (!Op.getOperand(1).isUndef())
40647 N1 = Op.getOperand(1);
40648 ArrayRef<int> Mask = cast<ShuffleVectorSDNode>(Op)->getMask();
40649 ShuffleMask.append(Mask.begin(), Mask.end());
40650 return;
40651 }
40652 bool UseSubVector = false;
40653 if (Op.getOpcode() == ISD::EXTRACT_SUBVECTOR &&
40654 Op.getOperand(0).getValueType().is256BitVector() &&
40655 llvm::isNullConstant(Op.getOperand(1))) {
40656 Op = Op.getOperand(0);
40657 UseSubVector = true;
40658 }
40659 bool IsUnary;
40660 SmallVector<SDValue, 2> SrcOps;
40661 SmallVector<int, 16> SrcShuffleMask;
40662 SDValue BC = peekThroughBitcasts(Op);
40663 if (isTargetShuffle(BC.getOpcode()) &&
40664 getTargetShuffleMask(BC.getNode(), BC.getSimpleValueType(), false,
40665 SrcOps, SrcShuffleMask, IsUnary)) {
40666 if (!UseSubVector && SrcShuffleMask.size() == NumElts &&
40667 SrcOps.size() <= 2) {
40668 N0 = SrcOps.size() > 0 ? SrcOps[0] : SDValue();
40669 N1 = SrcOps.size() > 1 ? SrcOps[1] : SDValue();
40670 ShuffleMask.append(SrcShuffleMask.begin(), SrcShuffleMask.end());
40671 }
40672 if (UseSubVector && (SrcShuffleMask.size() == (NumElts * 2)) &&
40673 SrcOps.size() == 1) {
40674 N0 = extract128BitVector(SrcOps[0], 0, DAG, SDLoc(Op));
40675 N1 = extract128BitVector(SrcOps[0], NumElts, DAG, SDLoc(Op));
40676 ArrayRef<int> Mask = ArrayRef<int>(SrcShuffleMask).slice(0, NumElts);
40677 ShuffleMask.append(Mask.begin(), Mask.end());
40678 }
40679 }
40680 };
40681
40682 // View LHS in the form
40683 // LHS = VECTOR_SHUFFLE A, B, LMask
40684 // If LHS is not a shuffle, then pretend it is the identity shuffle:
40685 // LHS = VECTOR_SHUFFLE LHS, undef, <0, 1, ..., N-1>
40686 // NOTE: A default initialized SDValue represents an UNDEF of type VT.
40687 SDValue A, B;
40688 SmallVector<int, 16> LMask;
40689 GetShuffle(LHS, A, B, LMask);
40690
40691 // Likewise, view RHS in the form
40692 // RHS = VECTOR_SHUFFLE C, D, RMask
40693 SDValue C, D;
40694 SmallVector<int, 16> RMask;
40695 GetShuffle(RHS, C, D, RMask);
40696
40697 // At least one of the operands should be a vector shuffle.
40698 unsigned NumShuffles = (LMask.empty() ? 0 : 1) + (RMask.empty() ? 0 : 1);
40699 if (NumShuffles == 0)
40700 return false;
40701
40702 if (LMask.empty()) {
40703 A = LHS;
40704 for (unsigned i = 0; i != NumElts; ++i)
40705 LMask.push_back(i);
40706 }
40707
40708 if (RMask.empty()) {
40709 C = RHS;
40710 for (unsigned i = 0; i != NumElts; ++i)
40711 RMask.push_back(i);
40712 }
40713
40714 // If A and B occur in reverse order in RHS, then canonicalize by commuting
40715 // RHS operands and shuffle mask.
40716 if (A != C) {
40717 std::swap(C, D);
40718 ShuffleVectorSDNode::commuteMask(RMask);
40719 }
40720 // Check that the shuffles are both shuffling the same vectors.
40721 if (!(A == C && B == D))
40722 return false;
40723
40724 // LHS and RHS are now:
40725 // LHS = shuffle A, B, LMask
40726 // RHS = shuffle A, B, RMask
40727 // Check that the masks correspond to performing a horizontal operation.
40728 // AVX defines horizontal add/sub to operate independently on 128-bit lanes,
40729 // so we just repeat the inner loop if this is a 256-bit op.
40730 unsigned Num128BitChunks = VT.getSizeInBits() / 128;
40731 unsigned NumEltsPer128BitChunk = NumElts / Num128BitChunks;
40732 assert((NumEltsPer128BitChunk % 2 == 0) &&
40733        "Vector type should have an even number of elements in each lane");
40734 for (unsigned j = 0; j != NumElts; j += NumEltsPer128BitChunk) {
40735 for (unsigned i = 0; i != NumEltsPer128BitChunk; ++i) {
40736 // Ignore undefined components.
40737 int LIdx = LMask[i + j], RIdx = RMask[i + j];
40738 if (LIdx < 0 || RIdx < 0 ||
40739 (!A.getNode() && (LIdx < (int)NumElts || RIdx < (int)NumElts)) ||
40740 (!B.getNode() && (LIdx >= (int)NumElts || RIdx >= (int)NumElts)))
40741 continue;
40742
40743 // The low half of the 128-bit result must choose from A.
40744 // The high half of the 128-bit result must choose from B,
40745 // unless B is undef. In that case, we are always choosing from A.
40746 unsigned NumEltsPer64BitChunk = NumEltsPer128BitChunk / 2;
40747 unsigned Src = B.getNode() ? i >= NumEltsPer64BitChunk : 0;
40748
40749 // Check that successive elements are being operated on. If not, this is
40750 // not a horizontal operation.
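// e.g. for v4f32 (a single 128-bit chunk), and ignoring undefs and the
// IsCommutative swap, the accepted masks are LMask = <0, 2, 4, 6> and
// RMask = <1, 3, 5, 7>, matching the pattern described above.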
40751 int Index = 2 * (i % NumEltsPer64BitChunk) + NumElts * Src + j;
40752 if (!(LIdx == Index && RIdx == Index + 1) &&
40753 !(IsCommutative && LIdx == Index + 1 && RIdx == Index))
40754 return false;
40755 }
40756 }
40757
40758 LHS = A.getNode() ? A : B; // If A is 'UNDEF', use B for it.
40759 RHS = B.getNode() ? B : A; // If B is 'UNDEF', use A for it.
40760
40761 if (!shouldUseHorizontalOp(LHS == RHS && NumShuffles < 2, DAG, Subtarget))
40762 return false;
40763
40764 LHS = DAG.getBitcast(VT, LHS);
40765 RHS = DAG.getBitcast(VT, RHS);
40766 return true;
40767}
40768
40769/// Do target-specific dag combines on floating-point adds/subs.
40770static SDValue combineFaddFsub(SDNode *N, SelectionDAG &DAG,
40771 const X86Subtarget &Subtarget) {
40772 EVT VT = N->getValueType(0);
40773 SDValue LHS = N->getOperand(0);
40774 SDValue RHS = N->getOperand(1);
40775 bool IsFadd = N->getOpcode() == ISD::FADD;
40776 auto HorizOpcode = IsFadd ? X86ISD::FHADD : X86ISD::FHSUB;
40777 assert((IsFadd || N->getOpcode() == ISD::FSUB) && "Wrong opcode");
40778
40779 // Try to synthesize horizontal add/sub from adds/subs of shuffles.
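// e.g. (fadd (shuffle A, B, <0,2,4,6>), (shuffle A, B, <1,3,5,7>))
//        --> (X86ISD::FHADD A, B) for v4f32.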
40780 if (((Subtarget.hasSSE3() && (VT == MVT::v4f32 || VT == MVT::v2f64)) ||
40781 (Subtarget.hasAVX() && (VT == MVT::v8f32 || VT == MVT::v4f64))) &&
40782 isHorizontalBinOp(LHS, RHS, DAG, Subtarget, IsFadd))
40783 return DAG.getNode(HorizOpcode, SDLoc(N), VT, LHS, RHS);
40784
40785 return SDValue();
40786}
40787
40788/// Attempt to pre-truncate inputs to arithmetic ops if it will simplify
40789/// the codegen.
40790/// e.g. TRUNC( BINOP( X, Y ) ) --> BINOP( TRUNC( X ), TRUNC( Y ) )
40791/// TODO: This overlaps with the generic combiner's visitTRUNCATE. Remove
40792/// anything that is guaranteed to be transformed by DAGCombiner.
40793static SDValue combineTruncatedArithmetic(SDNode *N, SelectionDAG &DAG,
40794 const X86Subtarget &Subtarget,
40795 const SDLoc &DL) {
40796 assert(N->getOpcode() == ISD::TRUNCATE && "Wrong opcode");
40797 SDValue Src = N->getOperand(0);
40798 unsigned SrcOpcode = Src.getOpcode();
40799 const TargetLowering &TLI = DAG.getTargetLoweringInfo();
40800
40801 EVT VT = N->getValueType(0);
40802 EVT SrcVT = Src.getValueType();
40803
40804 auto IsFreeTruncation = [VT](SDValue Op) {
40805 unsigned TruncSizeInBits = VT.getScalarSizeInBits();
40806
40807 // See if this has been extended from a smaller/equal size to
40808 // the truncation size, allowing a truncation to combine with the extend.
40809 unsigned Opcode = Op.getOpcode();
40810 if ((Opcode == ISD::ANY_EXTEND || Opcode == ISD::SIGN_EXTEND ||
40811 Opcode == ISD::ZERO_EXTEND) &&
40812 Op.getOperand(0).getScalarValueSizeInBits() <= TruncSizeInBits)
40813 return true;
40814
40815 // See if this is a single use constant which can be constant folded.
40816 // NOTE: We don't peek through bitcasts here because there is currently
40817 // no support for constant folding truncate+bitcast+vector_of_constants. So
40818 // we'd just end up with a truncate on both operands which will
40819 // get turned back into (truncate (binop)), causing an infinite loop.
40820 return ISD::isBuildVectorOfConstantSDNodes(Op.getNode());
40821 };
40822
40823 auto TruncateArithmetic = [&](SDValue N0, SDValue N1) {
40824 SDValue Trunc0 = DAG.getNode(ISD::TRUNCATE, DL, VT, N0);
40825 SDValue Trunc1 = DAG.getNode(ISD::TRUNCATE, DL, VT, N1);
40826 return DAG.getNode(SrcOpcode, DL, VT, Trunc0, Trunc1);
40827 };
40828
40829 // Don't combine if the operation has other uses.
40830 if (!Src.hasOneUse())
40831 return SDValue();
40832
40833 // Only support vector truncation for now.
40834 // TODO: i64 scalar math would benefit as well.
40835 if (!VT.isVector())
40836 return SDValue();
40837
40838 // In most cases it's only worth pre-truncating if we're only facing the cost
40839 // of one truncation.
40840 // i.e. if one of the inputs will constant fold or the input is repeated.
40841 switch (SrcOpcode) {
40842 case ISD::AND:
40843 case ISD::XOR:
40844 case ISD::OR: {
40845 SDValue Op0 = Src.getOperand(0);
40846 SDValue Op1 = Src.getOperand(1);
40847 if (TLI.isOperationLegalOrPromote(SrcOpcode, VT) &&
40848 (Op0 == Op1 || IsFreeTruncation(Op0) || IsFreeTruncation(Op1)))
40849 return TruncateArithmetic(Op0, Op1);
40850 break;
40851 }
40852
40853 case ISD::MUL:
40854 // X86 is rubbish at scalar and vector i64 multiplies (until AVX512DQ) - it's
40855 // better to truncate if we have the chance.
40856 if (SrcVT.getScalarType() == MVT::i64 &&
40857 TLI.isOperationLegal(SrcOpcode, VT) &&
40858 !TLI.isOperationLegal(SrcOpcode, SrcVT))
40859 return TruncateArithmetic(Src.getOperand(0), Src.getOperand(1));
40860 LLVM_FALLTHROUGH;
40861 case ISD::ADD: {
40862 SDValue Op0 = Src.getOperand(0);
40863 SDValue Op1 = Src.getOperand(1);
40864 if (TLI.isOperationLegal(SrcOpcode, VT) &&
40865 (Op0 == Op1 || IsFreeTruncation(Op0) || IsFreeTruncation(Op1)))
40866 return TruncateArithmetic(Op0, Op1);
40867 break;
40868 }
40869 case ISD::SUB: {
40870 // TODO: ISD::SUB We are conservative and require both sides to be freely
40871 // truncatable to avoid interfering with combineSubToSubus.
40872 SDValue Op0 = Src.getOperand(0);
40873 SDValue Op1 = Src.getOperand(1);
40874 if (TLI.isOperationLegal(SrcOpcode, VT) &&
40875 (Op0 == Op1 || (IsFreeTruncation(Op0) && IsFreeTruncation(Op1))))
40876 return TruncateArithmetic(Op0, Op1);
40877 break;
40878 }
40879 }
40880
40881 return SDValue();
40882}
40883
40884/// Truncate using ISD::AND mask and X86ISD::PACKUS.
40885/// e.g. trunc <8 x i32> X to <8 x i16> -->
40886/// MaskX = X & 0xffff (clear high bits to prevent saturation)
40887/// packus (extract_subv MaskX, 0), (extract_subv MaskX, 1)
40888static SDValue combineVectorTruncationWithPACKUS(SDNode *N, const SDLoc &DL,
40889 const X86Subtarget &Subtarget,
40890 SelectionDAG &DAG) {
40891 SDValue In = N->getOperand(0);
40892 EVT InVT = In.getValueType();
40893 EVT OutVT = N->getValueType(0);
40894
40895 APInt Mask = APInt::getLowBitsSet(InVT.getScalarSizeInBits(),
40896 OutVT.getScalarSizeInBits());
40897 In = DAG.getNode(ISD::AND, DL, InVT, In, DAG.getConstant(Mask, DL, InVT));
40898 return truncateVectorWithPACK(X86ISD::PACKUS, OutVT, In, DL, DAG, Subtarget);
40899}
40900
40901/// Truncate a group of v4i32 into v8i16 using X86ISD::PACKSS.
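/// The input is first sign-extended in-register from the narrow type so that
/// PACKSS's signed saturation reproduces a plain truncation of the low bits.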
40902static SDValue combineVectorTruncationWithPACKSS(SDNode *N, const SDLoc &DL,
40903 const X86Subtarget &Subtarget,
40904 SelectionDAG &DAG) {
40905 SDValue In = N->getOperand(0);
40906 EVT InVT = In.getValueType();
40907 EVT OutVT = N->getValueType(0);
40908 In = DAG.getNode(ISD::SIGN_EXTEND_INREG, DL, InVT, In,
40909 DAG.getValueType(OutVT));
40910 return truncateVectorWithPACK(X86ISD::PACKSS, OutVT, In, DL, DAG, Subtarget);
40911}
40912
40913/// This function transforms truncation from vXi32/vXi64 to vXi8/vXi16 into
40914/// X86ISD::PACKUS/X86ISD::PACKSS operations. We do it here because after type
40915/// legalization the truncation will be translated into a BUILD_VECTOR with each
40916/// element that is extracted from a vector and then truncated, and it is
40917/// difficult to do this optimization based on them.
40918static SDValue combineVectorTruncation(SDNode *N, SelectionDAG &DAG,
40919 const X86Subtarget &Subtarget) {
40920 EVT OutVT = N->getValueType(0);
40921 if (!OutVT.isVector())
40922 return SDValue();
40923
40924 SDValue In = N->getOperand(0);
40925 if (!In.getValueType().isSimple())
40926 return SDValue();
40927
40928 EVT InVT = In.getValueType();
40929 unsigned NumElems = OutVT.getVectorNumElements();
40930
40931 // TODO: On AVX2, the behavior of X86ISD::PACKUS is different from that on
40932 // SSE2, and we need to take care of it specially.
40933 // AVX512 provides vpmovdb.
40934 if (!Subtarget.hasSSE2() || Subtarget.hasAVX2())
40935 return SDValue();
40936
40937 EVT OutSVT = OutVT.getVectorElementType();
40938 EVT InSVT = InVT.getVectorElementType();
40939 if (!((InSVT == MVT::i32 || InSVT == MVT::i64) &&
40940 (OutSVT == MVT::i8 || OutSVT == MVT::i16) && isPowerOf2_32(NumElems) &&
40941 NumElems >= 8))
40942 return SDValue();
40943
40944 // SSSE3's pshufb results in fewer instructions in the cases below.
40945 if (Subtarget.hasSSSE3() && NumElems == 8 &&
40946 ((OutSVT == MVT::i8 && InSVT != MVT::i64) ||
40947 (InSVT == MVT::i32 && OutSVT == MVT::i16)))
40948 return SDValue();
40949
40950 SDLoc DL(N);
40951 // SSE2 provides PACKUS for only 2 x v8i16 -> v16i8 and SSE4.1 provides PACKUS
40952 // for 2 x v4i32 -> v8i16. For SSSE3 and below, we need to use PACKSS to
40953 // truncate 2 x v4i32 to v8i16.
40954 if (Subtarget.hasSSE41() || OutSVT == MVT::i8)
40955 return combineVectorTruncationWithPACKUS(N, DL, Subtarget, DAG);
40956 if (InSVT == MVT::i32)
40957 return combineVectorTruncationWithPACKSS(N, DL, Subtarget, DAG);
40958
40959 return SDValue();
40960}
40961
40962/// This function transforms vector truncation of 'extended sign-bits' or
40963/// 'extended zero-bits' values.
40964/// vXi16/vXi32/vXi64 to vXi8/vXi16/vXi32 into X86ISD::PACKSS/PACKUS operations.
40965static SDValue combineVectorSignBitsTruncation(SDNode *N, const SDLoc &DL,
40966 SelectionDAG &DAG,
40967 const X86Subtarget &Subtarget) {
40968 // Requires SSE2.
40969 if (!Subtarget.hasSSE2())
40970 return SDValue();
40971
40972 if (!N->getValueType(0).isVector() || !N->getValueType(0).isSimple())
40973 return SDValue();
40974
40975 SDValue In = N->getOperand(0);
40976 if (!In.getValueType().isSimple())
40977 return SDValue();
40978
40979 MVT VT = N->getValueType(0).getSimpleVT();
40980 MVT SVT = VT.getScalarType();
40981
40982 MVT InVT = In.getValueType().getSimpleVT();
40983 MVT InSVT = InVT.getScalarType();
40984
40985 // Check we have a truncation suited for PACKSS/PACKUS.
40986 if (!VT.is128BitVector() && !VT.is256BitVector())
40987 return SDValue();
40988 if (SVT != MVT::i8 && SVT != MVT::i16 && SVT != MVT::i32)
40989 return SDValue();
40990 if (InSVT != MVT::i16 && InSVT != MVT::i32 && InSVT != MVT::i64)
40991 return SDValue();
40992
40993 // AVX512 has fast truncate, but if the input is already going to be split,
40994 // there's no harm in trying pack.
40995 if (Subtarget.hasAVX512() &&
40996 !(!Subtarget.useAVX512Regs() && VT.is256BitVector() &&
40997 InVT.is512BitVector()))
40998 return SDValue();
40999
41000 unsigned NumPackedSignBits = std::min<unsigned>(SVT.getSizeInBits(), 16);
41001 unsigned NumPackedZeroBits = Subtarget.hasSSE41() ? NumPackedSignBits : 8;
41002
41003 // Use PACKUS if the input has zero-bits that extend all the way to the
41004 // packed/truncated value. e.g. masks, zext_in_reg, etc.
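// e.g. for a v8i32 -> v8i16 truncation this requires at least 16 known
// leading zero bits per element with SSE4.1 (PACKUSDW), or 24 without it.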
41005 KnownBits Known = DAG.computeKnownBits(In);
41006 unsigned NumLeadingZeroBits = Known.countMinLeadingZeros();
41007 if (NumLeadingZeroBits >= (InSVT.getSizeInBits() - NumPackedZeroBits))
41008 return truncateVectorWithPACK(X86ISD::PACKUS, VT, In, DL, DAG, Subtarget);
41009
41010 // Use PACKSS if the input has sign-bits that extend all the way to the
41011 // packed/truncated value. e.g. Comparison result, sext_in_reg, etc.
41012 unsigned NumSignBits = DAG.ComputeNumSignBits(In);
41013 if (NumSignBits > (InSVT.getSizeInBits() - NumPackedSignBits))
41014 return truncateVectorWithPACK(X86ISD::PACKSS, VT, In, DL, DAG, Subtarget);
41015
41016 return SDValue();
41017}
41018
41019// Try to form a MULHU or MULHS node by looking for
41020// (trunc (srl (mul ext, ext), 16))
41021// TODO: This is X86 specific because we want to be able to handle wide types
41022// before type legalization. But we can only do it if the vector will be
41023// legalized via widening/splitting. Type legalization can't handle promotion
41024// of a MULHU/MULHS. There isn't a way to convey this to the generic DAG
41025// combiner.
41026static SDValue combinePMULH(SDValue Src, EVT VT, const SDLoc &DL,
41027 SelectionDAG &DAG, const X86Subtarget &Subtarget) {
41028 // First instruction should be a right shift of a multiply.
41029 if (Src.getOpcode() != ISD::SRL ||
41030 Src.getOperand(0).getOpcode() != ISD::MUL)
41031 return SDValue();
41032
41033 if (!Subtarget.hasSSE2())
41034 return SDValue();
41035
41036 // Only handle vXi16 types that are at least 128-bits unless they will be
41037 // widened.
41038 if (!VT.isVector() || VT.getVectorElementType() != MVT::i16)
41039 return SDValue();
41040
41041 // Input type should be vXi32.
41042 EVT InVT = Src.getValueType();
41043 if (InVT.getVectorElementType() != MVT::i32)
41044 return SDValue();
41045
41046 // Need a shift by 16.
41047 APInt ShiftAmt;
41048 if (!ISD::isConstantSplatVector(Src.getOperand(1).getNode(), ShiftAmt) ||
41049 ShiftAmt != 16)
41050 return SDValue();
41051
41052 SDValue LHS = Src.getOperand(0).getOperand(0);
41053 SDValue RHS = Src.getOperand(0).getOperand(1);
41054
41055 unsigned ExtOpc = LHS.getOpcode();
41056 if ((ExtOpc != ISD::SIGN_EXTEND && ExtOpc != ISD::ZERO_EXTEND) ||
41057 RHS.getOpcode() != ExtOpc)
41058 return SDValue();
41059
41060 // Peek through the extends.
41061 LHS = LHS.getOperand(0);
41062 RHS = RHS.getOperand(0);
41063
41064 // Ensure the input types match.
41065 if (LHS.getValueType() != VT || RHS.getValueType() != VT)
41066 return SDValue();
41067
41068 unsigned Opc = ExtOpc == ISD::SIGN_EXTEND ? ISD::MULHS : ISD::MULHU;
41069 return DAG.getNode(Opc, DL, VT, LHS, RHS);
41070}
41071
41072// Attempt to match PMADDUBSW, which multiplies corresponding unsigned bytes
41073// from one vector with signed bytes from another vector, adds together
41074// adjacent pairs of 16-bit products, and saturates the result before
41075// truncating to 16-bits.
41076//
41077// Which looks something like this:
41078// (i16 (ssat (add (mul (zext (even elts (i8 A))), (sext (even elts (i8 B)))),
41079// (mul (zext (odd elts (i8 A)), (sext (odd elts (i8 B))))))))
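// i.e. the PMADDUBSW semantics: for each i, result[i] is the signed
// saturation of A[2*i]*B[2*i] + A[2*i+1]*B[2*i+1], with A treated as
// unsigned bytes and B as signed bytes.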
41080static SDValue detectPMADDUBSW(SDValue In, EVT VT, SelectionDAG &DAG,
41081 const X86Subtarget &Subtarget,
41082 const SDLoc &DL) {
41083 if (!VT.isVector() || !Subtarget.hasSSSE3())
41084 return SDValue();
41085
41086 unsigned NumElems = VT.getVectorNumElements();
41087 EVT ScalarVT = VT.getVectorElementType();
41088 if (ScalarVT != MVT::i16 || NumElems < 8 || !isPowerOf2_32(NumElems))
41089 return SDValue();
41090
41091 SDValue SSatVal = detectSSatPattern(In, VT);
41092 if (!SSatVal || SSatVal.getOpcode() != ISD::ADD)
41093 return SDValue();
41094
41095 // Ok this is a signed saturation of an ADD. See if this ADD is adding pairs
41096 // of multiplies from even/odd elements.
41097 SDValue N0 = SSatVal.getOperand(0);
41098 SDValue N1 = SSatVal.getOperand(1);
41099
41100 if (N0.getOpcode() != ISD::MUL || N1.getOpcode() != ISD::MUL)
41101 return SDValue();
41102
41103 SDValue N00 = N0.getOperand(0);
41104 SDValue N01 = N0.getOperand(1);
41105 SDValue N10 = N1.getOperand(0);
41106 SDValue N11 = N1.getOperand(1);
41107
41108 // TODO: Handle constant vectors and use knownbits/computenumsignbits?
41109 // Canonicalize zero_extend to LHS.
41110 if (N01.getOpcode() == ISD::ZERO_EXTEND)
41111 std::swap(N00, N01);
41112 if (N11.getOpcode() == ISD::ZERO_EXTEND)
41113 std::swap(N10, N11);
41114
41115 // Ensure we have a zero_extend and a sign_extend.
41116 if (N00.getOpcode() != ISD::ZERO_EXTEND ||
41117 N01.getOpcode() != ISD::SIGN_EXTEND ||
41118 N10.getOpcode() != ISD::ZERO_EXTEND ||
41119 N11.getOpcode() != ISD::SIGN_EXTEND)
41120 return SDValue();
41121
41122 // Peek through the extends.
41123 N00 = N00.getOperand(0);
41124 N01 = N01.getOperand(0);
41125 N10 = N10.getOperand(0);
41126 N11 = N11.getOperand(0);
41127
41128 // Ensure the extend is from vXi8.
41129 if (N00.getValueType().getVectorElementType() != MVT::i8 ||
41130 N01.getValueType().getVectorElementType() != MVT::i8 ||
41131 N10.getValueType().getVectorElementType() != MVT::i8 ||
41132 N11.getValueType().getVectorElementType() != MVT::i8)
41133 return SDValue();
41134
41135 // All inputs should be build_vectors.
41136 if (N00.getOpcode() != ISD::BUILD_VECTOR ||
41137 N01.getOpcode() != ISD::BUILD_VECTOR ||
41138 N10.getOpcode() != ISD::BUILD_VECTOR ||
41139 N11.getOpcode() != ISD::BUILD_VECTOR)
41140 return SDValue();
41141
41142 // N00/N10 are zero extended. N01/N11 are sign extended.
41143
41144 // For each element, we need to ensure we have an odd element from one vector
41145 // multiplied by the odd element of another vector and the even element from
41146 // one of the same vectors being multiplied by the even element from the
41147 // other vector. So we need to make sure for each element i, this operator
41148 // is being performed:
41149 // A[2 * i] * B[2 * i] + A[2 * i + 1] * B[2 * i + 1]
41150 SDValue ZExtIn, SExtIn;
41151 for (unsigned i = 0; i != NumElems; ++i) {
41152 SDValue N00Elt = N00.getOperand(i);
41153 SDValue N01Elt = N01.getOperand(i);
41154 SDValue N10Elt = N10.getOperand(i);
41155 SDValue N11Elt = N11.getOperand(i);
41156 // TODO: Be more tolerant to undefs.
41157 if (N00Elt.getOpcode() != ISD::EXTRACT_VECTOR_ELT ||
41158 N01Elt.getOpcode() != ISD::EXTRACT_VECTOR_ELT ||
41159 N10Elt.getOpcode() != ISD::EXTRACT_VECTOR_ELT ||
41160 N11Elt.getOpcode() != ISD::EXTRACT_VECTOR_ELT)
41161 return SDValue();
41162 auto *ConstN00Elt = dyn_cast<ConstantSDNode>(N00Elt.getOperand(1));
41163 auto *ConstN01Elt = dyn_cast<ConstantSDNode>(N01Elt.getOperand(1));
41164 auto *ConstN10Elt = dyn_cast<ConstantSDNode>(N10Elt.getOperand(1));
41165 auto *ConstN11Elt = dyn_cast<ConstantSDNode>(N11Elt.getOperand(1));
41166 if (!ConstN00Elt || !ConstN01Elt || !ConstN10Elt || !ConstN11Elt)
41167 return SDValue();
41168 unsigned IdxN00 = ConstN00Elt->getZExtValue();
41169 unsigned IdxN01 = ConstN01Elt->getZExtValue();
41170 unsigned IdxN10 = ConstN10Elt->getZExtValue();
41171 unsigned IdxN11 = ConstN11Elt->getZExtValue();
41172 // Add is commutative so indices can be reordered.
41173 if (IdxN00 > IdxN10) {
41174 std::swap(IdxN00, IdxN10);
41175 std::swap(IdxN01, IdxN11);
41176 }
41177 // N0 indices must be the even element. N1 indices must be the next odd element.
41178 if (IdxN00 != 2 * i || IdxN10 != 2 * i + 1 ||
41179 IdxN01 != 2 * i || IdxN11 != 2 * i + 1)
41180 return SDValue();
41181 SDValue N00In = N00Elt.getOperand(0);
41182 SDValue N01In = N01Elt.getOperand(0);
41183 SDValue N10In = N10Elt.getOperand(0);
41184 SDValue N11In = N11Elt.getOperand(0);
41185 // First time we find an input capture it.
41186 if (!ZExtIn) {
41187 ZExtIn = N00In;
41188 SExtIn = N01In;
41189 }
41190 if (ZExtIn != N00In || SExtIn != N01In ||
41191 ZExtIn != N10In || SExtIn != N11In)
41192 return SDValue();
41193 }
41194
41195 auto PMADDBuilder = [](SelectionDAG &DAG, const SDLoc &DL,
41196 ArrayRef<SDValue> Ops) {
41197 // Shrink by adding truncate nodes and let DAGCombine fold with the
41198 // sources.
41199 EVT InVT = Ops[0].getValueType();
41200 assert(InVT.getScalarType() == MVT::i8 &&
41201        "Unexpected scalar element type");
41202 assert(InVT == Ops[1].getValueType() && "Operands' types mismatch");
41203 EVT ResVT = EVT::getVectorVT(*DAG.getContext(), MVT::i16,
41204 InVT.getVectorNumElements() / 2);
41205 return DAG.getNode(X86ISD::VPMADDUBSW, DL, ResVT, Ops[0], Ops[1]);
41206 };
41207 return SplitOpsAndApply(DAG, Subtarget, DL, VT, { ZExtIn, SExtIn },
41208 PMADDBuilder);
41209}
41210
41211static SDValue combineTruncate(SDNode *N, SelectionDAG &DAG,
41212 const X86Subtarget &Subtarget) {
41213 EVT VT = N->getValueType(0);
41214 SDValue Src = N->getOperand(0);
41215 SDLoc DL(N);
41216
41217 // Attempt to pre-truncate inputs to arithmetic ops instead.
41218 if (SDValue V = combineTruncatedArithmetic(N, DAG, Subtarget, DL))
41219 return V;
41220
41221 // Try to detect AVG pattern first.
41222 if (SDValue Avg = detectAVGPattern(Src, VT, DAG, Subtarget, DL))
41223 return Avg;
41224
41225 // Try to detect PMADD
41226 if (SDValue PMAdd = detectPMADDUBSW(Src, VT, DAG, Subtarget, DL))
41227 return PMAdd;
41228
41229 // Try to combine truncation with signed/unsigned saturation.
41230 if (SDValue Val = combineTruncateWithSat(Src, VT, DL, DAG, Subtarget))
41231 return Val;
41232
41233 // Try to combine PMULHUW/PMULHW for vXi16.
41234 if (SDValue V = combinePMULH(Src, VT, DL, DAG, Subtarget))
41235 return V;
41236
41237 // The bitcast source is a direct mmx result.
41239 // Detect bitcasts between x86mmx and i32.
41239 if (Src.getOpcode() == ISD::BITCAST && VT == MVT::i32) {
41240 SDValue BCSrc = Src.getOperand(0);
41241 if (BCSrc.getValueType() == MVT::x86mmx)
41242 return DAG.getNode(X86ISD::MMX_MOVD2W, DL, MVT::i32, BCSrc);
41243 }
41244
41245 // Try to truncate extended sign/zero bits with PACKSS/PACKUS.
41246 if (SDValue V = combineVectorSignBitsTruncation(N, DL, DAG, Subtarget))
41247 return V;
41248
41249 return combineVectorTruncation(N, DAG, Subtarget);
41250}
41251
41252static SDValue combineVTRUNC(SDNode *N, SelectionDAG &DAG) {
41253 EVT VT = N->getValueType(0);
41254 SDValue In = N->getOperand(0);
41255 SDLoc DL(N);
41256
41257 if (auto SSatVal = detectSSatPattern(In, VT))
41258 return DAG.getNode(X86ISD::VTRUNCS, DL, VT, SSatVal);
41259 if (auto USatVal = detectUSatPattern(In, VT, DAG, DL))
41260 return DAG.getNode(X86ISD::VTRUNCUS, DL, VT, USatVal);
41261
41262 return SDValue();
41263}
41264
41265/// Returns the negated value if the node \p N flips sign of FP value.
41266///
41267/// FP-negation node may have different forms: FNEG(x), FXOR (x, 0x80000000)
41268/// or FSUB(0, x)
41269/// AVX512F does not have FXOR, so FNEG is lowered as
41270/// (bitcast (xor (bitcast x), (bitcast ConstantFP(0x80000000)))).
41271 /// In this case we go through all bitcasts.
41272/// This also recognizes splat of a negated value and returns the splat of that
41273/// value.
41274static SDValue isFNEG(SelectionDAG &DAG, SDNode *N, unsigned Depth = 0) {
41275 if (N->getOpcode() == ISD::FNEG)
41276 return N->getOperand(0);
41277
41278 // Don't recurse exponentially.
41279 if (Depth > SelectionDAG::MaxRecursionDepth)
41280 return SDValue();
41281
41282 unsigned ScalarSize = N->getValueType(0).getScalarSizeInBits();
41283
41284 SDValue Op = peekThroughBitcasts(SDValue(N, 0));
41285 EVT VT = Op->getValueType(0);
41286 // Make sure the element size doesn't change.
41287 if (VT.getScalarSizeInBits() != ScalarSize)
41288 return SDValue();
41289
41290 if (auto SVOp = dyn_cast<ShuffleVectorSDNode>(Op.getNode())) {
41291 // For a VECTOR_SHUFFLE(VEC1, VEC2), if the VEC2 is undef, then the negate
41292 // of this is VECTOR_SHUFFLE(-VEC1, UNDEF). The mask can be anything here.
41293 if (!SVOp->getOperand(1).isUndef())
41294 return SDValue();
41295 if (SDValue NegOp0 = isFNEG(DAG, SVOp->getOperand(0).getNode(), Depth + 1))
41296 if (NegOp0.getValueType() == VT) // FIXME: Can we do better?
41297 return DAG.getVectorShuffle(VT, SDLoc(SVOp), NegOp0, DAG.getUNDEF(VT),
41298 SVOp->getMask());
41299 return SDValue();
41300 }
41301 unsigned Opc = Op.getOpcode();
41302 if (Opc == ISD::INSERT_VECTOR_ELT) {
41303 // Negate of INSERT_VECTOR_ELT(UNDEF, V, INDEX) is INSERT_VECTOR_ELT(UNDEF,
41304 // -V, INDEX).
41305 SDValue InsVector = Op.getOperand(0);
41306 SDValue InsVal = Op.getOperand(1);
41307 if (!InsVector.isUndef())
41308 return SDValue();
41309 if (SDValue NegInsVal = isFNEG(DAG, InsVal.getNode(), Depth + 1))
41310 if (NegInsVal.getValueType() == VT.getVectorElementType()) // FIXME
41311 return DAG.getNode(ISD::INSERT_VECTOR_ELT, SDLoc(Op), VT, InsVector,
41312 NegInsVal, Op.getOperand(2));
41313 return SDValue();
41314 }
41315
41316 if (Opc != X86ISD::FXOR && Opc != ISD::XOR && Opc != ISD::FSUB)
41317 return SDValue();
41318
41319 SDValue Op1 = Op.getOperand(1);
41320 SDValue Op0 = Op.getOperand(0);
41321
41322 // For XOR and FXOR, we want to check if constant bits of Op1 are sign bit
41323 // masks. For FSUB, we have to check if constant bits of Op0 are sign bit
41324 // masks and hence we swap the operands.
41325 if (Opc == ISD::FSUB)
41326 std::swap(Op0, Op1);
41327
41328 APInt UndefElts;
41329 SmallVector<APInt, 16> EltBits;
41330 // Extract constant bits and see if they are all sign bit masks. Ignore the
41331 // undef elements.
41332 if (getTargetConstantBitsFromNode(Op1, ScalarSize,
41333 UndefElts, EltBits,
41334 /* AllowWholeUndefs */ true,
41335 /* AllowPartialUndefs */ false)) {
41336 for (unsigned I = 0, E = EltBits.size(); I < E; I++)
41337 if (!UndefElts[I] && !EltBits[I].isSignMask())
41338 return SDValue();
41339
41340 return peekThroughBitcasts(Op0);
41341 }
41342
41343 return SDValue();
41344}
41345
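/// Map an FMA-family opcode to the opcode obtained after negating the
/// multiply result (NegMul), the addend (NegAcc) and/or the final result
/// (NegRes). e.g. negating the result of FMA (a*b + c) yields FNMSUB,
/// i.e. -(a*b) - c.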
41346static unsigned negateFMAOpcode(unsigned Opcode, bool NegMul, bool NegAcc,
41347 bool NegRes) {
41348 if (NegMul) {
41349 switch (Opcode) {
41350 default: llvm_unreachable("Unexpected opcode");
41351 case ISD::FMA: Opcode = X86ISD::FNMADD; break;
41352 case X86ISD::FMADD_RND: Opcode = X86ISD::FNMADD_RND; break;
41353 case X86ISD::FMSUB: Opcode = X86ISD::FNMSUB; break;
41354 case X86ISD::FMSUB_RND: Opcode = X86ISD::FNMSUB_RND; break;
41355 case X86ISD::FNMADD: Opcode = ISD::FMA; break;
41356 case X86ISD::FNMADD_RND: Opcode = X86ISD::FMADD_RND; break;
41357 case X86ISD::FNMSUB: Opcode = X86ISD::FMSUB; break;
41358 case X86ISD::FNMSUB_RND: Opcode = X86ISD::FMSUB_RND; break;
41359 }
41360 }
41361
41362 if (NegAcc) {
41363 switch (Opcode) {
41364 default: llvm_unreachable("Unexpected opcode");
41365 case ISD::FMA: Opcode = X86ISD::FMSUB; break;
41366 case X86ISD::FMADD_RND: Opcode = X86ISD::FMSUB_RND; break;
41367 case X86ISD::FMSUB: Opcode = ISD::FMA; break;
41368 case X86ISD::FMSUB_RND: Opcode = X86ISD::FMADD_RND; break;
41369 case X86ISD::FNMADD: Opcode = X86ISD::FNMSUB; break;
41370 case X86ISD::FNMADD_RND: Opcode = X86ISD::FNMSUB_RND; break;
41371 case X86ISD::FNMSUB: Opcode = X86ISD::FNMADD; break;
41372 case X86ISD::FNMSUB_RND: Opcode = X86ISD::FNMADD_RND; break;
41373 case X86ISD::FMADDSUB: Opcode = X86ISD::FMSUBADD; break;
41374 case X86ISD::FMADDSUB_RND: Opcode = X86ISD::FMSUBADD_RND; break;
41375 case X86ISD::FMSUBADD: Opcode = X86ISD::FMADDSUB; break;
41376 case X86ISD::FMSUBADD_RND: Opcode = X86ISD::FMADDSUB_RND; break;
41377 }
41378 }
41379
41380 if (NegRes) {
41381 switch (Opcode) {
41382 default: llvm_unreachable("Unexpected opcode");
41383 case ISD::FMA: Opcode = X86ISD::FNMSUB; break;
41384 case X86ISD::FMADD_RND: Opcode = X86ISD::FNMSUB_RND; break;
41385 case X86ISD::FMSUB: Opcode = X86ISD::FNMADD; break;
41386 case X86ISD::FMSUB_RND: Opcode = X86ISD::FNMADD_RND; break;
41387 case X86ISD::FNMADD: Opcode = X86ISD::FMSUB; break;
41388 case X86ISD::FNMADD_RND: Opcode = X86ISD::FMSUB_RND; break;
41389 case X86ISD::FNMSUB: Opcode = ISD::FMA; break;
41390 case X86ISD::FNMSUB_RND: Opcode = X86ISD::FMADD_RND; break;
41391 }
41392 }
41393
41394 return Opcode;
41395}
41396
41397/// Do target-specific dag combines on floating point negations.
41398static SDValue combineFneg(SDNode *N, SelectionDAG &DAG,
41399 const X86Subtarget &Subtarget) {
41400 EVT OrigVT = N->getValueType(0);
41401 SDValue Arg = isFNEG(DAG, N);
41402 if (!Arg)
41403 return SDValue();
41404
41405 EVT VT = Arg.getValueType();
41406 EVT SVT = VT.getScalarType();
41407 SDLoc DL(N);
41408
41409 // Let legalize expand this if it isn't a legal type yet.
41410 if (!DAG.getTargetLoweringInfo().isTypeLegal(VT))
41411 return SDValue();
41412
41413 // If we're negating a FMUL node on a target with FMA, then we can avoid the
41414 // use of a constant by performing (-0 - A*B) instead.
41415 // FIXME: Check rounding control flags as well once it becomes available.
41416 if (Arg.getOpcode() == ISD::FMUL && (SVT == MVT::f32 || SVT == MVT::f64) &&
41417 Arg->getFlags().hasNoSignedZeros() && Subtarget.hasAnyFMA()) {
41418 SDValue Zero = DAG.getConstantFP(0.0, DL, VT);
41419 SDValue NewNode = DAG.getNode(X86ISD::FNMSUB, DL, VT, Arg.getOperand(0),
41420 Arg.getOperand(1), Zero);
41421 return DAG.getBitcast(OrigVT, NewNode);
41422 }
41423
41424 // If we're negating an FMA node, then we can adjust the
41425 // instruction to include the extra negation.
41426 if (Arg.hasOneUse() && Subtarget.hasAnyFMA()) {
41427 switch (Arg.getOpcode()) {
41428 case ISD::FMA:
41429 case X86ISD::FMSUB:
41430 case X86ISD::FNMADD:
41431 case X86ISD::FNMSUB:
41432 case X86ISD::FMADD_RND:
41433 case X86ISD::FMSUB_RND:
41434 case X86ISD::FNMADD_RND:
41435 case X86ISD::FNMSUB_RND: {
41436 // We can't handle a scalar intrinsic node here because it would only
41437 // invert one element and not the whole vector. But we could try to handle
41438 // a negation of the lower element only.
41439 unsigned NewOpcode = negateFMAOpcode(Arg.getOpcode(), false, false, true);
41440 return DAG.getBitcast(OrigVT, DAG.getNode(NewOpcode, DL, VT, Arg->ops()));
41441 }
41442 }
41443 }
41444
41445 return SDValue();
41446}
41447
41448char X86TargetLowering::isNegatibleForFree(SDValue Op, SelectionDAG &DAG,
41449 bool LegalOperations,
41450 bool ForCodeSize,
41451 unsigned Depth) const {
41452 // fneg patterns are removable even if they have multiple uses.
41453 if (isFNEG(DAG, Op.getNode(), Depth))
41454 return 2;
41455
41456 // Don't recurse exponentially.
41457 if (Depth > SelectionDAG::MaxRecursionDepth)
41458 return 0;
41459
41460 EVT VT = Op.getValueType();
41461 EVT SVT = VT.getScalarType();
41462 switch (Op.getOpcode()) {
41463 case ISD::FMA:
41464 case X86ISD::FMSUB:
41465 case X86ISD::FNMADD:
41466 case X86ISD::FNMSUB:
41467 case X86ISD::FMADD_RND:
41468 case X86ISD::FMSUB_RND:
41469 case X86ISD::FNMADD_RND:
41470 case X86ISD::FNMSUB_RND: {
41471 if (!Op.hasOneUse() || !Subtarget.hasAnyFMA() || !isTypeLegal(VT) ||
41472 !(SVT == MVT::f32 || SVT == MVT::f64) || !LegalOperations)
41473 break;
41474
41475 // This is always negatible for free but we might be able to remove some
41476 // extra operand negations as well.
41477 for (int i = 0; i != 3; ++i) {
41478 char V = isNegatibleForFree(Op.getOperand(i), DAG, LegalOperations,
41479 ForCodeSize, Depth + 1);
41480 if (V == 2)
41481 return V;
41482 }
41483 return 1;
41484 }
41485 }
41486
41487 return TargetLowering::isNegatibleForFree(Op, DAG, LegalOperations,
41488 ForCodeSize, Depth);
41489}
41490
41491SDValue X86TargetLowering::getNegatedExpression(SDValue Op, SelectionDAG &DAG,
41492 bool LegalOperations,
41493 bool ForCodeSize,
41494 unsigned Depth) const {
41495 // fneg patterns are removable even if they have multiple uses.
41496 if (SDValue Arg = isFNEG(DAG, Op.getNode(), Depth))
41497 return DAG.getBitcast(Op.getValueType(), Arg);
41498
41499 EVT VT = Op.getValueType();
41500 EVT SVT = VT.getScalarType();
41501 unsigned Opc = Op.getOpcode();
41502 switch (Opc) {
41503 case ISD::FMA:
41504 case X86ISD::FMSUB:
41505 case X86ISD::FNMADD:
41506 case X86ISD::FNMSUB:
41507 case X86ISD::FMADD_RND:
41508 case X86ISD::FMSUB_RND:
41509 case X86ISD::FNMADD_RND:
41510 case X86ISD::FNMSUB_RND: {
41511 if (!Op.hasOneUse() || !Subtarget.hasAnyFMA() || !isTypeLegal(VT) ||
41512 !(SVT == MVT::f32 || SVT == MVT::f64) || !LegalOperations)
41513 break;
41514
41515 // This is always negatible for free but we might be able to remove some
41516 // extra operand negations as well.
41517 SmallVector<SDValue, 4> NewOps(Op.getNumOperands(), SDValue());
41518 for (int i = 0; i != 3; ++i) {
41519 char V = isNegatibleForFree(Op.getOperand(i), DAG, LegalOperations,
41520 ForCodeSize, Depth + 1);
41521 if (V == 2)
41522 NewOps[i] = getNegatedExpression(Op.getOperand(i), DAG, LegalOperations,
41523 ForCodeSize, Depth + 1);
41524 }
41525
41526 bool NegA = !!NewOps[0];
41527 bool NegB = !!NewOps[1];
41528 bool NegC = !!NewOps[2];
41529 unsigned NewOpc = negateFMAOpcode(Opc, NegA != NegB, NegC, true);
41530
41531 // Fill in the non-negated ops with the original values.
41532 for (int i = 0, e = Op.getNumOperands(); i != e; ++i)
41533 if (!NewOps[i])
41534 NewOps[i] = Op.getOperand(i);
41535 return DAG.getNode(NewOpc, SDLoc(Op), VT, NewOps);
41536 }
41537 }
41538
41539 return TargetLowering::getNegatedExpression(Op, DAG, LegalOperations,
41540 ForCodeSize, Depth);
41541}
41542
41543static SDValue lowerX86FPLogicOp(SDNode *N, SelectionDAG &DAG,
41544 const X86Subtarget &Subtarget) {
41545 MVT VT = N->getSimpleValueType(0);
41546 // If we have integer vector types available, use the integer opcodes.
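// e.g. (X86ISD::FAND v4f32 A, B) is rewritten as
//      (bitcast (and v4i32 (bitcast A), (bitcast B))).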
41547 if (!VT.isVector() || !Subtarget.hasSSE2())
41548 return SDValue();
41549
41550 SDLoc dl(N);
41551
41552 unsigned IntBits = VT.getScalarSizeInBits();
41553 MVT IntSVT = MVT::getIntegerVT(IntBits);
41554 MVT IntVT = MVT::getVectorVT(IntSVT, VT.getSizeInBits() / IntBits);
41555
41556 SDValue Op0 = DAG.getBitcast(IntVT, N->getOperand(0));
41557 SDValue Op1 = DAG.getBitcast(IntVT, N->getOperand(1));
41558 unsigned IntOpcode;
41559 switch (N->getOpcode()) {
41560 default: llvm_unreachable("Unexpected FP logic op");
41561 case X86ISD::FOR: IntOpcode = ISD::OR; break;
41562 case X86ISD::FXOR: IntOpcode = ISD::XOR; break;
41563 case X86ISD::FAND: IntOpcode = ISD::AND; break;
41564 case X86ISD::FANDN: IntOpcode = X86ISD::ANDNP; break;
41565 }
41566 SDValue IntOp = DAG.getNode(IntOpcode, dl, IntVT, Op0, Op1);
41567 return DAG.getBitcast(VT, IntOp);
41568}
41569
41570
41571/// Fold a xor(setcc cond, val), 1 --> setcc (inverted(cond), val)
41572static SDValue foldXor1SetCC(SDNode *N, SelectionDAG &DAG) {
41573 if (N->getOpcode() != ISD::XOR)
41574 return SDValue();
41575
41576 SDValue LHS = N->getOperand(0);
41577 auto *RHSC = dyn_cast<ConstantSDNode>(N->getOperand(1));
41578 if (!RHSC || RHSC->getZExtValue() != 1 || LHS->getOpcode() != X86ISD::SETCC)
41579 return SDValue();
41580
41581 X86::CondCode NewCC = X86::GetOppositeBranchCondition(
41582 X86::CondCode(LHS->getConstantOperandVal(0)));
41583 SDLoc DL(N);
41584 return getSETCC(NewCC, LHS->getOperand(1), DL, DAG);
41585}
41586
41587static SDValue combineXor(SDNode *N, SelectionDAG &DAG,
41588 TargetLowering::DAGCombinerInfo &DCI,
41589 const X86Subtarget &Subtarget) {
41590 // If this is SSE1 only convert to FXOR to avoid scalarization.
41591 if (Subtarget.hasSSE1() && !Subtarget.hasSSE2() &&
41592 N->getValueType(0) == MVT::v4i32) {
41593 return DAG.getBitcast(
41594 MVT::v4i32, DAG.getNode(X86ISD::FXOR, SDLoc(N), MVT::v4f32,
41595 DAG.getBitcast(MVT::v4f32, N->getOperand(0)),
41596 DAG.getBitcast(MVT::v4f32, N->getOperand(1))));
41597 }
41598
41599 if (SDValue Cmp = foldVectorXorShiftIntoCmp(N, DAG, Subtarget))
41600 return Cmp;
41601
41602 if (DCI.isBeforeLegalizeOps())
41603 return SDValue();
41604
41605 if (SDValue SetCC = foldXor1SetCC(N, DAG))
41606 return SetCC;
41607
41608 if (SDValue RV = foldXorTruncShiftIntoCmp(N, DAG))
41609 return RV;
41610
41611 if (SDValue FPLogic = convertIntLogicToFPLogic(N, DAG, Subtarget))
41612 return FPLogic;
41613
41614 return combineFneg(N, DAG, Subtarget);
41615}
41616
41617static SDValue combineBEXTR(SDNode *N, SelectionDAG &DAG,
41618 TargetLowering::DAGCombinerInfo &DCI,
41619 const X86Subtarget &Subtarget) {
41620 SDValue Op0 = N->getOperand(0);
41621 SDValue Op1 = N->getOperand(1);
41622 EVT VT = N->getValueType(0);
41623 unsigned NumBits = VT.getSizeInBits();
41624
41625 const TargetLowering &TLI = DAG.getTargetLoweringInfo();
41626
41627 // TODO - Constant Folding.
41628 if (auto *Cst1 = dyn_cast<ConstantSDNode>(Op1)) {
41629 // Reduce Cst1 to the bottom 16-bits.
41630 // NOTE: SimplifyDemandedBits won't do this for constants.
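// (BEXTR only reads the start index from bits [7:0] and the length from
// bits [15:8] of the control operand, hence the 0xFFFF mask below.)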
41631 const APInt &Val1 = Cst1->getAPIntValue();
41632 APInt MaskedVal1 = Val1 & 0xFFFF;
41633 if (MaskedVal1 != Val1)
41634 return DAG.getNode(X86ISD::BEXTR, SDLoc(N), VT, Op0,
41635 DAG.getConstant(MaskedVal1, SDLoc(N), VT));
41636 }
41637
41638 // Only bottom 16-bits of the control bits are required.
41639 APInt DemandedMask(APInt::getLowBitsSet(NumBits, 16));
41640 if (TLI.SimplifyDemandedBits(Op1, DemandedMask, DCI))
41641 return SDValue(N, 0);
41642
41643 return SDValue();
41644}
41645
41646static bool isNullFPScalarOrVectorConst(SDValue V) {
41647 return isNullFPConstant(V) || ISD::isBuildVectorAllZeros(V.getNode());
41648}
41649
41650/// If a value is a scalar FP zero or a vector FP zero (potentially including
41651/// undefined elements), return a zero constant that may be used to fold away
41652/// that value. In the case of a vector, the returned constant will not contain
41653/// undefined elements even if the input parameter does. This makes it suitable
41654/// to be used as a replacement operand with operations (eg, bitwise-and) where
41655/// an undef should not propagate.
41656static SDValue getNullFPConstForNullVal(SDValue V, SelectionDAG &DAG,
41657 const X86Subtarget &Subtarget) {
41658 if (!isNullFPScalarOrVectorConst(V))
41659 return SDValue();
41660
41661 if (V.getValueType().isVector())
41662 return getZeroVector(V.getSimpleValueType(), Subtarget, DAG, SDLoc(V));
41663
41664 return V;
41665}
41666
41667static SDValue combineFAndFNotToFAndn(SDNode *N, SelectionDAG &DAG,
41668 const X86Subtarget &Subtarget) {
41669 SDValue N0 = N->getOperand(0);
41670 SDValue N1 = N->getOperand(1);
41671 EVT VT = N->getValueType(0);
41672 SDLoc DL(N);
41673
41674 // Vector types are handled in combineANDXORWithAllOnesIntoANDNP().
41675 if (!((VT == MVT::f32 && Subtarget.hasSSE1()) ||
41676 (VT == MVT::f64 && Subtarget.hasSSE2()) ||
41677 (VT == MVT::v4f32 && Subtarget.hasSSE1() && !Subtarget.hasSSE2())))
41678 return SDValue();
41679
41680 auto isAllOnesConstantFP = [](SDValue V) {
41681 if (V.getSimpleValueType().isVector())
41682 return ISD::isBuildVectorAllOnes(V.getNode());
41683 auto *C = dyn_cast<ConstantFPSDNode>(V);
41684 return C && C->getConstantFPValue()->isAllOnesValue();
41685 };
41686
41687 // fand (fxor X, -1), Y --> fandn X, Y
41688 if (N0.getOpcode() == X86ISD::FXOR && isAllOnesConstantFP(N0.getOperand(1)))
41689 return DAG.getNode(X86ISD::FANDN, DL, VT, N0.getOperand(0), N1);
41690
41691 // fand X, (fxor Y, -1) --> fandn Y, X
41692 if (N1.getOpcode() == X86ISD::FXOR && isAllOnesConstantFP(N1.getOperand(1)))
41693 return DAG.getNode(X86ISD::FANDN, DL, VT, N1.getOperand(0), N0);
41694
41695 return SDValue();
41696}
41697
41698/// Do target-specific dag combines on X86ISD::FAND nodes.
41699static SDValue combineFAnd(SDNode *N, SelectionDAG &DAG,
41700 const X86Subtarget &Subtarget) {
41701 // FAND(0.0, x) -> 0.0
41702 if (SDValue V = getNullFPConstForNullVal(N->getOperand(0), DAG, Subtarget))
41703 return V;
41704
41705 // FAND(x, 0.0) -> 0.0
41706 if (SDValue V = getNullFPConstForNullVal(N->getOperand(1), DAG, Subtarget))
41707 return V;
41708
41709 if (SDValue V = combineFAndFNotToFAndn(N, DAG, Subtarget))
41710 return V;
41711
41712 return lowerX86FPLogicOp(N, DAG, Subtarget);
41713}
41714
41715/// Do target-specific dag combines on X86ISD::FANDN nodes.
41716static SDValue combineFAndn(SDNode *N, SelectionDAG &DAG,
41717 const X86Subtarget &Subtarget) {
41718 // FANDN(0.0, x) -> x
41719 if (isNullFPScalarOrVectorConst(N->getOperand(0)))
41720 return N->getOperand(1);
41721
41722 // FANDN(x, 0.0) -> 0.0
41723 if (SDValue V = getNullFPConstForNullVal(N->getOperand(1), DAG, Subtarget))
41724 return V;
41725
41726 return lowerX86FPLogicOp(N, DAG, Subtarget);
41727}
41728
41729/// Do target-specific dag combines on X86ISD::FOR and X86ISD::FXOR nodes.
41730static SDValue combineFOr(SDNode *N, SelectionDAG &DAG,
41731 const X86Subtarget &Subtarget) {
41732 assert(N->getOpcode() == X86ISD::FOR || N->getOpcode() == X86ISD::FXOR);
41733
41734 // F[X]OR(0.0, x) -> x
41735 if (isNullFPScalarOrVectorConst(N->getOperand(0)))
41736 return N->getOperand(1);
41737
41738 // F[X]OR(x, 0.0) -> x
41739 if (isNullFPScalarOrVectorConst(N->getOperand(1)))
41740 return N->getOperand(0);
41741
41742 if (SDValue NewVal = combineFneg(N, DAG, Subtarget))
41743 return NewVal;
41744
41745 return lowerX86FPLogicOp(N, DAG, Subtarget);
41746}
41747
41748/// Do target-specific dag combines on X86ISD::FMIN and X86ISD::FMAX nodes.
41749static SDValue combineFMinFMax(SDNode *N, SelectionDAG &DAG) {
41750 assert(N->getOpcode() == X86ISD::FMIN || N->getOpcode() == X86ISD::FMAX);
41751
41752 // Only perform optimizations if UnsafeMath is used.
41753 if (!DAG.getTarget().Options.UnsafeFPMath)
41754 return SDValue();
41755
41756 // If we run in unsafe-math mode, then convert the FMIN and FMAX nodes
41757 // into FMINC and FMAXC, which are commutative operations.
41758 unsigned NewOp = 0;
41759 switch (N->getOpcode()) {
41760 default: llvm_unreachable("unknown opcode");
41761 case X86ISD::FMIN: NewOp = X86ISD::FMINC; break;
41762 case X86ISD::FMAX: NewOp = X86ISD::FMAXC; break;
41763 }
41764
41765 return DAG.getNode(NewOp, SDLoc(N), N->getValueType(0),
41766 N->getOperand(0), N->getOperand(1));
41767}
41768
41769static SDValue combineFMinNumFMaxNum(SDNode *N, SelectionDAG &DAG,
41770 const X86Subtarget &Subtarget) {
41771 if (Subtarget.useSoftFloat())
41772 return SDValue();
41773
41774 const TargetLowering &TLI = DAG.getTargetLoweringInfo();
41775
41776 EVT VT = N->getValueType(0);
41777 if (!((Subtarget.hasSSE1() && VT == MVT::f32) ||
41778 (Subtarget.hasSSE2() && VT == MVT::f64) ||
41779 (VT.isVector() && TLI.isTypeLegal(VT))))
41780 return SDValue();
41781
41782 SDValue Op0 = N->getOperand(0);
41783 SDValue Op1 = N->getOperand(1);
41784 SDLoc DL(N);
41785 auto MinMaxOp = N->getOpcode() == ISD::FMAXNUM ? X86ISD::FMAX : X86ISD::FMIN;
41786
41787 // If we don't have to respect NaN inputs, this is a direct translation to x86
41788 // min/max instructions.
41789 if (DAG.getTarget().Options.NoNaNsFPMath || N->getFlags().hasNoNaNs())
41790 return DAG.getNode(MinMaxOp, DL, VT, Op0, Op1, N->getFlags());
41791
41792 // If one of the operands is known non-NaN use the native min/max instructions
41793 // with the non-NaN input as second operand.
41794 if (DAG.isKnownNeverNaN(Op1))
41795 return DAG.getNode(MinMaxOp, DL, VT, Op0, Op1, N->getFlags());
41796 if (DAG.isKnownNeverNaN(Op0))
41797 return DAG.getNode(MinMaxOp, DL, VT, Op1, Op0, N->getFlags());
41798
41799 // If we have to respect NaN inputs, this takes at least 3 instructions.
41800 // Favor a library call when operating on a scalar and minimizing code size.
41801 if (!VT.isVector() && DAG.getMachineFunction().getFunction().hasMinSize())
41802 return SDValue();
41803
41804 EVT SetCCType = TLI.getSetCCResultType(DAG.getDataLayout(), *DAG.getContext(),
41805 VT);
41806
41807 // There are 4 possibilities involving NaN inputs, and these are the required
41808 // outputs:
41809 //                  Op1
41810 //              Num     NaN
41811 //           ----------------
41812 //    Num    |  Max  |  Op0 |
41813 // Op0       ----------------
41814 //    NaN    |  Op1  |  NaN |
41815 //           ----------------
41816 //
41817 // The SSE FP max/min instructions were not designed for this case, but rather
41818 // to implement:
41819 // Min = Op1 < Op0 ? Op1 : Op0
41820 // Max = Op1 > Op0 ? Op1 : Op0
41821 //
41822 // So they always return Op0 if either input is a NaN. However, we can still
41823 // use those instructions for fmaxnum by selecting away a NaN input.
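// Worked example (values are illustrative): for fmaxnum(Op0 = NaN, Op1 = 3.0)
// the max node below computes FMAX(Op1, Op0), which returns Op0 (NaN) under
// SSE semantics; IsOp0Nan is then true, so the final select yields Op1 = 3.0,
// matching the table above.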
41824
41825 // If either operand is NaN, the 2nd source operand (Op0) is passed through.
41826 SDValue MinOrMax = DAG.getNode(MinMaxOp, DL, VT, Op1, Op0);
41827 SDValue IsOp0Nan = DAG.getSetCC(DL, SetCCType, Op0, Op0, ISD::SETUO);
41828
41829 // If Op0 is a NaN, select Op1. Otherwise, select the max. If both operands
41830 // are NaN, the NaN value of Op1 is the result.
41831 return DAG.getSelect(DL, VT, IsOp0Nan, Op1, MinOrMax);
41832}
41833
41834static SDValue combineX86INT_TO_FP(SDNode *N, SelectionDAG &DAG,
41835 TargetLowering::DAGCombinerInfo &DCI) {
41836 EVT VT = N->getValueType(0);
41837 const TargetLowering &TLI = DAG.getTargetLoweringInfo();
41838
41839 APInt KnownUndef, KnownZero;
41840 APInt DemandedElts = APInt::getAllOnesValue(VT.getVectorNumElements());
41841 if (TLI.SimplifyDemandedVectorElts(SDValue(N, 0), DemandedElts, KnownUndef,
41842 KnownZero, DCI))
41843 return SDValue(N, 0);
41844
41845 // Convert a full vector load into vzload when not all bits are needed.
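// For instance (illustrative types): (v2f64 CVTSI2P (v4i32 load)) only uses
// the low 64 bits of the 128-bit load, so the full load can be replaced by a
// 64-bit VZEXT_LOAD that is bitcast back to v4i32 before the conversion.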
41846 SDValue In = N->getOperand(0);
41847 MVT InVT = In.getSimpleValueType();
41848 if (VT.getVectorNumElements() < InVT.getVectorNumElements() &&
41849 ISD::isNormalLoad(In.getNode()) && In.hasOneUse()) {
41850 assert(InVT.is128BitVector() && "Expected 128-bit input vector");
41851 LoadSDNode *LN = cast<LoadSDNode>(N->getOperand(0));
41852 // Unless the load is volatile or atomic.
41853 if (LN->isSimple()) {
41854 SDLoc dl(N);
41855 unsigned NumBits = InVT.getScalarSizeInBits() * VT.getVectorNumElements();
41856 MVT MemVT = MVT::getIntegerVT(NumBits);
41857 MVT LoadVT = MVT::getVectorVT(MemVT, 128 / NumBits);
41858 SDVTList Tys = DAG.getVTList(LoadVT, MVT::Other);
41859 SDValue Ops[] = { LN->getChain(), LN->getBasePtr() };
41860 SDValue VZLoad =
41861 DAG.getMemIntrinsicNode(X86ISD::VZEXT_LOAD, dl, Tys, Ops, MemVT,
41862 LN->getPointerInfo(),
41863 LN->getAlignment(),
41864 LN->getMemOperand()->getFlags());
41865 SDValue Convert = DAG.getNode(N->getOpcode(), dl, VT,
41866 DAG.getBitcast(InVT, VZLoad));
41867 DCI.CombineTo(N, Convert);
41868 DAG.ReplaceAllUsesOfValueWith(SDValue(LN, 1), VZLoad.getValue(1));
41869 return SDValue(N, 0);
41870 }
41871 }
41872
41873 return SDValue();
41874}
41875
41876static SDValue combineCVTP2I_CVTTP2I(SDNode *N, SelectionDAG &DAG,
41877 TargetLowering::DAGCombinerInfo &DCI) {
41878 EVT VT = N->getValueType(0);
41879
41880 // Convert a full vector load into vzload when not all bits are needed.
41881 SDValue In = N->getOperand(0);
41882 MVT InVT = In.getSimpleValueType();
41883 if (VT.getVectorNumElements() < InVT.getVectorNumElements() &&
41884 ISD::isNormalLoad(In.getNode()) && In.hasOneUse()) {
41885 assert(InVT.is128BitVector() && "Expected 128-bit input vector");
41886 LoadSDNode *LN = cast<LoadSDNode>(N->getOperand(0));
41887 // Unless the load is volatile or atomic.
41888 if (LN->isSimple()) {
41889 SDLoc dl(N);
41890 unsigned NumBits = InVT.getScalarSizeInBits() * VT.getVectorNumElements();
41891 MVT MemVT = MVT::getFloatingPointVT(NumBits);
41892 MVT LoadVT = MVT::getVectorVT(MemVT, 128 / NumBits);
41893 SDVTList Tys = DAG.getVTList(LoadVT, MVT::Other);
41894 SDValue Ops[] = { LN->getChain(), LN->getBasePtr() };
41895 SDValue VZLoad =
41896 DAG.getMemIntrinsicNode(X86ISD::VZEXT_LOAD, dl, Tys, Ops, MemVT,
41897 LN->getPointerInfo(),
41898 LN->getAlignment(),
41899 LN->getMemOperand()->getFlags());
41900 SDValue Convert = DAG.getNode(N->getOpcode(), dl, VT,
41901 DAG.getBitcast(InVT, VZLoad));
41902 DCI.CombineTo(N, Convert);
41903 DAG.ReplaceAllUsesOfValueWith(SDValue(LN, 1), VZLoad.getValue(1));
41904 return SDValue(N, 0);
41905 }
41906 }
41907
41908 return SDValue();
41909}
41910
41911/// Do target-specific dag combines on X86ISD::ANDNP nodes.
41912static SDValue combineAndnp(SDNode *N, SelectionDAG &DAG,
41913 TargetLowering::DAGCombinerInfo &DCI,
41914 const X86Subtarget &Subtarget) {
41915 MVT VT = N->getSimpleValueType(0);
41916
41917 // ANDNP(0, x) -> x
41918 if (ISD::isBuildVectorAllZeros(N->getOperand(0).getNode()))
41919 return N->getOperand(1);
41920
41921 // ANDNP(x, 0) -> 0
41922 if (ISD::isBuildVectorAllZeros(N->getOperand(1).getNode()))
41923 return DAG.getConstant(0, SDLoc(N), VT);
41924
41925 // Turn ANDNP back to AND if input is inverted.
41926 if (SDValue Not = IsNOT(N->getOperand(0), DAG))
41927 return DAG.getNode(ISD::AND, SDLoc(N), VT, DAG.getBitcast(VT, Not),
41928 N->getOperand(1));
41929
41930 // Attempt to recursively combine a bitmask ANDNP with shuffles.
41931 if (VT.isVector() && (VT.getScalarSizeInBits() % 8) == 0) {
41932 SDValue Op(N, 0);
41933 if (SDValue Res = combineX86ShufflesRecursively(Op, DAG, Subtarget))
41934 return Res;
41935 }
41936
41937 return SDValue();
41938}
41939
41940static SDValue combineBT(SDNode *N, SelectionDAG &DAG,
41941 TargetLowering::DAGCombinerInfo &DCI) {
41942 SDValue N0 = N->getOperand(0);
41943 SDValue N1 = N->getOperand(1);
41944
41945 // BT ignores high bits in the bit index operand.
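// E.g. for an i32 bit-index operand only the low 5 bits are demanded, so a
// redundant mask such as (and Idx, 31) can be stripped by the
// GetDemandedBits call below (illustrative pattern, not the only one caught).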
41946 unsigned BitWidth = N1.getValueSizeInBits();
41947 APInt DemandedMask = APInt::getLowBitsSet(BitWidth, Log2_32(BitWidth));
41948 if (SDValue DemandedN1 = DAG.GetDemandedBits(N1, DemandedMask))
41949 return DAG.getNode(X86ISD::BT, SDLoc(N), MVT::i32, N0, DemandedN1);
41950
41951 return SDValue();
41952}
41953
41954// Try to combine sext_in_reg of a cmov of constants by extending the constants.
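// A sketch of the intent (constants chosen for illustration):
//   (sext_in_reg (i32 cmov 0x80, 0x7F), i8) --> (i32 cmov 0xFFFFFF80, 0x7F)
// i.e. the in-register extension is folded into the constant operands so no
// separate extend of the cmov result is needed.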
41955static SDValue combineSextInRegCmov(SDNode *N, SelectionDAG &DAG) {
41956 assert(N->getOpcode() == ISD::SIGN_EXTEND_INREG);
41957
41958 EVT DstVT = N->getValueType(0);
41959
41960 SDValue N0 = N->getOperand(0);
41961 SDValue N1 = N->getOperand(1);
41962 EVT ExtraVT = cast<VTSDNode>(N1)->getVT();
41963
41964 if (ExtraVT != MVT::i8 && ExtraVT != MVT::i16)
41965 return SDValue();
41966
41967 // Look through single use any_extends / truncs.
41968 SDValue IntermediateBitwidthOp;
41969 if ((N0.getOpcode() == ISD::ANY_EXTEND || N0.getOpcode() == ISD::TRUNCATE) &&
41970 N0.hasOneUse()) {
41971 IntermediateBitwidthOp = N0;
41972 N0 = N0.getOperand(0);
41973 }
41974
41975 // See if we have a single use cmov.
41976 if (N0.getOpcode() != X86ISD::CMOV || !N0.hasOneUse())
41977 return SDValue();
41978
41979 SDValue CMovOp0 = N0.getOperand(0);
41980 SDValue CMovOp1 = N0.getOperand(1);
41981
41982 // Make sure both operands are constants.
41983 if (!isa<ConstantSDNode>(CMovOp0.getNode()) ||
41984 !isa<ConstantSDNode>(CMovOp1.getNode()))
41985 return SDValue();
41986
41987 SDLoc DL(N);
41988
41989 // If we looked through an any_extend/trunc above, apply the same op to the constants.
41990 if (IntermediateBitwidthOp) {
41991 unsigned IntermediateOpc = IntermediateBitwidthOp.getOpcode();
41992 CMovOp0 = DAG.getNode(IntermediateOpc, DL, DstVT, CMovOp0);
41993 CMovOp1 = DAG.getNode(IntermediateOpc, DL, DstVT, CMovOp1);
41994 }
41995
41996 CMovOp0 = DAG.getNode(ISD::SIGN_EXTEND_INREG, DL, DstVT, CMovOp0, N1);
41997 CMovOp1 = DAG.getNode(ISD::SIGN_EXTEND_INREG, DL, DstVT, CMovOp1, N1);
41998
41999 EVT CMovVT = DstVT;
42000 // We do not want i16 CMOV's. Promote to i32 and truncate afterwards.
42001 if (DstVT == MVT::i16) {
42002 CMovVT = MVT::i32;
42003 CMovOp0 = DAG.getNode(ISD::ZERO_EXTEND, DL, CMovVT, CMovOp0);
42004 CMovOp1 = DAG.getNode(ISD::ZERO_EXTEND, DL, CMovVT, CMovOp1);
42005 }
42006
42007 SDValue CMov = DAG.getNode(X86ISD::CMOV, DL, CMovVT, CMovOp0, CMovOp1,
42008 N0.getOperand(2), N0.getOperand(3));
42009
42010 if (CMovVT != DstVT)
42011 CMov = DAG.getNode(ISD::TRUNCATE, DL, DstVT, CMov);
42012
42013 return CMov;
42014}
42015
42016static SDValue combineSignExtendInReg(SDNode *N, SelectionDAG &DAG,
42017 const X86Subtarget &Subtarget) {
42018 assert(N->getOpcode() == ISD::SIGN_EXTEND_INREG);
42019
42020 if (SDValue V = combineSextInRegCmov(N, DAG))
42021 return V;
42022
42023 EVT VT = N->getValueType(0);
42024 SDValue N0 = N->getOperand(0);
42025 SDValue N1 = N->getOperand(1);
42026 EVT ExtraVT = cast<VTSDNode>(N1)->getVT();
42027 SDLoc dl(N);
42028
42029 // The SIGN_EXTEND_INREG to v4i64 is an expensive operation on both
42030 // SSE and AVX2, since there is no sign-extended shift right
42031 // operation on a vector with 64-bit elements.
42032 //(sext_in_reg (v4i64 anyext (v4i32 x )), ExtraVT) ->
42033 // (v4i64 sext (v4i32 sext_in_reg (v4i32 x , ExtraVT)))
42034 if (VT == MVT::v4i64 && (N0.getOpcode() == ISD::ANY_EXTEND ||
42035 N0.getOpcode() == ISD::SIGN_EXTEND)) {
42036 SDValue N00 = N0.getOperand(0);
42037
42038 // EXTLOAD has a better solution on AVX2,
42039 // it may be replaced with X86ISD::VSEXT node.
42040 if (N00.getOpcode() == ISD::LOAD && Subtarget.hasInt256())
42041 if (!ISD::isNormalLoad(N00.getNode()))
42042 return SDValue();
42043
42044 if (N00.getValueType() == MVT::v4i32 && ExtraVT.getSizeInBits() < 128) {
42045 SDValue Tmp = DAG.getNode(ISD::SIGN_EXTEND_INREG, dl, MVT::v4i32,
42046 N00, N1);
42047 return DAG.getNode(ISD::SIGN_EXTEND, dl, MVT::v4i64, Tmp);
42048 }
42049 }
42050 return SDValue();
42051}
42052
42053/// sext(add_nsw(x, C)) --> add(sext(x), C_sext)
42054/// zext(add_nuw(x, C)) --> add(zext(x), C_zext)
42055/// Promoting a sign/zero extension ahead of a no overflow 'add' exposes
42056/// opportunities to combine math ops, use an LEA, or use a complex addressing
42057/// mode. This can eliminate extend, add, and shift instructions.
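///
/// A rough sketch of the intended effect (types and constants are
/// illustrative):
///   (i64 sext (add nsw i32 %x, 8)) --> (add nsw i64 (sext %x), 8)
/// provided the extended value also feeds an 'add' or 'shl'; the constant 8
/// can then become the displacement of an LEA instead of a separate add.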
42058static SDValue promoteExtBeforeAdd(SDNode *Ext, SelectionDAG &DAG,
42059 const X86Subtarget &Subtarget) {
42060 if (Ext->getOpcode() != ISD::SIGN_EXTEND &&
42061 Ext->getOpcode() != ISD::ZERO_EXTEND)
42062 return SDValue();
42063
42064 // TODO: This should be valid for other integer types.
42065 EVT VT = Ext->getValueType(0);
42066 if (VT != MVT::i64)
42067 return SDValue();
42068
42069 SDValue Add = Ext->getOperand(0);
42070 if (Add.getOpcode() != ISD::ADD)
42071 return SDValue();
42072
42073 bool Sext = Ext->getOpcode() == ISD::SIGN_EXTEND;
42074 bool NSW = Add->getFlags().hasNoSignedWrap();
42075 bool NUW = Add->getFlags().hasNoUnsignedWrap();
42076
42077 // We need an 'add nsw' feeding into the 'sext' or 'add nuw' feeding
42078 // into the 'zext'.
42079 if ((Sext && !NSW) || (!Sext && !NUW))
42080 return SDValue();
42081
42082 // Having a constant operand to the 'add' ensures that we are not increasing
42083 // the instruction count because the constant is extended for free below.
42084 // A constant operand can also become the displacement field of an LEA.
42085 auto *AddOp1 = dyn_cast<ConstantSDNode>(Add.getOperand(1));
42086 if (!AddOp1)
42087 return SDValue();
42088
42089 // Don't make the 'add' bigger if there's no hope of combining it with some
42090 // other 'add' or 'shl' instruction.
42091 // TODO: It may be profitable to generate simpler LEA instructions in place
42092 // of single 'add' instructions, but the cost model for selecting an LEA
42093 // currently has a high threshold.
42094 bool HasLEAPotential = false;
42095 for (auto *User : Ext->uses()) {
42096 if (User->getOpcode() == ISD::ADD || User->getOpcode() == ISD::SHL) {
42097 HasLEAPotential = true;
42098 break;
42099 }
42100 }
42101 if (!HasLEAPotential)
42102 return SDValue();
42103
42104 // Everything looks good, so pull the '{s|z}ext' ahead of the 'add'.
42105 int64_t AddConstant = Sext ? AddOp1->getSExtValue() : AddOp1->getZExtValue();
42106 SDValue AddOp0 = Add.getOperand(0);
42107 SDValue NewExt = DAG.getNode(Ext->getOpcode(), SDLoc(Ext), VT, AddOp0);
42108 SDValue NewConstant = DAG.getConstant(AddConstant, SDLoc(Add), VT);
42109
42110 // The wider add is guaranteed to not wrap because both operands are
42111 // sign- or zero-extended, matching the wrap flag.
42112 SDNodeFlags Flags;
42113 Flags.setNoSignedWrap(NSW);
42114 Flags.setNoUnsignedWrap(NUW);
42115 return DAG.getNode(ISD::ADD, SDLoc(Add), VT, NewExt, NewConstant, Flags);
42116}
42117
42118// If we face {ANY,SIGN,ZERO}_EXTEND that is applied to a CMOV with constant
42119// operands and the result of CMOV is not used anywhere else - promote CMOV
42120// itself instead of promoting its result. This could be beneficial, because:
42121// 1) X86TargetLowering::EmitLoweredSelect later can do merging of two
42122// (or more) pseudo-CMOVs only when they go one-after-another and
42123// getting rid of result extension code after CMOV will help that.
42124// 2) Promotion of constant CMOV arguments is free, hence the
42125// {ANY,SIGN,ZERO}_EXTEND will just be deleted.
42126 // 3) 16-bit CMOV encoding is 4 bytes, 32-bit CMOV is 3 bytes, so this
42127 // promotion is also good in terms of code size.
42128 // (64-bit CMOV is 4 bytes, which is why we don't do 32-bit => 64-bit
42129// promotion).
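// Illustrative example (assuming the checks below succeed):
//   (i32 zext (i16 cmov 7, 9, cond, eflags)) --> (i32 cmov 7, 9, cond, eflags)
// The constants are extended for free and the 16-bit CMOV plus the extension
// collapse into a single 32-bit CMOV.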
42130static SDValue combineToExtendCMOV(SDNode *Extend, SelectionDAG &DAG) {
42131 SDValue CMovN = Extend->getOperand(0);
42132 if (CMovN.getOpcode() != X86ISD::CMOV || !CMovN.hasOneUse())
42133 return SDValue();
42134
42135 EVT TargetVT = Extend->getValueType(0);
42136 unsigned ExtendOpcode = Extend->getOpcode();
42137 SDLoc DL(Extend);
42138
42139 EVT VT = CMovN.getValueType();
42140 SDValue CMovOp0 = CMovN.getOperand(0);
42141 SDValue CMovOp1 = CMovN.getOperand(1);
42142
42143 if (!isa<ConstantSDNode>(CMovOp0.getNode()) ||
42144 !isa<ConstantSDNode>(CMovOp1.getNode()))
42145 return SDValue();
42146
42147 // Only extend to i32 or i64.
42148 if (TargetVT != MVT::i32 && TargetVT != MVT::i64)
42149 return SDValue();
42150
42151 // Only extend from i16 unless it's a sign_extend from i32. Zext/aext from i32
42152 // are free.
42153 if (VT != MVT::i16 && !(ExtendOpcode == ISD::SIGN_EXTEND && VT == MVT::i32))
42154 return SDValue();
42155
42156 // If this is a zero extend to i64, we should only extend to i32 and use a free
42157 // zero extend to finish.
42158 EVT ExtendVT = TargetVT;
42159 if (TargetVT == MVT::i64 && ExtendOpcode != ISD::SIGN_EXTEND)
42160 ExtendVT = MVT::i32;
42161
42162 CMovOp0 = DAG.getNode(ExtendOpcode, DL, ExtendVT, CMovOp0);
42163 CMovOp1 = DAG.getNode(ExtendOpcode, DL, ExtendVT, CMovOp1);
42164
42165 SDValue Res = DAG.getNode(X86ISD::CMOV, DL, ExtendVT, CMovOp0, CMovOp1,
42166 CMovN.getOperand(2), CMovN.getOperand(3));
42167
42168 // Finish extending if needed.
42169 if (ExtendVT != TargetVT)
42170 Res = DAG.getNode(ExtendOpcode, DL, TargetVT, Res);
42171
42172 return Res;
42173}
42174
42175// Convert (vXiY *ext(vXi1 bitcast(iX))) to extend_in_reg(broadcast(iX)).
42176// This is more or less the reverse of combineBitcastvxi1.
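// Rough shape of the expansion for a small case (illustrative types):
//   (v8i16 sext (v8i1 bitcast (i8 %x)))
// becomes: broadcast %x to every lane, AND each lane with its per-lane bit
// mask (1, 2, 4, ..., 128), SETEQ the result against that mask, and
// sign-extend the compare; the zero_extend case additionally shifts the
// result right so each lane ends up as 0 or 1.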
42177static SDValue
42178combineToExtendBoolVectorInReg(SDNode *N, SelectionDAG &DAG,
42179 TargetLowering::DAGCombinerInfo &DCI,
42180 const X86Subtarget &Subtarget) {
42181 unsigned Opcode = N->getOpcode();
42182 if (Opcode != ISD::SIGN_EXTEND && Opcode != ISD::ZERO_EXTEND &&
42183 Opcode != ISD::ANY_EXTEND)
42184 return SDValue();
42185 if (!DCI.isBeforeLegalizeOps())
42186 return SDValue();
42187 if (!Subtarget.hasSSE2() || Subtarget.hasAVX512())
42188 return SDValue();
42189
42190 SDValue N0 = N->getOperand(0);
42191 EVT VT = N->getValueType(0);
42192 EVT SVT = VT.getScalarType();
42193 EVT InSVT = N0.getValueType().getScalarType();
42194 unsigned EltSizeInBits = SVT.getSizeInBits();
42195
42196 // Input type must be extending a bool vector (bit-casted from a scalar
42197 // integer) to legal integer types.
42198 if (!VT.isVector())
42199 return SDValue();
42200 if (SVT != MVT::i64 && SVT != MVT::i32 && SVT != MVT::i16 && SVT != MVT::i8)
42201 return SDValue();
42202 if (InSVT != MVT::i1 || N0.getOpcode() != ISD::BITCAST)
42203 return SDValue();
42204
42205 SDValue N00 = N0.getOperand(0);
42206 EVT SclVT = N0.getOperand(0).getValueType();
42207 if (!SclVT.isScalarInteger())
42208 return SDValue();
42209
42210 SDLoc DL(N);
42211 SDValue Vec;
42212 SmallVector<int, 32> ShuffleMask;
42213 unsigned NumElts = VT.getVectorNumElements();
42214 assert(NumElts == SclVT.getSizeInBits() && "Unexpected bool vector size");
42215
42216 // Broadcast the scalar integer to the vector elements.
42217 if (NumElts > EltSizeInBits) {
42218 // If the scalar integer is greater than the vector element size, then we
42219 // must split it down into sub-sections for broadcasting. For example:
42220 // i16 -> v16i8 (i16 -> v8i16 -> v16i8) with 2 sub-sections.
42221 // i32 -> v32i8 (i32 -> v8i32 -> v32i8) with 4 sub-sections.
42222 assert((NumElts % EltSizeInBits) == 0 && "Unexpected integer scale");
42223 unsigned Scale = NumElts / EltSizeInBits;
42224 EVT BroadcastVT =
42225 EVT::getVectorVT(*DAG.getContext(), SclVT, EltSizeInBits);
42226 Vec = DAG.getNode(ISD::SCALAR_TO_VECTOR, DL, BroadcastVT, N00);
42227 Vec = DAG.getBitcast(VT, Vec);
42228
42229 for (unsigned i = 0; i != Scale; ++i)
42230 ShuffleMask.append(EltSizeInBits, i);
42231 } else {
42232 // For a smaller scalar integer, we can simply any-extend it to the vector
42233 // element size (we don't care about the upper bits) and broadcast it to all
42234 // elements.
42235 SDValue Scl = DAG.getAnyExtOrTrunc(N00, DL, SVT);
42236 Vec = DAG.getNode(ISD::SCALAR_TO_VECTOR, DL, VT, Scl);
42237 ShuffleMask.append(NumElts, 0);
42238 }
42239 Vec = DAG.getVectorShuffle(VT, DL, Vec, Vec, ShuffleMask);
42240
42241 // Now, mask the relevant bit in each element.
42242 SmallVector<SDValue, 32> Bits;
42243 for (unsigned i = 0; i != NumElts; ++i) {
42244 int BitIdx = (i % EltSizeInBits);
42245 APInt Bit = APInt::getBitsSet(EltSizeInBits, BitIdx, BitIdx + 1);
42246 Bits.push_back(DAG.getConstant(Bit, DL, SVT));
42247 }
42248 SDValue BitMask = DAG.getBuildVector(VT, DL, Bits);
42249 Vec = DAG.getNode(ISD::AND, DL, VT, Vec, BitMask);
42250
42251 // Compare against the bitmask and extend the result.
42252 EVT CCVT = EVT::getVectorVT(*DAG.getContext(), MVT::i1, NumElts);
42253 Vec = DAG.getSetCC(DL, CCVT, Vec, BitMask, ISD::SETEQ);
42254 Vec = DAG.getSExtOrTrunc(Vec, DL, VT);
42255
42256 // For SEXT, this is now done, otherwise shift the result down for
42257 // zero-extension.
42258 if (Opcode == ISD::SIGN_EXTEND)
42259 return Vec;
42260 return DAG.getNode(ISD::SRL, DL, VT, Vec,
42261 DAG.getConstant(EltSizeInBits - 1, DL, VT));
42262}
42263
42264 // Attempt to combine a (sext/zext (setcc)) to a setcc with an xmm/ymm/zmm
42265// result type.
42266static SDValue combineExtSetcc(SDNode *N, SelectionDAG &DAG,
42267 const X86Subtarget &Subtarget) {
42268 SDValue N0 = N->getOperand(0);
42269 EVT VT = N->getValueType(0);
42270 SDLoc dl(N);
42271
42272 // Only do this combine with AVX512 for vector extends.
42273 if (!Subtarget.hasAVX512() || !VT.isVector() || N0.getOpcode() != ISD::SETCC)
42274 return SDValue();
42275
42276 // Only combine legal element types.
42277 EVT SVT = VT.getVectorElementType();
42278 if (SVT != MVT::i8 && SVT != MVT::i16 && SVT != MVT::i32 &&
42279 SVT != MVT::i64 && SVT != MVT::f32 && SVT != MVT::f64)
42280 return SDValue();
42281
42282 // We can only do this if the vector size is 256 bits or less.
42283 unsigned Size = VT.getSizeInBits();
42284 if (Size > 256)
42285 return SDValue();
42286
42287 // Don't fold if the condition code can't be handled by PCMPEQ/PCMPGT since
42288 // those are the only integer compares we have.
42289 ISD::CondCode CC = cast<CondCodeSDNode>(N0.getOperand(2))->get();
42290 if (ISD::isUnsignedIntSetCC(CC))
42291 return SDValue();
42292
42293 // Only do this combine if the extension will be fully consumed by the setcc.
42294 EVT N00VT = N0.getOperand(0).getValueType();
42295 EVT MatchingVecType = N00VT.changeVectorElementTypeToInteger();
42296 if (Size != MatchingVecType.getSizeInBits())
42297 return SDValue();
42298
42299 SDValue Res = DAG.getSetCC(dl, VT, N0.getOperand(0), N0.getOperand(1), CC);
42300
42301 if (N->getOpcode() == ISD::ZERO_EXTEND)
42302 Res = DAG.getZeroExtendInReg(Res, dl, N0.getValueType().getScalarType());
42303
42304 return Res;
42305}
42306
42307static SDValue combineSext(SDNode *N, SelectionDAG &DAG,
42308 TargetLowering::DAGCombinerInfo &DCI,
42309 const X86Subtarget &Subtarget) {
42310 SDValue N0 = N->getOperand(0);
42311 EVT VT = N->getValueType(0);
42312 EVT InVT = N0.getValueType();
42313 SDLoc DL(N);
42314
42315 if (SDValue NewCMov = combineToExtendCMOV(N, DAG))
42316 return NewCMov;
42317
42318 if (!DCI.isBeforeLegalizeOps())
42319 return SDValue();
42320
42321 if (SDValue V = combineExtSetcc(N, DAG, Subtarget))
42322 return V;
42323
42324 if (InVT == MVT::i1 && N0.getOpcode() == ISD::XOR &&
42325 isAllOnesConstant(N0.getOperand(1)) && N0.hasOneUse()) {
42326 // Invert and sign-extend a boolean is the same as zero-extend and subtract
42327 // 1 because 0 becomes -1 and 1 becomes 0. The subtract is efficiently
42328 // lowered with an LEA or a DEC. This is the same as: select Bool, 0, -1.
42329 // sext (xor Bool, -1) --> sub (zext Bool), 1
42330 SDValue Zext = DAG.getNode(ISD::ZERO_EXTEND, DL, VT, N0.getOperand(0));
42331 return DAG.getNode(ISD::SUB, DL, VT, Zext, DAG.getConstant(1, DL, VT));
42332 }
42333
42334 if (SDValue V = combineToExtendBoolVectorInReg(N, DAG, DCI, Subtarget))
42335 return V;
42336
42337 if (VT.isVector())
42338 if (SDValue R = PromoteMaskArithmetic(N, DAG, Subtarget))
42339 return R;
42340
42341 if (SDValue NewAdd = promoteExtBeforeAdd(N, DAG, Subtarget))
42342 return NewAdd;
42343
42344 return SDValue();
42345}
42346
42347static SDValue combineFMA(SDNode *N, SelectionDAG &DAG,
42348 TargetLowering::DAGCombinerInfo &DCI,
42349 const X86Subtarget &Subtarget) {
42350 SDLoc dl(N);
42351 EVT VT = N->getValueType(0);
42352
42353 // Let legalize expand this if it isn't a legal type yet.
42354 const TargetLowering &TLI = DAG.getTargetLoweringInfo();
42355 if (!TLI.isTypeLegal(VT))
42356 return SDValue();
42357
42358 EVT ScalarVT = VT.getScalarType();
42359 if ((ScalarVT != MVT::f32 && ScalarVT != MVT::f64) || !Subtarget.hasAnyFMA())
42360 return SDValue();
42361
42362 SDValue A = N->getOperand(0);
42363 SDValue B = N->getOperand(1);
42364 SDValue C = N->getOperand(2);
42365
42366 auto invertIfNegative = [&DAG, &TLI, &DCI](SDValue &V) {
42367 bool CodeSize = DAG.getMachineFunction().getFunction().hasOptSize();
42368 bool LegalOperations = !DCI.isBeforeLegalizeOps();
42369 if (TLI.isNegatibleForFree(V, DAG, LegalOperations, CodeSize) == 2) {
42370 V = TLI.getNegatedExpression(V, DAG, LegalOperations, CodeSize);
42371 return true;
42372 }
42373 // Look through extract_vector_elts. If it comes from an FNEG, create a
42374 // new extract from the FNEG input.
42375 if (V.getOpcode() == ISD::EXTRACT_VECTOR_ELT &&
42376 isNullConstant(V.getOperand(1))) {
42377 SDValue Vec = V.getOperand(0);
42378 if (TLI.isNegatibleForFree(Vec, DAG, LegalOperations, CodeSize) == 2) {
42379 SDValue NegVal =
42380 TLI.getNegatedExpression(Vec, DAG, LegalOperations, CodeSize);
42381 V = DAG.getNode(ISD::EXTRACT_VECTOR_ELT, SDLoc(V), V.getValueType(),
42382 NegVal, V.getOperand(1));
42383 return true;
42384 }
42385 }
42386
42387 return false;
42388 };
42389
42390 // Do not convert the passthru input of scalar intrinsics.
42391 // FIXME: We could allow negations of the lower element only.
42392 bool NegA = invertIfNegative(A);
42393 bool NegB = invertIfNegative(B);
42394 bool NegC = invertIfNegative(C);
42395
42396 if (!NegA && !NegB && !NegC)
42397 return SDValue();
42398
42399 unsigned NewOpcode =
42400 negateFMAOpcode(N->getOpcode(), NegA != NegB, NegC, false);
42401
42402 if (N->getNumOperands() == 4)
42403 return DAG.getNode(NewOpcode, dl, VT, A, B, C, N->getOperand(3));
42404 return DAG.getNode(NewOpcode, dl, VT, A, B, C);
42405}
42406
42407// Combine FMADDSUB(A, B, FNEG(C)) -> FMSUBADD(A, B, C)
42408// Combine FMSUBADD(A, B, FNEG(C)) -> FMADDSUB(A, B, C)
42409static SDValue combineFMADDSUB(SDNode *N, SelectionDAG &DAG,
42410 TargetLowering::DAGCombinerInfo &DCI) {
42411 SDLoc dl(N);
42412 EVT VT = N->getValueType(0);
42413 const TargetLowering &TLI = DAG.getTargetLoweringInfo();
42414 bool CodeSize = DAG.getMachineFunction().getFunction().hasOptSize();
42415 bool LegalOperations = !DCI.isBeforeLegalizeOps();
42416
42417 SDValue N2 = N->getOperand(2);
42418 if (TLI.isNegatibleForFree(N2, DAG, LegalOperations, CodeSize) != 2)
42419 return SDValue();
42420
42421 SDValue NegN2 = TLI.getNegatedExpression(N2, DAG, LegalOperations, CodeSize);
42422 unsigned NewOpcode = negateFMAOpcode(N->getOpcode(), false, true, false);
42423
42424 if (N->getNumOperands() == 4)
42425 return DAG.getNode(NewOpcode, dl, VT, N->getOperand(0), N->getOperand(1),
42426 NegN2, N->getOperand(3));
42427 return DAG.getNode(NewOpcode, dl, VT, N->getOperand(0), N->getOperand(1),
42428 NegN2);
42429}
42430
42431static SDValue combineZext(SDNode *N, SelectionDAG &DAG,
42432 TargetLowering::DAGCombinerInfo &DCI,
42433 const X86Subtarget &Subtarget) {
42434 // (i32 zext (and (i8 x86isd::setcc_carry), 1)) ->
42435 // (and (i32 x86isd::setcc_carry), 1)
42436 // This eliminates the zext. This transformation is necessary because
42437 // ISD::SETCC is always legalized to i8.
42438 SDLoc dl(N);
42439 SDValue N0 = N->getOperand(0);
42440 EVT VT = N->getValueType(0);
42441
42442 if (N0.getOpcode() == ISD::AND &&
42443 N0.hasOneUse() &&
42444 N0.getOperand(0).hasOneUse()) {
42445 SDValue N00 = N0.getOperand(0);
42446 if (N00.getOpcode() == X86ISD::SETCC_CARRY) {
42447 if (!isOneConstant(N0.getOperand(1)))
42448 return SDValue();
42449 return DAG.getNode(ISD::AND, dl, VT,
42450 DAG.getNode(X86ISD::SETCC_CARRY, dl, VT,
42451 N00.getOperand(0), N00.getOperand(1)),
42452 DAG.getConstant(1, dl, VT));
42453 }
42454 }
42455
42456 if (N0.getOpcode() == ISD::TRUNCATE &&
42457 N0.hasOneUse() &&
42458 N0.getOperand(0).hasOneUse()) {
42459 SDValue N00 = N0.getOperand(0);
42460 if (N00.getOpcode() == X86ISD::SETCC_CARRY) {
42461 return DAG.getNode(ISD::AND, dl, VT,
42462 DAG.getNode(X86ISD::SETCC_CARRY, dl, VT,
42463 N00.getOperand(0), N00.getOperand(1)),
42464 DAG.getConstant(1, dl, VT));
42465 }
42466 }
42467
42468 if (SDValue NewCMov = combineToExtendCMOV(N, DAG))
42469 return NewCMov;
42470
42471 if (DCI.isBeforeLegalizeOps())
42472 if (SDValue V = combineExtSetcc(N, DAG, Subtarget))
42473 return V;
42474
42475 if (SDValue V = combineToExtendBoolVectorInReg(N, DAG, DCI, Subtarget))
42476 return V;
42477
42478 if (VT.isVector())
42479 if (SDValue R = PromoteMaskArithmetic(N, DAG, Subtarget))
42480 return R;
42481
42482 if (SDValue NewAdd = promoteExtBeforeAdd(N, DAG, Subtarget))
42483 return NewAdd;
42484
42485 if (SDValue R = combineOrCmpEqZeroToCtlzSrl(N, DAG, DCI, Subtarget))
42486 return R;
42487
42488 // TODO: Combine with any target/faux shuffle.
42489 if (N0.getOpcode() == X86ISD::PACKUS && N0.getValueSizeInBits() == 128 &&
42490 VT.getScalarSizeInBits() == N0.getOperand(0).getScalarValueSizeInBits()) {
42491 SDValue N00 = N0.getOperand(0);
42492 SDValue N01 = N0.getOperand(1);
42493 unsigned NumSrcEltBits = N00.getScalarValueSizeInBits();
42494 APInt ZeroMask = APInt::getHighBitsSet(NumSrcEltBits, NumSrcEltBits / 2);
42495 if ((N00.isUndef() || DAG.MaskedValueIsZero(N00, ZeroMask)) &&
42496 (N01.isUndef() || DAG.MaskedValueIsZero(N01, ZeroMask))) {
42497 return concatSubVectors(N00, N01, DAG, dl);
42498 }
42499 }
42500
42501 return SDValue();
42502}
42503
42504/// Try to map a 128-bit or larger integer comparison to vector instructions
42505/// before type legalization splits it up into chunks.
42506static SDValue combineVectorSizedSetCCEquality(SDNode *SetCC, SelectionDAG &DAG,
42507 const X86Subtarget &Subtarget) {
42508 ISD::CondCode CC = cast<CondCodeSDNode>(SetCC->getOperand(2))->get();
42509 assert((CC == ISD::SETNE || CC == ISD::SETEQ) && "Bad comparison predicate");
42510
42511 // We're looking for an oversized integer equality comparison.
42512 SDValue X = SetCC->getOperand(0);
42513 SDValue Y = SetCC->getOperand(1);
42514 EVT OpVT = X.getValueType();
42515 unsigned OpSize = OpVT.getSizeInBits();
42516 if (!OpVT.isScalarInteger() || OpSize < 128)
42517 return SDValue();
42518
42519 // Ignore a comparison with zero because that gets special treatment in
42520 // EmitTest(). But make an exception for the special case of a pair of
42521 // logically-combined vector-sized operands compared to zero. This pattern may
42522 // be generated by the memcmp expansion pass with oversized integer compares
42523 // (see PR33325).
42524 bool IsOrXorXorCCZero = isNullConstant(Y) && X.getOpcode() == ISD::OR &&
42525 X.getOperand(0).getOpcode() == ISD::XOR &&
42526 X.getOperand(1).getOpcode() == ISD::XOR;
42527 if (isNullConstant(Y) && !IsOrXorXorCCZero)
42528 return SDValue();
42529
42530 // Don't perform this combine if constructing the vector will be expensive.
42531 auto IsVectorBitCastCheap = [](SDValue X) {
42532 X = peekThroughBitcasts(X);
42533 return isa<ConstantSDNode>(X) || X.getValueType().isVector() ||
42534 X.getOpcode() == ISD::LOAD;
42535 };
42536 if ((!IsVectorBitCastCheap(X) || !IsVectorBitCastCheap(Y)) &&
42537 !IsOrXorXorCCZero)
42538 return SDValue();
42539
42540 // TODO: Use PXOR + PTEST for SSE4.1 or later?
42541 EVT VT = SetCC->getValueType(0);
42542 SDLoc DL(SetCC);
42543 if ((OpSize == 128 && Subtarget.hasSSE2()) ||
42544 (OpSize == 256 && Subtarget.hasAVX2()) ||
42545 (OpSize == 512 && Subtarget.useAVX512Regs())) {
42546 auto BW = Subtarget.hasBWI();
42547 EVT VecVT = OpSize == 512 ? (BW ? MVT::v64i8 : MVT::v16i32) :
42548 OpSize == 256 ? MVT::v32i8 :
42549 MVT::v16i8;
42550 EVT CmpVT = OpSize == 512 ? (BW ? MVT::v64i1 : MVT::v16i1) : VecVT;
42551
42552 SDValue Cmp;
42553 if (IsOrXorXorCCZero) {
42554 // This is a bitwise-combined equality comparison of 2 pairs of vectors:
42555 // setcc i128 (or (xor A, B), (xor C, D)), 0, eq|ne
42556 // Use 2 vector equality compares and 'and' the results before doing a
42557 // MOVMSK.
42558 SDValue A = DAG.getBitcast(VecVT, X.getOperand(0).getOperand(0));
42559 SDValue B = DAG.getBitcast(VecVT, X.getOperand(0).getOperand(1));
42560 SDValue C = DAG.getBitcast(VecVT, X.getOperand(1).getOperand(0));
42561 SDValue D = DAG.getBitcast(VecVT, X.getOperand(1).getOperand(1));
42562 SDValue Cmp1 = DAG.getSetCC(DL, CmpVT, A, B, ISD::SETEQ);
42563 SDValue Cmp2 = DAG.getSetCC(DL, CmpVT, C, D, ISD::SETEQ);
42564 Cmp = DAG.getNode(ISD::AND, DL, CmpVT, Cmp1, Cmp2);
42565 } else {
42566 SDValue VecX = DAG.getBitcast(VecVT, X);
42567 SDValue VecY = DAG.getBitcast(VecVT, Y);
42568 Cmp = DAG.getSetCC(DL, CmpVT, VecX, VecY, ISD::SETEQ);
42569 }
42570 // For 512-bits we want to emit a setcc that will lower to kortest.
42571 if (OpSize == 512 && BW)
42572 return DAG.getSetCC(DL, VT, DAG.getBitcast(MVT::i64, Cmp),
42573 DAG.getConstant(0xFFFFFFFFFFFFFFFF, DL, MVT::i64), CC);
42574 if (OpSize == 512)
42575 return DAG.getSetCC(DL, VT, DAG.getBitcast(MVT::i16, Cmp),
42576 DAG.getConstant(0xFFFF, DL, MVT::i16), CC);
42577 // If all bytes match (bitmask is 0x(FFFF)FFFF), that's equality.
42578 // setcc i128 X, Y, eq --> setcc (pmovmskb (pcmpeqb X, Y)), 0xFFFF, eq
42579 // setcc i128 X, Y, ne --> setcc (pmovmskb (pcmpeqb X, Y)), 0xFFFF, ne
42580 // setcc i256 X, Y, eq --> setcc (vpmovmskb (vpcmpeqb X, Y)), 0xFFFFFFFF, eq
42581 // setcc i256 X, Y, ne --> setcc (vpmovmskb (vpcmpeqb X, Y)), 0xFFFFFFFF, ne
42582 SDValue MovMsk = DAG.getNode(X86ISD::MOVMSK, DL, MVT::i32, Cmp);
42583 SDValue FFFFs = DAG.getConstant(OpSize == 128 ? 0xFFFF : 0xFFFFFFFF, DL,
42584 MVT::i32);
42585 return DAG.getSetCC(DL, VT, MovMsk, FFFFs, CC);
42586 }
42587
42588 return SDValue();
42589}
42590
42591static SDValue combineSetCC(SDNode *N, SelectionDAG &DAG,
42592 const X86Subtarget &Subtarget) {
42593 ISD::CondCode CC = cast<CondCodeSDNode>(N->getOperand(2))->get();
42594 SDValue LHS = N->getOperand(0);
42595 SDValue RHS = N->getOperand(1);
42596 EVT VT = N->getValueType(0);
42597 EVT OpVT = LHS.getValueType();
42598 SDLoc DL(N);
42599
42600 if (CC == ISD::SETNE || CC == ISD::SETEQ) {
42601 // 0-x == y --> x+y == 0
42602 // 0-x != y --> x+y != 0
42603 if (LHS.getOpcode() == ISD::SUB && isNullConstant(LHS.getOperand(0)) &&
42604 LHS.hasOneUse()) {
42605 SDValue Add = DAG.getNode(ISD::ADD, DL, OpVT, RHS, LHS.getOperand(1));
42606 return DAG.getSetCC(DL, VT, Add, DAG.getConstant(0, DL, OpVT), CC);
42607 }
42608 // x == 0-y --> x+y == 0
42609 // x != 0-y --> x+y != 0
42610 if (RHS.getOpcode() == ISD::SUB && isNullConstant(RHS.getOperand(0)) &&
42611 RHS.hasOneUse()) {
42612 SDValue Add = DAG.getNode(ISD::ADD, DL, OpVT, LHS, RHS.getOperand(1));
42613 return DAG.getSetCC(DL, VT, Add, DAG.getConstant(0, DL, OpVT), CC);
42614 }
42615
42616 if (SDValue V = combineVectorSizedSetCCEquality(N, DAG, Subtarget))
42617 return V;
42618 }
42619
42620 if (VT.isVector() && VT.getVectorElementType() == MVT::i1 &&
42621 (CC == ISD::SETNE || CC == ISD::SETEQ || ISD::isSignedIntSetCC(CC))) {
42622 // Put build_vectors on the right.
42623 if (LHS.getOpcode() == ISD::BUILD_VECTOR) {
42624 std::swap(LHS, RHS);
42625 CC = ISD::getSetCCSwappedOperands(CC);
42626 }
42627
42628 bool IsSEXT0 =
42629 (LHS.getOpcode() == ISD::SIGN_EXTEND) &&
42630 (LHS.getOperand(0).getValueType().getVectorElementType() == MVT::i1);
42631 bool IsVZero1 = ISD::isBuildVectorAllZeros(RHS.getNode());
42632
42633 if (IsSEXT0 && IsVZero1) {
42634 assert(VT == LHS.getOperand(0).getValueType() &&
42635 "Unexpected operand type");
42636 if (CC == ISD::SETGT)
42637 return DAG.getConstant(0, DL, VT);
42638 if (CC == ISD::SETLE)
42639 return DAG.getConstant(1, DL, VT);
42640 if (CC == ISD::SETEQ || CC == ISD::SETGE)
42641 return DAG.getNOT(DL, LHS.getOperand(0), VT);
42642
42643 assert((CC == ISD::SETNE || CC == ISD::SETLT) &&
42644 "Unexpected condition code!");
42645 return LHS.getOperand(0);
42646 }
42647 }
42648
42649 // If we have AVX512, but not BWI and this is a vXi16/vXi8 setcc, just
42650 // pre-promote its result type since vXi1 vectors don't get promoted
42651 // during type legalization.
42652 // NOTE: The element count check is to ignore operand types that need to
42653 // go through type promotion to a 128-bit vector.
42654 if (Subtarget.hasAVX512() && !Subtarget.hasBWI() && VT.isVector() &&
42655 VT.getVectorElementType() == MVT::i1 &&
42656 (OpVT.getVectorElementType() == MVT::i8 ||
42657 OpVT.getVectorElementType() == MVT::i16)) {
42658 SDValue Setcc = DAG.getNode(ISD::SETCC, DL, OpVT, LHS, RHS,
42659 N->getOperand(2));
42660 return DAG.getNode(ISD::TRUNCATE, DL, VT, Setcc);
42661 }
42662
42663 // For an SSE1-only target, lower a comparison of v4f32 to X86ISD::CMPP early
42664 // to avoid scalarization via legalization because v4i32 is not a legal type.
42665 if (Subtarget.hasSSE1() && !Subtarget.hasSSE2() && VT == MVT::v4i32 &&
42666 LHS.getValueType() == MVT::v4f32)
42667 return LowerVSETCC(SDValue(N, 0), Subtarget, DAG);
42668
42669 return SDValue();
42670}
42671
42672static SDValue combineMOVMSK(SDNode *N, SelectionDAG &DAG,
42673 TargetLowering::DAGCombinerInfo &DCI,
42674 const X86Subtarget &Subtarget) {
42675 SDValue Src = N->getOperand(0);
42676 MVT SrcVT = Src.getSimpleValueType();
42677 MVT VT = N->getSimpleValueType(0);
42678 unsigned NumBits = VT.getScalarSizeInBits();
42679 unsigned NumElts = SrcVT.getVectorNumElements();
42680
42681 // Perform constant folding.
42682 if (ISD::isBuildVectorOfConstantSDNodes(Src.getNode())) {
42683 assert(VT == MVT::i32 && "Unexpected result type");
42684 APInt Imm(32, 0);
42685 for (unsigned Idx = 0, e = Src.getNumOperands(); Idx < e; ++Idx) {
42686 if (!Src.getOperand(Idx).isUndef() &&
42687 Src.getConstantOperandAPInt(Idx).isNegative())
42688 Imm.setBit(Idx);
42689 }
42690 return DAG.getConstant(Imm, SDLoc(N), VT);
42691 }
42692
42693 // Look through int->fp bitcasts that don't change the element width.
42694 unsigned EltWidth = SrcVT.getScalarSizeInBits();
42695 if (Subtarget.hasSSE2() && Src.getOpcode() == ISD::BITCAST &&
42696 Src.getOperand(0).getScalarValueSizeInBits() == EltWidth)
42697 return DAG.getNode(X86ISD::MOVMSK, SDLoc(N), VT, Src.getOperand(0));
42698
42699 // Fold movmsk(not(x)) -> not(movmsk) to improve folding of movmsk results
42700 // with scalar comparisons.
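// E.g. for a v4i32 source, NumElts == 4, so NotMask == 0xF and the fold
// below yields movmsk(x) ^ 0xF, flipping exactly the four sign-bit lanes in
// the scalar result (example values only).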
42701 if (SDValue NotSrc = IsNOT(Src, DAG)) {
42702 SDLoc DL(N);
42703 APInt NotMask = APInt::getLowBitsSet(NumBits, NumElts);
42704 NotSrc = DAG.getBitcast(SrcVT, NotSrc);
42705 return DAG.getNode(ISD::XOR, DL, VT,
42706 DAG.getNode(X86ISD::MOVMSK, DL, VT, NotSrc),
42707 DAG.getConstant(NotMask, DL, VT));
42708 }
42709
42710 // Simplify the inputs.
42711 const TargetLowering &TLI = DAG.getTargetLoweringInfo();
42712 APInt DemandedMask(APInt::getAllOnesValue(NumBits));
42713 if (TLI.SimplifyDemandedBits(SDValue(N, 0), DemandedMask, DCI))
42714 return SDValue(N, 0);
42715
42716 return SDValue();
42717}
42718
42719static SDValue combineX86GatherScatter(SDNode *N, SelectionDAG &DAG,
42720 TargetLowering::DAGCombinerInfo &DCI) {
42721 // With vector masks we only demand the upper bit of the mask.
42722 SDValue Mask = cast<X86MaskedGatherScatterSDNode>(N)->getMask();
42723 if (Mask.getScalarValueSizeInBits() != 1) {
42724 const TargetLowering &TLI = DAG.getTargetLoweringInfo();
42725 APInt DemandedMask(APInt::getSignMask(Mask.getScalarValueSizeInBits()));
42726 if (TLI.SimplifyDemandedBits(Mask, DemandedMask, DCI))
42727 return SDValue(N, 0);
42728 }
42729
42730 return SDValue();
42731}
42732
42733static SDValue combineGatherScatter(SDNode *N, SelectionDAG &DAG,
42734 TargetLowering::DAGCombinerInfo &DCI) {
42735 SDLoc DL(N);
42736 auto *GorS = cast<MaskedGatherScatterSDNode>(N);
42737 SDValue Chain = GorS->getChain();
42738 SDValue Index = GorS->getIndex();
42739 SDValue Mask = GorS->getMask();
42740 SDValue Base = GorS->getBasePtr();
42741 SDValue Scale = GorS->getScale();
42742
42743 if (DCI.isBeforeLegalize()) {
42744 unsigned IndexWidth = Index.getScalarValueSizeInBits();
42745
42746 // Shrink constant indices if they are larger than 32 bits.
42747 // Only do this before legalize types since v2i64 could become v2i32.
42748 // FIXME: We could check that the type is legal if we're after legalize
42749 // types, but then we would need to construct test cases where that happens.
42750 // FIXME: We could support more than just constant vectors, but we need to be
42751 // careful with costing. A truncate that can be optimized out would be fine.
42752 // Otherwise we might only want to create a truncate if it avoids a split.
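// Sketch of the case handled here (illustrative values): a gather with
// constant v2i64 indices <0, 16> has well over 32 sign bits per element, so
// the index vector is truncated to v2i32 and the gather is rebuilt with the
// narrower index before type legalization can split the wide type.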
42753 if (auto *BV = dyn_cast<BuildVectorSDNode>(Index)) {
42754 if (BV->isConstant() && IndexWidth > 32 &&
42755 DAG.ComputeNumSignBits(Index) > (IndexWidth - 32)) {
42756 unsigned NumElts = Index.getValueType().getVectorNumElements();
42757 EVT NewVT = EVT::getVectorVT(*DAG.getContext(), MVT::i32, NumElts);
42758 Index = DAG.getNode(ISD::TRUNCATE, DL, NewVT, Index);
42759 if (auto *Gather = dyn_cast<MaskedGatherSDNode>(GorS)) {
42760 SDValue Ops[] = { Chain, Gather->getPassThru(),
42761 Mask, Base, Index, Scale } ;
42762 return DAG.getMaskedGather(Gather->getVTList(),
42763 Gather->getMemoryVT(), DL, Ops,
42764 Gather->getMemOperand(),
42765 Gather->getIndexType());
42766 }
42767 auto *Scatter = cast<MaskedScatterSDNode>(GorS);
42768 SDValue Ops[] = { Chain, Scatter->getValue(),
42769 Mask, Base, Index, Scale };
42770 return DAG.getMaskedScatter(Scatter->getVTList(),
42771 Scatter->getMemoryVT(), DL,
42772 Ops, Scatter->getMemOperand(),
42773 Scatter->getIndexType());
42774 }
42775 }
42776
42777 // Shrink any sign/zero extend whose source is 32 bits or smaller and whose
42778 // result is wider than 32 bits if there are sufficient sign bits. Only do
42779 // this before legalize types to avoid creating illegal types in truncate.
42780 if ((Index.getOpcode() == ISD::SIGN_EXTEND ||
42781 Index.getOpcode() == ISD::ZERO_EXTEND) &&
42782 IndexWidth > 32 &&
42783 Index.getOperand(0).getScalarValueSizeInBits() <= 32 &&
42784 DAG.ComputeNumSignBits(Index) > (IndexWidth - 32)) {
42785 unsigned NumElts = Index.getValueType().getVectorNumElements();
42786 EVT NewVT = EVT::getVectorVT(*DAG.getContext(), MVT::i32, NumElts);
42787 Index = DAG.getNode(ISD::TRUNCATE, DL, NewVT, Index);
42788 if (auto *Gather = dyn_cast<MaskedGatherSDNode>(GorS)) {
42789 SDValue Ops[] = { Chain, Gather->getPassThru(),
42790 Mask, Base, Index, Scale } ;
42791 return DAG.getMaskedGather(Gather->getVTList(),
42792 Gather->getMemoryVT(), DL, Ops,
42793 Gather->getMemOperand(),
42794 Gather->getIndexType());
42795 }
42796 auto *Scatter = cast<MaskedScatterSDNode>(GorS);
42797 SDValue Ops[] = { Chain, Scatter->getValue(),
42798 Mask, Base, Index, Scale };
42799 return DAG.getMaskedScatter(Scatter->getVTList(),
42800 Scatter->getMemoryVT(), DL,
42801 Ops, Scatter->getMemOperand(),
42802 Scatter->getIndexType());
42803 }
42804 }
42805
42806 if (DCI.isBeforeLegalizeOps()) {
42807 unsigned IndexWidth = Index.getScalarValueSizeInBits();
42808
42809 // Make sure the index is either i32 or i64
42810 if (IndexWidth != 32 && IndexWidth != 64) {
42811 MVT EltVT = IndexWidth > 32 ? MVT::i64 : MVT::i32;
42812 EVT IndexVT = EVT::getVectorVT(*DAG.getContext(), EltVT,
42813 Index.getValueType().getVectorNumElements());
42814 Index = DAG.getSExtOrTrunc(Index, DL, IndexVT);
42815 if (auto *Gather = dyn_cast<MaskedGatherSDNode>(GorS)) {
42816 SDValue Ops[] = { Chain, Gather->getPassThru(),
42817 Mask, Base, Index, Scale } ;
42818 return DAG.getMaskedGather(Gather->getVTList(),
42819 Gather->getMemoryVT(), DL, Ops,
42820 Gather->getMemOperand(),
42821 Gather->getIndexType());
42822 }
42823 auto *Scatter = cast<MaskedScatterSDNode>(GorS);
42824 SDValue Ops[] = { Chain, Scatter->getValue(),
42825 Mask, Base, Index, Scale };
42826 return DAG.getMaskedScatter(Scatter->getVTList(),
42827 Scatter->getMemoryVT(), DL,
42828 Ops, Scatter->getMemOperand(),
42829 Scatter->getIndexType());
42830 }
42831 }
42832
42833 // With vector masks we only demand the upper bit of the mask.
42834 if (Mask.getScalarValueSizeInBits() != 1) {
42835 const TargetLowering &TLI = DAG.getTargetLoweringInfo();
42836 APInt DemandedMask(APInt::getSignMask(Mask.getScalarValueSizeInBits()));
42837 if (TLI.SimplifyDemandedBits(Mask, DemandedMask, DCI))
42838 return SDValue(N, 0);
42839 }
42840
42841 return SDValue();
42842}
42843
42844// Optimize RES = X86ISD::SETCC CONDCODE, EFLAG_INPUT
42845static SDValue combineX86SetCC(SDNode *N, SelectionDAG &DAG,
42846 const X86Subtarget &Subtarget) {
42847 SDLoc DL(N);
42848 X86::CondCode CC = X86::CondCode(N->getConstantOperandVal(0));
42849 SDValue EFLAGS = N->getOperand(1);
42850
42851 // Try to simplify the EFLAGS and condition code operands.
42852 if (SDValue Flags = combineSetCCEFLAGS(EFLAGS, CC, DAG, Subtarget))
42853 return getSETCC(CC, Flags, DL, DAG);
42854
42855 return SDValue();
42856}
42857
42858/// Optimize branch condition evaluation.
42859static SDValue combineBrCond(SDNode *N, SelectionDAG &DAG,
42860 const X86Subtarget &Subtarget) {
42861 SDLoc DL(N);
42862 SDValue EFLAGS = N->getOperand(3);
42863 X86::CondCode CC = X86::CondCode(N->getConstantOperandVal(2));
42864
42865 // Try to simplify the EFLAGS and condition code operands.
42866 // Make sure to not keep references to operands, as combineSetCCEFLAGS can
42867 // RAUW them under us.
42868 if (SDValue Flags = combineSetCCEFLAGS(EFLAGS, CC, DAG, Subtarget)) {
42869 SDValue Cond = DAG.getTargetConstant(CC, DL, MVT::i8);
42870 return DAG.getNode(X86ISD::BRCOND, DL, N->getVTList(), N->getOperand(0),
42871 N->getOperand(1), Cond, Flags);
42872 }
42873
42874 return SDValue();
42875}
42876
42877static SDValue combineVectorCompareAndMaskUnaryOp(SDNode *N,
42878 SelectionDAG &DAG) {
42879 // Take advantage of vector comparisons producing 0 or -1 in each lane to
42880 // optimize away operation when it's from a constant.
42881 //
42882 // The general transformation is:
42883 // UNARYOP(AND(VECTOR_CMP(x,y), constant)) -->
42884 // AND(VECTOR_CMP(x,y), constant2)
42885 // constant2 = UNARYOP(constant)
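// For instance (illustrative constants):
//   (v4f32 sint_to_fp (and (vector_cmp x, y), <1, 1, 1, 1>))
//     --> (bitcast (and (vector_cmp x, y), (bitcast <1.0, 1.0, 1.0, 1.0>)))
// Each compare lane is 0 or -1, so the AND picks either 0 or the constant,
// and converting the constant up front produces the same per-lane values.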
42886
42887 // Early exit if this isn't a vector operation, the operand of the
42888 // unary operation isn't a bitwise AND, or if the sizes of the operations
42889 // aren't the same.
42890 EVT VT = N->getValueType(0);
42891 if (!VT.isVector() || N->getOperand(0)->getOpcode() != ISD::AND ||
42892 N->getOperand(0)->getOperand(0)->getOpcode() != ISD::SETCC ||
42893 VT.getSizeInBits() != N->getOperand(0).getValueSizeInBits())
42894 return SDValue();
42895
42896 // Now check that the other operand of the AND is a constant. We could
42897 // make the transformation for non-constant splats as well, but it's unclear
42898 // that would be a benefit as it would not eliminate any operations, just
42899 // perform one more step in scalar code before moving to the vector unit.
42900 if (auto *BV = dyn_cast<BuildVectorSDNode>(N->getOperand(0).getOperand(1))) {
42901 // Bail out if the vector isn't a constant.
42902 if (!BV->isConstant())
42903 return SDValue();
42904
42905 // Everything checks out. Build up the new and improved node.
42906 SDLoc DL(N);
42907 EVT IntVT = BV->getValueType(0);
42908 // Create a new constant of the appropriate type for the transformed
42909 // DAG.
42910 SDValue SourceConst = DAG.getNode(N->getOpcode(), DL, VT, SDValue(BV, 0));
42911 // The AND node needs bitcasts to/from an integer vector type around it.
42912 SDValue MaskConst = DAG.getBitcast(IntVT, SourceConst);
42913 SDValue NewAnd = DAG.getNode(ISD::AND, DL, IntVT,
42914 N->getOperand(0)->getOperand(0), MaskConst);
42915 SDValue Res = DAG.getBitcast(VT, NewAnd);
42916 return Res;
42917 }
42918
42919 return SDValue();
42920}
42921
42922/// If we are converting a value to floating-point, try to replace scalar
42923/// truncate of an extracted vector element with a bitcast. This tries to keep
42924/// the sequence on XMM registers rather than moving between vector and GPRs.
42925static SDValue combineToFPTruncExtElt(SDNode *N, SelectionDAG &DAG) {
42926 // TODO: This is currently only used by combineSIntToFP, but it is generalized
42927 // to allow being called by any similar cast opcode.
42928 // TODO: Consider merging this into lowering: vectorizeExtractedCast().
42929 SDValue Trunc = N->getOperand(0);
42930 if (!Trunc.hasOneUse() || Trunc.getOpcode() != ISD::TRUNCATE)
42931 return SDValue();
42932
42933 SDValue ExtElt = Trunc.getOperand(0);
42934 if (!ExtElt.hasOneUse() || ExtElt.getOpcode() != ISD::EXTRACT_VECTOR_ELT ||
42935 !isNullConstant(ExtElt.getOperand(1)))
42936 return SDValue();
42937
42938 EVT TruncVT = Trunc.getValueType();
42939 EVT SrcVT = ExtElt.getValueType();
42940 unsigned DestWidth = TruncVT.getSizeInBits();
42941 unsigned SrcWidth = SrcVT.getSizeInBits();
42942 if (SrcWidth % DestWidth != 0)
42943 return SDValue();
42944
42945 // inttofp (trunc (extelt X, 0)) --> inttofp (extelt (bitcast X), 0)
42946 EVT SrcVecVT = ExtElt.getOperand(0).getValueType();
42947 unsigned VecWidth = SrcVecVT.getSizeInBits();
42948 unsigned NumElts = VecWidth / DestWidth;
42949 EVT BitcastVT = EVT::getVectorVT(*DAG.getContext(), TruncVT, NumElts);
42950 SDValue BitcastVec = DAG.getBitcast(BitcastVT, ExtElt.getOperand(0));
42951 SDLoc DL(N);
42952 SDValue NewExtElt = DAG.getNode(ISD::EXTRACT_VECTOR_ELT, DL, TruncVT,
42953 BitcastVec, ExtElt.getOperand(1));
42954 return DAG.getNode(N->getOpcode(), DL, N->getValueType(0), NewExtElt);
42955}
42956
42957static SDValue combineUIntToFP(SDNode *N, SelectionDAG &DAG,
42958 const X86Subtarget &Subtarget) {
42959 SDValue Op0 = N->getOperand(0);
42960 EVT VT = N->getValueType(0);
42961 EVT InVT = Op0.getValueType();
42962
42963 // UINT_TO_FP(vXi1) -> SINT_TO_FP(ZEXT(vXi1 to vXi32))
42964 // UINT_TO_FP(vXi8) -> SINT_TO_FP(ZEXT(vXi8 to vXi32))
42965 // UINT_TO_FP(vXi16) -> SINT_TO_FP(ZEXT(vXi16 to vXi32))
42966 if (InVT.isVector() && InVT.getScalarSizeInBits() < 32) {
42967 SDLoc dl(N);
42968 EVT DstVT = EVT::getVectorVT(*DAG.getContext(), MVT::i32,
42969 InVT.getVectorNumElements());
42970 SDValue P = DAG.getNode(ISD::ZERO_EXTEND, dl, DstVT, Op0);
42971
42972 // UINT_TO_FP isn't legal without AVX512 so use SINT_TO_FP.
42973 return DAG.getNode(ISD::SINT_TO_FP, dl, VT, P);
42974 }
42975
42976 // Since UINT_TO_FP is legal (it's marked custom), dag combiner won't
42977 // optimize it to a SINT_TO_FP when the sign bit is known zero. Perform
42978 // the optimization here.
42979 if (DAG.SignBitIsZero(Op0))
42980 return DAG.getNode(ISD::SINT_TO_FP, SDLoc(N), VT, Op0);
42981
42982 return SDValue();
42983}
42984
42985static SDValue combineSIntToFP(SDNode *N, SelectionDAG &DAG,
42986 TargetLowering::DAGCombinerInfo &DCI,
42987 const X86Subtarget &Subtarget) {
42988 // First try to optimize away the conversion entirely when it's
42989 // conditionally from a constant. Vectors only.
42990 if (SDValue Res = combineVectorCompareAndMaskUnaryOp(N, DAG))
42991 return Res;
42992
42993 // Now move on to more general possibilities.
42994 SDValue Op0 = N->getOperand(0);
42995 EVT VT = N->getValueType(0);
42996 EVT InVT = Op0.getValueType();
42997
42998 // SINT_TO_FP(vXi1) -> SINT_TO_FP(SEXT(vXi1 to vXi32))
42999 // SINT_TO_FP(vXi8) -> SINT_TO_FP(SEXT(vXi8 to vXi32))
43000 // SINT_TO_FP(vXi16) -> SINT_TO_FP(SEXT(vXi16 to vXi32))
43001 if (InVT.isVector() && InVT.getScalarSizeInBits() < 32) {
43002 SDLoc dl(N);
43003 EVT DstVT = EVT::getVectorVT(*DAG.getContext(), MVT::i32,
43004 InVT.getVectorNumElements());
43005 SDValue P = DAG.getNode(ISD::SIGN_EXTEND, dl, DstVT, Op0);
43006 return DAG.getNode(ISD::SINT_TO_FP, dl, VT, P);
43007 }
43008
43009 // Without AVX512DQ we only support i64 to float scalar conversion. For both
43010 // vectors and scalars, see if we know that the upper bits are all the sign
43011 // bit, in which case we can truncate the input to i32 and convert from that.
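// E.g. (illustrative): if Op0 is (i64 sext i32 %x), ComputeNumSignBits
// reports at least 33 sign bits, so the value can be truncated to i32 and
// converted from there (a plain truncate before legalization, or the
// v4i32 shuffle + CVTSI2P path below for the post-legalization v2i64 case).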
43012 if (InVT.getScalarSizeInBits() > 32 && !Subtarget.hasDQI()) {
43013 unsigned BitWidth = InVT.getScalarSizeInBits();
43014 unsigned NumSignBits = DAG.ComputeNumSignBits(Op0);
43015 if (NumSignBits >= (BitWidth - 31)) {
43016 EVT TruncVT = MVT::i32;
43017 if (InVT.isVector())
43018 TruncVT = EVT::getVectorVT(*DAG.getContext(), TruncVT,
43019 InVT.getVectorNumElements());
43020 SDLoc dl(N);
43021 if (DCI.isBeforeLegalize() || TruncVT != MVT::v2i32) {
43022 SDValue Trunc = DAG.getNode(ISD::TRUNCATE, dl, TruncVT, Op0);
43023 return DAG.getNode(ISD::SINT_TO_FP, dl, VT, Trunc);
43024 }
43025 // If we're after legalize and the type is v2i32 we need to shuffle and
43026 // use CVTSI2P.
43027 assert(InVT == MVT::v2i64 && "Unexpected VT!");
43028 SDValue Cast = DAG.getBitcast(MVT::v4i32, Op0);
43029 SDValue Shuf = DAG.getVectorShuffle(MVT::v4i32, dl, Cast, Cast,
43030 { 0, 2, -1, -1 });
43031 return DAG.getNode(X86ISD::CVTSI2P, dl, VT, Shuf);
43032 }
43033 }
43034
43035 // Transform (SINT_TO_FP (i64 ...)) into an x87 operation if we have
43036 // a 32-bit target where SSE doesn't support i64->FP operations.
43037 if (!Subtarget.useSoftFloat() && Subtarget.hasX87() &&
43038 Op0.getOpcode() == ISD::LOAD) {
43039 LoadSDNode *Ld = cast<LoadSDNode>(Op0.getNode());
43040 EVT LdVT = Ld->getValueType(0);
43041
43042 // This transformation is not supported if the result type is f16 or f128.
43043 if (VT == MVT::f16 || VT == MVT::f128)
43044 return SDValue();
43045
43046 // If we have AVX512DQ we can use packed conversion instructions unless
43047 // the VT is f80.
43048 if (Subtarget.hasDQI() && VT != MVT::f80)
43049 return SDValue();
43050
43051 if (Ld->isSimple() && !VT.isVector() &&
43052 ISD::isNON_EXTLoad(Op0.getNode()) && Op0.hasOneUse() &&
43053 !Subtarget.is64Bit() && LdVT == MVT::i64) {
43054 SDValue FILDChain = Subtarget.getTargetLowering()->BuildFILD(
43055 SDValue(N, 0), LdVT, Ld->getChain(), Op0, DAG);
43056 DAG.ReplaceAllUsesOfValueWith(Op0.getValue(1), FILDChain.getValue(1));
43057 return FILDChain;
43058 }
43059 }
43060
43061 if (SDValue V = combineToFPTruncExtElt(N, DAG))
43062 return V;
43063
43064 return SDValue();
43065}
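// ---- Illustrative aside (not part of X86ISelLowering.cpp) ----
// A minimal standalone sketch of the invariant the vXi1/vXi8/vXi16 widening in
// combineSIntToFP relies on: sign-extending a narrow signed integer before the
// int->fp conversion does not change the converted value. Plain C++14; the
// names below are ours, not LLVM's.
#include <cstdint>

constexpr float cvt_direct(int16_t x) { return static_cast<float>(x); }
constexpr float cvt_via_i32(int16_t x) {
  // Widen with sign extension first, mirroring the SIGN_EXTEND the combine adds.
  return static_cast<float>(static_cast<int32_t>(x));
}
static_assert(cvt_direct(-123) == cvt_via_i32(-123), "same value either way");
static_assert(cvt_direct(32767) == cvt_via_i32(32767), "same value either way");
// ---- end aside ----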
43066
43067static bool needCarryOrOverflowFlag(SDValue Flags) {
43068 assert(Flags.getValueType() == MVT::i32 && "Unexpected VT!");
43069
43070 for (SDNode::use_iterator UI = Flags->use_begin(), UE = Flags->use_end();
43071 UI != UE; ++UI) {
43072 SDNode *User = *UI;
43073
43074 X86::CondCode CC;
43075 switch (User->getOpcode()) {
43076 default:
43077 // Be conservative.
43078 return true;
43079 case X86ISD::SETCC:
43080 case X86ISD::SETCC_CARRY:
43081 CC = (X86::CondCode)User->getConstantOperandVal(0);
43082 break;
43083 case X86ISD::BRCOND:
43084 CC = (X86::CondCode)User->getConstantOperandVal(2);
43085 break;
43086 case X86ISD::CMOV:
43087 CC = (X86::CondCode)User->getConstantOperandVal(2);
43088 break;
43089 }
43090
43091 switch (CC) {
43092 default: break;
43093 case X86::COND_A: case X86::COND_AE:
43094 case X86::COND_B: case X86::COND_BE:
43095 case X86::COND_O: case X86::COND_NO:
43096 case X86::COND_G: case X86::COND_GE:
43097 case X86::COND_L: case X86::COND_LE:
43098 return true;
43099 }
43100 }
43101
43102 return false;
43103}
43104
43105static bool onlyZeroFlagUsed(SDValue Flags) {
43106 assert(Flags.getValueType() == MVT::i32 && "Unexpected VT!");
43107
43108 for (SDNode::use_iterator UI = Flags->use_begin(), UE = Flags->use_end();
43109 UI != UE; ++UI) {
43110 SDNode *User = *UI;
43111
43112 unsigned CCOpNo;
43113 switch (User->getOpcode()) {
43114 default:
43115 // Be conservative.
43116 return false;
43117 case X86ISD::SETCC: CCOpNo = 0; break;
43118 case X86ISD::SETCC_CARRY: CCOpNo = 0; break;
43119 case X86ISD::BRCOND: CCOpNo = 2; break;
43120 case X86ISD::CMOV: CCOpNo = 2; break;
43121 }
43122
43123 X86::CondCode CC = (X86::CondCode)User->getConstantOperandVal(CCOpNo);
43124 if (CC != X86::COND_E && CC != X86::COND_NE)
43125 return false;
43126 }
43127
43128 return true;
43129}
43130
43131static SDValue combineCMP(SDNode *N, SelectionDAG &DAG) {
43132 // Only handle test patterns.
43133 if (!isNullConstant(N->getOperand(1)))
43134 return SDValue();
43135
43136 // If we have a CMP of a truncated binop, see if we can make a smaller binop
43137 // and use its flags directly.
43138 // TODO: Maybe we should try promoting compares that only use the zero flag
43139 // first if we can prove the upper bits with computeKnownBits?
43140 SDLoc dl(N);
43141 SDValue Op = N->getOperand(0);
43142 EVT VT = Op.getValueType();
43143
43144 // If we have a constant logical shift that's only used in a comparison
43145 // against zero, turn it into an equivalent AND. This allows turning it into
43146 // a TEST instruction later.
43147 if ((Op.getOpcode() == ISD::SRL || Op.getOpcode() == ISD::SHL) &&
43148 Op.hasOneUse() && isa<ConstantSDNode>(Op.getOperand(1)) &&
43149 onlyZeroFlagUsed(SDValue(N, 0))) {
43150 unsigned BitWidth = VT.getSizeInBits();
43151 const APInt &ShAmt = Op.getConstantOperandAPInt(1);
43152 if (ShAmt.ult(BitWidth)) { // Avoid undefined shifts.
43153 unsigned MaskBits = BitWidth - ShAmt.getZExtValue();
43154 APInt Mask = Op.getOpcode() == ISD::SRL
43155 ? APInt::getHighBitsSet(BitWidth, MaskBits)
43156 : APInt::getLowBitsSet(BitWidth, MaskBits);
43157 if (Mask.isSignedIntN(32)) {
43158 Op = DAG.getNode(ISD::AND, dl, VT, Op.getOperand(0),
43159 DAG.getConstant(Mask, dl, VT));
43160 return DAG.getNode(X86ISD::CMP, dl, MVT::i32, Op,
43161 DAG.getConstant(0, dl, VT));
43162 }
43163 }
43164 }
43165
43166 // Look for a truncate with a single use.
43167 if (Op.getOpcode() != ISD::TRUNCATE || !Op.hasOneUse())
43168 return SDValue();
43169
43170 Op = Op.getOperand(0);
43171
43172 // Arithmetic op can only have one use.
43173 if (!Op.hasOneUse())
43174 return SDValue();
43175
43176 unsigned NewOpc;
43177 switch (Op.getOpcode()) {
43178 default: return SDValue();
43179 case ISD::AND:
43180 // Skip and with constant. We have special handling for and with immediate
43181 // during isel to generate test instructions.
43182 if (isa<ConstantSDNode>(Op.getOperand(1)))
43183 return SDValue();
43184 NewOpc = X86ISD::AND;
43185 break;
43186 case ISD::OR: NewOpc = X86ISD::OR; break;
43187 case ISD::XOR: NewOpc = X86ISD::XOR; break;
43188 case ISD::ADD:
43189 // If the carry or overflow flag is used, we can't truncate.
43190 if (needCarryOrOverflowFlag(SDValue(N, 0)))
43191 return SDValue();
43192 NewOpc = X86ISD::ADD;
43193 break;
43194 case ISD::SUB:
43195 // If the carry or overflow flag is used, we can't truncate.
43196 if (needCarryOrOverflowFlag(SDValue(N, 0)))
43197 return SDValue();
43198 NewOpc = X86ISD::SUB;
43199 break;
43200 }
43201
43202 // We found an op we can narrow. Truncate its inputs.
43203 SDValue Op0 = DAG.getNode(ISD::TRUNCATE, dl, VT, Op.getOperand(0));
43204 SDValue Op1 = DAG.getNode(ISD::TRUNCATE, dl, VT, Op.getOperand(1));
43205
43206 // Use an X86-specific opcode to avoid DAG combine messing with it.
43207 SDVTList VTs = DAG.getVTList(VT, MVT::i32);
43208 Op = DAG.getNode(NewOpc, dl, VTs, Op0, Op1);
43209
43210 // For AND, keep a CMP so that we can match the test pattern.
43211 if (NewOpc == X86ISD::AND)
43212 return DAG.getNode(X86ISD::CMP, dl, MVT::i32, Op,
43213 DAG.getConstant(0, dl, VT));
43214
43215 // Return the flags.
43216 return Op.getValue(1);
43217}
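// ---- Illustrative aside (not part of X86ISelLowering.cpp) ----
// The shift-to-AND rewrite in combineCMP only has to preserve the zero test,
// so it relies on:
//   (x >> c) == 0  <=>  (x & high_mask) == 0   (high width-c bits of x)
//   (x << c) == 0  <=>  (x & low_mask)  == 0   (low  width-c bits of x)
// A standalone C++14 check of that identity; names are ours.
#include <cstdint>

constexpr bool srl_zero_equiv(uint32_t x, unsigned c) {
  uint32_t high_mask = ~0u << c;   // high 32-c bits set
  return ((x >> c) == 0) == ((x & high_mask) == 0);
}
constexpr bool shl_zero_equiv(uint32_t x, unsigned c) {
  uint32_t low_mask = ~0u >> c;    // low 32-c bits set
  return ((x << c) == 0) == ((x & low_mask) == 0);
}
static_assert(srl_zero_equiv(0x0000ffffu, 16) && srl_zero_equiv(0x00010000u, 16), "");
static_assert(shl_zero_equiv(0xffff0000u, 16) && shl_zero_equiv(0x00008000u, 16), "");
// ---- end aside ----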
43218
43219static SDValue combineX86AddSub(SDNode *N, SelectionDAG &DAG,
43220 TargetLowering::DAGCombinerInfo &DCI) {
43221 assert((X86ISD::ADD == N->getOpcode() || X86ISD::SUB == N->getOpcode()) &&
43222        "Expected X86ISD::ADD or X86ISD::SUB");
43223
43224 SDLoc DL(N);
43225 SDValue LHS = N->getOperand(0);
43226 SDValue RHS = N->getOperand(1);
43227 MVT VT = LHS.getSimpleValueType();
43228 unsigned GenericOpc = X86ISD::ADD == N->getOpcode() ? ISD::ADD : ISD::SUB;
43229
43230 // If we don't use the flag result, simplify back to a generic ADD/SUB.
43231 if (!N->hasAnyUseOfValue(1)) {
43232 SDValue Res = DAG.getNode(GenericOpc, DL, VT, LHS, RHS);
43233 return DAG.getMergeValues({Res, DAG.getConstant(0, DL, MVT::i32)}, DL);
43234 }
43235
43236 // Fold any similar generic ADD/SUB opcodes to reuse this node.
43237 auto MatchGeneric = [&](SDValue N0, SDValue N1, bool Negate) {
43238 SDValue Ops[] = {N0, N1};
43239 SDVTList VTs = DAG.getVTList(N->getValueType(0));
43240 if (SDNode *GenericAddSub = DAG.getNodeIfExists(GenericOpc, VTs, Ops)) {
43241 SDValue Op(N, 0);
43242 if (Negate)
43243 Op = DAG.getNode(ISD::SUB, DL, VT, DAG.getConstant(0, DL, VT), Op);
43244 DCI.CombineTo(GenericAddSub, Op);
43245 }
43246 };
43247 MatchGeneric(LHS, RHS, false);
43248 MatchGeneric(RHS, LHS, X86ISD::SUB == N->getOpcode());
43249
43250 return SDValue();
43251}
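// ---- Illustrative aside (not part of X86ISelLowering.cpp) ----
// The Negate case in MatchGeneric above reuses this node for the swapped
// generic SUB via the identity  b - a == 0 - (a - b). Standalone C++14 sketch;
// names are ours.
#include <cstdint>

constexpr bool swapped_sub_identity(uint32_t a, uint32_t b) {
  return b - a == 0u - (a - b);
}
static_assert(swapped_sub_identity(5u, 9u) && swapped_sub_identity(9u, 5u), "");
// ---- end aside ----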
43252
43253static SDValue combineSBB(SDNode *N, SelectionDAG &DAG) {
43254 if (SDValue Flags = combineCarryThroughADD(N->getOperand(2), DAG)) {
43255 MVT VT = N->getSimpleValueType(0);
43256 SDVTList VTs = DAG.getVTList(VT, MVT::i32);
43257 return DAG.getNode(X86ISD::SBB, SDLoc(N), VTs,
43258 N->getOperand(0), N->getOperand(1),
43259 Flags);
43260 }
43261
43262 // Fold SBB(SUB(X,Y),0,Carry) -> SBB(X,Y,Carry)
43263 // iff the flag result is dead.
43264 SDValue Op0 = N->getOperand(0);
43265 SDValue Op1 = N->getOperand(1);
43266 if (Op0.getOpcode() == ISD::SUB && isNullConstant(Op1) &&
43267 !N->hasAnyUseOfValue(1))
43268 return DAG.getNode(X86ISD::SBB, SDLoc(N), N->getVTList(), Op0.getOperand(0),
43269 Op0.getOperand(1), N->getOperand(2));
43270
43271 return SDValue();
43272}
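// ---- Illustrative aside (not part of X86ISelLowering.cpp) ----
// The SBB(SUB(X,Y),0,Carry) -> SBB(X,Y,Carry) fold above (legal only when the
// flag result is dead) is just re-association of the borrow. x86 SBB computes
// a - b - CF; standalone C++14 sketch, names are ours.
#include <cstdint>

constexpr uint32_t sbb_value(uint32_t a, uint32_t b, uint32_t cf) {
  return a - b - cf;               // value result of SBB, flags ignored
}
constexpr bool fold_holds(uint32_t x, uint32_t y, uint32_t cf) {
  return sbb_value(x - y, 0u, cf) == sbb_value(x, y, cf);
}
static_assert(fold_holds(100u, 7u, 1u) && fold_holds(0u, 0xffffffffu, 1u), "");
// ---- end aside ----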
43273
43274// Optimize RES, EFLAGS = X86ISD::ADC LHS, RHS, EFLAGS
43275static SDValue combineADC(SDNode *N, SelectionDAG &DAG,
43276 TargetLowering::DAGCombinerInfo &DCI) {
43277 // If the LHS and RHS of the ADC node are zero, then it can't overflow and
43278 // the result is either zero or one (depending on the input carry bit).
43279 // Strength reduce this down to a "set on carry" aka SETCC_CARRY&1.
43280 if (X86::isZeroNode(N->getOperand(0)) &&
43281 X86::isZeroNode(N->getOperand(1)) &&
43282 // We don't have a good way to replace an EFLAGS use, so only do this when
43283 // dead right now.
43284 SDValue(N, 1).use_empty()) {
43285 SDLoc DL(N);
43286 EVT VT = N->getValueType(0);
43287 SDValue CarryOut = DAG.getConstant(0, DL, N->getValueType(1));
43288 SDValue Res1 =
43289 DAG.getNode(ISD::AND, DL, VT,
43290 DAG.getNode(X86ISD::SETCC_CARRY, DL, VT,
43291 DAG.getTargetConstant(X86::COND_B, DL, MVT::i8),
43292 N->getOperand(2)),
43293 DAG.getConstant(1, DL, VT));
43294 return DCI.CombineTo(N, Res1, CarryOut);
43295 }
43296
43297 if (SDValue Flags = combineCarryThroughADD(N->getOperand(2), DAG)) {
43298 MVT VT = N->getSimpleValueType(0);
43299 SDVTList VTs = DAG.getVTList(VT, MVT::i32);
43300 return DAG.getNode(X86ISD::ADC, SDLoc(N), VTs,
43301 N->getOperand(0), N->getOperand(1),
43302 Flags);
43303 }
43304
43305 return SDValue();
43306}
43307
43308/// If this is an add or subtract where one operand is produced by a cmp+setcc,
43309/// then try to convert it to an ADC or SBB. This replaces TEST+SET+{ADD/SUB}
43310/// with CMP+{ADC, SBB}.
43311static SDValue combineAddOrSubToADCOrSBB(SDNode *N, SelectionDAG &DAG) {
43312 bool IsSub = N->getOpcode() == ISD::SUB;
43313 SDValue X = N->getOperand(0);
43314 SDValue Y = N->getOperand(1);
43315
43316 // If this is an add, canonicalize a zext operand to the RHS.
43317 // TODO: Incomplete? What if both sides are zexts?
43318 if (!IsSub && X.getOpcode() == ISD::ZERO_EXTEND &&
43319 Y.getOpcode() != ISD::ZERO_EXTEND)
43320 std::swap(X, Y);
43321
43322 // Look through a one-use zext.
43323 bool PeekedThroughZext = false;
43324 if (Y.getOpcode() == ISD::ZERO_EXTEND && Y.hasOneUse()) {
43325 Y = Y.getOperand(0);
43326 PeekedThroughZext = true;
43327 }
43328
43329 // If this is an add, canonicalize a setcc operand to the RHS.
43330 // TODO: Incomplete? What if both sides are setcc?
43331 // TODO: Should we allow peeking through a zext of the other operand?
43332 if (!IsSub && !PeekedThroughZext && X.getOpcode() == X86ISD::SETCC &&
43333 Y.getOpcode() != X86ISD::SETCC)
43334 std::swap(X, Y);
43335
43336 if (Y.getOpcode() != X86ISD::SETCC || !Y.hasOneUse())
43337 return SDValue();
43338
43339 SDLoc DL(N);
43340 EVT VT = N->getValueType(0);
43341 X86::CondCode CC = (X86::CondCode)Y.getConstantOperandVal(0);
43342
43343 // If X is -1 or 0, then we have an opportunity to avoid constants required in
43344 // the general case below.
43345 auto *ConstantX = dyn_cast<ConstantSDNode>(X);
43346 if (ConstantX) {
43347 if ((!IsSub && CC == X86::COND_AE && ConstantX->isAllOnesValue()) ||
43348 (IsSub && CC == X86::COND_B && ConstantX->isNullValue())) {
43349 // This is a complicated way to get -1 or 0 from the carry flag:
43350 // -1 + SETAE --> -1 + (!CF) --> CF ? -1 : 0 --> SBB %eax, %eax
43351 // 0 - SETB --> 0 - (CF) --> CF ? -1 : 0 --> SBB %eax, %eax
43352 return DAG.getNode(X86ISD::SETCC_CARRY, DL, VT,
43353 DAG.getTargetConstant(X86::COND_B, DL, MVT::i8),
43354 Y.getOperand(1));
43355 }
43356
43357 if ((!IsSub && CC == X86::COND_BE && ConstantX->isAllOnesValue()) ||
43358 (IsSub && CC == X86::COND_A && ConstantX->isNullValue())) {
43359 SDValue EFLAGS = Y->getOperand(1);
43360 if (EFLAGS.getOpcode() == X86ISD::SUB && EFLAGS.hasOneUse() &&
43361 EFLAGS.getValueType().isInteger() &&
43362 !isa<ConstantSDNode>(EFLAGS.getOperand(1))) {
43363 // Swap the operands of a SUB, and we have the same pattern as above.
43364 // -1 + SETBE (SUB A, B) --> -1 + SETAE (SUB B, A) --> SUB + SBB
43365 // 0 - SETA (SUB A, B) --> 0 - SETB (SUB B, A) --> SUB + SBB
43366 SDValue NewSub = DAG.getNode(
43367 X86ISD::SUB, SDLoc(EFLAGS), EFLAGS.getNode()->getVTList(),
43368 EFLAGS.getOperand(1), EFLAGS.getOperand(0));
43369 SDValue NewEFLAGS = SDValue(NewSub.getNode(), EFLAGS.getResNo());
43370 return DAG.getNode(X86ISD::SETCC_CARRY, DL, VT,
43371 DAG.getTargetConstant(X86::COND_B, DL, MVT::i8),
43372 NewEFLAGS);
43373 }
43374 }
43375 }
43376
43377 if (CC == X86::COND_B) {
43378 // X + SETB Z --> adc X, 0
43379 // X - SETB Z --> sbb X, 0
43380 return DAG.getNode(IsSub ? X86ISD::SBB : X86ISD::ADC, DL,
43381 DAG.getVTList(VT, MVT::i32), X,
43382 DAG.getConstant(0, DL, VT), Y.getOperand(1));
43383 }
43384
43385 if (CC == X86::COND_A) {
43386 SDValue EFLAGS = Y->getOperand(1);
43387 // Try to convert COND_A into COND_B in an attempt to facilitate
43388 // materializing "setb reg".
43389 //
43390 // Do not flip "e > c", where "c" is a constant, because Cmp instruction
43391 // cannot take an immediate as its first operand.
43392 //
43393 if (EFLAGS.getOpcode() == X86ISD::SUB && EFLAGS.getNode()->hasOneUse() &&
43394 EFLAGS.getValueType().isInteger() &&
43395 !isa<ConstantSDNode>(EFLAGS.getOperand(1))) {
43396 SDValue NewSub = DAG.getNode(X86ISD::SUB, SDLoc(EFLAGS),
43397 EFLAGS.getNode()->getVTList(),
43398 EFLAGS.getOperand(1), EFLAGS.getOperand(0));
43399 SDValue NewEFLAGS = SDValue(NewSub.getNode(), EFLAGS.getResNo());
43400 return DAG.getNode(IsSub ? X86ISD::SBB : X86ISD::ADC, DL,
43401 DAG.getVTList(VT, MVT::i32), X,
43402 DAG.getConstant(0, DL, VT), NewEFLAGS);
43403 }
43404 }
43405
43406 if (CC != X86::COND_E && CC != X86::COND_NE)
43407 return SDValue();
43408
43409 SDValue Cmp = Y.getOperand(1);
43410 if (Cmp.getOpcode() != X86ISD::CMP || !Cmp.hasOneUse() ||
43411 !X86::isZeroNode(Cmp.getOperand(1)) ||
43412 !Cmp.getOperand(0).getValueType().isInteger())
43413 return SDValue();
43414
43415 SDValue Z = Cmp.getOperand(0);
43416 EVT ZVT = Z.getValueType();
43417
43418 // If X is -1 or 0, then we have an opportunity to avoid constants required in
43419 // the general case below.
43420 if (ConstantX) {
43421 // 'neg' sets the carry flag when Z != 0, so create 0 or -1 using 'sbb' with
43422 // fake operands:
43423 // 0 - (Z != 0) --> sbb %eax, %eax, (neg Z)
43424 // -1 + (Z == 0) --> sbb %eax, %eax, (neg Z)
43425 if ((IsSub && CC == X86::COND_NE && ConstantX->isNullValue()) ||
43426 (!IsSub && CC == X86::COND_E && ConstantX->isAllOnesValue())) {
43427 SDValue Zero = DAG.getConstant(0, DL, ZVT);
43428 SDVTList X86SubVTs = DAG.getVTList(ZVT, MVT::i32);
43429 SDValue Neg = DAG.getNode(X86ISD::SUB, DL, X86SubVTs, Zero, Z);
43430 return DAG.getNode(X86ISD::SETCC_CARRY, DL, VT,
43431 DAG.getTargetConstant(X86::COND_B, DL, MVT::i8),
43432 SDValue(Neg.getNode(), 1));
43433 }
43434
43435 // cmp with 1 sets the carry flag when Z == 0, so create 0 or -1 using 'sbb'
43436 // with fake operands:
43437 // 0 - (Z == 0) --> sbb %eax, %eax, (cmp Z, 1)
43438 // -1 + (Z != 0) --> sbb %eax, %eax, (cmp Z, 1)
43439 if ((IsSub && CC == X86::COND_E && ConstantX->isNullValue()) ||
43440 (!IsSub && CC == X86::COND_NE && ConstantX->isAllOnesValue())) {
43441 SDValue One = DAG.getConstant(1, DL, ZVT);
43442 SDValue Cmp1 = DAG.getNode(X86ISD::CMP, DL, MVT::i32, Z, One);
43443 return DAG.getNode(X86ISD::SETCC_CARRY, DL, VT,
43444 DAG.getTargetConstant(X86::COND_B, DL, MVT::i8), Cmp1);
43445 }
43446 }
43447
43448 // (cmp Z, 1) sets the carry flag if Z is 0.
43449 SDValue One = DAG.getConstant(1, DL, ZVT);
43450 SDValue Cmp1 = DAG.getNode(X86ISD::CMP, DL, MVT::i32, Z, One);
43451
43452 // Add the flags type for ADC/SBB nodes.
43453 SDVTList VTs = DAG.getVTList(VT, MVT::i32);
43454
43455 // X - (Z != 0) --> sub X, (zext(setne Z, 0)) --> adc X, -1, (cmp Z, 1)
43456 // X + (Z != 0) --> add X, (zext(setne Z, 0)) --> sbb X, -1, (cmp Z, 1)
43457 if (CC == X86::COND_NE)
43458 return DAG.getNode(IsSub ? X86ISD::ADC : X86ISD::SBB, DL, VTs, X,
43459 DAG.getConstant(-1ULL, DL, VT), Cmp1);
43460
43461 // X - (Z == 0) --> sub X, (zext(sete Z, 0)) --> sbb X, 0, (cmp Z, 1)
43462 // X + (Z == 0) --> add X, (zext(sete Z, 0)) --> adc X, 0, (cmp Z, 1)
43463 return DAG.getNode(IsSub ? X86ISD::SBB : X86ISD::ADC, DL, VTs, X,
43464 DAG.getConstant(0, DL, VT), Cmp1);
43465}
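// ---- Illustrative aside (not part of X86ISelLowering.cpp) ----
// The ADC/SBB rewrites above lean on one x86 fact: CMP Z, 1 sets CF exactly
// when Z == 0 (unsigned Z < 1). With that carry input, the value identities
// used by the final two returns are:
//   adc X, -1, CF == X - (Z != 0)      sbb X, -1, CF == X + (Z != 0)
//   adc X,  0, CF == X + (Z == 0)      sbb X,  0, CF == X - (Z == 0)
// Standalone C++14 sketch of those identities; names are ours.
#include <cstdint>

constexpr uint32_t adc(uint32_t a, uint32_t b, uint32_t cf) { return a + b + cf; }
constexpr uint32_t sbb(uint32_t a, uint32_t b, uint32_t cf) { return a - b - cf; }
constexpr uint32_t cf_of_cmp_z_1(uint32_t z) { return z < 1u ? 1u : 0u; }

constexpr bool adc_sbb_identities(uint32_t x, uint32_t z) {
  uint32_t cf = cf_of_cmp_z_1(z);
  return adc(x, 0xffffffffu, cf) == x - (z != 0 ? 1u : 0u) &&
         sbb(x, 0xffffffffu, cf) == x + (z != 0 ? 1u : 0u) &&
         adc(x, 0u, cf)          == x + (z == 0 ? 1u : 0u) &&
         sbb(x, 0u, cf)          == x - (z == 0 ? 1u : 0u);
}
static_assert(adc_sbb_identities(42u, 0u) && adc_sbb_identities(42u, 7u) &&
              adc_sbb_identities(0u, 1u), "");
// ---- end aside ----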
43466
43467static SDValue combineLoopMAddPattern(SDNode *N, SelectionDAG &DAG,
43468 const X86Subtarget &Subtarget) {
43469 if (!Subtarget.hasSSE2())
43470 return SDValue();
43471
43472 EVT VT = N->getValueType(0);
43473
43474 // If the vector size is less than 128, or greater than the supported RegSize,
43475 // do not use PMADD.
43476 if (!VT.isVector() || VT.getVectorNumElements() < 8)
43477 return SDValue();
43478
43479 SDValue Op0 = N->getOperand(0);
43480 SDValue Op1 = N->getOperand(1);
43481
43482 auto UsePMADDWD = [&](SDValue Op) {
43483 ShrinkMode Mode;
43484 return Op.getOpcode() == ISD::MUL &&
43485 canReduceVMulWidth(Op.getNode(), DAG, Mode) && Mode != MULU16 &&
43486 (!Subtarget.hasSSE41() ||
43487 (Op->isOnlyUserOf(Op.getOperand(0).getNode()) &&
43488 Op->isOnlyUserOf(Op.getOperand(1).getNode())));
43489 };
43490
43491 SDValue MulOp, OtherOp;
43492 if (UsePMADDWD(Op0)) {
43493 MulOp = Op0;
43494 OtherOp = Op1;
43495 } else if (UsePMADDWD(Op1)) {
43496 MulOp = Op1;
43497 OtherOp = Op0;
43498 } else
43499 return SDValue();
43500
43501 SDLoc DL(N);
43502 EVT ReducedVT = EVT::getVectorVT(*DAG.getContext(), MVT::i16,
43503 VT.getVectorNumElements());
43504 EVT MAddVT = EVT::getVectorVT(*DAG.getContext(), MVT::i32,
43505 VT.getVectorNumElements() / 2);
43506
43507 // Shrink the operands of mul.
43508 SDValue N0 = DAG.getNode(ISD::TRUNCATE, DL, ReducedVT, MulOp->getOperand(0));
43509 SDValue N1 = DAG.getNode(ISD::TRUNCATE, DL, ReducedVT, MulOp->getOperand(1));
43510
43511 // Madd vector size is half of the original vector size
43512 auto PMADDWDBuilder = [](SelectionDAG &DAG, const SDLoc &DL,
43513 ArrayRef<SDValue> Ops) {
43514 MVT OpVT = MVT::getVectorVT(MVT::i32, Ops[0].getValueSizeInBits() / 32);
43515 return DAG.getNode(X86ISD::VPMADDWD, DL, OpVT, Ops);
43516 };
43517 SDValue Madd = SplitOpsAndApply(DAG, Subtarget, DL, MAddVT, { N0, N1 },
43518 PMADDWDBuilder);
43519 // Fill the rest of the output with 0
43520 SDValue Zero = DAG.getConstant(0, DL, Madd.getSimpleValueType());
43521 SDValue Concat = DAG.getNode(ISD::CONCAT_VECTORS, DL, VT, Madd, Zero);
43522
43523 // Preserve the reduction flag on the ADD. We may need to revisit for the
43524 // other operand.
43525 SDNodeFlags Flags;
43526 Flags.setVectorReduction(true);
43527 return DAG.getNode(ISD::ADD, DL, VT, Concat, OtherOp, Flags);
43528}
43529
43530static SDValue combineLoopSADPattern(SDNode *N, SelectionDAG &DAG,
43531 const X86Subtarget &Subtarget) {
43532 if (!Subtarget.hasSSE2())
43533 return SDValue();
43534
43535 SDLoc DL(N);
43536 EVT VT = N->getValueType(0);
43537
43538 // TODO: There's nothing special about i32, any integer type above i16 should
43539 // work just as well.
43540 if (!VT.isVector() || !VT.isSimple() ||
43541 !(VT.getVectorElementType() == MVT::i32))
43542 return SDValue();
43543
43544 unsigned RegSize = 128;
43545 if (Subtarget.useBWIRegs())
43546 RegSize = 512;
43547 else if (Subtarget.hasAVX())
43548 RegSize = 256;
43549
43550 // We only handle v16i32 for SSE2 / v32i32 for AVX / v64i32 for AVX512.
43551 // TODO: We should be able to handle larger vectors by splitting them before
43552 // feeding them into several SADs, and then reducing over those.
43553 if (VT.getSizeInBits() / 4 > RegSize)
43554 return SDValue();
43555
43556 // We know N is a reduction add. To match SAD, we need one of the operands to
43557 // be an ABS.
43558 SDValue AbsOp = N->getOperand(0);
43559 SDValue OtherOp = N->getOperand(1);
43560 if (AbsOp.getOpcode() != ISD::ABS)
43561 std::swap(AbsOp, OtherOp);
43562 if (AbsOp.getOpcode() != ISD::ABS)
43563 return SDValue();
43564
43565 // Check whether we have an abs-diff pattern feeding into the select.
43566 SDValue SadOp0, SadOp1;
43567 if(!detectZextAbsDiff(AbsOp, SadOp0, SadOp1))
43568 return SDValue();
43569
43570 // SAD pattern detected. Now build a SAD instruction and an addition for
43571 // reduction. Note that the number of elements of the result of SAD is less
43572 // than the number of elements of its input. Therefore, we could only update
43573 // part of elements in the reduction vector.
43574 SDValue Sad = createPSADBW(DAG, SadOp0, SadOp1, DL, Subtarget);
43575
43576 // The output of PSADBW is a vector of i64.
43577 // We need to turn the vector of i64 into a vector of i32.
43578 // If the reduction vector is at least as wide as the psadbw result, just
43579 // bitcast. If it's narrower, which can only occur for v2i32, bits 127:16 of
43580 // the PSADBW will be zero. If we promote/narrow vectors, truncate the v2i64
43581 // result to v2i32 which will be removed by type legalization. If we widen
43582 // narrow vectors then we bitcast to v4i32 and extract v2i32.
43583 MVT ResVT = MVT::getVectorVT(MVT::i32, Sad.getValueSizeInBits() / 32);
43584 Sad = DAG.getNode(ISD::BITCAST, DL, ResVT, Sad);
43585
43586 if (VT.getSizeInBits() > ResVT.getSizeInBits()) {
43587 // Fill the upper elements with zero to match the add width.
43588 assert(VT.getSizeInBits() % ResVT.getSizeInBits() == 0 && "Unexpected VTs");
43589 unsigned NumConcats = VT.getSizeInBits() / ResVT.getSizeInBits();
43590 SmallVector<SDValue, 4> Ops(NumConcats, DAG.getConstant(0, DL, ResVT));
43591 Ops[0] = Sad;
43592 Sad = DAG.getNode(ISD::CONCAT_VECTORS, DL, VT, Ops);
43593 } else if (VT.getSizeInBits() < ResVT.getSizeInBits()) {
43594 Sad = DAG.getNode(ISD::EXTRACT_SUBVECTOR, DL, VT, Sad,
43595 DAG.getIntPtrConstant(0, DL));
43596 }
43597
43598 // Preserve the reduction flag on the ADD. We may need to revisit for the
43599 // other operand.
43600 SDNodeFlags Flags;
43601 Flags.setVectorReduction(true);
43602 return DAG.getNode(ISD::ADD, DL, VT, Sad, OtherOp, Flags);
43603}
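// ---- Illustrative aside (not part of X86ISelLowering.cpp) ----
// What the PSADBW built above computes, as a scalar reference: each 64-bit
// lane holds the sum of absolute differences of eight unsigned bytes, so only
// the low 16 bits of a lane can be nonzero. Standalone C++14 sketch; names are
// ours.
#include <cstdint>

constexpr uint64_t absdiff(uint8_t a, uint8_t b) { return a > b ? a - b : b - a; }

constexpr uint64_t psadbw_lane(const uint8_t (&a)[8], const uint8_t (&b)[8]) {
  uint64_t sum = 0;
  for (int i = 0; i != 8; ++i)
    sum += absdiff(a[i], b[i]);
  return sum;                      // at most 8 * 255 = 2040
}
constexpr uint8_t kA[8] = {1, 2, 3, 4, 250, 6, 7, 8};
constexpr uint8_t kB[8] = {8, 7, 6, 5,   4, 3, 2, 1};
static_assert(psadbw_lane(kA, kB) == 7 + 5 + 3 + 1 + 246 + 3 + 5 + 7, "");
// ---- end aside ----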
43604
43605static SDValue matchPMADDWD(SelectionDAG &DAG, SDValue Op0, SDValue Op1,
43606 const SDLoc &DL, EVT VT,
43607 const X86Subtarget &Subtarget) {
43608 // Example of pattern we try to detect:
43609 // t := (v8i32 mul (sext (v8i16 x0), (sext (v8i16 x1))))
43610 //(add (build_vector (extract_elt t, 0),
43611 // (extract_elt t, 2),
43612 // (extract_elt t, 4),
43613 // (extract_elt t, 6)),
43614 // (build_vector (extract_elt t, 1),
43615 // (extract_elt t, 3),
43616 // (extract_elt t, 5),
43617 // (extract_elt t, 7)))
43618
43619 if (!Subtarget.hasSSE2())
43620 return SDValue();
43621
43622 if (Op0.getOpcode() != ISD::BUILD_VECTOR ||
43623 Op1.getOpcode() != ISD::BUILD_VECTOR)
43624 return SDValue();
43625
43626 if (!VT.isVector() || VT.getVectorElementType() != MVT::i32 ||
43627 VT.getVectorNumElements() < 4 ||
43628 !isPowerOf2_32(VT.getVectorNumElements()))
43629 return SDValue();
43630
43631 // Check if one of Op0,Op1 is of the form:
43632 // (build_vector (extract_elt Mul, 0),
43633 // (extract_elt Mul, 2),
43634 // (extract_elt Mul, 4),
43635 // ...
43636 // the other is of the form:
43637 // (build_vector (extract_elt Mul, 1),
43638 // (extract_elt Mul, 3),
43639 // (extract_elt Mul, 5),
43640 // ...
43641 // and identify Mul.
43642 SDValue Mul;
43643 for (unsigned i = 0, e = VT.getVectorNumElements(); i != e; i += 2) {
43644 SDValue Op0L = Op0->getOperand(i), Op1L = Op1->getOperand(i),
43645 Op0H = Op0->getOperand(i + 1), Op1H = Op1->getOperand(i + 1);
43646 // TODO: Be more tolerant to undefs.
43647 if (Op0L.getOpcode() != ISD::EXTRACT_VECTOR_ELT ||
43648 Op1L.getOpcode() != ISD::EXTRACT_VECTOR_ELT ||
43649 Op0H.getOpcode() != ISD::EXTRACT_VECTOR_ELT ||
43650 Op1H.getOpcode() != ISD::EXTRACT_VECTOR_ELT)
43651 return SDValue();
43652 auto *Const0L = dyn_cast<ConstantSDNode>(Op0L->getOperand(1));
43653 auto *Const1L = dyn_cast<ConstantSDNode>(Op1L->getOperand(1));
43654 auto *Const0H = dyn_cast<ConstantSDNode>(Op0H->getOperand(1));
43655 auto *Const1H = dyn_cast<ConstantSDNode>(Op1H->getOperand(1));
43656 if (!Const0L || !Const1L || !Const0H || !Const1H)
43657 return SDValue();
43658 unsigned Idx0L = Const0L->getZExtValue(), Idx1L = Const1L->getZExtValue(),
43659 Idx0H = Const0H->getZExtValue(), Idx1H = Const1H->getZExtValue();
43660 // Commutativity of mul allows factors of a product to reorder.
43661 if (Idx0L > Idx1L)
43662 std::swap(Idx0L, Idx1L);
43663 if (Idx0H > Idx1H)
43664 std::swap(Idx0H, Idx1H);
43665 // Commutativity of add allows pairs of factors to reorder.
43666 if (Idx0L > Idx0H) {
43667 std::swap(Idx0L, Idx0H);
43668 std::swap(Idx1L, Idx1H);
43669 }
43670 if (Idx0L != 2 * i || Idx1L != 2 * i + 1 || Idx0H != 2 * i + 2 ||
43671 Idx1H != 2 * i + 3)
43672 return SDValue();
43673 if (!Mul) {
43674 // First time an extract_elt's source vector is visited. Must be a MUL
43675 // with 2X number of vector elements than the BUILD_VECTOR.
43676 // Both extracts must be from same MUL.
43677 Mul = Op0L->getOperand(0);
43678 if (Mul->getOpcode() != ISD::MUL ||
43679 Mul.getValueType().getVectorNumElements() != 2 * e)
43680 return SDValue();
43681 }
43682 // Check that the extract is from the same MUL previously seen.
43683 if (Mul != Op0L->getOperand(0) || Mul != Op1L->getOperand(0) ||
43684 Mul != Op0H->getOperand(0) || Mul != Op1H->getOperand(0))
43685 return SDValue();
43686 }
43687
43688 // Check if the Mul source can be safely shrunk.
43689 ShrinkMode Mode;
43690 if (!canReduceVMulWidth(Mul.getNode(), DAG, Mode) || Mode == MULU16)
43691 return SDValue();
43692
43693 auto PMADDBuilder = [](SelectionDAG &DAG, const SDLoc &DL,
43694 ArrayRef<SDValue> Ops) {
43695 // Shrink by adding truncate nodes and let DAGCombine fold with the
43696 // sources.
43697 EVT InVT = Ops[0].getValueType();
43698 assert(InVT.getScalarType() == MVT::i32 &&
43699        "Unexpected scalar element type");
43700 assert(InVT == Ops[1].getValueType() && "Operands' types mismatch");
43701 EVT ResVT = EVT::getVectorVT(*DAG.getContext(), MVT::i32,
43702 InVT.getVectorNumElements() / 2);
43703 EVT TruncVT = EVT::getVectorVT(*DAG.getContext(), MVT::i16,
43704 InVT.getVectorNumElements());
43705 return DAG.getNode(X86ISD::VPMADDWD, DL, ResVT,
43706 DAG.getNode(ISD::TRUNCATE, DL, TruncVT, Ops[0]),
43707 DAG.getNode(ISD::TRUNCATE, DL, TruncVT, Ops[1]));
43708 };
43709 return SplitOpsAndApply(DAG, Subtarget, DL, VT,
43710 { Mul.getOperand(0), Mul.getOperand(1) },
43711 PMADDBuilder);
43712}
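// ---- Illustrative aside (not part of X86ISelLowering.cpp) ----
// What one output element of the VPMADDWD built above computes, as a scalar
// reference: each i32 result is a[2i]*b[2i] + a[2i+1]*b[2i+1] over
// sign-extended i16 inputs. The test values below avoid the lone corner case
// where the i32 sum wraps (-32768 * -32768 in both products). Standalone
// C++14; names are ours.
#include <cstdint>

constexpr int32_t pmaddwd_elt(int16_t a0, int16_t a1, int16_t b0, int16_t b1) {
  return static_cast<int32_t>(a0) * b0 + static_cast<int32_t>(a1) * b1;
}
static_assert(pmaddwd_elt(3, -4, 5, 6) == 3 * 5 + (-4) * 6, "");
static_assert(pmaddwd_elt(1000, 2000, 3000, 4000) == 3000000 + 8000000, "");
// ---- end aside ----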
43713
43714// Attempt to turn this pattern into PMADDWD.
43715// (mul (add (sext (build_vector)), (sext (build_vector))),
43716// (add (sext (build_vector)), (sext (build_vector)))
43717static SDValue matchPMADDWD_2(SelectionDAG &DAG, SDValue N0, SDValue N1,
43718 const SDLoc &DL, EVT VT,
43719 const X86Subtarget &Subtarget) {
43720 if (!Subtarget.hasSSE2())
43721 return SDValue();
43722
43723 if (N0.getOpcode() != ISD::MUL || N1.getOpcode() != ISD::MUL)
43724 return SDValue();
43725
43726 if (!VT.isVector() || VT.getVectorElementType() != MVT::i32 ||
43727 VT.getVectorNumElements() < 4 ||
43728 !isPowerOf2_32(VT.getVectorNumElements()))
43729 return SDValue();
43730
43731 SDValue N00 = N0.getOperand(0);
43732 SDValue N01 = N0.getOperand(1);
43733 SDValue N10 = N1.getOperand(0);
43734 SDValue N11 = N1.getOperand(1);
43735
43736 // All inputs need to be sign extends.
43737 // TODO: Support ZERO_EXTEND from known positive?
43738 if (N00.getOpcode() != ISD::SIGN_EXTEND ||
43739 N01.getOpcode() != ISD::SIGN_EXTEND ||
43740 N10.getOpcode() != ISD::SIGN_EXTEND ||
43741 N11.getOpcode() != ISD::SIGN_EXTEND)
43742 return SDValue();
43743
43744 // Peek through the extends.
43745 N00 = N00.getOperand(0);
43746 N01 = N01.getOperand(0);
43747 N10 = N10.getOperand(0);
43748 N11 = N11.getOperand(0);
43749
43750 // Must be extending from vXi16.
43751 EVT InVT = N00.getValueType();
43752 if (InVT.getVectorElementType() != MVT::i16 || N01.getValueType() != InVT ||
43753 N10.getValueType() != InVT || N11.getValueType() != InVT)
43754 return SDValue();
43755
43756 // All inputs should be build_vectors.
43757 if (N00.getOpcode() != ISD::BUILD_VECTOR ||
43758 N01.getOpcode() != ISD::BUILD_VECTOR ||
43759 N10.getOpcode() != ISD::BUILD_VECTOR ||
43760 N11.getOpcode() != ISD::BUILD_VECTOR)
43761 return SDValue();
43762
43763 // For each element, we need to ensure we have an odd element from one vector
43764 // multiplied by the odd element of another vector and the even element from
43765 // one of the same vectors being multiplied by the even element from the
43766 // other vector. So we need to make sure for each element i, this operator
43767 // is being performed:
43768 // A[2 * i] * B[2 * i] + A[2 * i + 1] * B[2 * i + 1]
43769 SDValue In0, In1;
43770 for (unsigned i = 0; i != N00.getNumOperands(); ++i) {
43771 SDValue N00Elt = N00.getOperand(i);
43772 SDValue N01Elt = N01.getOperand(i);
43773 SDValue N10Elt = N10.getOperand(i);
43774 SDValue N11Elt = N11.getOperand(i);
43775 // TODO: Be more tolerant to undefs.
43776 if (N00Elt.getOpcode() != ISD::EXTRACT_VECTOR_ELT ||
43777 N01Elt.getOpcode() != ISD::EXTRACT_VECTOR_ELT ||
43778 N10Elt.getOpcode() != ISD::EXTRACT_VECTOR_ELT ||
43779 N11Elt.getOpcode() != ISD::EXTRACT_VECTOR_ELT)
43780 return SDValue();
43781 auto *ConstN00Elt = dyn_cast<ConstantSDNode>(N00Elt.getOperand(1));
43782 auto *ConstN01Elt = dyn_cast<ConstantSDNode>(N01Elt.getOperand(1));
43783 auto *ConstN10Elt = dyn_cast<ConstantSDNode>(N10Elt.getOperand(1));
43784 auto *ConstN11Elt = dyn_cast<ConstantSDNode>(N11Elt.getOperand(1));
43785 if (!ConstN00Elt || !ConstN01Elt || !ConstN10Elt || !ConstN11Elt)
43786 return SDValue();
43787 unsigned IdxN00 = ConstN00Elt->getZExtValue();
43788 unsigned IdxN01 = ConstN01Elt->getZExtValue();
43789 unsigned IdxN10 = ConstN10Elt->getZExtValue();
43790 unsigned IdxN11 = ConstN11Elt->getZExtValue();
43791 // Add is commutative so indices can be reordered.
43792 if (IdxN00 > IdxN10) {
43793 std::swap(IdxN00, IdxN10);
43794 std::swap(IdxN01, IdxN11);
43795 }
43796 // N0 indices must be the even element. N1 indices must be the next odd element.
43797 if (IdxN00 != 2 * i || IdxN10 != 2 * i + 1 ||
43798 IdxN01 != 2 * i || IdxN11 != 2 * i + 1)
43799 return SDValue();
43800 SDValue N00In = N00Elt.getOperand(0);
43801 SDValue N01In = N01Elt.getOperand(0);
43802 SDValue N10In = N10Elt.getOperand(0);
43803 SDValue N11In = N11Elt.getOperand(0);
43804 // First time we find an input capture it.
43805 if (!In0) {
43806 In0 = N00In;
43807 In1 = N01In;
43808 }
43809 // Mul is commutative so the input vectors can be in any order.
43810 // Canonicalize to make the compares easier.
43811 if (In0 != N00In)
43812 std::swap(N00In, N01In);
43813 if (In0 != N10In)
43814 std::swap(N10In, N11In);
43815 if (In0 != N00In || In1 != N01In || In0 != N10In || In1 != N11In)
43816 return SDValue();
43817 }
43818
43819 auto PMADDBuilder = [](SelectionDAG &DAG, const SDLoc &DL,
43820 ArrayRef<SDValue> Ops) {
43821 // Shrink by adding truncate nodes and let DAGCombine fold with the
43822 // sources.
43823 EVT OpVT = Ops[0].getValueType();
43824 assert(OpVT.getScalarType() == MVT::i16 &&
43825        "Unexpected scalar element type");
43826 assert(OpVT == Ops[1].getValueType() && "Operands' types mismatch");
43827 EVT ResVT = EVT::getVectorVT(*DAG.getContext(), MVT::i32,
43828 OpVT.getVectorNumElements() / 2);
43829 return DAG.getNode(X86ISD::VPMADDWD, DL, ResVT, Ops[0], Ops[1]);
43830 };
43831 return SplitOpsAndApply(DAG, Subtarget, DL, VT, { In0, In1 },
43832 PMADDBuilder);
43833}
43834
43835static SDValue combineAdd(SDNode *N, SelectionDAG &DAG,
43836 TargetLowering::DAGCombinerInfo &DCI,
43837 const X86Subtarget &Subtarget) {
43838 const SDNodeFlags Flags = N->getFlags();
43839 if (Flags.hasVectorReduction()) {
43840 if (SDValue Sad = combineLoopSADPattern(N, DAG, Subtarget))
43841 return Sad;
43842 if (SDValue MAdd = combineLoopMAddPattern(N, DAG, Subtarget))
43843 return MAdd;
43844 }
43845 EVT VT = N->getValueType(0);
43846 SDValue Op0 = N->getOperand(0);
43847 SDValue Op1 = N->getOperand(1);
43848
43849 if (SDValue MAdd = matchPMADDWD(DAG, Op0, Op1, SDLoc(N), VT, Subtarget))
43850 return MAdd;
43851 if (SDValue MAdd = matchPMADDWD_2(DAG, Op0, Op1, SDLoc(N), VT, Subtarget))
43852 return MAdd;
43853
43854 // Try to synthesize horizontal adds from adds of shuffles.
43855 if ((VT == MVT::v8i16 || VT == MVT::v4i32 || VT == MVT::v16i16 ||
43856 VT == MVT::v8i32) &&
43857 Subtarget.hasSSSE3() &&
43858 isHorizontalBinOp(Op0, Op1, DAG, Subtarget, true)) {
43859 auto HADDBuilder = [](SelectionDAG &DAG, const SDLoc &DL,
43860 ArrayRef<SDValue> Ops) {
43861 return DAG.getNode(X86ISD::HADD, DL, Ops[0].getValueType(), Ops);
43862 };
43863 return SplitOpsAndApply(DAG, Subtarget, SDLoc(N), VT, {Op0, Op1},
43864 HADDBuilder);
43865 }
43866
43867 // If vectors of i1 are legal, turn (add (zext (vXi1 X)), Y) into
43868 // (sub Y, (sext (vXi1 X))).
43869 // FIXME: We have the (sub Y, (zext (vXi1 X))) -> (add (sext (vXi1 X)), Y) in
43870 // generic DAG combine without a legal type check, but adding this there
43871 // caused regressions.
43872 if (VT.isVector()) {
43873 const TargetLowering &TLI = DAG.getTargetLoweringInfo();
43874 if (Op0.getOpcode() == ISD::ZERO_EXTEND &&
43875 Op0.getOperand(0).getValueType().getVectorElementType() == MVT::i1 &&
43876 TLI.isTypeLegal(Op0.getOperand(0).getValueType())) {
43877 SDLoc DL(N);
43878 SDValue SExt = DAG.getNode(ISD::SIGN_EXTEND, DL, VT, Op0.getOperand(0));
43879 return DAG.getNode(ISD::SUB, DL, VT, Op1, SExt);
43880 }
43881
43882 if (Op1.getOpcode() == ISD::ZERO_EXTEND &&
43883 Op1.getOperand(0).getValueType().getVectorElementType() == MVT::i1 &&
43884 TLI.isTypeLegal(Op1.getOperand(0).getValueType())) {
43885 SDLoc DL(N);
43886 SDValue SExt = DAG.getNode(ISD::SIGN_EXTEND, DL, VT, Op1.getOperand(0));
43887 return DAG.getNode(ISD::SUB, DL, VT, Op0, SExt);
43888 }
43889 }
43890
43891 return combineAddOrSubToADCOrSBB(N, DAG);
43892}
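// ---- Illustrative aside (not part of X86ISelLowering.cpp) ----
// The (add X, (zext vXi1 B)) -> (sub X, (sext vXi1 B)) rewrite above uses that
// for a 1-bit value, sign extension yields the negation of zero extension:
// 0 stays 0 and 1 becomes -1 (all ones). Standalone C++14 sketch; names are
// ours.
#include <cstdint>

constexpr bool zext_vs_sext(uint32_t x, bool b) {
  uint32_t zext = b ? 1u : 0u;
  uint32_t sext = b ? 0xffffffffu : 0u;    // all ones == -1
  return x + zext == x - sext;
}
static_assert(zext_vs_sext(41u, true) && zext_vs_sext(41u, false), "");
// ---- end aside ----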
43893
43894static SDValue combineSubToSubus(SDNode *N, SelectionDAG &DAG,
43895 const X86Subtarget &Subtarget) {
43896 SDValue Op0 = N->getOperand(0);
43897 SDValue Op1 = N->getOperand(1);
43898 EVT VT = N->getValueType(0);
43899
43900 if (!VT.isVector())
43901 return SDValue();
43902
43903 // PSUBUS is supported, starting from SSE2, but truncation for v8i32
43904 // is only worth it with SSSE3 (PSHUFB).
43905 EVT EltVT = VT.getVectorElementType();
43906 if (!(Subtarget.hasSSE2() && (EltVT == MVT::i8 || EltVT == MVT::i16)) &&
43907 !(Subtarget.hasSSSE3() && (VT == MVT::v8i32 || VT == MVT::v8i64)) &&
43908 !(Subtarget.useBWIRegs() && (VT == MVT::v16i32)))
43909 return SDValue();
43910
43911 SDValue SubusLHS, SubusRHS;
43912 // Try to find umax(a,b) - b or a - umin(a,b) patterns;
43913 // they may be converted to subus(a,b).
43914 // TODO: Need to add IR canonicalization for this code.
43915 if (Op0.getOpcode() == ISD::UMAX) {
43916 SubusRHS = Op1;
43917 SDValue MaxLHS = Op0.getOperand(0);
43918 SDValue MaxRHS = Op0.getOperand(1);
43919 if (MaxLHS == Op1)
43920 SubusLHS = MaxRHS;
43921 else if (MaxRHS == Op1)
43922 SubusLHS = MaxLHS;
43923 else
43924 return SDValue();
43925 } else if (Op1.getOpcode() == ISD::UMIN) {
43926 SubusLHS = Op0;
43927 SDValue MinLHS = Op1.getOperand(0);
43928 SDValue MinRHS = Op1.getOperand(1);
43929 if (MinLHS == Op0)
43930 SubusRHS = MinRHS;
43931 else if (MinRHS == Op0)
43932 SubusRHS = MinLHS;
43933 else
43934 return SDValue();
43935 } else
43936 return SDValue();
43937
43938 // PSUBUS doesn't support v8i32/v8i64/v16i32, but it can be enabled with
43939 // special preprocessing in some cases.
43940 if (EltVT == MVT::i8 || EltVT == MVT::i16)
43941 return DAG.getNode(ISD::USUBSAT, SDLoc(N), VT, SubusLHS, SubusRHS);
43942
43943 assert((VT == MVT::v8i32 || VT == MVT::v16i32 || VT == MVT::v8i64) &&
43944        "Unexpected VT!");
43945
43946 // The special preprocessing case can only be applied
43947 // if the value was zero extended from 16 bits,
43948 // so we require the upper 16 bits to be zero for 32-bit
43949 // values, or the upper 48 bits for 64-bit values.
43950 KnownBits Known = DAG.computeKnownBits(SubusLHS);
43951 unsigned NumZeros = Known.countMinLeadingZeros();
43952 if ((VT == MVT::v8i64 && NumZeros < 48) || NumZeros < 16)
43953 return SDValue();
43954
43955 EVT ExtType = SubusLHS.getValueType();
43956 EVT ShrinkedType;
43957 if (VT == MVT::v8i32 || VT == MVT::v8i64)
43958 ShrinkedType = MVT::v8i16;
43959 else
43960 ShrinkedType = NumZeros >= 24 ? MVT::v16i8 : MVT::v16i16;
43961
43962 // If SubusLHS is zero extended, truncate SubusRHS to its size:
43963 // SubusRHS = umin(0xFFF.., SubusRHS).
43964 SDValue SaturationConst =
43965 DAG.getConstant(APInt::getLowBitsSet(ExtType.getScalarSizeInBits(),
43966 ShrinkedType.getScalarSizeInBits()),
43967 SDLoc(SubusLHS), ExtType);
43968 SDValue UMin = DAG.getNode(ISD::UMIN, SDLoc(SubusLHS), ExtType, SubusRHS,
43969 SaturationConst);
43970 SDValue NewSubusLHS =
43971 DAG.getZExtOrTrunc(SubusLHS, SDLoc(SubusLHS), ShrinkedType);
43972 SDValue NewSubusRHS = DAG.getZExtOrTrunc(UMin, SDLoc(SubusRHS), ShrinkedType);
43973 SDValue Psubus = DAG.getNode(ISD::USUBSAT, SDLoc(N), ShrinkedType,
43974 NewSubusLHS, NewSubusRHS);
43975
43976 // Zero extend the result; it may be used somewhere as 32 bit.
43977 // If not, the zext and the following trunc will shrink away.
43978 return DAG.getZExtOrTrunc(Psubus, SDLoc(N), ExtType);
43979}
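// ---- Illustrative aside (not part of X86ISelLowering.cpp) ----
// Both patterns matched by combineSubToSubus reduce to an unsigned saturating
// subtract:  umax(a,b) - b == usubsat(a,b)  and  a - umin(a,b) == usubsat(a,b).
// Standalone scalar C++14 sketch of those identities; names are ours.
#include <algorithm>
#include <cstdint>

constexpr uint16_t usubsat(uint16_t a, uint16_t b) {
  return a > b ? static_cast<uint16_t>(a - b) : static_cast<uint16_t>(0);
}
constexpr bool subus_identities(uint16_t a, uint16_t b) {
  return static_cast<uint16_t>(std::max(a, b) - b) == usubsat(a, b) &&
         static_cast<uint16_t>(a - std::min(a, b)) == usubsat(a, b);
}
static_assert(subus_identities(10, 3) && subus_identities(3, 10) &&
              subus_identities(7, 7), "");
// ---- end aside ----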
43980
43981static SDValue combineSub(SDNode *N, SelectionDAG &DAG,
43982 TargetLowering::DAGCombinerInfo &DCI,
43983 const X86Subtarget &Subtarget) {
43984 SDValue Op0 = N->getOperand(0);
43985 SDValue Op1 = N->getOperand(1);
43986
43987 // X86 can't encode an immediate LHS of a sub. See if we can push the
43988 // negation into a preceding instruction.
43989 if (ConstantSDNode *C = dyn_cast<ConstantSDNode>(Op0)) {
43990 // If the RHS of the sub is a XOR with one use and a constant, invert the
43991 // immediate. Then add one to the LHS of the sub so we can turn
43992 // X-Y -> X+~Y+1, saving one register.
43993 if (Op1->hasOneUse() && Op1.getOpcode() == ISD::XOR &&
43994 isa<ConstantSDNode>(Op1.getOperand(1))) {
43995 const APInt &XorC = Op1.getConstantOperandAPInt(1);
43996 EVT VT = Op0.getValueType();
43997 SDValue NewXor = DAG.getNode(ISD::XOR, SDLoc(Op1), VT,
43998 Op1.getOperand(0),
43999 DAG.getConstant(~XorC, SDLoc(Op1), VT));
44000 return DAG.getNode(ISD::ADD, SDLoc(N), VT, NewXor,
44001 DAG.getConstant(C->getAPIntValue() + 1, SDLoc(N), VT));
44002 }
44003 }
44004
44005 // Try to synthesize horizontal subs from subs of shuffles.
44006 EVT VT = N->getValueType(0);
44007 if ((VT == MVT::v8i16 || VT == MVT::v4i32 || VT == MVT::v16i16 ||
44008 VT == MVT::v8i32) &&
44009 Subtarget.hasSSSE3() &&
44010 isHorizontalBinOp(Op0, Op1, DAG, Subtarget, false)) {
44011 auto HSUBBuilder = [](SelectionDAG &DAG, const SDLoc &DL,
44012 ArrayRef<SDValue> Ops) {
44013 return DAG.getNode(X86ISD::HSUB, DL, Ops[0].getValueType(), Ops);
44014 };
44015 return SplitOpsAndApply(DAG, Subtarget, SDLoc(N), VT, {Op0, Op1},
44016 HSUBBuilder);
44017 }
44018
44019 // Try to create PSUBUS if SUB's argument is max/min
44020 if (SDValue V = combineSubToSubus(N, DAG, Subtarget))
44021 return V;
44022
44023 return combineAddOrSubToADCOrSBB(N, DAG);
44024}
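// ---- Illustrative aside (not part of X86ISelLowering.cpp) ----
// The constant-LHS rewrite in combineSub is the two's complement identity
//   X - Y == X + ~Y + 1,
// which lets the +1 be folded into the constant X so no immediate ends up on
// the LHS of the SUB. Standalone C++14 sketch; names are ours.
#include <cstdint>

constexpr bool sub_as_add_not(uint32_t x, uint32_t y) {
  return x - y == x + ~y + 1u;
}
static_assert(sub_as_add_not(0u, 1u) && sub_as_add_not(100u, 42u) &&
              sub_as_add_not(0xdeadbeefu, 0xffffffffu), "");
// ---- end aside ----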
44025
44026static SDValue combineVectorCompare(SDNode *N, SelectionDAG &DAG,
44027 const X86Subtarget &Subtarget) {
44028 MVT VT = N->getSimpleValueType(0);
44029 SDLoc DL(N);
44030
44031 if (N->getOperand(0) == N->getOperand(1)) {
44032 if (N->getOpcode() == X86ISD::PCMPEQ)
44033 return DAG.getConstant(-1, DL, VT);
44034 if (N->getOpcode() == X86ISD::PCMPGT)
44035 return DAG.getConstant(0, DL, VT);
44036 }
44037
44038 return SDValue();
44039}
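// ---- Illustrative aside (not part of X86ISelLowering.cpp) ----
// With identical operands, an element-wise equality compare is always true
// (all-ones mask) and a signed greater-than compare is always false (zero
// mask), which is all the fold above uses. Standalone scalar sketch; names are
// ours.
constexpr int cmpeq_mask(int x, int y) { return x == y ? -1 : 0; }
constexpr int cmpgt_mask(int x, int y) { return x > y ? -1 : 0; }
static_assert(cmpeq_mask(5, 5) == -1 && cmpgt_mask(5, 5) == 0, "");
// ---- end aside ----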
44040
44041/// Helper that combines an array of subvector ops as if they were the operands
44042/// of a ISD::CONCAT_VECTORS node, but may have come from another source (e.g.
44043/// ISD::INSERT_SUBVECTOR). The ops are assumed to be of the same type.
44044static SDValue combineConcatVectorOps(const SDLoc &DL, MVT VT,
44045 ArrayRef<SDValue> Ops, SelectionDAG &DAG,
44046 TargetLowering::DAGCombinerInfo &DCI,
44047 const X86Subtarget &Subtarget) {
44048 assert(Subtarget.hasAVX() && "AVX assumed for concat_vectors");
44049
44050 if (llvm::all_of(Ops, [](SDValue Op) { return Op.isUndef(); }))
44051 return DAG.getUNDEF(VT);
44052
44053 if (llvm::all_of(Ops, [](SDValue Op) {
44054 return ISD::isBuildVectorAllZeros(Op.getNode());
44055 }))
44056 return getZeroVector(VT, Subtarget, DAG, DL);
44057
44058 SDValue Op0 = Ops[0];
44059
44060 // Fold subvector loads into one.
44061 // If needed, look through bitcasts to get to the load.
44062 if (auto *FirstLd = dyn_cast<LoadSDNode>(peekThroughBitcasts(Op0))) {
44063 bool Fast;
44064 const X86TargetLowering *TLI = Subtarget.getTargetLowering();
44065 if (TLI->allowsMemoryAccess(*DAG.getContext(), DAG.getDataLayout(), VT,
44066 *FirstLd->getMemOperand(), &Fast) &&
44067 Fast) {
44068 if (SDValue Ld =
44069 EltsFromConsecutiveLoads(VT, Ops, DL, DAG, Subtarget, false))
44070 return Ld;
44071 }
44072 }
44073
44074 // Repeated subvectors.
44075 if (llvm::all_of(Ops, [Op0](SDValue Op) { return Op == Op0; })) {
44076 // If this broadcast/subv_broadcast is inserted into both halves, use a
44077 // larger broadcast/subv_broadcast.
44078 if (Op0.getOpcode() == X86ISD::VBROADCAST ||
44079 Op0.getOpcode() == X86ISD::SUBV_BROADCAST)
44080 return DAG.getNode(Op0.getOpcode(), DL, VT, Op0.getOperand(0));
44081
44082 // concat_vectors(movddup(x),movddup(x)) -> broadcast(x)
44083 if (Op0.getOpcode() == X86ISD::MOVDDUP && VT == MVT::v4f64 &&
44084 (Subtarget.hasAVX2() || MayFoldLoad(Op0.getOperand(0))))
44085 return DAG.getNode(X86ISD::VBROADCAST, DL, VT,
44086 DAG.getNode(ISD::EXTRACT_VECTOR_ELT, DL, MVT::f64,
44087 Op0.getOperand(0),
44088 DAG.getIntPtrConstant(0, DL)));
44089
44090 // concat_vectors(scalar_to_vector(x),scalar_to_vector(x)) -> broadcast(x)
44091 if (Op0.getOpcode() == ISD::SCALAR_TO_VECTOR &&
44092 (Subtarget.hasAVX2() ||
44093 (VT.getScalarSizeInBits() >= 32 && MayFoldLoad(Op0.getOperand(0)))) &&
44094 Op0.getOperand(0).getValueType() == VT.getScalarType())
44095 return DAG.getNode(X86ISD::VBROADCAST, DL, VT, Op0.getOperand(0));
44096 }
44097
44098 bool IsSplat = llvm::all_of(Ops, [&Op0](SDValue Op) { return Op == Op0; });
44099
44100 // Repeated opcode.
44101 // TODO - combineX86ShufflesRecursively should handle shuffle concatenation
44102 // but it currently struggles with different vector widths.
44103 if (llvm::all_of(Ops, [Op0](SDValue Op) {
44104 return Op.getOpcode() == Op0.getOpcode();
44105 })) {
44106 unsigned NumOps = Ops.size();
44107 switch (Op0.getOpcode()) {
44108 case X86ISD::PSHUFHW:
44109 case X86ISD::PSHUFLW:
44110 case X86ISD::PSHUFD:
44111 if (!IsSplat && NumOps == 2 && VT.is256BitVector() &&
44112 Subtarget.hasInt256() && Op0.getOperand(1) == Ops[1].getOperand(1)) {
44113 SmallVector<SDValue, 2> Src;
44114 for (unsigned i = 0; i != NumOps; ++i)
44115 Src.push_back(Ops[i].getOperand(0));
44116 return DAG.getNode(Op0.getOpcode(), DL, VT,
44117 DAG.getNode(ISD::CONCAT_VECTORS, DL, VT, Src),
44118 Op0.getOperand(1));
44119 }
44120 LLVM_FALLTHROUGH;
44121 case X86ISD::VPERMILPI:
44122 // TODO - add support for vXf64/vXi64 shuffles.
44123 if (!IsSplat && NumOps == 2 && (VT == MVT::v8f32 || VT == MVT::v8i32) &&
44124 Subtarget.hasAVX() && Op0.getOperand(1) == Ops[1].getOperand(1)) {
44125 SmallVector<SDValue, 2> Src;
44126 for (unsigned i = 0; i != NumOps; ++i)
44127 Src.push_back(DAG.getBitcast(MVT::v4f32, Ops[i].getOperand(0)));
44128 SDValue Res = DAG.getNode(ISD::CONCAT_VECTORS, DL, MVT::v8f32, Src);
44129 Res = DAG.getNode(X86ISD::VPERMILPI, DL, MVT::v8f32, Res,
44130 Op0.getOperand(1));
44131 return DAG.getBitcast(VT, Res);
44132 }
44133 break;
44134 case X86ISD::PACKUS:
44135 if (NumOps == 2 && VT.is256BitVector() && Subtarget.hasInt256()) {
44136 SmallVector<SDValue, 2> LHS, RHS;
44137 for (unsigned i = 0; i != NumOps; ++i) {
44138 LHS.push_back(Ops[i].getOperand(0));
44139 RHS.push_back(Ops[i].getOperand(1));
44140 }
44141 MVT SrcVT = Op0.getOperand(0).getSimpleValueType();
44142 SrcVT = MVT::getVectorVT(SrcVT.getScalarType(),
44143 NumOps * SrcVT.getVectorNumElements());
44144 return DAG.getNode(Op0.getOpcode(), DL, VT,
44145 DAG.getNode(ISD::CONCAT_VECTORS, DL, SrcVT, LHS),
44146 DAG.getNode(ISD::CONCAT_VECTORS, DL, SrcVT, RHS));
44147 }
44148 break;
44149 }
44150 }
44151
44152 return SDValue();
44153}
44154
44155static SDValue combineConcatVectors(SDNode *N, SelectionDAG &DAG,
44156 TargetLowering::DAGCombinerInfo &DCI,
44157 const X86Subtarget &Subtarget) {
44158 EVT VT = N->getValueType(0);
44159 EVT SrcVT = N->getOperand(0).getValueType();
44160 const TargetLowering &TLI = DAG.getTargetLoweringInfo();
44161
44162 // Don't do anything for i1 vectors.
44163 if (VT.getVectorElementType() == MVT::i1)
44164 return SDValue();
44165
44166 if (Subtarget.hasAVX() && TLI.isTypeLegal(VT) && TLI.isTypeLegal(SrcVT)) {
44167 SmallVector<SDValue, 4> Ops(N->op_begin(), N->op_end());
44168 if (SDValue R = combineConcatVectorOps(SDLoc(N), VT.getSimpleVT(), Ops, DAG,
44169 DCI, Subtarget))
44170 return R;
44171 }
44172
44173 return SDValue();
44174}
44175
44176static SDValue combineInsertSubvector(SDNode *N, SelectionDAG &DAG,
44177 TargetLowering::DAGCombinerInfo &DCI,
44178 const X86Subtarget &Subtarget) {
44179 if (DCI.isBeforeLegalizeOps())
44180 return SDValue();
44181
44182 MVT OpVT = N->getSimpleValueType(0);
44183
44184 bool IsI1Vector = OpVT.getVectorElementType() == MVT::i1;
44185
44186 SDLoc dl(N);
44187 SDValue Vec = N->getOperand(0);
44188 SDValue SubVec = N->getOperand(1);
44189
44190 uint64_t IdxVal = N->getConstantOperandVal(2);
44191 MVT SubVecVT = SubVec.getSimpleValueType();
44192
44193 if (Vec.isUndef() && SubVec.isUndef())
44194 return DAG.getUNDEF(OpVT);
44195
44196 // Inserting undefs/zeros into zeros/undefs is a zero vector.
44197 if ((Vec.isUndef() || ISD::isBuildVectorAllZeros(Vec.getNode())) &&
44198 (SubVec.isUndef() || ISD::isBuildVectorAllZeros(SubVec.getNode())))
44199 return getZeroVector(OpVT, Subtarget, DAG, dl);
44200
44201 if (ISD::isBuildVectorAllZeros(Vec.getNode())) {
44202 // If we're inserting into a zero vector and then into a larger zero vector,
44203 // just insert into the larger zero vector directly.
44204 if (SubVec.getOpcode() == ISD::INSERT_SUBVECTOR &&
44205 ISD::isBuildVectorAllZeros(SubVec.getOperand(0).getNode())) {
44206 uint64_t Idx2Val = SubVec.getConstantOperandVal(2);
44207 return DAG.getNode(ISD::INSERT_SUBVECTOR, dl, OpVT,
44208 getZeroVector(OpVT, Subtarget, DAG, dl),
44209 SubVec.getOperand(1),
44210 DAG.getIntPtrConstant(IdxVal + Idx2Val, dl));
44211 }
44212
44213 // If we're inserting into a zero vector and our input was extracted from an
44214 // insert into a zero vector of the same type, and the extraction was at
44215 // least as large as the original insertion, just insert the original
44216 // subvector into a zero vector.
44217 if (SubVec.getOpcode() == ISD::EXTRACT_SUBVECTOR && IdxVal == 0 &&
44218 isNullConstant(SubVec.getOperand(1)) &&
44219 SubVec.getOperand(0).getOpcode() == ISD::INSERT_SUBVECTOR) {
44220 SDValue Ins = SubVec.getOperand(0);
44221 if (isNullConstant(Ins.getOperand(2)) &&
44222 ISD::isBuildVectorAllZeros(Ins.getOperand(0).getNode()) &&
44223 Ins.getOperand(1).getValueSizeInBits() <= SubVecVT.getSizeInBits())
44224 return DAG.getNode(ISD::INSERT_SUBVECTOR, dl, OpVT,
44225 getZeroVector(OpVT, Subtarget, DAG, dl),
44226 Ins.getOperand(1), N->getOperand(2));
44227 }
44228 }
44229
44230 // Stop here if this is an i1 vector.
44231 if (IsI1Vector)
44232 return SDValue();
44233
44234 // If this is an insert of an extract, combine to a shuffle. Don't do this
44235 // if the insert or extract can be represented with a subregister operation.
44236 if (SubVec.getOpcode() == ISD::EXTRACT_SUBVECTOR &&
44237 SubVec.getOperand(0).getSimpleValueType() == OpVT &&
44238 (IdxVal != 0 || !Vec.isUndef())) {
44239 int ExtIdxVal = SubVec.getConstantOperandVal(1);
44240 if (ExtIdxVal != 0) {
44241 int VecNumElts = OpVT.getVectorNumElements();
44242 int SubVecNumElts = SubVecVT.getVectorNumElements();
44243 SmallVector<int, 64> Mask(VecNumElts);
44244 // First create an identity shuffle mask.
44245 for (int i = 0; i != VecNumElts; ++i)
44246 Mask[i] = i;
44247 // Now insert the extracted portion.
44248 for (int i = 0; i != SubVecNumElts; ++i)
44249 Mask[i + IdxVal] = i + ExtIdxVal + VecNumElts;
44250
44251 return DAG.getVectorShuffle(OpVT, dl, Vec, SubVec.getOperand(0), Mask);
44252 }
44253 }
44254
44255 // Match concat_vector style patterns.
44256 SmallVector<SDValue, 2> SubVectorOps;
44257 if (collectConcatOps(N, SubVectorOps)) {
44258 if (SDValue Fold =
44259 combineConcatVectorOps(dl, OpVT, SubVectorOps, DAG, DCI, Subtarget))
44260 return Fold;
44261
44262 // If we're inserting all zeros into the upper half, change this to
44263 // a concat with zero. We will match this to a move
44264 // with implicit upper bit zeroing during isel.
44265 // We do this here because we don't want combineConcatVectorOps to
44266 // create INSERT_SUBVECTOR from CONCAT_VECTORS.
44267 if (SubVectorOps.size() == 2 &&
44268 ISD::isBuildVectorAllZeros(SubVectorOps[1].getNode()))
44269 return DAG.getNode(ISD::INSERT_SUBVECTOR, dl, OpVT,
44270 getZeroVector(OpVT, Subtarget, DAG, dl),
44271 SubVectorOps[0], DAG.getIntPtrConstant(0, dl));
44272 }
44273
44274 // If this is a broadcast insert into an upper undef, use a larger broadcast.
44275 if (Vec.isUndef() && IdxVal != 0 && SubVec.getOpcode() == X86ISD::VBROADCAST)
44276 return DAG.getNode(X86ISD::VBROADCAST, dl, OpVT, SubVec.getOperand(0));
44277
44278 // If this is a broadcast load inserted into an upper undef, use a larger
44279 // broadcast load.
44280 if (Vec.isUndef() && IdxVal != 0 && SubVec.hasOneUse() &&
44281 SubVec.getOpcode() == X86ISD::VBROADCAST_LOAD) {
44282 auto *MemIntr = cast<MemIntrinsicSDNode>(SubVec);
44283 SDVTList Tys = DAG.getVTList(OpVT, MVT::Other);
44284 SDValue Ops[] = { MemIntr->getChain(), MemIntr->getBasePtr() };
44285 SDValue BcastLd =
44286 DAG.getMemIntrinsicNode(X86ISD::VBROADCAST_LOAD, dl, Tys, Ops,
44287 MemIntr->getMemoryVT(),
44288 MemIntr->getMemOperand());
44289 DAG.ReplaceAllUsesOfValueWith(SDValue(MemIntr, 1), BcastLd.getValue(1));
44290 return BcastLd;
44291 }
44292
44293 return SDValue();
44294}
44295
44296/// If we are extracting a subvector of a vector select and the select condition
44297/// is composed of concatenated vectors, try to narrow the select width. This
44298/// is a common pattern for AVX1 integer code because 256-bit selects may be
44299/// legal, but there is almost no integer math/logic available for 256-bit.
44300/// This function should only be called with legal types (otherwise, the calls
44301/// to get simple value types will assert).
44302static SDValue narrowExtractedVectorSelect(SDNode *Ext, SelectionDAG &DAG) {
44303 SDValue Sel = peekThroughBitcasts(Ext->getOperand(0));
44304 SmallVector<SDValue, 4> CatOps;
44305 if (Sel.getOpcode() != ISD::VSELECT ||
44306 !collectConcatOps(Sel.getOperand(0).getNode(), CatOps))
44307 return SDValue();
44308
44309 // Note: We assume simple value types because this should only be called with
44310 // legal operations/types.
44311 // TODO: This can be extended to handle extraction to 256-bits.
44312 MVT VT = Ext->getSimpleValueType(0);
44313 if (!VT.is128BitVector())
44314 return SDValue();
44315
44316 MVT SelCondVT = Sel.getOperand(0).getSimpleValueType();
44317 if (!SelCondVT.is256BitVector() && !SelCondVT.is512BitVector())
44318 return SDValue();
44319
44320 MVT WideVT = Ext->getOperand(0).getSimpleValueType();
44321 MVT SelVT = Sel.getSimpleValueType();
44322 assert((SelVT.is256BitVector() || SelVT.is512BitVector()) &&
44323        "Unexpected vector type with legal operations");
44324
44325 unsigned SelElts = SelVT.getVectorNumElements();
44326 unsigned CastedElts = WideVT.getVectorNumElements();
44327 unsigned ExtIdx = cast<ConstantSDNode>(Ext->getOperand(1))->getZExtValue();
44328 if (SelElts % CastedElts == 0) {
44329 // The select has the same or more (narrower) elements than the extract
44330 // operand. The extraction index gets scaled by that factor.
44331 ExtIdx *= (SelElts / CastedElts);
44332 } else if (CastedElts % SelElts == 0) {
44333 // The select has less (wider) elements than the extract operand. Make sure
44334 // that the extraction index can be divided evenly.
44335 unsigned IndexDivisor = CastedElts / SelElts;
44336 if (ExtIdx % IndexDivisor != 0)
44337 return SDValue();
44338 ExtIdx /= IndexDivisor;
44339 } else {
44340 llvm_unreachable("Element count of simple vector types are not divisible?")::llvm::llvm_unreachable_internal("Element count of simple vector types are not divisible?"
, "/build/llvm-toolchain-snapshot-10~svn374877/lib/Target/X86/X86ISelLowering.cpp"
, 44340)
;
44341 }
44342
44343 unsigned NarrowingFactor = WideVT.getSizeInBits() / VT.getSizeInBits();
44344 unsigned NarrowElts = SelElts / NarrowingFactor;
44345 MVT NarrowSelVT = MVT::getVectorVT(SelVT.getVectorElementType(), NarrowElts);
44346 SDLoc DL(Ext);
44347 SDValue ExtCond = extract128BitVector(Sel.getOperand(0), ExtIdx, DAG, DL);
44348 SDValue ExtT = extract128BitVector(Sel.getOperand(1), ExtIdx, DAG, DL);
44349 SDValue ExtF = extract128BitVector(Sel.getOperand(2), ExtIdx, DAG, DL);
44350 SDValue NarrowSel = DAG.getSelect(DL, NarrowSelVT, ExtCond, ExtT, ExtF);
44351 return DAG.getBitcast(VT, NarrowSel);
44352}
44353
44354static SDValue combineExtractSubvector(SDNode *N, SelectionDAG &DAG,
44355 TargetLowering::DAGCombinerInfo &DCI,
44356 const X86Subtarget &Subtarget) {
44357 // For AVX1 only, if we are extracting from a 256-bit and+not (which will
44358 // eventually get combined/lowered into ANDNP) with a concatenated operand,
44359 // split the 'and' into 128-bit ops to avoid the concatenate and extract.
44360 // We let generic combining take over from there to simplify the
44361 // insert/extract and 'not'.
44362 // This pattern emerges during AVX1 legalization. We handle it before lowering
44363 // to avoid complications like splitting constant vector loads.
44364
44365 // Capture the original wide type in the likely case that we need to bitcast
44366 // back to this type.
44367 if (!N->getValueType(0).isSimple())
44368 return SDValue();
44369
44370 MVT VT = N->getSimpleValueType(0);
44371 SDValue InVec = N->getOperand(0);
44372 SDValue InVecBC = peekThroughBitcasts(InVec);
44373 EVT InVecVT = InVec.getValueType();
44374 EVT InVecBCVT = InVecBC.getValueType();
44375 const TargetLowering &TLI = DAG.getTargetLoweringInfo();
44376
44377 if (Subtarget.hasAVX() && !Subtarget.hasAVX2() &&
44378 TLI.isTypeLegal(InVecVT) &&
44379 InVecVT.getSizeInBits() == 256 && InVecBC.getOpcode() == ISD::AND) {
44380 auto isConcatenatedNot = [] (SDValue V) {
44381 V = peekThroughBitcasts(V);
44382 if (!isBitwiseNot(V))
44383 return false;
44384 SDValue NotOp = V->getOperand(0);
44385 return peekThroughBitcasts(NotOp).getOpcode() == ISD::CONCAT_VECTORS;
44386 };
44387 if (isConcatenatedNot(InVecBC.getOperand(0)) ||
44388 isConcatenatedNot(InVecBC.getOperand(1))) {
44389 // extract (and v4i64 X, (not (concat Y1, Y2))), n -> andnp v2i64 X(n), Y1
44390 SDValue Concat = split256IntArith(InVecBC, DAG);
44391 return DAG.getNode(ISD::EXTRACT_SUBVECTOR, SDLoc(N), VT,
44392 DAG.getBitcast(InVecVT, Concat), N->getOperand(1));
44393 }
44394 }
44395
44396 if (DCI.isBeforeLegalizeOps())
44397 return SDValue();
44398
44399 if (SDValue V = narrowExtractedVectorSelect(N, DAG))
44400 return V;
44401
44402 unsigned IdxVal = cast<ConstantSDNode>(N->getOperand(1))->getZExtValue();
44403
44404 if (ISD::isBuildVectorAllZeros(InVec.getNode()))
44405 return getZeroVector(VT, Subtarget, DAG, SDLoc(N));
44406
44407 if (ISD::isBuildVectorAllOnes(InVec.getNode())) {
44408 if (VT.getScalarType() == MVT::i1)
44409 return DAG.getConstant(1, SDLoc(N), VT);
44410 return getOnesVector(VT, DAG, SDLoc(N));
44411 }
44412
44413 if (InVec.getOpcode() == ISD::BUILD_VECTOR)
44414 return DAG.getBuildVector(
44415 VT, SDLoc(N),
44416 InVec.getNode()->ops().slice(IdxVal, VT.getVectorNumElements()));
44417
44418 // Try to move vector bitcast after extract_subv by scaling extraction index:
44419 // extract_subv (bitcast X), Index --> bitcast (extract_subv X, Index')
44420 // TODO: Move this to DAGCombiner::visitEXTRACT_SUBVECTOR
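// For example: extracting v4i32 elements 4-7 from a v8i32 bitcast of a v4i64
// value X becomes a v2i64 extract at index 2, bitcast back to v4i32:
//   extract_subv v4i32 (bitcast (v4i64 X)), 4
//     --> bitcast v4i32 (extract_subv v2i64 X, 2)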
44421 if (InVec != InVecBC && InVecBCVT.isVector()) {
44422 unsigned SrcNumElts = InVecBCVT.getVectorNumElements();
44423 unsigned DestNumElts = InVecVT.getVectorNumElements();
44424 if ((DestNumElts % SrcNumElts) == 0) {
44425 unsigned DestSrcRatio = DestNumElts / SrcNumElts;
44426 if ((VT.getVectorNumElements() % DestSrcRatio) == 0) {
44427 unsigned NewExtNumElts = VT.getVectorNumElements() / DestSrcRatio;
44428 EVT NewExtVT = EVT::getVectorVT(*DAG.getContext(),
44429 InVecBCVT.getScalarType(), NewExtNumElts);
44430 if ((N->getConstantOperandVal(1) % DestSrcRatio) == 0 &&
44431 TLI.isOperationLegalOrCustom(ISD::EXTRACT_SUBVECTOR, NewExtVT)) {
44432 unsigned IndexValScaled = N->getConstantOperandVal(1) / DestSrcRatio;
44433 SDLoc DL(N);
44434 SDValue NewIndex = DAG.getIntPtrConstant(IndexValScaled, DL);
44435 SDValue NewExtract = DAG.getNode(ISD::EXTRACT_SUBVECTOR, DL, NewExtVT,
44436 InVecBC, NewIndex);
44437 return DAG.getBitcast(VT, NewExtract);
44438 }
44439 }
44440 }
44441 }
44442
44443 // If we are extracting from an insert into a zero vector, replace with a
44444 // smaller insert into zero, provided the extracted type is at least as wide
44445 // as the originally inserted subvector. Don't do this for i1 vectors.
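// e.g. extract_subv v8i32 (insert_subv (v16i32 zero), (v4i32 X), 0), 0
//        --> insert_subv (v8i32 zero), (v4i32 X), 0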
44446 if (VT.getVectorElementType() != MVT::i1 &&
44447 InVec.getOpcode() == ISD::INSERT_SUBVECTOR && IdxVal == 0 &&
44448 InVec.hasOneUse() && isNullConstant(InVec.getOperand(2)) &&
44449 ISD::isBuildVectorAllZeros(InVec.getOperand(0).getNode()) &&
44450 InVec.getOperand(1).getValueSizeInBits() <= VT.getSizeInBits()) {
44451 SDLoc DL(N);
44452 return DAG.getNode(ISD::INSERT_SUBVECTOR, DL, VT,
44453 getZeroVector(VT, Subtarget, DAG, DL),
44454 InVec.getOperand(1), InVec.getOperand(2));
44455 }
44456
44457 // If we're extracting from a broadcast then we're better off just
44458 // broadcasting to the smaller type directly, assuming this is the only use.
44459 // As it's a broadcast we don't care about the extraction index.
44460 if (InVec.getOpcode() == X86ISD::VBROADCAST && InVec.hasOneUse() &&
44461 InVec.getOperand(0).getValueSizeInBits() <= VT.getSizeInBits())
44462 return DAG.getNode(X86ISD::VBROADCAST, SDLoc(N), VT, InVec.getOperand(0));
44463
44464 if (InVec.getOpcode() == X86ISD::VBROADCAST_LOAD && InVec.hasOneUse()) {
44465 auto *MemIntr = cast<MemIntrinsicSDNode>(InVec);
44466 if (MemIntr->getMemoryVT().getSizeInBits() <= VT.getSizeInBits()) {
44467 SDVTList Tys = DAG.getVTList(VT, MVT::Other);
44468 SDValue Ops[] = { MemIntr->getChain(), MemIntr->getBasePtr() };
44469 SDValue BcastLd =
44470 DAG.getMemIntrinsicNode(X86ISD::VBROADCAST_LOAD, SDLoc(N), Tys, Ops,
44471 MemIntr->getMemoryVT(),
44472 MemIntr->getMemOperand());
44473 DAG.ReplaceAllUsesOfValueWith(SDValue(MemIntr, 1), BcastLd.getValue(1));
44474 return BcastLd;
44475 }
44476 }
44477
44478 // If we're extracting the lowest subvector and we're the only user,
44479 // we may be able to perform this with a smaller vector width.
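// The cases below narrow int-to-fp / fp-extend conversions, the *_EXTEND
// family, and 256-bit VSELECTs down to 128-bit operations when only the low
// subvector of the result is used.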
44480 if (IdxVal == 0 && InVec.hasOneUse()) {
44481 unsigned InOpcode = InVec.getOpcode();
44482 if (VT == MVT::v2f64 && InVecVT == MVT::v4f64) {
44483 // v2f64 CVTDQ2PD(v4i32).
44484 if (InOpcode == ISD::SINT_TO_FP &&
44485 InVec.getOperand(0).getValueType() == MVT::v4i32) {
44486 return DAG.getNode(X86ISD::CVTSI2P, SDLoc(N), VT, InVec.getOperand(0));
44487 }
44488 // v2f64 CVTUDQ2PD(v4i32).
44489 if (InOpcode == ISD::UINT_TO_FP &&
44490 InVec.getOperand(0).getValueType() == MVT::v4i32) {
44491 return DAG.getNode(X86ISD::CVTUI2P, SDLoc(N), VT, InVec.getOperand(0));
44492 }
44493 // v2f64 CVTPS2PD(v4f32).
44494 if (InOpcode == ISD::FP_EXTEND &&
44495 InVec.getOperand(0).getValueType() == MVT::v4f32) {
44496 return DAG.getNode(X86ISD::VFPEXT, SDLoc(N), VT, InVec.getOperand(0));
44497 }
44498 }
44499 if ((InOpcode == ISD::ANY_EXTEND ||
44500 InOpcode == ISD::ANY_EXTEND_VECTOR_INREG ||
44501 InOpcode == ISD::ZERO_EXTEND ||
44502 InOpcode == ISD::ZERO_EXTEND_VECTOR_INREG ||
44503 InOpcode == ISD::SIGN_EXTEND ||
44504 InOpcode == ISD::SIGN_EXTEND_VECTOR_INREG) &&
44505 VT.is128BitVector() &&
44506 InVec.getOperand(0).getSimpleValueType().is128BitVector()) {
44507 unsigned ExtOp = getOpcode_EXTEND_VECTOR_INREG(InOpcode);
44508 return DAG.getNode(ExtOp, SDLoc(N), VT, InVec.getOperand(0));
44509 }
44510 if (InOpcode == ISD::VSELECT &&
44511 InVec.getOperand(0).getValueType().is256BitVector() &&
44512 InVec.getOperand(1).getValueType().is256BitVector() &&
44513 InVec.getOperand(2).getValueType().is256BitVector()) {
44514 SDLoc DL(N);
44515 SDValue Ext0 = extractSubVector(InVec.getOperand(0), 0, DAG, DL, 128);
44516 SDValue Ext1 = extractSubVector(InVec.getOperand(1), 0, DAG, DL, 128);
44517 SDValue Ext2 = extractSubVector(InVec.getOperand(2), 0, DAG, DL, 128);
44518 return DAG.getNode(InOpcode, DL, VT, Ext0, Ext1, Ext2);
44519 }
44520 }
44521
44522 return SDValue();
44523}
44524
44525static SDValue combineScalarToVector(SDNode *N, SelectionDAG &DAG) {
44526 EVT VT = N->getValueType(0);
44527 SDValue Src = N->getOperand(0);
44528 SDLoc DL(N);
44529
44530 // If this is a scalar to vector to v1i1 from an AND with 1, bypass the and.
44531 // This occurs frequently in our masked scalar intrinsic code and our
44532 // floating point select lowering with AVX512.
44533 // TODO: SimplifyDemandedBits instead?
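// i.e. (v1i1 (scalar_to_vector (and X, 1))) --> (v1i1 (scalar_to_vector X))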
44534 if (VT == MVT::v1i1 && Src.getOpcode() == ISD::AND && Src.hasOneUse())
44535 if (auto *C = dyn_cast<ConstantSDNode>(Src.getOperand(1)))
44536 if (C->getAPIntValue().isOneValue())
44537 return DAG.getNode(ISD::SCALAR_TO_VECTOR, DL, MVT::v1i1,
44538 Src.getOperand(0));
44539
44540 // Combine scalar_to_vector of an extract_vector_elt into an extract_subvec.
44541 if (VT == MVT::v1i1 && Src.getOpcode() == ISD::EXTRACT_VECTOR_ELT &&
44542 Src.hasOneUse() && Src.getOperand(0).getValueType().isVector() &&
44543 Src.getOperand(0).getValueType().getVectorElementType() == MVT::i1)
44544 if (auto *C = dyn_cast<ConstantSDNode>(Src.getOperand(1)))
44545 if (C->isNullValue())
44546 return DAG.getNode(ISD::EXTRACT_SUBVECTOR, DL, VT, Src.getOperand(0),
44547 Src.getOperand(1));
44548
44549 // Reduce v2i64 to v4i32 if we don't need the upper bits.
44550 // TODO: Move to DAGCombine?
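// i.e. (v2i64 (scalar_to_vector (i64 (any_extend X)))), with X no wider than
// 32 bits, becomes a bitcast of (v4i32 (scalar_to_vector (i32 X))).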
44551 if (VT == MVT::v2i64 && Src.getOpcode() == ISD::ANY_EXTEND &&
44552 Src.getValueType() == MVT::i64 && Src.hasOneUse() &&
44553 Src.getOperand(0).getScalarValueSizeInBits() <= 32)
44554 return DAG.getBitcast(
44555 VT, DAG.getNode(ISD::SCALAR_TO_VECTOR, DL, MVT::v4i32,
44556 DAG.getAnyExtOrTrunc(Src.getOperand(0), DL, MVT::i32)));
44557
44558 return SDValue();
44559}
44560
44561// Simplify PMULDQ and PMULUDQ operations.
44562static SDValue combinePMULDQ(SDNode *N, SelectionDAG &DAG,
44563 TargetLowering::DAGCombinerInfo &DCI,
44564 const X86Subtarget &Subtarget) {
44565 SDValue LHS = N->getOperand(0);
44566 SDValue RHS = N->getOperand(1);
44567
44568 // Canonicalize constant to RHS.
44569 if (DAG.isConstantIntBuildVectorOrConstantInt(LHS) &&
44570 !DAG.isConstantIntBuildVectorOrConstantInt(RHS))
44571 return DAG.getNode(N->getOpcode(), SDLoc(N), N->getValueType(0), RHS, LHS);
44572
44573 // Multiply by zero.
44574 // Don't return RHS as it may contain UNDEFs.
44575 if (ISD::isBuildVectorAllZeros(RHS.getNode()))
44576 return DAG.getConstant(0, SDLoc(N), N->getValueType(0));
44577
44578 // PMULDQ/PMULUDQ only uses lower 32 bits from each vector element.
44579 const TargetLowering &TLI = DAG.getTargetLoweringInfo();
44580 if (TLI.SimplifyDemandedBits(SDValue(N, 0), APInt::getAllOnesValue(64), DCI))
44581 return SDValue(N, 0);
44582
44583 // If the input is an extend_invec and the SimplifyDemandedBits call didn't
44584 // convert it to any_extend_invec, due to the LegalOperations check, do the
44585 // conversion directly to a vector shuffle manually. This exposes combine
44586 // opportunities missed by combineExtInVec not calling
44587 // combineX86ShufflesRecursively on SSE4.1 targets.
44588 // FIXME: This is basically a hack around several other issues related to
44589 // ANY_EXTEND_VECTOR_INREG.
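// The shuffle below interleaves the source with itself so that the even 32-bit
// lanes (the only ones PMULDQ/PMULUDQ read) hold the original elements:
//   (*_extend_vector_inreg (v4i32 X)) --> bitcast v2i64 (shuffle X, X, <0,-1,1,-1>)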
44590 if (N->getValueType(0) == MVT::v2i64 && LHS.hasOneUse() &&
44591 (LHS.getOpcode() == ISD::ZERO_EXTEND_VECTOR_INREG ||
44592 LHS.getOpcode() == ISD::SIGN_EXTEND_VECTOR_INREG) &&
44593 LHS.getOperand(0).getValueType() == MVT::v4i32) {
44594 SDLoc dl(N);
44595 LHS = DAG.getVectorShuffle(MVT::v4i32, dl, LHS.getOperand(0),
44596 LHS.getOperand(0), { 0, -1, 1, -1 });
44597 LHS = DAG.getBitcast(MVT::v2i64, LHS);
44598 return DAG.getNode(N->getOpcode(), dl, MVT::v2i64, LHS, RHS);
44599 }
44600 if (N->getValueType(0) == MVT::v2i64 && RHS.hasOneUse() &&
44601 (RHS.getOpcode() == ISD::ZERO_EXTEND_VECTOR_INREG ||
44602 RHS.getOpcode() == ISD::SIGN_EXTEND_VECTOR_INREG) &&
44603 RHS.getOperand(0).getValueType() == MVT::v4i32) {
44604 SDLoc dl(N);
44605 RHS = DAG.getVectorShuffle(MVT::v4i32, dl, RHS.getOperand(0),
44606 RHS.getOperand(0), { 0, -1, 1, -1 });
44607 RHS = DAG.getBitcast(MVT::v2i64, RHS);
44608 return DAG.getNode(N->getOpcode(), dl, MVT::v2i64, LHS, RHS);
44609 }
44610
44611 return SDValue();
44612}
44613
44614static SDValue combineExtInVec(SDNode *N, SelectionDAG &DAG,
44615 TargetLowering::DAGCombinerInfo &DCI,
44616 const X86Subtarget &Subtarget) {
44617 EVT VT = N->getValueType(0);
44618 SDValue In = N->getOperand(0);
44619 const TargetLowering &TLI = DAG.getTargetLoweringInfo();
44620
44621 // Try to merge vector loads and extend_inreg to an extload.
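// e.g. (v2i64 (zero_extend_vector_inreg (v4i32 (load p)))) can become a v2i64
// zextload from p with a v2i32 memory type, when that extload is legal.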
44622 if (!DCI.isBeforeLegalizeOps() && ISD::isNormalLoad(In.getNode()) &&
44623 In.hasOneUse()) {
44624 auto *Ld = cast<LoadSDNode>(In);
44625 if (Ld->isSimple()) {
44626 MVT SVT = In.getSimpleValueType().getVectorElementType();
44627 ISD::LoadExtType Ext = N->getOpcode() == ISD::SIGN_EXTEND_VECTOR_INREG ? ISD::SEXTLOAD : ISD::ZEXTLOAD;
44628 EVT MemVT = EVT::getVectorVT(*DAG.getContext(), SVT,
44629 VT.getVectorNumElements());
44630 if (TLI.isLoadExtLegal(Ext, VT, MemVT)) {
44631 SDValue Load =
44632 DAG.getExtLoad(Ext, SDLoc(N), VT, Ld->getChain(), Ld->getBasePtr(),
44633 Ld->getPointerInfo(), MemVT, Ld->getAlignment(),
44634 Ld->getMemOperand()->getFlags());
44635 DAG.ReplaceAllUsesOfValueWith(SDValue(Ld, 1), Load.getValue(1));
44636 return Load;
44637 }
44638 }
44639 }
44640
44641 // Attempt to combine as a shuffle.
44642 // TODO: SSE41 support
44643 if (Subtarget.hasAVX() && N->getOpcode() != ISD::SIGN_EXTEND_VECTOR_INREG) {
44644 SDValue Op(N, 0);
44645 if (TLI.isTypeLegal(VT) && TLI.isTypeLegal(In.getValueType()))
44646 if (SDValue Res = combineX86ShufflesRecursively(Op, DAG, Subtarget))
44647 return Res;
44648 }
44649
44650 return SDValue();
44651}
44652
44653static SDValue combineKSHIFT(SDNode *N, SelectionDAG &DAG,
44654 TargetLowering::DAGCombinerInfo &DCI) {
44655 EVT VT = N->getValueType(0);
44656
44657 APInt KnownUndef, KnownZero;
44658 const TargetLowering &TLI = DAG.getTargetLoweringInfo();
44659 APInt DemandedElts = APInt::getAllOnesValue(VT.getVectorNumElements());
44660 if (TLI.SimplifyDemandedVectorElts(SDValue(N, 0), DemandedElts, KnownUndef,
44661 KnownZero, DCI))
44662 return SDValue(N, 0);
44663
44664 return SDValue();
44665}
44666
44667SDValue X86TargetLowering::PerformDAGCombine(SDNode *N,
44668 DAGCombinerInfo &DCI) const {
44669 SelectionDAG &DAG = DCI.DAG;
44670 switch (N->getOpcode()) {
44671 default: break;
44672 case ISD::SCALAR_TO_VECTOR:
44673 return combineScalarToVector(N, DAG);
44674 case ISD::EXTRACT_VECTOR_ELT:
44675 case X86ISD::PEXTRW:
44676 case X86ISD::PEXTRB:
44677 return combineExtractVectorElt(N, DAG, DCI, Subtarget);
44678 case ISD::CONCAT_VECTORS:
44679 return combineConcatVectors(N, DAG, DCI, Subtarget);
44680 case ISD::INSERT_SUBVECTOR:
44681 return combineInsertSubvector(N, DAG, DCI, Subtarget);
44682 case ISD::EXTRACT_SUBVECTOR:
44683 return combineExtractSubvector(N, DAG, DCI, Subtarget);
44684 case ISD::VSELECT:
44685 case ISD::SELECT:
44686 case X86ISD::BLENDV: return combineSelect(N, DAG, DCI, Subtarget);
44687 case ISD::BITCAST: return combineBitcast(N, DAG, DCI, Subtarget);
44688 case X86ISD::CMOV: return combineCMov(N, DAG, DCI, Subtarget);
44689 case X86ISD::CMP: return combineCMP(N, DAG);
44690 case ISD::ADD: return combineAdd(N, DAG, DCI, Subtarget);
44691 case ISD::SUB: return combineSub(N, DAG, DCI, Subtarget);
44692 case X86ISD::ADD:
44693 case X86ISD::SUB: return combineX86AddSub(N, DAG, DCI);
44694 case X86ISD::SBB: return combineSBB(N, DAG);
44695 case X86ISD::ADC: return combineADC(N, DAG, DCI);
44696 case ISD::MUL: return combineMul(N, DAG, DCI, Subtarget);
44697 case ISD::SHL: return combineShiftLeft(N, DAG);
44698 case ISD::SRA: return combineShiftRightArithmetic(N, DAG);
44699 case ISD::SRL: return combineShiftRightLogical(N, DAG, DCI);
44700 case ISD::AND: return combineAnd(N, DAG, DCI, Subtarget);
44701 case ISD::OR: return combineOr(N, DAG, DCI, Subtarget);
44702 case ISD::XOR: return combineXor(N, DAG, DCI, Subtarget);
44703 case X86ISD::BEXTR: return combineBEXTR(N, DAG, DCI, Subtarget);
44704 case ISD::LOAD: return combineLoad(N, DAG, DCI, Subtarget);
44705 case ISD::MLOAD: return combineMaskedLoad(N, DAG, DCI, Subtarget);
44706 case ISD::STORE: return combineStore(N, DAG, DCI, Subtarget);
44707 case ISD::MSTORE: return combineMaskedStore(N, DAG, DCI, Subtarget);
44708 case ISD::SINT_TO_FP: return combineSIntToFP(N, DAG, DCI, Subtarget);
44709 case ISD::UINT_TO_FP: return combineUIntToFP(N, DAG, Subtarget);
44710 case ISD::FADD:
44711 case ISD::FSUB: return combineFaddFsub(N, DAG, Subtarget);
44712 case ISD::FNEG: return combineFneg(N, DAG, Subtarget);
44713 case ISD::TRUNCATE: return combineTruncate(N, DAG, Subtarget);
44714 case X86ISD::VTRUNC: return combineVTRUNC(N, DAG);
44715 case X86ISD::ANDNP: return combineAndnp(N, DAG, DCI, Subtarget);
44716 case X86ISD::FAND: return combineFAnd(N, DAG, Subtarget);
44717 case X86ISD::FANDN: return combineFAndn(N, DAG, Subtarget);
44718 case X86ISD::FXOR:
44719 case X86ISD::FOR: return combineFOr(N, DAG, Subtarget);
44720 case X86ISD::FMIN:
44721 case X86ISD::FMAX: return combineFMinFMax(N, DAG);
44722 case ISD::FMINNUM:
44723 case ISD::FMAXNUM: return combineFMinNumFMaxNum(N, DAG, Subtarget);
44724 case X86ISD::CVTSI2P:
44725 case X86ISD::CVTUI2P: return combineX86INT_TO_FP(N, DAG, DCI);
44726 case X86ISD::CVTP2SI:
44727 case X86ISD::CVTP2UI:
44728 case X86ISD::CVTTP2SI:
44729 case X86ISD::CVTTP2UI: return combineCVTP2I_CVTTP2I(N, DAG, DCI);
44730 case X86ISD::BT: return combineBT(N, DAG, DCI);
44731 case ISD::ANY_EXTEND:
44732 case ISD::ZERO_EXTEND: return combineZext(N, DAG, DCI, Subtarget);
44733 case ISD::SIGN_EXTEND: return combineSext(N, DAG, DCI, Subtarget);
44734 case ISD::SIGN_EXTEND_INREG: return combineSignExtendInReg(N, DAG, Subtarget);
44735 case ISD::ANY_EXTEND_VECTOR_INREG:
44736 case ISD::SIGN_EXTEND_VECTOR_INREG:
44737 case ISD::ZERO_EXTEND_VECTOR_INREG: return combineExtInVec(N, DAG, DCI,
44738 Subtarget);
44739 case ISD::SETCC: return combineSetCC(N, DAG, Subtarget);
44740 case X86ISD::SETCC: return combineX86SetCC(N, DAG, Subtarget);
44741 case X86ISD::BRCOND: return combineBrCond(N, DAG, Subtarget);
44742 case X86ISD::PACKSS:
44743 case X86ISD::PACKUS: return combineVectorPack(N, DAG, DCI, Subtarget);
44744 case X86ISD::VSHL:
44745 case X86ISD::VSRA:
44746 case X86ISD::VSRL:
44747 return combineVectorShiftVar(N, DAG, DCI, Subtarget);
44748 case X86ISD::VSHLI:
44749 case X86ISD::VSRAI:
44750 case X86ISD::VSRLI:
44751 return combineVectorShiftImm(N, DAG, DCI, Subtarget);
44752 case X86ISD::PINSRB:
44753 case X86ISD::PINSRW: return combineVectorInsert(N, DAG, DCI, Subtarget);
44754 case X86ISD::SHUFP: // Handle all target specific shuffles
44755 case X86ISD::INSERTPS:
44756 case X86ISD::EXTRQI:
44757 case X86ISD::INSERTQI:
44758 case X86ISD::PALIGNR:
44759 case X86ISD::VSHLDQ:
44760 case X86ISD::VSRLDQ:
44761 case X86ISD::BLENDI:
44762 case X86ISD::UNPCKH:
44763 case X86ISD::UNPCKL:
44764 case X86ISD::MOVHLPS:
44765 case X86ISD::MOVLHPS:
44766 case X86ISD::PSHUFB:
44767 case X86ISD::PSHUFD:
44768 case X86ISD::PSHUFHW:
44769 case X86ISD::PSHUFLW:
44770 case X86ISD::MOVSHDUP:
44771 case X86ISD::MOVSLDUP:
44772 case X86ISD::MOVDDUP:
44773 case X86ISD::MOVSS:
44774 case X86ISD::MOVSD:
44775 case X86ISD::VBROADCAST:
44776 case X86ISD::VPPERM:
44777 case X86ISD::VPERMI:
44778 case X86ISD::VPERMV:
44779 case X86ISD::VPERMV3:
44780 case X86ISD::VPERMIL2:
44781 case X86ISD::VPERMILPI:
44782 case X86ISD::VPERMILPV:
44783 case X86ISD::VPERM2X128:
44784 case X86ISD::SHUF128:
44785 case X86ISD::VZEXT_MOVL:
44786 case ISD::VECTOR_SHUFFLE: return combineShuffle(N, DAG, DCI, Subtarget);
44787 case X86ISD::FMADD_RND:
44788 case X86ISD::FMSUB:
44789 case X86ISD::FMSUB_RND:
44790 case X86ISD::FNMADD:
44791 case X86ISD::FNMADD_RND:
44792 case X86ISD::FNMSUB:
44793 case X86ISD::FNMSUB_RND:
44794 case ISD::FMA: return combineFMA(N, DAG, DCI, Subtarget);
44795 case X86ISD::FMADDSUB_RND:
44796 case X86ISD::FMSUBADD_RND:
44797 case X86ISD::FMADDSUB:
44798 case X86ISD::FMSUBADD: return combineFMADDSUB(N, DAG, DCI);
44799 case X86ISD::MOVMSK: return combineMOVMSK(N, DAG, DCI, Subtarget);
44800 case X86ISD::MGATHER:
44801 case X86ISD::MSCATTER: return combineX86GatherScatter(N, DAG, DCI);
44802 case ISD::MGATHER:
44803 case ISD::MSCATTER: return combineGatherScatter(N, DAG, DCI);
44804 case X86ISD::PCMPEQ:
44805 case X86ISD::PCMPGT: return combineVectorCompare(N, DAG, Subtarget);
44806 case X86ISD::PMULDQ:
44807 case X86ISD::PMULUDQ: return combinePMULDQ(N, DAG, DCI, Subtarget);
44808 case X86ISD::KSHIFTL:
44809 case X86ISD::KSHIFTR: return combineKSHIFT(N, DAG, DCI);
44810 }
44811
44812 return SDValue();
44813}
44814
44815bool X86TargetLowering::isTypeDesirableForOp(unsigned Opc, EVT VT) const {
44816 if (!isTypeLegal(VT))
44817 return false;
44818
44819 // There are no vXi8 shifts.
44820 if (Opc == ISD::SHL && VT.isVector() && VT.getVectorElementType() == MVT::i8)
44821 return false;
44822
44823 // TODO: Almost no 8-bit ops are desirable because they have no actual
44824 // size/speed advantages vs. 32-bit ops, but they do have a major
44825 // potential disadvantage by causing partial register stalls.
44826 //
44827 // 8-bit multiply/shl is probably not cheaper than 32-bit multiply/shl, and
44828 // we have specializations to turn 32-bit multiply/shl into LEA or other ops.
44829 // Also, see the comment in "IsDesirableToPromoteOp" - where we additionally
44830 // check for a constant operand to the multiply.
44831 if ((Opc == ISD::MUL || Opc == ISD::SHL) && VT == MVT::i8)
44832 return false;
44833
44834 // i16 instruction encodings are longer and some i16 instructions are slow,
44835 // so those are not desirable.
44836 if (VT == MVT::i16) {
44837 switch (Opc) {
44838 default:
44839 break;
44840 case ISD::LOAD:
44841 case ISD::SIGN_EXTEND:
44842 case ISD::ZERO_EXTEND:
44843 case ISD::ANY_EXTEND:
44844 case ISD::SHL:
44845 case ISD::SRA:
44846 case ISD::SRL:
44847 case ISD::SUB:
44848 case ISD::ADD:
44849 case ISD::MUL:
44850 case ISD::AND:
44851 case ISD::OR:
44852 case ISD::XOR:
44853 return false;
44854 }
44855 }
44856
44857 // Any legal type not explicitly accounted for above here is desirable.
44858 return true;
44859}
44860
44861SDValue X86TargetLowering::expandIndirectJTBranch(const SDLoc& dl,
44862 SDValue Value, SDValue Addr,
44863 SelectionDAG &DAG) const {
44864 const Module *M = DAG.getMachineFunction().getMMI().getModule();
44865 Metadata *IsCFProtectionSupported = M->getModuleFlag("cf-protection-branch");
44866 if (IsCFProtectionSupported) {
44867 // In case control-flow branch protection is enabled, we need to add
44868 // notrack prefix to the indirect branch.
44869 // In order to do that we create NT_BRIND SDNode.
44870 // Upon ISEL, the pattern will convert it to jmp with NoTrack prefix.
44871 return DAG.getNode(X86ISD::NT_BRIND, dl, MVT::Other, Value, Addr);
44872 }
44873
44874 return TargetLowering::expandIndirectJTBranch(dl, Value, Addr, DAG);
44875}
44876
44877bool X86TargetLowering::IsDesirableToPromoteOp(SDValue Op, EVT &PVT) const {
44878 EVT VT = Op.getValueType();
44879 bool Is8BitMulByConstant = VT == MVT::i8 && Op.getOpcode() == ISD::MUL &&
44880 isa<ConstantSDNode>(Op.getOperand(1));
44881
44882 // i16 is legal, but undesirable since i16 instruction encodings are longer
44883 // and some i16 instructions are slow.
44884 // 8-bit multiply-by-constant can usually be expanded to something cheaper
44885 // using LEA and/or other ALU ops.
44886 if (VT != MVT::i16 && !Is8BitMulByConstant)
44887 return false;
44888
44889 auto IsFoldableRMW = [](SDValue Load, SDValue Op) {
44890 if (!Op.hasOneUse())
44891 return false;
44892 SDNode *User = *Op->use_begin();
44893 if (!ISD::isNormalStore(User))
44894 return false;
44895 auto *Ld = cast<LoadSDNode>(Load);
44896 auto *St = cast<StoreSDNode>(User);
44897 return Ld->getBasePtr() == St->getBasePtr();
44898 };
44899
44900 auto IsFoldableAtomicRMW = [](SDValue Load, SDValue Op) {
44901 if (!Load.hasOneUse() || Load.getOpcode() != ISD::ATOMIC_LOAD)
44902 return false;
44903 if (!Op.hasOneUse())
44904 return false;
44905 SDNode *User = *Op->use_begin();
44906 if (User->getOpcode() != ISD::ATOMIC_STORE)
44907 return false;
44908 auto *Ld = cast<AtomicSDNode>(Load);
44909 auto *St = cast<AtomicSDNode>(User);
44910 return Ld->getBasePtr() == St->getBasePtr();
44911 };
44912
44913 bool Commute = false;
44914 switch (Op.getOpcode()) {
44915 default: return false;
44916 case ISD::SIGN_EXTEND:
44917 case ISD::ZERO_EXTEND:
44918 case ISD::ANY_EXTEND:
44919 break;
44920 case ISD::SHL:
44921 case ISD::SRA:
44922 case ISD::SRL: {
44923 SDValue N0 = Op.getOperand(0);
44924 // Look out for (store (shl (load), x)).
44925 if (MayFoldLoad(N0) && IsFoldableRMW(N0, Op))
44926 return false;
44927 break;
44928 }
44929 case ISD::ADD:
44930 case ISD::MUL:
44931 case ISD::AND:
44932 case ISD::OR:
44933 case ISD::XOR:
44934 Commute = true;
44935 LLVM_FALLTHROUGH;
44936 case ISD::SUB: {
44937 SDValue N0 = Op.getOperand(0);
44938 SDValue N1 = Op.getOperand(1);
44939 // Avoid disabling potential load folding opportunities.
44940 if (MayFoldLoad(N1) &&
44941 (!Commute || !isa<ConstantSDNode>(N0) ||
44942 (Op.getOpcode() != ISD::MUL && IsFoldableRMW(N1, Op))))
44943 return false;
44944 if (MayFoldLoad(N0) &&
44945 ((Commute && !isa<ConstantSDNode>(N1)) ||
44946 (Op.getOpcode() != ISD::MUL && IsFoldableRMW(N0, Op))))
44947 return false;
44948 if (IsFoldableAtomicRMW(N0, Op) ||
44949 (Commute && IsFoldableAtomicRMW(N1, Op)))
44950 return false;
44951 }
44952 }
44953
44954 PVT = MVT::i32;
44955 return true;
44956}
44957
44958bool X86TargetLowering::
44959 isDesirableToCombineBuildVectorToShuffleTruncate(
44960 ArrayRef<int> ShuffleMask, EVT SrcVT, EVT TruncVT) const {
44961
44962 assert(SrcVT.getVectorNumElements() == ShuffleMask.size() &&
44963        "Element count mismatch");
44964 assert(
44965     Subtarget.getTargetLowering()->isShuffleMaskLegal(ShuffleMask, SrcVT) &&
44966     "Shuffle Mask expected to be legal");
44967
44968 // For 32-bit elements VPERMD is better than shuffle+truncate.
44969 // TODO: After we improve lowerBuildVector, add an exception for VPERMW.
44970 if (SrcVT.getScalarSizeInBits() == 32 || !Subtarget.hasAVX2())
44971 return false;
44972
44973 if (is128BitLaneCrossingShuffleMask(SrcVT.getSimpleVT(), ShuffleMask))
44974 return false;
44975
44976 return true;
44977}
44978
44979//===----------------------------------------------------------------------===//
44980// X86 Inline Assembly Support
44981//===----------------------------------------------------------------------===//
44982
44983// Helper to match a string separated by whitespace.
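// e.g. matchAsm("  bswap $0", {"bswap", "$0"}) is true, while
// matchAsm("bswapl$0", {"bswapl", "$0"}) is not: pieces must be separated by
// whitespace in the asm string.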
44984static bool matchAsm(StringRef S, ArrayRef<const char *> Pieces) {
44985 S = S.substr(S.find_first_not_of(" \t")); // Skip leading whitespace.
44986
44987 for (StringRef Piece : Pieces) {
44988 if (!S.startswith(Piece)) // Check if the piece matches.
44989 return false;
44990
44991 S = S.substr(Piece.size());
44992 StringRef::size_type Pos = S.find_first_not_of(" \t");
44993 if (Pos == 0) // We matched a prefix.
44994 return false;
44995
44996 S = S.substr(Pos);
44997 }
44998
44999 return S.empty();
45000}
45001
45002static bool clobbersFlagRegisters(const SmallVector<StringRef, 4> &AsmPieces) {
45003
45004 if (AsmPieces.size() == 3 || AsmPieces.size() == 4) {
45005 if (std::count(AsmPieces.begin(), AsmPieces.end(), "~{cc}") &&
45006 std::count(AsmPieces.begin(), AsmPieces.end(), "~{flags}") &&
45007 std::count(AsmPieces.begin(), AsmPieces.end(), "~{fpsr}")) {
45008
45009 if (AsmPieces.size() == 3)
45010 return true;
45011 else if (std::count(AsmPieces.begin(), AsmPieces.end(), "~{dirflag}"))
45012 return true;
45013 }
45014 }
45015 return false;
45016}
45017
45018bool X86TargetLowering::ExpandInlineAsm(CallInst *CI) const {
45019 InlineAsm *IA = cast<InlineAsm>(CI->getCalledValue());
45020
45021 const std::string &AsmStr = IA->getAsmString();
45022
45023 IntegerType *Ty = dyn_cast<IntegerType>(CI->getType());
45024 if (!Ty || Ty->getBitWidth() % 16 != 0)
45025 return false;
45026
45027 // TODO: should remove alternatives from the asmstring: "foo {a|b}" -> "foo a"
45028 SmallVector<StringRef, 4> AsmPieces;
45029 SplitString(AsmStr, AsmPieces, ";\n");
45030
45031 switch (AsmPieces.size()) {
45032 default: return false;
45033 case 1:
45034 // FIXME: this should verify that we are targeting a 486 or better. If not,
45035 // we will turn this bswap into something that will be lowered to logical
45036 // ops instead of emitting the bswap asm. For now, we don't support 486 or
45037 // lower so don't worry about this.
45038 // bswap $0
45039 if (matchAsm(AsmPieces[0], {"bswap", "$0"}) ||
45040 matchAsm(AsmPieces[0], {"bswapl", "$0"}) ||
45041 matchAsm(AsmPieces[0], {"bswapq", "$0"}) ||
45042 matchAsm(AsmPieces[0], {"bswap", "${0:q}"}) ||
45043 matchAsm(AsmPieces[0], {"bswapl", "${0:q}"}) ||
45044 matchAsm(AsmPieces[0], {"bswapq", "${0:q}"})) {
45045 // No need to check constraints, nothing other than the equivalent of
45046 // "=r,0" would be valid here.
45047 return IntrinsicLowering::LowerToByteSwap(CI);
45048 }
45049
45050 // rorw $$8, ${0:w} --> llvm.bswap.i16
45051 if (CI->getType()->isIntegerTy(16) &&
45052 IA->getConstraintString().compare(0, 5, "=r,0,") == 0 &&
45053 (matchAsm(AsmPieces[0], {"rorw", "$$8,", "${0:w}"}) ||
45054 matchAsm(AsmPieces[0], {"rolw", "$$8,", "${0:w}"}))) {
45055 AsmPieces.clear();
45056 StringRef ConstraintsStr = IA->getConstraintString();
45057 SplitString(StringRef(ConstraintsStr).substr(5), AsmPieces, ",");
45058 array_pod_sort(AsmPieces.begin(), AsmPieces.end());
45059 if (clobbersFlagRegisters(AsmPieces))
45060 return IntrinsicLowering::LowerToByteSwap(CI);
45061 }
45062 break;
45063 case 3:
45064 if (CI->getType()->isIntegerTy(32) &&
45065 IA->getConstraintString().compare(0, 5, "=r,0,") == 0 &&
45066 matchAsm(AsmPieces[0], {"rorw", "$$8,", "${0:w}"}) &&
45067 matchAsm(AsmPieces[1], {"rorl", "$$16,", "$0"}) &&
45068 matchAsm(AsmPieces[2], {"rorw", "$$8,", "${0:w}"})) {
45069 AsmPieces.clear();
45070 StringRef ConstraintsStr = IA->getConstraintString();
45071 SplitString(StringRef(ConstraintsStr).substr(5), AsmPieces, ",");
45072 array_pod_sort(AsmPieces.begin(), AsmPieces.end());
45073 if (clobbersFlagRegisters(AsmPieces))
45074 return IntrinsicLowering::LowerToByteSwap(CI);
45075 }
45076
45077 if (CI->getType()->isIntegerTy(64)) {
45078 InlineAsm::ConstraintInfoVector Constraints = IA->ParseConstraints();
45079 if (Constraints.size() >= 2 &&
45080 Constraints[0].Codes.size() == 1 && Constraints[0].Codes[0] == "A" &&
45081 Constraints[1].Codes.size() == 1 && Constraints[1].Codes[0] == "0") {
45082 // bswap %eax / bswap %edx / xchgl %eax, %edx -> llvm.bswap.i64
45083 if (matchAsm(AsmPieces[0], {"bswap", "%eax"}) &&
45084 matchAsm(AsmPieces[1], {"bswap", "%edx"}) &&
45085 matchAsm(AsmPieces[2], {"xchgl", "%eax,", "%edx"}))
45086 return IntrinsicLowering::LowerToByteSwap(CI);
45087 }
45088 }
45089 break;
45090 }
45091 return false;
45092}
45093
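// Map GCC-style flag-output constraints ("{@cc<cond>}") used in inline asm to
// the corresponding X86 condition code, or X86::COND_INVALID if unrecognized.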
45094static X86::CondCode parseConstraintCode(llvm::StringRef Constraint) {
45095 X86::CondCode Cond = StringSwitch<X86::CondCode>(Constraint)
45096 .Case("{@cca}", X86::COND_A)
45097 .Case("{@ccae}", X86::COND_AE)
45098 .Case("{@ccb}", X86::COND_B)
45099 .Case("{@ccbe}", X86::COND_BE)
45100 .Case("{@ccc}", X86::COND_B)
45101 .Case("{@cce}", X86::COND_E)
45102 .Case("{@ccz}", X86::COND_E)
45103 .Case("{@ccg}", X86::COND_G)
45104 .Case("{@ccge}", X86::COND_GE)
45105 .Case("{@ccl}", X86::COND_L)
45106 .Case("{@ccle}", X86::COND_LE)
45107 .Case("{@ccna}", X86::COND_BE)
45108 .Case("{@ccnae}", X86::COND_B)
45109 .Case("{@ccnb}", X86::COND_AE)
45110 .Case("{@ccnbe}", X86::COND_A)
45111 .Case("{@ccnc}", X86::COND_AE)
45112 .Case("{@ccne}", X86::COND_NE)
45113 .Case("{@ccnz}", X86::COND_NE)
45114 .Case("{@ccng}", X86::COND_LE)
45115 .Case("{@ccnge}", X86::COND_L)
45116 .Case("{@ccnl}", X86::COND_GE)
45117 .Case("{@ccnle}", X86::COND_G)
45118 .Case("{@ccno}", X86::COND_NO)
45119 .Case("{@ccnp}", X86::COND_P)
45120 .Case("{@ccns}", X86::COND_NS)
45121 .Case("{@cco}", X86::COND_O)
45122 .Case("{@ccp}", X86::COND_P)
45123 .Case("{@ccs}", X86::COND_S)
45124 .Default(X86::COND_INVALID);
45125 return Cond;
45126}
45127
45128/// Given a constraint letter, return the type of constraint for this target.
45129X86TargetLowering::ConstraintType
45130X86TargetLowering::getConstraintType(StringRef Constraint) const {
45131 if (Constraint.size() == 1) {
45132 switch (Constraint[0]) {
45133 case 'R':
45134 case 'q':
45135 case 'Q':
45136 case 'f':
45137 case 't':
45138 case 'u':
45139 case 'y':
45140 case 'x':
45141 case 'v':
45142 case 'Y':
45143 case 'l':
45144 case 'k': // AVX512 masking registers.
45145 return C_RegisterClass;
45146 case 'a':
45147 case 'b':
45148 case 'c':
45149 case 'd':
45150 case 'S':
45151 case 'D':
45152 case 'A':
45153 return C_Register;
45154 case 'I':
45155 case 'J':
45156 case 'K':
45157 case 'N':
45158 case 'G':
45159 case 'L':
45160 case 'M':
45161 return C_Immediate;
45162 case 'C':
45163 case 'e':
45164 case 'Z':
45165 return C_Other;
45166 default:
45167 break;
45168 }
45169 }
45170 else if (Constraint.size() == 2) {
45171 switch (Constraint[0]) {
45172 default:
45173 break;
45174 case 'Y':
45175 switch (Constraint[1]) {
45176 default:
45177 break;
45178 case 'z':
45179 case '0':
45180 return C_Register;
45181 case 'i':
45182 case 'm':
45183 case 'k':
45184 case 't':
45185 case '2':
45186 return C_RegisterClass;
45187 }
45188 }
45189 } else if (parseConstraintCode(Constraint) != X86::COND_INVALID)
45190 return C_Other;
45191 return TargetLowering::getConstraintType(Constraint);
45192}
45193
45194/// Examine constraint type and operand type and determine a weight value.
45195/// This object must already have been set up with the operand type
45196/// and the current alternative constraint selected.
45197TargetLowering::ConstraintWeight
45198 X86TargetLowering::getSingleConstraintMatchWeight(
45199 AsmOperandInfo &info, const char *constraint) const {
45200 ConstraintWeight weight = CW_Invalid;
45201 Value *CallOperandVal = info.CallOperandVal;
45202 // If we don't have a value, we can't do a match,
45203 // but allow it at the lowest weight.
45204 if (!CallOperandVal)
45205 return CW_Default;
45206 Type *type = CallOperandVal->getType();
45207 // Look at the constraint type.
45208 switch (*constraint) {
45209 default:
45210 weight = TargetLowering::getSingleConstraintMatchWeight(info, constraint);
45211 LLVM_FALLTHROUGH;
45212 case 'R':
45213 case 'q':
45214 case 'Q':
45215 case 'a':
45216 case 'b':
45217 case 'c':
45218 case 'd':
45219 case 'S':
45220 case 'D':
45221 case 'A':
45222 if (CallOperandVal->getType()->isIntegerTy())
45223 weight = CW_SpecificReg;
45224 break;
45225 case 'f':
45226 case 't':
45227 case 'u':
45228 if (type->isFloatingPointTy())
45229 weight = CW_SpecificReg;
45230 break;
45231 case 'y':
45232 if (type->isX86_MMXTy() && Subtarget.hasMMX())
45233 weight = CW_SpecificReg;
45234 break;
45235 case 'Y': {
45236 unsigned Size = StringRef(constraint).size();
45237 // Pick 'i' as the next char, since 'Yi' and 'Y' are synonymous when matching 'Y'.
45238 char NextChar = Size == 2 ? constraint[1] : 'i';
45239 if (Size > 2)
45240 break;
45241 switch (NextChar) {
45242 default:
45243 return CW_Invalid;
45244 // XMM0
45245 case 'z':
45246 case '0':
45247 if ((type->getPrimitiveSizeInBits() == 128) && Subtarget.hasSSE1())
45248 return CW_SpecificReg;
45249 return CW_Invalid;
45250 // Conditional OpMask regs (AVX512)
45251 case 'k':
45252 if ((type->getPrimitiveSizeInBits() == 64) && Subtarget.hasAVX512())
45253 return CW_Register;
45254 return CW_Invalid;
45255 // Any MMX reg
45256 case 'm':
45257 if (type->isX86_MMXTy() && Subtarget.hasMMX())
45258 return weight;
45259 return CW_Invalid;
45260 // Any SSE reg when ISA >= SSE2, same as 'Y'
45261 case 'i':
45262 case 't':
45263 case '2':
45264 if (!Subtarget.hasSSE2())
45265 return CW_Invalid;
45266 break;
45267 }
45268 // Fall through (handle "Y" constraint).
45269 LLVM_FALLTHROUGH;
45270 }
45271 case 'v':
45272 if ((type->getPrimitiveSizeInBits() == 512) && Subtarget.hasAVX512())
45273 weight = CW_Register;
45274 LLVM_FALLTHROUGH;
45275 case 'x':
45276 if (((type->getPrimitiveSizeInBits() == 128) && Subtarget.hasSSE1()) ||
45277 ((type->getPrimitiveSizeInBits() == 256) && Subtarget.hasAVX()))
45278 weight = CW_Register;
45279 break;
45280 case 'k':
45281 // Enable conditional vector operations using %k<#> registers.
45282 if ((type->getPrimitiveSizeInBits() == 64) && Subtarget.hasAVX512())
45283 weight = CW_Register;
45284 break;
45285 case 'I':
45286 if (ConstantInt *C = dyn_cast<ConstantInt>(info.CallOperandVal)) {
45287 if (C->getZExtValue() <= 31)
45288 weight = CW_Constant;
45289 }
45290 break;
45291 case 'J':
45292 if (ConstantInt *C = dyn_cast<ConstantInt>(CallOperandVal)) {
45293 if (C->getZExtValue() <= 63)
45294 weight = CW_Constant;
45295 }
45296 break;
45297 case 'K':
45298 if (ConstantInt *C = dyn_cast<ConstantInt>(CallOperandVal)) {
45299 if ((C->getSExtValue() >= -0x80) && (C->getSExtValue() <= 0x7f))
45300 weight = CW_Constant;
45301 }
45302 break;
45303 case 'L':
45304 if (ConstantInt *C = dyn_cast<ConstantInt>(CallOperandVal)) {
45305 if ((C->getZExtValue() == 0xff) || (C->getZExtValue() == 0xffff))
45306 weight = CW_Constant;
45307 }
45308 break;
45309 case 'M':
45310 if (ConstantInt *C = dyn_cast<ConstantInt>(CallOperandVal)) {
45311 if (C->getZExtValue() <= 3)
45312 weight = CW_Constant;
45313 }
45314 break;
45315 case 'N':
45316 if (ConstantInt *C = dyn_cast<ConstantInt>(CallOperandVal)) {
45317 if (C->getZExtValue() <= 0xff)
45318 weight = CW_Constant;
45319 }
45320 break;
45321 case 'G':
45322 case 'C':
45323 if (isa<ConstantFP>(CallOperandVal)) {
45324 weight = CW_Constant;
45325 }
45326 break;
45327 case 'e':
45328 if (ConstantInt *C = dyn_cast<ConstantInt>(CallOperandVal)) {
45329 if ((C->getSExtValue() >= -0x80000000LL) &&
45330 (C->getSExtValue() <= 0x7fffffffLL))
45331 weight = CW_Constant;
45332 }
45333 break;
45334 case 'Z':
45335 if (ConstantInt *C = dyn_cast<ConstantInt>(CallOperandVal)) {
45336 if (C->getZExtValue() <= 0xffffffff)
45337 weight = CW_Constant;
45338 }
45339 break;
45340 }
45341 return weight;
45342}
45343
45344/// Try to replace an X constraint, which matches anything, with another that
45345/// has more specific requirements based on the type of the corresponding
45346/// operand.
45347const char *X86TargetLowering::
45348LowerXConstraint(EVT ConstraintVT) const {
45349 // FP X constraints get lowered to SSE1/2 registers if available, otherwise
45350 // 'f' like normal targets.
45351 if (ConstraintVT.isFloatingPoint()) {
45352 if (Subtarget.hasSSE2())
45353 return "Y";
45354 if (Subtarget.hasSSE1())
45355 return "x";
45356 }
45357
45358 return TargetLowering::LowerXConstraint(ConstraintVT);
45359}
45360
45361// Lower @cc targets via setcc.
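// The flag output is materialized by copying EFLAGS, converting the requested
// condition into a SETCC, and zero-extending the result to the constraint type.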
45362SDValue X86TargetLowering::LowerAsmOutputForConstraint(
45363 SDValue &Chain, SDValue &Flag, SDLoc DL, const AsmOperandInfo &OpInfo,
45364 SelectionDAG &DAG) const {
45365 X86::CondCode Cond = parseConstraintCode(OpInfo.ConstraintCode);
45366 if (Cond == X86::COND_INVALID)
45367 return SDValue();
45368 // Check that return type is valid.
45369 if (OpInfo.ConstraintVT.isVector() || !OpInfo.ConstraintVT.isInteger() ||
45370 OpInfo.ConstraintVT.getSizeInBits() < 8)
45371 report_fatal_error("Flag output operand is of invalid type");
45372
45373 // Get EFLAGS register. Only update chain when copyfrom is glued.
45374 if (Flag.getNode()) {
45375 Flag = DAG.getCopyFromReg(Chain, DL, X86::EFLAGS, MVT::i32, Flag);
45376 Chain = Flag.getValue(1);
45377 } else
45378 Flag = DAG.getCopyFromReg(Chain, DL, X86::EFLAGS, MVT::i32);
45379 // Extract CC code.
45380 SDValue CC = getSETCC(Cond, Flag, DL, DAG);
45381 // Extend to 32-bits
45382 SDValue Result = DAG.getNode(ISD::ZERO_EXTEND, DL, OpInfo.ConstraintVT, CC);
45383
45384 return Result;
45385}
45386
45387/// Lower the specified operand into the Ops vector.
45388/// If it is invalid, don't add anything to Ops.
45389void X86TargetLowering::LowerAsmOperandForConstraint(SDValue Op,
45390 std::string &Constraint,
45391 std::vector<SDValue>&Ops,
45392 SelectionDAG &DAG) const {
45393 SDValue Result;
45394
45395 // Only support length 1 constraints for now.
45396 if (Constraint.length() > 1) return;
45397
45398 char ConstraintLetter = Constraint[0];
45399 switch (ConstraintLetter) {
45400 default: break;
45401 case 'I':
45402 if (ConstantSDNode *C = dyn_cast<ConstantSDNode>(Op)) {
45403 if (C->getZExtValue() <= 31) {
45404 Result = DAG.getTargetConstant(C->getZExtValue(), SDLoc(Op),
45405 Op.getValueType());
45406 break;
45407 }
45408 }
45409 return;
45410 case 'J':
45411 if (ConstantSDNode *C = dyn_cast<ConstantSDNode>(Op)) {
45412 if (C->getZExtValue() <= 63) {
45413 Result = DAG.getTargetConstant(C->getZExtValue(), SDLoc(Op),
45414 Op.getValueType());
45415 break;
45416 }
45417 }
45418 return;
45419 case 'K':
45420 if (ConstantSDNode *C = dyn_cast<ConstantSDNode>(Op)) {
45421 if (isInt<8>(C->getSExtValue())) {
45422 Result = DAG.getTargetConstant(C->getZExtValue(), SDLoc(Op),
45423 Op.getValueType());
45424 break;
45425 }
45426 }
45427 return;
45428 case 'L':
45429 if (ConstantSDNode *C = dyn_cast<ConstantSDNode>(Op)) {
45430 if (C->getZExtValue() == 0xff || C->getZExtValue() == 0xffff ||
45431 (Subtarget.is64Bit() && C->getZExtValue() == 0xffffffff)) {
45432 Result = DAG.getTargetConstant(C->getSExtValue(), SDLoc(Op),
45433 Op.getValueType());
45434 break;
45435 }
45436 }
45437 return;
45438 case 'M':
45439 if (ConstantSDNode *C = dyn_cast<ConstantSDNode>(Op)) {
45440 if (C->getZExtValue() <= 3) {
45441 Result = DAG.getTargetConstant(C->getZExtValue(), SDLoc(Op),
45442 Op.getValueType());
45443 break;
45444 }
45445 }
45446 return;
45447 case 'N':
45448 if (ConstantSDNode *C = dyn_cast<ConstantSDNode>(Op)) {
45449 if (C->getZExtValue() <= 255) {
45450 Result = DAG.getTargetConstant(C->getZExtValue(), SDLoc(Op),
45451 Op.getValueType());
45452 break;
45453 }
45454 }
45455 return;
45456 case 'O':
45457 if (ConstantSDNode *C = dyn_cast<ConstantSDNode>(Op)) {
45458 if (C->getZExtValue() <= 127) {
45459 Result = DAG.getTargetConstant(C->getZExtValue(), SDLoc(Op),
45460 Op.getValueType());
45461 break;
45462 }
45463 }
45464 return;
45465 case 'e': {
45466 // 32-bit signed value
45467 if (ConstantSDNode *C = dyn_cast<ConstantSDNode>(Op)) {
45468 if (ConstantInt::isValueValidForType(Type::getInt32Ty(*DAG.getContext()),
45469 C->getSExtValue())) {
45470 // Widen to 64 bits here to get it sign extended.
45471 Result = DAG.getTargetConstant(C->getSExtValue(), SDLoc(Op), MVT::i64);
45472 break;
45473 }
45474 // FIXME gcc accepts some relocatable values here too, but only in certain
45475 // memory models; it's complicated.
45476 }
45477 return;
45478 }
45479 case 'Z': {
45480 // 32-bit unsigned value
45481 if (ConstantSDNode *C = dyn_cast<ConstantSDNode>(Op)) {
45482 if (ConstantInt::isValueValidForType(Type::getInt32Ty(*DAG.getContext()),
45483 C->getZExtValue())) {
45484 Result = DAG.getTargetConstant(C->getZExtValue(), SDLoc(Op),
45485 Op.getValueType());
45486 break;
45487 }
45488 }
45489 // FIXME gcc accepts some relocatable values here too, but only in certain
45490 // memory models; it's complicated.
45491 return;
45492 }
45493 case 'i': {
45494 // Literal immediates are always ok.
45495 if (ConstantSDNode *CST = dyn_cast<ConstantSDNode>(Op)) {
45496 bool IsBool = CST->getConstantIntValue()->getBitWidth() == 1;
45497 BooleanContent BCont = getBooleanContents(MVT::i64);
45498 ISD::NodeType ExtOpc = IsBool ? getExtendForContent(BCont)
45499 : ISD::SIGN_EXTEND;
45500 int64_t ExtVal = ExtOpc == ISD::ZERO_EXTEND ? CST->getZExtValue()
45501 : CST->getSExtValue();
45502 Result = DAG.getTargetConstant(ExtVal, SDLoc(Op), MVT::i64);
45503 break;
45504 }
45505
45506 // In any sort of PIC mode addresses need to be computed at runtime by
45507 // adding in a register or some sort of table lookup. These can't
45508 // be used as immediates.
45509 if (Subtarget.isPICStyleGOT() || Subtarget.isPICStyleStubPIC())
45510 return;
45511
45512 // If we are in non-pic codegen mode, we allow the address of a global (with
45513 // an optional displacement) to be used with 'i'.
45514 if (auto *GA = dyn_cast<GlobalAddressSDNode>(Op))
45515 // If we require an extra load to get this address, as in PIC mode, we
45516 // can't accept it.
45517 if (isGlobalStubReference(
45518 Subtarget.classifyGlobalReference(GA->getGlobal())))
45519 return;
45520 break;
45521 }
45522 }
45523
45524 if (Result.getNode()) {
45525 Ops.push_back(Result);
45526 return;
45527 }
45528 return TargetLowering::LowerAsmOperandForConstraint(Op, Constraint, Ops, DAG);
45529}
45530
45531/// Check if \p RC is a general purpose register class.
45532/// I.e., GR* or one of their variant.
45533static bool isGRClass(const TargetRegisterClass &RC) {
45534 return RC.hasSuperClassEq(&X86::GR8RegClass) ||
45535 RC.hasSuperClassEq(&X86::GR16RegClass) ||
45536 RC.hasSuperClassEq(&X86::GR32RegClass) ||
45537 RC.hasSuperClassEq(&X86::GR64RegClass) ||
45538 RC.hasSuperClassEq(&X86::LOW32_ADDR_ACCESS_RBPRegClass);
45539}
45540
45541/// Check if \p RC is a vector register class.
45542/// I.e., FR* / VR* or one of their variant.
45543static bool isFRClass(const TargetRegisterClass &RC) {
45544 return RC.hasSuperClassEq(&X86::FR32XRegClass) ||
45545 RC.hasSuperClassEq(&X86::FR64XRegClass) ||
45546 RC.hasSuperClassEq(&X86::VR128XRegClass) ||
45547 RC.hasSuperClassEq(&X86::VR256XRegClass) ||
45548 RC.hasSuperClassEq(&X86::VR512RegClass);
45549}
45550
45551/// Check if \p RC is a mask register class.
45552/// I.e., VK* or one of their variant.
45553static bool isVKClass(const TargetRegisterClass &RC) {
45554 return RC.hasSuperClassEq(&X86::VK1RegClass) ||
45555 RC.hasSuperClassEq(&X86::VK2RegClass) ||
45556 RC.hasSuperClassEq(&X86::VK4RegClass) ||
45557 RC.hasSuperClassEq(&X86::VK8RegClass) ||
45558 RC.hasSuperClassEq(&X86::VK16RegClass) ||
45559 RC.hasSuperClassEq(&X86::VK32RegClass) ||
45560 RC.hasSuperClassEq(&X86::VK64RegClass);
45561}
45562
45563std::pair<unsigned, const TargetRegisterClass *>
45564X86TargetLowering::getRegForInlineAsmConstraint(const TargetRegisterInfo *TRI,
45565 StringRef Constraint,
45566 MVT VT) const {
45567 // First, see if this is a constraint that directly corresponds to an LLVM
45568 // register class.
45569 if (Constraint.size() == 1) {
45570 // GCC Constraint Letters
45571 switch (Constraint[0]) {
45572 default: break;
45573 // 'A' means [ER]AX + [ER]DX.
45574 case 'A':
45575 if (Subtarget.is64Bit())
45576 return std::make_pair(X86::RAX, &X86::GR64_ADRegClass);
45577 assert((Subtarget.is32Bit() || Subtarget.is16Bit()) &&
45578        "Expecting 64, 32 or 16 bit subtarget");
45579 return std::make_pair(X86::EAX, &X86::GR32_ADRegClass);
45580
45581 // TODO: Slight differences here in allocation order and leaving
45582 // RIP in the class. Do they matter any more here than they do
45583 // in the normal allocation?
45584 case 'k':
45585 if (Subtarget.hasAVX512()) {
45586 if (VT == MVT::i1)
45587 return std::make_pair(0U, &X86::VK1RegClass);
45588 if (VT == MVT::i8)
45589 return std::make_pair(0U, &X86::VK8RegClass);
45590 if (VT == MVT::i16)
45591 return std::make_pair(0U, &X86::VK16RegClass);
45592 }
45593 if (Subtarget.hasBWI()) {
45594 if (VT == MVT::i32)
45595 return std::make_pair(0U, &X86::VK32RegClass);
45596 if (VT == MVT::i64)
45597 return std::make_pair(0U, &X86::VK64RegClass);
45598 }
45599 break;
45600 case 'q': // GENERAL_REGS in 64-bit mode, Q_REGS in 32-bit mode.
45601 if (Subtarget.is64Bit()) {
45602 if (VT == MVT::i32 || VT == MVT::f32)
45603 return std::make_pair(0U, &X86::GR32RegClass);
45604 if (VT == MVT::i16)
45605 return std::make_pair(0U, &X86::GR16RegClass);
45606 if (VT == MVT::i8 || VT == MVT::i1)
45607 return std::make_pair(0U, &X86::GR8RegClass);
45608 if (VT == MVT::i64 || VT == MVT::f64)
45609 return std::make_pair(0U, &X86::GR64RegClass);
45610 break;
45611 }
45612 LLVM_FALLTHROUGH;
45613 // 32-bit fallthrough
45614 case 'Q': // Q_REGS
45615 if (VT == MVT::i32 || VT == MVT::f32)
45616 return std::make_pair(0U, &X86::GR32_ABCDRegClass);
45617 if (VT == MVT::i16)
45618 return std::make_pair(0U, &X86::GR16_ABCDRegClass);
45619 if (VT == MVT::i8 || VT == MVT::i1)
45620 return std::make_pair(0U, &X86::GR8_ABCD_LRegClass);
45621 if (VT == MVT::i64)
45622 return std::make_pair(0U, &X86::GR64_ABCDRegClass);
45623 break;
45624 case 'r': // GENERAL_REGS
45625 case 'l': // INDEX_REGS
45626 if (VT == MVT::i8 || VT == MVT::i1)
45627 return std::make_pair(0U, &X86::GR8RegClass);
45628 if (VT == MVT::i16)
45629 return std::make_pair(0U, &X86::GR16RegClass);
45630 if (VT == MVT::i32 || VT == MVT::f32 || !Subtarget.is64Bit())
45631 return std::make_pair(0U, &X86::GR32RegClass);
45632 return std::make_pair(0U, &X86::GR64RegClass);
45633 case 'R': // LEGACY_REGS
45634 if (VT == MVT::i8 || VT == MVT::i1)
45635 return std::make_pair(0U, &X86::GR8_NOREXRegClass);
45636 if (VT == MVT::i16)
45637 return std::make_pair(0U, &X86::GR16_NOREXRegClass);
45638 if (VT == MVT::i32 || !Subtarget.is64Bit())
45639 return std::make_pair(0U, &X86::GR32_NOREXRegClass);
45640 return std::make_pair(0U, &X86::GR64_NOREXRegClass);
45641 case 'f': // FP Stack registers.
45642 // If SSE is enabled for this VT, use f80 to ensure the isel moves the
45643 // value to the correct fpstack register class.
45644 if (VT == MVT::f32 && !isScalarFPTypeInSSEReg(VT))
45645 return std::make_pair(0U, &X86::RFP32RegClass);
45646 if (VT == MVT::f64 && !isScalarFPTypeInSSEReg(VT))
45647 return std::make_pair(0U, &X86::RFP64RegClass);
45648 return std::make_pair(0U, &X86::RFP80RegClass);
45649 case 'y': // MMX_REGS if MMX allowed.
45650 if (!Subtarget.hasMMX()) break;
45651 return std::make_pair(0U, &X86::VR64RegClass);
45652 case 'Y': // SSE_REGS if SSE2 allowed
45653 if (!Subtarget.hasSSE2()) break;
45654 LLVM_FALLTHROUGH;
45655 case 'v':
45656 case 'x': // SSE_REGS if SSE1 allowed or AVX_REGS if AVX allowed
45657 if (!Subtarget.hasSSE1()) break;
45658 bool VConstraint = (Constraint[0] == 'v');
45659
45660 switch (VT.SimpleTy) {
45661 default: break;
45662 // Scalar SSE types.
45663 case MVT::f32:
45664 case MVT::i32:
45665 if (VConstraint && Subtarget.hasVLX())
45666 return std::make_pair(0U, &X86::FR32XRegClass);
45667 return std::make_pair(0U, &X86::FR32RegClass);
45668 case MVT::f64:
45669 case MVT::i64:
45670 if (VConstraint && Subtarget.hasVLX())
45671 return std::make_pair(0U, &X86::FR64XRegClass);
45672 return std::make_pair(0U, &X86::FR64RegClass);
45673 // TODO: Handle i128 in FR128RegClass after it is tested well.
45674 // Vector types and fp128.
45675 case MVT::f128:
45676 case MVT::v16i8:
45677 case MVT::v8i16:
45678 case MVT::v4i32:
45679 case MVT::v2i64:
45680 case MVT::v4f32:
45681 case MVT::v2f64:
45682 if (VConstraint && Subtarget.hasVLX())
45683 return std::make_pair(0U, &X86::VR128XRegClass);
45684 return std::make_pair(0U, &X86::VR128RegClass);
45685 // AVX types.
45686 case MVT::v32i8:
45687 case MVT::v16i16:
45688 case MVT::v8i32:
45689 case MVT::v4i64:
45690 case MVT::v8f32:
45691 case MVT::v4f64:
45692 if (VConstraint && Subtarget.hasVLX())
45693 return std::make_pair(0U, &X86::VR256XRegClass);
45694 if (Subtarget.hasAVX())
45695 return std::make_pair(0U, &X86::VR256RegClass);
45696 break;
45697 case MVT::v8f64:
45698 case MVT::v16f32:
45699 case MVT::v16i32:
45700 case MVT::v8i64:
45701 if (!Subtarget.hasAVX512()) break;
45702 if (VConstraint)
45703 return std::make_pair(0U, &X86::VR512RegClass);
45704 return std::make_pair(0U, &X86::VR512_0_15RegClass);
45705 }
45706 break;
45707 }
45708 } else if (Constraint.size() == 2 && Constraint[0] == 'Y') {
45709 switch (Constraint[1]) {
45710 default:
45711 break;
45712 case 'i':
45713 case 't':
45714 case '2':
45715 return getRegForInlineAsmConstraint(TRI, "Y", VT);
45716 case 'm':
45717 if (!Subtarget.hasMMX()) break;
45718 return std::make_pair(0U, &X86::VR64RegClass);
45719 case 'z':
45720 case '0':
45721 if (!Subtarget.hasSSE1()) break;
45722 return std::make_pair(X86::XMM0, &X86::VR128RegClass);
45723 case 'k':
45724 // This register class doesn't allocate k0 for masked vector operations.
45725 if (Subtarget.hasAVX512()) {
45726 if (VT == MVT::i1)
45727 return std::make_pair(0U, &X86::VK1WMRegClass);
45728 if (VT == MVT::i8)
45729 return std::make_pair(0U, &X86::VK8WMRegClass);
45730 if (VT == MVT::i16)
45731 return std::make_pair(0U, &X86::VK16WMRegClass);
45732 }
45733 if (Subtarget.hasBWI()) {
45734 if (VT == MVT::i32)
45735 return std::make_pair(0U, &X86::VK32WMRegClass);
45736 if (VT == MVT::i64)
45737 return std::make_pair(0U, &X86::VK64WMRegClass);
45738 }
45739 break;
45740 }
45741 }
45742
45743 if (parseConstraintCode(Constraint) != X86::COND_INVALID)
45744 return std::make_pair(0U, &X86::GR32RegClass);
45745
45746 // Use the default implementation in TargetLowering to convert the register
45747 // constraint into a member of a register class.
45748 std::pair<unsigned, const TargetRegisterClass*> Res;
45749 Res = TargetLowering::getRegForInlineAsmConstraint(TRI, Constraint, VT);
45750
45751 // Not found as a standard register?
45752 if (!Res.second) {
45753 // Map st(0) -> st(7) -> ST0
45754 if (Constraint.size() == 7 && Constraint[0] == '{' &&
45755 tolower(Constraint[1]) == 's' && tolower(Constraint[2]) == 't' &&
45756 Constraint[3] == '(' &&
45757 (Constraint[4] >= '0' && Constraint[4] <= '7') &&
45758 Constraint[5] == ')' && Constraint[6] == '}') {
45759 // st(7) is not allocatable and thus not a member of RFP80. Return
45760 // singleton class in cases where we have a reference to it.
45761 if (Constraint[4] == '7')
45762 return std::make_pair(X86::FP7, &X86::RFP80_7RegClass);
45763 return std::make_pair(X86::FP0 + Constraint[4] - '0',
45764 &X86::RFP80RegClass);
45765 }
45766
45767 // GCC allows "st(0)" to be called just plain "st".
45768 if (StringRef("{st}").equals_lower(Constraint))
45769 return std::make_pair(X86::FP0, &X86::RFP80RegClass);
45770
45771 // flags -> EFLAGS
45772 if (StringRef("{flags}").equals_lower(Constraint))
45773 return std::make_pair(X86::EFLAGS, &X86::CCRRegClass);
45774
45775 // dirflag -> DF
45776 if (StringRef("{dirflag}").equals_lower(Constraint))
45777 return std::make_pair(X86::DF, &X86::DFCCRRegClass);
45778
45779 // fpsr -> FPSW
45780 if (StringRef("{fpsr}").equals_lower(Constraint))
45781 return std::make_pair(X86::FPSW, &X86::FPCCRRegClass);
45782
45783 return Res;
45784 }
45785
45786 // Make sure it isn't a register that requires 64-bit mode.
45787 if (!Subtarget.is64Bit() &&
45788 (isFRClass(*Res.second) || isGRClass(*Res.second)) &&
45789 TRI->getEncodingValue(Res.first) >= 8) {
45790 // Register requires REX prefix, but we're in 32-bit mode.
45791 return std::make_pair(0, nullptr);
45792 }
45793
45794 // Make sure it isn't a register that requires AVX512.
45795 if (!Subtarget.hasAVX512() && isFRClass(*Res.second) &&
45796 TRI->getEncodingValue(Res.first) & 0x10) {
45797 // Register requires EVEX prefix.
45798 return std::make_pair(0, nullptr);
45799 }
45800
45801 // Otherwise, check to see if this is a register class of the wrong value
45802 // type. For example, we want to map "{ax},i32" -> {eax}, we don't want it to
45803 // turn into {ax},{dx}.
45804 // MVT::Other is used to specify clobber names.
45805 if (TRI->isTypeLegalForClass(*Res.second, VT) || VT == MVT::Other)
45806 return Res; // Correct type already, nothing to do.
45807
45808 // Get a matching integer of the correct size, i.e. "ax" with MVT::i32 should
45809 // return "eax". This should even work for things like getting 64-bit integer
45810 // registers when given an f64 type.
45811 const TargetRegisterClass *Class = Res.second;
45812 // The generic code will match the first register class that contains the
45813 // given register. Thus, based on the ordering of the tablegened file,
45814 // the "plain" GR classes might not come first.
45815 // Therefore, use a helper method.
45816 if (isGRClass(*Class)) {
45817 unsigned Size = VT.getSizeInBits();
45818 if (Size == 1) Size = 8;
45819 unsigned DestReg = getX86SubSuperRegisterOrZero(Res.first, Size);
45820 if (DestReg > 0) {
45821 bool is64Bit = Subtarget.is64Bit();
45822 const TargetRegisterClass *RC =
45823 Size == 8 ? (is64Bit ? &X86::GR8RegClass : &X86::GR8_NOREXRegClass)
45824 : Size == 16 ? (is64Bit ? &X86::GR16RegClass : &X86::GR16_NOREXRegClass)
45825 : Size == 32 ? (is64Bit ? &X86::GR32RegClass : &X86::GR32_NOREXRegClass)
45826 : Size == 64 ? (is64Bit ? &X86::GR64RegClass : nullptr)
45827 : nullptr;
45828 if (Size == 64 && !is64Bit) {
45829 // Model GCC's behavior here and select a fixed pair of 32-bit
45830 // registers.
45831 switch (DestReg) {
45832 case X86::RAX:
45833 return std::make_pair(X86::EAX, &X86::GR32_ADRegClass);
45834 case X86::RDX:
45835 return std::make_pair(X86::EDX, &X86::GR32_DCRegClass);
45836 case X86::RCX:
45837 return std::make_pair(X86::ECX, &X86::GR32_CBRegClass);
45838 case X86::RBX:
45839 return std::make_pair(X86::EBX, &X86::GR32_BSIRegClass);
45840 case X86::RSI:
45841 return std::make_pair(X86::ESI, &X86::GR32_SIDIRegClass);
45842 case X86::RDI:
45843 return std::make_pair(X86::EDI, &X86::GR32_DIBPRegClass);
45844 case X86::RBP:
45845 return std::make_pair(X86::EBP, &X86::GR32_BPSPRegClass);
45846 default:
45847 return std::make_pair(0, nullptr);
45848 }
45849 }
45850 if (RC && RC->contains(DestReg))
45851 return std::make_pair(DestReg, RC);
45852 return Res;
45853 }
45854 // No register found/type mismatch.
45855 return std::make_pair(0, nullptr);
45856 } else if (isFRClass(*Class)) {
45857 // Handle references to XMM physical registers that got mapped into the
45858 // wrong class. This can happen with constraints like {xmm0} where the
45859 // target independent register mapper will just pick the first match it can
45860 // find, ignoring the required type.
45861
45862 // TODO: Handle f128 and i128 in FR128RegClass after it is tested well.
45863 if (VT == MVT::f32 || VT == MVT::i32)
45864 Res.second = &X86::FR32XRegClass;
45865 else if (VT == MVT::f64 || VT == MVT::i64)
45866 Res.second = &X86::FR64XRegClass;
45867 else if (TRI->isTypeLegalForClass(X86::VR128XRegClass, VT))
45868 Res.second = &X86::VR128XRegClass;
45869 else if (TRI->isTypeLegalForClass(X86::VR256XRegClass, VT))
45870 Res.second = &X86::VR256XRegClass;
45871 else if (TRI->isTypeLegalForClass(X86::VR512RegClass, VT))
45872 Res.second = &X86::VR512RegClass;
45873 else {
45874 // Type mismatch and not a clobber: Return an error;
45875 Res.first = 0;
45876 Res.second = nullptr;
45877 }
45878 } else if (isVKClass(*Class)) {
45879 if (VT == MVT::i1)
45880 Res.second = &X86::VK1RegClass;
45881 else if (VT == MVT::i8)
45882 Res.second = &X86::VK8RegClass;
45883 else if (VT == MVT::i16)
45884 Res.second = &X86::VK16RegClass;
45885 else if (VT == MVT::i32)
45886 Res.second = &X86::VK32RegClass;
45887 else if (VT == MVT::i64)
45888 Res.second = &X86::VK64RegClass;
45889 else {
45890 // Type mismatch and not a clobber: Return an error;
45891 Res.first = 0;
45892 Res.second = nullptr;
45893 }
45894 }
45895
45896 return Res;
45897}
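The {st(N)} fallback above is plain string matching over a seven-character constraint. A minimal standalone sketch of the same check (a hypothetical helper for illustration, not part of the LLVM API):

#include <cctype>
#include <string>

// Returns the stack index N for a constraint of the form "{st(N)}" with
// N in 0..7, or -1 if the string does not match that shape. Mirrors the
// checks performed in the function above; illustrative only.
static int parseStackRegConstraint(const std::string &C) {
  if (C.size() != 7 || C[0] != '{' || C[6] != '}')
    return -1;
  if (std::tolower(C[1]) != 's' || std::tolower(C[2]) != 't')
    return -1;
  if (C[3] != '(' || C[5] != ')')
    return -1;
  if (C[4] < '0' || C[4] > '7')
    return -1;
  return C[4] - '0';
}

// parseStackRegConstraint("{st(3)}") == 3; "{st}" and "{st(8)}" both return -1.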
45898
45899int X86TargetLowering::getScalingFactorCost(const DataLayout &DL,
45900 const AddrMode &AM, Type *Ty,
45901 unsigned AS) const {
45902 // Scaling factors are not free at all.
45903 // An indexed folded instruction, i.e., inst (reg1, reg2, scale),
45904 // will take 2 allocations in the out of order engine instead of 1
45905 // for plain addressing mode, i.e. inst (reg1).
45906 // E.g.,
45907 // vaddps (%rsi,%rdx), %ymm0, %ymm1
45908 // Requires two allocations (one for the load, one for the computation)
45909 // whereas:
45910 // vaddps (%rsi), %ymm0, %ymm1
45911 // Requires just 1 allocation, i.e., freeing allocations for other operations
45912 // and having less micro operations to execute.
45913 //
45914 // For some X86 architectures, this is even worse because for instance for
45915 // stores, the complex addressing mode forces the instruction to use the
45916 // "load" ports instead of the dedicated "store" port.
45917 // E.g., on Haswell:
45918 // vmovaps %ymm1, (%r8, %rdi) can use port 2 or 3.
45919 // vmovaps %ymm1, (%r8) can use port 2, 3, or 7.
45920 if (isLegalAddressingMode(DL, AM, Ty, AS))
45921 // Scale represents reg2 * scale, thus account for 1
45922 // as soon as we use a second register.
45923 return AM.Scale != 0;
45924 return -1;
45925}
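As the comment above explains, a non-zero scale means a second register is in play and costs one extra allocation, while an illegal mode is rejected outright. A minimal sketch of that decision, using a hypothetical stand-in struct rather than the real TargetLowering types:

// Hypothetical stand-in for the addressing-mode description; only the field
// needed for the cost decision is modeled.
struct ToyAddrMode {
  long long Scale = 0; // reg2 * Scale; 0 means no index register
};

// Mirrors the logic above: a legal mode costs 1 if it uses a scaled index
// register, 0 otherwise; an illegal mode is reported as -1.
static int scalingFactorCost(const ToyAddrMode &AM, bool IsLegal) {
  if (IsLegal)
    return AM.Scale != 0; // second register in use -> one extra allocation
  return -1;
}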
45926
45927bool X86TargetLowering::isIntDivCheap(EVT VT, AttributeList Attr) const {
45928 // Integer division on x86 is expensive. However, when aggressively optimizing
45929 // for code size, we prefer to use a div instruction, as it is usually smaller
45930 // than the alternative sequence.
45931 // The exception to this is vector division. Since x86 doesn't have vector
45932 // integer division, leaving the division as-is is a loss even in terms of
45933 // size, because it will have to be scalarized, while the alternative code
45934 // sequence can be performed in vector form.
45935 bool OptSize =
45936 Attr.hasAttribute(AttributeList::FunctionIndex, Attribute::MinSize);
45937 return OptSize && !VT.isVector();
45938}
45939
45940void X86TargetLowering::initializeSplitCSR(MachineBasicBlock *Entry) const {
45941 if (!Subtarget.is64Bit())
45942 return;
45943
45944 // Update IsSplitCSR in X86MachineFunctionInfo.
45945 X86MachineFunctionInfo *AFI =
45946 Entry->getParent()->getInfo<X86MachineFunctionInfo>();
45947 AFI->setIsSplitCSR(true);
45948}
45949
45950void X86TargetLowering::insertCopiesSplitCSR(
45951 MachineBasicBlock *Entry,
45952 const SmallVectorImpl<MachineBasicBlock *> &Exits) const {
45953 const X86RegisterInfo *TRI = Subtarget.getRegisterInfo();
45954 const MCPhysReg *IStart = TRI->getCalleeSavedRegsViaCopy(Entry->getParent());
45955 if (!IStart)
45956 return;
45957
45958 const TargetInstrInfo *TII = Subtarget.getInstrInfo();
45959 MachineRegisterInfo *MRI = &Entry->getParent()->getRegInfo();
45960 MachineBasicBlock::iterator MBBI = Entry->begin();
45961 for (const MCPhysReg *I = IStart; *I; ++I) {
45962 const TargetRegisterClass *RC = nullptr;
45963 if (X86::GR64RegClass.contains(*I))
45964 RC = &X86::GR64RegClass;
45965 else
45966 llvm_unreachable("Unexpected register class in CSRsViaCopy!")::llvm::llvm_unreachable_internal("Unexpected register class in CSRsViaCopy!"
, "/build/llvm-toolchain-snapshot-10~svn374877/lib/Target/X86/X86ISelLowering.cpp"
, 45966)
;
45967
45968 Register NewVR = MRI->createVirtualRegister(RC);
45969 // Create copy from CSR to a virtual register.
45970 // FIXME: this currently does not emit CFI pseudo-instructions, it works
45971 // fine for CXX_FAST_TLS since the C++-style TLS access functions should be
45972 // nounwind. If we want to generalize this later, we may need to emit
45973 // CFI pseudo-instructions.
45974 assert(
45975 Entry->getParent()->getFunction().hasFnAttribute(Attribute::NoUnwind) &&
45976 "Function should be nounwind in insertCopiesSplitCSR!");
45977 Entry->addLiveIn(*I);
45978 BuildMI(*Entry, MBBI, DebugLoc(), TII->get(TargetOpcode::COPY), NewVR)
45979 .addReg(*I);
45980
45981 // Insert the copy-back instructions right before the terminator.
45982 for (auto *Exit : Exits)
45983 BuildMI(*Exit, Exit->getFirstTerminator(), DebugLoc(),
45984 TII->get(TargetOpcode::COPY), *I)
45985 .addReg(NewVR);
45986 }
45987}
45988
45989bool X86TargetLowering::supportSwiftError() const {
45990 return Subtarget.is64Bit();
45991}
45992
45993/// Returns the name of the symbol used to emit stack probes or the empty
45994/// string if not applicable.
45995StringRef
45996X86TargetLowering::getStackProbeSymbolName(MachineFunction &MF) const {
45997 // If the function specifically requests stack probes, emit them.
45998 if (MF.getFunction().hasFnAttribute("probe-stack"))
45999 return MF.getFunction().getFnAttribute("probe-stack").getValueAsString();
46000
46001 // Generally, if we aren't on Windows, the platform ABI does not include
46002 // support for stack probes, so don't emit them.
46003 if (!Subtarget.isOSWindows() || Subtarget.isTargetMachO() ||
46004 MF.getFunction().hasFnAttribute("no-stack-arg-probe"))
46005 return "";
46006
46007 // We need a stack probe to conform to the Windows ABI. Choose the right
46008 // symbol.
46009 if (Subtarget.is64Bit())
46010 return Subtarget.isTargetCygMing() ? "___chkstk_ms" : "__chkstk";
46011 return Subtarget.isTargetCygMing() ? "_alloca" : "_chkstk";
46012}
46013
46014unsigned
46015X86TargetLowering::getStackProbeSize(MachineFunction &MF) const {
46016 // The default stack probe size is 4096 if the function has no
46017 // "stack-probe-size" attribute.
46018 unsigned StackProbeSize = 4096;
46019 const Function &Fn = MF.getFunction();
46020 if (Fn.hasFnAttribute("stack-probe-size"))
46021 Fn.getFnAttribute("stack-probe-size")
46022 .getValueAsString()
46023 .getAsInteger(0, StackProbeSize);
46024 return StackProbeSize;
46025}
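The attribute value is parsed with StringRef::getAsInteger; for an unsigned destination it only overwrites the result on a successful parse, so the 4096 default survives a malformed attribute. A small usage sketch, assuming the LLVM headers are on the include path:

#include "llvm/ADT/StringRef.h"

// Radix 0 auto-detects the base; getAsInteger returns true on failure and
// leaves the unsigned destination untouched, preserving the default.
unsigned probeSizeFromAttr(llvm::StringRef Value) {
  unsigned StackProbeSize = 4096;
  (void)Value.getAsInteger(0, StackProbeSize);
  return StackProbeSize; // e.g. "8192" -> 8192, "bogus" -> 4096
}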

/build/llvm-toolchain-snapshot-10~svn374877/include/llvm/Support/MachineValueType.h

1//===- Support/MachineValueType.h - Machine-Level types ---------*- C++ -*-===//
2//
3// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
4// See https://llvm.org/LICENSE.txt for license information.
5// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
6//
7//===----------------------------------------------------------------------===//
8//
9// This file defines the set of machine-level target independent types which
10// legal values in the code generator use.
11//
12//===----------------------------------------------------------------------===//
13
14#ifndef LLVM_SUPPORT_MACHINEVALUETYPE_H
15#define LLVM_SUPPORT_MACHINEVALUETYPE_H
16
17#include "llvm/ADT/iterator_range.h"
18#include "llvm/Support/ErrorHandling.h"
19#include "llvm/Support/MathExtras.h"
20#include "llvm/Support/TypeSize.h"
21#include <cassert>
22
23namespace llvm {
24
25 class Type;
26
27 /// Machine Value Type. Every type that is supported natively by some
28 /// processor targeted by LLVM occurs here. This means that any legal value
29 /// type can be represented by an MVT.
30 class MVT {
31 public:
32 enum SimpleValueType : uint8_t {
33 // Simple value types that aren't explicitly part of this enumeration
34 // are considered extended value types.
35 INVALID_SIMPLE_VALUE_TYPE = 0,
36
37 // If you change this numbering, you must change the values in
38 // ValueTypes.td as well!
39 Other = 1, // This is a non-standard value
40 i1 = 2, // This is a 1 bit integer value
41 i8 = 3, // This is an 8 bit integer value
42 i16 = 4, // This is a 16 bit integer value
43 i32 = 5, // This is a 32 bit integer value
44 i64 = 6, // This is a 64 bit integer value
45 i128 = 7, // This is a 128 bit integer value
46
47 FIRST_INTEGER_VALUETYPE = i1,
48 LAST_INTEGER_VALUETYPE = i128,
49
50 f16 = 8, // This is a 16 bit floating point value
51 f32 = 9, // This is a 32 bit floating point value
52 f64 = 10, // This is a 64 bit floating point value
53 f80 = 11, // This is an 80 bit floating point value
54 f128 = 12, // This is a 128 bit floating point value
55 ppcf128 = 13, // This is a PPC 128-bit floating point value
56
57 FIRST_FP_VALUETYPE = f16,
58 LAST_FP_VALUETYPE = ppcf128,
59
60 v1i1 = 14, // 1 x i1
61 v2i1 = 15, // 2 x i1
62 v4i1 = 16, // 4 x i1
63 v8i1 = 17, // 8 x i1
64 v16i1 = 18, // 16 x i1
65 v32i1 = 19, // 32 x i1
66 v64i1 = 20, // 64 x i1
67 v128i1 = 21, // 128 x i1
68 v256i1 = 22, // 256 x i1
69 v512i1 = 23, // 512 x i1
70 v1024i1 = 24, // 1024 x i1
71
72 v1i8 = 25, // 1 x i8
73 v2i8 = 26, // 2 x i8
74 v4i8 = 27, // 4 x i8
75 v8i8 = 28, // 8 x i8
76 v16i8 = 29, // 16 x i8
77 v32i8 = 30, // 32 x i8
78 v64i8 = 31, // 64 x i8
79 v128i8 = 32, //128 x i8
80 v256i8 = 33, //256 x i8
81
82 v1i16 = 34, // 1 x i16
83 v2i16 = 35, // 2 x i16
84 v3i16 = 36, // 3 x i16
85 v4i16 = 37, // 4 x i16
86 v8i16 = 38, // 8 x i16
87 v16i16 = 39, // 16 x i16
88 v32i16 = 40, // 32 x i16
89 v64i16 = 41, // 64 x i16
90 v128i16 = 42, //128 x i16
91
92 v1i32 = 43, // 1 x i32
93 v2i32 = 44, // 2 x i32
94 v3i32 = 45, // 3 x i32
95 v4i32 = 46, // 4 x i32
96 v5i32 = 47, // 5 x i32
97 v8i32 = 48, // 8 x i32
98 v16i32 = 49, // 16 x i32
99 v32i32 = 50, // 32 x i32
100 v64i32 = 51, // 64 x i32
101 v128i32 = 52, // 128 x i32
102 v256i32 = 53, // 256 x i32
103 v512i32 = 54, // 512 x i32
104 v1024i32 = 55, // 1024 x i32
105 v2048i32 = 56, // 2048 x i32
106
107 v1i64 = 57, // 1 x i64
108 v2i64 = 58, // 2 x i64
109 v4i64 = 59, // 4 x i64
110 v8i64 = 60, // 8 x i64
111 v16i64 = 61, // 16 x i64
112 v32i64 = 62, // 32 x i64
113
114 v1i128 = 63, // 1 x i128
115
116 FIRST_INTEGER_FIXEDLEN_VECTOR_VALUETYPE = v1i1,
117 LAST_INTEGER_FIXEDLEN_VECTOR_VALUETYPE = v1i128,
118
119 v2f16 = 64, // 2 x f16
120 v3f16 = 65, // 3 x f16
121 v4f16 = 66, // 4 x f16
122 v8f16 = 67, // 8 x f16
123 v16f16 = 68, // 16 x f16
124 v32f16 = 69, // 32 x f16
125 v1f32 = 70, // 1 x f32
126 v2f32 = 71, // 2 x f32
127 v3f32 = 72, // 3 x f32
128 v4f32 = 73, // 4 x f32
129 v5f32 = 74, // 5 x f32
130 v8f32 = 75, // 8 x f32
131 v16f32 = 76, // 16 x f32
132 v32f32 = 77, // 32 x f32
133 v64f32 = 78, // 64 x f32
134 v128f32 = 79, // 128 x f32
135 v256f32 = 80, // 256 x f32
136 v512f32 = 81, // 512 x f32
137 v1024f32 = 82, // 1024 x f32
138 v2048f32 = 83, // 2048 x f32
139 v1f64 = 84, // 1 x f64
140 v2f64 = 85, // 2 x f64
141 v4f64 = 86, // 4 x f64
142 v8f64 = 87, // 8 x f64
143
144 FIRST_FP_FIXEDLEN_VECTOR_VALUETYPE = v2f16,
145 LAST_FP_FIXEDLEN_VECTOR_VALUETYPE = v8f64,
146
147 FIRST_FIXEDLEN_VECTOR_VALUETYPE = v1i1,
148 LAST_FIXEDLEN_VECTOR_VALUETYPE = v8f64,
149
150 nxv1i1 = 88, // n x 1 x i1
151 nxv2i1 = 89, // n x 2 x i1
152 nxv4i1 = 90, // n x 4 x i1
153 nxv8i1 = 91, // n x 8 x i1
154 nxv16i1 = 92, // n x 16 x i1
155 nxv32i1 = 93, // n x 32 x i1
156
157 nxv1i8 = 94, // n x 1 x i8
158 nxv2i8 = 95, // n x 2 x i8
159 nxv4i8 = 96, // n x 4 x i8
160 nxv8i8 = 97, // n x 8 x i8
161 nxv16i8 = 98, // n x 16 x i8
162 nxv32i8 = 99, // n x 32 x i8
163
164 nxv1i16 = 100, // n x 1 x i16
165 nxv2i16 = 101, // n x 2 x i16
166 nxv4i16 = 102, // n x 4 x i16
167 nxv8i16 = 103, // n x 8 x i16
168 nxv16i16 = 104, // n x 16 x i16
169 nxv32i16 = 105, // n x 32 x i16
170
171 nxv1i32 = 106, // n x 1 x i32
172 nxv2i32 = 107, // n x 2 x i32
173 nxv4i32 = 108, // n x 4 x i32
174 nxv8i32 = 109, // n x 8 x i32
175 nxv16i32 = 110, // n x 16 x i32
176 nxv32i32 = 111, // n x 32 x i32
177
178 nxv1i64 = 112, // n x 1 x i64
179 nxv2i64 = 113, // n x 2 x i64
180 nxv4i64 = 114, // n x 4 x i64
181 nxv8i64 = 115, // n x 8 x i64
182 nxv16i64 = 116, // n x 16 x i64
183 nxv32i64 = 117, // n x 32 x i64
184
185 FIRST_INTEGER_SCALABLE_VECTOR_VALUETYPE = nxv1i1,
186 LAST_INTEGER_SCALABLE_VECTOR_VALUETYPE = nxv32i64,
187
188 nxv2f16 = 118, // n x 2 x f16
189 nxv4f16 = 119, // n x 4 x f16
190 nxv8f16 = 120, // n x 8 x f16
191 nxv1f32 = 121, // n x 1 x f32
192 nxv2f32 = 122, // n x 2 x f32
193 nxv4f32 = 123, // n x 4 x f32
194 nxv8f32 = 124, // n x 8 x f32
195 nxv16f32 = 125, // n x 16 x f32
196 nxv1f64 = 126, // n x 1 x f64
197 nxv2f64 = 127, // n x 2 x f64
198 nxv4f64 = 128, // n x 4 x f64
199 nxv8f64 = 129, // n x 8 x f64
200
201 FIRST_FP_SCALABLE_VECTOR_VALUETYPE = nxv2f16,
202 LAST_FP_SCALABLE_VECTOR_VALUETYPE = nxv8f64,
203
204 FIRST_SCALABLE_VECTOR_VALUETYPE = nxv1i1,
205 LAST_SCALABLE_VECTOR_VALUETYPE = nxv8f64,
206
207 FIRST_VECTOR_VALUETYPE = v1i1,
208 LAST_VECTOR_VALUETYPE = nxv8f64,
209
210 x86mmx = 130, // This is an X86 MMX value
211
212 Glue = 131, // This glues nodes together during pre-RA sched
213
214 isVoid = 132, // This has no value
215
216 Untyped = 133, // This value takes a register, but has
217 // unspecified type. The register class
218 // will be determined by the opcode.
219
220 exnref = 134, // WebAssembly's exnref type
221
222 FIRST_VALUETYPE = 1, // This is always the beginning of the list.
223 LAST_VALUETYPE = 135, // This always remains at the end of the list.
224
225 // This is the current maximum for LAST_VALUETYPE.
226 // MVT::MAX_ALLOWED_VALUETYPE is used for asserts and to size bit vectors
227 // This value must be a multiple of 32.
228 MAX_ALLOWED_VALUETYPE = 160,
229
230 // A value of type llvm::TokenTy
231 token = 248,
232
233 // This is MDNode or MDString.
234 Metadata = 249,
235
236 // An int value the size of the pointer of the current
237 // target to any address space. This must only be used internal to
238 // tblgen. Other than for overloading, we treat iPTRAny the same as iPTR.
239 iPTRAny = 250,
240
241 // A vector with any length and element size. This is used
242 // for intrinsics that have overloadings based on vector types.
243 // This is only for tblgen's consumption!
244 vAny = 251,
245
246 // Any floating-point or vector floating-point value. This is used
247 // for intrinsics that have overloadings based on floating-point types.
248 // This is only for tblgen's consumption!
249 fAny = 252,
250
251 // An integer or vector integer value of any bit width. This is
252 // used for intrinsics that have overloadings based on integer bit widths.
253 // This is only for tblgen's consumption!
254 iAny = 253,
255
256 // An int value the size of the pointer of the current
257 // target. This should only be used internal to tblgen!
258 iPTR = 254,
259
260 // Any type. This is used for intrinsics that have overloadings.
261 // This is only for tblgen's consumption!
262 Any = 255
263 };
264
265 SimpleValueType SimpleTy = INVALID_SIMPLE_VALUE_TYPE;
266
267 constexpr MVT() = default;
268 constexpr MVT(SimpleValueType SVT) : SimpleTy(SVT) {}
269
270 bool operator>(const MVT& S) const { return SimpleTy > S.SimpleTy; }
271 bool operator<(const MVT& S) const { return SimpleTy < S.SimpleTy; }
272 bool operator==(const MVT& S) const { return SimpleTy == S.SimpleTy; }
273 bool operator!=(const MVT& S) const { return SimpleTy != S.SimpleTy; }
274 bool operator>=(const MVT& S) const { return SimpleTy >= S.SimpleTy; }
275 bool operator<=(const MVT& S) const { return SimpleTy <= S.SimpleTy; }
276
277 /// Return true if this is a valid simple valuetype.
278 bool isValid() const {
279 return (SimpleTy >= MVT::FIRST_VALUETYPE &&
280 SimpleTy < MVT::LAST_VALUETYPE);
281 }
282
283 /// Return true if this is a FP or a vector FP type.
284 bool isFloatingPoint() const {
285 return ((SimpleTy >= MVT::FIRST_FP_VALUETYPE &&
286 SimpleTy <= MVT::LAST_FP_VALUETYPE) ||
287 (SimpleTy >= MVT::FIRST_FP_FIXEDLEN_VECTOR_VALUETYPE &&
288 SimpleTy <= MVT::LAST_FP_FIXEDLEN_VECTOR_VALUETYPE) ||
289 (SimpleTy >= MVT::FIRST_FP_SCALABLE_VECTOR_VALUETYPE &&
290 SimpleTy <= MVT::LAST_FP_SCALABLE_VECTOR_VALUETYPE));
291 }
292
293 /// Return true if this is an integer or a vector integer type.
294 bool isInteger() const {
295 return ((SimpleTy >= MVT::FIRST_INTEGER_VALUETYPE &&
296 SimpleTy <= MVT::LAST_INTEGER_VALUETYPE) ||
297 (SimpleTy >= MVT::FIRST_INTEGER_FIXEDLEN_VECTOR_VALUETYPE &&
298 SimpleTy <= MVT::LAST_INTEGER_FIXEDLEN_VECTOR_VALUETYPE) ||
299 (SimpleTy >= MVT::FIRST_INTEGER_SCALABLE_VECTOR_VALUETYPE &&
300 SimpleTy <= MVT::LAST_INTEGER_SCALABLE_VECTOR_VALUETYPE));
301 }
302
303 /// Return true if this is an integer, not including vectors.
304 bool isScalarInteger() const {
305 return (SimpleTy >= MVT::FIRST_INTEGER_VALUETYPE &&
306 SimpleTy <= MVT::LAST_INTEGER_VALUETYPE);
307 }
308
309 /// Return true if this is a vector value type.
310 bool isVector() const {
311 return (SimpleTy >= MVT::FIRST_VECTOR_VALUETYPE &&
4
Assuming field 'SimpleTy' is >= FIRST_VECTOR_VALUETYPE
18.1, 30.1
Field 'SimpleTy' is >= FIRST_VECTOR_VALUETYPE
6, 19, 31
Returning the value 1, which participates in a condition later
312 SimpleTy <= MVT::LAST_VECTOR_VALUETYPE);
5
Assuming field 'SimpleTy' is <= LAST_VECTOR_VALUETYPE
18.2, 30.2
Field 'SimpleTy' is <= LAST_VECTOR_VALUETYPE
313 }
314
315 /// Return true if this is a vector value type where the
316 /// runtime length is machine dependent
317 bool isScalableVector() const {
318 return (SimpleTy >= MVT::FIRST_SCALABLE_VECTOR_VALUETYPE &&
319 SimpleTy <= MVT::LAST_SCALABLE_VECTOR_VALUETYPE);
320 }
321
322 bool isFixedLengthVector() const {
323 return (SimpleTy >= MVT::FIRST_FIXEDLEN_VECTOR_VALUETYPE &&
324 SimpleTy <= MVT::LAST_FIXEDLEN_VECTOR_VALUETYPE);
325 }
326
327 /// Return true if this is a 16-bit vector type.
328 bool is16BitVector() const {
329 return (SimpleTy == MVT::v2i8 || SimpleTy == MVT::v1i16 ||
330 SimpleTy == MVT::v16i1);
331 }
332
333 /// Return true if this is a 32-bit vector type.
334 bool is32BitVector() const {
335 return (SimpleTy == MVT::v32i1 || SimpleTy == MVT::v4i8 ||
336 SimpleTy == MVT::v2i16 || SimpleTy == MVT::v1i32 ||
337 SimpleTy == MVT::v2f16 || SimpleTy == MVT::v1f32);
338 }
339
340 /// Return true if this is a 64-bit vector type.
341 bool is64BitVector() const {
342 return (SimpleTy == MVT::v64i1 || SimpleTy == MVT::v8i8 ||
343 SimpleTy == MVT::v4i16 || SimpleTy == MVT::v2i32 ||
344 SimpleTy == MVT::v1i64 || SimpleTy == MVT::v4f16 ||
345 SimpleTy == MVT::v2f32 || SimpleTy == MVT::v1f64);
346 }
347
348 /// Return true if this is a 128-bit vector type.
349 bool is128BitVector() const {
350 return (SimpleTy == MVT::v128i1 || SimpleTy == MVT::v16i8 ||
351 SimpleTy == MVT::v8i16 || SimpleTy == MVT::v4i32 ||
352 SimpleTy == MVT::v2i64 || SimpleTy == MVT::v1i128 ||
353 SimpleTy == MVT::v8f16 || SimpleTy == MVT::v4f32 ||
354 SimpleTy == MVT::v2f64);
355 }
356
357 /// Return true if this is a 256-bit vector type.
358 bool is256BitVector() const {
359 return (SimpleTy == MVT::v16f16 || SimpleTy == MVT::v8f32 ||
360 SimpleTy == MVT::v4f64 || SimpleTy == MVT::v32i8 ||
361 SimpleTy == MVT::v16i16 || SimpleTy == MVT::v8i32 ||
362 SimpleTy == MVT::v4i64 || SimpleTy == MVT::v256i1);
363 }
364
365 /// Return true if this is a 512-bit vector type.
366 bool is512BitVector() const {
367 return (SimpleTy == MVT::v32f16 || SimpleTy == MVT::v16f32 ||
368 SimpleTy == MVT::v8f64 || SimpleTy == MVT::v512i1 ||
369 SimpleTy == MVT::v64i8 || SimpleTy == MVT::v32i16 ||
370 SimpleTy == MVT::v16i32 || SimpleTy == MVT::v8i64);
371 }
372
373 /// Return true if this is a 1024-bit vector type.
374 bool is1024BitVector() const {
375 return (SimpleTy == MVT::v1024i1 || SimpleTy == MVT::v128i8 ||
376 SimpleTy == MVT::v64i16 || SimpleTy == MVT::v32i32 ||
377 SimpleTy == MVT::v16i64);
378 }
379
380 /// Return true if this is a 2048-bit vector type.
381 bool is2048BitVector() const {
382 return (SimpleTy == MVT::v256i8 || SimpleTy == MVT::v128i16 ||
383 SimpleTy == MVT::v64i32 || SimpleTy == MVT::v32i64);
384 }
385
386 /// Return true if this is an overloaded type for TableGen.
387 bool isOverloaded() const {
388 return (SimpleTy==MVT::Any ||
389 SimpleTy==MVT::iAny || SimpleTy==MVT::fAny ||
390 SimpleTy==MVT::vAny || SimpleTy==MVT::iPTRAny);
391 }
392
393 /// Return a VT for a vector type with the same element type but
394 /// half the number of elements.
395 MVT getHalfNumVectorElementsVT() const {
396 MVT EltVT = getVectorElementType();
397 auto EltCnt = getVectorElementCount();
398 assert(!(EltCnt.Min & 1) && "Splitting vector, but not in half!");
399 return getVectorVT(EltVT, EltCnt / 2);
400 }
401
402 /// Returns true if the given vector is a power of 2.
403 bool isPow2VectorType() const {
404 unsigned NElts = getVectorNumElements();
405 return !(NElts & (NElts - 1));
406 }
407
408 /// Widens the length of the given vector MVT up to the nearest power of 2
409 /// and returns that type.
410 MVT getPow2VectorType() const {
411 if (isPow2VectorType())
412 return *this;
413
414 unsigned NElts = getVectorNumElements();
415 unsigned Pow2NElts = 1 << Log2_32_Ceil(NElts);
416 return MVT::getVectorVT(getVectorElementType(), Pow2NElts);
417 }
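Both helpers are easy to sanity-check with concrete simple types; a minimal sketch, assuming the LLVM headers are on the include path:

#include "llvm/Support/MachineValueType.h"
#include <cassert>

void mvtVectorShapeExamples() {
  using llvm::MVT;
  // Halving keeps the element type and drops half the lanes.
  assert(MVT(MVT::v8i32).getHalfNumVectorElementsVT() == MVT(MVT::v4i32));
  // Widening to a power of two rounds the element count up: 3 -> 4.
  assert(MVT(MVT::v3f32).getPow2VectorType() == MVT(MVT::v4f32));
  // Already-power-of-two vectors are returned unchanged.
  assert(MVT(MVT::v2f64).getPow2VectorType() == MVT(MVT::v2f64));
}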
418
419 /// If this is a vector, return the element type, otherwise return this.
420 MVT getScalarType() const {
421 return isVector() ? getVectorElementType() : *this;
422 }
423
424 MVT getVectorElementType() const {
425 switch (SimpleTy) {
426 default:
427 llvm_unreachable("Not a vector MVT!")::llvm::llvm_unreachable_internal("Not a vector MVT!", "/build/llvm-toolchain-snapshot-10~svn374877/include/llvm/Support/MachineValueType.h"
, 427)
;
428 case v1i1:
429 case v2i1:
430 case v4i1:
431 case v8i1:
432 case v16i1:
433 case v32i1:
434 case v64i1:
435 case v128i1:
436 case v256i1:
437 case v512i1:
438 case v1024i1:
439 case nxv1i1:
440 case nxv2i1:
441 case nxv4i1:
442 case nxv8i1:
443 case nxv16i1:
444 case nxv32i1: return i1;
445 case v1i8:
446 case v2i8:
447 case v4i8:
448 case v8i8:
449 case v16i8:
450 case v32i8:
451 case v64i8:
452 case v128i8:
453 case v256i8:
454 case nxv1i8:
455 case nxv2i8:
456 case nxv4i8:
457 case nxv8i8:
458 case nxv16i8:
459 case nxv32i8: return i8;
460 case v1i16:
461 case v2i16:
462 case v3i16:
463 case v4i16:
464 case v8i16:
465 case v16i16:
466 case v32i16:
467 case v64i16:
468 case v128i16:
469 case nxv1i16:
470 case nxv2i16:
471 case nxv4i16:
472 case nxv8i16:
473 case nxv16i16:
474 case nxv32i16: return i16;
475 case v1i32:
476 case v2i32:
477 case v3i32:
478 case v4i32:
479 case v5i32:
480 case v8i32:
481 case v16i32:
482 case v32i32:
483 case v64i32:
484 case v128i32:
485 case v256i32:
486 case v512i32:
487 case v1024i32:
488 case v2048i32:
489 case nxv1i32:
490 case nxv2i32:
491 case nxv4i32:
492 case nxv8i32:
493 case nxv16i32:
494 case nxv32i32: return i32;
495 case v1i64:
496 case v2i64:
497 case v4i64:
498 case v8i64:
499 case v16i64:
500 case v32i64:
501 case nxv1i64:
502 case nxv2i64:
503 case nxv4i64:
504 case nxv8i64:
505 case nxv16i64:
506 case nxv32i64: return i64;
507 case v1i128: return i128;
508 case v2f16:
509 case v3f16:
510 case v4f16:
511 case v8f16:
512 case v16f16:
513 case v32f16:
514 case nxv2f16:
515 case nxv4f16:
516 case nxv8f16: return f16;
517 case v1f32:
518 case v2f32:
519 case v3f32:
520 case v4f32:
521 case v5f32:
522 case v8f32:
523 case v16f32:
524 case v32f32:
525 case v64f32:
526 case v128f32:
527 case v256f32:
528 case v512f32:
529 case v1024f32:
530 case v2048f32:
531 case nxv1f32:
532 case nxv2f32:
533 case nxv4f32:
534 case nxv8f32:
535 case nxv16f32: return f32;
536 case v1f64:
537 case v2f64:
538 case v4f64:
539 case v8f64:
540 case nxv1f64:
541 case nxv2f64:
542 case nxv4f64:
543 case nxv8f64: return f64;
544 }
545 }
546
547 unsigned getVectorNumElements() const {
548 switch (SimpleTy) {
549 default:
550 llvm_unreachable("Not a vector MVT!")::llvm::llvm_unreachable_internal("Not a vector MVT!", "/build/llvm-toolchain-snapshot-10~svn374877/include/llvm/Support/MachineValueType.h"
, 550)
;
551 case v2048i32:
552 case v2048f32: return 2048;
553 case v1024i1:
554 case v1024i32:
555 case v1024f32: return 1024;
556 case v512i1:
557 case v512i32:
558 case v512f32: return 512;
559 case v256i1:
560 case v256i8:
561 case v256i32:
562 case v256f32: return 256;
563 case v128i1:
564 case v128i8:
565 case v128i16:
566 case v128i32:
567 case v128f32: return 128;
568 case v64i1:
569 case v64i8:
570 case v64i16:
571 case v64i32:
572 case v64f32: return 64;
573 case v32i1:
574 case v32i8:
575 case v32i16:
576 case v32i32:
577 case v32i64:
578 case v32f16:
579 case v32f32:
580 case nxv32i1:
581 case nxv32i8:
582 case nxv32i16:
583 case nxv32i32:
584 case nxv32i64: return 32;
585 case v16i1:
586 case v16i8:
587 case v16i16:
588 case v16i32:
589 case v16i64:
590 case v16f16:
591 case v16f32:
592 case nxv16i1:
593 case nxv16i8:
594 case nxv16i16:
595 case nxv16i32:
596 case nxv16i64:
597 case nxv16f32: return 16;
598 case v8i1:
599 case v8i8:
600 case v8i16:
601 case v8i32:
602 case v8i64:
603 case v8f16:
604 case v8f32:
605 case v8f64:
606 case nxv8i1:
607 case nxv8i8:
608 case nxv8i16:
609 case nxv8i32:
610 case nxv8i64:
611 case nxv8f16:
612 case nxv8f32:
613 case nxv8f64: return 8;
614 case v5i32:
615 case v5f32: return 5;
616 case v4i1:
617 case v4i8:
618 case v4i16:
619 case v4i32:
620 case v4i64:
621 case v4f16:
622 case v4f32:
623 case v4f64:
624 case nxv4i1:
625 case nxv4i8:
626 case nxv4i16:
627 case nxv4i32:
628 case nxv4i64:
629 case nxv4f16:
630 case nxv4f32:
631 case nxv4f64: return 4;
632 case v3i16:
633 case v3i32:
634 case v3f16:
635 case v3f32: return 3;
636 case v2i1:
637 case v2i8:
638 case v2i16:
639 case v2i32:
640 case v2i64:
641 case v2f16:
642 case v2f32:
643 case v2f64:
644 case nxv2i1:
645 case nxv2i8:
646 case nxv2i16:
647 case nxv2i32:
648 case nxv2i64:
649 case nxv2f16:
650 case nxv2f32:
651 case nxv2f64: return 2;
652 case v1i1:
653 case v1i8:
654 case v1i16:
655 case v1i32:
656 case v1i64:
657 case v1i128:
658 case v1f32:
659 case v1f64:
660 case nxv1i1:
661 case nxv1i8:
662 case nxv1i16:
663 case nxv1i32:
664 case nxv1i64:
665 case nxv1f32:
666 case nxv1f64: return 1;
667 }
668 }
669
670 ElementCount getVectorElementCount() const {
671 return { getVectorNumElements(), isScalableVector() };
672 }
673
674 unsigned getSizeInBits() const {
675 switch (SimpleTy) {
676 default:
677 llvm_unreachable("getSizeInBits called on extended MVT.")::llvm::llvm_unreachable_internal("getSizeInBits called on extended MVT."
, "/build/llvm-toolchain-snapshot-10~svn374877/include/llvm/Support/MachineValueType.h"
, 677)
;
678 case Other:
679 llvm_unreachable("Value type is non-standard value, Other.")::llvm::llvm_unreachable_internal("Value type is non-standard value, Other."
, "/build/llvm-toolchain-snapshot-10~svn374877/include/llvm/Support/MachineValueType.h"
, 679)
;
680 case iPTR:
681 llvm_unreachable("Value type size is target-dependent. Ask TLI.")::llvm::llvm_unreachable_internal("Value type size is target-dependent. Ask TLI."
, "/build/llvm-toolchain-snapshot-10~svn374877/include/llvm/Support/MachineValueType.h"
, 681)
;
682 case iPTRAny:
683 case iAny:
684 case fAny:
685 case vAny:
686 case Any:
687 llvm_unreachable("Value type is overloaded.")::llvm::llvm_unreachable_internal("Value type is overloaded."
, "/build/llvm-toolchain-snapshot-10~svn374877/include/llvm/Support/MachineValueType.h"
, 687)
;
688 case token:
689 llvm_unreachable("Token type is a sentinel that cannot be used "::llvm::llvm_unreachable_internal("Token type is a sentinel that cannot be used "
"in codegen and has no size", "/build/llvm-toolchain-snapshot-10~svn374877/include/llvm/Support/MachineValueType.h"
, 690)
690 "in codegen and has no size")::llvm::llvm_unreachable_internal("Token type is a sentinel that cannot be used "
"in codegen and has no size", "/build/llvm-toolchain-snapshot-10~svn374877/include/llvm/Support/MachineValueType.h"
, 690)
;
691 case Metadata:
692 llvm_unreachable("Value type is metadata.")::llvm::llvm_unreachable_internal("Value type is metadata.", "/build/llvm-toolchain-snapshot-10~svn374877/include/llvm/Support/MachineValueType.h"
, 692)
;
693 case i1:
694 case v1i1:
695 case nxv1i1: return 1;
696 case v2i1:
697 case nxv2i1: return 2;
698 case v4i1:
699 case nxv4i1: return 4;
700 case i8 :
701 case v1i8:
702 case v8i1:
703 case nxv1i8:
704 case nxv8i1: return 8;
705 case i16 :
706 case f16:
707 case v16i1:
708 case v2i8:
709 case v1i16:
710 case nxv16i1:
711 case nxv2i8:
712 case nxv1i16: return 16;
713 case f32 :
714 case i32 :
715 case v32i1:
716 case v4i8:
717 case v2i16:
718 case v2f16:
719 case v1f32:
720 case v1i32:
721 case nxv32i1:
722 case nxv4i8:
723 case nxv2i16:
724 case nxv1i32:
725 case nxv2f16:
726 case nxv1f32: return 32;
727 case v3i16:
728 case v3f16: return 48;
729 case x86mmx:
730 case f64 :
731 case i64 :
732 case v64i1:
733 case v8i8:
734 case v4i16:
735 case v2i32:
736 case v1i64:
737 case v4f16:
738 case v2f32:
739 case v1f64:
740 case nxv8i8:
741 case nxv4i16:
742 case nxv2i32:
743 case nxv1i64:
744 case nxv4f16:
745 case nxv2f32:
746 case nxv1f64: return 64;
747 case f80 : return 80;
748 case v3i32:
749 case v3f32: return 96;
750 case f128:
751 case ppcf128:
752 case i128:
753 case v128i1:
754 case v16i8:
755 case v8i16:
756 case v4i32:
757 case v2i64:
758 case v1i128:
759 case v8f16:
760 case v4f32:
761 case v2f64:
762 case nxv16i8:
763 case nxv8i16:
764 case nxv4i32:
765 case nxv2i64:
766 case nxv8f16:
767 case nxv4f32:
768 case nxv2f64: return 128;
769 case v5i32:
770 case v5f32: return 160;
771 case v256i1:
772 case v32i8:
773 case v16i16:
774 case v8i32:
775 case v4i64:
776 case v16f16:
777 case v8f32:
778 case v4f64:
779 case nxv32i8:
780 case nxv16i16:
781 case nxv8i32:
782 case nxv4i64:
783 case nxv8f32:
784 case nxv4f64: return 256;
785 case v512i1:
786 case v64i8:
787 case v32i16:
788 case v16i32:
789 case v8i64:
790 case v32f16:
791 case v16f32:
792 case v8f64:
793 case nxv32i16:
794 case nxv16i32:
795 case nxv8i64:
796 case nxv16f32:
797 case nxv8f64: return 512;
798 case v1024i1:
799 case v128i8:
800 case v64i16:
801 case v32i32:
802 case v16i64:
803 case v32f32:
804 case nxv32i32:
805 case nxv16i64: return 1024;
806 case v256i8:
807 case v128i16:
808 case v64i32:
809 case v32i64:
810 case v64f32:
811 case nxv32i64: return 2048;
812 case v128i32:
813 case v128f32: return 4096;
814 case v256i32:
815 case v256f32: return 8192;
816 case v512i32:
817 case v512f32: return 16384;
818 case v1024i32:
819 case v1024f32: return 32768;
820 case v2048i32:
821 case v2048f32: return 65536;
822 case exnref: return 0; // opaque type
823 }
824 }
825
826 unsigned getScalarSizeInBits() const {
827 return getScalarType().getSizeInBits();
828 }
829
830 /// Return the number of bytes overwritten by a store of the specified value
831 /// type.
832 unsigned getStoreSize() const {
833 return (getSizeInBits() + 7) / 8;
834 }
835
836 /// Return the number of bits overwritten by a store of the specified value
837 /// type.
838 unsigned getStoreSizeInBits() const {
839 return getStoreSize() * 8;
840 }
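The rounding in getStoreSize matters for types whose bit width is not a multiple of eight; a few concrete values, under the same assumption that the LLVM headers are available:

#include "llvm/Support/MachineValueType.h"
#include <cassert>

void mvtStoreSizeExamples() {
  using llvm::MVT;
  assert(MVT(MVT::i1).getStoreSize() == 1);        // (1 + 7) / 8 = 1 byte
  assert(MVT(MVT::v3i16).getStoreSize() == 6);     // 48 bits -> 6 bytes
  assert(MVT(MVT::f80).getStoreSize() == 10);      // 80 bits -> 10 bytes
  assert(MVT(MVT::i1).getStoreSizeInBits() == 8);  // store size expressed in bits
}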
841
842 /// Return true if this has more bits than VT.
843 bool bitsGT(MVT VT) const {
844 return getSizeInBits() > VT.getSizeInBits();
845 }
846
847 /// Return true if this has no less bits than VT.
848 bool bitsGE(MVT VT) const {
849 return getSizeInBits() >= VT.getSizeInBits();
850 }
851
852 /// Return true if this has less bits than VT.
853 bool bitsLT(MVT VT) const {
854 return getSizeInBits() < VT.getSizeInBits();
855 }
856
857 /// Return true if this has no more bits than VT.
858 bool bitsLE(MVT VT) const {
859 return getSizeInBits() <= VT.getSizeInBits();
860 }
861
862 static MVT getFloatingPointVT(unsigned BitWidth) {
863 switch (BitWidth) {
864 default:
865 llvm_unreachable("Bad bit width!")::llvm::llvm_unreachable_internal("Bad bit width!", "/build/llvm-toolchain-snapshot-10~svn374877/include/llvm/Support/MachineValueType.h"
, 865)
;
866 case 16:
867 return MVT::f16;
868 case 32:
869 return MVT::f32;
870 case 64:
871 return MVT::f64;
872 case 80:
873 return MVT::f80;
874 case 128:
875 return MVT::f128;
876 }
877 }
878
879 static MVT getIntegerVT(unsigned BitWidth) {
880 switch (BitWidth) {
881 default:
882 return (MVT::SimpleValueType)(MVT::INVALID_SIMPLE_VALUE_TYPE);
883 case 1:
884 return MVT::i1;
885 case 8:
886 return MVT::i8;
887 case 16:
888 return MVT::i16;
889 case 32:
890 return MVT::i32;
891 case 64:
892 return MVT::i64;
893 case 128:
894 return MVT::i128;
895 }
896 }
897
898 static MVT getVectorVT(MVT VT, unsigned NumElements) {
899 switch (VT.SimpleTy) {
900 default:
901 break;
902 case MVT::i1:
903 if (NumElements == 1) return MVT::v1i1;
904 if (NumElements == 2) return MVT::v2i1;
905 if (NumElements == 4) return MVT::v4i1;
906 if (NumElements == 8) return MVT::v8i1;
907 if (NumElements == 16) return MVT::v16i1;
908 if (NumElements == 32) return MVT::v32i1;
909 if (NumElements == 64) return MVT::v64i1;
910 if (NumElements == 128) return MVT::v128i1;
911 if (NumElements == 256) return MVT::v256i1;
912 if (NumElements == 512) return MVT::v512i1;
913 if (NumElements == 1024) return MVT::v1024i1;
914 break;
915 case MVT::i8:
916 if (NumElements == 1) return MVT::v1i8;
917 if (NumElements == 2) return MVT::v2i8;
918 if (NumElements == 4) return MVT::v4i8;
919 if (NumElements == 8) return MVT::v8i8;
920 if (NumElements == 16) return MVT::v16i8;
921 if (NumElements == 32) return MVT::v32i8;
922 if (NumElements == 64) return MVT::v64i8;
923 if (NumElements == 128) return MVT::v128i8;
924 if (NumElements == 256) return MVT::v256i8;
925 break;
926 case MVT::i16:
927 if (NumElements == 1) return MVT::v1i16;
928 if (NumElements == 2) return MVT::v2i16;
929 if (NumElements == 3) return MVT::v3i16;
930 if (NumElements == 4) return MVT::v4i16;
931 if (NumElements == 8) return MVT::v8i16;
932 if (NumElements == 16) return MVT::v16i16;
933 if (NumElements == 32) return MVT::v32i16;
934 if (NumElements == 64) return MVT::v64i16;
935 if (NumElements == 128) return MVT::v128i16;
936 break;
937 case MVT::i32:
938 if (NumElements == 1) return MVT::v1i32;
939 if (NumElements == 2) return MVT::v2i32;
940 if (NumElements == 3) return MVT::v3i32;
941 if (NumElements == 4) return MVT::v4i32;
942 if (NumElements == 5) return MVT::v5i32;
943 if (NumElements == 8) return MVT::v8i32;
944 if (NumElements == 16) return MVT::v16i32;
945 if (NumElements == 32) return MVT::v32i32;
946 if (NumElements == 64) return MVT::v64i32;
947 if (NumElements == 128) return MVT::v128i32;
948 if (NumElements == 256) return MVT::v256i32;
949 if (NumElements == 512) return MVT::v512i32;
950 if (NumElements == 1024) return MVT::v1024i32;
951 if (NumElements == 2048) return MVT::v2048i32;
952 break;
953 case MVT::i64:
954 if (NumElements == 1) return MVT::v1i64;
955 if (NumElements == 2) return MVT::v2i64;
956 if (NumElements == 4) return MVT::v4i64;
957 if (NumElements == 8) return MVT::v8i64;
958 if (NumElements == 16) return MVT::v16i64;
959 if (NumElements == 32) return MVT::v32i64;
960 break;
961 case MVT::i128:
962 if (NumElements == 1) return MVT::v1i128;
963 break;
964 case MVT::f16:
965 if (NumElements == 2) return MVT::v2f16;
966 if (NumElements == 3) return MVT::v3f16;
967 if (NumElements == 4) return MVT::v4f16;
968 if (NumElements == 8) return MVT::v8f16;
969 if (NumElements == 16) return MVT::v16f16;
970 if (NumElements == 32) return MVT::v32f16;
971 break;
972 case MVT::f32:
973 if (NumElements == 1) return MVT::v1f32;
974 if (NumElements == 2) return MVT::v2f32;
975 if (NumElements == 3) return MVT::v3f32;
976 if (NumElements == 4) return MVT::v4f32;
977 if (NumElements == 5) return MVT::v5f32;
978 if (NumElements == 8) return MVT::v8f32;
979 if (NumElements == 16) return MVT::v16f32;
980 if (NumElements == 32) return MVT::v32f32;
981 if (NumElements == 64) return MVT::v64f32;
982 if (NumElements == 128) return MVT::v128f32;
983 if (NumElements == 256) return MVT::v256f32;
984 if (NumElements == 512) return MVT::v512f32;
985 if (NumElements == 1024) return MVT::v1024f32;
986 if (NumElements == 2048) return MVT::v2048f32;
987 break;
988 case MVT::f64:
989 if (NumElements == 1) return MVT::v1f64;
990 if (NumElements == 2) return MVT::v2f64;
991 if (NumElements == 4) return MVT::v4f64;
992 if (NumElements == 8) return MVT::v8f64;
993 break;
994 }
995 return (MVT::SimpleValueType)(MVT::INVALID_SIMPLE_VALUE_TYPE);
996 }
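Only the element/length combinations enumerated above map to simple types; anything else yields INVALID_SIMPLE_VALUE_TYPE. For example (a sketch, same LLVM-headers assumption):

#include "llvm/Support/MachineValueType.h"
#include <cassert>

void mvtGetVectorVTExamples() {
  using llvm::MVT;
  assert(MVT::getVectorVT(MVT::f32, 4) == MVT(MVT::v4f32));
  assert(MVT::getVectorVT(MVT::i8, 16) == MVT(MVT::v16i8));
  // 7 x f32 is not in the table above, so no simple MVT exists for it.
  assert(MVT::getVectorVT(MVT::f32, 7).SimpleTy ==
         MVT::INVALID_SIMPLE_VALUE_TYPE);
}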
997
998 static MVT getScalableVectorVT(MVT VT, unsigned NumElements) {
999 switch(VT.SimpleTy) {
1000 default:
1001 break;
1002 case MVT::i1:
1003 if (NumElements == 1) return MVT::nxv1i1;
1004 if (NumElements == 2) return MVT::nxv2i1;
1005 if (NumElements == 4) return MVT::nxv4i1;
1006 if (NumElements == 8) return MVT::nxv8i1;
1007 if (NumElements == 16) return MVT::nxv16i1;
1008 if (NumElements == 32) return MVT::nxv32i1;
1009 break;
1010 case MVT::i8:
1011 if (NumElements == 1) return MVT::nxv1i8;
1012 if (NumElements == 2) return MVT::nxv2i8;
1013 if (NumElements == 4) return MVT::nxv4i8;
1014 if (NumElements == 8) return MVT::nxv8i8;
1015 if (NumElements == 16) return MVT::nxv16i8;
1016 if (NumElements == 32) return MVT::nxv32i8;
1017 break;
1018 case MVT::i16:
1019 if (NumElements == 1) return MVT::nxv1i16;
1020 if (NumElements == 2) return MVT::nxv2i16;
1021 if (NumElements == 4) return MVT::nxv4i16;
1022 if (NumElements == 8) return MVT::nxv8i16;
1023 if (NumElements == 16) return MVT::nxv16i16;
1024 if (NumElements == 32) return MVT::nxv32i16;
1025 break;
1026 case MVT::i32:
1027 if (NumElements == 1) return MVT::nxv1i32;
1028 if (NumElements == 2) return MVT::nxv2i32;
1029 if (NumElements == 4) return MVT::nxv4i32;
1030 if (NumElements == 8) return MVT::nxv8i32;
1031 if (NumElements == 16) return MVT::nxv16i32;
1032 if (NumElements == 32) return MVT::nxv32i32;
1033 break;
1034 case MVT::i64:
1035 if (NumElements == 1) return MVT::nxv1i64;
1036 if (NumElements == 2) return MVT::nxv2i64;
1037 if (NumElements == 4) return MVT::nxv4i64;
1038 if (NumElements == 8) return MVT::nxv8i64;
1039 if (NumElements == 16) return MVT::nxv16i64;
1040 if (NumElements == 32) return MVT::nxv32i64;
1041 break;
1042 case MVT::f16:
1043 if (NumElements == 2) return MVT::nxv2f16;
1044 if (NumElements == 4) return MVT::nxv4f16;
1045 if (NumElements == 8) return MVT::nxv8f16;
1046 break;
1047 case MVT::f32:
1048 if (NumElements == 1) return MVT::nxv1f32;
1049 if (NumElements == 2) return MVT::nxv2f32;
1050 if (NumElements == 4) return MVT::nxv4f32;
1051 if (NumElements == 8) return MVT::nxv8f32;
1052 if (NumElements == 16) return MVT::nxv16f32;
1053 break;
1054 case MVT::f64:
1055 if (NumElements == 1) return MVT::nxv1f64;
1056 if (NumElements == 2) return MVT::nxv2f64;
1057 if (NumElements == 4) return MVT::nxv4f64;
1058 if (NumElements == 8) return MVT::nxv8f64;
1059 break;
1060 }
1061 return (MVT::SimpleValueType)(MVT::INVALID_SIMPLE_VALUE_TYPE);
1062 }
1063
1064 static MVT getVectorVT(MVT VT, unsigned NumElements, bool IsScalable) {
1065 if (IsScalable)
1066 return getScalableVectorVT(VT, NumElements);
1067 return getVectorVT(VT, NumElements);
1068 }
1069
1070 static MVT getVectorVT(MVT VT, ElementCount EC) {
1071 if (EC.Scalable)
1072 return getScalableVectorVT(VT, EC.Min);
1073 return getVectorVT(VT, EC.Min);
1074 }
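The scalable overloads behave the same way but select the nxv* types; for instance (sketch, same assumption):

#include "llvm/Support/MachineValueType.h"
#include <cassert>

void mvtScalableVectorVTExamples() {
  using llvm::MVT;
  assert(MVT::getVectorVT(MVT::i32, 4, /*IsScalable=*/true) ==
         MVT(MVT::nxv4i32));
  assert(MVT::getVectorVT(MVT::f64, 2, /*IsScalable=*/false) ==
         MVT(MVT::v2f64));
  assert(MVT(MVT::nxv4i32).isScalableVector());
}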
1075
1076 /// Return the value type corresponding to the specified type. This returns
1077 /// all pointers as iPTR. If HandleUnknown is true, unknown types are
1078 /// returned as Other, otherwise they are invalid.
1079 static MVT getVT(Type *Ty, bool HandleUnknown = false);
1080
1081 private:
1082 /// A simple iterator over the MVT::SimpleValueType enum.
1083 struct mvt_iterator {
1084 SimpleValueType VT;
1085
1086 mvt_iterator(SimpleValueType VT) : VT(VT) {}
1087
1088 MVT operator*() const { return VT; }
1089 bool operator!=(const mvt_iterator &LHS) const { return VT != LHS.VT; }
1090
1091 mvt_iterator& operator++() {
1092 VT = (MVT::SimpleValueType)((int)VT + 1);
1093 assert((int)VT <= MVT::MAX_ALLOWED_VALUETYPE &&
1094 "MVT iterator overflowed.");
1095 return *this;
1096 }
1097 };
1098
1099 /// A range of the MVT::SimpleValueType enum.
1100 using mvt_range = iterator_range<mvt_iterator>;
1101
1102 public:
1103 /// SimpleValueType Iteration
1104 /// @{
1105 static mvt_range all_valuetypes() {
1106 return mvt_range(MVT::FIRST_VALUETYPE, MVT::LAST_VALUETYPE);
1107 }
1108
1109 static mvt_range integer_valuetypes() {
1110 return mvt_range(MVT::FIRST_INTEGER_VALUETYPE,
1111 (MVT::SimpleValueType)(MVT::LAST_INTEGER_VALUETYPE + 1));
1112 }
1113
1114 static mvt_range fp_valuetypes() {
1115 return mvt_range(MVT::FIRST_FP_VALUETYPE,
1116 (MVT::SimpleValueType)(MVT::LAST_FP_VALUETYPE + 1));
1117 }
1118
1119 static mvt_range vector_valuetypes() {
1120 return mvt_range(MVT::FIRST_VECTOR_VALUETYPE,
1121 (MVT::SimpleValueType)(MVT::LAST_VECTOR_VALUETYPE + 1));
1122 }
1123
1124 static mvt_range fixedlen_vector_valuetypes() {
1125 return mvt_range(
1126 MVT::FIRST_FIXEDLEN_VECTOR_VALUETYPE,
1127 (MVT::SimpleValueType)(MVT::LAST_FIXEDLEN_VECTOR_VALUETYPE + 1));
1128 }
1129
1130 static mvt_range scalable_vector_valuetypes() {
1131 return mvt_range(
1132 MVT::FIRST_SCALABLE_VECTOR_VALUETYPE,
1133 (MVT::SimpleValueType)(MVT::LAST_SCALABLE_VECTOR_VALUETYPE + 1));
1134 }
1135
1136 static mvt_range integer_fixedlen_vector_valuetypes() {
1137 return mvt_range(
1138 MVT::FIRST_INTEGER_FIXEDLEN_VECTOR_VALUETYPE,
1139 (MVT::SimpleValueType)(MVT::LAST_INTEGER_FIXEDLEN_VECTOR_VALUETYPE + 1));
1140 }
1141
1142 static mvt_range fp_fixedlen_vector_valuetypes() {
1143 return mvt_range(
1144 MVT::FIRST_FP_FIXEDLEN_VECTOR_VALUETYPE,
1145 (MVT::SimpleValueType)(MVT::LAST_FP_FIXEDLEN_VECTOR_VALUETYPE + 1));
1146 }
1147
1148 static mvt_range integer_scalable_vector_valuetypes() {
1149 return mvt_range(
1150 MVT::FIRST_INTEGER_SCALABLE_VECTOR_VALUETYPE,
1151 (MVT::SimpleValueType)(MVT::LAST_INTEGER_SCALABLE_VECTOR_VALUETYPE + 1));
1152 }
1153
1154 static mvt_range fp_scalable_vector_valuetypes() {
1155 return mvt_range(
1156 MVT::FIRST_FP_SCALABLE_VECTOR_VALUETYPE,
1157 (MVT::SimpleValueType)(MVT::LAST_FP_SCALABLE_VECTOR_VALUETYPE + 1));
1158 }
1159 /// @}
1160 };
1161
1162} // end namespace llvm
1163
1164#endif // LLVM_SUPPORT_MACHINEVALUETYPE_H

/build/llvm-toolchain-snapshot-10~svn374877/include/llvm/CodeGen/ValueTypes.h

1//===- CodeGen/ValueTypes.h - Low-Level Target independ. types --*- C++ -*-===//
2//
3// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
4// See https://llvm.org/LICENSE.txt for license information.
5// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
6//
7//===----------------------------------------------------------------------===//
8//
9// This file defines the set of low-level target independent types which various
10// values in the code generator are. This allows the target specific behavior
11// of instructions to be described to target independent passes.
12//
13//===----------------------------------------------------------------------===//
14
15#ifndef LLVM_CODEGEN_VALUETYPES_H
16#define LLVM_CODEGEN_VALUETYPES_H
17
18#include "llvm/Support/Compiler.h"
19#include "llvm/Support/MachineValueType.h"
20#include "llvm/Support/MathExtras.h"
21#include <cassert>
22#include <cstdint>
23#include <string>
24
25namespace llvm {
26
27 class LLVMContext;
28 class Type;
29
30 /// Extended Value Type. Capable of holding value types which are not native
31 /// for any processor (such as the i12345 type), as well as the types an MVT
32 /// can represent.
33 struct EVT {
34 private:
35 MVT V = MVT::INVALID_SIMPLE_VALUE_TYPE;
36 Type *LLVMTy = nullptr;
37
38 public:
39 constexpr EVT() = default;
40 constexpr EVT(MVT::SimpleValueType SVT) : V(SVT) {}
41 constexpr EVT(MVT S) : V(S) {}
42
43 bool operator==(EVT VT) const {
44 return !(*this != VT);
45 }
46 bool operator!=(EVT VT) const {
47 if (V.SimpleTy != VT.V.SimpleTy)
48 return true;
49 if (V.SimpleTy == MVT::INVALID_SIMPLE_VALUE_TYPE)
50 return LLVMTy != VT.LLVMTy;
51 return false;
52 }
53
54 /// Returns the EVT that represents a floating-point type with the given
55 /// number of bits. There are two floating-point types with 128 bits - this
56 /// returns f128 rather than ppcf128.
57 static EVT getFloatingPointVT(unsigned BitWidth) {
58 return MVT::getFloatingPointVT(BitWidth);
59 }
60
61 /// Returns the EVT that represents an integer with the given number of
62 /// bits.
63 static EVT getIntegerVT(LLVMContext &Context, unsigned BitWidth) {
64 MVT M = MVT::getIntegerVT(BitWidth);
65 if (M.SimpleTy != MVT::INVALID_SIMPLE_VALUE_TYPE)
66 return M;
67 return getExtendedIntegerVT(Context, BitWidth);
68 }
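For bit widths in the MVT table this returns a simple type; any other width falls back to an extended, context-owned type. A small sketch, assuming an LLVM build is available to link against (extended EVTs need an LLVMContext):

#include "llvm/CodeGen/ValueTypes.h"
#include "llvm/IR/LLVMContext.h"
#include <cassert>

void evtIntegerExamples() {
  llvm::LLVMContext Ctx;
  llvm::EVT E32 = llvm::EVT::getIntegerVT(Ctx, 32);
  assert(E32.isSimple() && E32 == llvm::EVT(llvm::MVT::i32));
  // i12345 has no simple MVT, so an extended EVT is created instead.
  llvm::EVT EOdd = llvm::EVT::getIntegerVT(Ctx, 12345);
  assert(EOdd.isExtended() && EOdd.isInteger());
}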
69
70 /// Returns the EVT that represents a vector NumElements in length, where
71 /// each element is of type VT.
72 static EVT getVectorVT(LLVMContext &Context, EVT VT, unsigned NumElements,
73 bool IsScalable = false) {
74 MVT M = MVT::getVectorVT(VT.V, NumElements, IsScalable);
75 if (M.SimpleTy != MVT::INVALID_SIMPLE_VALUE_TYPE)
76 return M;
77
78 assert(!IsScalable && "We don't support extended scalable types yet");
79 return getExtendedVectorVT(Context, VT, NumElements);
80 }
81
82 /// Returns the EVT that represents a vector EC.Min elements in length,
83 /// where each element is of type VT.
84 static EVT getVectorVT(LLVMContext &Context, EVT VT, ElementCount EC) {
85 MVT M = MVT::getVectorVT(VT.V, EC);
86 if (M.SimpleTy != MVT::INVALID_SIMPLE_VALUE_TYPE)
87 return M;
88 assert(!EC.Scalable && "We don't support extended scalable types yet");
89 return getExtendedVectorVT(Context, VT, EC.Min);
90 }
91
92 /// Return a vector with the same number of elements as this vector, but
93 /// with the element type converted to an integer type with the same
94 /// bitwidth.
95 EVT changeVectorElementTypeToInteger() const {
96 if (!isSimple()) {
97 assert(!isScalableVector() &&
98 "We don't support extended scalable types yet");
99 return changeExtendedVectorElementTypeToInteger();
100 }
101 MVT EltTy = getSimpleVT().getVectorElementType();
102 unsigned BitWidth = EltTy.getSizeInBits();
103 MVT IntTy = MVT::getIntegerVT(BitWidth);
104 MVT VecTy = MVT::getVectorVT(IntTy, getVectorNumElements(),
105 isScalableVector());
106 assert(VecTy.SimpleTy != MVT::INVALID_SIMPLE_VALUE_TYPE &&
107 "Simple vector VT not representable by simple integer vector VT!");
108 return VecTy;
109 }
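For simple vector types the conversion stays within the MVT table; e.g. a 4 x f32 vector becomes 4 x i32 (sketch, same linked-LLVM assumption as above):

#include "llvm/CodeGen/ValueTypes.h"
#include <cassert>

void evtChangeToIntegerExamples() {
  using llvm::EVT;
  using llvm::MVT;
  // Element width is preserved (f32 -> i32, f64 -> i64); only the kind changes.
  assert(EVT(MVT::v4f32).changeVectorElementTypeToInteger() ==
         EVT(MVT::v4i32));
  assert(EVT(MVT::v2f64).changeVectorElementTypeToInteger() ==
         EVT(MVT::v2i64));
}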
110
111 /// Return the type converted to an equivalently sized integer or vector
112 /// with integer element type. Similar to changeVectorElementTypeToInteger,
113 /// but also handles scalars.
114 EVT changeTypeToInteger() {
115 if (isVector())
116 return changeVectorElementTypeToInteger();
117
118 if (isSimple())
119 return MVT::getIntegerVT(getSizeInBits());
120
121 return changeExtendedTypeToInteger();
122 }
123
124 /// Test if the given EVT is simple (as opposed to being extended).
125 bool isSimple() const {
126 return V.SimpleTy != MVT::INVALID_SIMPLE_VALUE_TYPE;
13.1, 25.1
Field 'SimpleTy' is not equal to INVALID_SIMPLE_VALUE_TYPE
14, 26
Returning the value 1, which participates in a condition later
127 }
128
129 /// Test if the given EVT is extended (as opposed to being simple).
130 bool isExtended() const {
131 return !isSimple();
132 }
133
134 /// Return true if this is a FP or a vector FP type.
135 bool isFloatingPoint() const {
136 return isSimple() ? V.isFloatingPoint() : isExtendedFloatingPoint();
137 }
138
139 /// Return true if this is an integer or a vector integer type.
140 bool isInteger() const {
141 return isSimple() ? V.isInteger() : isExtendedInteger();
142 }
143
144 /// Return true if this is an integer, but not a vector.
145 bool isScalarInteger() const {
146 return isSimple() ? V.isScalarInteger() : isExtendedScalarInteger();
147 }
148
149 /// Return true if this is a vector value type.
150 bool isVector() const {
151 return isSimple() ? V.isVector() : isExtendedVector();
17: '?' condition is true
18: Calling 'MVT::isVector'
20: Returning from 'MVT::isVector'
21: Returning the value 1, which participates in a condition later
29: '?' condition is true
30: Calling 'MVT::isVector'
32: Returning from 'MVT::isVector'
33: Returning the value 1, which participates in a condition later
152 }
153
154 /// Return true if this is a vector type where the runtime length is
155 /// machine-dependent.
156 bool isScalableVector() const {
157 // FIXME: We don't support extended scalable types yet, because the
158 // matching IR type doesn't exist. Once it has been added, this can
159 // be changed to call isExtendedScalableVector.
160 if (!isSimple())
161 return false;
162 return V.isScalableVector();
163 }
164
165 /// Return true if this is a 16-bit vector type.
166 bool is16BitVector() const {
167 return isSimple() ? V.is16BitVector() : isExtended16BitVector();
168 }
169
170 /// Return true if this is a 32-bit vector type.
171 bool is32BitVector() const {
172 return isSimple() ? V.is32BitVector() : isExtended32BitVector();
173 }
174
175 /// Return true if this is a 64-bit vector type.
176 bool is64BitVector() const {
177 return isSimple() ? V.is64BitVector() : isExtended64BitVector();
178 }
179
180 /// Return true if this is a 128-bit vector type.
181 bool is128BitVector() const {
182 return isSimple() ? V.is128BitVector() : isExtended128BitVector();
183 }
184
185 /// Return true if this is a 256-bit vector type.
186 bool is256BitVector() const {
187 return isSimple() ? V.is256BitVector() : isExtended256BitVector();
188 }
189
190 /// Return true if this is a 512-bit vector type.
191 bool is512BitVector() const {
192 return isSimple() ? V.is512BitVector() : isExtended512BitVector();
193 }
194
195 /// Return true if this is a 1024-bit vector type.
196 bool is1024BitVector() const {
197 return isSimple() ? V.is1024BitVector() : isExtended1024BitVector();
198 }
199
200 /// Return true if this is a 2048-bit vector type.
201 bool is2048BitVector() const {
202 return isSimple() ? V.is2048BitVector() : isExtended2048BitVector();
203 }
204
205 /// Return true if this is an overloaded type for TableGen.
206 bool isOverloaded() const {
207 return (V==MVT::iAny || V==MVT::fAny || V==MVT::vAny || V==MVT::iPTRAny);
208 }
209
210 /// Return true if the bit size is a multiple of 8.
211 bool isByteSized() const {
212 return (getSizeInBits() & 7) == 0;
213 }
214
215 /// Return true if the size is a power-of-two number of bytes.
216 bool isRound() const {
217 unsigned BitSize = getSizeInBits();
218 return BitSize >= 8 && !(BitSize & (BitSize - 1));
219 }
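(Not part of the annotated source.) For orientation, a minimal sketch of how isByteSized() and isRound() behave, assuming the LLVM 10 headers listed in this report; the helper name isRoundSketch and the chosen bit widths are illustrative only.

#include "llvm/CodeGen/ValueTypes.h"
#include "llvm/IR/LLVMContext.h"
using namespace llvm;

// Illustration only: a 24-bit integer EVT is byte-sized but not "round".
void isRoundSketch() {
  LLVMContext Ctx;
  EVT I24 = EVT::getIntegerVT(Ctx, 24);
  (void)I24.isByteSized();          // true: 24 bits is a whole number of bytes
  (void)I24.isRound();              // false: 3 bytes is not a power of two
  (void)EVT(MVT::v4i32).isRound();  // true: 128 bits = 16 bytes
}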
220
221 /// Return true if this has the same number of bits as VT.
222 bool bitsEq(EVT VT) const {
223 if (EVT::operator==(VT)) return true;
224 return getSizeInBits() == VT.getSizeInBits();
225 }
226
227 /// Return true if this has more bits than VT.
228 bool bitsGT(EVT VT) const {
229 if (EVT::operator==(VT)) return false;
230 return getSizeInBits() > VT.getSizeInBits();
231 }
232
233 /// Return true if this has no fewer bits than VT.
234 bool bitsGE(EVT VT) const {
235 if (EVT::operator==(VT)) return true;
236 return getSizeInBits() >= VT.getSizeInBits();
237 }
238
239 /// Return true if this has fewer bits than VT.
240 bool bitsLT(EVT VT) const {
241 if (EVT::operator==(VT)) return false;
242 return getSizeInBits() < VT.getSizeInBits();
243 }
244
245 /// Return true if this has no more bits than VT.
246 bool bitsLE(EVT VT) const {
247 if (EVT::operator==(VT)) return true;
248 return getSizeInBits() <= VT.getSizeInBits();
249 }
250
251 /// Return the SimpleValueType held in the specified simple EVT.
252 MVT getSimpleVT() const {
253 assert(isSimple() && "Expected a SimpleValueType!");
254 return V;
255 }
256
257 /// If this is a vector type, return the element type, otherwise return
258 /// this.
259 EVT getScalarType() const {
260 return isVector() ? getVectorElementType() : *this;
261 }
262
263 /// Given a vector type, return the type of each element.
264 EVT getVectorElementType() const {
265 assert(isVector() && "Invalid vector type!");
266 if (isSimple())
267 return V.getVectorElementType();
268 return getExtendedVectorElementType();
269 }
270
271 /// Given a vector type, return the number of elements it contains.
272 unsigned getVectorNumElements() const {
273 assert(isVector() && "Invalid vector type!");
274 if (isSimple())
275 return V.getVectorNumElements();
276 return getExtendedVectorNumElements();
277 }
278
279 // Given a (possibly scalable) vector type, return the ElementCount
280 ElementCount getVectorElementCount() const {
281 assert((isVector()) && "Invalid vector type!");
282 if (isSimple())
283 return V.getVectorElementCount();
284
285 assert(!isScalableVector() &&
286        "We don't support extended scalable types yet");
287 return {getExtendedVectorNumElements(), false};
288 }
289
290 /// Return the size of the specified value type in bits.
291 unsigned getSizeInBits() const {
292 if (isSimple())
293 return V.getSizeInBits();
294 return getExtendedSizeInBits();
295 }
296
297 unsigned getScalarSizeInBits() const {
298 return getScalarType().getSizeInBits();
299 }
300
301 /// Return the number of bytes overwritten by a store of the specified value
302 /// type.
303 unsigned getStoreSize() const {
304 return (getSizeInBits() + 7) / 8;
305 }
306
307 /// Return the number of bits overwritten by a store of the specified value
308 /// type.
309 unsigned getStoreSizeInBits() const {
310 return getStoreSize() * 8;
311 }
312
313 /// Rounds the bit-width of the given integer EVT up to the nearest power of
314 /// two (and at least to eight), and returns the integer EVT with that
315 /// number of bits.
316 EVT getRoundIntegerType(LLVMContext &Context) const {
317 assert(isInteger() && !isVector() && "Invalid integer type!");
318 unsigned BitWidth = getSizeInBits();
319 if (BitWidth <= 8)
320 return EVT(MVT::i8);
321 return getIntegerVT(Context, 1 << Log2_32_Ceil(BitWidth));
322 }
323
324 /// Finds the smallest simple value type that is greater than or equal to
325 /// half the width of this EVT. If no simple value type can be found, an
326 /// extended integer value type of half the size (rounded up) is returned.
327 EVT getHalfSizedIntegerVT(LLVMContext &Context) const {
328 assert(isInteger() && !isVector() && "Invalid integer type!");
329 unsigned EVTSize = getSizeInBits();
330 for (unsigned IntVT = MVT::FIRST_INTEGER_VALUETYPE;
331 IntVT <= MVT::LAST_INTEGER_VALUETYPE; ++IntVT) {
332 EVT HalfVT = EVT((MVT::SimpleValueType)IntVT);
333 if (HalfVT.getSizeInBits() * 2 >= EVTSize)
334 return HalfVT;
335 }
336 return getIntegerVT(Context, (EVTSize + 1) / 2);
337 }
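(Not part of the annotated source.) A small sketch of the two rounding helpers above, assuming the same LLVM 10 headers; roundingSketch is a hypothetical name.

#include "llvm/CodeGen/ValueTypes.h"
#include "llvm/IR/LLVMContext.h"
using namespace llvm;

void roundingSketch() {
  LLVMContext Ctx;
  EVT I20 = EVT::getIntegerVT(Ctx, 20);
  EVT Rounded = I20.getRoundIntegerType(Ctx);           // i32: 20 rounds up to 32
  EVT Half = EVT(MVT::i64).getHalfSizedIntegerVT(Ctx);  // i32: smallest simple VT of at least 32 bits
  (void)Rounded;
  (void)Half;
}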
338
339 /// Return a VT for an integer vector type with the size of the
340 /// elements doubled. The type returned may be an extended type.
341 EVT widenIntegerVectorElementType(LLVMContext &Context) const {
342 EVT EltVT = getVectorElementType();
343 EltVT = EVT::getIntegerVT(Context, 2 * EltVT.getSizeInBits());
344 return EVT::getVectorVT(Context, EltVT, getVectorElementCount());
345 }
346
347 // Return a VT for a vector type with the same element type but
348 // half the number of elements. The type returned may be an
349 // extended type.
350 EVT getHalfNumVectorElementsVT(LLVMContext &Context) const {
351 EVT EltVT = getVectorElementType();
352 auto EltCnt = getVectorElementCount();
353 assert(!(EltCnt.Min & 1) && "Splitting vector, but not in half!");
354 return EVT::getVectorVT(Context, EltVT, EltCnt / 2);
355 }
356
357 /// Returns true if the given vector's number of elements is a power of 2.
358 bool isPow2VectorType() const {
359 unsigned NElts = getVectorNumElements();
360 return !(NElts & (NElts - 1));
361 }
362
363 /// Widens the length of the given vector EVT up to the nearest power of 2
364 /// and returns that type.
365 EVT getPow2VectorType(LLVMContext &Context) const {
366 if (!isPow2VectorType()) {
367 unsigned NElts = getVectorNumElements();
368 unsigned Pow2NElts = 1 << Log2_32_Ceil(NElts);
369 return EVT::getVectorVT(Context, getVectorElementType(), Pow2NElts,
370 isScalableVector());
371 }
372 else {
373 return *this;
374 }
375 }
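(Not part of the annotated source.) A sketch of isPow2VectorType() and getPow2VectorType(), assuming the same headers; pow2VectorSketch is a hypothetical name.

#include "llvm/CodeGen/ValueTypes.h"
#include "llvm/IR/LLVMContext.h"
using namespace llvm;

void pow2VectorSketch() {
  LLVMContext Ctx;
  EVT V3 = EVT::getVectorVT(Ctx, MVT::i32, 3);  // 3 x i32
  (void)V3.isPow2VectorType();                  // false: 3 is not a power of two
  EVT V4 = V3.getPow2VectorType(Ctx);           // widened to 4 x i32
  (void)V4.getVectorNumElements();              // 4
}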
376
377 /// This function returns value type as a string, e.g. "i32".
378 std::string getEVTString() const;
379
380 /// This method returns an LLVM type corresponding to the specified EVT.
381 /// For integer types, this returns an unsigned type. Note that this will
382 /// abort for types that cannot be represented.
383 Type *getTypeForEVT(LLVMContext &Context) const;
384
385 /// Return the value type corresponding to the specified type.
386 /// This returns all pointers as iPTR. If HandleUnknown is true, unknown
387 /// types are returned as Other, otherwise they are invalid.
388 static EVT getEVT(Type *Ty, bool HandleUnknown = false);
389
390 intptr_t getRawBits() const {
391 if (isSimple())
392 return V.SimpleTy;
393 else
394 return (intptr_t)(LLVMTy);
395 }
396
397 /// A meaningless but well-behaved order, useful for constructing
398 /// containers.
399 struct compareRawBits {
400 bool operator()(EVT L, EVT R) const {
401 if (L.V.SimpleTy == R.V.SimpleTy)
402 return L.LLVMTy < R.LLVMTy;
403 else
404 return L.V.SimpleTy < R.V.SimpleTy;
405 }
406 };
407
408 private:
409 // Methods for handling the Extended-type case in functions above.
410 // These are all out-of-line to prevent users of this header file
411 // from having a dependency on Type.h.
412 EVT changeExtendedTypeToInteger() const;
413 EVT changeExtendedVectorElementTypeToInteger() const;
414 static EVT getExtendedIntegerVT(LLVMContext &C, unsigned BitWidth);
415 static EVT getExtendedVectorVT(LLVMContext &C, EVT VT,
416 unsigned NumElements);
417 bool isExtendedFloatingPoint() const LLVM_READONLY;
418 bool isExtendedInteger() const LLVM_READONLY;
419 bool isExtendedScalarInteger() const LLVM_READONLY;
420 bool isExtendedVector() const LLVM_READONLY;
421 bool isExtended16BitVector() const LLVM_READONLY;
422 bool isExtended32BitVector() const LLVM_READONLY;
423 bool isExtended64BitVector() const LLVM_READONLY;
424 bool isExtended128BitVector() const LLVM_READONLY;
425 bool isExtended256BitVector() const LLVM_READONLY;
426 bool isExtended512BitVector() const LLVM_READONLY;
427 bool isExtended1024BitVector() const LLVM_READONLY;
428 bool isExtended2048BitVector() const LLVM_READONLY;
429 EVT getExtendedVectorElementType() const;
430 unsigned getExtendedVectorNumElements() const LLVM_READONLY;
431 unsigned getExtendedSizeInBits() const LLVM_READONLY;
432 };
433
434} // end namespace llvm
435
436#endif // LLVM_CODEGEN_VALUETYPES_H
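(Not part of the annotated source.) The path notes interleaved above (events 13-14 and 25-26 at isSimple(), 17-21 and 29-33 at isVector()) all run through these EVT queries on the way to the division-by-zero warning. The sketch below shows only that generic query pattern; elementsSketch is a hypothetical helper and does not reproduce the code at X86ISelLowering.cpp:33034.

#include "llvm/CodeGen/ValueTypes.h"
using namespace llvm;

// Generic pattern only: a divisor derived from a vector's element count must
// be checked for zero by the caller before it is used in a division.
unsigned elementsSketch(EVT VT) {
  if (!VT.isSimple() || !VT.isVector())
    return 1;
  return VT.getVectorNumElements();
}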

/build/llvm-toolchain-snapshot-10~svn374877/include/llvm/ADT/SmallVector.h

1//===- llvm/ADT/SmallVector.h - 'Normally small' vectors --------*- C++ -*-===//
2//
3// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
4// See https://llvm.org/LICENSE.txt for license information.
5// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
6//
7//===----------------------------------------------------------------------===//
8//
9// This file defines the SmallVector class.
10//
11//===----------------------------------------------------------------------===//
12
13#ifndef LLVM_ADT_SMALLVECTOR_H
14#define LLVM_ADT_SMALLVECTOR_H
15
16#include "llvm/ADT/iterator_range.h"
17#include "llvm/Support/AlignOf.h"
18#include "llvm/Support/Compiler.h"
19#include "llvm/Support/MathExtras.h"
20#include "llvm/Support/MemAlloc.h"
21#include "llvm/Support/type_traits.h"
22#include "llvm/Support/ErrorHandling.h"
23#include <algorithm>
24#include <cassert>
25#include <cstddef>
26#include <cstdlib>
27#include <cstring>
28#include <initializer_list>
29#include <iterator>
30#include <memory>
31#include <new>
32#include <type_traits>
33#include <utility>
34
35namespace llvm {
36
37/// This is all the non-templated stuff common to all SmallVectors.
38class SmallVectorBase {
39protected:
40 void *BeginX;
41 unsigned Size = 0, Capacity;
42
43 SmallVectorBase() = delete;
44 SmallVectorBase(void *FirstEl, size_t TotalCapacity)
45 : BeginX(FirstEl), Capacity(TotalCapacity) {}
46
47 /// This is an implementation of the grow() method which only works
48 /// on POD-like data types and is out of line to reduce code duplication.
49 void grow_pod(void *FirstEl, size_t MinCapacity, size_t TSize);
50
51public:
52 size_t size() const { return Size; }
49: Returning zero
53 size_t capacity() const { return Capacity; }
54
55 LLVM_NODISCARD bool empty() const { return !Size; }
56
57 /// Set the array size to \p N, which the current array must have enough
58 /// capacity for.
59 ///
60 /// This does not construct or destroy any elements in the vector.
61 ///
62 /// Clients can use this in conjunction with capacity() to write past the end
63 /// of the buffer when they know that more elements are available, and only
64 /// update the size later. This avoids the cost of value initializing elements
65 /// which will only be overwritten.
66 void set_size(size_t N) {
67 assert(N <= capacity());
68 Size = N;
69 }
70};
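(Not part of the annotated source.) Event 49 above, "Returning zero" at size(), is the analyzer observing that an empty container legitimately reports a size of 0. The guard pattern below is a generic sketch of why that matters for a later division; perElementSketch and its parameters are hypothetical and are not the code the warning points at.

#include "llvm/ADT/SmallVector.h"
using namespace llvm;

unsigned perElementSketch(const SmallVectorImpl<int> &V, unsigned Total) {
  if (V.empty())
    return 0;              // without this guard, V.size() == 0 would divide by zero below
  return Total / V.size();
}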
71
72/// Figure out the offset of the first element.
73template <class T, typename = void> struct SmallVectorAlignmentAndSize {
74 AlignedCharArrayUnion<SmallVectorBase> Base;
75 AlignedCharArrayUnion<T> FirstEl;
76};
77
78/// This is the part of SmallVectorTemplateBase which does not depend on whether
79/// the type T is a POD. The extra dummy template argument is used by ArrayRef
80/// to avoid unnecessarily requiring T to be complete.
81template <typename T, typename = void>
82class SmallVectorTemplateCommon : public SmallVectorBase {
83 /// Find the address of the first element. For this pointer math to be valid
84 /// with small-size of 0 for T with lots of alignment, it's important that
85 /// SmallVectorStorage is properly-aligned even for small-size of 0.
86 void *getFirstEl() const {
87 return const_cast<void *>(reinterpret_cast<const void *>(
88 reinterpret_cast<const char *>(this) +
89 offsetof(SmallVectorAlignmentAndSize<T>, FirstEl)));
90 }
91 // Space after 'FirstEl' is clobbered, do not add any instance vars after it.
92
93protected:
94 SmallVectorTemplateCommon(size_t Size)
95 : SmallVectorBase(getFirstEl(), Size) {}
96
97 void grow_pod(size_t MinCapacity, size_t TSize) {
98 SmallVectorBase::grow_pod(getFirstEl(), MinCapacity, TSize);
99 }
100
101 /// Return true if this is a smallvector which has not had dynamic
102 /// memory allocated for it.
103 bool isSmall() const { return BeginX == getFirstEl(); }
104
105 /// Put this vector in a state of being small.
106 void resetToSmall() {
107 BeginX = getFirstEl();
108 Size = Capacity = 0; // FIXME: Setting Capacity to 0 is suspect.
109 }
110
111public:
112 using size_type = size_t;
113 using difference_type = ptrdiff_t;
114 using value_type = T;
115 using iterator = T *;
116 using const_iterator = const T *;
117
118 using const_reverse_iterator = std::reverse_iterator<const_iterator>;
119 using reverse_iterator = std::reverse_iterator<iterator>;
120
121 using reference = T &;
122 using const_reference = const T &;
123 using pointer = T *;
124 using const_pointer = const T *;
125
126 // forward iterator creation methods.
127 iterator begin() { return (iterator)this->BeginX; }
128 const_iterator begin() const { return (const_iterator)this->BeginX; }
129 iterator end() { return begin() + size(); }
130 const_iterator end() const { return begin() + size(); }
131
132 // reverse iterator creation methods.
133 reverse_iterator rbegin() { return reverse_iterator(end()); }
134 const_reverse_iterator rbegin() const{ return const_reverse_iterator(end()); }
135 reverse_iterator rend() { return reverse_iterator(begin()); }
136 const_reverse_iterator rend() const { return const_reverse_iterator(begin());}
137
138 size_type size_in_bytes() const { return size() * sizeof(T); }
139 size_type max_size() const { return size_type(-1) / sizeof(T); }
140
141 size_t capacity_in_bytes() const { return capacity() * sizeof(T); }
142
143 /// Return a pointer to the vector's buffer, even if empty().
144 pointer data() { return pointer(begin()); }
145 /// Return a pointer to the vector's buffer, even if empty().
146 const_pointer data() const { return const_pointer(begin()); }
147
148 reference operator[](size_type idx) {
149 assert(idx < size());
150 return begin()[idx];
151 }
152 const_reference operator[](size_type idx) const {
153 assert(idx < size());
154 return begin()[idx];
155 }
156
157 reference front() {
158 assert(!empty());
159 return begin()[0];
160 }
161 const_reference front() const {
162 assert(!empty());
163 return begin()[0];
164 }
165
166 reference back() {
167 assert(!empty());
168 return end()[-1];
169 }
170 const_reference back() const {
171 assert(!empty());
172 return end()[-1];
173 }
174};
175
176/// SmallVectorTemplateBase<TriviallyCopyable = false> - This is where we put method
177/// implementations that are designed to work with non-POD-like T's.
178template <typename T, bool = is_trivially_copyable<T>::value>
179class SmallVectorTemplateBase : public SmallVectorTemplateCommon<T> {
180protected:
181 SmallVectorTemplateBase(size_t Size) : SmallVectorTemplateCommon<T>(Size) {}
182
183 static void destroy_range(T *S, T *E) {
184 while (S != E) {
185 --E;
186 E->~T();
187 }
188 }
189
190 /// Move the range [I, E) into the uninitialized memory starting with "Dest",
191 /// constructing elements as needed.
192 template<typename It1, typename It2>
193 static void uninitialized_move(It1 I, It1 E, It2 Dest) {
194 std::uninitialized_copy(std::make_move_iterator(I),
195 std::make_move_iterator(E), Dest);
196 }
197
198 /// Copy the range [I, E) onto the uninitialized memory starting with "Dest",
199 /// constructing elements as needed.
200 template<typename It1, typename It2>
201 static void uninitialized_copy(It1 I, It1 E, It2 Dest) {
202 std::uninitialized_copy(I, E, Dest);
203 }
204
205 /// Grow the allocated memory (without initializing new elements), doubling
206 /// the size of the allocated memory. Guarantees space for at least one more
207 /// element, or MinSize more elements if specified.
208 void grow(size_t MinSize = 0);
209
210public:
211 void push_back(const T &Elt) {
212 if (LLVM_UNLIKELY(this->size() >= this->capacity()))
213 this->grow();
214 ::new ((void*) this->end()) T(Elt);
215 this->set_size(this->size() + 1);
216 }
217
218 void push_back(T &&Elt) {
219 if (LLVM_UNLIKELY(this->size() >= this->capacity()))
220 this->grow();
221 ::new ((void*) this->end()) T(::std::move(Elt));
222 this->set_size(this->size() + 1);
223 }
224
225 void pop_back() {
226 this->set_size(this->size() - 1);
227 this->end()->~T();
228 }
229};
230
231// Define this out-of-line to dissuade the C++ compiler from inlining it.
232template <typename T, bool TriviallyCopyable>
233void SmallVectorTemplateBase<T, TriviallyCopyable>::grow(size_t MinSize) {
234 if (MinSize > UINT32_MAX)
235 report_bad_alloc_error("SmallVector capacity overflow during allocation");
236
237 // Always grow, even from zero.
238 size_t NewCapacity = size_t(NextPowerOf2(this->capacity() + 2));
239 NewCapacity = std::min(std::max(NewCapacity, MinSize), size_t(UINT32_MAX));
240 T *NewElts = static_cast<T*>(llvm::safe_malloc(NewCapacity*sizeof(T)));
241
242 // Move the elements over.
243 this->uninitialized_move(this->begin(), this->end(), NewElts);
244
245 // Destroy the original elements.
246 destroy_range(this->begin(), this->end());
247
248 // If this wasn't grown from the inline copy, deallocate the old space.
249 if (!this->isSmall())
250 free(this->begin());
251
252 this->BeginX = NewElts;
253 this->Capacity = NewCapacity;
254}
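(Not part of the annotated source.) The capacity policy in the grow() above can be restated stand-alone; nextCapacitySketch is a hypothetical name and simply mirrors lines 238-239 of the listing.

#include <algorithm>
#include <cstddef>
#include <cstdint>
#include "llvm/Support/MathExtras.h"

size_t nextCapacitySketch(size_t CurCapacity, size_t MinSize = 0) {
  // Always grow, even from zero, then clamp to [MinSize, UINT32_MAX].
  size_t NewCapacity = size_t(llvm::NextPowerOf2(CurCapacity + 2));
  return std::min(std::max(NewCapacity, MinSize), size_t(UINT32_MAX));
}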
255
256/// SmallVectorTemplateBase<TriviallyCopyable = true> - This is where we put
257/// method implementations that are designed to work with POD-like T's.
258template <typename T>
259class SmallVectorTemplateBase<T, true> : public SmallVectorTemplateCommon<T> {
260protected:
261 SmallVectorTemplateBase(size_t Size) : SmallVectorTemplateCommon<T>(Size) {}
262
263 // No need to do a destroy loop for POD's.
264 static void destroy_range(T *, T *) {}
265
266 /// Move the range [I, E) onto the uninitialized memory
267 /// starting with "Dest", constructing elements into it as needed.
268 template<typename It1, typename It2>
269 static void uninitialized_move(It1 I, It1 E, It2 Dest) {
270 // Just do a copy.
271 uninitialized_copy(I, E, Dest);
272 }
273
274 /// Copy the range [I, E) onto the uninitialized memory
275 /// starting with "Dest", constructing elements into it as needed.
276 template<typename It1, typename It2>
277 static void uninitialized_copy(It1 I, It1 E, It2 Dest) {
278 // Arbitrary iterator types; just use the basic implementation.
279 std::uninitialized_copy(I, E, Dest);
280 }
281
282 /// Copy the range [I, E) onto the uninitialized memory
283 /// starting with "Dest", constructing elements into it as needed.
284 template <typename T1, typename T2>
285 static void uninitialized_copy(
286 T1 *I, T1 *E, T2 *Dest,
287 typename std::enable_if<std::is_same<typename std::remove_const<T1>::type,
288 T2>::value>::type * = nullptr) {
289 // Use memcpy for PODs iterated by pointers (which includes SmallVector
290 // iterators): std::uninitialized_copy optimizes to memmove, but we can
291 // use memcpy here. Note that I and E are iterators and thus might be
292 // invalid for memcpy if they are equal.
293 if (I != E)
294 memcpy(reinterpret_cast<void *>(Dest), I, (E - I) * sizeof(T));
295 }
296
297 /// Double the size of the allocated memory, guaranteeing space for at
298 /// least one more element or MinSize if specified.
299 void grow(size_t MinSize = 0) { this->grow_pod(MinSize, sizeof(T)); }
300
301public:
302 void push_back(const T &Elt) {
303 if (LLVM_UNLIKELY(this->size() >= this->capacity()))
304 this->grow();
305 memcpy(reinterpret_cast<void *>(this->end()), &Elt, sizeof(T));
306 this->set_size(this->size() + 1);
307 }
308
309 void pop_back() { this->set_size(this->size() - 1); }
310};
311
312/// This class consists of common code factored out of the SmallVector class to
313/// reduce code duplication based on the SmallVector 'N' template parameter.
314template <typename T>
315class SmallVectorImpl : public SmallVectorTemplateBase<T> {
316 using SuperClass = SmallVectorTemplateBase<T>;
317
318public:
319 using iterator = typename SuperClass::iterator;
320 using const_iterator = typename SuperClass::const_iterator;
321 using reference = typename SuperClass::reference;
322 using size_type = typename SuperClass::size_type;
323
324protected:
325 // Default ctor - Initialize to empty.
326 explicit SmallVectorImpl(unsigned N)
327 : SmallVectorTemplateBase<T>(N) {}
328
329public:
330 SmallVectorImpl(const SmallVectorImpl &) = delete;
331
332 ~SmallVectorImpl() {
333 // Subclass has already destructed this vector's elements.
334 // If this wasn't grown from the inline copy, deallocate the old space.
335 if (!this->isSmall())
336 free(this->begin());
337 }
338
339 void clear() {
340 this->destroy_range(this->begin(), this->end());
341 this->Size = 0;
342 }
343
344 void resize(size_type N) {
345 if (N < this->size()) {
346 this->destroy_range(this->begin()+N, this->end());
347 this->set_size(N);
348 } else if (N > this->size()) {
349 if (this->capacity() < N)
350 this->grow(N);
351 for (auto I = this->end(), E = this->begin() + N; I != E; ++I)
352 new (&*I) T();
353 this->set_size(N);
354 }
355 }
356
357 void resize(size_type N, const T &NV) {
358 if (N < this->size()) {
359 this->destroy_range(this->begin()+N, this->end());
360 this->set_size(N);
361 } else if (N > this->size()) {
362 if (this->capacity() < N)
363 this->grow(N);
364 std::uninitialized_fill(this->end(), this->begin()+N, NV);
365 this->set_size(N);
366 }
367 }
368
369 void reserve(size_type N) {
370 if (this->capacity() < N)
371 this->grow(N);
372 }
373
374 LLVM_NODISCARD T pop_back_val() {
375 T Result = ::std::move(this->back());
376 this->pop_back();
377 return Result;
378 }
379
380 void swap(SmallVectorImpl &RHS);
381
382 /// Add the specified range to the end of the SmallVector.
383 template <typename in_iter,
384 typename = typename std::enable_if<std::is_convertible<
385 typename std::iterator_traits<in_iter>::iterator_category,
386 std::input_iterator_tag>::value>::type>
387 void append(in_iter in_start, in_iter in_end) {
388 size_type NumInputs = std::distance(in_start, in_end);
389 if (NumInputs > this->capacity() - this->size())
390 this->grow(this->size()+NumInputs);
391
392 this->uninitialized_copy(in_start, in_end, this->end());
393 this->set_size(this->size() + NumInputs);
394 }
395
396 /// Append \p NumInputs copies of \p Elt to the end.
397 void append(size_type NumInputs, const T &Elt) {
398 if (NumInputs > this->capacity() - this->size())
399 this->grow(this->size()+NumInputs);
400
401 std::uninitialized_fill_n(this->end(), NumInputs, Elt);
402 this->set_size(this->size() + NumInputs);
403 }
404
405 void append(std::initializer_list<T> IL) {
406 append(IL.begin(), IL.end());
407 }
408
409 // FIXME: Consider assigning over existing elements, rather than clearing &
410 // re-initializing them - for all assign(...) variants.
411
412 void assign(size_type NumElts, const T &Elt) {
413 clear();
414 if (this->capacity() < NumElts)
415 this->grow(NumElts);
416 this->set_size(NumElts);
417 std::uninitialized_fill(this->begin(), this->end(), Elt);
418 }
419
420 template <typename in_iter,
421 typename = typename std::enable_if<std::is_convertible<
422 typename std::iterator_traits<in_iter>::iterator_category,
423 std::input_iterator_tag>::value>::type>
424 void assign(in_iter in_start, in_iter in_end) {
425 clear();
426 append(in_start, in_end);
427 }
428
429 void assign(std::initializer_list<T> IL) {
430 clear();
431 append(IL);
432 }
433
434 iterator erase(const_iterator CI) {
435 // Just cast away constness because this is a non-const member function.
436 iterator I = const_cast<iterator>(CI);
437
438 assert(I >= this->begin() && "Iterator to erase is out of bounds.");
439 assert(I < this->end() && "Erasing at past-the-end iterator.");
440
441 iterator N = I;
442 // Shift all elts down one.
443 std::move(I+1, this->end(), I);
444 // Drop the last elt.
445 this->pop_back();
446 return(N);
447 }
448
449 iterator erase(const_iterator CS, const_iterator CE) {
450 // Just cast away constness because this is a non-const member function.
451 iterator S = const_cast<iterator>(CS);
452 iterator E = const_cast<iterator>(CE);
453
454 assert(S >= this->begin() && "Range to erase is out of bounds.");
455 assert(S <= E && "Trying to erase invalid range.");
456 assert(E <= this->end() && "Trying to erase past the end.");
457
458 iterator N = S;
459 // Shift all elts down.
460 iterator I = std::move(E, this->end(), S);
461 // Drop the last elts.
462 this->destroy_range(I, this->end());
463 this->set_size(I - this->begin());
464 return(N);
465 }
466
467 iterator insert(iterator I, T &&Elt) {
468 if (I == this->end()) { // Important special case for empty vector.
469 this->push_back(::std::move(Elt));
470 return this->end()-1;
471 }
472
473 assert(I >= this->begin() && "Insertion iterator is out of bounds.");
474 assert(I <= this->end() && "Inserting past the end of the vector.");
475
476 if (this->size() >= this->capacity()) {
477 size_t EltNo = I-this->begin();
478 this->grow();
479 I = this->begin()+EltNo;
480 }
481
482 ::new ((void*) this->end()) T(::std::move(this->back()));
483 // Push everything else over.
484 std::move_backward(I, this->end()-1, this->end());
485 this->set_size(this->size() + 1);
486
487 // If we just moved the element we're inserting, be sure to update
488 // the reference.
489 T *EltPtr = &Elt;
490 if (I <= EltPtr && EltPtr < this->end())
491 ++EltPtr;
492
493 *I = ::std::move(*EltPtr);
494 return I;
495 }
496
497 iterator insert(iterator I, const T &Elt) {
498 if (I == this->end()) { // Important special case for empty vector.
499 this->push_back(Elt);
500 return this->end()-1;
501 }
502
503 assert(I >= this->begin() && "Insertion iterator is out of bounds.");
504 assert(I <= this->end() && "Inserting past the end of the vector.");
505
506 if (this->size() >= this->capacity()) {
507 size_t EltNo = I-this->begin();
508 this->grow();
509 I = this->begin()+EltNo;
510 }
511 ::new ((void*) this->end()) T(std::move(this->back()));
512 // Push everything else over.
513 std::move_backward(I, this->end()-1, this->end());
514 this->set_size(this->size() + 1);
515
516 // If we just moved the element we're inserting, be sure to update
517 // the reference.
518 const T *EltPtr = &Elt;
519 if (I <= EltPtr && EltPtr < this->end())
520 ++EltPtr;
521
522 *I = *EltPtr;
523 return I;
524 }
525
526 iterator insert(iterator I, size_type NumToInsert, const T &Elt) {
527 // Convert iterator to elt# to avoid invalidating iterator when we reserve()
528 size_t InsertElt = I - this->begin();
529
530 if (I == this->end()) { // Important special case for empty vector.
531 append(NumToInsert, Elt);
532 return this->begin()+InsertElt;
533 }
534
535 assert(I >= this->begin() && "Insertion iterator is out of bounds.");
536 assert(I <= this->end() && "Inserting past the end of the vector.");
537
538 // Ensure there is enough space.
539 reserve(this->size() + NumToInsert);
540
541 // Uninvalidate the iterator.
542 I = this->begin()+InsertElt;
543
544 // If there are more elements between the insertion point and the end of the
545 // range than there are being inserted, we can use a simple approach to
546 // insertion. Since we already reserved space, we know that this won't
547 // reallocate the vector.
548 if (size_t(this->end()-I) >= NumToInsert) {
549 T *OldEnd = this->end();
550 append(std::move_iterator<iterator>(this->end() - NumToInsert),
551 std::move_iterator<iterator>(this->end()));
552
553 // Copy the existing elements that get replaced.
554 std::move_backward(I, OldEnd-NumToInsert, OldEnd);
555
556 std::fill_n(I, NumToInsert, Elt);
557 return I;
558 }
559
560 // Otherwise, we're inserting more elements than exist already, and we're
561 // not inserting at the end.
562
563 // Move over the elements that we're about to overwrite.
564 T *OldEnd = this->end();
565 this->set_size(this->size() + NumToInsert);
566 size_t NumOverwritten = OldEnd-I;
567 this->uninitialized_move(I, OldEnd, this->end()-NumOverwritten);
568
569 // Replace the overwritten part.
570 std::fill_n(I, NumOverwritten, Elt);
571
572 // Insert the non-overwritten middle part.
573 std::uninitialized_fill_n(OldEnd, NumToInsert-NumOverwritten, Elt);
574 return I;
575 }
576
577 template <typename ItTy,
578 typename = typename std::enable_if<std::is_convertible<
579 typename std::iterator_traits<ItTy>::iterator_category,
580 std::input_iterator_tag>::value>::type>
581 iterator insert(iterator I, ItTy From, ItTy To) {
582 // Convert iterator to elt# to avoid invalidating iterator when we reserve()
583 size_t InsertElt = I - this->begin();
584
585 if (I == this->end()) { // Important special case for empty vector.
586 append(From, To);
587 return this->begin()+InsertElt;
588 }
589
590 assert(I >= this->begin() && "Insertion iterator is out of bounds.");
591 assert(I <= this->end() && "Inserting past the end of the vector.");
592
593 size_t NumToInsert = std::distance(From, To);
594
595 // Ensure there is enough space.
596 reserve(this->size() + NumToInsert);
597
598 // Uninvalidate the iterator.
599 I = this->begin()+InsertElt;
600
601 // If there are more elements between the insertion point and the end of the
602 // range than there are being inserted, we can use a simple approach to
603 // insertion. Since we already reserved space, we know that this won't
604 // reallocate the vector.
605 if (size_t(this->end()-I) >= NumToInsert) {
606 T *OldEnd = this->end();
607 append(std::move_iterator<iterator>(this->end() - NumToInsert),
608 std::move_iterator<iterator>(this->end()));
609
610 // Copy the existing elements that get replaced.
611 std::move_backward(I, OldEnd-NumToInsert, OldEnd);
612
613 std::copy(From, To, I);
614 return I;
615 }
616
617 // Otherwise, we're inserting more elements than exist already, and we're
618 // not inserting at the end.
619
620 // Move over the elements that we're about to overwrite.
621 T *OldEnd = this->end();
622 this->set_size(this->size() + NumToInsert);
623 size_t NumOverwritten = OldEnd-I;
624 this->uninitialized_move(I, OldEnd, this->end()-NumOverwritten);
625
626 // Replace the overwritten part.
627 for (T *J = I; NumOverwritten > 0; --NumOverwritten) {
628 *J = *From;
629 ++J; ++From;
630 }
631
632 // Insert the non-overwritten middle part.
633 this->uninitialized_copy(From, To, OldEnd);
634 return I;
635 }
636
637 void insert(iterator I, std::initializer_list<T> IL) {
638 insert(I, IL.begin(), IL.end());
639 }
640
641 template <typename... ArgTypes> reference emplace_back(ArgTypes &&... Args) {
642 if (LLVM_UNLIKELY(this->size() >= this->capacity()))
643 this->grow();
644 ::new ((void *)this->end()) T(std::forward<ArgTypes>(Args)...);
645 this->set_size(this->size() + 1);
646 return this->back();
647 }
648
649 SmallVectorImpl &operator=(const SmallVectorImpl &RHS);
650
651 SmallVectorImpl &operator=(SmallVectorImpl &&RHS);
652
653 bool operator==(const SmallVectorImpl &RHS) const {
654 if (this->size() != RHS.size()) return false;
655 return std::equal(this->begin(), this->end(), RHS.begin());
656 }
657 bool operator!=(const SmallVectorImpl &RHS) const {
658 return !(*this == RHS);
659 }
660
661 bool operator<(const SmallVectorImpl &RHS) const {
662 return std::lexicographical_compare(this->begin(), this->end(),
663 RHS.begin(), RHS.end());
664 }
665};
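(Not part of the annotated source.) A minimal usage sketch of the SmallVectorImpl interface listed above (append, insert, erase, pop_back_val), assuming the LLVM 10 SmallVector.h; smallVectorImplSketch is a hypothetical name.

#include "llvm/ADT/SmallVector.h"
using namespace llvm;

void smallVectorImplSketch() {
  SmallVector<int, 4> V;         // SmallVectorImpl<int> is the interface it exposes
  V.append({1, 2, 3});           // append(std::initializer_list)
  V.insert(V.begin() + 1, 9);    // {1, 9, 2, 3}
  V.erase(V.begin());            // {9, 2, 3}
  int Last = V.pop_back_val();   // 3; the vector is now {9, 2}
  (void)Last;
}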
666
667template <typename T>
668void SmallVectorImpl<T>::swap(SmallVectorImpl<T> &RHS) {
669 if (this == &RHS) return;
670
671 // We can only avoid copying elements if neither vector is small.
672 if (!this->isSmall() && !RHS.isSmall()) {
673 std::swap(this->BeginX, RHS.BeginX);
674 std::swap(this->Size, RHS.Size);
675 std::swap(this->Capacity, RHS.Capacity);
676 return;
677 }
678 if (RHS.size() > this->capacity())
679 this->grow(RHS.size());
680 if (this->size() > RHS.capacity())
681 RHS.grow(this->size());
682
683 // Swap the shared elements.
684 size_t NumShared = this->size();
685 if (NumShared > RHS.size()) NumShared = RHS.size();
686 for (size_type i = 0; i != NumShared; ++i)
687 std::swap((*this)[i], RHS[i]);
688
689 // Copy over the extra elts.
690 if (this->size() > RHS.size()) {
691 size_t EltDiff = this->size() - RHS.size();
692 this->uninitialized_copy(this->begin()+NumShared, this->end(), RHS.end());
693 RHS.set_size(RHS.size() + EltDiff);
694 this->destroy_range(this->begin()+NumShared, this->end());
695 this->set_size(NumShared);
696 } else if (RHS.size() > this->size()) {
697 size_t EltDiff = RHS.size() - this->size();
698 this->uninitialized_copy(RHS.begin()+NumShared, RHS.end(), this->end());
699 this->set_size(this->size() + EltDiff);
700 this->destroy_range(RHS.begin()+NumShared, RHS.end());
701 RHS.set_size(NumShared);
702 }
703}
704
705template <typename T>
706SmallVectorImpl<T> &SmallVectorImpl<T>::
707 operator=(const SmallVectorImpl<T> &RHS) {
708 // Avoid self-assignment.
709 if (this == &RHS) return *this;
710
711 // If we already have sufficient space, assign the common elements, then
712 // destroy any excess.
713 size_t RHSSize = RHS.size();
714 size_t CurSize = this->size();
715 if (CurSize >= RHSSize) {
716 // Assign common elements.
717 iterator NewEnd;
718 if (RHSSize)
719 NewEnd = std::copy(RHS.begin(), RHS.begin()+RHSSize, this->begin());
720 else
721 NewEnd = this->begin();
722
723 // Destroy excess elements.
724 this->destroy_range(NewEnd, this->end());
725
726 // Trim.
727 this->set_size(RHSSize);
728 return *this;
729 }
730
731 // If we have to grow to have enough elements, destroy the current elements.
732 // This allows us to avoid copying them during the grow.
733 // FIXME: don't do this if they're efficiently moveable.
734 if (this->capacity() < RHSSize) {
735 // Destroy current elements.
736 this->destroy_range(this->begin(), this->end());
737 this->set_size(0);
738 CurSize = 0;
739 this->grow(RHSSize);
740 } else if (CurSize) {
741 // Otherwise, use assignment for the already-constructed elements.
742 std::copy(RHS.begin(), RHS.begin()+CurSize, this->begin());
743 }
744
745 // Copy construct the new elements in place.
746 this->uninitialized_copy(RHS.begin()+CurSize, RHS.end(),
747 this->begin()+CurSize);
748
749 // Set end.
750 this->set_size(RHSSize);
751 return *this;
752}
753
754template <typename T>
755SmallVectorImpl<T> &SmallVectorImpl<T>::operator=(SmallVectorImpl<T> &&RHS) {
756 // Avoid self-assignment.
757 if (this == &RHS) return *this;
758
759 // If the RHS isn't small, clear this vector and then steal its buffer.
760 if (!RHS.isSmall()) {
761 this->destroy_range(this->begin(), this->end());
762 if (!this->isSmall()) free(this->begin());
763 this->BeginX = RHS.BeginX;
764 this->Size = RHS.Size;
765 this->Capacity = RHS.Capacity;
766 RHS.resetToSmall();
767 return *this;
768 }
769
770 // If we already have sufficient space, assign the common elements, then
771 // destroy any excess.
772 size_t RHSSize = RHS.size();
773 size_t CurSize = this->size();
774 if (CurSize >= RHSSize) {
775 // Assign common elements.
776 iterator NewEnd = this->begin();
777 if (RHSSize)
778 NewEnd = std::move(RHS.begin(), RHS.end(), NewEnd);
779
780 // Destroy excess elements and trim the bounds.
781 this->destroy_range(NewEnd, this->end());
782 this->set_size(RHSSize);
783
784 // Clear the RHS.
785 RHS.clear();
786
787 return *this;
788 }
789
790 // If we have to grow to have enough elements, destroy the current elements.
791 // This allows us to avoid copying them during the grow.
792 // FIXME: this may not actually make any sense if we can efficiently move
793 // elements.
794 if (this->capacity() < RHSSize) {
795 // Destroy current elements.
796 this->destroy_range(this->begin(), this->end());
797 this->set_size(0);
798 CurSize = 0;
799 this->grow(RHSSize);
800 } else if (CurSize) {
801 // Otherwise, use assignment for the already-constructed elements.
802 std::move(RHS.begin(), RHS.begin()+CurSize, this->begin());
803 }
804
805 // Move-construct the new elements in place.
806 this->uninitialized_move(RHS.begin()+CurSize, RHS.end(),
807 this->begin()+CurSize);
808
809 // Set end.
810 this->set_size(RHSSize);
811
812 RHS.clear();
813 return *this;
814}
815
816/// Storage for the SmallVector elements. This is specialized for the N=0 case
817/// to avoid allocating unnecessary storage.
818template <typename T, unsigned N>
819struct SmallVectorStorage {
820 AlignedCharArrayUnion<T> InlineElts[N];
821};
822
823/// We need the storage to be properly aligned even for small-size of 0 so that
824/// the pointer math in \a SmallVectorTemplateCommon::getFirstEl() is
825/// well-defined.
826template <typename T> struct alignas(alignof(T)) SmallVectorStorage<T, 0> {};
827
828/// This is a 'vector' (really, a variable-sized array), optimized
829/// for the case when the array is small. It contains some number of elements
830/// in-place, which allows it to avoid heap allocation when the actual number of
831/// elements is below that threshold. This allows normal "small" cases to be
832/// fast without losing generality for large inputs.
833///
834/// Note that this does not attempt to be exception safe.
835///
836template <typename T, unsigned N>
837class SmallVector : public SmallVectorImpl<T>, SmallVectorStorage<T, N> {
838public:
839 SmallVector() : SmallVectorImpl<T>(N) {}
840
841 ~SmallVector() {
842 // Destroy the constructed elements in the vector.
843 this->destroy_range(this->begin(), this->end());
844 }
845
846 explicit SmallVector(size_t Size, const T &Value = T())
847 : SmallVectorImpl<T>(N) {
848 this->assign(Size, Value);
849 }
850
851 template <typename ItTy,
852 typename = typename std::enable_if<std::is_convertible<
853 typename std::iterator_traits<ItTy>::iterator_category,
854 std::input_iterator_tag>::value>::type>
855 SmallVector(ItTy S, ItTy E) : SmallVectorImpl<T>(N) {
856 this->append(S, E);
857 }
858
859 template <typename RangeTy>
860 explicit SmallVector(const iterator_range<RangeTy> &R)
861 : SmallVectorImpl<T>(N) {
862 this->append(R.begin(), R.end());
863 }
864
865 SmallVector(std::initializer_list<T> IL) : SmallVectorImpl<T>(N) {
866 this->assign(IL);
867 }
868
869 SmallVector(const SmallVector &RHS) : SmallVectorImpl<T>(N) {
870 if (!RHS.empty())
871 SmallVectorImpl<T>::operator=(RHS);
872 }
873
874 const SmallVector &operator=(const SmallVector &RHS) {
875 SmallVectorImpl<T>::operator=(RHS);
876 return *this;
877 }
878
879 SmallVector(SmallVector &&RHS) : SmallVectorImpl<T>(N) {
880 if (!RHS.empty())
881 SmallVectorImpl<T>::operator=(::std::move(RHS));
882 }
883
884 SmallVector(SmallVectorImpl<T> &&RHS) : SmallVectorImpl<T>(N) {
885 if (!RHS.empty())
886 SmallVectorImpl<T>::operator=(::std::move(RHS));
887 }
888
889 const SmallVector &operator=(SmallVector &&RHS) {
890 SmallVectorImpl<T>::operator=(::std::move(RHS));
891 return *this;
892 }
893
894 const SmallVector &operator=(SmallVectorImpl<T> &&RHS) {
895 SmallVectorImpl<T>::operator=(::std::move(RHS));
896 return *this;
897 }
898
899 const SmallVector &operator=(std::initializer_list<T> IL) {
900 this->assign(IL);
901 return *this;
902 }
903};
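(Not part of the annotated source.) A short sketch of the SmallVector constructors and assignments listed above; smallVectorCtorSketch is a hypothetical name and std::string is chosen only to exercise the non-trivially-copyable path.

#include <string>
#include <utility>
#include "llvm/ADT/SmallVector.h"
using namespace llvm;

void smallVectorCtorSketch() {
  SmallVector<std::string, 2> A(3, "x");              // three copies; spills past the inline storage
  SmallVector<std::string, 2> B(A.begin(), A.end());  // iterator-range constructor
  SmallVector<std::string, 2> C{"a", "b"};            // initializer_list constructor
  C = std::move(B);                                   // move assignment adopts B's heap buffer
}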
904
905template <typename T, unsigned N>
906inline size_t capacity_in_bytes(const SmallVector<T, N> &X) {
907 return X.capacity_in_bytes();
908}
909
910} // end namespace llvm
911
912namespace std {
913
914 /// Implement std::swap in terms of SmallVector swap.
915 template<typename T>
916 inline void
917 swap(llvm::SmallVectorImpl<T> &LHS, llvm::SmallVectorImpl<T> &RHS) {
918 LHS.swap(RHS);
919 }
920
921 /// Implement std::swap in terms of SmallVector swap.
922 template<typename T, unsigned N>
923 inline void
924 swap(llvm::SmallVector<T, N> &LHS, llvm::SmallVector<T, N> &RHS) {
925 LHS.swap(RHS);
926 }
927
928} // end namespace std
929
930#endif // LLVM_ADT_SMALLVECTOR_H