File: lib/Target/X86/X86ISelLowering.cpp
Warning: line 7318, column 1: Potential leak of memory pointed to by 'LoadMask.X'
//===-- X86ISelLowering.cpp - X86 DAG Lowering Implementation -------------===//
//
// The LLVM Compiler Infrastructure
//
// This file is distributed under the University of Illinois Open Source
// License. See LICENSE.TXT for details.
//
//===----------------------------------------------------------------------===//
//
// This file defines the interfaces that X86 uses to lower LLVM code into a
// selection DAG.
//
//===----------------------------------------------------------------------===//

#include "X86ISelLowering.h"
#include "Utils/X86ShuffleDecode.h"
#include "X86CallingConv.h"
#include "X86FrameLowering.h"
#include "X86InstrBuilder.h"
#include "X86IntrinsicsInfo.h"
#include "X86MachineFunctionInfo.h"
#include "X86TargetMachine.h"
#include "X86TargetObjectFile.h"
#include "llvm/ADT/SmallBitVector.h"
#include "llvm/ADT/SmallSet.h"
#include "llvm/ADT/Statistic.h"
#include "llvm/ADT/StringExtras.h"
#include "llvm/ADT/StringSwitch.h"
#include "llvm/Analysis/EHPersonalities.h"
#include "llvm/CodeGen/IntrinsicLowering.h"
#include "llvm/CodeGen/MachineFrameInfo.h"
#include "llvm/CodeGen/MachineFunction.h"
#include "llvm/CodeGen/MachineInstrBuilder.h"
#include "llvm/CodeGen/MachineJumpTableInfo.h"
#include "llvm/CodeGen/MachineModuleInfo.h"
#include "llvm/CodeGen/MachineRegisterInfo.h"
#include "llvm/CodeGen/TargetLowering.h"
#include "llvm/CodeGen/WinEHFuncInfo.h"
#include "llvm/IR/CallSite.h"
#include "llvm/IR/CallingConv.h"
#include "llvm/IR/Constants.h"
#include "llvm/IR/DerivedTypes.h"
#include "llvm/IR/DiagnosticInfo.h"
#include "llvm/IR/Function.h"
#include "llvm/IR/GlobalAlias.h"
#include "llvm/IR/GlobalVariable.h"
#include "llvm/IR/Instructions.h"
#include "llvm/IR/Intrinsics.h"
#include "llvm/MC/MCAsmInfo.h"
#include "llvm/MC/MCContext.h"
#include "llvm/MC/MCExpr.h"
#include "llvm/MC/MCSymbol.h"
#include "llvm/Support/CommandLine.h"
#include "llvm/Support/Debug.h"
#include "llvm/Support/ErrorHandling.h"
#include "llvm/Support/KnownBits.h"
#include "llvm/Support/MathExtras.h"
#include "llvm/Target/TargetOptions.h"
#include <algorithm>
#include <bitset>
#include <cctype>
#include <numeric>
using namespace llvm;
#define DEBUG_TYPE "x86-isel"

STATISTIC(NumTailCalls, "Number of tail calls");

static cl::opt<bool> ExperimentalVectorWideningLegalization(
    "x86-experimental-vector-widening-legalization", cl::init(false),
    cl::desc("Enable an experimental vector type legalization through widening "
             "rather than promotion."),
    cl::Hidden);

static cl::opt<int> ExperimentalPrefLoopAlignment(
    "x86-experimental-pref-loop-alignment", cl::init(4),
    cl::desc("Sets the preferable loop alignment for experiments "
             "(the last x86-experimental-pref-loop-alignment bits"
             " of the loop header PC will be 0)."),
    cl::Hidden);

static cl::opt<bool> MulConstantOptimization(
    "mul-constant-optimization", cl::init(true),
    cl::desc("Replace 'mul x, Const' with more effective instructions like "
             "SHIFT, LEA, etc."),
    cl::Hidden);

/// Call this when the user attempts to do something unsupported, like
/// returning a double without SSE2 enabled on x86_64. This is not fatal, unlike
/// report_fatal_error, so calling code should attempt to recover without
/// crashing.
static void errorUnsupported(SelectionDAG &DAG, const SDLoc &dl,
                             const char *Msg) {
  MachineFunction &MF = DAG.getMachineFunction();
  DAG.getContext()->diagnose(
      DiagnosticInfoUnsupported(MF.getFunction(), Msg, dl.getDebugLoc()));
}
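
// Example call site (illustrative; the exact message text is an assumption):
//   errorUnsupported(DAG, dl, "SSE register return with SSE disabled");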

X86TargetLowering::X86TargetLowering(const X86TargetMachine &TM,
                                     const X86Subtarget &STI)
    : TargetLowering(TM), Subtarget(STI) {
  bool UseX87 = !Subtarget.useSoftFloat() && Subtarget.hasX87();
  X86ScalarSSEf64 = Subtarget.hasSSE2();
  X86ScalarSSEf32 = Subtarget.hasSSE1();
  MVT PtrVT = MVT::getIntegerVT(TM.getPointerSizeInBits(0));

  // Set up the TargetLowering object.

  // X86 is weird. It always uses i8 for shift amounts and setcc results.
  setBooleanContents(ZeroOrOneBooleanContent);
  // X86-SSE is even stranger. It uses -1 or 0 for vector masks.
  setBooleanVectorContents(ZeroOrNegativeOneBooleanContent);

  // For 64-bit, since we have so many registers, use the ILP scheduler.
  // For 32-bit, use the register pressure specific scheduling.
  // For Atom, always use ILP scheduling.
  if (Subtarget.isAtom())
    setSchedulingPreference(Sched::ILP);
  else if (Subtarget.is64Bit())
    setSchedulingPreference(Sched::ILP);
  else
    setSchedulingPreference(Sched::RegPressure);
  const X86RegisterInfo *RegInfo = Subtarget.getRegisterInfo();
  setStackPointerRegisterToSaveRestore(RegInfo->getStackRegister());

  // Bypass expensive divides and use cheaper ones.
  if (TM.getOptLevel() >= CodeGenOpt::Default) {
    if (Subtarget.hasSlowDivide32())
      addBypassSlowDiv(32, 8);
    if (Subtarget.hasSlowDivide64() && Subtarget.is64Bit())
      addBypassSlowDiv(64, 32);
  }
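
  // Illustrative effect of addBypassSlowDiv(32, 8) (a sketch of the guard the
  // pass inserts, not its exact output): a 32-bit divide is dispatched to the
  // much cheaper 8-bit DIV when both operands fit in 8 bits:
  //   if ((a | b) & 0xffffff00)  q = a / b;                    // full 32-bit DIV
  //   else                       q = (uint8_t)a / (uint8_t)b;  // 8-bit DIV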

  if (Subtarget.isTargetKnownWindowsMSVC() ||
      Subtarget.isTargetWindowsItanium()) {
    // Setup Windows compiler runtime calls.
    setLibcallName(RTLIB::SDIV_I64, "_alldiv");
    setLibcallName(RTLIB::UDIV_I64, "_aulldiv");
    setLibcallName(RTLIB::SREM_I64, "_allrem");
    setLibcallName(RTLIB::UREM_I64, "_aullrem");
    setLibcallName(RTLIB::MUL_I64, "_allmul");
    setLibcallCallingConv(RTLIB::SDIV_I64, CallingConv::X86_StdCall);
    setLibcallCallingConv(RTLIB::UDIV_I64, CallingConv::X86_StdCall);
    setLibcallCallingConv(RTLIB::SREM_I64, CallingConv::X86_StdCall);
    setLibcallCallingConv(RTLIB::UREM_I64, CallingConv::X86_StdCall);
    setLibcallCallingConv(RTLIB::MUL_I64, CallingConv::X86_StdCall);
  }

  if (Subtarget.isTargetDarwin()) {
    // Darwin should use _setjmp/_longjmp instead of setjmp/longjmp.
    setUseUnderscoreSetJmp(false);
    setUseUnderscoreLongJmp(false);
  } else if (Subtarget.isTargetWindowsGNU()) {
    // MS runtime is weird: it exports _setjmp, but longjmp!
    setUseUnderscoreSetJmp(true);
    setUseUnderscoreLongJmp(false);
  } else {
    setUseUnderscoreSetJmp(true);
    setUseUnderscoreLongJmp(true);
  }

  // Set up the register classes.
  addRegisterClass(MVT::i8, &X86::GR8RegClass);
  addRegisterClass(MVT::i16, &X86::GR16RegClass);
  addRegisterClass(MVT::i32, &X86::GR32RegClass);
  if (Subtarget.is64Bit())
    addRegisterClass(MVT::i64, &X86::GR64RegClass);

  for (MVT VT : MVT::integer_valuetypes())
    setLoadExtAction(ISD::SEXTLOAD, VT, MVT::i1, Promote);

  // We don't accept any truncstore of integer registers.
  setTruncStoreAction(MVT::i64, MVT::i32, Expand);
  setTruncStoreAction(MVT::i64, MVT::i16, Expand);
  setTruncStoreAction(MVT::i64, MVT::i8, Expand);
  setTruncStoreAction(MVT::i32, MVT::i16, Expand);
  setTruncStoreAction(MVT::i32, MVT::i8, Expand);
  setTruncStoreAction(MVT::i16, MVT::i8, Expand);

  setTruncStoreAction(MVT::f64, MVT::f32, Expand);

  // SETOEQ and SETUNE require checking two conditions.
  setCondCodeAction(ISD::SETOEQ, MVT::f32, Expand);
  setCondCodeAction(ISD::SETOEQ, MVT::f64, Expand);
  setCondCodeAction(ISD::SETOEQ, MVT::f80, Expand);
  setCondCodeAction(ISD::SETUNE, MVT::f32, Expand);
  setCondCodeAction(ISD::SETUNE, MVT::f64, Expand);
  setCondCodeAction(ISD::SETUNE, MVT::f80, Expand);

  // Integer absolute.
  if (Subtarget.hasCMov()) {
    setOperationAction(ISD::ABS, MVT::i16, Custom);
    setOperationAction(ISD::ABS, MVT::i32, Custom);
    if (Subtarget.is64Bit())
      setOperationAction(ISD::ABS, MVT::i64, Custom);
  }

  // Funnel shifts.
  for (auto ShiftOp : {ISD::FSHL, ISD::FSHR}) {
    setOperationAction(ShiftOp, MVT::i16, Custom);
    setOperationAction(ShiftOp, MVT::i32, Custom);
    if (Subtarget.is64Bit())
      setOperationAction(ShiftOp, MVT::i64, Custom);
  }
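
  // A funnel shift concatenates the two operands and shifts across the seam:
  // for i32, fshl(a, b, c) produces the high 32 bits of ((a:b) << (c % 32)).
  // The custom lowering generally lets these select to x86's SHLD/SHRD.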

  // Promote all UINT_TO_FP to larger SINT_TO_FP's, as X86 doesn't have this
  // operation.
  setOperationAction(ISD::UINT_TO_FP, MVT::i1, Promote);
  setOperationAction(ISD::UINT_TO_FP, MVT::i8, Promote);
  setOperationAction(ISD::UINT_TO_FP, MVT::i16, Promote);

  if (Subtarget.is64Bit()) {
    if (!Subtarget.useSoftFloat() && Subtarget.hasAVX512())
      // f32/f64 are legal, f80 is custom.
      setOperationAction(ISD::UINT_TO_FP, MVT::i32, Custom);
    else
      setOperationAction(ISD::UINT_TO_FP, MVT::i32, Promote);
    setOperationAction(ISD::UINT_TO_FP, MVT::i64, Custom);
  } else if (!Subtarget.useSoftFloat()) {
    // We have an algorithm for SSE2->double, and we turn this into a
    // 64-bit FILD followed by conditional FADD for other targets.
    setOperationAction(ISD::UINT_TO_FP, MVT::i64, Custom);
    // We have an algorithm for SSE2, and we turn this into a 64-bit
    // FILD or VCVTUSI2SS/SD for other targets.
    setOperationAction(ISD::UINT_TO_FP, MVT::i32, Custom);
  } else {
    setOperationAction(ISD::UINT_TO_FP, MVT::i32, Expand);
  }
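
  // The SSE2 i64->f64 path mentioned above recombines the two 32-bit halves
  // with exponent tricks; roughly (an illustrative sketch, not the exact DAG):
  //   movq      %rax, %xmm0
  //   punpckldq (c0), %xmm0  // c0: (uint4){ 0x43300000U, 0x45300000U, 0, 0 }
  //   subpd     (c1), %xmm0  // c1: (double2){ 0x1.0p52, 0x1.0p84 }
  //   pshufd    $0x4e, %xmm0, %xmm1
  //   addpd     %xmm1, %xmm0 // sum of the low and high partial values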

  // Promote i1/i8 SINT_TO_FP to larger SINT_TO_FP's, as X86 doesn't have
  // this operation.
  setOperationAction(ISD::SINT_TO_FP, MVT::i1, Promote);
  setOperationAction(ISD::SINT_TO_FP, MVT::i8, Promote);

  if (!Subtarget.useSoftFloat()) {
    // SSE has no i16 to fp conversion, only i32.
    if (X86ScalarSSEf32) {
      setOperationAction(ISD::SINT_TO_FP, MVT::i16, Promote);
      // f32 and f64 cases are Legal, f80 case is not
      setOperationAction(ISD::SINT_TO_FP, MVT::i32, Custom);
    } else {
      setOperationAction(ISD::SINT_TO_FP, MVT::i16, Custom);
      setOperationAction(ISD::SINT_TO_FP, MVT::i32, Custom);
    }
  } else {
    setOperationAction(ISD::SINT_TO_FP, MVT::i16, Promote);
    setOperationAction(ISD::SINT_TO_FP, MVT::i32, Expand);
  }

  // Promote i1/i8 FP_TO_SINT to larger FP_TO_SINT's, as X86 doesn't have
  // this operation.
  setOperationAction(ISD::FP_TO_SINT, MVT::i1, Promote);
  setOperationAction(ISD::FP_TO_SINT, MVT::i8, Promote);

  if (!Subtarget.useSoftFloat()) {
    // In 32-bit mode these are custom lowered. In 64-bit mode F32 and F64
    // are Legal, f80 is custom lowered.
    setOperationAction(ISD::FP_TO_SINT, MVT::i64, Custom);
    setOperationAction(ISD::SINT_TO_FP, MVT::i64, Custom);

    if (X86ScalarSSEf32) {
      setOperationAction(ISD::FP_TO_SINT, MVT::i16, Promote);
      // f32 and f64 cases are Legal, f80 case is not
      setOperationAction(ISD::FP_TO_SINT, MVT::i32, Custom);
    } else {
      setOperationAction(ISD::FP_TO_SINT, MVT::i16, Custom);
      setOperationAction(ISD::FP_TO_SINT, MVT::i32, Custom);
    }
  } else {
    setOperationAction(ISD::FP_TO_SINT, MVT::i16, Promote);
    setOperationAction(ISD::FP_TO_SINT, MVT::i32, Expand);
    setOperationAction(ISD::FP_TO_SINT, MVT::i64, Expand);
  }

  // Handle FP_TO_UINT by promoting the destination to a larger signed
  // conversion.
  setOperationAction(ISD::FP_TO_UINT, MVT::i1, Promote);
  setOperationAction(ISD::FP_TO_UINT, MVT::i8, Promote);
  setOperationAction(ISD::FP_TO_UINT, MVT::i16, Promote);

  if (Subtarget.is64Bit()) {
    if (!Subtarget.useSoftFloat() && Subtarget.hasAVX512()) {
      // FP_TO_UINT-i32/i64 is legal for f32/f64, but custom for f80.
      setOperationAction(ISD::FP_TO_UINT, MVT::i32, Custom);
      setOperationAction(ISD::FP_TO_UINT, MVT::i64, Custom);
    } else {
      setOperationAction(ISD::FP_TO_UINT, MVT::i32, Promote);
      setOperationAction(ISD::FP_TO_UINT, MVT::i64, Expand);
    }
  } else if (!Subtarget.useSoftFloat()) {
    // Since AVX is a superset of SSE3, only check for SSE here.
    if (Subtarget.hasSSE1() && !Subtarget.hasSSE3())
      // Expand FP_TO_UINT into a select.
      // FIXME: We would like to use a Custom expander here eventually to do
      // the optimal thing for SSE vs. the default expansion in the legalizer.
      setOperationAction(ISD::FP_TO_UINT, MVT::i32, Expand);
    else
      // With AVX512 we can use vcvts[ds]2usi for f32/f64->i32, f80 is custom.
      // With SSE3 we can use fisttpll to convert to a signed i64; without
      // SSE, we're stuck with a fistpll.
      setOperationAction(ISD::FP_TO_UINT, MVT::i32, Custom);

    setOperationAction(ISD::FP_TO_UINT, MVT::i64, Custom);
  }

  // TODO: when we have SSE, these could be more efficient, by using movd/movq.
  if (!X86ScalarSSEf64) {
    setOperationAction(ISD::BITCAST, MVT::f32, Expand);
    setOperationAction(ISD::BITCAST, MVT::i32, Expand);
    if (Subtarget.is64Bit()) {
      setOperationAction(ISD::BITCAST, MVT::f64, Expand);
      // Without SSE, i64->f64 goes through memory.
      setOperationAction(ISD::BITCAST, MVT::i64, Expand);
    }
  } else if (!Subtarget.is64Bit())
    setOperationAction(ISD::BITCAST, MVT::i64, Custom);

  // Scalar integer divide and remainder are lowered to use operations that
  // produce two results, to match the available instructions. This exposes
  // the two-result form to trivial CSE, which is able to combine x/y and x%y
  // into a single instruction.
  //
  // Scalar integer multiply-high is also lowered to use two-result
  // operations, to match the available instructions. However, plain multiply
  // (low) operations are left as Legal, as there are single-result
  // instructions for this in x86. Using the two-result multiply instructions
  // when both high and low results are needed must be arranged by dagcombine.
  for (auto VT : { MVT::i8, MVT::i16, MVT::i32, MVT::i64 }) {
    setOperationAction(ISD::MULHS, VT, Expand);
    setOperationAction(ISD::MULHU, VT, Expand);
    setOperationAction(ISD::SDIV, VT, Expand);
    setOperationAction(ISD::UDIV, VT, Expand);
    setOperationAction(ISD::SREM, VT, Expand);
    setOperationAction(ISD::UREM, VT, Expand);
  }
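
  // e.g. when both 'x / y' and 'x % y' appear, legalization emits two
  // identical ISD::SDIVREM nodes that CSE into one, which then selects to a
  // single IDIV yielding the quotient in EAX and the remainder in EDX.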

  setOperationAction(ISD::BR_JT, MVT::Other, Expand);
  setOperationAction(ISD::BRCOND, MVT::Other, Custom);
  for (auto VT : { MVT::f32, MVT::f64, MVT::f80, MVT::f128,
                   MVT::i8, MVT::i16, MVT::i32, MVT::i64 }) {
    setOperationAction(ISD::BR_CC, VT, Expand);
    setOperationAction(ISD::SELECT_CC, VT, Expand);
  }
  if (Subtarget.is64Bit())
    setOperationAction(ISD::SIGN_EXTEND_INREG, MVT::i32, Legal);
  setOperationAction(ISD::SIGN_EXTEND_INREG, MVT::i16, Legal);
  setOperationAction(ISD::SIGN_EXTEND_INREG, MVT::i8, Legal);
  setOperationAction(ISD::SIGN_EXTEND_INREG, MVT::i1, Expand);
  setOperationAction(ISD::FP_ROUND_INREG, MVT::f32, Expand);

  setOperationAction(ISD::FREM, MVT::f32, Expand);
  setOperationAction(ISD::FREM, MVT::f64, Expand);
  setOperationAction(ISD::FREM, MVT::f80, Expand);
  setOperationAction(ISD::FLT_ROUNDS_, MVT::i32, Custom);

  // Promote the i8 variants and force them on up to i32 which has a shorter
  // encoding.
  setOperationPromotedToType(ISD::CTTZ, MVT::i8, MVT::i32);
  setOperationPromotedToType(ISD::CTTZ_ZERO_UNDEF, MVT::i8, MVT::i32);
  if (!Subtarget.hasBMI()) {
    setOperationAction(ISD::CTTZ, MVT::i16, Custom);
    setOperationAction(ISD::CTTZ, MVT::i32, Custom);
    setOperationAction(ISD::CTTZ_ZERO_UNDEF, MVT::i16, Legal);
    setOperationAction(ISD::CTTZ_ZERO_UNDEF, MVT::i32, Legal);
    if (Subtarget.is64Bit()) {
      setOperationAction(ISD::CTTZ, MVT::i64, Custom);
      setOperationAction(ISD::CTTZ_ZERO_UNDEF, MVT::i64, Legal);
    }
  }
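
  // Rationale: without BMI's TZCNT, BSF leaves its destination undefined when
  // the source is zero, so plain CTTZ needs a custom zero-input guard (a CMOV
  // of the operand width) while CTTZ_ZERO_UNDEF can map directly to BSF.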

  if (Subtarget.hasLZCNT()) {
    // When promoting the i8 variants, force them to i32 for a shorter
    // encoding.
    setOperationPromotedToType(ISD::CTLZ, MVT::i8, MVT::i32);
    setOperationPromotedToType(ISD::CTLZ_ZERO_UNDEF, MVT::i8, MVT::i32);
  } else {
    setOperationAction(ISD::CTLZ, MVT::i8, Custom);
    setOperationAction(ISD::CTLZ, MVT::i16, Custom);
    setOperationAction(ISD::CTLZ, MVT::i32, Custom);
    setOperationAction(ISD::CTLZ_ZERO_UNDEF, MVT::i8, Custom);
    setOperationAction(ISD::CTLZ_ZERO_UNDEF, MVT::i16, Custom);
    setOperationAction(ISD::CTLZ_ZERO_UNDEF, MVT::i32, Custom);
    if (Subtarget.is64Bit()) {
      setOperationAction(ISD::CTLZ, MVT::i64, Custom);
      setOperationAction(ISD::CTLZ_ZERO_UNDEF, MVT::i64, Custom);
    }
  }
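
  // Likewise BSR (used when LZCNT is unavailable) is undefined on a zero
  // source and returns the index of the highest set bit, so the custom
  // lowering both guards the zero case and flips the index (31 - n for i32)
  // to form CTLZ.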

  // Special handling for half-precision floating point conversions.
  // If we don't have F16C support, then lower half float conversions
  // into library calls.
  if (Subtarget.useSoftFloat() || !Subtarget.hasF16C()) {
    setOperationAction(ISD::FP16_TO_FP, MVT::f32, Expand);
    setOperationAction(ISD::FP_TO_FP16, MVT::f32, Expand);
  }

  // There's never any support for operations beyond MVT::f32.
  setOperationAction(ISD::FP16_TO_FP, MVT::f64, Expand);
  setOperationAction(ISD::FP16_TO_FP, MVT::f80, Expand);
  setOperationAction(ISD::FP_TO_FP16, MVT::f64, Expand);
  setOperationAction(ISD::FP_TO_FP16, MVT::f80, Expand);

  setLoadExtAction(ISD::EXTLOAD, MVT::f32, MVT::f16, Expand);
  setLoadExtAction(ISD::EXTLOAD, MVT::f64, MVT::f16, Expand);
  setLoadExtAction(ISD::EXTLOAD, MVT::f80, MVT::f16, Expand);
  setTruncStoreAction(MVT::f32, MVT::f16, Expand);
  setTruncStoreAction(MVT::f64, MVT::f16, Expand);
  setTruncStoreAction(MVT::f80, MVT::f16, Expand);

  if (Subtarget.hasPOPCNT()) {
    setOperationPromotedToType(ISD::CTPOP, MVT::i8, MVT::i32);
  } else {
    setOperationAction(ISD::CTPOP, MVT::i8, Expand);
    setOperationAction(ISD::CTPOP, MVT::i16, Expand);
    setOperationAction(ISD::CTPOP, MVT::i32, Expand);
    if (Subtarget.is64Bit())
      setOperationAction(ISD::CTPOP, MVT::i64, Expand);
  }

  setOperationAction(ISD::READCYCLECOUNTER, MVT::i64, Custom);

  if (!Subtarget.hasMOVBE())
    setOperationAction(ISD::BSWAP, MVT::i16, Expand);

  // These should be promoted to a larger select which is supported.
  setOperationAction(ISD::SELECT, MVT::i1, Promote);
  // X86 wants to expand cmov itself.
  for (auto VT : { MVT::f32, MVT::f64, MVT::f80, MVT::f128 }) {
    setOperationAction(ISD::SELECT, VT, Custom);
    setOperationAction(ISD::SETCC, VT, Custom);
  }
  for (auto VT : { MVT::i8, MVT::i16, MVT::i32, MVT::i64 }) {
    if (VT == MVT::i64 && !Subtarget.is64Bit())
      continue;
    setOperationAction(ISD::SELECT, VT, Custom);
    setOperationAction(ISD::SETCC, VT, Custom);
  }

  // Custom action for SELECT MMX and expand action for SELECT_CC MMX
  setOperationAction(ISD::SELECT, MVT::x86mmx, Custom);
  setOperationAction(ISD::SELECT_CC, MVT::x86mmx, Expand);

  setOperationAction(ISD::EH_RETURN, MVT::Other, Custom);
  // NOTE: EH_SJLJ_SETJMP/_LONGJMP are not recommended, since
  // LLVM/Clang supports zero-cost DWARF and SEH exception handling.
  setOperationAction(ISD::EH_SJLJ_SETJMP, MVT::i32, Custom);
  setOperationAction(ISD::EH_SJLJ_LONGJMP, MVT::Other, Custom);
  setOperationAction(ISD::EH_SJLJ_SETUP_DISPATCH, MVT::Other, Custom);
  if (TM.Options.ExceptionModel == ExceptionHandling::SjLj)
    setLibcallName(RTLIB::UNWIND_RESUME, "_Unwind_SjLj_Resume");

  // Darwin ABI issue.
  for (auto VT : { MVT::i32, MVT::i64 }) {
    if (VT == MVT::i64 && !Subtarget.is64Bit())
      continue;
    setOperationAction(ISD::ConstantPool, VT, Custom);
    setOperationAction(ISD::JumpTable, VT, Custom);
    setOperationAction(ISD::GlobalAddress, VT, Custom);
    setOperationAction(ISD::GlobalTLSAddress, VT, Custom);
    setOperationAction(ISD::ExternalSymbol, VT, Custom);
    setOperationAction(ISD::BlockAddress, VT, Custom);
  }

  // 64-bit shl, sra, srl (iff 32-bit x86)
  for (auto VT : { MVT::i32, MVT::i64 }) {
    if (VT == MVT::i64 && !Subtarget.is64Bit())
      continue;
    setOperationAction(ISD::SHL_PARTS, VT, Custom);
    setOperationAction(ISD::SRA_PARTS, VT, Custom);
    setOperationAction(ISD::SRL_PARTS, VT, Custom);
  }

  if (Subtarget.hasSSEPrefetch() || Subtarget.has3DNow())
    setOperationAction(ISD::PREFETCH, MVT::Other, Legal);

  setOperationAction(ISD::ATOMIC_FENCE, MVT::Other, Custom);

  // Expand certain atomics
  for (auto VT : { MVT::i8, MVT::i16, MVT::i32, MVT::i64 }) {
    setOperationAction(ISD::ATOMIC_CMP_SWAP_WITH_SUCCESS, VT, Custom);
    setOperationAction(ISD::ATOMIC_LOAD_SUB, VT, Custom);
    setOperationAction(ISD::ATOMIC_LOAD_ADD, VT, Custom);
    setOperationAction(ISD::ATOMIC_LOAD_OR, VT, Custom);
    setOperationAction(ISD::ATOMIC_LOAD_XOR, VT, Custom);
    setOperationAction(ISD::ATOMIC_LOAD_AND, VT, Custom);
    setOperationAction(ISD::ATOMIC_STORE, VT, Custom);
  }

  if (Subtarget.hasCmpxchg16b()) {
    setOperationAction(ISD::ATOMIC_CMP_SWAP_WITH_SUCCESS, MVT::i128, Custom);
  }

  // FIXME - use subtarget debug flags
  if (!Subtarget.isTargetDarwin() && !Subtarget.isTargetELF() &&
      !Subtarget.isTargetCygMing() && !Subtarget.isTargetWin64() &&
      TM.Options.ExceptionModel != ExceptionHandling::SjLj) {
    setOperationAction(ISD::EH_LABEL, MVT::Other, Expand);
  }

  setOperationAction(ISD::FRAME_TO_ARGS_OFFSET, MVT::i32, Custom);
  setOperationAction(ISD::FRAME_TO_ARGS_OFFSET, MVT::i64, Custom);

  setOperationAction(ISD::INIT_TRAMPOLINE, MVT::Other, Custom);
  setOperationAction(ISD::ADJUST_TRAMPOLINE, MVT::Other, Custom);

  setOperationAction(ISD::TRAP, MVT::Other, Legal);
  setOperationAction(ISD::DEBUGTRAP, MVT::Other, Legal);

  // VASTART needs to be custom lowered to use the VarArgsFrameIndex
  setOperationAction(ISD::VASTART, MVT::Other, Custom);
  setOperationAction(ISD::VAEND, MVT::Other, Expand);
  bool Is64Bit = Subtarget.is64Bit();
  setOperationAction(ISD::VAARG, MVT::Other, Is64Bit ? Custom : Expand);
  setOperationAction(ISD::VACOPY, MVT::Other, Is64Bit ? Custom : Expand);

  setOperationAction(ISD::STACKSAVE, MVT::Other, Expand);
  setOperationAction(ISD::STACKRESTORE, MVT::Other, Expand);

  setOperationAction(ISD::DYNAMIC_STACKALLOC, PtrVT, Custom);

  // GC_TRANSITION_START and GC_TRANSITION_END need custom lowering.
  setOperationAction(ISD::GC_TRANSITION_START, MVT::Other, Custom);
  setOperationAction(ISD::GC_TRANSITION_END, MVT::Other, Custom);

  if (!Subtarget.useSoftFloat() && X86ScalarSSEf64) {
    // f32 and f64 use SSE.
    // Set up the FP register classes.
    addRegisterClass(MVT::f32, Subtarget.hasAVX512() ? &X86::FR32XRegClass
                                                     : &X86::FR32RegClass);
    addRegisterClass(MVT::f64, Subtarget.hasAVX512() ? &X86::FR64XRegClass
                                                     : &X86::FR64RegClass);

    for (auto VT : { MVT::f32, MVT::f64 }) {
      // Use ANDPD to simulate FABS.
      setOperationAction(ISD::FABS, VT, Custom);

      // Use XORP to simulate FNEG.
      setOperationAction(ISD::FNEG, VT, Custom);

      // Use ANDPD and ORPD to simulate FCOPYSIGN.
      setOperationAction(ISD::FCOPYSIGN, VT, Custom);

      // We don't support sin/cos/fmod
      setOperationAction(ISD::FSIN, VT, Expand);
      setOperationAction(ISD::FCOS, VT, Expand);
      setOperationAction(ISD::FSINCOS, VT, Expand);
    }

    // Lower this to MOVMSK plus an AND.
    setOperationAction(ISD::FGETSIGN, MVT::i64, Custom);
    setOperationAction(ISD::FGETSIGN, MVT::i32, Custom);

  } else if (!Subtarget.useSoftFloat() && X86ScalarSSEf32 &&
             (UseX87 || Is64Bit)) {
    // Use SSE for f32, x87 for f64.
    // Set up the FP register classes.
    addRegisterClass(MVT::f32, &X86::FR32RegClass);
    if (UseX87)
      addRegisterClass(MVT::f64, &X86::RFP64RegClass);

    // Use ANDPS to simulate FABS.
    setOperationAction(ISD::FABS, MVT::f32, Custom);

    // Use XORP to simulate FNEG.
    setOperationAction(ISD::FNEG, MVT::f32, Custom);

    if (UseX87)
      setOperationAction(ISD::UNDEF, MVT::f64, Expand);

    // Use ANDPS and ORPS to simulate FCOPYSIGN.
    if (UseX87)
      setOperationAction(ISD::FCOPYSIGN, MVT::f64, Expand);
    setOperationAction(ISD::FCOPYSIGN, MVT::f32, Custom);

    // We don't support sin/cos/fmod
    setOperationAction(ISD::FSIN, MVT::f32, Expand);
    setOperationAction(ISD::FCOS, MVT::f32, Expand);
    setOperationAction(ISD::FSINCOS, MVT::f32, Expand);

    if (UseX87) {
      // Always expand sin/cos functions even though x87 has an instruction.
      setOperationAction(ISD::FSIN, MVT::f64, Expand);
      setOperationAction(ISD::FCOS, MVT::f64, Expand);
      setOperationAction(ISD::FSINCOS, MVT::f64, Expand);
    }
  } else if (UseX87) {
    // f32 and f64 in x87.
    // Set up the FP register classes.
    addRegisterClass(MVT::f64, &X86::RFP64RegClass);
    addRegisterClass(MVT::f32, &X86::RFP32RegClass);

    for (auto VT : { MVT::f32, MVT::f64 }) {
      setOperationAction(ISD::UNDEF, VT, Expand);
      setOperationAction(ISD::FCOPYSIGN, VT, Expand);

      // Always expand sin/cos functions even though x87 has an instruction.
      setOperationAction(ISD::FSIN, VT, Expand);
      setOperationAction(ISD::FCOS, VT, Expand);
      setOperationAction(ISD::FSINCOS, VT, Expand);
    }
  }

  // Expand FP32 immediates into loads from the stack, save special cases.
  if (isTypeLegal(MVT::f32)) {
    if (UseX87 && (getRegClassFor(MVT::f32) == &X86::RFP32RegClass)) {
      addLegalFPImmediate(APFloat(+0.0f)); // FLD0
      addLegalFPImmediate(APFloat(+1.0f)); // FLD1
      addLegalFPImmediate(APFloat(-0.0f)); // FLD0/FCHS
      addLegalFPImmediate(APFloat(-1.0f)); // FLD1/FCHS
    } else // SSE immediates.
      addLegalFPImmediate(APFloat(+0.0f)); // xorps
  }
  // Expand FP64 immediates into loads from the stack, save special cases.
  if (isTypeLegal(MVT::f64)) {
    if (UseX87 && getRegClassFor(MVT::f64) == &X86::RFP64RegClass) {
      addLegalFPImmediate(APFloat(+0.0)); // FLD0
      addLegalFPImmediate(APFloat(+1.0)); // FLD1
      addLegalFPImmediate(APFloat(-0.0)); // FLD0/FCHS
      addLegalFPImmediate(APFloat(-1.0)); // FLD1/FCHS
    } else // SSE immediates.
      addLegalFPImmediate(APFloat(+0.0)); // xorpd
  }

  // We don't support FMA.
  setOperationAction(ISD::FMA, MVT::f64, Expand);
  setOperationAction(ISD::FMA, MVT::f32, Expand);

  // Long double always uses X87, except f128 in MMX.
  if (UseX87) {
    if (Subtarget.is64Bit() && Subtarget.hasMMX()) {
      addRegisterClass(MVT::f128, Subtarget.hasVLX() ? &X86::VR128XRegClass
                                                     : &X86::VR128RegClass);
      ValueTypeActions.setTypeAction(MVT::f128, TypeSoftenFloat);
      setOperationAction(ISD::FABS, MVT::f128, Custom);
      setOperationAction(ISD::FNEG, MVT::f128, Custom);
      setOperationAction(ISD::FCOPYSIGN, MVT::f128, Custom);
    }

    addRegisterClass(MVT::f80, &X86::RFP80RegClass);
    setOperationAction(ISD::UNDEF, MVT::f80, Expand);
    setOperationAction(ISD::FCOPYSIGN, MVT::f80, Expand);
    {
      APFloat TmpFlt = APFloat::getZero(APFloat::x87DoubleExtended());
      addLegalFPImmediate(TmpFlt); // FLD0
      TmpFlt.changeSign();
      addLegalFPImmediate(TmpFlt); // FLD0/FCHS

      bool ignored;
      APFloat TmpFlt2(+1.0);
      TmpFlt2.convert(APFloat::x87DoubleExtended(),
                      APFloat::rmNearestTiesToEven, &ignored);
      addLegalFPImmediate(TmpFlt2); // FLD1
      TmpFlt2.changeSign();
      addLegalFPImmediate(TmpFlt2); // FLD1/FCHS
    }

    // Always expand sin/cos functions even though x87 has an instruction.
    setOperationAction(ISD::FSIN, MVT::f80, Expand);
    setOperationAction(ISD::FCOS, MVT::f80, Expand);
    setOperationAction(ISD::FSINCOS, MVT::f80, Expand);

    setOperationAction(ISD::FFLOOR, MVT::f80, Expand);
    setOperationAction(ISD::FCEIL, MVT::f80, Expand);
    setOperationAction(ISD::FTRUNC, MVT::f80, Expand);
    setOperationAction(ISD::FRINT, MVT::f80, Expand);
    setOperationAction(ISD::FNEARBYINT, MVT::f80, Expand);
    setOperationAction(ISD::FMA, MVT::f80, Expand);
  }

  // Always use a library call for pow.
  setOperationAction(ISD::FPOW, MVT::f32, Expand);
  setOperationAction(ISD::FPOW, MVT::f64, Expand);
  setOperationAction(ISD::FPOW, MVT::f80, Expand);

  setOperationAction(ISD::FLOG, MVT::f80, Expand);
  setOperationAction(ISD::FLOG2, MVT::f80, Expand);
  setOperationAction(ISD::FLOG10, MVT::f80, Expand);
  setOperationAction(ISD::FEXP, MVT::f80, Expand);
  setOperationAction(ISD::FEXP2, MVT::f80, Expand);
  setOperationAction(ISD::FMINNUM, MVT::f80, Expand);
  setOperationAction(ISD::FMAXNUM, MVT::f80, Expand);

  // Some FP actions are always expanded for vector types.
  for (auto VT : { MVT::v4f32, MVT::v8f32, MVT::v16f32,
                   MVT::v2f64, MVT::v4f64, MVT::v8f64 }) {
    setOperationAction(ISD::FSIN, VT, Expand);
    setOperationAction(ISD::FSINCOS, VT, Expand);
    setOperationAction(ISD::FCOS, VT, Expand);
    setOperationAction(ISD::FREM, VT, Expand);
    setOperationAction(ISD::FCOPYSIGN, VT, Expand);
    setOperationAction(ISD::FPOW, VT, Expand);
    setOperationAction(ISD::FLOG, VT, Expand);
    setOperationAction(ISD::FLOG2, VT, Expand);
    setOperationAction(ISD::FLOG10, VT, Expand);
    setOperationAction(ISD::FEXP, VT, Expand);
    setOperationAction(ISD::FEXP2, VT, Expand);
  }

  // First set operation action for all vector types to either promote
  // (for widening) or expand (for scalarization). Then we will selectively
  // turn on ones that can be effectively codegen'd.
  for (MVT VT : MVT::vector_valuetypes()) {
    setOperationAction(ISD::SDIV, VT, Expand);
    setOperationAction(ISD::UDIV, VT, Expand);
    setOperationAction(ISD::SREM, VT, Expand);
    setOperationAction(ISD::UREM, VT, Expand);
    setOperationAction(ISD::EXTRACT_VECTOR_ELT, VT, Expand);
    setOperationAction(ISD::INSERT_VECTOR_ELT, VT, Expand);
    setOperationAction(ISD::EXTRACT_SUBVECTOR, VT, Expand);
    setOperationAction(ISD::INSERT_SUBVECTOR, VT, Expand);
    setOperationAction(ISD::FMA, VT, Expand);
    setOperationAction(ISD::FFLOOR, VT, Expand);
    setOperationAction(ISD::FCEIL, VT, Expand);
    setOperationAction(ISD::FTRUNC, VT, Expand);
    setOperationAction(ISD::FRINT, VT, Expand);
    setOperationAction(ISD::FNEARBYINT, VT, Expand);
    setOperationAction(ISD::SMUL_LOHI, VT, Expand);
    setOperationAction(ISD::MULHS, VT, Expand);
    setOperationAction(ISD::UMUL_LOHI, VT, Expand);
    setOperationAction(ISD::MULHU, VT, Expand);
    setOperationAction(ISD::SDIVREM, VT, Expand);
    setOperationAction(ISD::UDIVREM, VT, Expand);
    setOperationAction(ISD::CTPOP, VT, Expand);
    setOperationAction(ISD::CTTZ, VT, Expand);
    setOperationAction(ISD::CTLZ, VT, Expand);
    setOperationAction(ISD::ROTL, VT, Expand);
    setOperationAction(ISD::ROTR, VT, Expand);
    setOperationAction(ISD::BSWAP, VT, Expand);
    setOperationAction(ISD::SETCC, VT, Expand);
    setOperationAction(ISD::FP_TO_UINT, VT, Expand);
    setOperationAction(ISD::FP_TO_SINT, VT, Expand);
    setOperationAction(ISD::UINT_TO_FP, VT, Expand);
    setOperationAction(ISD::SINT_TO_FP, VT, Expand);
    setOperationAction(ISD::SIGN_EXTEND_INREG, VT, Expand);
    setOperationAction(ISD::TRUNCATE, VT, Expand);
    setOperationAction(ISD::SIGN_EXTEND, VT, Expand);
    setOperationAction(ISD::ZERO_EXTEND, VT, Expand);
    setOperationAction(ISD::ANY_EXTEND, VT, Expand);
    setOperationAction(ISD::SELECT_CC, VT, Expand);
    for (MVT InnerVT : MVT::vector_valuetypes()) {
      setTruncStoreAction(InnerVT, VT, Expand);

      setLoadExtAction(ISD::SEXTLOAD, InnerVT, VT, Expand);
      setLoadExtAction(ISD::ZEXTLOAD, InnerVT, VT, Expand);

      // N.b. ISD::EXTLOAD legality is basically ignored except for i1-like
      // types, we have to deal with them whether we ask for Expansion or not.
      // Setting Expand causes its own optimisation problems though, so leave
      // them legal.
      if (VT.getVectorElementType() == MVT::i1)
        setLoadExtAction(ISD::EXTLOAD, InnerVT, VT, Expand);

      // EXTLOAD for MVT::f16 vectors is not legal because f16 vectors are
      // split/scalarized right now.
      if (VT.getVectorElementType() == MVT::f16)
        setLoadExtAction(ISD::EXTLOAD, InnerVT, VT, Expand);
    }
  }

  // FIXME: In order to prevent SSE instructions being expanded to MMX ones
  // with -msoft-float, disable use of MMX as well.
  if (!Subtarget.useSoftFloat() && Subtarget.hasMMX()) {
    addRegisterClass(MVT::x86mmx, &X86::VR64RegClass);
    // No operations on x86mmx supported, everything uses intrinsics.
  }

  if (!Subtarget.useSoftFloat() && Subtarget.hasSSE1()) {
    addRegisterClass(MVT::v4f32, Subtarget.hasVLX() ? &X86::VR128XRegClass
                                                    : &X86::VR128RegClass);

    setOperationAction(ISD::FNEG, MVT::v4f32, Custom);
    setOperationAction(ISD::FABS, MVT::v4f32, Custom);
    setOperationAction(ISD::FCOPYSIGN, MVT::v4f32, Custom);
    setOperationAction(ISD::BUILD_VECTOR, MVT::v4f32, Custom);
    setOperationAction(ISD::VECTOR_SHUFFLE, MVT::v4f32, Custom);
    setOperationAction(ISD::VSELECT, MVT::v4f32, Custom);
    setOperationAction(ISD::EXTRACT_VECTOR_ELT, MVT::v4f32, Custom);
    setOperationAction(ISD::SELECT, MVT::v4f32, Custom);
    setOperationAction(ISD::UINT_TO_FP, MVT::v4i32, Custom);
  }

  if (!Subtarget.useSoftFloat() && Subtarget.hasSSE2()) {
    addRegisterClass(MVT::v2f64, Subtarget.hasVLX() ? &X86::VR128XRegClass
                                                    : &X86::VR128RegClass);

    // FIXME: Unfortunately, -soft-float and -no-implicit-float mean XMM
    // registers cannot be used even for integer operations.
    addRegisterClass(MVT::v16i8, Subtarget.hasVLX() ? &X86::VR128XRegClass
                                                    : &X86::VR128RegClass);
    addRegisterClass(MVT::v8i16, Subtarget.hasVLX() ? &X86::VR128XRegClass
                                                    : &X86::VR128RegClass);
    addRegisterClass(MVT::v4i32, Subtarget.hasVLX() ? &X86::VR128XRegClass
                                                    : &X86::VR128RegClass);
    addRegisterClass(MVT::v2i64, Subtarget.hasVLX() ? &X86::VR128XRegClass
                                                    : &X86::VR128RegClass);

    for (auto VT : { MVT::v2i8, MVT::v4i8, MVT::v8i8,
                     MVT::v2i16, MVT::v4i16, MVT::v2i32 }) {
      setOperationAction(ISD::SDIV, VT, Custom);
      setOperationAction(ISD::SREM, VT, Custom);
      setOperationAction(ISD::UDIV, VT, Custom);
      setOperationAction(ISD::UREM, VT, Custom);
    }

    setOperationAction(ISD::MUL, MVT::v2i8, Custom);
    setOperationAction(ISD::MUL, MVT::v2i16, Custom);
    setOperationAction(ISD::MUL, MVT::v2i32, Custom);
    setOperationAction(ISD::MUL, MVT::v4i8, Custom);
    setOperationAction(ISD::MUL, MVT::v4i16, Custom);
    setOperationAction(ISD::MUL, MVT::v8i8, Custom);

    setOperationAction(ISD::MUL, MVT::v16i8, Custom);
    setOperationAction(ISD::MUL, MVT::v4i32, Custom);
    setOperationAction(ISD::MUL, MVT::v2i64, Custom);
    setOperationAction(ISD::MULHU, MVT::v4i32, Custom);
    setOperationAction(ISD::MULHS, MVT::v4i32, Custom);
    setOperationAction(ISD::MULHU, MVT::v16i8, Custom);
    setOperationAction(ISD::MULHS, MVT::v16i8, Custom);
    setOperationAction(ISD::MULHU, MVT::v8i16, Legal);
    setOperationAction(ISD::MULHS, MVT::v8i16, Legal);
    setOperationAction(ISD::MUL, MVT::v8i16, Legal);
    setOperationAction(ISD::FNEG, MVT::v2f64, Custom);
    setOperationAction(ISD::FABS, MVT::v2f64, Custom);
    setOperationAction(ISD::FCOPYSIGN, MVT::v2f64, Custom);

    for (auto VT : { MVT::v16i8, MVT::v8i16, MVT::v4i32, MVT::v2i64 }) {
      setOperationAction(ISD::SMAX, VT, VT == MVT::v8i16 ? Legal : Custom);
      setOperationAction(ISD::SMIN, VT, VT == MVT::v8i16 ? Legal : Custom);
      setOperationAction(ISD::UMAX, VT, VT == MVT::v16i8 ? Legal : Custom);
      setOperationAction(ISD::UMIN, VT, VT == MVT::v16i8 ? Legal : Custom);
    }

    setOperationAction(ISD::UADDSAT, MVT::v16i8, Legal);
    setOperationAction(ISD::SADDSAT, MVT::v16i8, Legal);
    setOperationAction(ISD::USUBSAT, MVT::v16i8, Legal);
    setOperationAction(ISD::SSUBSAT, MVT::v16i8, Legal);
    setOperationAction(ISD::UADDSAT, MVT::v8i16, Legal);
    setOperationAction(ISD::SADDSAT, MVT::v8i16, Legal);
    setOperationAction(ISD::USUBSAT, MVT::v8i16, Legal);
    setOperationAction(ISD::SSUBSAT, MVT::v8i16, Legal);

    if (!ExperimentalVectorWideningLegalization) {
      // Use widening instead of promotion.
      for (auto VT : { MVT::v8i8, MVT::v4i8, MVT::v2i8,
                       MVT::v4i16, MVT::v2i16 }) {
        setOperationAction(ISD::UADDSAT, VT, Custom);
        setOperationAction(ISD::SADDSAT, VT, Custom);
        setOperationAction(ISD::USUBSAT, VT, Custom);
        setOperationAction(ISD::SSUBSAT, VT, Custom);
      }
    }

    setOperationAction(ISD::INSERT_VECTOR_ELT, MVT::v8i16, Custom);
    setOperationAction(ISD::INSERT_VECTOR_ELT, MVT::v4i32, Custom);
    setOperationAction(ISD::INSERT_VECTOR_ELT, MVT::v4f32, Custom);

    // Provide custom widening for v2f32 setcc. This is really for VLX when
    // setcc result type returns v2i1/v4i1 vector for v2f32/v4f32 leading to
    // type legalization changing the result type to v4i1 during widening.
    // It works fine for SSE2 and is probably faster so no need to qualify with
    // VLX support.
    setOperationAction(ISD::SETCC, MVT::v2i32, Custom);

    for (auto VT : { MVT::v16i8, MVT::v8i16, MVT::v4i32, MVT::v2i64 }) {
      setOperationAction(ISD::SETCC, VT, Custom);
      setOperationAction(ISD::CTPOP, VT, Custom);

      // The condition codes aren't legal in SSE/AVX and under AVX512 we use
      // setcc all the way to isel and prefer SETGT in some isel patterns.
      setCondCodeAction(ISD::SETLT, VT, Custom);
      setCondCodeAction(ISD::SETLE, VT, Custom);
    }

    for (auto VT : { MVT::v16i8, MVT::v8i16, MVT::v4i32 }) {
      setOperationAction(ISD::SCALAR_TO_VECTOR, VT, Custom);
      setOperationAction(ISD::BUILD_VECTOR, VT, Custom);
      setOperationAction(ISD::VECTOR_SHUFFLE, VT, Custom);
      setOperationAction(ISD::VSELECT, VT, Custom);
      setOperationAction(ISD::EXTRACT_VECTOR_ELT, VT, Custom);
    }

    // We support custom legalizing of sext and anyext loads for specific
    // memory vector types which we can load as a scalar (or sequence of
    // scalars) and extend in-register to a legal 128-bit vector type. For sext
    // loads these must work with a single scalar load.
    for (MVT VT : MVT::integer_vector_valuetypes()) {
      if (!ExperimentalVectorWideningLegalization) {
        // We don't want narrow result types here when widening.
        setLoadExtAction(ISD::SEXTLOAD, VT, MVT::v4i8, Custom);
        setLoadExtAction(ISD::SEXTLOAD, VT, MVT::v4i16, Custom);
        setLoadExtAction(ISD::SEXTLOAD, VT, MVT::v8i8, Custom);
      }
      setLoadExtAction(ISD::EXTLOAD, VT, MVT::v2i8, Custom);
      setLoadExtAction(ISD::EXTLOAD, VT, MVT::v2i16, Custom);
      setLoadExtAction(ISD::EXTLOAD, VT, MVT::v2i32, Custom);
      setLoadExtAction(ISD::EXTLOAD, VT, MVT::v4i8, Custom);
      setLoadExtAction(ISD::EXTLOAD, VT, MVT::v4i16, Custom);
      setLoadExtAction(ISD::EXTLOAD, VT, MVT::v8i8, Custom);
    }
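
    // e.g. a sextload of v4i8 yielding v4i32 can be done with one 32-bit
    // scalar load followed by an in-register sign extension (PMOVSXBD on
    // SSE4.1; unpacks plus arithmetic shifts on plain SSE2).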

    if (ExperimentalVectorWideningLegalization &&
        !Subtarget.hasSSE41() && Subtarget.is64Bit()) {
      // This lets DAG combine create sextloads that get split and scalarized.
      // TODO: Does this make sense? What about v2i8->v2i64?
      setLoadExtAction(ISD::SEXTLOAD, MVT::v4i64, MVT::v4i8, Custom);
      setLoadExtAction(ISD::SEXTLOAD, MVT::v8i64, MVT::v8i8, Custom);
    }

    for (auto VT : { MVT::v2f64, MVT::v2i64 }) {
      setOperationAction(ISD::BUILD_VECTOR, VT, Custom);
      setOperationAction(ISD::VECTOR_SHUFFLE, VT, Custom);
      setOperationAction(ISD::VSELECT, VT, Custom);

      if (VT == MVT::v2i64 && !Subtarget.is64Bit())
        continue;

      setOperationAction(ISD::INSERT_VECTOR_ELT, VT, Custom);
      setOperationAction(ISD::EXTRACT_VECTOR_ELT, VT, Custom);
    }

    // Custom lower v2i64 and v2f64 selects.
    setOperationAction(ISD::SELECT, MVT::v2f64, Custom);
    setOperationAction(ISD::SELECT, MVT::v2i64, Custom);
    setOperationAction(ISD::SELECT, MVT::v4i32, Custom);
    setOperationAction(ISD::SELECT, MVT::v8i16, Custom);
    setOperationAction(ISD::SELECT, MVT::v16i8, Custom);

    setOperationAction(ISD::FP_TO_SINT, MVT::v4i32, Legal);
    setOperationAction(ISD::FP_TO_SINT, MVT::v2i32, Custom);
    setOperationAction(ISD::FP_TO_SINT, MVT::v2i16, Custom);

    // Custom legalize these to avoid over promotion or custom promotion.
    setOperationAction(ISD::FP_TO_SINT, MVT::v2i8, Custom);
    setOperationAction(ISD::FP_TO_SINT, MVT::v4i8, Custom);
    setOperationAction(ISD::FP_TO_SINT, MVT::v8i8, Custom);
    setOperationAction(ISD::FP_TO_SINT, MVT::v2i16, Custom);
    setOperationAction(ISD::FP_TO_SINT, MVT::v4i16, Custom);
    setOperationAction(ISD::FP_TO_UINT, MVT::v2i8, Custom);
    setOperationAction(ISD::FP_TO_UINT, MVT::v4i8, Custom);
    setOperationAction(ISD::FP_TO_UINT, MVT::v8i8, Custom);
    setOperationAction(ISD::FP_TO_UINT, MVT::v2i16, Custom);
    setOperationAction(ISD::FP_TO_UINT, MVT::v4i16, Custom);

    // By marking FP_TO_SINT v8i16 as Custom, we trick type legalization into
    // promoting v8i8 FP_TO_UINT into FP_TO_SINT. When the v8i16 FP_TO_SINT is
    // split again based on the input type, this will cause an AssertSExt i16 to
    // be emitted instead of an AssertZExt. This will allow packssdw followed by
    // packuswb to be used to truncate to v8i8. This is necessary since packusdw
    // isn't available until sse4.1.
    setOperationAction(ISD::FP_TO_SINT, MVT::v8i16, Custom);
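
    // Sketch of the v8f32 -> v8i8 truncating path this enables on bare SSE2
    // (illustrative; the exact node sequence comes from legalization):
    //   cvttps2dq x2  ; two v4f32 halves -> two v4i32
    //   packssdw      ; v4i32 pair -> v8i16, signed saturation
    //   packuswb      ; v8i16 -> bytes, unsigned saturation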

    setOperationAction(ISD::SINT_TO_FP, MVT::v4i32, Legal);
    setOperationAction(ISD::SINT_TO_FP, MVT::v2i32, Custom);

    setOperationAction(ISD::UINT_TO_FP, MVT::v2i32, Custom);

    // Fast v2f32 UINT_TO_FP( v2i32 ) custom conversion.
    setOperationAction(ISD::UINT_TO_FP, MVT::v2f32, Custom);

    setOperationAction(ISD::FP_EXTEND, MVT::v2f32, Custom);
    setOperationAction(ISD::FP_ROUND, MVT::v2f32, Custom);

    for (MVT VT : MVT::fp_vector_valuetypes())
      setLoadExtAction(ISD::EXTLOAD, VT, MVT::v2f32, Legal);

    // We want to legalize this to an f64 load rather than an i64 load on
    // 64-bit targets and two 32-bit loads on a 32-bit target. Similar for
    // store.
    setOperationAction(ISD::LOAD, MVT::v2f32, Custom);
    setOperationAction(ISD::LOAD, MVT::v2i32, Custom);
    setOperationAction(ISD::LOAD, MVT::v4i16, Custom);
    setOperationAction(ISD::LOAD, MVT::v8i8, Custom);
    setOperationAction(ISD::STORE, MVT::v2f32, Custom);
    setOperationAction(ISD::STORE, MVT::v2i32, Custom);
    setOperationAction(ISD::STORE, MVT::v4i16, Custom);
    setOperationAction(ISD::STORE, MVT::v8i8, Custom);

    setOperationAction(ISD::BITCAST, MVT::v2i32, Custom);
    setOperationAction(ISD::BITCAST, MVT::v4i16, Custom);
    setOperationAction(ISD::BITCAST, MVT::v8i8, Custom);
    if (!Subtarget.hasAVX512())
      setOperationAction(ISD::BITCAST, MVT::v16i1, Custom);

    setOperationAction(ISD::SIGN_EXTEND_VECTOR_INREG, MVT::v2i64, Custom);
    setOperationAction(ISD::SIGN_EXTEND_VECTOR_INREG, MVT::v4i32, Custom);
    setOperationAction(ISD::SIGN_EXTEND_VECTOR_INREG, MVT::v8i16, Custom);

    if (ExperimentalVectorWideningLegalization) {
      setOperationAction(ISD::SIGN_EXTEND, MVT::v4i64, Custom);

      setOperationAction(ISD::TRUNCATE, MVT::v2i8, Custom);
      setOperationAction(ISD::TRUNCATE, MVT::v2i16, Custom);
      setOperationAction(ISD::TRUNCATE, MVT::v2i32, Custom);
      setOperationAction(ISD::TRUNCATE, MVT::v4i8, Custom);
      setOperationAction(ISD::TRUNCATE, MVT::v4i16, Custom);
      setOperationAction(ISD::TRUNCATE, MVT::v8i8, Custom);
    }

    // In the customized shift lowering, the legal v4i32/v2i64 cases
    // in AVX2 will be recognized.
    for (auto VT : { MVT::v16i8, MVT::v8i16, MVT::v4i32, MVT::v2i64 }) {
      setOperationAction(ISD::SRL, VT, Custom);
      setOperationAction(ISD::SHL, VT, Custom);
      setOperationAction(ISD::SRA, VT, Custom);
    }

    setOperationAction(ISD::ROTL, MVT::v4i32, Custom);
    setOperationAction(ISD::ROTL, MVT::v8i16, Custom);

    // With AVX512, expanding (and promoting the shifts) is better.
    if (!Subtarget.hasAVX512())
      setOperationAction(ISD::ROTL, MVT::v16i8, Custom);
  }

  if (!Subtarget.useSoftFloat() && Subtarget.hasSSSE3()) {
    setOperationAction(ISD::ABS, MVT::v16i8, Legal);
    setOperationAction(ISD::ABS, MVT::v8i16, Legal);
    setOperationAction(ISD::ABS, MVT::v4i32, Legal);
    setOperationAction(ISD::BITREVERSE, MVT::v16i8, Custom);
    setOperationAction(ISD::CTLZ, MVT::v16i8, Custom);
    setOperationAction(ISD::CTLZ, MVT::v8i16, Custom);
    setOperationAction(ISD::CTLZ, MVT::v4i32, Custom);
    setOperationAction(ISD::CTLZ, MVT::v2i64, Custom);
  }

  if (!Subtarget.useSoftFloat() && Subtarget.hasSSE41()) {
    for (MVT RoundedTy : {MVT::f32, MVT::f64, MVT::v4f32, MVT::v2f64}) {
      setOperationAction(ISD::FFLOOR, RoundedTy, Legal);
      setOperationAction(ISD::FCEIL, RoundedTy, Legal);
      setOperationAction(ISD::FTRUNC, RoundedTy, Legal);
      setOperationAction(ISD::FRINT, RoundedTy, Legal);
      setOperationAction(ISD::FNEARBYINT, RoundedTy, Legal);
    }

    setOperationAction(ISD::SMAX, MVT::v16i8, Legal);
    setOperationAction(ISD::SMAX, MVT::v4i32, Legal);
    setOperationAction(ISD::UMAX, MVT::v8i16, Legal);
    setOperationAction(ISD::UMAX, MVT::v4i32, Legal);
    setOperationAction(ISD::SMIN, MVT::v16i8, Legal);
    setOperationAction(ISD::SMIN, MVT::v4i32, Legal);
    setOperationAction(ISD::UMIN, MVT::v8i16, Legal);
    setOperationAction(ISD::UMIN, MVT::v4i32, Legal);

    // FIXME: Do we need to handle scalar-to-vector here?
    setOperationAction(ISD::MUL, MVT::v4i32, Legal);

    // We directly match byte blends in the backend as they match the VSELECT
    // condition form.
    setOperationAction(ISD::VSELECT, MVT::v16i8, Legal);

    // SSE41 brings specific instructions for doing vector sign extend even in
    // cases where we don't have SRA.
    for (auto VT : { MVT::v8i16, MVT::v4i32, MVT::v2i64 }) {
      setOperationAction(ISD::SIGN_EXTEND_VECTOR_INREG, VT, Legal);
      setOperationAction(ISD::ZERO_EXTEND_VECTOR_INREG, VT, Legal);
    }

    if (!ExperimentalVectorWideningLegalization) {
      // Avoid narrow result types when widening. The legal types are listed
      // in the next loop.
      for (MVT VT : MVT::integer_vector_valuetypes()) {
        setLoadExtAction(ISD::SEXTLOAD, VT, MVT::v2i8, Custom);
        setLoadExtAction(ISD::SEXTLOAD, VT, MVT::v2i16, Custom);
        setLoadExtAction(ISD::SEXTLOAD, VT, MVT::v2i32, Custom);
      }
    }

    // SSE41 also has vector sign/zero extending loads, PMOV[SZ]X
    for (auto LoadExtOp : { ISD::SEXTLOAD, ISD::ZEXTLOAD }) {
      setLoadExtAction(LoadExtOp, MVT::v8i16, MVT::v8i8, Legal);
      setLoadExtAction(LoadExtOp, MVT::v4i32, MVT::v4i8, Legal);
      if (!ExperimentalVectorWideningLegalization)
        setLoadExtAction(LoadExtOp, MVT::v2i32, MVT::v2i8, Legal);
      setLoadExtAction(LoadExtOp, MVT::v2i64, MVT::v2i8, Legal);
      setLoadExtAction(LoadExtOp, MVT::v4i32, MVT::v4i16, Legal);
      setLoadExtAction(LoadExtOp, MVT::v2i64, MVT::v2i16, Legal);
      setLoadExtAction(LoadExtOp, MVT::v2i64, MVT::v2i32, Legal);
    }
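
    // e.g. a sign-extending load from v8i8 memory to v8i16 selects directly
    // to PMOVSXBW with the load folded into the instruction.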

    // i8 vectors are custom because the source register and source
    // memory operand types are not the same width.
1081 | setOperationAction(ISD::INSERT_VECTOR_ELT, MVT::v16i8, Custom); | |||
1082 | } | |||
1083 | ||||
1084 | if (!Subtarget.useSoftFloat() && Subtarget.hasXOP()) { | |||
1085 | for (auto VT : { MVT::v16i8, MVT::v8i16, MVT::v4i32, MVT::v2i64, | |||
1086 | MVT::v32i8, MVT::v16i16, MVT::v8i32, MVT::v4i64 }) | |||
1087 | setOperationAction(ISD::ROTL, VT, Custom); | |||
1088 | ||||
1089 | // XOP can efficiently perform BITREVERSE with VPPERM. | |||
1090 | for (auto VT : { MVT::i8, MVT::i16, MVT::i32, MVT::i64 }) | |||
1091 | setOperationAction(ISD::BITREVERSE, VT, Custom); | |||
1092 | ||||
1093 | for (auto VT : { MVT::v16i8, MVT::v8i16, MVT::v4i32, MVT::v2i64, | |||
1094 | MVT::v32i8, MVT::v16i16, MVT::v8i32, MVT::v4i64 }) | |||
1095 | setOperationAction(ISD::BITREVERSE, VT, Custom); | |||
1096 | } | |||
1097 | ||||
1098 | if (!Subtarget.useSoftFloat() && Subtarget.hasAVX()) { | |||
1099 | bool HasInt256 = Subtarget.hasInt256(); | |||
1100 | ||||
1101 | addRegisterClass(MVT::v32i8, Subtarget.hasVLX() ? &X86::VR256XRegClass | |||
1102 | : &X86::VR256RegClass); | |||
1103 | addRegisterClass(MVT::v16i16, Subtarget.hasVLX() ? &X86::VR256XRegClass | |||
1104 | : &X86::VR256RegClass); | |||
1105 | addRegisterClass(MVT::v8i32, Subtarget.hasVLX() ? &X86::VR256XRegClass | |||
1106 | : &X86::VR256RegClass); | |||
1107 | addRegisterClass(MVT::v8f32, Subtarget.hasVLX() ? &X86::VR256XRegClass | |||
1108 | : &X86::VR256RegClass); | |||
1109 | addRegisterClass(MVT::v4i64, Subtarget.hasVLX() ? &X86::VR256XRegClass | |||
1110 | : &X86::VR256RegClass); | |||
1111 | addRegisterClass(MVT::v4f64, Subtarget.hasVLX() ? &X86::VR256XRegClass | |||
1112 | : &X86::VR256RegClass); | |||
1113 | ||||
1114 | for (auto VT : { MVT::v8f32, MVT::v4f64 }) { | |||
1115 | setOperationAction(ISD::FFLOOR, VT, Legal); | |||
1116 | setOperationAction(ISD::FCEIL, VT, Legal); | |||
1117 | setOperationAction(ISD::FTRUNC, VT, Legal); | |||
1118 | setOperationAction(ISD::FRINT, VT, Legal); | |||
1119 | setOperationAction(ISD::FNEARBYINT, VT, Legal); | |||
1120 | setOperationAction(ISD::FNEG, VT, Custom); | |||
1121 | setOperationAction(ISD::FABS, VT, Custom); | |||
1122 | setOperationAction(ISD::FCOPYSIGN, VT, Custom); | |||
1123 | } | |||
1124 | ||||
1125 | // (fp_to_int:v8i16 (v8f32 ..)) requires the result type to be promoted | |||
1126 | // even though v8i16 is a legal type. | |||
1127 | setOperationPromotedToType(ISD::FP_TO_SINT, MVT::v8i16, MVT::v8i32); | |||
1128 | setOperationPromotedToType(ISD::FP_TO_UINT, MVT::v8i16, MVT::v8i32); | |||
1129 | setOperationAction(ISD::FP_TO_SINT, MVT::v8i32, Legal); | |||
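| // E.g. (v8i16 (fp_to_sint v8f32)) is converted as v8i32 (VCVTTPS2DQ) | |||
| // and the result is then truncated to v8i16. | |||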
1130 | ||||
1131 | setOperationAction(ISD::SINT_TO_FP, MVT::v8i32, Legal); | |||
1132 | setOperationAction(ISD::FP_ROUND, MVT::v4f32, Legal); | |||
1133 | ||||
1134 | if (!Subtarget.hasAVX512()) | |||
1135 | setOperationAction(ISD::BITCAST, MVT::v32i1, Custom); | |||
1136 | ||||
1137 | for (MVT VT : MVT::fp_vector_valuetypes()) | |||
1138 | setLoadExtAction(ISD::EXTLOAD, VT, MVT::v4f32, Legal); | |||
1139 | ||||
1140 | // In the customized shift lowering, the legal v8i32/v4i64 cases | |||
1141 | // in AVX2 will be recognized. | |||
1142 | for (auto VT : { MVT::v32i8, MVT::v16i16, MVT::v8i32, MVT::v4i64 }) { | |||
1143 | setOperationAction(ISD::SRL, VT, Custom); | |||
1144 | setOperationAction(ISD::SHL, VT, Custom); | |||
1145 | setOperationAction(ISD::SRA, VT, Custom); | |||
1146 | } | |||
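| // E.g. with AVX2 a v8i32 shift by a vector of amounts selects to the | |||
| // per-element VPSLLVD/VPSRLVD/VPSRAVD instructions; on AVX1 the | |||
| // custom lowering splits the vector into 128-bit halves. | |||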
1147 | ||||
1148 | if (ExperimentalVectorWideningLegalization) { | |||
1149 | // These types need custom splitting if their input is a 128-bit vector. | |||
1150 | setOperationAction(ISD::SIGN_EXTEND, MVT::v8i64, Custom); | |||
1151 | setOperationAction(ISD::SIGN_EXTEND, MVT::v16i32, Custom); | |||
1152 | setOperationAction(ISD::ZERO_EXTEND, MVT::v8i64, Custom); | |||
1153 | setOperationAction(ISD::ZERO_EXTEND, MVT::v16i32, Custom); | |||
1154 | } | |||
1155 | ||||
1156 | setOperationAction(ISD::ROTL, MVT::v8i32, Custom); | |||
1157 | setOperationAction(ISD::ROTL, MVT::v16i16, Custom); | |||
1158 | ||||
1159 | // With BWI, expanding (and promoting the shifts) is the better option. | |||
1160 | if (!Subtarget.hasBWI()) | |||
1161 | setOperationAction(ISD::ROTL, MVT::v32i8, Custom); | |||
1162 | ||||
1163 | setOperationAction(ISD::SELECT, MVT::v4f64, Custom); | |||
1164 | setOperationAction(ISD::SELECT, MVT::v4i64, Custom); | |||
1165 | setOperationAction(ISD::SELECT, MVT::v8i32, Custom); | |||
1166 | setOperationAction(ISD::SELECT, MVT::v16i16, Custom); | |||
1167 | setOperationAction(ISD::SELECT, MVT::v32i8, Custom); | |||
1168 | setOperationAction(ISD::SELECT, MVT::v8f32, Custom); | |||
1169 | ||||
1170 | for (auto VT : { MVT::v16i16, MVT::v8i32, MVT::v4i64 }) { | |||
1171 | setOperationAction(ISD::SIGN_EXTEND, VT, Custom); | |||
1172 | setOperationAction(ISD::ZERO_EXTEND, VT, Custom); | |||
1173 | setOperationAction(ISD::ANY_EXTEND, VT, Custom); | |||
1174 | } | |||
1175 | ||||
1176 | setOperationAction(ISD::TRUNCATE, MVT::v16i8, Custom); | |||
1177 | setOperationAction(ISD::TRUNCATE, MVT::v8i16, Custom); | |||
1178 | setOperationAction(ISD::TRUNCATE, MVT::v4i32, Custom); | |||
1179 | setOperationAction(ISD::BITREVERSE, MVT::v32i8, Custom); | |||
1180 | ||||
1181 | for (auto VT : { MVT::v32i8, MVT::v16i16, MVT::v8i32, MVT::v4i64 }) { | |||
1182 | setOperationAction(ISD::SETCC, VT, Custom); | |||
1183 | setOperationAction(ISD::CTPOP, VT, Custom); | |||
1184 | setOperationAction(ISD::CTLZ, VT, Custom); | |||
1185 | ||||
1186 | // TODO - remove this once 256-bit X86ISD::ANDNP is correctly split. | |||
1187 | setOperationAction(ISD::CTTZ, VT, HasInt256 ? Expand : Custom); | |||
1188 | ||||
1189 | // The condition codes aren't legal in SSE/AVX and under AVX512 we use | |||
1190 | // setcc all the way to isel and prefer SETGT in some isel patterns. | |||
1191 | setCondCodeAction(ISD::SETLT, VT, Custom); | |||
1192 | setCondCodeAction(ISD::SETLE, VT, Custom); | |||
1193 | } | |||
1194 | ||||
1195 | if (Subtarget.hasAnyFMA()) { | |||
1196 | for (auto VT : { MVT::f32, MVT::f64, MVT::v4f32, MVT::v8f32, | |||
1197 | MVT::v2f64, MVT::v4f64 }) | |||
1198 | setOperationAction(ISD::FMA, VT, Legal); | |||
1199 | } | |||
1200 | ||||
1201 | for (auto VT : { MVT::v32i8, MVT::v16i16, MVT::v8i32, MVT::v4i64 }) { | |||
1202 | setOperationAction(ISD::ADD, VT, HasInt256 ? Legal : Custom); | |||
1203 | setOperationAction(ISD::SUB, VT, HasInt256 ? Legal : Custom); | |||
1204 | } | |||
1205 | ||||
1206 | setOperationAction(ISD::MUL, MVT::v4i64, Custom); | |||
1207 | setOperationAction(ISD::MUL, MVT::v8i32, HasInt256 ? Legal : Custom); | |||
1208 | setOperationAction(ISD::MUL, MVT::v16i16, HasInt256 ? Legal : Custom); | |||
1209 | setOperationAction(ISD::MUL, MVT::v32i8, Custom); | |||
1210 | ||||
1211 | setOperationAction(ISD::MULHU, MVT::v8i32, Custom); | |||
1212 | setOperationAction(ISD::MULHS, MVT::v8i32, Custom); | |||
1213 | setOperationAction(ISD::MULHU, MVT::v16i16, HasInt256 ? Legal : Custom); | |||
1214 | setOperationAction(ISD::MULHS, MVT::v16i16, HasInt256 ? Legal : Custom); | |||
1215 | setOperationAction(ISD::MULHU, MVT::v32i8, Custom); | |||
1216 | setOperationAction(ISD::MULHS, MVT::v32i8, Custom); | |||
1217 | ||||
1218 | setOperationAction(ISD::SMAX, MVT::v4i64, Custom); | |||
1219 | setOperationAction(ISD::UMAX, MVT::v4i64, Custom); | |||
1220 | setOperationAction(ISD::SMIN, MVT::v4i64, Custom); | |||
1221 | setOperationAction(ISD::UMIN, MVT::v4i64, Custom); | |||
1222 | ||||
1223 | setOperationAction(ISD::UADDSAT, MVT::v32i8, HasInt256 ? Legal : Custom); | |||
1224 | setOperationAction(ISD::SADDSAT, MVT::v32i8, HasInt256 ? Legal : Custom); | |||
1225 | setOperationAction(ISD::USUBSAT, MVT::v32i8, HasInt256 ? Legal : Custom); | |||
1226 | setOperationAction(ISD::SSUBSAT, MVT::v32i8, HasInt256 ? Legal : Custom); | |||
1227 | setOperationAction(ISD::UADDSAT, MVT::v16i16, HasInt256 ? Legal : Custom); | |||
1228 | setOperationAction(ISD::SADDSAT, MVT::v16i16, HasInt256 ? Legal : Custom); | |||
1229 | setOperationAction(ISD::USUBSAT, MVT::v16i16, HasInt256 ? Legal : Custom); | |||
1230 | setOperationAction(ISD::SSUBSAT, MVT::v16i16, HasInt256 ? Legal : Custom); | |||
1231 | ||||
1232 | for (auto VT : { MVT::v32i8, MVT::v16i16, MVT::v8i32 }) { | |||
1233 | setOperationAction(ISD::ABS, VT, HasInt256 ? Legal : Custom); | |||
1234 | setOperationAction(ISD::SMAX, VT, HasInt256 ? Legal : Custom); | |||
1235 | setOperationAction(ISD::UMAX, VT, HasInt256 ? Legal : Custom); | |||
1236 | setOperationAction(ISD::SMIN, VT, HasInt256 ? Legal : Custom); | |||
1237 | setOperationAction(ISD::UMIN, VT, HasInt256 ? Legal : Custom); | |||
1238 | } | |||
1239 | ||||
1240 | for (auto VT : {MVT::v16i16, MVT::v8i32, MVT::v4i64}) { | |||
1241 | setOperationAction(ISD::SIGN_EXTEND_VECTOR_INREG, VT, Custom); | |||
1242 | setOperationAction(ISD::ZERO_EXTEND_VECTOR_INREG, VT, Custom); | |||
1243 | } | |||
1244 | ||||
1245 | if (HasInt256) { | |||
1246 | // The custom lowering for UINT_TO_FP for v8i32 becomes interesting | |||
1247 | // when we have a 256-bit-wide blend with immediate. | |||
1248 | setOperationAction(ISD::UINT_TO_FP, MVT::v8i32, Custom); | |||
1249 | ||||
1250 | // AVX2 also has wider vector sign/zero extending loads, VPMOV[SZ]X | |||
1251 | for (auto LoadExtOp : { ISD::SEXTLOAD, ISD::ZEXTLOAD }) { | |||
1252 | setLoadExtAction(LoadExtOp, MVT::v16i16, MVT::v16i8, Legal); | |||
1253 | setLoadExtAction(LoadExtOp, MVT::v8i32, MVT::v8i8, Legal); | |||
1254 | setLoadExtAction(LoadExtOp, MVT::v4i64, MVT::v4i8, Legal); | |||
1255 | setLoadExtAction(LoadExtOp, MVT::v8i32, MVT::v8i16, Legal); | |||
1256 | setLoadExtAction(LoadExtOp, MVT::v4i64, MVT::v4i16, Legal); | |||
1257 | setLoadExtAction(LoadExtOp, MVT::v4i64, MVT::v4i32, Legal); | |||
1258 | } | |||
1259 | } | |||
1260 | ||||
1261 | for (auto VT : { MVT::v4i32, MVT::v8i32, MVT::v2i64, MVT::v4i64, | |||
1262 | MVT::v4f32, MVT::v8f32, MVT::v2f64, MVT::v4f64 }) { | |||
1263 | setOperationAction(ISD::MLOAD, VT, Legal); | |||
1264 | setOperationAction(ISD::MSTORE, VT, Legal); | |||
1265 | } | |||
1266 | ||||
1267 | // Extract subvector is special because the value type | |||
1268 | // (result) is 128-bit but the source is 256-bit wide. | |||
1269 | for (auto VT : { MVT::v16i8, MVT::v8i16, MVT::v4i32, MVT::v2i64, | |||
1270 | MVT::v4f32, MVT::v2f64 }) { | |||
1271 | setOperationAction(ISD::EXTRACT_SUBVECTOR, VT, Legal); | |||
1272 | } | |||
1273 | ||||
1274 | // Custom lower several nodes for 256-bit types. | |||
1275 | for (MVT VT : { MVT::v32i8, MVT::v16i16, MVT::v8i32, MVT::v4i64, | |||
1276 | MVT::v8f32, MVT::v4f64 }) { | |||
1277 | setOperationAction(ISD::BUILD_VECTOR, VT, Custom); | |||
1278 | setOperationAction(ISD::VECTOR_SHUFFLE, VT, Custom); | |||
1279 | setOperationAction(ISD::VSELECT, VT, Custom); | |||
1280 | setOperationAction(ISD::INSERT_VECTOR_ELT, VT, Custom); | |||
1281 | setOperationAction(ISD::EXTRACT_VECTOR_ELT, VT, Custom); | |||
1282 | setOperationAction(ISD::SCALAR_TO_VECTOR, VT, Custom); | |||
1283 | setOperationAction(ISD::INSERT_SUBVECTOR, VT, Legal); | |||
1284 | setOperationAction(ISD::CONCAT_VECTORS, VT, Custom); | |||
1285 | } | |||
1286 | ||||
1287 | if (HasInt256) | |||
1288 | setOperationAction(ISD::VSELECT, MVT::v32i8, Legal); | |||
1289 | ||||
1290 | if (HasInt256) { | |||
1291 | // Custom legalize 2x32 to get a little better code. | |||
1292 | setOperationAction(ISD::MGATHER, MVT::v2f32, Custom); | |||
1293 | setOperationAction(ISD::MGATHER, MVT::v2i32, Custom); | |||
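| // Roughly: these 2x32 gathers are widened to their v4i32/v4f32 forms | |||
| // with the extra mask lanes cleared, avoiding scalarization. | |||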
1294 | ||||
1295 | for (auto VT : { MVT::v4i32, MVT::v8i32, MVT::v2i64, MVT::v4i64, | |||
1296 | MVT::v4f32, MVT::v8f32, MVT::v2f64, MVT::v4f64 }) | |||
1297 | setOperationAction(ISD::MGATHER, VT, Custom); | |||
1298 | } | |||
1299 | } | |||
1300 | ||||
1301 | // This block controls legalization of the mask vector sizes that are | |||
1302 | // available with AVX512. 512-bit vectors are in a separate block controlled | |||
1303 | // by useAVX512Regs. | |||
1304 | if (!Subtarget.useSoftFloat() && Subtarget.hasAVX512()) { | |||
1305 | addRegisterClass(MVT::v1i1, &X86::VK1RegClass); | |||
1306 | addRegisterClass(MVT::v2i1, &X86::VK2RegClass); | |||
1307 | addRegisterClass(MVT::v4i1, &X86::VK4RegClass); | |||
1308 | addRegisterClass(MVT::v8i1, &X86::VK8RegClass); | |||
1309 | addRegisterClass(MVT::v16i1, &X86::VK16RegClass); | |||
1310 | ||||
1311 | setOperationAction(ISD::SELECT, MVT::v1i1, Custom); | |||
1312 | setOperationAction(ISD::EXTRACT_VECTOR_ELT, MVT::v1i1, Custom); | |||
1313 | setOperationAction(ISD::BUILD_VECTOR, MVT::v1i1, Custom); | |||
1314 | ||||
1315 | setOperationPromotedToType(ISD::FP_TO_SINT, MVT::v8i1, MVT::v8i32); | |||
1316 | setOperationPromotedToType(ISD::FP_TO_UINT, MVT::v8i1, MVT::v8i32); | |||
1317 | setOperationPromotedToType(ISD::FP_TO_SINT, MVT::v4i1, MVT::v4i32); | |||
1318 | setOperationPromotedToType(ISD::FP_TO_UINT, MVT::v4i1, MVT::v4i32); | |||
1319 | setOperationAction(ISD::FP_TO_SINT, MVT::v2i1, Custom); | |||
1320 | setOperationAction(ISD::FP_TO_UINT, MVT::v2i1, Custom); | |||
1321 | ||||
1322 | // There is no byte-sized k-register load or store without AVX512DQ. | |||
1323 | if (!Subtarget.hasDQI()) { | |||
1324 | setOperationAction(ISD::LOAD, MVT::v1i1, Custom); | |||
1325 | setOperationAction(ISD::LOAD, MVT::v2i1, Custom); | |||
1326 | setOperationAction(ISD::LOAD, MVT::v4i1, Custom); | |||
1327 | setOperationAction(ISD::LOAD, MVT::v8i1, Custom); | |||
1328 | ||||
1329 | setOperationAction(ISD::STORE, MVT::v1i1, Custom); | |||
1330 | setOperationAction(ISD::STORE, MVT::v2i1, Custom); | |||
1331 | setOperationAction(ISD::STORE, MVT::v4i1, Custom); | |||
1332 | setOperationAction(ISD::STORE, MVT::v8i1, Custom); | |||
1333 | } | |||
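| // (KMOVW exists in base AVX512F, but the byte-sized KMOVB requires | |||
| // AVX512DQ, so the narrow mask loads/stores above take a custom path, | |||
| // e.g. moving the bits through a GPR.) | |||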
1334 | ||||
1335 | // Extends of v16i1/v8i1/v4i1/v2i1 to 128-bit vectors. | |||
1336 | for (auto VT : { MVT::v16i8, MVT::v8i16, MVT::v4i32, MVT::v2i64 }) { | |||
1337 | setOperationAction(ISD::SIGN_EXTEND, VT, Custom); | |||
1338 | setOperationAction(ISD::ZERO_EXTEND, VT, Custom); | |||
1339 | setOperationAction(ISD::ANY_EXTEND, VT, Custom); | |||
1340 | } | |||
1341 | ||||
1342 | for (auto VT : { MVT::v2i1, MVT::v4i1, MVT::v8i1, MVT::v16i1 }) { | |||
1343 | setOperationAction(ISD::ADD, VT, Custom); | |||
1344 | setOperationAction(ISD::SUB, VT, Custom); | |||
1345 | setOperationAction(ISD::MUL, VT, Custom); | |||
1346 | setOperationAction(ISD::SETCC, VT, Custom); | |||
1347 | setOperationAction(ISD::SELECT, VT, Custom); | |||
1348 | setOperationAction(ISD::TRUNCATE, VT, Custom); | |||
1349 | setOperationAction(ISD::UADDSAT, VT, Custom); | |||
1350 | setOperationAction(ISD::SADDSAT, VT, Custom); | |||
1351 | setOperationAction(ISD::USUBSAT, VT, Custom); | |||
1352 | setOperationAction(ISD::SSUBSAT, VT, Custom); | |||
1353 | ||||
1354 | setOperationAction(ISD::BUILD_VECTOR, VT, Custom); | |||
1355 | setOperationAction(ISD::EXTRACT_VECTOR_ELT, VT, Custom); | |||
1356 | setOperationAction(ISD::INSERT_VECTOR_ELT, VT, Custom); | |||
1357 | setOperationAction(ISD::VECTOR_SHUFFLE, VT, Custom); | |||
1358 | setOperationAction(ISD::VSELECT, VT, Expand); | |||
1359 | } | |||
1360 | ||||
1361 | setOperationAction(ISD::CONCAT_VECTORS, MVT::v16i1, Custom); | |||
1362 | setOperationAction(ISD::CONCAT_VECTORS, MVT::v8i1, Custom); | |||
1363 | setOperationAction(ISD::CONCAT_VECTORS, MVT::v4i1, Custom); | |||
1364 | setOperationAction(ISD::INSERT_SUBVECTOR, MVT::v2i1, Custom); | |||
1365 | setOperationAction(ISD::INSERT_SUBVECTOR, MVT::v4i1, Custom); | |||
1366 | setOperationAction(ISD::INSERT_SUBVECTOR, MVT::v8i1, Custom); | |||
1367 | setOperationAction(ISD::INSERT_SUBVECTOR, MVT::v16i1, Custom); | |||
1368 | for (auto VT : { MVT::v1i1, MVT::v2i1, MVT::v4i1, MVT::v8i1 }) | |||
1369 | setOperationAction(ISD::EXTRACT_SUBVECTOR, VT, Custom); | |||
1370 | } | |||
1371 | ||||
1372 | // This block controls legalization for 512-bit operations with 32/64 bit | |||
1373 | // elements. 512-bits can be disabled based on prefer-vector-width and | |||
1374 | // required-vector-width function attributes. | |||
1375 | if (!Subtarget.useSoftFloat() && Subtarget.useAVX512Regs()) { | |||
1376 | addRegisterClass(MVT::v16i32, &X86::VR512RegClass); | |||
1377 | addRegisterClass(MVT::v16f32, &X86::VR512RegClass); | |||
1378 | addRegisterClass(MVT::v8i64, &X86::VR512RegClass); | |||
1379 | addRegisterClass(MVT::v8f64, &X86::VR512RegClass); | |||
1380 | ||||
1381 | for (MVT VT : MVT::fp_vector_valuetypes()) | |||
1382 | setLoadExtAction(ISD::EXTLOAD, VT, MVT::v8f32, Legal); | |||
1383 | ||||
1384 | for (auto ExtType : {ISD::ZEXTLOAD, ISD::SEXTLOAD}) { | |||
1385 | setLoadExtAction(ExtType, MVT::v16i32, MVT::v16i8, Legal); | |||
1386 | setLoadExtAction(ExtType, MVT::v16i32, MVT::v16i16, Legal); | |||
1387 | setLoadExtAction(ExtType, MVT::v8i64, MVT::v8i8, Legal); | |||
1388 | setLoadExtAction(ExtType, MVT::v8i64, MVT::v8i16, Legal); | |||
1389 | setLoadExtAction(ExtType, MVT::v8i64, MVT::v8i32, Legal); | |||
1390 | } | |||
1391 | ||||
1392 | for (MVT VT : { MVT::v16f32, MVT::v8f64 }) { | |||
1393 | setOperationAction(ISD::FNEG, VT, Custom); | |||
1394 | setOperationAction(ISD::FABS, VT, Custom); | |||
1395 | setOperationAction(ISD::FMA, VT, Legal); | |||
1396 | setOperationAction(ISD::FCOPYSIGN, VT, Custom); | |||
1397 | } | |||
1398 | ||||
1399 | setOperationAction(ISD::FP_TO_SINT, MVT::v16i32, Legal); | |||
1400 | setOperationPromotedToType(ISD::FP_TO_SINT, MVT::v16i16, MVT::v16i32); | |||
1401 | setOperationPromotedToType(ISD::FP_TO_SINT, MVT::v16i8, MVT::v16i32); | |||
1402 | setOperationPromotedToType(ISD::FP_TO_SINT, MVT::v16i1, MVT::v16i32); | |||
1403 | setOperationAction(ISD::FP_TO_UINT, MVT::v16i32, Legal); | |||
1404 | setOperationPromotedToType(ISD::FP_TO_UINT, MVT::v16i1, MVT::v16i32); | |||
1405 | setOperationPromotedToType(ISD::FP_TO_UINT, MVT::v16i8, MVT::v16i32); | |||
1406 | setOperationPromotedToType(ISD::FP_TO_UINT, MVT::v16i16, MVT::v16i32); | |||
1407 | setOperationAction(ISD::SINT_TO_FP, MVT::v16i32, Legal); | |||
1408 | setOperationAction(ISD::UINT_TO_FP, MVT::v16i32, Legal); | |||
1409 | ||||
1410 | setTruncStoreAction(MVT::v8i64, MVT::v8i8, Legal); | |||
1411 | setTruncStoreAction(MVT::v8i64, MVT::v8i16, Legal); | |||
1412 | setTruncStoreAction(MVT::v8i64, MVT::v8i32, Legal); | |||
1413 | setTruncStoreAction(MVT::v16i32, MVT::v16i8, Legal); | |||
1414 | setTruncStoreAction(MVT::v16i32, MVT::v16i16, Legal); | |||
1415 | ||||
1416 | if (!Subtarget.hasVLX()) { | |||
1417 | // With 512-bit vectors and no VLX, we prefer to widen MLOAD/MSTORE | |||
1418 | // to 512-bit rather than use the AVX2 instructions so that we can use | |||
1419 | // k-masks. | |||
1420 | for (auto VT : {MVT::v4i32, MVT::v8i32, MVT::v2i64, MVT::v4i64, | |||
1421 | MVT::v4f32, MVT::v8f32, MVT::v2f64, MVT::v4f64}) { | |||
1422 | setOperationAction(ISD::MLOAD, VT, Custom); | |||
1423 | setOperationAction(ISD::MSTORE, VT, Custom); | |||
1424 | } | |||
1425 | } | |||
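| // (E.g. a masked load of v8f32 is widened to v16f32 with the upper | |||
| // half of the mask cleared, instead of using the AVX2 VMASKMOV form.) | |||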
1426 | ||||
1427 | setOperationAction(ISD::TRUNCATE, MVT::v8i32, Custom); | |||
1428 | setOperationAction(ISD::TRUNCATE, MVT::v16i16, Custom); | |||
1429 | setOperationAction(ISD::ZERO_EXTEND, MVT::v16i32, Custom); | |||
1430 | setOperationAction(ISD::ZERO_EXTEND, MVT::v8i64, Custom); | |||
1431 | setOperationAction(ISD::ANY_EXTEND, MVT::v16i32, Custom); | |||
1432 | setOperationAction(ISD::ANY_EXTEND, MVT::v8i64, Custom); | |||
1433 | setOperationAction(ISD::SIGN_EXTEND, MVT::v16i32, Custom); | |||
1434 | setOperationAction(ISD::SIGN_EXTEND, MVT::v8i64, Custom); | |||
1435 | ||||
1436 | if (ExperimentalVectorWideningLegalization) { | |||
1437 | // Need to custom widen this if we don't have AVX512BW. | |||
1438 | setOperationAction(ISD::ANY_EXTEND, MVT::v8i8, Custom); | |||
1439 | setOperationAction(ISD::ZERO_EXTEND, MVT::v8i8, Custom); | |||
1440 | setOperationAction(ISD::SIGN_EXTEND, MVT::v8i8, Custom); | |||
1441 | } | |||
1442 | ||||
1443 | for (auto VT : { MVT::v16f32, MVT::v8f64 }) { | |||
1444 | setOperationAction(ISD::FFLOOR, VT, Legal); | |||
1445 | setOperationAction(ISD::FCEIL, VT, Legal); | |||
1446 | setOperationAction(ISD::FTRUNC, VT, Legal); | |||
1447 | setOperationAction(ISD::FRINT, VT, Legal); | |||
1448 | setOperationAction(ISD::FNEARBYINT, VT, Legal); | |||
1449 | } | |||
1450 | ||||
1451 | // Without BWI we need to use custom lowering to handle MVT::v64i8 input. | |||
1452 | for (auto VT : {MVT::v16i32, MVT::v8i64, MVT::v64i8}) { | |||
1453 | setOperationAction(ISD::SIGN_EXTEND_VECTOR_INREG, VT, Custom); | |||
1454 | setOperationAction(ISD::ZERO_EXTEND_VECTOR_INREG, VT, Custom); | |||
1455 | } | |||
1456 | ||||
1457 | setOperationAction(ISD::CONCAT_VECTORS, MVT::v8f64, Custom); | |||
1458 | setOperationAction(ISD::CONCAT_VECTORS, MVT::v8i64, Custom); | |||
1459 | setOperationAction(ISD::CONCAT_VECTORS, MVT::v16f32, Custom); | |||
1460 | setOperationAction(ISD::CONCAT_VECTORS, MVT::v16i32, Custom); | |||
1461 | ||||
1462 | setOperationAction(ISD::MUL, MVT::v8i64, Custom); | |||
1463 | setOperationAction(ISD::MUL, MVT::v16i32, Legal); | |||
1464 | ||||
1465 | setOperationAction(ISD::MULHU, MVT::v16i32, Custom); | |||
1466 | setOperationAction(ISD::MULHS, MVT::v16i32, Custom); | |||
1467 | ||||
1468 | setOperationAction(ISD::SELECT, MVT::v8f64, Custom); | |||
1469 | setOperationAction(ISD::SELECT, MVT::v8i64, Custom); | |||
1470 | setOperationAction(ISD::SELECT, MVT::v16i32, Custom); | |||
1471 | setOperationAction(ISD::SELECT, MVT::v32i16, Custom); | |||
1472 | setOperationAction(ISD::SELECT, MVT::v64i8, Custom); | |||
1473 | setOperationAction(ISD::SELECT, MVT::v16f32, Custom); | |||
1474 | ||||
1475 | for (auto VT : { MVT::v16i32, MVT::v8i64 }) { | |||
1476 | setOperationAction(ISD::SMAX, VT, Legal); | |||
1477 | setOperationAction(ISD::UMAX, VT, Legal); | |||
1478 | setOperationAction(ISD::SMIN, VT, Legal); | |||
1479 | setOperationAction(ISD::UMIN, VT, Legal); | |||
1480 | setOperationAction(ISD::ABS, VT, Legal); | |||
1481 | setOperationAction(ISD::SRL, VT, Custom); | |||
1482 | setOperationAction(ISD::SHL, VT, Custom); | |||
1483 | setOperationAction(ISD::SRA, VT, Custom); | |||
1484 | setOperationAction(ISD::CTPOP, VT, Custom); | |||
1485 | setOperationAction(ISD::ROTL, VT, Custom); | |||
1486 | setOperationAction(ISD::ROTR, VT, Custom); | |||
1487 | setOperationAction(ISD::SETCC, VT, Custom); | |||
1488 | ||||
1489 | // The condition codes aren't legal in SSE/AVX and under AVX512 we use | |||
1490 | // setcc all the way to isel and prefer SETGT in some isel patterns. | |||
1491 | setCondCodeAction(ISD::SETLT, VT, Custom); | |||
1492 | setCondCodeAction(ISD::SETLE, VT, Custom); | |||
1493 | } | |||
1494 | ||||
1495 | if (Subtarget.hasDQI()) { | |||
1496 | setOperationAction(ISD::SINT_TO_FP, MVT::v8i64, Legal); | |||
1497 | setOperationAction(ISD::UINT_TO_FP, MVT::v8i64, Legal); | |||
1498 | setOperationAction(ISD::FP_TO_SINT, MVT::v8i64, Legal); | |||
1499 | setOperationAction(ISD::FP_TO_UINT, MVT::v8i64, Legal); | |||
1500 | ||||
1501 | setOperationAction(ISD::MUL, MVT::v8i64, Legal); | |||
1502 | } | |||
1503 | ||||
1504 | if (Subtarget.hasCDI()) { | |||
1505 | // Non-VLX subtargets extend 128/256-bit vectors to use the 512-bit version. | |||
1506 | for (auto VT : { MVT::v16i32, MVT::v8i64} ) { | |||
1507 | setOperationAction(ISD::CTLZ, VT, Legal); | |||
1508 | } | |||
1509 | } // Subtarget.hasCDI() | |||
1510 | ||||
1511 | if (Subtarget.hasVPOPCNTDQ()) { | |||
1512 | for (auto VT : { MVT::v16i32, MVT::v8i64 }) | |||
1513 | setOperationAction(ISD::CTPOP, VT, Legal); | |||
1514 | } | |||
1515 | ||||
1516 | // Extract subvector is special because the value type | |||
1517 | // (result) is 256-bit but the source is 512-bit wide. | |||
1518 | // 128-bit was made Legal under AVX1. | |||
1519 | for (auto VT : { MVT::v32i8, MVT::v16i16, MVT::v8i32, MVT::v4i64, | |||
1520 | MVT::v8f32, MVT::v4f64 }) | |||
1521 | setOperationAction(ISD::EXTRACT_SUBVECTOR, VT, Legal); | |||
1522 | ||||
1523 | for (auto VT : { MVT::v16i32, MVT::v8i64, MVT::v16f32, MVT::v8f64 }) { | |||
1524 | setOperationAction(ISD::VECTOR_SHUFFLE, VT, Custom); | |||
1525 | setOperationAction(ISD::INSERT_VECTOR_ELT, VT, Custom); | |||
1526 | setOperationAction(ISD::BUILD_VECTOR, VT, Custom); | |||
1527 | setOperationAction(ISD::VSELECT, VT, Custom); | |||
1528 | setOperationAction(ISD::EXTRACT_VECTOR_ELT, VT, Custom); | |||
1529 | setOperationAction(ISD::SCALAR_TO_VECTOR, VT, Custom); | |||
1530 | setOperationAction(ISD::INSERT_SUBVECTOR, VT, Legal); | |||
1531 | setOperationAction(ISD::MLOAD, VT, Legal); | |||
1532 | setOperationAction(ISD::MSTORE, VT, Legal); | |||
1533 | setOperationAction(ISD::MGATHER, VT, Custom); | |||
1534 | setOperationAction(ISD::MSCATTER, VT, Custom); | |||
1535 | } | |||
1536 | // Need to custom split v32i16/v64i8 bitcasts. | |||
1537 | if (!Subtarget.hasBWI()) { | |||
1538 | setOperationAction(ISD::BITCAST, MVT::v32i16, Custom); | |||
1539 | setOperationAction(ISD::BITCAST, MVT::v64i8, Custom); | |||
1540 | } | |||
1541 | } // has AVX-512 | |||
1542 | ||||
1543 | // This block controls legalization for operations that don't have | |||
1544 | // pre-AVX512 equivalents. Without VLX we use 512-bit operations for | |||
1545 | // narrower widths. | |||
1546 | if (!Subtarget.useSoftFloat() && Subtarget.hasAVX512()) { | |||
1547 | // These operations are handled on non-VLX by artificially widening in | |||
1548 | // isel patterns. | |||
1549 | // TODO: Custom widen in lowering on non-VLX and drop the isel patterns? | |||
1550 | ||||
1551 | setOperationAction(ISD::FP_TO_UINT, MVT::v8i32, Legal); | |||
1552 | setOperationAction(ISD::FP_TO_UINT, MVT::v4i32, Legal); | |||
1553 | setOperationAction(ISD::FP_TO_UINT, MVT::v2i32, Custom); | |||
1554 | setOperationAction(ISD::UINT_TO_FP, MVT::v8i32, Legal); | |||
1555 | setOperationAction(ISD::UINT_TO_FP, MVT::v4i32, Legal); | |||
1556 | ||||
1557 | for (auto VT : { MVT::v2i64, MVT::v4i64 }) { | |||
1558 | setOperationAction(ISD::SMAX, VT, Legal); | |||
1559 | setOperationAction(ISD::UMAX, VT, Legal); | |||
1560 | setOperationAction(ISD::SMIN, VT, Legal); | |||
1561 | setOperationAction(ISD::UMIN, VT, Legal); | |||
1562 | setOperationAction(ISD::ABS, VT, Legal); | |||
1563 | } | |||
1564 | ||||
1565 | for (auto VT : { MVT::v4i32, MVT::v8i32, MVT::v2i64, MVT::v4i64 }) { | |||
1566 | setOperationAction(ISD::ROTL, VT, Custom); | |||
1567 | setOperationAction(ISD::ROTR, VT, Custom); | |||
1568 | } | |||
1569 | ||||
1570 | // Custom legalize 2x32 to get a little better code. | |||
1571 | setOperationAction(ISD::MSCATTER, MVT::v2f32, Custom); | |||
1572 | setOperationAction(ISD::MSCATTER, MVT::v2i32, Custom); | |||
1573 | ||||
1574 | for (auto VT : { MVT::v4i32, MVT::v8i32, MVT::v2i64, MVT::v4i64, | |||
1575 | MVT::v4f32, MVT::v8f32, MVT::v2f64, MVT::v4f64 }) | |||
1576 | setOperationAction(ISD::MSCATTER, VT, Custom); | |||
1577 | ||||
1578 | if (Subtarget.hasDQI()) { | |||
1579 | for (auto VT : { MVT::v2i64, MVT::v4i64 }) { | |||
1580 | setOperationAction(ISD::SINT_TO_FP, VT, Legal); | |||
1581 | setOperationAction(ISD::UINT_TO_FP, VT, Legal); | |||
1582 | setOperationAction(ISD::FP_TO_SINT, VT, Legal); | |||
1583 | setOperationAction(ISD::FP_TO_UINT, VT, Legal); | |||
1584 | ||||
1585 | setOperationAction(ISD::MUL, VT, Legal); | |||
1586 | } | |||
1587 | } | |||
1588 | ||||
1589 | if (Subtarget.hasCDI()) { | |||
1590 | for (auto VT : { MVT::v4i32, MVT::v8i32, MVT::v2i64, MVT::v4i64 }) { | |||
1591 | setOperationAction(ISD::CTLZ, VT, Legal); | |||
1592 | } | |||
1593 | } // Subtarget.hasCDI() | |||
1594 | ||||
1595 | if (Subtarget.hasVPOPCNTDQ()) { | |||
1596 | for (auto VT : { MVT::v4i32, MVT::v8i32, MVT::v2i64, MVT::v4i64 }) | |||
1597 | setOperationAction(ISD::CTPOP, VT, Legal); | |||
1598 | } | |||
1599 | } | |||
1600 | ||||
1601 | // This block controls legalization of v32i1/v64i1, which are available with | |||
1602 | // AVX512BW. 512-bit v32i16 and v64i8 vector legalization is controlled with | |||
1603 | // useBWIRegs. | |||
1604 | if (!Subtarget.useSoftFloat() && Subtarget.hasBWI()) { | |||
1605 | addRegisterClass(MVT::v32i1, &X86::VK32RegClass); | |||
1606 | addRegisterClass(MVT::v64i1, &X86::VK64RegClass); | |||
1607 | ||||
1608 | for (auto VT : { MVT::v32i1, MVT::v64i1 }) { | |||
1609 | setOperationAction(ISD::ADD, VT, Custom); | |||
1610 | setOperationAction(ISD::SUB, VT, Custom); | |||
1611 | setOperationAction(ISD::MUL, VT, Custom); | |||
1612 | setOperationAction(ISD::VSELECT, VT, Expand); | |||
1613 | setOperationAction(ISD::UADDSAT, VT, Custom); | |||
1614 | setOperationAction(ISD::SADDSAT, VT, Custom); | |||
1615 | setOperationAction(ISD::USUBSAT, VT, Custom); | |||
1616 | setOperationAction(ISD::SSUBSAT, VT, Custom); | |||
1617 | ||||
1618 | setOperationAction(ISD::TRUNCATE, VT, Custom); | |||
1619 | setOperationAction(ISD::SETCC, VT, Custom); | |||
1620 | setOperationAction(ISD::EXTRACT_VECTOR_ELT, VT, Custom); | |||
1621 | setOperationAction(ISD::INSERT_VECTOR_ELT, VT, Custom); | |||
1622 | setOperationAction(ISD::SELECT, VT, Custom); | |||
1623 | setOperationAction(ISD::BUILD_VECTOR, VT, Custom); | |||
1624 | setOperationAction(ISD::VECTOR_SHUFFLE, VT, Custom); | |||
1625 | } | |||
1626 | ||||
1627 | setOperationAction(ISD::CONCAT_VECTORS, MVT::v32i1, Custom); | |||
1628 | setOperationAction(ISD::CONCAT_VECTORS, MVT::v64i1, Custom); | |||
1629 | setOperationAction(ISD::INSERT_SUBVECTOR, MVT::v32i1, Custom); | |||
1630 | setOperationAction(ISD::INSERT_SUBVECTOR, MVT::v64i1, Custom); | |||
1631 | for (auto VT : { MVT::v16i1, MVT::v32i1 }) | |||
1632 | setOperationAction(ISD::EXTRACT_SUBVECTOR, VT, Custom); | |||
1633 | ||||
1634 | // Extends from v32i1 masks to 256-bit vectors. | |||
1635 | setOperationAction(ISD::SIGN_EXTEND, MVT::v32i8, Custom); | |||
1636 | setOperationAction(ISD::ZERO_EXTEND, MVT::v32i8, Custom); | |||
1637 | setOperationAction(ISD::ANY_EXTEND, MVT::v32i8, Custom); | |||
1638 | } | |||
1639 | ||||
1640 | // This block controls legalization for v32i16 and v64i8. 512-bits can be | |||
1641 | // disabled based on prefer-vector-width and required-vector-width function | |||
1642 | // attributes. | |||
1643 | if (!Subtarget.useSoftFloat() && Subtarget.useBWIRegs()) { | |||
1644 | addRegisterClass(MVT::v32i16, &X86::VR512RegClass); | |||
1645 | addRegisterClass(MVT::v64i8, &X86::VR512RegClass); | |||
1646 | ||||
1647 | // Extends from v64i1 masks to 512-bit vectors. | |||
1648 | setOperationAction(ISD::SIGN_EXTEND, MVT::v64i8, Custom); | |||
1649 | setOperationAction(ISD::ZERO_EXTEND, MVT::v64i8, Custom); | |||
1650 | setOperationAction(ISD::ANY_EXTEND, MVT::v64i8, Custom); | |||
1651 | ||||
1652 | setOperationAction(ISD::MUL, MVT::v32i16, Legal); | |||
1653 | setOperationAction(ISD::MUL, MVT::v64i8, Custom); | |||
1654 | setOperationAction(ISD::MULHS, MVT::v32i16, Legal); | |||
1655 | setOperationAction(ISD::MULHU, MVT::v32i16, Legal); | |||
1656 | setOperationAction(ISD::MULHS, MVT::v64i8, Custom); | |||
1657 | setOperationAction(ISD::MULHU, MVT::v64i8, Custom); | |||
1658 | setOperationAction(ISD::CONCAT_VECTORS, MVT::v32i16, Custom); | |||
1659 | setOperationAction(ISD::CONCAT_VECTORS, MVT::v64i8, Custom); | |||
1660 | setOperationAction(ISD::INSERT_SUBVECTOR, MVT::v32i16, Legal); | |||
1661 | setOperationAction(ISD::INSERT_SUBVECTOR, MVT::v64i8, Legal); | |||
1662 | setOperationAction(ISD::EXTRACT_VECTOR_ELT, MVT::v32i16, Custom); | |||
1663 | setOperationAction(ISD::EXTRACT_VECTOR_ELT, MVT::v64i8, Custom); | |||
1664 | setOperationAction(ISD::SCALAR_TO_VECTOR, MVT::v32i16, Custom); | |||
1665 | setOperationAction(ISD::SCALAR_TO_VECTOR, MVT::v64i8, Custom); | |||
1666 | setOperationAction(ISD::SIGN_EXTEND, MVT::v32i16, Custom); | |||
1667 | setOperationAction(ISD::ZERO_EXTEND, MVT::v32i16, Custom); | |||
1668 | setOperationAction(ISD::ANY_EXTEND, MVT::v32i16, Custom); | |||
1669 | setOperationAction(ISD::VECTOR_SHUFFLE, MVT::v32i16, Custom); | |||
1670 | setOperationAction(ISD::VECTOR_SHUFFLE, MVT::v64i8, Custom); | |||
1671 | setOperationAction(ISD::INSERT_VECTOR_ELT, MVT::v32i16, Custom); | |||
1672 | setOperationAction(ISD::INSERT_VECTOR_ELT, MVT::v64i8, Custom); | |||
1673 | setOperationAction(ISD::TRUNCATE, MVT::v32i8, Custom); | |||
1674 | setOperationAction(ISD::BITREVERSE, MVT::v64i8, Custom); | |||
1675 | ||||
1676 | setOperationAction(ISD::SIGN_EXTEND_VECTOR_INREG, MVT::v32i16, Custom); | |||
1677 | setOperationAction(ISD::ZERO_EXTEND_VECTOR_INREG, MVT::v32i16, Custom); | |||
1678 | ||||
1679 | setTruncStoreAction(MVT::v32i16, MVT::v32i8, Legal); | |||
1680 | ||||
1681 | for (auto VT : { MVT::v64i8, MVT::v32i16 }) { | |||
1682 | setOperationAction(ISD::BUILD_VECTOR, VT, Custom); | |||
1683 | setOperationAction(ISD::VSELECT, VT, Custom); | |||
1684 | setOperationAction(ISD::ABS, VT, Legal); | |||
1685 | setOperationAction(ISD::SRL, VT, Custom); | |||
1686 | setOperationAction(ISD::SHL, VT, Custom); | |||
1687 | setOperationAction(ISD::SRA, VT, Custom); | |||
1688 | setOperationAction(ISD::MLOAD, VT, Legal); | |||
1689 | setOperationAction(ISD::MSTORE, VT, Legal); | |||
1690 | setOperationAction(ISD::CTPOP, VT, Custom); | |||
1691 | setOperationAction(ISD::CTLZ, VT, Custom); | |||
1692 | setOperationAction(ISD::SMAX, VT, Legal); | |||
1693 | setOperationAction(ISD::UMAX, VT, Legal); | |||
1694 | setOperationAction(ISD::SMIN, VT, Legal); | |||
1695 | setOperationAction(ISD::UMIN, VT, Legal); | |||
1696 | setOperationAction(ISD::SETCC, VT, Custom); | |||
1697 | setOperationAction(ISD::UADDSAT, VT, Legal); | |||
1698 | setOperationAction(ISD::SADDSAT, VT, Legal); | |||
1699 | setOperationAction(ISD::USUBSAT, VT, Legal); | |||
1700 | setOperationAction(ISD::SSUBSAT, VT, Legal); | |||
1701 | ||||
1702 | // The condition codes aren't legal in SSE/AVX and under AVX512 we use | |||
1703 | // setcc all the way to isel and prefer SETGT in some isel patterns. | |||
1704 | setCondCodeAction(ISD::SETLT, VT, Custom); | |||
1705 | setCondCodeAction(ISD::SETLE, VT, Custom); | |||
1706 | } | |||
1707 | ||||
1708 | for (auto ExtType : {ISD::ZEXTLOAD, ISD::SEXTLOAD}) { | |||
1709 | setLoadExtAction(ExtType, MVT::v32i16, MVT::v32i8, Legal); | |||
1710 | } | |||
1711 | ||||
1712 | if (Subtarget.hasBITALG()) { | |||
1713 | for (auto VT : { MVT::v64i8, MVT::v32i16 }) | |||
1714 | setOperationAction(ISD::CTPOP, VT, Legal); | |||
1715 | } | |||
1716 | } | |||
1717 | ||||
1718 | if (!Subtarget.useSoftFloat() && Subtarget.hasBWI()) { | |||
1719 | for (auto VT : { MVT::v32i8, MVT::v16i8, MVT::v16i16, MVT::v8i16 }) { | |||
1720 | setOperationAction(ISD::MLOAD, VT, Subtarget.hasVLX() ? Legal : Custom); | |||
1721 | setOperationAction(ISD::MSTORE, VT, Subtarget.hasVLX() ? Legal : Custom); | |||
1722 | } | |||
1723 | ||||
1724 | // These operations are handled on non-VLX by artificially widening in | |||
1725 | // isel patterns. | |||
1726 | // TODO: Custom widen in lowering on non-VLX and drop the isel patterns? | |||
1727 | ||||
1728 | if (Subtarget.hasBITALG()) { | |||
1729 | for (auto VT : { MVT::v16i8, MVT::v32i8, MVT::v8i16, MVT::v16i16 }) | |||
1730 | setOperationAction(ISD::CTPOP, VT, Legal); | |||
1731 | } | |||
1732 | } | |||
1733 | ||||
1734 | if (!Subtarget.useSoftFloat() && Subtarget.hasVLX()) { | |||
1735 | setTruncStoreAction(MVT::v4i64, MVT::v4i8, Legal); | |||
1736 | setTruncStoreAction(MVT::v4i64, MVT::v4i16, Legal); | |||
1737 | setTruncStoreAction(MVT::v4i64, MVT::v4i32, Legal); | |||
1738 | setTruncStoreAction(MVT::v8i32, MVT::v8i8, Legal); | |||
1739 | setTruncStoreAction(MVT::v8i32, MVT::v8i16, Legal); | |||
1740 | ||||
1741 | setTruncStoreAction(MVT::v2i64, MVT::v2i8, Legal); | |||
1742 | setTruncStoreAction(MVT::v2i64, MVT::v2i16, Legal); | |||
1743 | setTruncStoreAction(MVT::v2i64, MVT::v2i32, Legal); | |||
1744 | setTruncStoreAction(MVT::v4i32, MVT::v4i8, Legal); | |||
1745 | setTruncStoreAction(MVT::v4i32, MVT::v4i16, Legal); | |||
1746 | ||||
1747 | if (Subtarget.hasDQI()) { | |||
1748 | // Fast v2f32 SINT_TO_FP( v2i64 ) custom conversion. | |||
1749 | // v2f32 UINT_TO_FP is already custom under SSE2. | |||
1750 | setOperationAction(ISD::SINT_TO_FP, MVT::v2f32, Custom); | |||
1751 | assert(isOperationCustom(ISD::UINT_TO_FP, MVT::v2f32) && | |||
1752 |        "Unexpected operation action!"); | |||
1753 | // v2i64 FP_TO_S/UINT(v2f32) custom conversion. | |||
1754 | setOperationAction(ISD::FP_TO_SINT, MVT::v2f32, Custom); | |||
1755 | setOperationAction(ISD::FP_TO_UINT, MVT::v2f32, Custom); | |||
1756 | } | |||
1757 | ||||
1758 | if (Subtarget.hasBWI()) { | |||
1759 | setTruncStoreAction(MVT::v16i16, MVT::v16i8, Legal); | |||
1760 | setTruncStoreAction(MVT::v8i16, MVT::v8i8, Legal); | |||
1761 | } | |||
1762 | } | |||
1763 | ||||
1764 | // We want to custom lower some of our intrinsics. | |||
1765 | setOperationAction(ISD::INTRINSIC_WO_CHAIN, MVT::Other, Custom); | |||
1766 | setOperationAction(ISD::INTRINSIC_W_CHAIN, MVT::Other, Custom); | |||
1767 | setOperationAction(ISD::INTRINSIC_VOID, MVT::Other, Custom); | |||
1768 | if (!Subtarget.is64Bit()) { | |||
1769 | setOperationAction(ISD::INTRINSIC_W_CHAIN, MVT::i64, Custom); | |||
1770 | setOperationAction(ISD::INTRINSIC_WO_CHAIN, MVT::i64, Custom); | |||
1771 | } | |||
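| // E.g. intrinsics such as llvm.x86.rdtsc return an i64 that must be | |||
| // assembled from EDX:EAX on 32-bit targets. | |||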
1772 | ||||
1773 | // Only custom-lower 64-bit SADDO and friends on 64-bit because we don't | |||
1774 | // handle type legalization for these operations here. | |||
1775 | // | |||
1776 | // FIXME: We really should do custom legalization for addition and | |||
1777 | // subtraction on x86-32 once PR3203 is fixed. We really can't do much better | |||
1778 | // than generic legalization for 64-bit multiplication-with-overflow, though. | |||
1779 | for (auto VT : { MVT::i8, MVT::i16, MVT::i32, MVT::i64 }) { | |||
1780 | if (VT == MVT::i64 && !Subtarget.is64Bit()) | |||
1781 | continue; | |||
1782 | // Add/Sub/Mul with overflow operations are custom lowered. | |||
1783 | setOperationAction(ISD::SADDO, VT, Custom); | |||
1784 | setOperationAction(ISD::UADDO, VT, Custom); | |||
1785 | setOperationAction(ISD::SSUBO, VT, Custom); | |||
1786 | setOperationAction(ISD::USUBO, VT, Custom); | |||
1787 | setOperationAction(ISD::SMULO, VT, Custom); | |||
1788 | setOperationAction(ISD::UMULO, VT, Custom); | |||
1789 | ||||
1790 | // Support carry in as value rather than glue. | |||
1791 | setOperationAction(ISD::ADDCARRY, VT, Custom); | |||
1792 | setOperationAction(ISD::SUBCARRY, VT, Custom); | |||
1793 | setOperationAction(ISD::SETCCCARRY, VT, Custom); | |||
1794 | } | |||
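| // E.g. an i128 addition on x86-64 legalizes to ADD + ADC, with the | |||
| // carry passed between the two nodes as an explicit value rather | |||
| // than as glue. | |||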
1795 | ||||
1796 | if (!Subtarget.is64Bit()) { | |||
1797 | // These libcalls are not available on 32-bit targets. | |||
1798 | setLibcallName(RTLIB::SHL_I128, nullptr); | |||
1799 | setLibcallName(RTLIB::SRL_I128, nullptr); | |||
1800 | setLibcallName(RTLIB::SRA_I128, nullptr); | |||
1801 | setLibcallName(RTLIB::MUL_I128, nullptr); | |||
1802 | } | |||
1803 | ||||
1804 | // Combine sin / cos into _sincos_stret if it is available. | |||
1805 | if (getLibcallName(RTLIB::SINCOS_STRET_F32) != nullptr && | |||
1806 | getLibcallName(RTLIB::SINCOS_STRET_F64) != nullptr) { | |||
1807 | setOperationAction(ISD::FSINCOS, MVT::f64, Custom); | |||
1808 | setOperationAction(ISD::FSINCOS, MVT::f32, Custom); | |||
1809 | } | |||
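| // E.g. on Darwin, a sin and cos of the same operand combine into one | |||
| // __sincos_stret (or __sincosf_stret) call that returns both values. | |||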
1810 | ||||
1811 | if (Subtarget.isTargetWin64()) { | |||
1812 | setOperationAction(ISD::SDIV, MVT::i128, Custom); | |||
1813 | setOperationAction(ISD::UDIV, MVT::i128, Custom); | |||
1814 | setOperationAction(ISD::SREM, MVT::i128, Custom); | |||
1815 | setOperationAction(ISD::UREM, MVT::i128, Custom); | |||
1816 | setOperationAction(ISD::SDIVREM, MVT::i128, Custom); | |||
1817 | setOperationAction(ISD::UDIVREM, MVT::i128, Custom); | |||
1818 | } | |||
1819 | ||||
1820 | // On 32-bit MSVC, `fmodf(f32)` is not defined - only `fmod(f64)` | |||
1821 | // is. We should promote the value to 64-bits to solve this. | |||
1822 | // This is what the CRT headers do - `fmodf` is an inline header | |||
1823 | // function casting to f64 and calling `fmod`. | |||
1824 | if (Subtarget.is32Bit() && (Subtarget.isTargetKnownWindowsMSVC() || | |||
1825 | Subtarget.isTargetWindowsItanium())) | |||
1826 | for (ISD::NodeType Op : | |||
1827 | {ISD::FCEIL, ISD::FCOS, ISD::FEXP, ISD::FFLOOR, ISD::FREM, ISD::FLOG, | |||
1828 | ISD::FLOG10, ISD::FPOW, ISD::FSIN}) | |||
1829 | if (isOperationExpand(Op, MVT::f32)) | |||
1830 | setOperationAction(Op, MVT::f32, Promote); | |||
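| // E.g. a call to fmodf(x, y) here lowers as if it were written | |||
| // (float)fmod((double)x, (double)y), matching the CRT inline. | |||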
1831 | ||||
1832 | // We have target-specific dag combine patterns for the following nodes: | |||
1833 | setTargetDAGCombine(ISD::VECTOR_SHUFFLE); | |||
1834 | setTargetDAGCombine(ISD::SCALAR_TO_VECTOR); | |||
1835 | setTargetDAGCombine(ISD::EXTRACT_VECTOR_ELT); | |||
1836 | setTargetDAGCombine(ISD::INSERT_SUBVECTOR); | |||
1837 | setTargetDAGCombine(ISD::EXTRACT_SUBVECTOR); | |||
1838 | setTargetDAGCombine(ISD::BITCAST); | |||
1839 | setTargetDAGCombine(ISD::VSELECT); | |||
1840 | setTargetDAGCombine(ISD::SELECT); | |||
1841 | setTargetDAGCombine(ISD::SHL); | |||
1842 | setTargetDAGCombine(ISD::SRA); | |||
1843 | setTargetDAGCombine(ISD::SRL); | |||
1844 | setTargetDAGCombine(ISD::OR); | |||
1845 | setTargetDAGCombine(ISD::AND); | |||
1846 | setTargetDAGCombine(ISD::ADD); | |||
1847 | setTargetDAGCombine(ISD::FADD); | |||
1848 | setTargetDAGCombine(ISD::FSUB); | |||
1849 | setTargetDAGCombine(ISD::FNEG); | |||
1850 | setTargetDAGCombine(ISD::FMA); | |||
1851 | setTargetDAGCombine(ISD::FMINNUM); | |||
1852 | setTargetDAGCombine(ISD::FMAXNUM); | |||
1853 | setTargetDAGCombine(ISD::SUB); | |||
1854 | setTargetDAGCombine(ISD::LOAD); | |||
1855 | setTargetDAGCombine(ISD::MLOAD); | |||
1856 | setTargetDAGCombine(ISD::STORE); | |||
1857 | setTargetDAGCombine(ISD::MSTORE); | |||
1858 | setTargetDAGCombine(ISD::TRUNCATE); | |||
1859 | setTargetDAGCombine(ISD::ZERO_EXTEND); | |||
1860 | setTargetDAGCombine(ISD::ANY_EXTEND); | |||
1861 | setTargetDAGCombine(ISD::SIGN_EXTEND); | |||
1862 | setTargetDAGCombine(ISD::SIGN_EXTEND_INREG); | |||
1863 | setTargetDAGCombine(ISD::SINT_TO_FP); | |||
1864 | setTargetDAGCombine(ISD::UINT_TO_FP); | |||
1865 | setTargetDAGCombine(ISD::SETCC); | |||
1866 | setTargetDAGCombine(ISD::MUL); | |||
1867 | setTargetDAGCombine(ISD::XOR); | |||
1868 | setTargetDAGCombine(ISD::MSCATTER); | |||
1869 | setTargetDAGCombine(ISD::MGATHER); | |||
1870 | ||||
1871 | computeRegisterProperties(Subtarget.getRegisterInfo()); | |||
1872 | ||||
1873 | MaxStoresPerMemset = 16; // For @llvm.memset -> sequence of stores | |||
1874 | MaxStoresPerMemsetOptSize = 8; | |||
1875 | MaxStoresPerMemcpy = 8; // For @llvm.memcpy -> sequence of stores | |||
1876 | MaxStoresPerMemcpyOptSize = 4; | |||
1877 | MaxStoresPerMemmove = 8; // For @llvm.memmove -> sequence of stores | |||
1878 | MaxStoresPerMemmoveOptSize = 4; | |||
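| // E.g. a 64-byte @llvm.memset of zero can expand inline to four | |||
| // 16-byte SSE2 stores (or two 32-byte AVX stores) rather than a | |||
| // libcall, staying well within these store-count limits. | |||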
1879 | ||||
1880 | // TODO: These control memcmp expansion in CGP and could be raised higher, but | |||
1881 | // that needs to be benchmarked and balanced with the potential use of vector | |||
1882 | // load/store types (PR33329, PR33914). | |||
1883 | MaxLoadsPerMemcmp = 2; | |||
1884 | MaxLoadsPerMemcmpOptSize = 2; | |||
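| // E.g. memcmp(a, b, 16) == 0 on x86-64 can expand to two 8-byte | |||
| // load/compare pairs under these limits. | |||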
1885 | ||||
1886 | // Set loop alignment to 2^ExperimentalPrefLoopAlignment bytes (default: 2^4). | |||
1887 | setPrefLoopAlignment(ExperimentalPrefLoopAlignment); | |||
1888 | ||||
1889 | // An out-of-order CPU can speculatively execute past a predictable branch, | |||
1890 | // but a conditional move could be stalled by an expensive earlier operation. | |||
1891 | PredictableSelectIsExpensive = Subtarget.getSchedModel().isOutOfOrder(); | |||
1892 | EnableExtLdPromotion = true; | |||
1893 | setPrefFunctionAlignment(4); // 2^4 bytes. | |||
1894 | ||||
1895 | verifyIntrinsicTables(); | |||
1896 | } | |||
1897 | ||||
1898 | // This has so far only been implemented for 64-bit MachO. | |||
1899 | bool X86TargetLowering::useLoadStackGuardNode() const { | |||
1900 | return Subtarget.isTargetMachO() && Subtarget.is64Bit(); | |||
1901 | } | |||
1902 | ||||
1903 | bool X86TargetLowering::useStackGuardXorFP() const { | |||
1904 | // Currently only MSVC CRTs XOR the frame pointer into the stack guard value. | |||
1905 | return Subtarget.getTargetTriple().isOSMSVCRT(); | |||
1906 | } | |||
1907 | ||||
1908 | SDValue X86TargetLowering::emitStackGuardXorFP(SelectionDAG &DAG, SDValue Val, | |||
1909 | const SDLoc &DL) const { | |||
1910 | EVT PtrTy = getPointerTy(DAG.getDataLayout()); | |||
1911 | unsigned XorOp = Subtarget.is64Bit() ? X86::XOR64_FP : X86::XOR32_FP; | |||
1912 | MachineSDNode *Node = DAG.getMachineNode(XorOp, DL, PtrTy, Val); | |||
1913 | return SDValue(Node, 0); | |||
1914 | } | |||
1915 | ||||
1916 | TargetLoweringBase::LegalizeTypeAction | |||
1917 | X86TargetLowering::getPreferredVectorAction(MVT VT) const { | |||
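| // Without BWI there is no 32-bit mask register, so v32i1 must be | |||
| // split into two v16i1 halves that each fit a 16-bit k-register. | |||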
1918 | if (VT == MVT::v32i1 && Subtarget.hasAVX512() && !Subtarget.hasBWI()) | |||
1919 | return TypeSplitVector; | |||
1920 | ||||
1921 | if (ExperimentalVectorWideningLegalization && | |||
1922 | VT.getVectorNumElements() != 1 && | |||
1923 | VT.getVectorElementType() != MVT::i1) | |||
1924 | return TypeWidenVector; | |||
1925 | ||||
1926 | return TargetLoweringBase::getPreferredVectorAction(VT); | |||
1927 | } | |||
1928 | ||||
1929 | MVT X86TargetLowering::getRegisterTypeForCallingConv(LLVMContext &Context, | |||
1930 | CallingConv::ID CC, | |||
1931 | EVT VT) const { | |||
1932 | if (VT == MVT::v32i1 && Subtarget.hasAVX512() && !Subtarget.hasBWI()) | |||
1933 | return MVT::v32i8; | |||
1934 | return TargetLowering::getRegisterTypeForCallingConv(Context, CC, VT); | |||
1935 | } | |||
1936 | ||||
1937 | unsigned X86TargetLowering::getNumRegistersForCallingConv(LLVMContext &Context, | |||
1938 | CallingConv::ID CC, | |||
1939 | EVT VT) const { | |||
1940 | if (VT == MVT::v32i1 && Subtarget.hasAVX512() && !Subtarget.hasBWI()) | |||
1941 | return 1; | |||
1942 | return TargetLowering::getNumRegistersForCallingConv(Context, CC, VT); | |||
1943 | } | |||
1944 | ||||
1945 | EVT X86TargetLowering::getSetCCResultType(const DataLayout &DL, | |||
1946 | LLVMContext& Context, | |||
1947 | EVT VT) const { | |||
1948 | if (!VT.isVector()) | |||
1949 | return MVT::i8; | |||
1950 | ||||
1951 | if (Subtarget.hasAVX512()) { | |||
1952 | const unsigned NumElts = VT.getVectorNumElements(); | |||
1953 | ||||
1954 | // Figure out what this type will be legalized to. | |||
1955 | EVT LegalVT = VT; | |||
1956 | while (getTypeAction(Context, LegalVT) != TypeLegal) | |||
1957 | LegalVT = getTypeToTransformTo(Context, LegalVT); | |||
1958 | ||||
1959 | // If we got a 512-bit vector then we'll definitely have a vXi1 compare. | |||
1960 | if (LegalVT.getSimpleVT().is512BitVector()) | |||
1961 | return EVT::getVectorVT(Context, MVT::i1, NumElts); | |||
1962 | ||||
1963 | if (LegalVT.getSimpleVT().isVector() && Subtarget.hasVLX()) { | |||
1964 | // If we legalized to less than a 512-bit vector, then we will use a vXi1 | |||
1965 | // compare for vXi32/vXi64 for sure. If we have BWI we will also support | |||
1966 | // vXi16/vXi8. | |||
1967 | MVT EltVT = LegalVT.getSimpleVT().getVectorElementType(); | |||
1968 | if (Subtarget.hasBWI() || EltVT.getSizeInBits() >= 32) | |||
1969 | return EVT::getVectorVT(Context, MVT::i1, NumElts); | |||
1970 | } | |||
1971 | } | |||
1972 | ||||
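| // Otherwise use a vector of element-sized integers; e.g. a setcc of | |||
| // v8i32 with only AVX2 yields v8i32 rather than v8i1. | |||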
1973 | return VT.changeVectorElementTypeToInteger(); | |||
1974 | } | |||
1975 | ||||
1976 | /// Helper for getByValTypeAlignment to determine | |||
1977 | /// the desired ByVal argument alignment. | |||
1978 | static void getMaxByValAlign(Type *Ty, unsigned &MaxAlign) { | |||
1979 | if (MaxAlign == 16) | |||
1980 | return; | |||
1981 | if (VectorType *VTy = dyn_cast<VectorType>(Ty)) { | |||
1982 | if (VTy->getBitWidth() == 128) | |||
1983 | MaxAlign = 16; | |||
1984 | } else if (ArrayType *ATy = dyn_cast<ArrayType>(Ty)) { | |||
1985 | unsigned EltAlign = 0; | |||
1986 | getMaxByValAlign(ATy->getElementType(), EltAlign); | |||
1987 | if (EltAlign > MaxAlign) | |||
1988 | MaxAlign = EltAlign; | |||
1989 | } else if (StructType *STy = dyn_cast<StructType>(Ty)) { | |||
1990 | for (auto *EltTy : STy->elements()) { | |||
1991 | unsigned EltAlign = 0; | |||
1992 | getMaxByValAlign(EltTy, EltAlign); | |||
1993 | if (EltAlign > MaxAlign) | |||
1994 | MaxAlign = EltAlign; | |||
1995 | if (MaxAlign == 16) | |||
1996 | break; | |||
1997 | } | |||
1998 | } | |||
1999 | } | |||
2000 | ||||
2001 | /// Return the desired alignment for ByVal aggregate | |||
2002 | /// function arguments in the caller parameter area. For X86, aggregates | |||
2003 | /// that contain SSE vectors are placed at 16-byte boundaries while the rest | |||
2004 | /// are at 4-byte boundaries. | |||
2005 | unsigned X86TargetLowering::getByValTypeAlignment(Type *Ty, | |||
2006 | const DataLayout &DL) const { | |||
2007 | if (Subtarget.is64Bit()) { | |||
2008 | // Max of 8 and alignment of type. | |||
2009 | unsigned TyAlign = DL.getABITypeAlignment(Ty); | |||
2010 | if (TyAlign > 8) | |||
2011 | return TyAlign; | |||
2012 | return 8; | |||
2013 | } | |||
2014 | ||||
2015 | unsigned Align = 4; | |||
2016 | if (Subtarget.hasSSE1()) | |||
2017 | getMaxByValAlign(Ty, Align); | |||
2018 | return Align; | |||
2019 | } | |||
2020 | ||||
2021 | /// Returns the target specific optimal type for load | |||
2022 | /// and store operations as a result of memset, memcpy, and memmove | |||
2023 | /// lowering. If DstAlign is zero, the destination alignment can satisfy | |||
2024 | /// any constraint. Similarly, if SrcAlign is zero there is no need to | |||
2025 | /// check it against an alignment requirement, | |||
2026 | /// probably because the source does not need to be loaded. If 'IsMemset' is | |||
2027 | /// true, that means it's expanding a memset. If 'ZeroMemset' is true, that | |||
2028 | /// means it's a memset of zero. 'MemcpyStrSrc' indicates whether the memcpy | |||
2029 | /// source is constant so it does not need to be loaded. | |||
2030 | /// It returns EVT::Other if the type should be determined using generic | |||
2031 | /// target-independent logic. | |||
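| /// For example, a 64-byte memcpy on an AVX target with fast unaligned | |||
| /// accesses is performed in v32i8 chunks, while the same copy in a | |||
| /// NoImplicitFloat function falls back to i64 (or i32) chunks. | |||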
2032 | EVT | |||
2033 | X86TargetLowering::getOptimalMemOpType(uint64_t Size, | |||
2034 | unsigned DstAlign, unsigned SrcAlign, | |||
2035 | bool IsMemset, bool ZeroMemset, | |||
2036 | bool MemcpyStrSrc, | |||
2037 | MachineFunction &MF) const { | |||
2038 | const Function &F = MF.getFunction(); | |||
2039 | if (!F.hasFnAttribute(Attribute::NoImplicitFloat)) { | |||
2040 | if (Size >= 16 && | |||
2041 | (!Subtarget.isUnalignedMem16Slow() || | |||
2042 | ((DstAlign == 0 || DstAlign >= 16) && | |||
2043 | (SrcAlign == 0 || SrcAlign >= 16)))) { | |||
2044 | // FIXME: Check if unaligned 32-byte accesses are slow. | |||
2045 | if (Size >= 32 && Subtarget.hasAVX()) { | |||
2046 | // Although this isn't a well-supported type for AVX1, we'll let | |||
2047 | // legalization and shuffle lowering produce the optimal codegen. If we | |||
2048 | // choose an optimal type with a vector element larger than a byte, | |||
2049 | // getMemsetStores() may create an intermediate splat (using an integer | |||
2050 | // multiply) before we splat as a vector. | |||
2051 | return MVT::v32i8; | |||
2052 | } | |||
2053 | if (Subtarget.hasSSE2()) | |||
2054 | return MVT::v16i8; | |||
2055 | // TODO: Can SSE1 handle a byte vector? | |||
2056 | // If we have SSE1 registers we should be able to use them. | |||
2057 | if (Subtarget.hasSSE1() && (Subtarget.is64Bit() || Subtarget.hasX87())) | |||
2058 | return MVT::v4f32; | |||
2059 | } else if ((!IsMemset || ZeroMemset) && !MemcpyStrSrc && Size >= 8 && | |||
2060 | !Subtarget.is64Bit() && Subtarget.hasSSE2()) { | |||
2061 | // Do not use f64 to lower memcpy if source is string constant. It's | |||
2062 | // better to use i32 to avoid the loads. | |||
2063 | // Also, do not use f64 to lower memset unless this is a memset of zeros. | |||
2064 | // The gymnastics of splatting a byte value into an XMM register and then | |||
2065 | // only using 8-byte stores (because this is a CPU with slow unaligned | |||
2066 | // 16-byte accesses) makes that a loser. | |||
2067 | return MVT::f64; | |||
2068 | } | |||
2069 | } | |||
2070 | // This is a compromise. If we reach here, unaligned accesses may be slow on | |||
2071 | // this target. However, creating smaller, aligned accesses could be even | |||
2072 | // slower and would certainly be a lot more code. | |||
2073 | if (Subtarget.is64Bit() && Size >= 8) | |||
2074 | return MVT::i64; | |||
2075 | return MVT::i32; | |||
2076 | } | |||
2077 | ||||
2078 | bool X86TargetLowering::isSafeMemOpType(MVT VT) const { | |||
2079 | if (VT == MVT::f32) | |||
2080 | return X86ScalarSSEf32; | |||
2081 | else if (VT == MVT::f64) | |||
2082 | return X86ScalarSSEf64; | |||
2083 | return true; | |||
2084 | } | |||
2085 | ||||
2086 | bool | |||
2087 | X86TargetLowering::allowsMisalignedMemoryAccesses(EVT VT, | |||
2088 | unsigned, | |||
2089 | unsigned, | |||
2090 | bool *Fast) const { | |||
2091 | if (Fast) { | |||
2092 | switch (VT.getSizeInBits()) { | |||
2093 | default: | |||
2094 | // 8-byte and under are always assumed to be fast. | |||
2095 | *Fast = true; | |||
2096 | break; | |||
2097 | case 128: | |||
2098 | *Fast = !Subtarget.isUnalignedMem16Slow(); | |||
2099 | break; | |||
2100 | case 256: | |||
2101 | *Fast = !Subtarget.isUnalignedMem32Slow(); | |||
2102 | break; | |||
2103 | // TODO: What about AVX-512 (512-bit) accesses? | |||
2104 | } | |||
2105 | } | |||
2106 | // Misaligned accesses of any size are always allowed. | |||
2107 | return true; | |||
2108 | } | |||
2109 | ||||
2110 | /// Return the entry encoding for a jump table in the | |||
2111 | /// current function. The returned value is a member of the | |||
2112 | /// MachineJumpTableInfo::JTEntryKind enum. | |||
2113 | unsigned X86TargetLowering::getJumpTableEncoding() const { | |||
2114 | // In GOT pic mode, each entry in the jump table is emitted as a @GOTOFF | |||
2115 | // symbol. | |||
2116 | if (isPositionIndependent() && Subtarget.isPICStyleGOT()) | |||
2117 | return MachineJumpTableInfo::EK_Custom32; | |||
2118 | ||||
2119 | // Otherwise, use the normal jump table encoding heuristics. | |||
2120 | return TargetLowering::getJumpTableEncoding(); | |||
2121 | } | |||
2122 | ||||
2123 | bool X86TargetLowering::useSoftFloat() const { | |||
2124 | return Subtarget.useSoftFloat(); | |||
2125 | } | |||
2126 | ||||
2127 | void X86TargetLowering::markLibCallAttributes(MachineFunction *MF, unsigned CC, | |||
2128 | ArgListTy &Args) const { | |||
2129 | ||||
2130 | // Only relabel X86-32 for C / Stdcall CCs. | |||
2131 | if (Subtarget.is64Bit()) | |||
2132 | return; | |||
2133 | if (CC != CallingConv::C && CC != CallingConv::X86_StdCall) | |||
2134 | return; | |||
2135 | unsigned ParamRegs = 0; | |||
2136 | if (auto *M = MF->getFunction().getParent()) | |||
2137 | ParamRegs = M->getNumberRegisterParameters(); | |||
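| // ParamRegs reflects -mregparm=N; e.g. with -mregparm=3 the first | |||
| // integer arguments are passed in EAX, EDX and ECX, and an i64 | |||
| // argument consumes two of those registers. | |||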
2138 | ||||
2139 | // Mark the first N integer arguments as being passed in registers. | |||
2140 | for (unsigned Idx = 0; Idx < Args.size(); Idx++) { | |||
2141 | Type *T = Args[Idx].Ty; | |||
2142 | if (T->isIntOrPtrTy()) | |||
2143 | if (MF->getDataLayout().getTypeAllocSize(T) <= 8) { | |||
2144 | unsigned numRegs = 1; | |||
2145 | if (MF->getDataLayout().getTypeAllocSize(T) > 4) | |||
2146 | numRegs = 2; | |||
2147 | if (ParamRegs < numRegs) | |||
2148 | return; | |||
2149 | ParamRegs -= numRegs; | |||
2150 | Args[Idx].IsInReg = true; | |||
2151 | } | |||
2152 | } | |||
2153 | } | |||
2154 | ||||
2155 | const MCExpr * | |||
2156 | X86TargetLowering::LowerCustomJumpTableEntry(const MachineJumpTableInfo *MJTI, | |||
2157 | const MachineBasicBlock *MBB, | |||
2158 | unsigned uid,MCContext &Ctx) const{ | |||
2159 | assert(isPositionIndependent() && Subtarget.isPICStyleGOT()); | |||
2160 | // In 32-bit ELF systems, our jump table entries are formed with @GOTOFF | |||
2161 | // entries. | |||
2162 | return MCSymbolRefExpr::create(MBB->getSymbol(), | |||
2163 | MCSymbolRefExpr::VK_GOTOFF, Ctx); | |||
2164 | } | |||
2165 | ||||
2166 | /// Returns relocation base for the given PIC jumptable. | |||
2167 | SDValue X86TargetLowering::getPICJumpTableRelocBase(SDValue Table, | |||
2168 | SelectionDAG &DAG) const { | |||
2169 | if (!Subtarget.is64Bit()) | |||
2170 | // This doesn't have SDLoc associated with it, but is not really the | |||
2171 | // same as a Register. | |||
2172 | return DAG.getNode(X86ISD::GlobalBaseReg, SDLoc(), | |||
2173 | getPointerTy(DAG.getDataLayout())); | |||
2174 | return Table; | |||
2175 | } | |||
2176 | ||||
2177 | /// This returns the relocation base for the given PIC jumptable, | |||
2178 | /// the same as getPICJumpTableRelocBase, but as an MCExpr. | |||
2179 | const MCExpr *X86TargetLowering:: | |||
2180 | getPICJumpTableRelocBaseExpr(const MachineFunction *MF, unsigned JTI, | |||
2181 | MCContext &Ctx) const { | |||
2182 | // X86-64 uses RIP relative addressing based on the jump table label. | |||
2183 | if (Subtarget.isPICStyleRIPRel()) | |||
2184 | return TargetLowering::getPICJumpTableRelocBaseExpr(MF, JTI, Ctx); | |||
2185 | ||||
2186 | // Otherwise, the reference is relative to the PIC base. | |||
2187 | return MCSymbolRefExpr::create(MF->getPICBaseSymbol(), Ctx); | |||
2188 | } | |||
2189 | ||||
2190 | std::pair<const TargetRegisterClass *, uint8_t> | |||
2191 | X86TargetLowering::findRepresentativeClass(const TargetRegisterInfo *TRI, | |||
2192 | MVT VT) const { | |||
2193 | const TargetRegisterClass *RRC = nullptr; | |||
2194 | uint8_t Cost = 1; | |||
2195 | switch (VT.SimpleTy) { | |||
2196 | default: | |||
2197 | return TargetLowering::findRepresentativeClass(TRI, VT); | |||
2198 | case MVT::i8: case MVT::i16: case MVT::i32: case MVT::i64: | |||
2199 | RRC = Subtarget.is64Bit() ? &X86::GR64RegClass : &X86::GR32RegClass; | |||
2200 | break; | |||
2201 | case MVT::x86mmx: | |||
2202 | RRC = &X86::VR64RegClass; | |||
2203 | break; | |||
2204 | case MVT::f32: case MVT::f64: | |||
2205 | case MVT::v16i8: case MVT::v8i16: case MVT::v4i32: case MVT::v2i64: | |||
2206 | case MVT::v4f32: case MVT::v2f64: | |||
2207 | case MVT::v32i8: case MVT::v16i16: case MVT::v8i32: case MVT::v4i64: | |||
2208 | case MVT::v8f32: case MVT::v4f64: | |||
2209 | case MVT::v64i8: case MVT::v32i16: case MVT::v16i32: case MVT::v8i64: | |||
2210 | case MVT::v16f32: case MVT::v8f64: | |||
2211 | RRC = &X86::VR128XRegClass; | |||
2212 | break; | |||
2213 | } | |||
2214 | return std::make_pair(RRC, Cost); | |||
2215 | } | |||
2216 | ||||
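     | // Note: LLVM models X86 segment-relative memory with reserved pseudo address | |||
     | // spaces, where 256 is %gs and 257 is %fs. Hence the stack guard below lives | |||
     | // in %gs on i386, and in %fs (or %gs under the kernel code model) on x86-64. | |||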
2217 | unsigned X86TargetLowering::getAddressSpace() const { | |||
2218 | if (Subtarget.is64Bit()) | |||
2219 | return (getTargetMachine().getCodeModel() == CodeModel::Kernel) ? 256 : 257; | |||
2220 | return 256; | |||
2221 | } | |||
2222 | ||||
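     | // C libraries that reserve a TLS slot for the stack guard: glibc, Fuchsia's | |||
     | // libc, and Android's bionic from API level 17 onwards (hence the version | |||
     | // check below). | |||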
2223 | static bool hasStackGuardSlotTLS(const Triple &TargetTriple) { | |||
2224 | return TargetTriple.isOSGlibc() || TargetTriple.isOSFuchsia() || | |||
2225 | (TargetTriple.isAndroid() && !TargetTriple.isAndroidVersionLT(17)); | |||
2226 | } | |||
2227 | ||||
2228 | static Constant *SegmentOffset(IRBuilder<> &IRB, | |||
2229 | unsigned Offset, unsigned AddressSpace) { | |||
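     | // This builds a constant such as "inttoptr (i32 0x28 to i8* addrspace(257)*)", | |||
     | // i.e. a pointer to a fixed offset within the given segment. | |||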
2230 | return ConstantExpr::getIntToPtr( | |||
2231 | ConstantInt::get(Type::getInt32Ty(IRB.getContext()), Offset), | |||
2232 | Type::getInt8PtrTy(IRB.getContext())->getPointerTo(AddressSpace)); | |||
2233 | } | |||
2234 | ||||
2235 | Value *X86TargetLowering::getIRStackGuard(IRBuilder<> &IRB) const { | |||
2236 | // glibc, bionic, and Fuchsia have a special slot for the stack guard in | |||
2237 | // tcbhead_t; use it instead of the usual global variable (see | |||
2238 | // sysdeps/{i386,x86_64}/nptl/tls.h) | |||
2239 | if (hasStackGuardSlotTLS(Subtarget.getTargetTriple())) { | |||
2240 | if (Subtarget.isTargetFuchsia()) { | |||
2241 | // <zircon/tls.h> defines ZX_TLS_STACK_GUARD_OFFSET with this value. | |||
2242 | return SegmentOffset(IRB, 0x10, getAddressSpace()); | |||
2243 | } else { | |||
2244 | // %fs:0x28, unless we're using a Kernel code model, in which case | |||
2245 | // it's %gs:0x28. On i386 it's %gs:0x14. | |||
2246 | unsigned Offset = (Subtarget.is64Bit()) ? 0x28 : 0x14; | |||
2247 | return SegmentOffset(IRB, Offset, getAddressSpace()); | |||
2248 | } | |||
2249 | } | |||
2250 | ||||
2251 | return TargetLowering::getIRStackGuard(IRB); | |||
2252 | } | |||
2253 | ||||
2254 | void X86TargetLowering::insertSSPDeclarations(Module &M) const { | |||
2255 | // MSVC CRT provides functionalities for stack protection. | |||
2256 | if (Subtarget.getTargetTriple().isWindowsMSVCEnvironment() || | |||
2257 | Subtarget.getTargetTriple().isWindowsItaniumEnvironment()) { | |||
2258 | // MSVC CRT has a global variable holding security cookie. | |||
2259 | M.getOrInsertGlobal("__security_cookie", | |||
2260 | Type::getInt8PtrTy(M.getContext())); | |||
2261 | ||||
2262 | // MSVC CRT has a function to validate security cookie. | |||
2263 | auto *SecurityCheckCookie = cast<Function>( | |||
2264 | M.getOrInsertFunction("__security_check_cookie", | |||
2265 | Type::getVoidTy(M.getContext()), | |||
2266 | Type::getInt8PtrTy(M.getContext()))); | |||
2267 | SecurityCheckCookie->setCallingConv(CallingConv::X86_FastCall); | |||
2268 | SecurityCheckCookie->addAttribute(1, Attribute::AttrKind::InReg); | |||
2269 | return; | |||
2270 | } | |||
2271 | // glibc, bionic, and Fuchsia have a special slot for the stack guard. | |||
2272 | if (hasStackGuardSlotTLS(Subtarget.getTargetTriple())) | |||
2273 | return; | |||
2274 | TargetLowering::insertSSPDeclarations(M); | |||
2275 | } | |||
2276 | ||||
2277 | Value *X86TargetLowering::getSDagStackGuard(const Module &M) const { | |||
2278 | // MSVC CRT has a global variable holding security cookie. | |||
2279 | if (Subtarget.getTargetTriple().isWindowsMSVCEnvironment() || | |||
2280 | Subtarget.getTargetTriple().isWindowsItaniumEnvironment()) { | |||
2281 | return M.getGlobalVariable("__security_cookie"); | |||
2282 | } | |||
2283 | return TargetLowering::getSDagStackGuard(M); | |||
2284 | } | |||
2285 | ||||
2286 | Value *X86TargetLowering::getSSPStackGuardCheck(const Module &M) const { | |||
2287 | // MSVC CRT has a function to validate security cookie. | |||
2288 | if (Subtarget.getTargetTriple().isWindowsMSVCEnvironment() || | |||
2289 | Subtarget.getTargetTriple().isWindowsItaniumEnvironment()) { | |||
2290 | return M.getFunction("__security_check_cookie"); | |||
2291 | } | |||
2292 | return TargetLowering::getSSPStackGuardCheck(M); | |||
2293 | } | |||
2294 | ||||
2295 | Value *X86TargetLowering::getSafeStackPointerLocation(IRBuilder<> &IRB) const { | |||
2296 | if (Subtarget.getTargetTriple().isOSContiki()) | |||
2297 | return getDefaultSafeStackPointerLocation(IRB, false); | |||
2298 | ||||
2299 | // Android provides a fixed TLS slot for the SafeStack pointer. See the | |||
2300 | // definition of TLS_SLOT_SAFESTACK in | |||
2301 | // https://android.googlesource.com/platform/bionic/+/master/libc/private/bionic_tls.h | |||
2302 | if (Subtarget.isTargetAndroid()) { | |||
2303 | // %fs:0x48, unless we're using a Kernel code model, in which case | |||
2304 | // it's %gs:0x48. On i386 it's %gs:0x24. | |||
2305 | unsigned Offset = (Subtarget.is64Bit()) ? 0x48 : 0x24; | |||
2306 | return SegmentOffset(IRB, Offset, getAddressSpace()); | |||
2307 | } | |||
2308 | ||||
2309 | // Fuchsia is similar. | |||
2310 | if (Subtarget.isTargetFuchsia()) { | |||
2311 | // <zircon/tls.h> defines ZX_TLS_UNSAFE_SP_OFFSET with this value. | |||
2312 | return SegmentOffset(IRB, 0x18, getAddressSpace()); | |||
2313 | } | |||
2314 | ||||
2315 | return TargetLowering::getSafeStackPointerLocation(IRB); | |||
2316 | } | |||
2317 | ||||
2318 | bool X86TargetLowering::isNoopAddrSpaceCast(unsigned SrcAS, | |||
2319 | unsigned DestAS) const { | |||
2320 | assert(SrcAS != DestAS && "Expected different address spaces!"); | |||
2321 | ||||
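     | // Casts between the flat address spaces (0..255) are no-ops. Anything at or | |||
     | // above 256 is one of X86's segment-relative pseudo address spaces, where a | |||
     | // cast changes the effective base address and so is not a no-op. | |||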
2322 | return SrcAS < 256 && DestAS < 256; | |||
2323 | } | |||
2324 | ||||
2325 | //===----------------------------------------------------------------------===// | |||
2326 | // Return Value Calling Convention Implementation | |||
2327 | //===----------------------------------------------------------------------===// | |||
2328 | ||||
2329 | #include "X86GenCallingConv.inc" | |||
2330 | ||||
2331 | bool X86TargetLowering::CanLowerReturn( | |||
2332 | CallingConv::ID CallConv, MachineFunction &MF, bool isVarArg, | |||
2333 | const SmallVectorImpl<ISD::OutputArg> &Outs, LLVMContext &Context) const { | |||
2334 | SmallVector<CCValAssign, 16> RVLocs; | |||
2335 | CCState CCInfo(CallConv, isVarArg, MF, RVLocs, Context); | |||
2336 | return CCInfo.CheckReturn(Outs, RetCC_X86); | |||
2337 | } | |||
2338 | ||||
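     | // R11 is caller-saved and is never used for argument passing, so it is safe | |||
     | // to clobber it around a call site. | |||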
2339 | const MCPhysReg *X86TargetLowering::getScratchRegisters(CallingConv::ID) const { | |||
2340 | static const MCPhysReg ScratchRegs[] = { X86::R11, 0 }; | |||
2341 | return ScratchRegs; | |||
2342 | } | |||
2343 | ||||
2344 | /// Lowers mask values (v*i1) to the local register values. | |||
2345 | /// \returns DAG node after lowering to register type | |||
2346 | static SDValue lowerMasksToReg(const SDValue &ValArg, const EVT &ValLoc, | |||
2347 | const SDLoc &Dl, SelectionDAG &DAG) { | |||
2348 | EVT ValVT = ValArg.getValueType(); | |||
2349 | ||||
2350 | if (ValVT == MVT::v1i1) | |||
2351 | return DAG.getNode(ISD::EXTRACT_VECTOR_ELT, Dl, ValLoc, ValArg, | |||
2352 | DAG.getIntPtrConstant(0, Dl)); | |||
2353 | ||||
2354 | if ((ValVT == MVT::v8i1 && (ValLoc == MVT::i8 || ValLoc == MVT::i32)) || | |||
2355 | (ValVT == MVT::v16i1 && (ValLoc == MVT::i16 || ValLoc == MVT::i32))) { | |||
2356 | // Two stage lowering might be required | |||
2357 | // bitcast: v8i1 -> i8 / v16i1 -> i16 | |||
2358 | // anyextend: i8 -> i32 / i16 -> i32 | |||
2359 | EVT TempValLoc = ValVT == MVT::v8i1 ? MVT::i8 : MVT::i16; | |||
2360 | SDValue ValToCopy = DAG.getBitcast(TempValLoc, ValArg); | |||
2361 | if (ValLoc == MVT::i32) | |||
2362 | ValToCopy = DAG.getNode(ISD::ANY_EXTEND, Dl, ValLoc, ValToCopy); | |||
2363 | return ValToCopy; | |||
2364 | } | |||
2365 | ||||
2366 | if ((ValVT == MVT::v32i1 && ValLoc == MVT::i32) || | |||
2367 | (ValVT == MVT::v64i1 && ValLoc == MVT::i64)) { | |||
2368 | // One stage lowering is required | |||
2369 | // bitcast: v32i1 -> i32 / v64i1 -> i64 | |||
2370 | return DAG.getBitcast(ValLoc, ValArg); | |||
2371 | } | |||
2372 | ||||
2373 | return DAG.getNode(ISD::ANY_EXTEND, Dl, ValLoc, ValArg); | |||
2374 | } | |||
2375 | ||||
2376 | /// Breaks v64i1 value into two registers and adds the new node to the DAG | |||
2377 | static void Passv64i1ArgInRegs( | |||
2378 | const SDLoc &Dl, SelectionDAG &DAG, SDValue Chain, SDValue &Arg, | |||
2379 | SmallVector<std::pair<unsigned, SDValue>, 8> &RegsToPass, CCValAssign &VA, | |||
2380 | CCValAssign &NextVA, const X86Subtarget &Subtarget) { | |||
2381 | assert(Subtarget.hasBWI() && "Expected AVX512BW target!"); | |||
2382 | assert(Subtarget.is32Bit() && "Expecting 32 bit target"); | |||
2383 | assert(Arg.getValueType() == MVT::i64 && "Expecting 64 bit value"); | |||
2384 | assert(VA.isRegLoc() && NextVA.isRegLoc() && | |||
2385 | "The value should reside in two registers"); | |||
2386 | ||||
2387 | // Before splitting the value we cast it to i64 | |||
2388 | Arg = DAG.getBitcast(MVT::i64, Arg); | |||
2389 | ||||
2390 | // Splitting the value into two i32 types | |||
2391 | SDValue Lo, Hi; | |||
2392 | Lo = DAG.getNode(ISD::EXTRACT_ELEMENT, Dl, MVT::i32, Arg, | |||
2393 | DAG.getConstant(0, Dl, MVT::i32)); | |||
2394 | Hi = DAG.getNode(ISD::EXTRACT_ELEMENT, Dl, MVT::i32, Arg, | |||
2395 | DAG.getConstant(1, Dl, MVT::i32)); | |||
2396 | ||||
2397 | // Attach the two i32 halves to the corresponding registers. | |||
2398 | RegsToPass.push_back(std::make_pair(VA.getLocReg(), Lo)); | |||
2399 | RegsToPass.push_back(std::make_pair(NextVA.getLocReg(), Hi)); | |||
2400 | } | |||
2401 | ||||
2402 | SDValue | |||
2403 | X86TargetLowering::LowerReturn(SDValue Chain, CallingConv::ID CallConv, | |||
2404 | bool isVarArg, | |||
2405 | const SmallVectorImpl<ISD::OutputArg> &Outs, | |||
2406 | const SmallVectorImpl<SDValue> &OutVals, | |||
2407 | const SDLoc &dl, SelectionDAG &DAG) const { | |||
2408 | MachineFunction &MF = DAG.getMachineFunction(); | |||
2409 | X86MachineFunctionInfo *FuncInfo = MF.getInfo<X86MachineFunctionInfo>(); | |||
2410 | ||||
2411 | // In some cases we need to disable registers from the default CSR list. | |||
2412 | // For example, when they are used for argument passing. | |||
2413 | bool ShouldDisableCalleeSavedRegister = | |||
2414 | CallConv == CallingConv::X86_RegCall || | |||
2415 | MF.getFunction().hasFnAttribute("no_caller_saved_registers"); | |||
2416 | ||||
2417 | if (CallConv == CallingConv::X86_INTR && !Outs.empty()) | |||
2418 | report_fatal_error("X86 interrupts may not return any value"); | |||
2419 | ||||
2420 | SmallVector<CCValAssign, 16> RVLocs; | |||
2421 | CCState CCInfo(CallConv, isVarArg, MF, RVLocs, *DAG.getContext()); | |||
2422 | CCInfo.AnalyzeReturn(Outs, RetCC_X86); | |||
2423 | ||||
2424 | SDValue Flag; | |||
2425 | SmallVector<SDValue, 6> RetOps; | |||
2426 | RetOps.push_back(Chain); // Operand #0 = Chain (updated below) | |||
2427 | // Operand #1 = Bytes To Pop | |||
2428 | RetOps.push_back(DAG.getTargetConstant(FuncInfo->getBytesToPopOnReturn(), dl, | |||
2429 | MVT::i32)); | |||
2430 | ||||
2431 | // Copy the result values into the output registers. | |||
2432 | for (unsigned I = 0, OutsIndex = 0, E = RVLocs.size(); I != E; | |||
2433 | ++I, ++OutsIndex) { | |||
2434 | CCValAssign &VA = RVLocs[I]; | |||
2435 | assert(VA.isRegLoc() && "Can only return in registers!"); | |||
2436 | ||||
2437 | // Add the register to the CalleeSaveDisableRegs list. | |||
2438 | if (ShouldDisableCalleeSavedRegister) | |||
2439 | MF.getRegInfo().disableCalleeSavedRegister(VA.getLocReg()); | |||
2440 | ||||
2441 | SDValue ValToCopy = OutVals[OutsIndex]; | |||
2442 | EVT ValVT = ValToCopy.getValueType(); | |||
2443 | ||||
2444 | // Promote values to the appropriate types. | |||
2445 | if (VA.getLocInfo() == CCValAssign::SExt) | |||
2446 | ValToCopy = DAG.getNode(ISD::SIGN_EXTEND, dl, VA.getLocVT(), ValToCopy); | |||
2447 | else if (VA.getLocInfo() == CCValAssign::ZExt) | |||
2448 | ValToCopy = DAG.getNode(ISD::ZERO_EXTEND, dl, VA.getLocVT(), ValToCopy); | |||
2449 | else if (VA.getLocInfo() == CCValAssign::AExt) { | |||
2450 | if (ValVT.isVector() && ValVT.getVectorElementType() == MVT::i1) | |||
2451 | ValToCopy = lowerMasksToReg(ValToCopy, VA.getLocVT(), dl, DAG); | |||
2452 | else | |||
2453 | ValToCopy = DAG.getNode(ISD::ANY_EXTEND, dl, VA.getLocVT(), ValToCopy); | |||
2454 | } | |||
2455 | else if (VA.getLocInfo() == CCValAssign::BCvt) | |||
2456 | ValToCopy = DAG.getBitcast(VA.getLocVT(), ValToCopy); | |||
2457 | ||||
2458 | assert(VA.getLocInfo() != CCValAssign::FPExt && | |||
2459 | "Unexpected FP-extend for return value."); | |||
2460 | ||||
2461 | // If this is x86-64, and we disabled SSE, we can't return FP values, | |||
2462 | // or SSE or MMX vectors. | |||
2463 | if ((ValVT == MVT::f32 || ValVT == MVT::f64 || | |||
2464 | VA.getLocReg() == X86::XMM0 || VA.getLocReg() == X86::XMM1) && | |||
2465 | (Subtarget.is64Bit() && !Subtarget.hasSSE1())) { | |||
2466 | errorUnsupported(DAG, dl, "SSE register return with SSE disabled"); | |||
2467 | VA.convertToReg(X86::FP0); // Set reg to FP0, avoid hitting asserts. | |||
2468 | } else if (ValVT == MVT::f64 && | |||
2469 | (Subtarget.is64Bit() && !Subtarget.hasSSE2())) { | |||
2470 | // Likewise we can't return F64 values with SSE1 only. gcc does so, but | |||
2471 | // llvm-gcc has never done it right and no one has noticed, so this | |||
2472 | // should be OK for now. | |||
2473 | errorUnsupported(DAG, dl, "SSE2 register return with SSE2 disabled"); | |||
2474 | VA.convertToReg(X86::FP0); // Set reg to FP0, avoid hitting asserts. | |||
2475 | } | |||
2476 | ||||
2477 | // Returns in ST0/ST1 are handled specially: these are pushed as operands to | |||
2478 | // the RET instruction and handled by the FP Stackifier. | |||
2479 | if (VA.getLocReg() == X86::FP0 || | |||
2480 | VA.getLocReg() == X86::FP1) { | |||
2481 | // If this is a copy from an xmm register to ST(0), use an FPExtend to | |||
2482 | // change the value to the FP stack register class. | |||
2483 | if (isScalarFPTypeInSSEReg(VA.getValVT())) | |||
2484 | ValToCopy = DAG.getNode(ISD::FP_EXTEND, dl, MVT::f80, ValToCopy); | |||
2485 | RetOps.push_back(ValToCopy); | |||
2486 | // Don't emit a copytoreg. | |||
2487 | continue; | |||
2488 | } | |||
2489 | ||||
2490 | // 64-bit vector (MMX) values are returned in XMM0 / XMM1 except for v1i64 | |||
2491 | // which is returned in RAX / RDX. | |||
2492 | if (Subtarget.is64Bit()) { | |||
2493 | if (ValVT == MVT::x86mmx) { | |||
2494 | if (VA.getLocReg() == X86::XMM0 || VA.getLocReg() == X86::XMM1) { | |||
2495 | ValToCopy = DAG.getBitcast(MVT::i64, ValToCopy); | |||
2496 | ValToCopy = DAG.getNode(ISD::SCALAR_TO_VECTOR, dl, MVT::v2i64, | |||
2497 | ValToCopy); | |||
2498 | // If we don't have SSE2 available, convert to v4f32 so the generated | |||
2499 | // register is legal. | |||
2500 | if (!Subtarget.hasSSE2()) | |||
2501 | ValToCopy = DAG.getBitcast(MVT::v4f32, ValToCopy); | |||
2502 | } | |||
2503 | } | |||
2504 | } | |||
2505 | ||||
2506 | SmallVector<std::pair<unsigned, SDValue>, 8> RegsToPass; | |||
2507 | ||||
2508 | if (VA.needsCustom()) { | |||
2509 | assert(VA.getValVT() == MVT::v64i1 && | |||
2510 | "Currently the only custom case is when we split v64i1 to 2 regs"); | |||
2511 | ||||
2512 | Passv64i1ArgInRegs(dl, DAG, Chain, ValToCopy, RegsToPass, VA, RVLocs[++I], | |||
2513 | Subtarget); | |||
2514 | ||||
2515 | assert(2 == RegsToPass.size() && | |||
2516 | "Expecting two registers after Pass64BitArgInRegs"); | |||
2517 | ||||
2518 | // Add the second register to the CalleeSaveDisableRegs list. | |||
2519 | if (ShouldDisableCalleeSavedRegister) | |||
2520 | MF.getRegInfo().disableCalleeSavedRegister(RVLocs[I].getLocReg()); | |||
2521 | } else { | |||
2522 | RegsToPass.push_back(std::make_pair(VA.getLocReg(), ValToCopy)); | |||
2523 | } | |||
2524 | ||||
2525 | // Add nodes to the DAG and add the values into the RetOps list | |||
2526 | for (auto &Reg : RegsToPass) { | |||
2527 | Chain = DAG.getCopyToReg(Chain, dl, Reg.first, Reg.second, Flag); | |||
2528 | Flag = Chain.getValue(1); | |||
2529 | RetOps.push_back(DAG.getRegister(Reg.first, Reg.second.getValueType())); | |||
2530 | } | |||
2531 | } | |||
2532 | ||||
2533 | // The Swift calling convention does not require us to copy the sret argument | |||
2534 | // into %rax/%eax for the return, and SRetReturnReg is not set for Swift. | |||
2535 | ||||
2536 | // All x86 ABIs require that for returning structs by value we copy | |||
2537 | // the sret argument into %rax/%eax (depending on ABI) for the return. | |||
2538 | // We saved the argument into a virtual register in the entry block, | |||
2539 | // so now we copy the value out and into %rax/%eax. | |||
2540 | // | |||
2541 | // Checking Function.hasStructRetAttr() here is insufficient because the IR | |||
2542 | // may not have an explicit sret argument. If FuncInfo.CanLowerReturn is | |||
2543 | // false, then an sret argument may be implicitly inserted in the SelDAG. In | |||
2544 | // either case FuncInfo->setSRetReturnReg() will have been called. | |||
2545 | if (unsigned SRetReg = FuncInfo->getSRetReturnReg()) { | |||
2546 | // When we have both sret and another return value, we should use the | |||
2547 | // original Chain stored in RetOps[0], instead of the current Chain updated | |||
2548 | // in the above loop. If we only have sret, RetOps[0] equals Chain. | |||
2549 | ||||
2550 | // For the case of sret and another return value, we have | |||
2551 | // Chain_0 at the function entry | |||
2552 | // Chain_1 = getCopyToReg(Chain_0) in the above loop | |||
2553 | // If we use Chain_1 in getCopyFromReg, we will have | |||
2554 | // Val = getCopyFromReg(Chain_1) | |||
2555 | // Chain_2 = getCopyToReg(Chain_1, Val) from below | |||
2556 | ||||
2557 | // getCopyToReg(Chain_0) will be glued together with | |||
2558 | // getCopyToReg(Chain_1, Val) into Unit A, getCopyFromReg(Chain_1) will be | |||
2559 | // in Unit B, and we will have cyclic dependency between Unit A and Unit B: | |||
2560 | // Data dependency from Unit B to Unit A due to usage of Val in | |||
2561 | // getCopyToReg(Chain_1, Val) | |||
2562 | // Chain dependency from Unit A to Unit B | |||
2563 | ||||
2564 | // So here, we use RetOps[0] (i.e. Chain_0) for getCopyFromReg. | |||
2565 | SDValue Val = DAG.getCopyFromReg(RetOps[0], dl, SRetReg, | |||
2566 | getPointerTy(MF.getDataLayout())); | |||
2567 | ||||
2568 | unsigned RetValReg | |||
2569 | = (Subtarget.is64Bit() && !Subtarget.isTarget64BitILP32()) ? | |||
2570 | X86::RAX : X86::EAX; | |||
2571 | Chain = DAG.getCopyToReg(Chain, dl, RetValReg, Val, Flag); | |||
2572 | Flag = Chain.getValue(1); | |||
2573 | ||||
2574 | // RAX/EAX now acts like a return value. | |||
2575 | RetOps.push_back( | |||
2576 | DAG.getRegister(RetValReg, getPointerTy(DAG.getDataLayout()))); | |||
2577 | ||||
2578 | // Add the returned register to the CalleeSaveDisableRegs list. | |||
2579 | if (ShouldDisableCalleeSavedRegister) | |||
2580 | MF.getRegInfo().disableCalleeSavedRegister(RetValReg); | |||
2581 | } | |||
2582 | ||||
2583 | const X86RegisterInfo *TRI = Subtarget.getRegisterInfo(); | |||
2584 | const MCPhysReg *I = | |||
2585 | TRI->getCalleeSavedRegsViaCopy(&DAG.getMachineFunction()); | |||
2586 | if (I) { | |||
2587 | for (; *I; ++I) { | |||
2588 | if (X86::GR64RegClass.contains(*I)) | |||
2589 | RetOps.push_back(DAG.getRegister(*I, MVT::i64)); | |||
2590 | else | |||
2591 | llvm_unreachable("Unexpected register class in CSRsViaCopy!"); | |||
2592 | } | |||
2593 | } | |||
2594 | ||||
2595 | RetOps[0] = Chain; // Update chain. | |||
2596 | ||||
2597 | // Add the flag if we have it. | |||
2598 | if (Flag.getNode()) | |||
2599 | RetOps.push_back(Flag); | |||
2600 | ||||
2601 | X86ISD::NodeType opcode = X86ISD::RET_FLAG; | |||
2602 | if (CallConv == CallingConv::X86_INTR) | |||
2603 | opcode = X86ISD::IRET; | |||
2604 | return DAG.getNode(opcode, dl, MVT::Other, RetOps); | |||
2605 | } | |||
2606 | ||||
2607 | bool X86TargetLowering::isUsedByReturnOnly(SDNode *N, SDValue &Chain) const { | |||
2608 | if (N->getNumValues() != 1 || !N->hasNUsesOfValue(1, 0)) | |||
2609 | return false; | |||
2610 | ||||
2611 | SDValue TCChain = Chain; | |||
2612 | SDNode *Copy = *N->use_begin(); | |||
2613 | if (Copy->getOpcode() == ISD::CopyToReg) { | |||
2614 | // If the copy has a glue operand, we conservatively assume it isn't safe to | |||
2615 | // perform a tail call. | |||
2616 | if (Copy->getOperand(Copy->getNumOperands()-1).getValueType() == MVT::Glue) | |||
2617 | return false; | |||
2618 | TCChain = Copy->getOperand(0); | |||
2619 | } else if (Copy->getOpcode() != ISD::FP_EXTEND) | |||
2620 | return false; | |||
2621 | ||||
2622 | bool HasRet = false; | |||
2623 | for (SDNode::use_iterator UI = Copy->use_begin(), UE = Copy->use_end(); | |||
2624 | UI != UE; ++UI) { | |||
2625 | if (UI->getOpcode() != X86ISD::RET_FLAG) | |||
2626 | return false; | |||
2627 | // If we are returning more than one value, we can definitely | |||
2628 | // not make a tail call; see PR19530. | |||
2629 | if (UI->getNumOperands() > 4) | |||
2630 | return false; | |||
2631 | if (UI->getNumOperands() == 4 && | |||
2632 | UI->getOperand(UI->getNumOperands()-1).getValueType() != MVT::Glue) | |||
2633 | return false; | |||
2634 | HasRet = true; | |||
2635 | } | |||
2636 | ||||
2637 | if (!HasRet) | |||
2638 | return false; | |||
2639 | ||||
2640 | Chain = TCChain; | |||
2641 | return true; | |||
2642 | } | |||
2643 | ||||
2644 | EVT X86TargetLowering::getTypeForExtReturn(LLVMContext &Context, EVT VT, | |||
2645 | ISD::NodeType ExtendKind) const { | |||
2646 | MVT ReturnMVT = MVT::i32; | |||
2647 | ||||
2648 | bool Darwin = Subtarget.getTargetTriple().isOSDarwin(); | |||
2649 | if (VT == MVT::i1 || (!Darwin && (VT == MVT::i8 || VT == MVT::i16))) { | |||
2650 | // The ABI does not require i1, i8 or i16 to be extended. | |||
2651 | // | |||
2652 | // On Darwin, there is code in the wild relying on Clang's old behaviour of | |||
2653 | // always extending i8/i16 return values, so keep doing that for now. | |||
2654 | // (PR26665). | |||
2655 | ReturnMVT = MVT::i8; | |||
2656 | } | |||
2657 | ||||
2658 | EVT MinVT = getRegisterType(Context, ReturnMVT); | |||
2659 | return VT.bitsLT(MinVT) ? MinVT : VT; | |||
2660 | } | |||
2661 | ||||
2662 | /// Reads two 32 bit registers and creates a 64 bit mask value. | |||
2663 | /// \param VA The current 32 bit value that needs to be assigned. | |||
2664 | /// \param NextVA The next 32 bit value that needs to be assigned. | |||
2665 | /// \param Root The parent DAG node. | |||
2666 | /// \param [in,out] InFlag Represents the SDValue in the parent DAG node used | |||
2667 | /// for glue purposes. If the DAG already uses a | |||
2668 | /// physical register rather than a virtual one, we | |||
2669 | /// glue our new SDValue to the InFlag SDValue. | |||
2670 | /// \return a new SDValue of size 64 bit. | |||
2671 | static SDValue getv64i1Argument(CCValAssign &VA, CCValAssign &NextVA, | |||
2672 | SDValue &Root, SelectionDAG &DAG, | |||
2673 | const SDLoc &Dl, const X86Subtarget &Subtarget, | |||
2674 | SDValue *InFlag = nullptr) { | |||
2675 | assert(Subtarget.hasBWI() && "Expected AVX512BW target!"); | |||
2676 | assert(Subtarget.is32Bit() && "Expecting 32 bit target"); | |||
2677 | assert(VA.getValVT() == MVT::v64i1 && | |||
2678 | "Expecting first location of 64 bit width type"); | |||
2679 | assert(NextVA.getValVT() == VA.getValVT() && | |||
2680 | "The locations should have the same type"); | |||
2681 | assert(VA.isRegLoc() && NextVA.isRegLoc() && | |||
2682 | "The values should reside in two registers"); | |||
2683 | ||||
2684 | SDValue Lo, Hi; | |||
2685 | unsigned Reg; | |||
2686 | SDValue ArgValueLo, ArgValueHi; | |||
2687 | ||||
2688 | MachineFunction &MF = DAG.getMachineFunction(); | |||
2689 | const TargetRegisterClass *RC = &X86::GR32RegClass; | |||
2690 | ||||
2691 | // Read a 32 bit value from the registers. | |||
2692 | if (nullptr == InFlag) { | |||
2693 | // When no physical register is present, | |||
2694 | // create an intermediate virtual register. | |||
2695 | Reg = MF.addLiveIn(VA.getLocReg(), RC); | |||
2696 | ArgValueLo = DAG.getCopyFromReg(Root, Dl, Reg, MVT::i32); | |||
2697 | Reg = MF.addLiveIn(NextVA.getLocReg(), RC); | |||
2698 | ArgValueHi = DAG.getCopyFromReg(Root, Dl, Reg, MVT::i32); | |||
2699 | } else { | |||
2700 | // When a physical register is available, read the value from it and glue | |||
2701 | // the reads together. | |||
2702 | ArgValueLo = | |||
2703 | DAG.getCopyFromReg(Root, Dl, VA.getLocReg(), MVT::i32, *InFlag); | |||
2704 | *InFlag = ArgValueLo.getValue(2); | |||
2705 | ArgValueHi = | |||
2706 | DAG.getCopyFromReg(Root, Dl, NextVA.getLocReg(), MVT::i32, *InFlag); | |||
2707 | *InFlag = ArgValueHi.getValue(2); | |||
2708 | } | |||
2709 | ||||
2710 | // Convert the low i32 into v32i1 type. | |||
2711 | Lo = DAG.getBitcast(MVT::v32i1, ArgValueLo); | |||
2712 | ||||
2713 | // Convert the high i32 into v32i1 type. | |||
2714 | Hi = DAG.getBitcast(MVT::v32i1, ArgValueHi); | |||
2715 | ||||
2716 | // Concatenate the two values together. | |||
2717 | return DAG.getNode(ISD::CONCAT_VECTORS, Dl, MVT::v64i1, Lo, Hi); | |||
2718 | } | |||
2719 | ||||
2720 | /// The function will lower a register of various sizes (8/16/32/64) | |||
2721 | /// to a mask value of the expected size (v8i1/v16i1/v32i1/v64i1). | |||
2722 | /// \returns a DAG node containing the operand after lowering to mask type. | |||
2723 | static SDValue lowerRegToMasks(const SDValue &ValArg, const EVT &ValVT, | |||
2724 | const EVT &ValLoc, const SDLoc &Dl, | |||
2725 | SelectionDAG &DAG) { | |||
2726 | SDValue ValReturned = ValArg; | |||
2727 | ||||
2728 | if (ValVT == MVT::v1i1) | |||
2729 | return DAG.getNode(ISD::SCALAR_TO_VECTOR, Dl, MVT::v1i1, ValReturned); | |||
2730 | ||||
2731 | if (ValVT == MVT::v64i1) { | |||
2732 | // On 32 bit machines this case is handled by getv64i1Argument. | |||
2733 | assert(ValLoc == MVT::i64 && "Expecting only i64 locations"); | |||
2734 | // On 64 bit machines there is no need to truncate; a bitcast suffices. | |||
2735 | } else { | |||
2736 | MVT maskLen; | |||
2737 | switch (ValVT.getSimpleVT().SimpleTy) { | |||
2738 | case MVT::v8i1: | |||
2739 | maskLen = MVT::i8; | |||
2740 | break; | |||
2741 | case MVT::v16i1: | |||
2742 | maskLen = MVT::i16; | |||
2743 | break; | |||
2744 | case MVT::v32i1: | |||
2745 | maskLen = MVT::i32; | |||
2746 | break; | |||
2747 | default: | |||
2748 | llvm_unreachable("Expecting a vector of i1 types"); | |||
2749 | } | |||
2750 | ||||
2751 | ValReturned = DAG.getNode(ISD::TRUNCATE, Dl, maskLen, ValReturned); | |||
2752 | } | |||
2753 | return DAG.getBitcast(ValVT, ValReturned); | |||
2754 | } | |||
2755 | ||||
2756 | /// Lower the result values of a call into the | |||
2757 | /// appropriate copies out of the corresponding physical registers. | |||
2758 | /// | |||
2759 | SDValue X86TargetLowering::LowerCallResult( | |||
2760 | SDValue Chain, SDValue InFlag, CallingConv::ID CallConv, bool isVarArg, | |||
2761 | const SmallVectorImpl<ISD::InputArg> &Ins, const SDLoc &dl, | |||
2762 | SelectionDAG &DAG, SmallVectorImpl<SDValue> &InVals, | |||
2763 | uint32_t *RegMask) const { | |||
2764 | ||||
2765 | const TargetRegisterInfo *TRI = Subtarget.getRegisterInfo(); | |||
2766 | // Assign locations to each value returned by this call. | |||
2767 | SmallVector<CCValAssign, 16> RVLocs; | |||
2768 | bool Is64Bit = Subtarget.is64Bit(); | |||
2769 | CCState CCInfo(CallConv, isVarArg, DAG.getMachineFunction(), RVLocs, | |||
2770 | *DAG.getContext()); | |||
2771 | CCInfo.AnalyzeCallResult(Ins, RetCC_X86); | |||
2772 | ||||
2773 | // Copy all of the result registers out of their specified physreg. | |||
2774 | for (unsigned I = 0, InsIndex = 0, E = RVLocs.size(); I != E; | |||
2775 | ++I, ++InsIndex) { | |||
2776 | CCValAssign &VA = RVLocs[I]; | |||
2777 | EVT CopyVT = VA.getLocVT(); | |||
2778 | ||||
2779 | // In some calling conventions we need to remove the used registers | |||
2780 | // from the register mask. | |||
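     | // A register mask is an array of 32-bit chunks with one bit per register; | |||
     | // a set bit means the register is preserved across the call. Clearing the | |||
     | // bit for the returning register (and each of its sub-registers) removes | |||
     | // it from the preserved set. | |||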
2781 | if (RegMask) { | |||
2782 | for (MCSubRegIterator SubRegs(VA.getLocReg(), TRI, /*IncludeSelf=*/true); | |||
2783 | SubRegs.isValid(); ++SubRegs) | |||
2784 | RegMask[*SubRegs / 32] &= ~(1u << (*SubRegs % 32)); | |||
2785 | } | |||
2786 | ||||
2787 | // If this is x86-64, and we disabled SSE, we can't return FP values | |||
2788 | if ((CopyVT == MVT::f32 || CopyVT == MVT::f64 || CopyVT == MVT::f128) && | |||
2789 | ((Is64Bit || Ins[InsIndex].Flags.isInReg()) && !Subtarget.hasSSE1())) { | |||
2790 | errorUnsupported(DAG, dl, "SSE register return with SSE disabled"); | |||
2791 | VA.convertToReg(X86::FP0); // Set reg to FP0, avoid hitting asserts. | |||
2792 | } | |||
2793 | ||||
2794 | // If we prefer to use the value in xmm registers, copy it out as f80 and | |||
2795 | // use a truncate to move it from fp stack reg to xmm reg. | |||
2796 | bool RoundAfterCopy = false; | |||
2797 | if ((VA.getLocReg() == X86::FP0 || VA.getLocReg() == X86::FP1) && | |||
2798 | isScalarFPTypeInSSEReg(VA.getValVT())) { | |||
2799 | if (!Subtarget.hasX87()) | |||
2800 | report_fatal_error("X87 register return with X87 disabled"); | |||
2801 | CopyVT = MVT::f80; | |||
2802 | RoundAfterCopy = (CopyVT != VA.getLocVT()); | |||
2803 | } | |||
2804 | ||||
2805 | SDValue Val; | |||
2806 | if (VA.needsCustom()) { | |||
2807 | assert(VA.getValVT() == MVT::v64i1 && | |||
2808 | "Currently the only custom case is when we split v64i1 to 2 regs"); | |||
2809 | Val = | |||
2810 | getv64i1Argument(VA, RVLocs[++I], Chain, DAG, dl, Subtarget, &InFlag); | |||
2811 | } else { | |||
2812 | Chain = DAG.getCopyFromReg(Chain, dl, VA.getLocReg(), CopyVT, InFlag) | |||
2813 | .getValue(1); | |||
2814 | Val = Chain.getValue(0); | |||
2815 | InFlag = Chain.getValue(2); | |||
2816 | } | |||
2817 | ||||
2818 | if (RoundAfterCopy) | |||
2819 | Val = DAG.getNode(ISD::FP_ROUND, dl, VA.getValVT(), Val, | |||
2820 | // This truncation won't change the value. | |||
2821 | DAG.getIntPtrConstant(1, dl)); | |||
2822 | ||||
2823 | if (VA.isExtInLoc() && (VA.getValVT().getScalarType() == MVT::i1)) { | |||
2824 | if (VA.getValVT().isVector() && | |||
2825 | ((VA.getLocVT() == MVT::i64) || (VA.getLocVT() == MVT::i32) || | |||
2826 | (VA.getLocVT() == MVT::i16) || (VA.getLocVT() == MVT::i8))) { | |||
2827 | // promoting a mask type (v*i1) into a register of type i64/i32/i16/i8 | |||
2828 | Val = lowerRegToMasks(Val, VA.getValVT(), VA.getLocVT(), dl, DAG); | |||
2829 | } else | |||
2830 | Val = DAG.getNode(ISD::TRUNCATE, dl, VA.getValVT(), Val); | |||
2831 | } | |||
2832 | ||||
2833 | InVals.push_back(Val); | |||
2834 | } | |||
2835 | ||||
2836 | return Chain; | |||
2837 | } | |||
2838 | ||||
2839 | //===----------------------------------------------------------------------===// | |||
2840 | // C & StdCall & Fast Calling Convention implementation | |||
2841 | //===----------------------------------------------------------------------===// | |||
2842 | // The StdCall calling convention is standard for many Windows API routines. | |||
2843 | // It differs from the C calling convention only a little: the callee cleans | |||
2844 | // up the stack rather than the caller, and symbols are decorated with an | |||
2845 | // argument-byte-count suffix (e.g. "_func@12"). It doesn't support vector arguments. | |||
2846 | // For info on fast calling convention see Fast Calling Convention (tail call) | |||
2847 | // implementation LowerX86_32FastCCCallTo. | |||
2848 | ||||
2849 | /// CallIsStructReturn - Determines whether a call uses struct return | |||
2850 | /// semantics. | |||
2851 | enum StructReturnType { | |||
2852 | NotStructReturn, | |||
2853 | RegStructReturn, | |||
2854 | StackStructReturn | |||
2855 | }; | |||
2856 | static StructReturnType | |||
2857 | callIsStructReturn(ArrayRef<ISD::OutputArg> Outs, bool IsMCU) { | |||
2858 | if (Outs.empty()) | |||
2859 | return NotStructReturn; | |||
2860 | ||||
2861 | const ISD::ArgFlagsTy &Flags = Outs[0].Flags; | |||
2862 | if (!Flags.isSRet()) | |||
2863 | return NotStructReturn; | |||
2864 | if (Flags.isInReg() || IsMCU) | |||
2865 | return RegStructReturn; | |||
2866 | return StackStructReturn; | |||
2867 | } | |||
2868 | ||||
2869 | /// Determines whether a function uses struct return semantics. | |||
2870 | static StructReturnType | |||
2871 | argsAreStructReturn(ArrayRef<ISD::InputArg> Ins, bool IsMCU) { | |||
2872 | if (Ins.empty()) | |||
2873 | return NotStructReturn; | |||
2874 | ||||
2875 | const ISD::ArgFlagsTy &Flags = Ins[0].Flags; | |||
2876 | if (!Flags.isSRet()) | |||
2877 | return NotStructReturn; | |||
2878 | if (Flags.isInReg() || IsMCU) | |||
2879 | return RegStructReturn; | |||
2880 | return StackStructReturn; | |||
2881 | } | |||
2882 | ||||
2883 | /// Make a copy of an aggregate at the address specified by "Src" to the | |||
2884 | /// address "Dst" with size and alignment information specified by the | |||
2885 | /// parameter attribute. The copy will be passed as a byval function parameter. | |||
2886 | static SDValue CreateCopyOfByValArgument(SDValue Src, SDValue Dst, | |||
2887 | SDValue Chain, ISD::ArgFlagsTy Flags, | |||
2888 | SelectionDAG &DAG, const SDLoc &dl) { | |||
2889 | SDValue SizeNode = DAG.getConstant(Flags.getByValSize(), dl, MVT::i32); | |||
2890 | ||||
2891 | return DAG.getMemcpy(Chain, dl, Dst, Src, SizeNode, Flags.getByValAlign(), | |||
2892 | /*isVolatile*/false, /*AlwaysInline=*/true, | |||
2893 | /*isTailCall*/false, | |||
2894 | MachinePointerInfo(), MachinePointerInfo()); | |||
2895 | } | |||
2896 | ||||
2897 | /// Return true if the calling convention is one that we can guarantee TCO for. | |||
2898 | static bool canGuaranteeTCO(CallingConv::ID CC) { | |||
2899 | return (CC == CallingConv::Fast || CC == CallingConv::GHC || | |||
2900 | CC == CallingConv::X86_RegCall || CC == CallingConv::HiPE || | |||
2901 | CC == CallingConv::HHVM); | |||
2902 | } | |||
2903 | ||||
2904 | /// Return true if we might ever do TCO for calls with this calling convention. | |||
2905 | static bool mayTailCallThisCC(CallingConv::ID CC) { | |||
2906 | switch (CC) { | |||
2907 | // C calling conventions: | |||
2908 | case CallingConv::C: | |||
2909 | case CallingConv::Win64: | |||
2910 | case CallingConv::X86_64_SysV: | |||
2911 | // Callee pop conventions: | |||
2912 | case CallingConv::X86_ThisCall: | |||
2913 | case CallingConv::X86_StdCall: | |||
2914 | case CallingConv::X86_VectorCall: | |||
2915 | case CallingConv::X86_FastCall: | |||
2916 | return true; | |||
2917 | default: | |||
2918 | return canGuaranteeTCO(CC); | |||
2919 | } | |||
2920 | } | |||
2921 | ||||
2922 | /// Return true if the function is being made into a tailcall target by | |||
2923 | /// changing its ABI. | |||
2924 | static bool shouldGuaranteeTCO(CallingConv::ID CC, bool GuaranteedTailCallOpt) { | |||
2925 | return GuaranteedTailCallOpt && canGuaranteeTCO(CC); | |||
2926 | } | |||
2927 | ||||
2928 | bool X86TargetLowering::mayBeEmittedAsTailCall(const CallInst *CI) const { | |||
2929 | auto Attr = | |||
2930 | CI->getParent()->getParent()->getFnAttribute("disable-tail-calls"); | |||
2931 | if (!CI->isTailCall() || Attr.getValueAsString() == "true") | |||
2932 | return false; | |||
2933 | ||||
2934 | ImmutableCallSite CS(CI); | |||
2935 | CallingConv::ID CalleeCC = CS.getCallingConv(); | |||
2936 | if (!mayTailCallThisCC(CalleeCC)) | |||
2937 | return false; | |||
2938 | ||||
2939 | return true; | |||
2940 | } | |||
2941 | ||||
2942 | SDValue | |||
2943 | X86TargetLowering::LowerMemArgument(SDValue Chain, CallingConv::ID CallConv, | |||
2944 | const SmallVectorImpl<ISD::InputArg> &Ins, | |||
2945 | const SDLoc &dl, SelectionDAG &DAG, | |||
2946 | const CCValAssign &VA, | |||
2947 | MachineFrameInfo &MFI, unsigned i) const { | |||
2948 | // Create the nodes corresponding to a load from this parameter slot. | |||
2949 | ISD::ArgFlagsTy Flags = Ins[i].Flags; | |||
2950 | bool AlwaysUseMutable = shouldGuaranteeTCO( | |||
2951 | CallConv, DAG.getTarget().Options.GuaranteedTailCallOpt); | |||
2952 | bool isImmutable = !AlwaysUseMutable && !Flags.isByVal(); | |||
2953 | EVT ValVT; | |||
2954 | MVT PtrVT = getPointerTy(DAG.getDataLayout()); | |||
2955 | ||||
2956 | // If the value is passed by pointer, we have the address passed instead of | |||
2957 | // the value itself. No need to extend if the mask value and location share | |||
2958 | // the same absolute size. | |||
2959 | bool ExtendedInMem = | |||
2960 | VA.isExtInLoc() && VA.getValVT().getScalarType() == MVT::i1 && | |||
2961 | VA.getValVT().getSizeInBits() != VA.getLocVT().getSizeInBits(); | |||
2962 | ||||
2963 | if (VA.getLocInfo() == CCValAssign::Indirect || ExtendedInMem) | |||
2964 | ValVT = VA.getLocVT(); | |||
2965 | else | |||
2966 | ValVT = VA.getValVT(); | |||
2967 | ||||
2968 | // Calculate the SP offset of an interrupt parameter, re-purposing the slot | |||
2969 | // normally taken by a return address. | |||
2970 | int Offset = 0; | |||
2971 | if (CallConv == CallingConv::X86_INTR) { | |||
2972 | // X86 interrupts may take one or two arguments. | |||
2973 | // On the stack there will be no return address as in a regular call. | |||
2974 | // The offset of the last argument needs to be set to -4/-8 bytes, | |||
2975 | // while the offset of the first argument out of two should be set to 0. | |||
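     | // Sketch of how the formula below evaluates on a 64-bit target: with two | |||
     | // arguments, i == 0 gives 8 * ((1 % 2) - 1) == 0 and i == 1 gives | |||
     | // 8 * ((2 % 2) - 1) == -8; with a single argument, i == 0 gives -8. | |||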
2976 | Offset = (Subtarget.is64Bit() ? 8 : 4) * ((i + 1) % Ins.size() - 1); | |||
2977 | if (Subtarget.is64Bit() && Ins.size() == 2) { | |||
2978 | // The stack pointer needs to be realigned for 64 bit handlers with error | |||
2979 | // code, so the argument offset changes by 8 bytes. | |||
2980 | Offset += 8; | |||
2981 | } | |||
2982 | } | |||
2983 | ||||
2984 | // FIXME: For now, all byval parameter objects are marked mutable. This can be | |||
2985 | // changed with more analysis. | |||
2986 | // In case of tail call optimization, mark all arguments mutable, since they | |||
2987 | // could be overwritten by the lowering of arguments in case of a tail call. | |||
2988 | if (Flags.isByVal()) { | |||
2989 | unsigned Bytes = Flags.getByValSize(); | |||
2990 | if (Bytes == 0) Bytes = 1; // Don't create zero-sized stack objects. | |||
2991 | ||||
2992 | // FIXME: For now, all byval parameter objects are marked as aliasing. This | |||
2993 | // can be improved with deeper analysis. | |||
2994 | int FI = MFI.CreateFixedObject(Bytes, VA.getLocMemOffset(), isImmutable, | |||
2995 | /*isAliased=*/true); | |||
2996 | // Adjust SP offset of interrupt parameter. | |||
2997 | if (CallConv == CallingConv::X86_INTR) { | |||
2998 | MFI.setObjectOffset(FI, Offset); | |||
2999 | } | |||
3000 | return DAG.getFrameIndex(FI, PtrVT); | |||
3001 | } | |||
3002 | ||||
3003 | // This is an argument in memory. We might be able to perform copy elision. | |||
3004 | if (Flags.isCopyElisionCandidate()) { | |||
3005 | EVT ArgVT = Ins[i].ArgVT; | |||
3006 | SDValue PartAddr; | |||
3007 | if (Ins[i].PartOffset == 0) { | |||
3008 | // If this is a one-part value or the first part of a multi-part value, | |||
3009 | // create a stack object for the entire argument value type and return a | |||
3010 | // load from our portion of it. This assumes that if the first part of an | |||
3011 | // argument is in memory, the rest will also be in memory. | |||
3012 | int FI = MFI.CreateFixedObject(ArgVT.getStoreSize(), VA.getLocMemOffset(), | |||
3013 | /*Immutable=*/false); | |||
3014 | PartAddr = DAG.getFrameIndex(FI, PtrVT); | |||
3015 | return DAG.getLoad( | |||
3016 | ValVT, dl, Chain, PartAddr, | |||
3017 | MachinePointerInfo::getFixedStack(DAG.getMachineFunction(), FI)); | |||
3018 | } else { | |||
3019 | // This is not the first piece of an argument in memory. See if there is | |||
3020 | // already a fixed stack object including this offset. If so, assume it | |||
3021 | // was created by the PartOffset == 0 branch above and create a load from | |||
3022 | // the appropriate offset into it. | |||
3023 | int64_t PartBegin = VA.getLocMemOffset(); | |||
3024 | int64_t PartEnd = PartBegin + ValVT.getSizeInBits() / 8; | |||
3025 | int FI = MFI.getObjectIndexBegin(); | |||
3026 | for (; MFI.isFixedObjectIndex(FI); ++FI) { | |||
3027 | int64_t ObjBegin = MFI.getObjectOffset(FI); | |||
3028 | int64_t ObjEnd = ObjBegin + MFI.getObjectSize(FI); | |||
3029 | if (ObjBegin <= PartBegin && PartEnd <= ObjEnd) | |||
3030 | break; | |||
3031 | } | |||
3032 | if (MFI.isFixedObjectIndex(FI)) { | |||
3033 | SDValue Addr = | |||
3034 | DAG.getNode(ISD::ADD, dl, PtrVT, DAG.getFrameIndex(FI, PtrVT), | |||
3035 | DAG.getIntPtrConstant(Ins[i].PartOffset, dl)); | |||
3036 | return DAG.getLoad( | |||
3037 | ValVT, dl, Chain, Addr, | |||
3038 | MachinePointerInfo::getFixedStack(DAG.getMachineFunction(), FI, | |||
3039 | Ins[i].PartOffset)); | |||
3040 | } | |||
3041 | } | |||
3042 | } | |||
3043 | ||||
3044 | int FI = MFI.CreateFixedObject(ValVT.getSizeInBits() / 8, | |||
3045 | VA.getLocMemOffset(), isImmutable); | |||
3046 | ||||
3047 | // Set SExt or ZExt flag. | |||
3048 | if (VA.getLocInfo() == CCValAssign::ZExt) { | |||
3049 | MFI.setObjectZExt(FI, true); | |||
3050 | } else if (VA.getLocInfo() == CCValAssign::SExt) { | |||
3051 | MFI.setObjectSExt(FI, true); | |||
3052 | } | |||
3053 | ||||
3054 | // Adjust SP offset of interrupt parameter. | |||
3055 | if (CallConv == CallingConv::X86_INTR) { | |||
3056 | MFI.setObjectOffset(FI, Offset); | |||
3057 | } | |||
3058 | ||||
3059 | SDValue FIN = DAG.getFrameIndex(FI, PtrVT); | |||
3060 | SDValue Val = DAG.getLoad( | |||
3061 | ValVT, dl, Chain, FIN, | |||
3062 | MachinePointerInfo::getFixedStack(DAG.getMachineFunction(), FI)); | |||
3063 | return ExtendedInMem | |||
3064 | ? (VA.getValVT().isVector() | |||
3065 | ? DAG.getNode(ISD::SCALAR_TO_VECTOR, dl, VA.getValVT(), Val) | |||
3066 | : DAG.getNode(ISD::TRUNCATE, dl, VA.getValVT(), Val)) | |||
3067 | : Val; | |||
3068 | } | |||
3069 | ||||
3070 | // FIXME: Get this from tablegen. | |||
3071 | static ArrayRef<MCPhysReg> get64BitArgumentGPRs(CallingConv::ID CallConv, | |||
3072 | const X86Subtarget &Subtarget) { | |||
3073 | assert(Subtarget.is64Bit()); | |||
3074 | ||||
3075 | if (Subtarget.isCallingConvWin64(CallConv)) { | |||
3076 | static const MCPhysReg GPR64ArgRegsWin64[] = { | |||
3077 | X86::RCX, X86::RDX, X86::R8, X86::R9 | |||
3078 | }; | |||
3079 | return makeArrayRef(std::begin(GPR64ArgRegsWin64), std::end(GPR64ArgRegsWin64)); | |||
3080 | } | |||
3081 | ||||
3082 | static const MCPhysReg GPR64ArgRegs64Bit[] = { | |||
3083 | X86::RDI, X86::RSI, X86::RDX, X86::RCX, X86::R8, X86::R9 | |||
3084 | }; | |||
3085 | return makeArrayRef(std::begin(GPR64ArgRegs64Bit), std::end(GPR64ArgRegs64Bit)); | |||
3086 | } | |||
3087 | ||||
3088 | // FIXME: Get this from tablegen. | |||
3089 | static ArrayRef<MCPhysReg> get64BitArgumentXMMs(MachineFunction &MF, | |||
3090 | CallingConv::ID CallConv, | |||
3091 | const X86Subtarget &Subtarget) { | |||
3092 | assert(Subtarget.is64Bit()); | |||
3093 | if (Subtarget.isCallingConvWin64(CallConv)) { | |||
3094 | // The XMM registers which might contain var arg parameters are shadowed | |||
3095 | // by their paired GPRs. So we only need to save the GPRs to their home | |||
3096 | // slots. | |||
3097 | // TODO: __vectorcall will change this. | |||
3098 | return None; | |||
3099 | } | |||
3100 | ||||
3101 | const Function &F = MF.getFunction(); | |||
3102 | bool NoImplicitFloatOps = F.hasFnAttribute(Attribute::NoImplicitFloat); | |||
3103 | bool isSoftFloat = Subtarget.useSoftFloat(); | |||
3104 | assert(!(isSoftFloat && NoImplicitFloatOps) && | |||
3105 | "SSE register cannot be used when SSE is disabled!"); | |||
3106 | if (isSoftFloat || NoImplicitFloatOps || !Subtarget.hasSSE1()) | |||
3107 | // Kernel mode asks for SSE to be disabled, so there are no XMM argument | |||
3108 | // registers. | |||
3109 | return None; | |||
3110 | ||||
3111 | static const MCPhysReg XMMArgRegs64Bit[] = { | |||
3112 | X86::XMM0, X86::XMM1, X86::XMM2, X86::XMM3, | |||
3113 | X86::XMM4, X86::XMM5, X86::XMM6, X86::XMM7 | |||
3114 | }; | |||
3115 | return makeArrayRef(std::begin(XMMArgRegs64Bit), std::end(XMMArgRegs64Bit)); | |||
3116 | } | |||
3117 | ||||
3118 | #ifndef NDEBUG | |||
3119 | static bool isSortedByValueNo(ArrayRef<CCValAssign> ArgLocs) { | |||
3120 | return std::is_sorted(ArgLocs.begin(), ArgLocs.end(), | |||
3121 | [](const CCValAssign &A, const CCValAssign &B) -> bool { | |||
3122 | return A.getValNo() < B.getValNo(); | |||
3123 | }); | |||
3124 | } | |||
3125 | #endif | |||
3126 | ||||
3127 | SDValue X86TargetLowering::LowerFormalArguments( | |||
3128 | SDValue Chain, CallingConv::ID CallConv, bool isVarArg, | |||
3129 | const SmallVectorImpl<ISD::InputArg> &Ins, const SDLoc &dl, | |||
3130 | SelectionDAG &DAG, SmallVectorImpl<SDValue> &InVals) const { | |||
3131 | MachineFunction &MF = DAG.getMachineFunction(); | |||
3132 | X86MachineFunctionInfo *FuncInfo = MF.getInfo<X86MachineFunctionInfo>(); | |||
3133 | const TargetFrameLowering &TFI = *Subtarget.getFrameLowering(); | |||
3134 | ||||
3135 | const Function &F = MF.getFunction(); | |||
3136 | if (F.hasExternalLinkage() && Subtarget.isTargetCygMing() && | |||
3137 | F.getName() == "main") | |||
3138 | FuncInfo->setForceFramePointer(true); | |||
3139 | ||||
3140 | MachineFrameInfo &MFI = MF.getFrameInfo(); | |||
3141 | bool Is64Bit = Subtarget.is64Bit(); | |||
3142 | bool IsWin64 = Subtarget.isCallingConvWin64(CallConv); | |||
3143 | ||||
3144 | assert( | |||
3145 | !(isVarArg && canGuaranteeTCO(CallConv)) && | |||
3146 | "Var args not supported with calling conv' regcall, fastcc, ghc or hipe"); | |||
3147 | ||||
3148 | if (CallConv == CallingConv::X86_INTR) { | |||
3149 | bool isLegal = Ins.size() == 1 || | |||
3150 | (Ins.size() == 2 && ((Is64Bit && Ins[1].VT == MVT::i64) || | |||
3151 | (!Is64Bit && Ins[1].VT == MVT::i32))); | |||
3152 | if (!isLegal) | |||
3153 | report_fatal_error("X86 interrupts may take one or two arguments"); | |||
3154 | } | |||
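// [Editor's note] The two handler shapes the arity/type check above
// accepts, sketched with Clang/GCC's x86 interrupt attribute: one pointer
// argument, plus an optional word-sized error code (i64 in 64-bit mode,
// i32 in 32-bit mode). The frame struct is deliberately opaque here; real
// handlers are typically built so they touch no FP/SSE state.
struct InterruptFrame; // hardware-defined layout, illustrative only

__attribute__((interrupt))
void OnTimer(InterruptFrame *Frame) {}            // Ins.size() == 1

__attribute__((interrupt))
void OnPageFault(InterruptFrame *Frame,
                 unsigned long long ErrorCode) {} // Ins.size() == 2, x86-64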
3155 | ||||
3156 | // Assign locations to all of the incoming arguments. | |||
3157 | SmallVector<CCValAssign, 16> ArgLocs; | |||
3158 | CCState CCInfo(CallConv, isVarArg, MF, ArgLocs, *DAG.getContext()); | |||
3159 | ||||
3160 | // Allocate shadow area for Win64. | |||
3161 | if (IsWin64) | |||
3162 | CCInfo.AllocateStack(32, 8); | |||
3163 | ||||
3164 | CCInfo.AnalyzeArguments(Ins, CC_X86); | |||
3165 | ||||
3166 | // In the vectorcall calling convention, a second pass is required for the HVA | |||
3167 | // types. | |||
3168 | if (CallingConv::X86_VectorCall == CallConv) { | |||
3169 | CCInfo.AnalyzeArgumentsSecondPass(Ins, CC_X86); | |||
3170 | } | |||
3171 | ||||
3172 | // The next loop assumes that the locations are in the same order as the | |||
3173 | // input arguments. | |||
3174 | assert(isSortedByValueNo(ArgLocs) && | |||
3175 | "Argument Location list must be sorted before lowering"); | |||
3176 | ||||
3177 | SDValue ArgValue; | |||
3178 | for (unsigned I = 0, InsIndex = 0, E = ArgLocs.size(); I != E; | |||
3179 | ++I, ++InsIndex) { | |||
3180 | assert(InsIndex < Ins.size() && "Invalid Ins index"); | |||
3181 | CCValAssign &VA = ArgLocs[I]; | |||
3182 | ||||
3183 | if (VA.isRegLoc()) { | |||
3184 | EVT RegVT = VA.getLocVT(); | |||
3185 | if (VA.needsCustom()) { | |||
3186 | assert( | |||
3187 | VA.getValVT() == MVT::v64i1 && | |||
3188 | "Currently the only custom case is when we split v64i1 to 2 regs"); | |||
3189 | ||||
3190 | // In the regcall calling convention, v64i1 values that are | |||
3191 | // compiled for a 32-bit arch are split up into two registers. | |||
3192 | ArgValue = | |||
3193 | getv64i1Argument(VA, ArgLocs[++I], Chain, DAG, dl, Subtarget); | |||
3194 | } else { | |||
3195 | const TargetRegisterClass *RC; | |||
3196 | if (RegVT == MVT::i8) | |||
3197 | RC = &X86::GR8RegClass; | |||
3198 | else if (RegVT == MVT::i16) | |||
3199 | RC = &X86::GR16RegClass; | |||
3200 | else if (RegVT == MVT::i32) | |||
3201 | RC = &X86::GR32RegClass; | |||
3202 | else if (Is64Bit && RegVT == MVT::i64) | |||
3203 | RC = &X86::GR64RegClass; | |||
3204 | else if (RegVT == MVT::f32) | |||
3205 | RC = Subtarget.hasAVX512() ? &X86::FR32XRegClass : &X86::FR32RegClass; | |||
3206 | else if (RegVT == MVT::f64) | |||
3207 | RC = Subtarget.hasAVX512() ? &X86::FR64XRegClass : &X86::FR64RegClass; | |||
3208 | else if (RegVT == MVT::f80) | |||
3209 | RC = &X86::RFP80RegClass; | |||
3210 | else if (RegVT == MVT::f128) | |||
3211 | RC = &X86::VR128RegClass; | |||
3212 | else if (RegVT.is512BitVector()) | |||
3213 | RC = &X86::VR512RegClass; | |||
3214 | else if (RegVT.is256BitVector()) | |||
3215 | RC = Subtarget.hasVLX() ? &X86::VR256XRegClass : &X86::VR256RegClass; | |||
3216 | else if (RegVT.is128BitVector()) | |||
3217 | RC = Subtarget.hasVLX() ? &X86::VR128XRegClass : &X86::VR128RegClass; | |||
3218 | else if (RegVT == MVT::x86mmx) | |||
3219 | RC = &X86::VR64RegClass; | |||
3220 | else if (RegVT == MVT::v1i1) | |||
3221 | RC = &X86::VK1RegClass; | |||
3222 | else if (RegVT == MVT::v8i1) | |||
3223 | RC = &X86::VK8RegClass; | |||
3224 | else if (RegVT == MVT::v16i1) | |||
3225 | RC = &X86::VK16RegClass; | |||
3226 | else if (RegVT == MVT::v32i1) | |||
3227 | RC = &X86::VK32RegClass; | |||
3228 | else if (RegVT == MVT::v64i1) | |||
3229 | RC = &X86::VK64RegClass; | |||
3230 | else | |||
3231 | llvm_unreachable("Unknown argument type!"); | |||
3232 | ||||
3233 | unsigned Reg = MF.addLiveIn(VA.getLocReg(), RC); | |||
3234 | ArgValue = DAG.getCopyFromReg(Chain, dl, Reg, RegVT); | |||
3235 | } | |||
3236 | ||||
3237 | // If this is an 8 or 16-bit value, it is really passed promoted to 32 | |||
3238 | // bits. Insert an assert[sz]ext to capture this, then truncate to the | |||
3239 | // right size. | |||
3240 | if (VA.getLocInfo() == CCValAssign::SExt) | |||
3241 | ArgValue = DAG.getNode(ISD::AssertSext, dl, RegVT, ArgValue, | |||
3242 | DAG.getValueType(VA.getValVT())); | |||
3243 | else if (VA.getLocInfo() == CCValAssign::ZExt) | |||
3244 | ArgValue = DAG.getNode(ISD::AssertZext, dl, RegVT, ArgValue, | |||
3245 | DAG.getValueType(VA.getValVT())); | |||
3246 | else if (VA.getLocInfo() == CCValAssign::BCvt) | |||
3247 | ArgValue = DAG.getBitcast(VA.getValVT(), ArgValue); | |||
3248 | ||||
3249 | if (VA.isExtInLoc()) { | |||
3250 | // Handle MMX values passed in XMM regs. | |||
3251 | if (RegVT.isVector() && VA.getValVT().getScalarType() != MVT::i1) | |||
3252 | ArgValue = DAG.getNode(X86ISD::MOVDQ2Q, dl, VA.getValVT(), ArgValue); | |||
3253 | else if (VA.getValVT().isVector() && | |||
3254 | VA.getValVT().getScalarType() == MVT::i1 && | |||
3255 | ((VA.getLocVT() == MVT::i64) || (VA.getLocVT() == MVT::i32) || | |||
3256 | (VA.getLocVT() == MVT::i16) || (VA.getLocVT() == MVT::i8))) { | |||
3257 | // Promoting a mask type (v*i1) into a register of type i64/i32/i16/i8 | |||
3258 | ArgValue = lowerRegToMasks(ArgValue, VA.getValVT(), RegVT, dl, DAG); | |||
3259 | } else | |||
3260 | ArgValue = DAG.getNode(ISD::TRUNCATE, dl, VA.getValVT(), ArgValue); | |||
3261 | } | |||
3262 | } else { | |||
3263 | assert(VA.isMemLoc()); | |||
3264 | ArgValue = | |||
3265 | LowerMemArgument(Chain, CallConv, Ins, dl, DAG, VA, MFI, InsIndex); | |||
3266 | } | |||
3267 | ||||
3268 | // If value is passed via pointer - do a load. | |||
3269 | if (VA.getLocInfo() == CCValAssign::Indirect && !Ins[I].Flags.isByVal()) | |||
3270 | ArgValue = | |||
3271 | DAG.getLoad(VA.getValVT(), dl, Chain, ArgValue, MachinePointerInfo()); | |||
3272 | ||||
3273 | InVals.push_back(ArgValue); | |||
3274 | } | |||
3275 | ||||
3276 | for (unsigned I = 0, E = Ins.size(); I != E; ++I) { | |||
3277 | // Swift calling convention does not require we copy the sret argument | |||
3278 | // into %rax/%eax for the return. We don't set SRetReturnReg for Swift. | |||
3279 | if (CallConv == CallingConv::Swift) | |||
3280 | continue; | |||
3281 | ||||
3282 | // All x86 ABIs require that for returning structs by value we copy the | |||
3283 | // sret argument into %rax/%eax (depending on ABI) for the return. Save | |||
3284 | // the argument into a virtual register so that we can access it from the | |||
3285 | // return points. | |||
3286 | if (Ins[I].Flags.isSRet()) { | |||
3287 | unsigned Reg = FuncInfo->getSRetReturnReg(); | |||
3288 | if (!Reg) { | |||
3289 | MVT PtrTy = getPointerTy(DAG.getDataLayout()); | |||
3290 | Reg = MF.getRegInfo().createVirtualRegister(getRegClassFor(PtrTy)); | |||
3291 | FuncInfo->setSRetReturnReg(Reg); | |||
3292 | } | |||
3293 | SDValue Copy = DAG.getCopyToReg(DAG.getEntryNode(), dl, Reg, InVals[I]); | |||
3294 | Chain = DAG.getNode(ISD::TokenFactor, dl, MVT::Other, Copy, Chain); | |||
3295 | break; | |||
3296 | } | |||
3297 | } | |||
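// [Editor's note] What the sret loop above supports, as a standalone
// sketch: a by-value struct return is lowered to a hidden pointer
// argument, and x86 ABIs require that pointer to be returned in %rax/%eax,
// hence the copy into SRetReturnReg reachable from every return point.
// 'Big' is a made-up type large enough to be returned via sret rather
// than in registers.
struct Big { long A[8]; };

Big MakeBig() {   // effectively: void MakeBig(Big *sret Result)
  Big B = {};
  return B;       // and 'Result' also comes back in %rax/%eax
}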
3298 | ||||
3299 | unsigned StackSize = CCInfo.getNextStackOffset(); | |||
3300 | // Align stack specially for tail calls. | |||
3301 | if (shouldGuaranteeTCO(CallConv, | |||
3302 | MF.getTarget().Options.GuaranteedTailCallOpt)) | |||
3303 | StackSize = GetAlignedArgumentStackSize(StackSize, DAG); | |||
3304 | ||||
3305 | // If the function takes a variable number of arguments, make a frame index for | |||
3306 | // the start of the first vararg value... for expansion of llvm.va_start. We | |||
3307 | // can skip this if there are no va_start calls. | |||
3308 | if (MFI.hasVAStart() && | |||
3309 | (Is64Bit || (CallConv != CallingConv::X86_FastCall && | |||
3310 | CallConv != CallingConv::X86_ThisCall))) { | |||
3311 | FuncInfo->setVarArgsFrameIndex(MFI.CreateFixedObject(1, StackSize, true)); | |||
3312 | } | |||
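// [Editor's note] A standalone varargs function whose lowering exercises
// this code: va_start needs the frame index recorded above for the first
// on-stack vararg and, on x86-64, the register save area spilled below.
#include <cstdarg>

int SumInts(int Count, ...) {
  va_list Args;
  va_start(Args, Count); // consumes VarArgsFrameIndex / reg-save offsets
  int Total = 0;
  for (int I = 0; I < Count; ++I)
    Total += va_arg(Args, int);
  va_end(Args);
  return Total;          // SumInts(3, 1, 2, 3) == 6
}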
3313 | ||||
3314 | // Figure out if XMM registers are in use. | |||
3315 | assert(!(Subtarget.useSoftFloat() && | |||
3316 | F.hasFnAttribute(Attribute::NoImplicitFloat)) && | |||
3317 | "SSE register cannot be used when SSE is disabled!"); | |||
3318 | ||||
3319 | // 64-bit calling conventions support varargs and register parameters, so we | |||
3320 | // have to do extra work to spill them in the prologue. | |||
3321 | if (Is64Bit && isVarArg && MFI.hasVAStart()) { | |||
3322 | // Find the indices of the first unallocated GPR and XMM argument registers. | |||
3323 | ArrayRef<MCPhysReg> ArgGPRs = get64BitArgumentGPRs(CallConv, Subtarget); | |||
3324 | ArrayRef<MCPhysReg> ArgXMMs = get64BitArgumentXMMs(MF, CallConv, Subtarget); | |||
3325 | unsigned NumIntRegs = CCInfo.getFirstUnallocated(ArgGPRs); | |||
3326 | unsigned NumXMMRegs = CCInfo.getFirstUnallocated(ArgXMMs); | |||
3327 | assert(!(NumXMMRegs && !Subtarget.hasSSE1()) && | |||
3328 | "SSE register cannot be used when SSE is disabled!"); | |||
3329 | ||||
3330 | // Gather all the live in physical registers. | |||
3331 | SmallVector<SDValue, 6> LiveGPRs; | |||
3332 | SmallVector<SDValue, 8> LiveXMMRegs; | |||
3333 | SDValue ALVal; | |||
3334 | for (MCPhysReg Reg : ArgGPRs.slice(NumIntRegs)) { | |||
3335 | unsigned GPR = MF.addLiveIn(Reg, &X86::GR64RegClass); | |||
3336 | LiveGPRs.push_back( | |||
3337 | DAG.getCopyFromReg(Chain, dl, GPR, MVT::i64)); | |||
3338 | } | |||
3339 | if (!ArgXMMs.empty()) { | |||
3340 | unsigned AL = MF.addLiveIn(X86::AL, &X86::GR8RegClass); | |||
3341 | ALVal = DAG.getCopyFromReg(Chain, dl, AL, MVT::i8); | |||
3342 | for (MCPhysReg Reg : ArgXMMs.slice(NumXMMRegs)) { | |||
3343 | unsigned XMMReg = MF.addLiveIn(Reg, &X86::VR128RegClass); | |||
3344 | LiveXMMRegs.push_back( | |||
3345 | DAG.getCopyFromReg(Chain, dl, XMMReg, MVT::v4f32)); | |||
3346 | } | |||
3347 | } | |||
3348 | ||||
3349 | if (IsWin64) { | |||
3350 | // Get to the caller-allocated home save location. Add 8 to account | |||
3351 | // for the return address. | |||
3352 | int HomeOffset = TFI.getOffsetOfLocalArea() + 8; | |||
3353 | FuncInfo->setRegSaveFrameIndex( | |||
3354 | MFI.CreateFixedObject(1, NumIntRegs * 8 + HomeOffset, false)); | |||
3355 | // Fix up the vararg frame index to point at the shadow area (4 x i64). | |||
3356 | if (NumIntRegs < 4) | |||
3357 | FuncInfo->setVarArgsFrameIndex(FuncInfo->getRegSaveFrameIndex()); | |||
3358 | } else { | |||
3359 | // For X86-64, if there are vararg parameters that are passed via | |||
3360 | // registers, then we must store them to their spots on the stack so | |||
3361 | // they may be loaded by dereferencing the result of va_next. | |||
3362 | FuncInfo->setVarArgsGPOffset(NumIntRegs * 8); | |||
3363 | FuncInfo->setVarArgsFPOffset(ArgGPRs.size() * 8 + NumXMMRegs * 16); | |||
3364 | FuncInfo->setRegSaveFrameIndex(MFI.CreateStackObject( | |||
3365 | ArgGPRs.size() * 8 + ArgXMMs.size() * 16, 16, false)); | |||
3366 | } | |||
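// [Editor's note] The offsets computed above feed the System V x86-64
// va_list, sketched here under the standard 6-GPR/8-XMM layout (save area
// size 6*8 + 8*16 = 176 bytes): gp_offset advances 0..48 through the GPR
// slots, fp_offset 48..176 through the XMM slots, and overflow_arg_area
// covers varargs that spilled to the caller's stack.
struct VaListSysVAmd64 {
  unsigned GpOffset;      // cf. VarArgsGPOffset (NumIntRegs * 8)
  unsigned FpOffset;      // cf. VarArgsFPOffset (48 + NumXMMRegs * 16)
  void *OverflowArgArea;  // cf. VarArgsFrameIndex
  void *RegSaveArea;      // cf. RegSaveFrameIndex
};
static_assert(sizeof(VaListSysVAmd64) == 24, "ABI-defined size on LP64");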
3367 | ||||
3368 | // Store the integer parameter registers. | |||
3369 | SmallVector<SDValue, 8> MemOps; | |||
3370 | SDValue RSFIN = DAG.getFrameIndex(FuncInfo->getRegSaveFrameIndex(), | |||
3371 | getPointerTy(DAG.getDataLayout())); | |||
3372 | unsigned Offset = FuncInfo->getVarArgsGPOffset(); | |||
3373 | for (SDValue Val : LiveGPRs) { | |||
3374 | SDValue FIN = DAG.getNode(ISD::ADD, dl, getPointerTy(DAG.getDataLayout()), | |||
3375 | RSFIN, DAG.getIntPtrConstant(Offset, dl)); | |||
3376 | SDValue Store = | |||
3377 | DAG.getStore(Val.getValue(1), dl, Val, FIN, | |||
3378 | MachinePointerInfo::getFixedStack( | |||
3379 | DAG.getMachineFunction(), | |||
3380 | FuncInfo->getRegSaveFrameIndex(), Offset)); | |||
3381 | MemOps.push_back(Store); | |||
3382 | Offset += 8; | |||
3383 | } | |||
3384 | ||||
3385 | if (!ArgXMMs.empty() && NumXMMRegs != ArgXMMs.size()) { | |||
3386 | // Now store the XMM (fp + vector) parameter registers. | |||
3387 | SmallVector<SDValue, 12> SaveXMMOps; | |||
3388 | SaveXMMOps.push_back(Chain); | |||
3389 | SaveXMMOps.push_back(ALVal); | |||
3390 | SaveXMMOps.push_back(DAG.getIntPtrConstant( | |||
3391 | FuncInfo->getRegSaveFrameIndex(), dl)); | |||
3392 | SaveXMMOps.push_back(DAG.getIntPtrConstant( | |||
3393 | FuncInfo->getVarArgsFPOffset(), dl)); | |||
3394 | SaveXMMOps.insert(SaveXMMOps.end(), LiveXMMRegs.begin(), | |||
3395 | LiveXMMRegs.end()); | |||
3396 | MemOps.push_back(DAG.getNode(X86ISD::VASTART_SAVE_XMM_REGS, dl, | |||
3397 | MVT::Other, SaveXMMOps)); | |||
3398 | } | |||
3399 | ||||
3400 | if (!MemOps.empty()) | |||
3401 | Chain = DAG.getNode(ISD::TokenFactor, dl, MVT::Other, MemOps); | |||
3402 | } | |||
3403 | ||||
3404 | if (isVarArg && MFI.hasMustTailInVarArgFunc()) { | |||
3405 | // Find the largest legal vector type. | |||
3406 | MVT VecVT = MVT::Other; | |||
3407 | // FIXME: Only some x86_32 calling conventions support AVX512. | |||
3408 | if (Subtarget.hasAVX512() && | |||
3409 | (Is64Bit || (CallConv == CallingConv::X86_VectorCall || | |||
3410 | CallConv == CallingConv::Intel_OCL_BI))) | |||
3411 | VecVT = MVT::v16f32; | |||
3412 | else if (Subtarget.hasAVX()) | |||
3413 | VecVT = MVT::v8f32; | |||
3414 | else if (Subtarget.hasSSE2()) | |||
3415 | VecVT = MVT::v4f32; | |||
3416 | ||||
3417 | // We forward some GPRs and some vector types. | |||
3418 | SmallVector<MVT, 2> RegParmTypes; | |||
3419 | MVT IntVT = Is64Bit ? MVT::i64 : MVT::i32; | |||
3420 | RegParmTypes.push_back(IntVT); | |||
3421 | if (VecVT != MVT::Other) | |||
3422 | RegParmTypes.push_back(VecVT); | |||
3423 | ||||
3424 | // Compute the set of forwarded registers. The rest are scratch. | |||
3425 | SmallVectorImpl<ForwardedRegister> &Forwards = | |||
3426 | FuncInfo->getForwardedMustTailRegParms(); | |||
3427 | CCInfo.analyzeMustTailForwardedRegisters(Forwards, RegParmTypes, CC_X86); | |||
3428 | ||||
3429 | // Conservatively forward AL on x86_64, since it might be used for varargs. | |||
3430 | if (Is64Bit && !CCInfo.isAllocated(X86::AL)) { | |||
3431 | unsigned ALVReg = MF.addLiveIn(X86::AL, &X86::GR8RegClass); | |||
3432 | Forwards.push_back(ForwardedRegister(ALVReg, X86::AL, MVT::i8)); | |||
3433 | } | |||
3434 | ||||
3435 | // Copy all forwards from physical to virtual registers. | |||
3436 | for (ForwardedRegister &F : Forwards) { | |||
3437 | // FIXME: Can we use a less constrained schedule? | |||
3438 | SDValue RegVal = DAG.getCopyFromReg(Chain, dl, F.VReg, F.VT); | |||
3439 | F.VReg = MF.getRegInfo().createVirtualRegister(getRegClassFor(F.VT)); | |||
3440 | Chain = DAG.getCopyToReg(Chain, dl, F.VReg, RegVal); | |||
3441 | } | |||
3442 | } | |||
3443 | ||||
3444 | // Some CCs need callee pop. | |||
3445 | if (X86::isCalleePop(CallConv, Is64Bit, isVarArg, | |||
3446 | MF.getTarget().Options.GuaranteedTailCallOpt)) { | |||
3447 | FuncInfo->setBytesToPopOnReturn(StackSize); // Callee pops everything. | |||
3448 | } else if (CallConv == CallingConv::X86_INTR && Ins.size() == 2) { | |||
3449 | // X86 interrupts must pop the error code (and the alignment padding) if | |||
3450 | // present. | |||
3451 | FuncInfo->setBytesToPopOnReturn(Is64Bit ? 16 : 4); | |||
3452 | } else { | |||
3453 | FuncInfo->setBytesToPopOnReturn(0); // Callee pops nothing. | |||
3454 | // If this is an sret function, the return should pop the hidden pointer. | |||
3455 | if (!Is64Bit && !canGuaranteeTCO(CallConv) && | |||
3456 | !Subtarget.getTargetTriple().isOSMSVCRT() && | |||
3457 | argsAreStructReturn(Ins, Subtarget.isTargetMCU()) == StackStructReturn) | |||
3458 | FuncInfo->setBytesToPopOnReturn(4); | |||
3459 | } | |||
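// [Editor's note] The callee-pop cases above, in source terms: a 32-bit
// stdcall callee pops its own argument bytes with 'ret $imm' (what
// setBytesToPopOnReturn records), while a cdecl callee returns with a
// plain 'ret' and pops nothing. A minimal sketch using the GCC/Clang
// attribute spelling (ignored on 64-bit targets):
__attribute__((stdcall)) int AddStd(int A, int B) {
  return A + B; // 32-bit epilogue pops 8 bytes of arguments: ret $8
}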
3460 | ||||
3461 | if (!Is64Bit) { | |||
3462 | // RegSaveFrameIndex is X86-64 only. | |||
3463 | FuncInfo->setRegSaveFrameIndex(0xAAAAAAA); | |||
3464 | if (CallConv == CallingConv::X86_FastCall || | |||
3465 | CallConv == CallingConv::X86_ThisCall) | |||
3466 | // fastcc functions can't have varargs. | |||
3467 | FuncInfo->setVarArgsFrameIndex(0xAAAAAAA); | |||
3468 | } | |||
3469 | ||||
3470 | FuncInfo->setArgumentStackSize(StackSize); | |||
3471 | ||||
3472 | if (WinEHFuncInfo *EHInfo = MF.getWinEHFuncInfo()) { | |||
3473 | EHPersonality Personality = classifyEHPersonality(F.getPersonalityFn()); | |||
3474 | if (Personality == EHPersonality::CoreCLR) { | |||
3475 | assert(Is64Bit); | |||
3476 | // TODO: Add a mechanism to frame lowering that will allow us to indicate | |||
3477 | // that we'd prefer this slot be allocated towards the bottom of the frame | |||
3478 | // (i.e. near the stack pointer after allocating the frame). Every | |||
3479 | // funclet needs a copy of this slot in its (mostly empty) frame, and the | |||
3480 | // offset from the bottom of this and each funclet's frame must be the | |||
3481 | // same, so the size of funclets' (mostly empty) frames is dictated by | |||
3482 | // how far this slot is from the bottom (since they allocate just enough | |||
3483 | // space to accommodate holding this slot at the correct offset). | |||
3484 | int PSPSymFI = MFI.CreateStackObject(8, 8, /*isSS=*/false); | |||
3485 | EHInfo->PSPSymFrameIdx = PSPSymFI; | |||
3486 | } | |||
3487 | } | |||
3488 | ||||
3489 | if (CallConv == CallingConv::X86_RegCall || | |||
3490 | F.hasFnAttribute("no_caller_saved_registers")) { | |||
3491 | MachineRegisterInfo &MRI = MF.getRegInfo(); | |||
3492 | for (std::pair<unsigned, unsigned> Pair : MRI.liveins()) | |||
3493 | MRI.disableCalleeSavedRegister(Pair.first); | |||
3494 | } | |||
3495 | ||||
3496 | return Chain; | |||
3497 | } | |||
3498 | ||||
3499 | SDValue X86TargetLowering::LowerMemOpCallTo(SDValue Chain, SDValue StackPtr, | |||
3500 | SDValue Arg, const SDLoc &dl, | |||
3501 | SelectionDAG &DAG, | |||
3502 | const CCValAssign &VA, | |||
3503 | ISD::ArgFlagsTy Flags) const { | |||
3504 | unsigned LocMemOffset = VA.getLocMemOffset(); | |||
3505 | SDValue PtrOff = DAG.getIntPtrConstant(LocMemOffset, dl); | |||
3506 | PtrOff = DAG.getNode(ISD::ADD, dl, getPointerTy(DAG.getDataLayout()), | |||
3507 | StackPtr, PtrOff); | |||
3508 | if (Flags.isByVal()) | |||
3509 | return CreateCopyOfByValArgument(Arg, PtrOff, Chain, Flags, DAG, dl); | |||
3510 | ||||
3511 | return DAG.getStore( | |||
3512 | Chain, dl, Arg, PtrOff, | |||
3513 | MachinePointerInfo::getStack(DAG.getMachineFunction(), LocMemOffset)); | |||
3514 | } | |||
3515 | ||||
3516 | /// Emit a load of the return address if tail call | |||
3517 | /// optimization is performed and it is required. | |||
3518 | SDValue X86TargetLowering::EmitTailCallLoadRetAddr( | |||
3519 | SelectionDAG &DAG, SDValue &OutRetAddr, SDValue Chain, bool IsTailCall, | |||
3520 | bool Is64Bit, int FPDiff, const SDLoc &dl) const { | |||
3521 | // Adjust the Return address stack slot. | |||
3522 | EVT VT = getPointerTy(DAG.getDataLayout()); | |||
3523 | OutRetAddr = getReturnAddressFrameIndex(DAG); | |||
3524 | ||||
3525 | // Load the "old" Return address. | |||
3526 | OutRetAddr = DAG.getLoad(VT, dl, Chain, OutRetAddr, MachinePointerInfo()); | |||
3527 | return SDValue(OutRetAddr.getNode(), 1); | |||
3528 | } | |||
3529 | ||||
3530 | /// Emit a store of the return address if tail call | |||
3531 | /// optimization is performed and it is required (FPDiff!=0). | |||
3532 | static SDValue EmitTailCallStoreRetAddr(SelectionDAG &DAG, MachineFunction &MF, | |||
3533 | SDValue Chain, SDValue RetAddrFrIdx, | |||
3534 | EVT PtrVT, unsigned SlotSize, | |||
3535 | int FPDiff, const SDLoc &dl) { | |||
3536 | // Store the return address to the appropriate stack slot. | |||
3537 | if (!FPDiff) return Chain; | |||
3538 | // Calculate the new stack slot for the return address. | |||
3539 | int NewReturnAddrFI = | |||
3540 | MF.getFrameInfo().CreateFixedObject(SlotSize, (int64_t)FPDiff - SlotSize, | |||
3541 | false); | |||
3542 | SDValue NewRetAddrFrIdx = DAG.getFrameIndex(NewReturnAddrFI, PtrVT); | |||
3543 | Chain = DAG.getStore(Chain, dl, RetAddrFrIdx, NewRetAddrFrIdx, | |||
3544 | MachinePointerInfo::getFixedStack( | |||
3545 | DAG.getMachineFunction(), NewReturnAddrFI)); | |||
3546 | return Chain; | |||
3547 | } | |||
3548 | ||||
3549 | /// Returns a vector_shuffle mask for a movs{s|d}, movd | |||
3550 | /// operation of the specified width. | |||
3551 | static SDValue getMOVL(SelectionDAG &DAG, const SDLoc &dl, MVT VT, SDValue V1, | |||
3552 | SDValue V2) { | |||
3553 | unsigned NumElems = VT.getVectorNumElements(); | |||
3554 | SmallVector<int, 8> Mask; | |||
3555 | Mask.push_back(NumElems); | |||
3556 | for (unsigned i = 1; i != NumElems; ++i) | |||
3557 | Mask.push_back(i); | |||
3558 | return DAG.getVectorShuffle(VT, dl, V1, V2, Mask); | |||
3559 | } | |||
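// [Editor's note] The mask getMOVL builds, replayed on plain integers as a
// standalone sketch. For a 4-element type the mask is {4, 1, 2, 3}: lane 0
// is taken from V2 (indices NumElems..2*NumElems-1 name V2's lanes) and
// the remaining lanes stay from V1 -- the movss/movsd "replace the low
// element" pattern.
#include <cstdio>
#include <vector>

int main() {
  const unsigned NumElems = 4;
  std::vector<int> Mask;
  Mask.push_back(NumElems);               // lane 0 <- V2[0]
  for (unsigned I = 1; I != NumElems; ++I)
    Mask.push_back(I);                    // lane I <- V1[I]
  for (int M : Mask)
    std::printf("%d ", M);                // prints: 4 1 2 3
  return 0;
}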
3560 | ||||
3561 | SDValue | |||
3562 | X86TargetLowering::LowerCall(TargetLowering::CallLoweringInfo &CLI, | |||
3563 | SmallVectorImpl<SDValue> &InVals) const { | |||
3564 | SelectionDAG &DAG = CLI.DAG; | |||
3565 | SDLoc &dl = CLI.DL; | |||
3566 | SmallVectorImpl<ISD::OutputArg> &Outs = CLI.Outs; | |||
3567 | SmallVectorImpl<SDValue> &OutVals = CLI.OutVals; | |||
3568 | SmallVectorImpl<ISD::InputArg> &Ins = CLI.Ins; | |||
3569 | SDValue Chain = CLI.Chain; | |||
3570 | SDValue Callee = CLI.Callee; | |||
3571 | CallingConv::ID CallConv = CLI.CallConv; | |||
3572 | bool &isTailCall = CLI.IsTailCall; | |||
3573 | bool isVarArg = CLI.IsVarArg; | |||
3574 | ||||
3575 | MachineFunction &MF = DAG.getMachineFunction(); | |||
3576 | bool Is64Bit = Subtarget.is64Bit(); | |||
3577 | bool IsWin64 = Subtarget.isCallingConvWin64(CallConv); | |||
3578 | StructReturnType SR = callIsStructReturn(Outs, Subtarget.isTargetMCU()); | |||
3579 | bool IsSibcall = false; | |||
3580 | X86MachineFunctionInfo *X86Info = MF.getInfo<X86MachineFunctionInfo>(); | |||
3581 | auto Attr = MF.getFunction().getFnAttribute("disable-tail-calls"); | |||
3582 | const auto *CI = dyn_cast_or_null<CallInst>(CLI.CS.getInstruction()); | |||
3583 | const Function *Fn = CI ? CI->getCalledFunction() : nullptr; | |||
3584 | bool HasNCSR = (CI && CI->hasFnAttr("no_caller_saved_registers")) || | |||
3585 | (Fn && Fn->hasFnAttribute("no_caller_saved_registers")); | |||
3586 | const auto *II = dyn_cast_or_null<InvokeInst>(CLI.CS.getInstruction()); | |||
3587 | bool HasNoCfCheck = | |||
3588 | (CI && CI->doesNoCfCheck()) || (II && II->doesNoCfCheck()); | |||
3589 | const Module *M = MF.getMMI().getModule(); | |||
3590 | Metadata *IsCFProtectionSupported = M->getModuleFlag("cf-protection-branch"); | |||
3591 | ||||
3592 | if (CallConv == CallingConv::X86_INTR) | |||
3593 | report_fatal_error("X86 interrupts may not be called directly"); | |||
3594 | ||||
3595 | if (Attr.getValueAsString() == "true") | |||
3596 | isTailCall = false; | |||
3597 | ||||
3598 | if (Subtarget.isPICStyleGOT() && | |||
3599 | !MF.getTarget().Options.GuaranteedTailCallOpt) { | |||
3600 | // If we are using a GOT, disable tail calls to external symbols with | |||
3601 | // default visibility. Tail calling such a symbol requires using a GOT | |||
3602 | // relocation, which forces early binding of the symbol. This breaks code | |||
3603 | // that requires lazy function symbol resolution. Using musttail or | |||
3604 | // GuaranteedTailCallOpt will override this. | |||
3605 | GlobalAddressSDNode *G = dyn_cast<GlobalAddressSDNode>(Callee); | |||
3606 | if (!G || (!G->getGlobal()->hasLocalLinkage() && | |||
3607 | G->getGlobal()->hasDefaultVisibility())) | |||
3608 | isTailCall = false; | |||
3609 | } | |||
3610 | ||||
3611 | bool IsMustTail = CLI.CS && CLI.CS.isMustTailCall(); | |||
3612 | if (IsMustTail) { | |||
3613 | // Force this to be a tail call. The verifier rules are enough to ensure | |||
3614 | // that we can lower this successfully without moving the return address | |||
3615 | // around. | |||
3616 | isTailCall = true; | |||
3617 | } else if (isTailCall) { | |||
3618 | // Check if it's really possible to do a tail call. | |||
3619 | isTailCall = IsEligibleForTailCallOptimization(Callee, CallConv, | |||
3620 | isVarArg, SR != NotStructReturn, | |||
3621 | MF.getFunction().hasStructRetAttr(), CLI.RetTy, | |||
3622 | Outs, OutVals, Ins, DAG); | |||
3623 | ||||
3624 | // Sibcalls are automatically detected tailcalls which do not require | |||
3625 | // ABI changes. | |||
3626 | if (!MF.getTarget().Options.GuaranteedTailCallOpt && isTailCall) | |||
3627 | IsSibcall = true; | |||
3628 | ||||
3629 | if (isTailCall) | |||
3630 | ++NumTailCalls; | |||
3631 | } | |||
3632 | ||||
3633 | assert(!(isVarArg && canGuaranteeTCO(CallConv)) && | |||
3634 | "Var args not supported with calling convention fastcc, ghc or hipe"); | |||
3635 | ||||
3636 | // Analyze operands of the call, assigning locations to each operand. | |||
3637 | SmallVector<CCValAssign, 16> ArgLocs; | |||
3638 | CCState CCInfo(CallConv, isVarArg, MF, ArgLocs, *DAG.getContext()); | |||
3639 | ||||
3640 | // Allocate shadow area for Win64. | |||
3641 | if (IsWin64) | |||
3642 | CCInfo.AllocateStack(32, 8); | |||
3643 | ||||
3644 | CCInfo.AnalyzeArguments(Outs, CC_X86); | |||
3645 | ||||
3646 | // In the vectorcall calling convention, a second pass is required for the HVA | |||
3647 | // types. | |||
3648 | if (CallingConv::X86_VectorCall == CallConv) { | |||
3649 | CCInfo.AnalyzeArgumentsSecondPass(Outs, CC_X86); | |||
3650 | } | |||
3651 | ||||
3652 | // Get a count of how many bytes are to be pushed on the stack. | |||
3653 | unsigned NumBytes = CCInfo.getAlignedCallFrameSize(); | |||
3654 | if (IsSibcall) | |||
3655 | // This is a sibcall. The memory operands are already available in the | |||
3656 | // caller's own incoming argument area, allocated by its caller. | |||
3657 | NumBytes = 0; | |||
3658 | else if (MF.getTarget().Options.GuaranteedTailCallOpt && | |||
3659 | canGuaranteeTCO(CallConv)) | |||
3660 | NumBytes = GetAlignedArgumentStackSize(NumBytes, DAG); | |||
3661 | ||||
3662 | int FPDiff = 0; | |||
3663 | if (isTailCall && !IsSibcall && !IsMustTail) { | |||
3664 | // Lower arguments at fp - stackoffset + fpdiff. | |||
3665 | unsigned NumBytesCallerPushed = X86Info->getBytesToPopOnReturn(); | |||
3666 | ||||
3667 | FPDiff = NumBytesCallerPushed - NumBytes; | |||
3668 | ||||
3669 | // Set the delta of movement of the return address stack slot, keeping | |||
3670 | // the most negative delta (i.e. the largest movement) seen so far. | |||
3671 | if (FPDiff < X86Info->getTCReturnAddrDelta()) | |||
3672 | X86Info->setTCReturnAddrDelta(FPDiff); | |||
3673 | } | |||
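// [Editor's note] The FPDiff arithmetic above with made-up numbers: a
// caller that pops 8 bytes on return (two i32 args) tail-calling a callee
// that needs 16 bytes gives FPDiff = 8 - 16 = -8, so the return address
// slot must move 8 bytes toward lower addresses; the most negative delta
// seen so far is what gets recorded.
#include <algorithm>
#include <cassert>

int main() {
  int NumBytesCallerPushed = 8, NumBytes = 16;
  int FPDiff = NumBytesCallerPushed - NumBytes;
  int TCReturnAddrDelta = 0; // previously recorded delta
  TCReturnAddrDelta = std::min(TCReturnAddrDelta, FPDiff);
  assert(FPDiff == -8 && TCReturnAddrDelta == -8);
  return 0;
}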
3674 | ||||
3675 | unsigned NumBytesToPush = NumBytes; | |||
3676 | unsigned NumBytesToPop = NumBytes; | |||
3677 | ||||
3678 | // If we have an inalloca argument, all stack space has already been allocated | |||
3679 | // for us and is right at the top of the stack. We don't support multiple | |||
3680 | // arguments passed in memory when using inalloca. | |||
3681 | if (!Outs.empty() && Outs.back().Flags.isInAlloca()) { | |||
3682 | NumBytesToPush = 0; | |||
3683 | if (!ArgLocs.back().isMemLoc()) | |||
3684 | report_fatal_error("cannot use inalloca attribute on a register " | |||
3685 | "parameter"); | |||
3686 | if (ArgLocs.back().getLocMemOffset() != 0) | |||
3687 | report_fatal_error("any parameter with the inalloca attribute must be " | |||
3688 | "the only memory argument"); | |||
3689 | } | |||
3690 | ||||
3691 | if (!IsSibcall) | |||
3692 | Chain = DAG.getCALLSEQ_START(Chain, NumBytesToPush, | |||
3693 | NumBytes - NumBytesToPush, dl); | |||
3694 | ||||
3695 | SDValue RetAddrFrIdx; | |||
3696 | // Load return address for tail calls. | |||
3697 | if (isTailCall && FPDiff) | |||
3698 | Chain = EmitTailCallLoadRetAddr(DAG, RetAddrFrIdx, Chain, isTailCall, | |||
3699 | Is64Bit, FPDiff, dl); | |||
3700 | ||||
3701 | SmallVector<std::pair<unsigned, SDValue>, 8> RegsToPass; | |||
3702 | SmallVector<SDValue, 8> MemOpChains; | |||
3703 | SDValue StackPtr; | |||
3704 | ||||
3705 | // The next loop assumes that the locations are in the same order of the | |||
3706 | // input arguments. | |||
3707 | assert(isSortedByValueNo(ArgLocs) && | |||
3708 | "Argument Location list must be sorted before lowering"); | |||
3709 | ||||
3710 | // Walk the register/memloc assignments, inserting copies/loads. In the case | |||
3711 | // of tail call optimization arguments are handled later. | |||
3712 | const X86RegisterInfo *RegInfo = Subtarget.getRegisterInfo(); | |||
3713 | for (unsigned I = 0, OutIndex = 0, E = ArgLocs.size(); I != E; | |||
3714 | ++I, ++OutIndex) { | |||
3715 | assert(OutIndex < Outs.size() && "Invalid Out index"); | |||
3716 | // Skip inalloca arguments, they have already been written. | |||
3717 | ISD::ArgFlagsTy Flags = Outs[OutIndex].Flags; | |||
3718 | if (Flags.isInAlloca()) | |||
3719 | continue; | |||
3720 | ||||
3721 | CCValAssign &VA = ArgLocs[I]; | |||
3722 | EVT RegVT = VA.getLocVT(); | |||
3723 | SDValue Arg = OutVals[OutIndex]; | |||
3724 | bool isByVal = Flags.isByVal(); | |||
3725 | ||||
3726 | // Promote the value if needed. | |||
3727 | switch (VA.getLocInfo()) { | |||
3728 | default: llvm_unreachable("Unknown loc info!"); | |||
3729 | case CCValAssign::Full: break; | |||
3730 | case CCValAssign::SExt: | |||
3731 | Arg = DAG.getNode(ISD::SIGN_EXTEND, dl, RegVT, Arg); | |||
3732 | break; | |||
3733 | case CCValAssign::ZExt: | |||
3734 | Arg = DAG.getNode(ISD::ZERO_EXTEND, dl, RegVT, Arg); | |||
3735 | break; | |||
3736 | case CCValAssign::AExt: | |||
3737 | if (Arg.getValueType().isVector() && | |||
3738 | Arg.getValueType().getVectorElementType() == MVT::i1) | |||
3739 | Arg = lowerMasksToReg(Arg, RegVT, dl, DAG); | |||
3740 | else if (RegVT.is128BitVector()) { | |||
3741 | // Special case: passing MMX values in XMM registers. | |||
3742 | Arg = DAG.getBitcast(MVT::i64, Arg); | |||
3743 | Arg = DAG.getNode(ISD::SCALAR_TO_VECTOR, dl, MVT::v2i64, Arg); | |||
3744 | Arg = getMOVL(DAG, dl, MVT::v2i64, DAG.getUNDEF(MVT::v2i64), Arg); | |||
3745 | } else | |||
3746 | Arg = DAG.getNode(ISD::ANY_EXTEND, dl, RegVT, Arg); | |||
3747 | break; | |||
3748 | case CCValAssign::BCvt: | |||
3749 | Arg = DAG.getBitcast(RegVT, Arg); | |||
3750 | break; | |||
3751 | case CCValAssign::Indirect: { | |||
3752 | if (isByVal) { | |||
3753 | // Memcpy the argument to a temporary stack slot to prevent | |||
3754 | // the caller from seeing any modifications the callee may make | |||
3755 | // as guaranteed by the `byval` attribute. | |||
3756 | int FrameIdx = MF.getFrameInfo().CreateStackObject( | |||
3757 | Flags.getByValSize(), std::max(16, (int)Flags.getByValAlign()), | |||
3758 | false); | |||
3759 | SDValue StackSlot = | |||
3760 | DAG.getFrameIndex(FrameIdx, getPointerTy(DAG.getDataLayout())); | |||
3761 | Chain = | |||
3762 | CreateCopyOfByValArgument(Arg, StackSlot, Chain, Flags, DAG, dl); | |||
3763 | // From now on treat this as a regular pointer | |||
3764 | Arg = StackSlot; | |||
3765 | isByVal = false; | |||
3766 | } else { | |||
3767 | // Store the argument. | |||
3768 | SDValue SpillSlot = DAG.CreateStackTemporary(VA.getValVT()); | |||
3769 | int FI = cast<FrameIndexSDNode>(SpillSlot)->getIndex(); | |||
3770 | Chain = DAG.getStore( | |||
3771 | Chain, dl, Arg, SpillSlot, | |||
3772 | MachinePointerInfo::getFixedStack(DAG.getMachineFunction(), FI)); | |||
3773 | Arg = SpillSlot; | |||
3774 | } | |||
3775 | break; | |||
3776 | } | |||
3777 | } | |||
3778 | ||||
3779 | if (VA.needsCustom()) { | |||
3780 | assert(VA.getValVT() == MVT::v64i1 && | |||
3781 | "Currently the only custom case is when we split v64i1 to 2 regs"); | |||
3782 | // Split v64i1 value into two registers | |||
3783 | Passv64i1ArgInRegs(dl, DAG, Chain, Arg, RegsToPass, VA, ArgLocs[++I], | |||
3784 | Subtarget); | |||
3785 | } else if (VA.isRegLoc()) { | |||
3786 | RegsToPass.push_back(std::make_pair(VA.getLocReg(), Arg)); | |||
3787 | if (isVarArg && IsWin64) { | |||
3788 | // Win64 ABI requires argument XMM reg to be copied to the corresponding | |||
3789 | // shadow reg if callee is a varargs function. | |||
3790 | unsigned ShadowReg = 0; | |||
3791 | switch (VA.getLocReg()) { | |||
3792 | case X86::XMM0: ShadowReg = X86::RCX; break; | |||
3793 | case X86::XMM1: ShadowReg = X86::RDX; break; | |||
3794 | case X86::XMM2: ShadowReg = X86::R8; break; | |||
3795 | case X86::XMM3: ShadowReg = X86::R9; break; | |||
3796 | } | |||
3797 | if (ShadowReg) | |||
3798 | RegsToPass.push_back(std::make_pair(ShadowReg, Arg)); | |||
3799 | } | |||
3800 | } else if (!IsSibcall && (!isTailCall || isByVal)) { | |||
3801 | assert(VA.isMemLoc()); | |||
3802 | if (!StackPtr.getNode()) | |||
3803 | StackPtr = DAG.getCopyFromReg(Chain, dl, RegInfo->getStackRegister(), | |||
3804 | getPointerTy(DAG.getDataLayout())); | |||
3805 | MemOpChains.push_back(LowerMemOpCallTo(Chain, StackPtr, Arg, | |||
3806 | dl, DAG, VA, Flags)); | |||
3807 | } | |||
3808 | } | |||
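// [Editor's note] The Win64 shadow-register mirroring a few lines above,
// from the source side: a variadic callee cannot know which fixed slots
// hold floats, so the caller must place a floating-point vararg in both
// its XMM register and the paired GPR (XMM1 <-> RDX for the second slot,
// per the switch above). A standalone sketch of such a callee:
#include <cstdarg>

double FirstDouble(int Count, ...) {
  va_list Args;
  va_start(Args, Count);
  double D = va_arg(Args, double); // on Win64, read via the GPR home slot
  va_end(Args);
  return D;
}
// FirstDouble(1, 3.14) on Win64 passes 3.14 in XMM1 and, duplicated, RDX.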
3809 | ||||
3810 | if (!MemOpChains.empty()) | |||
3811 | Chain = DAG.getNode(ISD::TokenFactor, dl, MVT::Other, MemOpChains); | |||
3812 | ||||
3813 | if (Subtarget.isPICStyleGOT()) { | |||
3814 | // ELF / PIC requires the GOT pointer to be in the EBX register before | |||
3815 | // function calls made via the PLT. | |||
3816 | if (!isTailCall) { | |||
3817 | RegsToPass.push_back(std::make_pair( | |||
3818 | unsigned(X86::EBX), DAG.getNode(X86ISD::GlobalBaseReg, SDLoc(), | |||
3819 | getPointerTy(DAG.getDataLayout())))); | |||
3820 | } else { | |||
3821 | // If we are tail calling and generating PIC/GOT style code load the | |||
3822 | // address of the callee into ECX. The value in ecx is used as target of | |||
3823 | // the tail jump. This is done to circumvent the ebx/callee-saved problem | |||
3824 | // for tail calls on PIC/GOT architectures. Normally we would just put the | |||
3825 | // address of GOT into ebx and then call target@PLT. But for tail calls | |||
3826 | // ebx would be restored (since ebx is callee saved) before jumping to the | |||
3827 | // target@PLT. | |||
3828 | ||||
3829 | // Note: The actual moving to ECX is done further down. | |||
3830 | GlobalAddressSDNode *G = dyn_cast<GlobalAddressSDNode>(Callee); | |||
3831 | if (G && !G->getGlobal()->hasLocalLinkage() && | |||
3832 | G->getGlobal()->hasDefaultVisibility()) | |||
3833 | Callee = LowerGlobalAddress(Callee, DAG); | |||
3834 | else if (isa<ExternalSymbolSDNode>(Callee)) | |||
3835 | Callee = LowerExternalSymbol(Callee, DAG); | |||
3836 | } | |||
3837 | } | |||
3838 | ||||
3839 | if (Is64Bit && isVarArg && !IsWin64 && !IsMustTail) { | |||
3840 | // From AMD64 ABI document: | |||
3841 | // For calls that may call functions that use varargs or stdargs | |||
3842 | // (prototype-less calls or calls to functions containing ellipsis (...) in | |||
3843 | // the declaration) %al is used as hidden argument to specify the number | |||
3844 | // of SSE registers used. The contents of %al do not need to match exactly | |||
3845 | // the number of registers, but must be an upper bound on the number of SSE | |||
3846 | // registers used and is in the range 0 - 8 inclusive. | |||
3847 | ||||
3848 | // Count the number of XMM registers allocated. | |||
3849 | static const MCPhysReg XMMArgRegs[] = { | |||
3850 | X86::XMM0, X86::XMM1, X86::XMM2, X86::XMM3, | |||
3851 | X86::XMM4, X86::XMM5, X86::XMM6, X86::XMM7 | |||
3852 | }; | |||
3853 | unsigned NumXMMRegs = CCInfo.getFirstUnallocated(XMMArgRegs); | |||
3854 | assert((Subtarget.hasSSE1() || !NumXMMRegs) | |||
3855 | && "SSE registers cannot be used when SSE is disabled"); | |||
3856 | ||||
3857 | RegsToPass.push_back(std::make_pair(unsigned(X86::AL), | |||
3858 | DAG.getConstant(NumXMMRegs, dl, | |||
3859 | MVT::i8))); | |||
3860 | } | |||
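// [Editor's note] The %al contract above, seen from the caller: for a SysV
// x86-64 variadic call, %al must hold an upper bound (0..8) on the number
// of XMM registers carrying arguments; the callee's VASTART_SAVE_XMM_REGS
// prologue uses it to decide whether to spill XMMs at all.
#include <cstdio>

int main() {
  // Two FP varargs occupy XMM0 and XMM1, so the compiler emits something
  // like "movb $2, %al" before the call (any value in 2..8 is also legal).
  std::printf("%f %f\n", 1.0, 2.0);
  return 0;
}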
3861 | ||||
3862 | if (isVarArg && IsMustTail) { | |||
3863 | const auto &Forwards = X86Info->getForwardedMustTailRegParms(); | |||
3864 | for (const auto &F : Forwards) { | |||
3865 | SDValue Val = DAG.getCopyFromReg(Chain, dl, F.VReg, F.VT); | |||
3866 | RegsToPass.push_back(std::make_pair(unsigned(F.PReg), Val)); | |||
3867 | } | |||
3868 | } | |||
3869 | ||||
3870 | // For tail calls lower the arguments to the 'real' stack slots. Sibcalls | |||
3871 | // don't need this because the eligibility check rejects calls that require | |||
3872 | // shuffling arguments passed in memory. | |||
3873 | if (!IsSibcall && isTailCall) { | |||
3874 | // Force all the incoming stack arguments to be loaded from the stack | |||
3875 | // before any new outgoing arguments are stored to the stack, because the | |||
3876 | // outgoing stack slots may alias the incoming argument stack slots, and | |||
3877 | // the alias isn't otherwise explicit. This is slightly more conservative | |||
3878 | // than necessary, because it means that each store effectively depends | |||
3879 | // on every argument instead of just those arguments it would clobber. | |||
3880 | SDValue ArgChain = DAG.getStackArgumentTokenFactor(Chain); | |||
3881 | ||||
3882 | SmallVector<SDValue, 8> MemOpChains2; | |||
3883 | SDValue FIN; | |||
3884 | int FI = 0; | |||
3885 | for (unsigned I = 0, OutsIndex = 0, E = ArgLocs.size(); I != E; | |||
3886 | ++I, ++OutsIndex) { | |||
3887 | CCValAssign &VA = ArgLocs[I]; | |||
3888 | ||||
3889 | if (VA.isRegLoc()) { | |||
3890 | if (VA.needsCustom()) { | |||
3891 | assert((CallConv == CallingConv::X86_RegCall) && | |||
3892 | "Expecting custom case only in regcall calling convention"); | |||
3893 | // This means that we are in the special case where one argument was | |||
3894 | // passed through two register locations - skip the next location. | |||
3895 | ++I; | |||
3896 | } | |||
3897 | ||||
3898 | continue; | |||
3899 | } | |||
3900 | ||||
3901 | assert(VA.isMemLoc()); | |||
3902 | SDValue Arg = OutVals[OutsIndex]; | |||
3903 | ISD::ArgFlagsTy Flags = Outs[OutsIndex].Flags; | |||
3904 | // Skip inalloca arguments. They don't require any work. | |||
3905 | if (Flags.isInAlloca()) | |||
3906 | continue; | |||
3907 | // Create frame index. | |||
3908 | int32_t Offset = VA.getLocMemOffset()+FPDiff; | |||
3909 | uint32_t OpSize = (VA.getLocVT().getSizeInBits()+7)/8; | |||
3910 | FI = MF.getFrameInfo().CreateFixedObject(OpSize, Offset, true); | |||
3911 | FIN = DAG.getFrameIndex(FI, getPointerTy(DAG.getDataLayout())); | |||
3912 | ||||
3913 | if (Flags.isByVal()) { | |||
3914 | // Copy relative to framepointer. | |||
3915 | SDValue Source = DAG.getIntPtrConstant(VA.getLocMemOffset(), dl); | |||
3916 | if (!StackPtr.getNode()) | |||
3917 | StackPtr = DAG.getCopyFromReg(Chain, dl, RegInfo->getStackRegister(), | |||
3918 | getPointerTy(DAG.getDataLayout())); | |||
3919 | Source = DAG.getNode(ISD::ADD, dl, getPointerTy(DAG.getDataLayout()), | |||
3920 | StackPtr, Source); | |||
3921 | ||||
3922 | MemOpChains2.push_back(CreateCopyOfByValArgument(Source, FIN, | |||
3923 | ArgChain, | |||
3924 | Flags, DAG, dl)); | |||
3925 | } else { | |||
3926 | // Store relative to framepointer. | |||
3927 | MemOpChains2.push_back(DAG.getStore( | |||
3928 | ArgChain, dl, Arg, FIN, | |||
3929 | MachinePointerInfo::getFixedStack(DAG.getMachineFunction(), FI))); | |||
3930 | } | |||
3931 | } | |||
3932 | ||||
3933 | if (!MemOpChains2.empty()) | |||
3934 | Chain = DAG.getNode(ISD::TokenFactor, dl, MVT::Other, MemOpChains2); | |||
3935 | ||||
3936 | // Store the return address to the appropriate stack slot. | |||
3937 | Chain = EmitTailCallStoreRetAddr(DAG, MF, Chain, RetAddrFrIdx, | |||
3938 | getPointerTy(DAG.getDataLayout()), | |||
3939 | RegInfo->getSlotSize(), FPDiff, dl); | |||
3940 | } | |||
3941 | ||||
3942 | // Build a sequence of copy-to-reg nodes chained together with token chain | |||
3943 | // and flag operands which copy the outgoing args into registers. | |||
3944 | SDValue InFlag; | |||
3945 | for (unsigned i = 0, e = RegsToPass.size(); i != e; ++i) { | |||
3946 | Chain = DAG.getCopyToReg(Chain, dl, RegsToPass[i].first, | |||
3947 | RegsToPass[i].second, InFlag); | |||
3948 | InFlag = Chain.getValue(1); | |||
3949 | } | |||
3950 | ||||
3951 | if (DAG.getTarget().getCodeModel() == CodeModel::Large) { | |||
3952 | assert(Is64Bit && "Large code model is only legal in 64-bit mode."); | |||
3953 | // In the 64-bit large code model, we have to make all calls | |||
3954 | // through a register, since the call instruction's 32-bit | |||
3955 | // pc-relative offset may not be large enough to hold the whole | |||
3956 | // address. | |||
3957 | } else if (Callee->getOpcode() == ISD::GlobalAddress) { | |||
3958 | // If the callee is a GlobalAddress node (quite common, every direct call | |||
3959 | // is) turn it into a TargetGlobalAddress node so that legalize doesn't hack | |||
3960 | // it. | |||
3961 | GlobalAddressSDNode* G = cast<GlobalAddressSDNode>(Callee); | |||
3962 | ||||
3963 | // We should use an extra load for direct calls to dllimported functions in | |||
3964 | // non-JIT mode. | |||
3965 | const GlobalValue *GV = G->getGlobal(); | |||
3966 | if (!GV->hasDLLImportStorageClass()) { | |||
3967 | unsigned char OpFlags = Subtarget.classifyGlobalFunctionReference(GV); | |||
3968 | ||||
3969 | Callee = DAG.getTargetGlobalAddress( | |||
3970 | GV, dl, getPointerTy(DAG.getDataLayout()), G->getOffset(), OpFlags); | |||
3971 | ||||
3972 | if (OpFlags == X86II::MO_GOTPCREL) { | |||
3973 | // Add a wrapper. | |||
3974 | Callee = DAG.getNode(X86ISD::WrapperRIP, dl, | |||
3975 | getPointerTy(DAG.getDataLayout()), Callee); | |||
3976 | // Add extra indirection | |||
3977 | Callee = DAG.getLoad( | |||
3978 | getPointerTy(DAG.getDataLayout()), dl, DAG.getEntryNode(), Callee, | |||
3979 | MachinePointerInfo::getGOT(DAG.getMachineFunction())); | |||
3980 | } | |||
3981 | } | |||
3982 | } else if (ExternalSymbolSDNode *S = dyn_cast<ExternalSymbolSDNode>(Callee)) { | |||
3983 | const Module *Mod = DAG.getMachineFunction().getFunction().getParent(); | |||
3984 | unsigned char OpFlags = | |||
3985 | Subtarget.classifyGlobalFunctionReference(nullptr, *Mod); | |||
3986 | ||||
3987 | Callee = DAG.getTargetExternalSymbol( | |||
3988 | S->getSymbol(), getPointerTy(DAG.getDataLayout()), OpFlags); | |||
3989 | ||||
3990 | if (OpFlags == X86II::MO_GOTPCREL) { | |||
3991 | Callee = DAG.getNode(X86ISD::WrapperRIP, dl, | |||
3992 | getPointerTy(DAG.getDataLayout()), Callee); | |||
3993 | Callee = DAG.getLoad( | |||
3994 | getPointerTy(DAG.getDataLayout()), dl, DAG.getEntryNode(), Callee, | |||
3995 | MachinePointerInfo::getGOT(DAG.getMachineFunction())); | |||
3996 | } | |||
3997 | } else if (Subtarget.isTarget64BitILP32() && | |||
3998 | Callee->getValueType(0) == MVT::i32) { | |||
3999 | // Zero-extend the 32-bit Callee address into 64 bits according to the x32 ABI | |||
4000 | Callee = DAG.getNode(ISD::ZERO_EXTEND, dl, MVT::i64, Callee); | |||
4001 | } | |||
4002 | ||||
4003 | // Returns a chain & a flag for retval copy to use. | |||
4004 | SDVTList NodeTys = DAG.getVTList(MVT::Other, MVT::Glue); | |||
4005 | SmallVector<SDValue, 8> Ops; | |||
4006 | ||||
4007 | if (!IsSibcall && isTailCall) { | |||
4008 | Chain = DAG.getCALLSEQ_END(Chain, | |||
4009 | DAG.getIntPtrConstant(NumBytesToPop, dl, true), | |||
4010 | DAG.getIntPtrConstant(0, dl, true), InFlag, dl); | |||
4011 | InFlag = Chain.getValue(1); | |||
4012 | } | |||
4013 | ||||
4014 | Ops.push_back(Chain); | |||
4015 | Ops.push_back(Callee); | |||
4016 | ||||
4017 | if (isTailCall) | |||
4018 | Ops.push_back(DAG.getConstant(FPDiff, dl, MVT::i32)); | |||
4019 | ||||
4020 | // Add argument registers to the end of the list so that they are known live | |||
4021 | // into the call. | |||
4022 | for (unsigned i = 0, e = RegsToPass.size(); i != e; ++i) | |||
4023 | Ops.push_back(DAG.getRegister(RegsToPass[i].first, | |||
4024 | RegsToPass[i].second.getValueType())); | |||
4025 | ||||
4026 | // Add a register mask operand representing the call-preserved registers. | |||
4027 | // If HasNCSR is asserted (attribute NoCallerSavedRegisters exists), then we | |||
4028 | // use the X86_INTR calling convention because it has the same CSR mask | |||
4029 | // (same preserved registers). | |||
4030 | const uint32_t *Mask = RegInfo->getCallPreservedMask( | |||
4031 | MF, HasNCSR ? (CallingConv::ID)CallingConv::X86_INTR : CallConv); | |||
4032 | assert(Mask && "Missing call preserved mask for calling convention"); | |||
4033 | ||||
4034 | // If this is an invoke in a 32-bit function using a funclet-based | |||
4035 | // personality, assume the function clobbers all registers. If an exception | |||
4036 | // is thrown, the runtime will not restore CSRs. | |||
4037 | // FIXME: Model this more precisely so that we can register allocate across | |||
4038 | // the normal edge and spill and fill across the exceptional edge. | |||
4039 | if (!Is64Bit && CLI.CS && CLI.CS.isInvoke()) { | |||
4040 | const Function &CallerFn = MF.getFunction(); | |||
4041 | EHPersonality Pers = | |||
4042 | CallerFn.hasPersonalityFn() | |||
4043 | ? classifyEHPersonality(CallerFn.getPersonalityFn()) | |||
4044 | : EHPersonality::Unknown; | |||
4045 | if (isFuncletEHPersonality(Pers)) | |||
4046 | Mask = RegInfo->getNoPreservedMask(); | |||
4047 | } | |||
4048 | ||||
4049 | // Define a new register mask from the existing mask. | |||
4050 | uint32_t *RegMask = nullptr; | |||
4051 | ||||
4052 | // In some calling conventions we need to remove the used physical registers | |||
4053 | // from the reg mask. | |||
4054 | if (CallConv == CallingConv::X86_RegCall || HasNCSR) { | |||
4055 | const TargetRegisterInfo *TRI = Subtarget.getRegisterInfo(); | |||
4056 | ||||
4057 | // Allocate a new Reg Mask and copy Mask. | |||
4058 | RegMask = MF.allocateRegMask(); | |||
4059 | unsigned RegMaskSize = MachineOperand::getRegMaskSize(TRI->getNumRegs()); | |||
4060 | memcpy(RegMask, Mask, sizeof(RegMask[0]) * RegMaskSize); | |||
4061 | ||||
4062 | // Make sure all sub registers of the argument registers are reset | |||
4063 | // in the RegMask. | |||
4064 | for (auto const &RegPair : RegsToPass) | |||
4065 | for (MCSubRegIterator SubRegs(RegPair.first, TRI, /*IncludeSelf=*/true); | |||
4066 | SubRegs.isValid(); ++SubRegs) | |||
4067 | RegMask[*SubRegs / 32] &= ~(1u << (*SubRegs % 32)); | |||
4068 | ||||
4069 | // Create the RegMask Operand according to our updated mask. | |||
4070 | Ops.push_back(DAG.getRegisterMask(RegMask)); | |||
4071 | } else { | |||
4072 | // Create the RegMask Operand according to the static mask. | |||
4073 | Ops.push_back(DAG.getRegisterMask(Mask)); | |||
4074 | } | |||
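// [Editor's note] The bit manipulation in the loop above, isolated:
// register masks pack one bit per register, 32 per 32-bit word, where a
// set bit means "preserved across the call"; clearing bit R marks register
// number R (a made-up value here) as clobbered by this particular call.
#include <cassert>
#include <cstdint>

int main() {
  uint32_t RegMask[2] = {0xFFFFFFFFu, 0xFFFFFFFFu}; // everything preserved
  unsigned Reg = 37;                                // word 1, bit 5
  RegMask[Reg / 32] &= ~(1u << (Reg % 32));
  assert(RegMask[0] == 0xFFFFFFFFu && RegMask[1] == 0xFFFFFFDFu);
  return 0;
}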
4075 | ||||
4076 | if (InFlag.getNode()) | |||
4077 | Ops.push_back(InFlag); | |||
4078 | ||||
4079 | if (isTailCall) { | |||
4080 | // We used to do: | |||
4081 | //// If this is the first return lowered for this function, add the regs | |||
4082 | //// to the liveout set for the function. | |||
4083 | // This isn't right, although it's probably harmless on x86; liveouts | |||
4084 | // should be computed from returns not tail calls. Consider a void | |||
4085 | // function making a tail call to a function returning int. | |||
4086 | MF.getFrameInfo().setHasTailCall(); | |||
4087 | return DAG.getNode(X86ISD::TC_RETURN, dl, NodeTys, Ops); | |||
4088 | } | |||
4089 | ||||
4090 | if (HasNoCfCheck && IsCFProtectionSupported) { | |||
4091 | Chain = DAG.getNode(X86ISD::NT_CALL, dl, NodeTys, Ops); | |||
4092 | } else { | |||
4093 | Chain = DAG.getNode(X86ISD::CALL, dl, NodeTys, Ops); | |||
4094 | } | |||
4095 | InFlag = Chain.getValue(1); | |||
4096 | ||||
4097 | // Create the CALLSEQ_END node. | |||
4098 | unsigned NumBytesForCalleeToPop; | |||
4099 | if (X86::isCalleePop(CallConv, Is64Bit, isVarArg, | |||
4100 | DAG.getTarget().Options.GuaranteedTailCallOpt)) | |||
4101 | NumBytesForCalleeToPop = NumBytes; // Callee pops everything | |||
4102 | else if (!Is64Bit && !canGuaranteeTCO(CallConv) && | |||
4103 | !Subtarget.getTargetTriple().isOSMSVCRT() && | |||
4104 | SR == StackStructReturn) | |||
4105 | // If this is a call to a struct-return function, the callee | |||
4106 | // pops the hidden struct pointer, so we have to push it back. | |||
4107 | // This is common for Darwin/X86, Linux & Mingw32 targets. | |||
4108 | // For MSVC Win32 targets, the caller pops the hidden struct pointer. | |||
4109 | NumBytesForCalleeToPop = 4; | |||
4110 | else | |||
4111 | NumBytesForCalleeToPop = 0; // Callee pops nothing. | |||
4112 | ||||
4113 | if (CLI.DoesNotReturn && !getTargetMachine().Options.TrapUnreachable) { | |||
4114 | // No need to reset the stack after the call if the call doesn't return. To | |||
4115 | // keep the MachineInstr verifier happy, we'll pretend the callee does it for us. | |||
4116 | NumBytesForCalleeToPop = NumBytes; | |||
4117 | } | |||
4118 | ||||
4119 | // Returns a flag for retval copy to use. | |||
4120 | if (!IsSibcall) { | |||
4121 | Chain = DAG.getCALLSEQ_END(Chain, | |||
4122 | DAG.getIntPtrConstant(NumBytesToPop, dl, true), | |||
4123 | DAG.getIntPtrConstant(NumBytesForCalleeToPop, dl, | |||
4124 | true), | |||
4125 | InFlag, dl); | |||
4126 | InFlag = Chain.getValue(1); | |||
4127 | } | |||
4128 | ||||
4129 | // Handle result values, copying them out of physregs into vregs that we | |||
4130 | // return. | |||
4131 | return LowerCallResult(Chain, InFlag, CallConv, isVarArg, Ins, dl, DAG, | |||
4132 | InVals, RegMask); | |||
4133 | } | |||
4134 | ||||
4135 | //===----------------------------------------------------------------------===// | |||
4136 | // Fast Calling Convention (tail call) implementation | |||
4137 | //===----------------------------------------------------------------------===// | |||
4138 | ||||
4139 | // Like the std call convention, the callee cleans up the arguments, except | |||
4140 | // that ECX is reserved for storing the address of the tail-called function. | |||
4141 | // Only 2 registers are free for argument passing (inreg). Tail call | |||
4142 | // optimization is performed provided: | |||
4143 | // * tailcallopt is enabled | |||
4144 | // * caller/callee are fastcc | |||
4145 | // On the X86_64 architecture with GOT-style position-independent code, only | |||
4146 | // local (within-module) calls are supported at the moment. | |||
4147 | // To keep the stack aligned according to the platform ABI, the function | |||
4148 | // GetAlignedArgumentStackSize ensures that the argument delta is always a | |||
4149 | // multiple of the stack alignment. (Dynamic linkers need this - darwin's dyld for example.) | |||
4150 | // If a tail-called callee has more arguments than the caller, the caller needs | |||
4151 | // to make sure that there is room to move the RETADDR to. This is achieved by | |||
4152 | // reserving an area the size of the argument delta right after the original | |||
4153 | // RETADDR, but before the saved frame pointer or the spilled registers, | |||
4154 | // e.g. caller(arg1, arg2) calls callee(arg1, arg2, arg3, arg4): | |||
4155 | // stack layout: | |||
4156 | // arg1 | |||
4157 | // arg2 | |||
4158 | // RETADDR | |||
4159 | // [ new RETADDR | |||
4160 | // move area ] | |||
4161 | // (possible EBP) | |||
4162 | // ESI | |||
4163 | // EDI | |||
4164 | // local1 .. | |||
4165 | ||||
4166 | /// Align the stack size to, e.g., 16n + 12 to satisfy a 16-byte alignment | |||
4167 | /// requirement while leaving room for the return-address slot. | |||
4168 | unsigned | |||
4169 | X86TargetLowering::GetAlignedArgumentStackSize(unsigned StackSize, | |||
4170 | SelectionDAG& DAG) const { | |||
4171 | const X86RegisterInfo *RegInfo = Subtarget.getRegisterInfo(); | |||
4172 | const TargetFrameLowering &TFI = *Subtarget.getFrameLowering(); | |||
4173 | unsigned StackAlignment = TFI.getStackAlignment(); | |||
4174 | uint64_t AlignMask = StackAlignment - 1; | |||
4175 | int64_t Offset = StackSize; | |||
4176 | unsigned SlotSize = RegInfo->getSlotSize(); | |||
4177 | if ( (Offset & AlignMask) <= (StackAlignment - SlotSize) ) { | |||
4178 | // The misalignment is at most StackAlignment - SlotSize (12 here), so just add the difference. | |||
4179 | Offset += ((StackAlignment - SlotSize) - (Offset & AlignMask)); | |||
4180 | } else { | |||
4181 | // Mask out the lower bits, then add the stack alignment plus (StackAlignment - SlotSize) bytes. | |||
4182 | Offset = ((~AlignMask) & Offset) + StackAlignment + | |||
4183 | (StackAlignment-SlotSize); | |||
4184 | } | |||
4185 | return Offset; | |||
4186 | } | |||
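     | // For illustration (an editor's sketch assuming StackAlignment = 16 and | |||
     | // SlotSize = 4, i.e. the 16n + 12 shape): StackSize = 20 has (20 & 15) = 4, | |||
     | // which is <= 12, so we add 12 - 4 = 8 and return 28 = 16*1 + 12; | |||
     | // StackSize = 30 has (30 & 15) = 14 > 12, so we return | |||
     | // (30 & ~15) + 16 + 12 = 44 = 16*2 + 12. | |||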
4187 | ||||
4188 | /// Return true if the given stack call argument is already available at the | |||
4189 | /// same relative position in the caller's incoming argument stack. | |||
4190 | static | |||
4191 | bool MatchingStackOffset(SDValue Arg, unsigned Offset, ISD::ArgFlagsTy Flags, | |||
4192 | MachineFrameInfo &MFI, const MachineRegisterInfo *MRI, | |||
4193 | const X86InstrInfo *TII, const CCValAssign &VA) { | |||
4194 | unsigned Bytes = Arg.getValueSizeInBits() / 8; | |||
4195 | ||||
4196 | for (;;) { | |||
4197 | // Look through nodes that don't alter the bits of the incoming value. | |||
4198 | unsigned Op = Arg.getOpcode(); | |||
4199 | if (Op == ISD::ZERO_EXTEND || Op == ISD::ANY_EXTEND || Op == ISD::BITCAST) { | |||
4200 | Arg = Arg.getOperand(0); | |||
4201 | continue; | |||
4202 | } | |||
4203 | if (Op == ISD::TRUNCATE) { | |||
4204 | const SDValue &TruncInput = Arg.getOperand(0); | |||
4205 | if (TruncInput.getOpcode() == ISD::AssertZext && | |||
4206 | cast<VTSDNode>(TruncInput.getOperand(1))->getVT() == | |||
4207 | Arg.getValueType()) { | |||
4208 | Arg = TruncInput.getOperand(0); | |||
4209 | continue; | |||
4210 | } | |||
4211 | } | |||
4212 | break; | |||
4213 | } | |||
4214 | ||||
4215 | int FI = INT_MAX; | |||
4216 | if (Arg.getOpcode() == ISD::CopyFromReg) { | |||
4217 | unsigned VR = cast<RegisterSDNode>(Arg.getOperand(1))->getReg(); | |||
4218 | if (!TargetRegisterInfo::isVirtualRegister(VR)) | |||
4219 | return false; | |||
4220 | MachineInstr *Def = MRI->getVRegDef(VR); | |||
4221 | if (!Def) | |||
4222 | return false; | |||
4223 | if (!Flags.isByVal()) { | |||
4224 | if (!TII->isLoadFromStackSlot(*Def, FI)) | |||
4225 | return false; | |||
4226 | } else { | |||
4227 | unsigned Opcode = Def->getOpcode(); | |||
4228 | if ((Opcode == X86::LEA32r || Opcode == X86::LEA64r || | |||
4229 | Opcode == X86::LEA64_32r) && | |||
4230 | Def->getOperand(1).isFI()) { | |||
4231 | FI = Def->getOperand(1).getIndex(); | |||
4232 | Bytes = Flags.getByValSize(); | |||
4233 | } else | |||
4234 | return false; | |||
4235 | } | |||
4236 | } else if (LoadSDNode *Ld = dyn_cast<LoadSDNode>(Arg)) { | |||
4237 | if (Flags.isByVal()) | |||
4238 | // ByVal argument is passed in as a pointer but it's now being | |||
4239 | // dereferenced. e.g. | |||
4240 | // define @foo(%struct.X* %A) { | |||
4241 | // tail call @bar(%struct.X* byval %A) | |||
4242 | // } | |||
4243 | return false; | |||
4244 | SDValue Ptr = Ld->getBasePtr(); | |||
4245 | FrameIndexSDNode *FINode = dyn_cast<FrameIndexSDNode>(Ptr); | |||
4246 | if (!FINode) | |||
4247 | return false; | |||
4248 | FI = FINode->getIndex(); | |||
4249 | } else if (Arg.getOpcode() == ISD::FrameIndex && Flags.isByVal()) { | |||
4250 | FrameIndexSDNode *FINode = cast<FrameIndexSDNode>(Arg); | |||
4251 | FI = FINode->getIndex(); | |||
4252 | Bytes = Flags.getByValSize(); | |||
4253 | } else | |||
4254 | return false; | |||
4255 | ||||
4256 | assert(FI != INT_MAX); | |||
4257 | if (!MFI.isFixedObjectIndex(FI)) | |||
4258 | return false; | |||
4259 | ||||
4260 | if (Offset != MFI.getObjectOffset(FI)) | |||
4261 | return false; | |||
4262 | ||||
4263 | // If this is not byval, check that the argument stack object is immutable. | |||
4264 | // inalloca and argument copy elision can create mutable argument stack | |||
4265 | // objects. Byval objects can be mutated, but a byval call intends to pass the | |||
4266 | // mutated memory. | |||
4267 | if (!Flags.isByVal() && !MFI.isImmutableObjectIndex(FI)) | |||
4268 | return false; | |||
4269 | ||||
4270 | if (VA.getLocVT().getSizeInBits() > Arg.getValueSizeInBits()) { | |||
4271 | // If the argument location is wider than the argument type, check that any | |||
4272 | // extension flags match. | |||
4273 | if (Flags.isZExt() != MFI.isObjectZExt(FI) || | |||
4274 | Flags.isSExt() != MFI.isObjectSExt(FI)) { | |||
4275 | return false; | |||
4276 | } | |||
4277 | } | |||
4278 | ||||
4279 | return Bytes == MFI.getObjectSize(FI); | |||
4280 | } | |||
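     | // A sketch of the common case this matches: given | |||
     | //   define i32 @f(i32 %x) { | |||
     | //     %r = tail call i32 @g(i32 %x) | |||
     | //     ret i32 %r | |||
     | //   } | |||
     | // on 32-bit targets, %x is loaded from a fixed, immutable incoming stack | |||
     | // slot at the very offset the callee expects, so no copy is needed. | |||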
4281 | ||||
4282 | /// Check whether the call is eligible for tail call optimization. Targets | |||
4283 | /// that want to do tail call optimization should implement this function. | |||
4284 | bool X86TargetLowering::IsEligibleForTailCallOptimization( | |||
4285 | SDValue Callee, CallingConv::ID CalleeCC, bool isVarArg, | |||
4286 | bool isCalleeStructRet, bool isCallerStructRet, Type *RetTy, | |||
4287 | const SmallVectorImpl<ISD::OutputArg> &Outs, | |||
4288 | const SmallVectorImpl<SDValue> &OutVals, | |||
4289 | const SmallVectorImpl<ISD::InputArg> &Ins, SelectionDAG &DAG) const { | |||
4290 | if (!mayTailCallThisCC(CalleeCC)) | |||
4291 | return false; | |||
4292 | ||||
4293 | // If -tailcallopt is specified, make fastcc functions tail-callable. | |||
4294 | MachineFunction &MF = DAG.getMachineFunction(); | |||
4295 | const Function &CallerF = MF.getFunction(); | |||
4296 | ||||
4297 | // If the function return type is x86_fp80 and the callee return type is not, | |||
4298 | // then the FP_EXTEND of the call result is not a nop. It's not safe to | |||
4299 | // perform a tailcall optimization here. | |||
4300 | if (CallerF.getReturnType()->isX86_FP80Ty() && !RetTy->isX86_FP80Ty()) | |||
4301 | return false; | |||
4302 | ||||
4303 | CallingConv::ID CallerCC = CallerF.getCallingConv(); | |||
4304 | bool CCMatch = CallerCC == CalleeCC; | |||
4305 | bool IsCalleeWin64 = Subtarget.isCallingConvWin64(CalleeCC); | |||
4306 | bool IsCallerWin64 = Subtarget.isCallingConvWin64(CallerCC); | |||
4307 | ||||
4308 | // Win64 functions have extra shadow space for argument homing. Don't do the | |||
4309 | // sibcall if the caller and callee have mismatched expectations for this | |||
4310 | // space. | |||
4311 | if (IsCalleeWin64 != IsCallerWin64) | |||
4312 | return false; | |||
4313 | ||||
4314 | if (DAG.getTarget().Options.GuaranteedTailCallOpt) { | |||
4315 | if (canGuaranteeTCO(CalleeCC) && CCMatch) | |||
4316 | return true; | |||
4317 | return false; | |||
4318 | } | |||
4319 | ||||
4320 | // Look for obvious safe cases to perform tail call optimization that do not | |||
4321 | // require ABI changes. This is what gcc calls sibcall. | |||
4322 | ||||
4323 | // Can't do sibcall if stack needs to be dynamically re-aligned. PEI needs to | |||
4324 | // emit a special epilogue. | |||
4325 | const X86RegisterInfo *RegInfo = Subtarget.getRegisterInfo(); | |||
4326 | if (RegInfo->needsStackRealignment(MF)) | |||
4327 | return false; | |||
4328 | ||||
4329 | // Also avoid sibcall optimization if either caller or callee uses struct | |||
4330 | // return semantics. | |||
4331 | if (isCalleeStructRet || isCallerStructRet) | |||
4332 | return false; | |||
4333 | ||||
4334 | // Do not sibcall optimize vararg calls unless all arguments are passed via | |||
4335 | // registers. | |||
4336 | LLVMContext &C = *DAG.getContext(); | |||
4337 | if (isVarArg && !Outs.empty()) { | |||
4338 | // Optimizing for varargs on Win64 is unlikely to be safe without | |||
4339 | // additional testing. | |||
4340 | if (IsCalleeWin64 || IsCallerWin64) | |||
4341 | return false; | |||
4342 | ||||
4343 | SmallVector<CCValAssign, 16> ArgLocs; | |||
4344 | CCState CCInfo(CalleeCC, isVarArg, MF, ArgLocs, C); | |||
4345 | ||||
4346 | CCInfo.AnalyzeCallOperands(Outs, CC_X86); | |||
4347 | for (unsigned i = 0, e = ArgLocs.size(); i != e; ++i) | |||
4348 | if (!ArgLocs[i].isRegLoc()) | |||
4349 | return false; | |||
4350 | } | |||
4351 | ||||
4352 | // If the call result is in ST0 / ST1, it needs to be popped off the x87 | |||
4353 | // stack. Therefore, if it's not used by the caller it is not safe to optimize | |||
4354 | // this into a sibcall. | |||
4355 | bool Unused = false; | |||
4356 | for (unsigned i = 0, e = Ins.size(); i != e; ++i) { | |||
4357 | if (!Ins[i].Used) { | |||
4358 | Unused = true; | |||
4359 | break; | |||
4360 | } | |||
4361 | } | |||
4362 | if (Unused) { | |||
4363 | SmallVector<CCValAssign, 16> RVLocs; | |||
4364 | CCState CCInfo(CalleeCC, false, MF, RVLocs, C); | |||
4365 | CCInfo.AnalyzeCallResult(Ins, RetCC_X86); | |||
4366 | for (unsigned i = 0, e = RVLocs.size(); i != e; ++i) { | |||
4367 | CCValAssign &VA = RVLocs[i]; | |||
4368 | if (VA.getLocReg() == X86::FP0 || VA.getLocReg() == X86::FP1) | |||
4369 | return false; | |||
4370 | } | |||
4371 | } | |||
4372 | ||||
4373 | // Check that the call results are passed in the same way. | |||
4374 | if (!CCState::resultsCompatible(CalleeCC, CallerCC, MF, C, Ins, | |||
4375 | RetCC_X86, RetCC_X86)) | |||
4376 | return false; | |||
4377 | // The callee has to preserve all registers the caller needs to preserve. | |||
4378 | const X86RegisterInfo *TRI = Subtarget.getRegisterInfo(); | |||
4379 | const uint32_t *CallerPreserved = TRI->getCallPreservedMask(MF, CallerCC); | |||
4380 | if (!CCMatch) { | |||
4381 | const uint32_t *CalleePreserved = TRI->getCallPreservedMask(MF, CalleeCC); | |||
4382 | if (!TRI->regmaskSubsetEqual(CallerPreserved, CalleePreserved)) | |||
4383 | return false; | |||
4384 | } | |||
4385 | ||||
4386 | unsigned StackArgsSize = 0; | |||
4387 | ||||
4388 | // If the callee takes no arguments then go on to check the results of the | |||
4389 | // call. | |||
4390 | if (!Outs.empty()) { | |||
4391 | // Check if stack adjustment is needed. For now, do not do this if any | |||
4392 | // argument is passed on the stack. | |||
4393 | SmallVector<CCValAssign, 16> ArgLocs; | |||
4394 | CCState CCInfo(CalleeCC, isVarArg, MF, ArgLocs, C); | |||
4395 | ||||
4396 | // Allocate shadow area for Win64 | |||
4397 | if (IsCalleeWin64) | |||
4398 | CCInfo.AllocateStack(32, 8); | |||
4399 | ||||
4400 | CCInfo.AnalyzeCallOperands(Outs, CC_X86); | |||
4401 | StackArgsSize = CCInfo.getNextStackOffset(); | |||
4402 | ||||
4403 | if (CCInfo.getNextStackOffset()) { | |||
4404 | // Check if the arguments are already laid out in the right way as | |||
4405 | // the caller's fixed stack objects. | |||
4406 | MachineFrameInfo &MFI = MF.getFrameInfo(); | |||
4407 | const MachineRegisterInfo *MRI = &MF.getRegInfo(); | |||
4408 | const X86InstrInfo *TII = Subtarget.getInstrInfo(); | |||
4409 | for (unsigned i = 0, e = ArgLocs.size(); i != e; ++i) { | |||
4410 | CCValAssign &VA = ArgLocs[i]; | |||
4411 | SDValue Arg = OutVals[i]; | |||
4412 | ISD::ArgFlagsTy Flags = Outs[i].Flags; | |||
4413 | if (VA.getLocInfo() == CCValAssign::Indirect) | |||
4414 | return false; | |||
4415 | if (!VA.isRegLoc()) { | |||
4416 | if (!MatchingStackOffset(Arg, VA.getLocMemOffset(), Flags, | |||
4417 | MFI, MRI, TII, VA)) | |||
4418 | return false; | |||
4419 | } | |||
4420 | } | |||
4421 | } | |||
4422 | ||||
4423 | bool PositionIndependent = isPositionIndependent(); | |||
4424 | // If the tailcall address may be in a register, then make sure it's | |||
4425 | // possible to register allocate for it. In 32-bit, the call address can | |||
4426 | // only target EAX, EDX, or ECX since the tail call must be scheduled after | |||
4427 | // callee-saved registers are restored. These happen to be the same | |||
4428 | // registers used to pass 'inreg' arguments so watch out for those. | |||
4429 | if (!Subtarget.is64Bit() && ((!isa<GlobalAddressSDNode>(Callee) && | |||
4430 | !isa<ExternalSymbolSDNode>(Callee)) || | |||
4431 | PositionIndependent)) { | |||
4432 | unsigned NumInRegs = 0; | |||
4433 | // In PIC we need an extra register to formulate the address computation | |||
4434 | // for the callee. | |||
4435 | unsigned MaxInRegs = PositionIndependent ? 2 : 3; | |||
4436 | ||||
4437 | for (unsigned i = 0, e = ArgLocs.size(); i != e; ++i) { | |||
4438 | CCValAssign &VA = ArgLocs[i]; | |||
4439 | if (!VA.isRegLoc()) | |||
4440 | continue; | |||
4441 | unsigned Reg = VA.getLocReg(); | |||
4442 | switch (Reg) { | |||
4443 | default: break; | |||
4444 | case X86::EAX: case X86::EDX: case X86::ECX: | |||
4445 | if (++NumInRegs == MaxInRegs) | |||
4446 | return false; | |||
4447 | break; | |||
4448 | } | |||
4449 | } | |||
4450 | } | |||
4451 | ||||
4452 | const MachineRegisterInfo &MRI = MF.getRegInfo(); | |||
4453 | if (!parametersInCSRMatch(MRI, CallerPreserved, ArgLocs, OutVals)) | |||
4454 | return false; | |||
4455 | } | |||
4456 | ||||
4457 | bool CalleeWillPop = | |||
4458 | X86::isCalleePop(CalleeCC, Subtarget.is64Bit(), isVarArg, | |||
4459 | MF.getTarget().Options.GuaranteedTailCallOpt); | |||
4460 | ||||
4461 | if (unsigned BytesToPop = | |||
4462 | MF.getInfo<X86MachineFunctionInfo>()->getBytesToPopOnReturn()) { | |||
4463 | // If we have bytes to pop, the callee must pop them. | |||
4464 | bool CalleePopMatches = CalleeWillPop && BytesToPop == StackArgsSize; | |||
4465 | if (!CalleePopMatches) | |||
4466 | return false; | |||
4467 | } else if (CalleeWillPop && StackArgsSize > 0) { | |||
4468 | // If we don't have bytes to pop, make sure the callee doesn't pop any. | |||
4469 | return false; | |||
4470 | } | |||
4471 | ||||
4472 | return true; | |||
4473 | } | |||
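     | // For illustration: a C-convention caller that simply forwards its own | |||
     | // register arguments to a C-convention callee with a compatible return is | |||
     | // typically accepted here, while in 32-bit PIC code an indirect call that | |||
     | // also passes two 'inreg' arguments is rejected above, since EAX/ECX/EDX | |||
     | // cannot hold both the arguments and the call target. | |||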
4474 | ||||
4475 | FastISel * | |||
4476 | X86TargetLowering::createFastISel(FunctionLoweringInfo &funcInfo, | |||
4477 | const TargetLibraryInfo *libInfo) const { | |||
4478 | return X86::createFastISel(funcInfo, libInfo); | |||
4479 | } | |||
4480 | ||||
4481 | //===----------------------------------------------------------------------===// | |||
4482 | // Other Lowering Hooks | |||
4483 | //===----------------------------------------------------------------------===// | |||
4484 | ||||
4485 | static bool MayFoldLoad(SDValue Op) { | |||
4486 | return Op.hasOneUse() && ISD::isNormalLoad(Op.getNode()); | |||
4487 | } | |||
4488 | ||||
4489 | static bool MayFoldIntoStore(SDValue Op) { | |||
4490 | return Op.hasOneUse() && ISD::isNormalStore(*Op.getNode()->use_begin()); | |||
4491 | } | |||
4492 | ||||
4493 | static bool MayFoldIntoZeroExtend(SDValue Op) { | |||
4494 | if (Op.hasOneUse()) { | |||
4495 | unsigned Opcode = Op.getNode()->use_begin()->getOpcode(); | |||
4496 | return (ISD::ZERO_EXTEND == Opcode); | |||
4497 | } | |||
4498 | return false; | |||
4499 | } | |||
4500 | ||||
4501 | static bool isTargetShuffle(unsigned Opcode) { | |||
4502 | switch(Opcode) { | |||
4503 | default: return false; | |||
4504 | case X86ISD::BLENDI: | |||
4505 | case X86ISD::PSHUFB: | |||
4506 | case X86ISD::PSHUFD: | |||
4507 | case X86ISD::PSHUFHW: | |||
4508 | case X86ISD::PSHUFLW: | |||
4509 | case X86ISD::SHUFP: | |||
4510 | case X86ISD::INSERTPS: | |||
4511 | case X86ISD::EXTRQI: | |||
4512 | case X86ISD::INSERTQI: | |||
4513 | case X86ISD::PALIGNR: | |||
4514 | case X86ISD::VSHLDQ: | |||
4515 | case X86ISD::VSRLDQ: | |||
4516 | case X86ISD::MOVLHPS: | |||
4517 | case X86ISD::MOVHLPS: | |||
4518 | case X86ISD::MOVSHDUP: | |||
4519 | case X86ISD::MOVSLDUP: | |||
4520 | case X86ISD::MOVDDUP: | |||
4521 | case X86ISD::MOVSS: | |||
4522 | case X86ISD::MOVSD: | |||
4523 | case X86ISD::UNPCKL: | |||
4524 | case X86ISD::UNPCKH: | |||
4525 | case X86ISD::VBROADCAST: | |||
4526 | case X86ISD::VPERMILPI: | |||
4527 | case X86ISD::VPERMILPV: | |||
4528 | case X86ISD::VPERM2X128: | |||
4529 | case X86ISD::SHUF128: | |||
4530 | case X86ISD::VPERMIL2: | |||
4531 | case X86ISD::VPERMI: | |||
4532 | case X86ISD::VPPERM: | |||
4533 | case X86ISD::VPERMV: | |||
4534 | case X86ISD::VPERMV3: | |||
4535 | case X86ISD::VZEXT_MOVL: | |||
4536 | return true; | |||
4537 | } | |||
4538 | } | |||
4539 | ||||
4540 | static bool isTargetShuffleVariableMask(unsigned Opcode) { | |||
4541 | switch (Opcode) { | |||
4542 | default: return false; | |||
4543 | // Target Shuffles. | |||
4544 | case X86ISD::PSHUFB: | |||
4545 | case X86ISD::VPERMILPV: | |||
4546 | case X86ISD::VPERMIL2: | |||
4547 | case X86ISD::VPPERM: | |||
4548 | case X86ISD::VPERMV: | |||
4549 | case X86ISD::VPERMV3: | |||
4550 | return true; | |||
4551 | // 'Faux' Target Shuffles. | |||
4552 | case ISD::OR: | |||
4553 | case ISD::AND: | |||
4554 | case X86ISD::ANDNP: | |||
4555 | return true; | |||
4556 | } | |||
4557 | } | |||
4558 | ||||
4559 | SDValue X86TargetLowering::getReturnAddressFrameIndex(SelectionDAG &DAG) const { | |||
4560 | MachineFunction &MF = DAG.getMachineFunction(); | |||
4561 | const X86RegisterInfo *RegInfo = Subtarget.getRegisterInfo(); | |||
4562 | X86MachineFunctionInfo *FuncInfo = MF.getInfo<X86MachineFunctionInfo>(); | |||
4563 | int ReturnAddrIndex = FuncInfo->getRAIndex(); | |||
4564 | ||||
4565 | if (ReturnAddrIndex == 0) { | |||
4566 | // Set up a frame object for the return address. | |||
4567 | unsigned SlotSize = RegInfo->getSlotSize(); | |||
4568 | ReturnAddrIndex = MF.getFrameInfo().CreateFixedObject(SlotSize, | |||
4569 | -(int64_t)SlotSize, | |||
4570 | false); | |||
4571 | FuncInfo->setRAIndex(ReturnAddrIndex); | |||
4572 | } | |||
4573 | ||||
4574 | return DAG.getFrameIndex(ReturnAddrIndex, getPointerTy(DAG.getDataLayout())); | |||
4575 | } | |||
4576 | ||||
4577 | bool X86::isOffsetSuitableForCodeModel(int64_t Offset, CodeModel::Model M, | |||
4578 | bool hasSymbolicDisplacement) { | |||
4579 | // Offset should fit into 32 bit immediate field. | |||
4580 | if (!isInt<32>(Offset)) | |||
4581 | return false; | |||
4582 | ||||
4583 | // If we don't have a symbolic displacement - we don't have any extra | |||
4584 | // restrictions. | |||
4585 | if (!hasSymbolicDisplacement) | |||
4586 | return true; | |||
4587 | ||||
4588 | // FIXME: Some tweaks might be needed for medium code model. | |||
4589 | if (M != CodeModel::Small && M != CodeModel::Kernel) | |||
4590 | return false; | |||
4591 | ||||
4592 | // For the small code model we assume that the last object ends at least 16MB | |||
4593 | // before the 2^31 boundary. We may also accept pretty large negative constants | |||
4594 | // knowing that all objects are in the positive half of the address space. | |||
4595 | if (M == CodeModel::Small && Offset < 16*1024*1024) | |||
4596 | return true; | |||
4597 | ||||
4598 | // For the kernel code model we know that all objects reside in the negative | |||
4599 | // half of the 32-bit address space. We must not accept negative offsets, since | |||
4600 | // they may be just off, but we may accept pretty large positive ones. | |||
4601 | if (M == CodeModel::Kernel && Offset >= 0) | |||
4602 | return true; | |||
4603 | ||||
4604 | return false; | |||
4605 | } | |||
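     | // For illustration: with the small code model, 'symbol + 8MB' is accepted | |||
     | // (the offset stays within the 16MB slack below the 2GB boundary) while | |||
     | // 'symbol + 32MB' is rejected; with the kernel code model any non-negative | |||
     | // offset that fits in 32 bits is accepted. | |||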
4606 | ||||
4607 | /// Determines whether the callee is required to pop its own arguments. | |||
4608 | /// Callee pop is necessary to support tail calls. | |||
4609 | bool X86::isCalleePop(CallingConv::ID CallingConv, | |||
4610 | bool is64Bit, bool IsVarArg, bool GuaranteeTCO) { | |||
4611 | // If GuaranteeTCO is true, we force some calls to be callee pop so that we | |||
4612 | // can guarantee TCO. | |||
4613 | if (!IsVarArg && shouldGuaranteeTCO(CallingConv, GuaranteeTCO)) | |||
4614 | return true; | |||
4615 | ||||
4616 | switch (CallingConv) { | |||
4617 | default: | |||
4618 | return false; | |||
4619 | case CallingConv::X86_StdCall: | |||
4620 | case CallingConv::X86_FastCall: | |||
4621 | case CallingConv::X86_ThisCall: | |||
4622 | case CallingConv::X86_VectorCall: | |||
4623 | return !is64Bit; | |||
4624 | } | |||
4625 | } | |||
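     | // E.g. a 32-bit X86_StdCall callee returns with 'ret N', popping its own | |||
     | // stack arguments (callee pop), whereas the default C convention returns | |||
     | // with a plain 'ret' and leaves argument cleanup to the caller. | |||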
4626 | ||||
4627 | /// Return true if the condition is an unsigned comparison operation. | |||
4628 | static bool isX86CCUnsigned(unsigned X86CC) { | |||
4629 | switch (X86CC) { | |||
4630 | default: | |||
4631 | llvm_unreachable("Invalid integer condition!")::llvm::llvm_unreachable_internal("Invalid integer condition!" , "/build/llvm-toolchain-snapshot-8~svn350071/lib/Target/X86/X86ISelLowering.cpp" , 4631); | |||
4632 | case X86::COND_E: | |||
4633 | case X86::COND_NE: | |||
4634 | case X86::COND_B: | |||
4635 | case X86::COND_A: | |||
4636 | case X86::COND_BE: | |||
4637 | case X86::COND_AE: | |||
4638 | return true; | |||
4639 | case X86::COND_G: | |||
4640 | case X86::COND_GE: | |||
4641 | case X86::COND_L: | |||
4642 | case X86::COND_LE: | |||
4643 | return false; | |||
4644 | } | |||
4645 | } | |||
4646 | ||||
4647 | static X86::CondCode TranslateIntegerX86CC(ISD::CondCode SetCCOpcode) { | |||
4648 | switch (SetCCOpcode) { | |||
4649 | default: llvm_unreachable("Invalid integer condition!"); | |||
4650 | case ISD::SETEQ: return X86::COND_E; | |||
4651 | case ISD::SETGT: return X86::COND_G; | |||
4652 | case ISD::SETGE: return X86::COND_GE; | |||
4653 | case ISD::SETLT: return X86::COND_L; | |||
4654 | case ISD::SETLE: return X86::COND_LE; | |||
4655 | case ISD::SETNE: return X86::COND_NE; | |||
4656 | case ISD::SETULT: return X86::COND_B; | |||
4657 | case ISD::SETUGT: return X86::COND_A; | |||
4658 | case ISD::SETULE: return X86::COND_BE; | |||
4659 | case ISD::SETUGE: return X86::COND_AE; | |||
4660 | } | |||
4661 | } | |||
4662 | ||||
4663 | /// Do a one-to-one translation of an ISD::CondCode to the X86-specific | |||
4664 | /// condition code, returning the condition code and the LHS/RHS of the | |||
4665 | /// comparison to make. | |||
4666 | static X86::CondCode TranslateX86CC(ISD::CondCode SetCCOpcode, const SDLoc &DL, | |||
4667 | bool isFP, SDValue &LHS, SDValue &RHS, | |||
4668 | SelectionDAG &DAG) { | |||
4669 | if (!isFP) { | |||
4670 | if (ConstantSDNode *RHSC = dyn_cast<ConstantSDNode>(RHS)) { | |||
4671 | if (SetCCOpcode == ISD::SETGT && RHSC->isAllOnesValue()) { | |||
4672 | // X > -1 -> X == 0, jump !sign. | |||
4673 | RHS = DAG.getConstant(0, DL, RHS.getValueType()); | |||
4674 | return X86::COND_NS; | |||
4675 | } | |||
4676 | if (SetCCOpcode == ISD::SETLT && RHSC->isNullValue()) { | |||
4677 | // X < 0 -> X == 0, jump on sign. | |||
4678 | return X86::COND_S; | |||
4679 | } | |||
4680 | if (SetCCOpcode == ISD::SETLT && RHSC->getZExtValue() == 1) { | |||
4681 | // X < 1 -> X <= 0 | |||
4682 | RHS = DAG.getConstant(0, DL, RHS.getValueType()); | |||
4683 | return X86::COND_LE; | |||
4684 | } | |||
4685 | } | |||
4686 | ||||
4687 | return TranslateIntegerX86CC(SetCCOpcode); | |||
4688 | } | |||
4689 | ||||
4690 | // First determine if it is required or is profitable to flip the operands. | |||
4691 | ||||
4692 | // If LHS is a foldable load, but RHS is not, flip the condition. | |||
4693 | if (ISD::isNON_EXTLoad(LHS.getNode()) && | |||
4694 | !ISD::isNON_EXTLoad(RHS.getNode())) { | |||
4695 | SetCCOpcode = getSetCCSwappedOperands(SetCCOpcode); | |||
4696 | std::swap(LHS, RHS); | |||
4697 | } | |||
4698 | ||||
4699 | switch (SetCCOpcode) { | |||
4700 | default: break; | |||
4701 | case ISD::SETOLT: | |||
4702 | case ISD::SETOLE: | |||
4703 | case ISD::SETUGT: | |||
4704 | case ISD::SETUGE: | |||
4705 | std::swap(LHS, RHS); | |||
4706 | break; | |||
4707 | } | |||
4708 | ||||
4709 | // On a floating point condition, the flags are set as follows: | |||
4710 | // ZF PF CF op | |||
4711 | // 0 | 0 | 0 | X > Y | |||
4712 | // 0 | 0 | 1 | X < Y | |||
4713 | // 1 | 0 | 0 | X == Y | |||
4714 | // 1 | 1 | 1 | unordered | |||
4715 | switch (SetCCOpcode) { | |||
4716 | default: llvm_unreachable("Condcode should be pre-legalized away"); | |||
4717 | case ISD::SETUEQ: | |||
4718 | case ISD::SETEQ: return X86::COND_E; | |||
4719 | case ISD::SETOLT: // flipped | |||
4720 | case ISD::SETOGT: | |||
4721 | case ISD::SETGT: return X86::COND_A; | |||
4722 | case ISD::SETOLE: // flipped | |||
4723 | case ISD::SETOGE: | |||
4724 | case ISD::SETGE: return X86::COND_AE; | |||
4725 | case ISD::SETUGT: // flipped | |||
4726 | case ISD::SETULT: | |||
4727 | case ISD::SETLT: return X86::COND_B; | |||
4728 | case ISD::SETUGE: // flipped | |||
4729 | case ISD::SETULE: | |||
4730 | case ISD::SETLE: return X86::COND_BE; | |||
4731 | case ISD::SETONE: | |||
4732 | case ISD::SETNE: return X86::COND_NE; | |||
4733 | case ISD::SETUO: return X86::COND_P; | |||
4734 | case ISD::SETO: return X86::COND_NP; | |||
4735 | case ISD::SETOEQ: | |||
4736 | case ISD::SETUNE: return X86::COND_INVALID; | |||
4737 | } | |||
4738 | } | |||
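     | // Worked example: 'setolt x, y' is swapped above, so we compare (y, x) and | |||
     | // test COND_A; per the flag table, ZF == 0 and CF == 0 hold exactly when | |||
     | // y > x with neither operand unordered, i.e. the ordered x < y we wanted. | |||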
4739 | ||||
4740 | /// Is there a floating point cmov for the specific X86 condition code? | |||
4741 | /// The current x86 ISA includes the following FP cmov instructions: | |||
4742 | /// fcmovb, fcmovbe, fcmove, fcmovu, fcmovae, fcmova, fcmovne, fcmovnu. | |||
4743 | static bool hasFPCMov(unsigned X86CC) { | |||
4744 | switch (X86CC) { | |||
4745 | default: | |||
4746 | return false; | |||
4747 | case X86::COND_B: | |||
4748 | case X86::COND_BE: | |||
4749 | case X86::COND_E: | |||
4750 | case X86::COND_P: | |||
4751 | case X86::COND_A: | |||
4752 | case X86::COND_AE: | |||
4753 | case X86::COND_NE: | |||
4754 | case X86::COND_NP: | |||
4755 | return true; | |||
4756 | } | |||
4757 | } | |||
4758 | ||||
4759 | ||||
4760 | bool X86TargetLowering::getTgtMemIntrinsic(IntrinsicInfo &Info, | |||
4761 | const CallInst &I, | |||
4762 | MachineFunction &MF, | |||
4763 | unsigned Intrinsic) const { | |||
4764 | ||||
4765 | const IntrinsicData* IntrData = getIntrinsicWithChain(Intrinsic); | |||
4766 | if (!IntrData) | |||
4767 | return false; | |||
4768 | ||||
4769 | Info.opc = ISD::INTRINSIC_W_CHAIN; | |||
4770 | Info.flags = MachineMemOperand::MONone; | |||
4771 | Info.offset = 0; | |||
4772 | ||||
4773 | switch (IntrData->Type) { | |||
4774 | case TRUNCATE_TO_MEM_VI8: | |||
4775 | case TRUNCATE_TO_MEM_VI16: | |||
4776 | case TRUNCATE_TO_MEM_VI32: { | |||
4777 | Info.ptrVal = I.getArgOperand(0); | |||
4778 | MVT VT = MVT::getVT(I.getArgOperand(1)->getType()); | |||
4779 | MVT ScalarVT = MVT::INVALID_SIMPLE_VALUE_TYPE; | |||
4780 | if (IntrData->Type == TRUNCATE_TO_MEM_VI8) | |||
4781 | ScalarVT = MVT::i8; | |||
4782 | else if (IntrData->Type == TRUNCATE_TO_MEM_VI16) | |||
4783 | ScalarVT = MVT::i16; | |||
4784 | else if (IntrData->Type == TRUNCATE_TO_MEM_VI32) | |||
4785 | ScalarVT = MVT::i32; | |||
4786 | ||||
4787 | Info.memVT = MVT::getVectorVT(ScalarVT, VT.getVectorNumElements()); | |||
4788 | Info.align = 1; | |||
4789 | Info.flags |= MachineMemOperand::MOStore; | |||
4790 | break; | |||
4791 | } | |||
4792 | default: | |||
4793 | return false; | |||
4794 | } | |||
4795 | ||||
4796 | return true; | |||
4797 | } | |||
4798 | ||||
4799 | /// Returns true if the target can instruction select the | |||
4800 | /// specified FP immediate natively. If false, the legalizer will | |||
4801 | /// materialize the FP immediate as a load from a constant pool. | |||
4802 | bool X86TargetLowering::isFPImmLegal(const APFloat &Imm, EVT VT) const { | |||
4803 | for (unsigned i = 0, e = LegalFPImmediates.size(); i != e; ++i) { | |||
4804 | if (Imm.bitwiseIsEqual(LegalFPImmediates[i])) | |||
4805 | return true; | |||
4806 | } | |||
4807 | return false; | |||
4808 | } | |||
4809 | ||||
4810 | bool X86TargetLowering::shouldReduceLoadWidth(SDNode *Load, | |||
4811 | ISD::LoadExtType ExtTy, | |||
4812 | EVT NewVT) const { | |||
4813 | // "ELF Handling for Thread-Local Storage" specifies that R_X86_64_GOTTPOFF | |||
4814 | // relocation target a movq or addq instruction: don't let the load shrink. | |||
4815 | SDValue BasePtr = cast<LoadSDNode>(Load)->getBasePtr(); | |||
4816 | if (BasePtr.getOpcode() == X86ISD::WrapperRIP) | |||
4817 | if (const auto *GA = dyn_cast<GlobalAddressSDNode>(BasePtr.getOperand(0))) | |||
4818 | return GA->getTargetFlags() != X86II::MO_GOTTPOFF; | |||
4819 | return true; | |||
4820 | } | |||
4821 | ||||
4822 | /// Returns true if it is beneficial to convert a load of a constant | |||
4823 | /// to just the constant itself. | |||
4824 | bool X86TargetLowering::shouldConvertConstantLoadToIntImm(const APInt &Imm, | |||
4825 | Type *Ty) const { | |||
4826 | assert(Ty->isIntegerTy()); | |||
4827 | ||||
4828 | unsigned BitSize = Ty->getPrimitiveSizeInBits(); | |||
4829 | if (BitSize == 0 || BitSize > 64) | |||
4830 | return false; | |||
4831 | return true; | |||
4832 | } | |||
4833 | ||||
4834 | bool X86TargetLowering::reduceSelectOfFPConstantLoads(bool IsFPSetCC) const { | |||
4835 | // If we are using XMM registers in the ABI and the condition of the select is | |||
4836 | // a floating-point compare and we have blendv or conditional move, then it is | |||
4837 | // cheaper to select instead of doing a cross-register move and creating a | |||
4838 | // load that depends on the compare result. | |||
4839 | return !IsFPSetCC || !Subtarget.isTarget64BitLP64() || !Subtarget.hasAVX(); | |||
4840 | } | |||
4841 | ||||
4842 | bool X86TargetLowering::convertSelectOfConstantsToMath(EVT VT) const { | |||
4843 | // TODO: It might be a win to ease or lift this restriction, but the generic | |||
4844 | // folds in DAGCombiner conflict with vector folds for an AVX512 target. | |||
4845 | if (VT.isVector() && Subtarget.hasAVX512()) | |||
4846 | return false; | |||
4847 | ||||
4848 | return true; | |||
4849 | } | |||
4850 | ||||
4851 | bool X86TargetLowering::decomposeMulByConstant(EVT VT, SDValue C) const { | |||
4852 | // TODO: We handle scalars using custom code, but generic combining could make | |||
4853 | // that unnecessary. | |||
4854 | APInt MulC; | |||
4855 | if (!ISD::isConstantSplatVector(C.getNode(), MulC)) | |||
4856 | return false; | |||
4857 | ||||
4858 | // If vector multiply is legal, assume that's faster than shl + add/sub. | |||
4859 | // TODO: Multiply is a complex op with higher latency and lower throughput in | |||
4860 | // most implementations, so this check could be loosened based on type | |||
4861 | // and/or a CPU attribute. | |||
4862 | if (isOperationLegal(ISD::MUL, VT)) | |||
4863 | return false; | |||
4864 | ||||
4865 | // shl+add, shl+sub, shl+add+neg | |||
4866 | return (MulC + 1).isPowerOf2() || (MulC - 1).isPowerOf2() || | |||
4867 | (1 - MulC).isPowerOf2() || (-(MulC + 1)).isPowerOf2(); | |||
4868 | } | |||
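     | // For illustration: a splat multiply by 17 decomposes as (X << 4) + X | |||
     | // (MulC - 1 is a power of 2), by 15 as (X << 4) - X (MulC + 1), and by -15 | |||
     | // as X - (X << 4) (1 - MulC), which is exactly what the final test accepts. | |||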
4869 | ||||
4870 | bool X86TargetLowering::shouldUseStrictFP_TO_INT(EVT FpVT, EVT IntVT, | |||
4871 | bool IsSigned) const { | |||
4872 | // f80 FP_TO_UINT is more efficient using Strict code if FCMOV is available. | |||
4873 | return !IsSigned && FpVT == MVT::f80 && Subtarget.hasCMov(); | |||
4874 | } | |||
4875 | ||||
4876 | bool X86TargetLowering::isExtractSubvectorCheap(EVT ResVT, EVT SrcVT, | |||
4877 | unsigned Index) const { | |||
4878 | if (!isOperationLegalOrCustom(ISD::EXTRACT_SUBVECTOR, ResVT)) | |||
4879 | return false; | |||
4880 | ||||
4881 | // Mask vectors support all subregister combinations and operations that | |||
4882 | // extract half of a vector. | |||
4883 | if (ResVT.getVectorElementType() == MVT::i1) | |||
4884 | return Index == 0 || ((ResVT.getSizeInBits() == SrcVT.getSizeInBits()*2) && | |||
4885 | (Index == ResVT.getVectorNumElements())); | |||
4886 | ||||
4887 | return (Index % ResVT.getVectorNumElements()) == 0; | |||
4888 | } | |||
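     | // E.g. extracting v8i1 from v16i1 is treated as cheap at Index 0 or Index 8 | |||
     | // (either half); for other types only extracts whose index is a multiple of | |||
     | // the result's element count (a subregister boundary) are considered cheap. | |||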
4889 | ||||
4890 | bool X86TargetLowering::isCheapToSpeculateCttz() const { | |||
4891 | // Speculate cttz only if we can directly use TZCNT. | |||
4892 | return Subtarget.hasBMI(); | |||
4893 | } | |||
4894 | ||||
4895 | bool X86TargetLowering::isCheapToSpeculateCtlz() const { | |||
4896 | // Speculate ctlz only if we can directly use LZCNT. | |||
4897 | return Subtarget.hasLZCNT(); | |||
4898 | } | |||
4899 | ||||
4900 | bool X86TargetLowering::isLoadBitCastBeneficial(EVT LoadVT, | |||
4901 | EVT BitcastVT) const { | |||
4902 | if (!Subtarget.hasAVX512() && !LoadVT.isVector() && BitcastVT.isVector() && | |||
4903 | BitcastVT.getVectorElementType() == MVT::i1) | |||
4904 | return false; | |||
4905 | ||||
4906 | if (!Subtarget.hasDQI() && BitcastVT == MVT::v8i1 && LoadVT == MVT::i8) | |||
4907 | return false; | |||
4908 | ||||
4909 | return TargetLowering::isLoadBitCastBeneficial(LoadVT, BitcastVT); | |||
4910 | } | |||
4911 | ||||
4912 | bool X86TargetLowering::canMergeStoresTo(unsigned AddressSpace, EVT MemVT, | |||
4913 | const SelectionDAG &DAG) const { | |||
4914 | // If the no-implicit-float attribute is set, do not merge stores into a value | |||
4915 | // wider than the largest legal integer, as that would require XMM registers. | |||
4916 | bool NoFloat = DAG.getMachineFunction().getFunction().hasFnAttribute( | |||
4917 | Attribute::NoImplicitFloat); | |||
4918 | ||||
4919 | if (NoFloat) { | |||
4920 | unsigned MaxIntSize = Subtarget.is64Bit() ? 64 : 32; | |||
4921 | return (MemVT.getSizeInBits() <= MaxIntSize); | |||
4922 | } | |||
4923 | return true; | |||
4924 | } | |||
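     | // E.g. under NoImplicitFloat on a 64-bit target, merging two i32 stores into | |||
     | // one i64 store is fine (64 <= 64 bits), but merging four i32 stores into a | |||
     | // single 128-bit store is rejected, since it would need an XMM register. | |||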
4925 | ||||
4926 | bool X86TargetLowering::isCtlzFast() const { | |||
4927 | return Subtarget.hasFastLZCNT(); | |||
4928 | } | |||
4929 | ||||
4930 | bool X86TargetLowering::isMaskAndCmp0FoldingBeneficial( | |||
4931 | const Instruction &AndI) const { | |||
4932 | return true; | |||
4933 | } | |||
4934 | ||||
4935 | bool X86TargetLowering::hasAndNotCompare(SDValue Y) const { | |||
4936 | EVT VT = Y.getValueType(); | |||
4937 | ||||
4938 | if (VT.isVector()) | |||
4939 | return false; | |||
4940 | ||||
4941 | if (!Subtarget.hasBMI()) | |||
4942 | return false; | |||
4943 | ||||
4944 | // There are only 32-bit and 64-bit forms for 'andn'. | |||
4945 | if (VT != MVT::i32 && VT != MVT::i64) | |||
4946 | return false; | |||
4947 | ||||
4948 | return !isa<ConstantSDNode>(Y); | |||
4949 | } | |||
4950 | ||||
4951 | bool X86TargetLowering::hasAndNot(SDValue Y) const { | |||
4952 | EVT VT = Y.getValueType(); | |||
4953 | ||||
4954 | if (!VT.isVector()) | |||
4955 | return hasAndNotCompare(Y); | |||
4956 | ||||
4957 | // Vector. | |||
4958 | ||||
4959 | if (!Subtarget.hasSSE1() || VT.getSizeInBits() < 128) | |||
4960 | return false; | |||
4961 | ||||
4962 | if (VT == MVT::v4i32) | |||
4963 | return true; | |||
4964 | ||||
4965 | return Subtarget.hasSSE2(); | |||
4966 | } | |||
4967 | ||||
4968 | bool X86TargetLowering::preferShiftsToClearExtremeBits(SDValue Y) const { | |||
4969 | EVT VT = Y.getValueType(); | |||
4970 | ||||
4971 | // For vectors, we don't have a preference, but we probably want a mask. | |||
4972 | if (VT.isVector()) | |||
4973 | return false; | |||
4974 | ||||
4975 | // 64-bit shifts on 32-bit targets produce really bad bloated code. | |||
4976 | if (VT == MVT::i64 && !Subtarget.is64Bit()) | |||
4977 | return false; | |||
4978 | ||||
4979 | return true; | |||
4980 | } | |||
4981 | ||||
4982 | bool X86TargetLowering::shouldSplatInsEltVarIndex(EVT VT) const { | |||
4983 | // Any legal vector type can be splatted more efficiently than | |||
4984 | // loading/spilling from memory. | |||
4985 | return isTypeLegal(VT); | |||
4986 | } | |||
4987 | ||||
4988 | MVT X86TargetLowering::hasFastEqualityCompare(unsigned NumBits) const { | |||
4989 | MVT VT = MVT::getIntegerVT(NumBits); | |||
4990 | if (isTypeLegal(VT)) | |||
4991 | return VT; | |||
4992 | ||||
4993 | // PMOVMSKB can handle this. | |||
4994 | if (NumBits == 128 && isTypeLegal(MVT::v16i8)) | |||
4995 | return MVT::v16i8; | |||
4996 | ||||
4997 | // VPMOVMSKB can handle this. | |||
4998 | if (NumBits == 256 && isTypeLegal(MVT::v32i8)) | |||
4999 | return MVT::v32i8; | |||
5000 | ||||
5001 | // TODO: Allow 64-bit type for 32-bit target. | |||
5002 | // TODO: 512-bit types should be allowed, but make sure that those | |||
5003 | // cases are handled in combineVectorSizedSetCCEquality(). | |||
5004 | ||||
5005 | return MVT::INVALID_SIMPLE_VALUE_TYPE; | |||
5006 | } | |||
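     | // E.g. a 16-byte equality test can be lowered as a v16i8 PCMPEQB feeding | |||
     | // PMOVMSKB, then one compare of the 16-bit mask against 0xFFFF, rather than | |||
     | // two 64-bit compares and a branch. | |||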
5007 | ||||
5008 | /// Val is the undef sentinel value or equal to the specified value. | |||
5009 | static bool isUndefOrEqual(int Val, int CmpVal) { | |||
5010 | return ((Val == SM_SentinelUndef) || (Val == CmpVal)); | |||
5011 | } | |||
5012 | ||||
5013 | /// Val is either the undef or zero sentinel value. | |||
5014 | static bool isUndefOrZero(int Val) { | |||
5015 | return ((Val == SM_SentinelUndef) || (Val == SM_SentinelZero)); | |||
5016 | } | |||
5017 | ||||
5018 | /// Return true if every element in Mask, beginning | |||
5019 | /// from position Pos and ending in Pos+Size is the undef sentinel value. | |||
5020 | static bool isUndefInRange(ArrayRef<int> Mask, unsigned Pos, unsigned Size) { | |||
5021 | for (unsigned i = Pos, e = Pos + Size; i != e; ++i) | |||
5022 | if (Mask[i] != SM_SentinelUndef) | |||
5023 | return false; | |||
5024 | return true; | |||
5025 | } | |||
5026 | ||||
5027 | /// Return true if Val falls within the specified half-open range [Low, Hi). | |||
5028 | static bool isInRange(int Val, int Low, int Hi) { | |||
5029 | return (Val >= Low && Val < Hi); | |||
5030 | } | |||
5031 | ||||
5032 | /// Return true if the value of any element in Mask falls within the specified | |||
5033 | /// range [Low, Hi). | |||
5034 | static bool isAnyInRange(ArrayRef<int> Mask, int Low, int Hi) { | |||
5035 | for (int M : Mask) | |||
5036 | if (isInRange(M, Low, Hi)) | |||
5037 | return true; | |||
5038 | return false; | |||
5039 | } | |||
5040 | ||||
5041 | /// Return true if Val is undef or if its value falls within the | |||
5042 | /// specified range [Low, Hi). | |||
5043 | static bool isUndefOrInRange(int Val, int Low, int Hi) { | |||
5044 | return (Val == SM_SentinelUndef) || isInRange(Val, Low, Hi); | |||
5045 | } | |||
5046 | ||||
5047 | /// Return true if every element in Mask is undef or if its value | |||
5048 | /// falls within the specified range [Low, Hi). | |||
5049 | static bool isUndefOrInRange(ArrayRef<int> Mask, | |||
5050 | int Low, int Hi) { | |||
5051 | for (int M : Mask) | |||
5052 | if (!isUndefOrInRange(M, Low, Hi)) | |||
5053 | return false; | |||
5054 | return true; | |||
5055 | } | |||
5056 | ||||
5057 | /// Return true if Val is undef, zero or if its value falls within the | |||
5058 | /// specified range [Low, Hi). | |||
5059 | static bool isUndefOrZeroOrInRange(int Val, int Low, int Hi) { | |||
5060 | return isUndefOrZero(Val) || isInRange(Val, Low, Hi); | |||
5061 | } | |||
5062 | ||||
5063 | /// Return true if every element in Mask is undef, zero or if its value | |||
5064 | /// falls within the specified range [Low, Hi). | |||
5065 | static bool isUndefOrZeroOrInRange(ArrayRef<int> Mask, int Low, int Hi) { | |||
5066 | for (int M : Mask) | |||
5067 | if (!isUndefOrZeroOrInRange(M, Low, Hi)) | |||
5068 | return false; | |||
5069 | return true; | |||
5070 | } | |||
5071 | ||||
5072 | /// Return true if every element in Mask, beginning | |||
5073 | /// from position Pos and ending in Pos + Size, falls within the specified | |||
5074 | /// sequence (Low, Low + Step, ..., Low + (Size - 1) * Step) or is undef. | |||
5075 | static bool isSequentialOrUndefInRange(ArrayRef<int> Mask, unsigned Pos, | |||
5076 | unsigned Size, int Low, int Step = 1) { | |||
5077 | for (unsigned i = Pos, e = Pos + Size; i != e; ++i, Low += Step) | |||
5078 | if (!isUndefOrEqual(Mask[i], Low)) | |||
5079 | return false; | |||
5080 | return true; | |||
5081 | } | |||
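     | // E.g. Mask = {0, -1, 2, 3} satisfies (Pos = 0, Size = 4, Low = 0): element | |||
     | // 1 is the undef sentinel and the rest step by 1 from 0. With Step = 2, | |||
     | // Mask = {0, 2, -1, 6} likewise matches the expected sequence 0, 2, 4, 6. | |||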
5082 | ||||
5083 | /// Return true if every element in Mask, beginning | |||
5084 | /// from position Pos and ending in Pos+Size, falls within the specified | |||
5085 | /// sequential range [Low, Low + Size), or is undef or is zero. | |||
5086 | static bool isSequentialOrUndefOrZeroInRange(ArrayRef<int> Mask, unsigned Pos, | |||
5087 | unsigned Size, int Low) { | |||
5088 | for (unsigned i = Pos, e = Pos + Size; i != e; ++i, ++Low) | |||
5089 | if (!isUndefOrZero(Mask[i]) && Mask[i] != Low) | |||
5090 | return false; | |||
5091 | return true; | |||
5092 | } | |||
5093 | ||||
5094 | /// Return true if every element in Mask, beginning | |||
5095 | /// from position Pos and ending in Pos+Size is undef or is zero. | |||
5096 | static bool isUndefOrZeroInRange(ArrayRef<int> Mask, unsigned Pos, | |||
5097 | unsigned Size) { | |||
5098 | for (unsigned i = Pos, e = Pos + Size; i != e; ++i) | |||
5099 | if (!isUndefOrZero(Mask[i])) | |||
5100 | return false; | |||
5101 | return true; | |||
5102 | } | |||
5103 | ||||
5104 | /// Helper function to test whether a shuffle mask could be | |||
5105 | /// simplified by widening the elements being shuffled. | |||
5106 | /// | |||
5107 | /// Appends the mask for wider elements in WidenedMask if valid. Otherwise | |||
5108 | /// leaves it in an unspecified state. | |||
5109 | /// | |||
5110 | /// NOTE: This must handle normal vector shuffle masks and *target* vector | |||
5111 | /// shuffle masks. The latter have the special property of a '-2' representing | |||
5112 | /// a zeroed lane of a vector. | |||
5113 | static bool canWidenShuffleElements(ArrayRef<int> Mask, | |||
5114 | SmallVectorImpl<int> &WidenedMask) { | |||
5115 | WidenedMask.assign(Mask.size() / 2, 0); | |||
5116 | for (int i = 0, Size = Mask.size(); i < Size; i += 2) { | |||
5117 | int M0 = Mask[i]; | |||
5118 | int M1 = Mask[i + 1]; | |||
5119 | ||||
5120 | // If both elements are undef, it's trivial. | |||
5121 | if (M0 == SM_SentinelUndef && M1 == SM_SentinelUndef) { | |||
5122 | WidenedMask[i / 2] = SM_SentinelUndef; | |||
5123 | continue; | |||
5124 | } | |||
5125 | ||||
5126 | // Check for an undef mask and a mask value properly aligned to fit with | |||
5127 | // a pair of values. If we find such a case, use the non-undef mask's value. | |||
5128 | if (M0 == SM_SentinelUndef && M1 >= 0 && (M1 % 2) == 1) { | |||
5129 | WidenedMask[i / 2] = M1 / 2; | |||
5130 | continue; | |||
5131 | } | |||
5132 | if (M1 == SM_SentinelUndef && M0 >= 0 && (M0 % 2) == 0) { | |||
5133 | WidenedMask[i / 2] = M0 / 2; | |||
5134 | continue; | |||
5135 | } | |||
5136 | ||||
5137 | // When zeroing, we need to spread the zeroing across both lanes to widen. | |||
5138 | if (M0 == SM_SentinelZero || M1 == SM_SentinelZero) { | |||
5139 | if ((M0 == SM_SentinelZero || M0 == SM_SentinelUndef) && | |||
5140 | (M1 == SM_SentinelZero || M1 == SM_SentinelUndef)) { | |||
5141 | WidenedMask[i / 2] = SM_SentinelZero; | |||
5142 | continue; | |||
5143 | } | |||
5144 | return false; | |||
5145 | } | |||
5146 | ||||
5147 | // Finally check if the two mask values are adjacent and aligned with | |||
5148 | // a pair. | |||
5149 | if (M0 != SM_SentinelUndef && (M0 % 2) == 0 && (M0 + 1) == M1) { | |||
5150 | WidenedMask[i / 2] = M0 / 2; | |||
5151 | continue; | |||
5152 | } | |||
5153 | ||||
5154 | // Otherwise we can't safely widen the elements used in this shuffle. | |||
5155 | return false; | |||
5156 | } | |||
5157 | assert(WidenedMask.size() == Mask.size() / 2 && | |||
5158 |        "Incorrect size of mask after widening the elements!"); | |||
5159 | ||||
5160 | return true; | |||
5161 | } | |||
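     | // Worked example: for v8i16, Mask = {0, 1, 6, 7, -1, -1, 4, 5} widens to the | |||
     | // v4i32 mask {0, 3, -1, 2}, while any mask containing the pair (1, 2) fails | |||
     | // because that pair straddles two of the wider elements. | |||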
5162 | ||||
5163 | static bool canWidenShuffleElements(ArrayRef<int> Mask, | |||
5164 | const APInt &Zeroable, | |||
5165 | SmallVectorImpl<int> &WidenedMask) { | |||
5166 | SmallVector<int, 32> TargetMask(Mask.begin(), Mask.end()); | |||
5167 | for (int i = 0, Size = TargetMask.size(); i < Size; ++i) { | |||
5168 | if (TargetMask[i] == SM_SentinelUndef) | |||
5169 | continue; | |||
5170 | if (Zeroable[i]) | |||
5171 | TargetMask[i] = SM_SentinelZero; | |||
5172 | } | |||
5173 | return canWidenShuffleElements(TargetMask, WidenedMask); | |||
5174 | } | |||
5175 | ||||
5176 | static bool canWidenShuffleElements(ArrayRef<int> Mask) { | |||
5177 | SmallVector<int, 32> WidenedMask; | |||
5178 | return canWidenShuffleElements(Mask, WidenedMask); | |||
5179 | } | |||
5180 | ||||
5181 | /// Returns true if Elt is a constant zero or a floating point constant +0.0. | |||
5182 | bool X86::isZeroNode(SDValue Elt) { | |||
5183 | return isNullConstant(Elt) || isNullFPConstant(Elt); | |||
5184 | } | |||
5185 | ||||
5186 | // Build a vector of constants. | |||
5187 | // Use an UNDEF node if MaskElt == -1. | |||
5188 | // Split 64-bit constants in the 32-bit mode. | |||
5189 | static SDValue getConstVector(ArrayRef<int> Values, MVT VT, SelectionDAG &DAG, | |||
5190 | const SDLoc &dl, bool IsMask = false) { | |||
5191 | ||||
5192 | SmallVector<SDValue, 32> Ops; | |||
5193 | bool Split = false; | |||
5194 | ||||
5195 | MVT ConstVecVT = VT; | |||
5196 | unsigned NumElts = VT.getVectorNumElements(); | |||
5197 | bool In64BitMode = DAG.getTargetLoweringInfo().isTypeLegal(MVT::i64); | |||
5198 | if (!In64BitMode && VT.getVectorElementType() == MVT::i64) { | |||
5199 | ConstVecVT = MVT::getVectorVT(MVT::i32, NumElts * 2); | |||
5200 | Split = true; | |||
5201 | } | |||
5202 | ||||
5203 | MVT EltVT = ConstVecVT.getVectorElementType(); | |||
5204 | for (unsigned i = 0; i < NumElts; ++i) { | |||
5205 | bool IsUndef = Values[i] < 0 && IsMask; | |||
5206 | SDValue OpNode = IsUndef ? DAG.getUNDEF(EltVT) : | |||
5207 | DAG.getConstant(Values[i], dl, EltVT); | |||
5208 | Ops.push_back(OpNode); | |||
5209 | if (Split) | |||
5210 | Ops.push_back(IsUndef ? DAG.getUNDEF(EltVT) : | |||
5211 | DAG.getConstant(0, dl, EltVT)); | |||
5212 | } | |||
5213 | SDValue ConstsNode = DAG.getBuildVector(ConstVecVT, dl, Ops); | |||
5214 | if (Split) | |||
5215 | ConstsNode = DAG.getBitcast(VT, ConstsNode); | |||
5216 | return ConstsNode; | |||
5217 | } | |||
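     | // For illustration: in 32-bit mode (no legal i64), a v2i64 constant {1, 2} | |||
     | // is materialized as the v4i32 build vector {1, 0, 2, 0} and then bitcast | |||
     | // back to v2i64 (little-endian low/high halves). | |||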
5218 | ||||
5219 | static SDValue getConstVector(ArrayRef<APInt> Bits, APInt &Undefs, | |||
5220 | MVT VT, SelectionDAG &DAG, const SDLoc &dl) { | |||
5221 | assert(Bits.size() == Undefs.getBitWidth() && | |||
5222 |        "Unequal constant and undef arrays"); | |||
5223 | SmallVector<SDValue, 32> Ops; | |||
5224 | bool Split = false; | |||
5225 | ||||
5226 | MVT ConstVecVT = VT; | |||
5227 | unsigned NumElts = VT.getVectorNumElements(); | |||
5228 | bool In64BitMode = DAG.getTargetLoweringInfo().isTypeLegal(MVT::i64); | |||
5229 | if (!In64BitMode && VT.getVectorElementType() == MVT::i64) { | |||
5230 | ConstVecVT = MVT::getVectorVT(MVT::i32, NumElts * 2); | |||
5231 | Split = true; | |||
5232 | } | |||
5233 | ||||
5234 | MVT EltVT = ConstVecVT.getVectorElementType(); | |||
5235 | for (unsigned i = 0, e = Bits.size(); i != e; ++i) { | |||
5236 | if (Undefs[i]) { | |||
5237 | Ops.append(Split ? 2 : 1, DAG.getUNDEF(EltVT)); | |||
5238 | continue; | |||
5239 | } | |||
5240 | const APInt &V = Bits[i]; | |||
5241 | assert(V.getBitWidth() == VT.getScalarSizeInBits() && "Unexpected sizes"); | |||
5242 | if (Split) { | |||
5243 | Ops.push_back(DAG.getConstant(V.trunc(32), dl, EltVT)); | |||
5244 | Ops.push_back(DAG.getConstant(V.lshr(32).trunc(32), dl, EltVT)); | |||
5245 | } else if (EltVT == MVT::f32) { | |||
5246 | APFloat FV(APFloat::IEEEsingle(), V); | |||
5247 | Ops.push_back(DAG.getConstantFP(FV, dl, EltVT)); | |||
5248 | } else if (EltVT == MVT::f64) { | |||
5249 | APFloat FV(APFloat::IEEEdouble(), V); | |||
5250 | Ops.push_back(DAG.getConstantFP(FV, dl, EltVT)); | |||
5251 | } else { | |||
5252 | Ops.push_back(DAG.getConstant(V, dl, EltVT)); | |||
5253 | } | |||
5254 | } | |||
5255 | ||||
5256 | SDValue ConstsNode = DAG.getBuildVector(ConstVecVT, dl, Ops); | |||
5257 | return DAG.getBitcast(VT, ConstsNode); | |||
5258 | } | |||
5259 | ||||
5260 | /// Returns a vector of specified type with all zero elements. | |||
5261 | static SDValue getZeroVector(MVT VT, const X86Subtarget &Subtarget, | |||
5262 | SelectionDAG &DAG, const SDLoc &dl) { | |||
5263 | assert((VT.is128BitVector() || VT.is256BitVector() || VT.is512BitVector() || | |||
5264 |         VT.getVectorElementType() == MVT::i1) && | |||
5265 |        "Unexpected vector type"); | |||
5266 | ||||
5267 | // Try to build SSE/AVX zero vectors as <N x i32> bitcasted to their dest | |||
5268 | // type. This ensures they get CSE'd. But if the integer type is not | |||
5269 | // available, use a floating-point +0.0 instead. | |||
5270 | SDValue Vec; | |||
5271 | if (!Subtarget.hasSSE2() && VT.is128BitVector()) { | |||
5272 | Vec = DAG.getConstantFP(+0.0, dl, MVT::v4f32); | |||
5273 | } else if (VT.getVectorElementType() == MVT::i1) { | |||
5274 | assert((Subtarget.hasBWI() || VT.getVectorNumElements() <= 16) && | |||
5275 |        "Unexpected vector type"); | |||
5276 | Vec = DAG.getConstant(0, dl, VT); | |||
5277 | } else { | |||
5278 | unsigned Num32BitElts = VT.getSizeInBits() / 32; | |||
5279 | Vec = DAG.getConstant(0, dl, MVT::getVectorVT(MVT::i32, Num32BitElts)); | |||
5280 | } | |||
5281 | return DAG.getBitcast(VT, Vec); | |||
5282 | } | |||
5283 | ||||
5284 | static SDValue extractSubVector(SDValue Vec, unsigned IdxVal, SelectionDAG &DAG, | |||
5285 | const SDLoc &dl, unsigned vectorWidth) { | |||
5286 | EVT VT = Vec.getValueType(); | |||
5287 | EVT ElVT = VT.getVectorElementType(); | |||
5288 | unsigned Factor = VT.getSizeInBits()/vectorWidth; | |||
5289 | EVT ResultVT = EVT::getVectorVT(*DAG.getContext(), ElVT, | |||
5290 | VT.getVectorNumElements()/Factor); | |||
5291 | ||||
5292 | // Extract the relevant vectorWidth bits. Generate an EXTRACT_SUBVECTOR | |||
5293 | unsigned ElemsPerChunk = vectorWidth / ElVT.getSizeInBits(); | |||
5294 | assert(isPowerOf2_32(ElemsPerChunk) && "Elements per chunk not power of 2"); | |||
5295 | ||||
5296 | // This is the index of the first element of the vectorWidth-bit chunk | |||
5297 | // we want. Since ElemsPerChunk is a power of 2, we just need to clear bits. | |||
5298 | IdxVal &= ~(ElemsPerChunk - 1); | |||
5299 | ||||
5300 | // If the input is a buildvector just emit a smaller one. | |||
5301 | if (Vec.getOpcode() == ISD::BUILD_VECTOR) | |||
5302 | return DAG.getBuildVector(ResultVT, dl, | |||
5303 | Vec->ops().slice(IdxVal, ElemsPerChunk)); | |||
5304 | ||||
5305 | SDValue VecIdx = DAG.getIntPtrConstant(IdxVal, dl); | |||
5306 | return DAG.getNode(ISD::EXTRACT_SUBVECTOR, dl, ResultVT, Vec, VecIdx); | |||
5307 | } | |||
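// e.g. with a v8i32 source and vectorWidth == 128, ElemsPerChunk is 4, so an
// unaligned IdxVal of 5 is rounded down to 4 and the node extracts the v4i32
// holding elements <4,5,6,7>.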

/// Generate a DAG to grab 128-bits from a vector > 128 bits. This
/// sets things up to match an AVX VEXTRACTF128 / VEXTRACTI128
/// or AVX-512 VEXTRACTF32x4 / VEXTRACTI32x4
/// instruction, or a simple subregister reference. Idx is an index in the
/// 128 bits we want. It need not be aligned to a 128-bit boundary. That makes
/// lowering EXTRACT_VECTOR_ELT operations easier.
static SDValue extract128BitVector(SDValue Vec, unsigned IdxVal,
                                   SelectionDAG &DAG, const SDLoc &dl) {
  assert((Vec.getValueType().is256BitVector() ||
          Vec.getValueType().is512BitVector()) && "Unexpected vector size!");
  return extractSubVector(Vec, IdxVal, DAG, dl, 128);
}

/// Generate a DAG to grab 256-bits from a 512-bit vector.
static SDValue extract256BitVector(SDValue Vec, unsigned IdxVal,
                                   SelectionDAG &DAG, const SDLoc &dl) {
  assert(Vec.getValueType().is512BitVector() && "Unexpected vector size!");
  return extractSubVector(Vec, IdxVal, DAG, dl, 256);
}

static SDValue insertSubVector(SDValue Result, SDValue Vec, unsigned IdxVal,
                               SelectionDAG &DAG, const SDLoc &dl,
                               unsigned vectorWidth) {
  assert((vectorWidth == 128 || vectorWidth == 256) &&
         "Unsupported vector width");
  // Inserting UNDEF is a nop: Result is returned unchanged.
  if (Vec.isUndef())
    return Result;
  EVT VT = Vec.getValueType();
  EVT ElVT = VT.getVectorElementType();
  EVT ResultVT = Result.getValueType();

  // Insert the relevant vectorWidth bits.
  unsigned ElemsPerChunk = vectorWidth / ElVT.getSizeInBits();
  assert(isPowerOf2_32(ElemsPerChunk) && "Elements per chunk not power of 2");

  // This is the index of the first element of the vectorWidth-bit chunk
  // we want. Since ElemsPerChunk is a power of 2 just need to clear bits.
  IdxVal &= ~(ElemsPerChunk - 1);

  SDValue VecIdx = DAG.getIntPtrConstant(IdxVal, dl);
  return DAG.getNode(ISD::INSERT_SUBVECTOR, dl, ResultVT, Result, Vec, VecIdx);
}
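// Mirror of extractSubVector: inserting a v4i32 into a v8i32 at IdxVal == 5
// likewise rounds the index down to the chunk boundary (4) before emitting
// the INSERT_SUBVECTOR node.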

/// Generate a DAG to put 128-bits into a vector > 128 bits. This
/// sets things up to match an AVX VINSERTF128/VINSERTI128 or
/// AVX-512 VINSERTF32x4/VINSERTI32x4 instruction, or a
/// simple superregister reference. Idx is an index in the 128 bits
/// we want. It need not be aligned to a 128-bit boundary. That makes
/// lowering INSERT_VECTOR_ELT operations easier.
static SDValue insert128BitVector(SDValue Result, SDValue Vec, unsigned IdxVal,
                                  SelectionDAG &DAG, const SDLoc &dl) {
  assert(Vec.getValueType().is128BitVector() && "Unexpected vector size!");
  return insertSubVector(Result, Vec, IdxVal, DAG, dl, 128);
}

/// Widen a vector to a larger size with the same scalar type, with the new
/// elements either zero or undef.
static SDValue widenSubVector(MVT VT, SDValue Vec, bool ZeroNewElements,
                              const X86Subtarget &Subtarget, SelectionDAG &DAG,
                              const SDLoc &dl) {
  assert(Vec.getValueSizeInBits() < VT.getSizeInBits() &&
         Vec.getValueType().getScalarType() == VT.getScalarType() &&
         "Unsupported vector widening type");
  SDValue Res = ZeroNewElements ? getZeroVector(VT, Subtarget, DAG, dl)
                                : DAG.getUNDEF(VT);
  return DAG.getNode(ISD::INSERT_SUBVECTOR, dl, VT, Res, Vec,
                     DAG.getIntPtrConstant(0, dl));
}
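// e.g. widening a v4i32 to v8i32 with ZeroNewElements builds a v8i32 zero
// vector and inserts the v4i32 at element 0, leaving elements 4-7 zero; with
// ZeroNewElements == false the upper elements are undef instead.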

// Helper for splitting the operands of an operation to the legal target size
// and applying a function to each part.
// Useful for operations that are available on SSE2 in 128-bit, on AVX2 in
// 256-bit and on AVX512BW in 512-bit. The argument VT is the type used for
// deciding if/how to split Ops. Ops elements do *not* have to be of type VT.
// The argument Builder is a function that will be applied on each split part:
// SDValue Builder(SelectionDAG &G, const SDLoc &DL, ArrayRef<SDValue> Ops)
template <typename F>
SDValue SplitOpsAndApply(SelectionDAG &DAG, const X86Subtarget &Subtarget,
                         const SDLoc &DL, EVT VT, ArrayRef<SDValue> Ops,
                         F Builder, bool CheckBWI = true) {
  assert(Subtarget.hasSSE2() && "Target assumed to support at least SSE2");
  unsigned NumSubs = 1;
  if ((CheckBWI && Subtarget.useBWIRegs()) ||
      (!CheckBWI && Subtarget.useAVX512Regs())) {
    if (VT.getSizeInBits() > 512) {
      NumSubs = VT.getSizeInBits() / 512;
      assert((VT.getSizeInBits() % 512) == 0 && "Illegal vector size");
    }
  } else if (Subtarget.hasAVX2()) {
    if (VT.getSizeInBits() > 256) {
      NumSubs = VT.getSizeInBits() / 256;
      assert((VT.getSizeInBits() % 256) == 0 && "Illegal vector size");
    }
  } else {
    if (VT.getSizeInBits() > 128) {
      NumSubs = VT.getSizeInBits() / 128;
      assert((VT.getSizeInBits() % 128) == 0 && "Illegal vector size");
    }
  }

  if (NumSubs == 1)
    return Builder(DAG, DL, Ops);

  SmallVector<SDValue, 4> Subs;
  for (unsigned i = 0; i != NumSubs; ++i) {
    SmallVector<SDValue, 2> SubOps;
    for (SDValue Op : Ops) {
      EVT OpVT = Op.getValueType();
      unsigned NumSubElts = OpVT.getVectorNumElements() / NumSubs;
      unsigned SizeSub = OpVT.getSizeInBits() / NumSubs;
      SubOps.push_back(extractSubVector(Op, i * NumSubElts, DAG, DL, SizeSub));
    }
    Subs.push_back(Builder(DAG, DL, SubOps));
  }
  return DAG.getNode(ISD::CONCAT_VECTORS, DL, VT, Subs);
}
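// For instance, a v64i8 operation on an AVX2 target without BWI registers is
// split into two v32i8 halves: each operand is divided with extractSubVector,
// the Builder callback emits the 256-bit node for each half, and the halves
// are rejoined with a CONCAT_VECTORS of the original v64i8 type.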

// Return true if the instruction zeroes the unused upper part of the
// destination and accepts a mask.
static bool isMaskedZeroUpperBitsvXi1(unsigned int Opcode) {
  switch (Opcode) {
  default:
    return false;
  case X86ISD::CMPM:
  case X86ISD::CMPM_RND:
  case ISD::SETCC:
    return true;
  }
}

/// Insert i1-subvector to i1-vector.
static SDValue insert1BitVector(SDValue Op, SelectionDAG &DAG,
                                const X86Subtarget &Subtarget) {

  SDLoc dl(Op);
  SDValue Vec = Op.getOperand(0);
  SDValue SubVec = Op.getOperand(1);
  SDValue Idx = Op.getOperand(2);

  if (!isa<ConstantSDNode>(Idx))
    return SDValue();

  // Inserting undef is a nop. We can just return the original vector.
  if (SubVec.isUndef())
    return Vec;

  unsigned IdxVal = cast<ConstantSDNode>(Idx)->getZExtValue();
  if (IdxVal == 0 && Vec.isUndef()) // the operation is legal
    return Op;

  MVT OpVT = Op.getSimpleValueType();
  unsigned NumElems = OpVT.getVectorNumElements();

  SDValue ZeroIdx = DAG.getIntPtrConstant(0, dl);

  // Extend to natively supported kshift.
  MVT WideOpVT = OpVT;
  if ((!Subtarget.hasDQI() && NumElems == 8) || NumElems < 8)
    WideOpVT = Subtarget.hasDQI() ? MVT::v8i1 : MVT::v16i1;

  // Inserting into the lsbs of a zero vector is legal. ISel will insert shifts
  // if necessary.
  if (IdxVal == 0 && ISD::isBuildVectorAllZeros(Vec.getNode())) {
    // May need to promote to a legal type.
    Op = DAG.getNode(ISD::INSERT_SUBVECTOR, dl, WideOpVT,
                     getZeroVector(WideOpVT, Subtarget, DAG, dl),
                     SubVec, Idx);
    return DAG.getNode(ISD::EXTRACT_SUBVECTOR, dl, OpVT, Op, ZeroIdx);
  }

  MVT SubVecVT = SubVec.getSimpleValueType();
  unsigned SubVecNumElems = SubVecVT.getVectorNumElements();

  assert(IdxVal + SubVecNumElems <= NumElems &&
         IdxVal % SubVecVT.getSizeInBits() == 0 &&
         "Unexpected index value in INSERT_SUBVECTOR");

  SDValue Undef = DAG.getUNDEF(WideOpVT);

  if (IdxVal == 0) {
    // Zero the lower bits of Vec.
    SDValue ShiftBits = DAG.getConstant(SubVecNumElems, dl, MVT::i8);
    Vec = DAG.getNode(ISD::INSERT_SUBVECTOR, dl, WideOpVT, Undef, Vec,
                      ZeroIdx);
    Vec = DAG.getNode(X86ISD::KSHIFTR, dl, WideOpVT, Vec, ShiftBits);
    Vec = DAG.getNode(X86ISD::KSHIFTL, dl, WideOpVT, Vec, ShiftBits);
    // Merge them together, SubVec should be zero extended.
    SubVec = DAG.getNode(ISD::INSERT_SUBVECTOR, dl, WideOpVT,
                         getZeroVector(WideOpVT, Subtarget, DAG, dl),
                         SubVec, ZeroIdx);
    Op = DAG.getNode(ISD::OR, dl, WideOpVT, Vec, SubVec);
    return DAG.getNode(ISD::EXTRACT_SUBVECTOR, dl, OpVT, Op, ZeroIdx);
  }

  SubVec = DAG.getNode(ISD::INSERT_SUBVECTOR, dl, WideOpVT,
                       Undef, SubVec, ZeroIdx);

  if (Vec.isUndef()) {
    assert(IdxVal != 0 && "Unexpected index");
    SubVec = DAG.getNode(X86ISD::KSHIFTL, dl, WideOpVT, SubVec,
                         DAG.getConstant(IdxVal, dl, MVT::i8));
    return DAG.getNode(ISD::EXTRACT_SUBVECTOR, dl, OpVT, SubVec, ZeroIdx);
  }

  if (ISD::isBuildVectorAllZeros(Vec.getNode())) {
    assert(IdxVal != 0 && "Unexpected index");
    NumElems = WideOpVT.getVectorNumElements();
    unsigned ShiftLeft = NumElems - SubVecNumElems;
    unsigned ShiftRight = NumElems - SubVecNumElems - IdxVal;
    SubVec = DAG.getNode(X86ISD::KSHIFTL, dl, WideOpVT, SubVec,
                         DAG.getConstant(ShiftLeft, dl, MVT::i8));
    if (ShiftRight != 0)
      SubVec = DAG.getNode(X86ISD::KSHIFTR, dl, WideOpVT, SubVec,
                           DAG.getConstant(ShiftRight, dl, MVT::i8));
    return DAG.getNode(ISD::EXTRACT_SUBVECTOR, dl, OpVT, SubVec, ZeroIdx);
  }

  // Simple case when we put the subvector in the upper part.
  if (IdxVal + SubVecNumElems == NumElems) {
    SubVec = DAG.getNode(X86ISD::KSHIFTL, dl, WideOpVT, SubVec,
                         DAG.getConstant(IdxVal, dl, MVT::i8));
    if (SubVecNumElems * 2 == NumElems) {
      // Special case, use legal zero extending insert_subvector. This allows
      // isel to optimize when bits are known zero.
      Vec = DAG.getNode(ISD::EXTRACT_SUBVECTOR, dl, SubVecVT, Vec, ZeroIdx);
      Vec = DAG.getNode(ISD::INSERT_SUBVECTOR, dl, WideOpVT,
                        getZeroVector(WideOpVT, Subtarget, DAG, dl),
                        Vec, ZeroIdx);
    } else {
      // Otherwise use explicit shifts to zero the bits.
      Vec = DAG.getNode(ISD::INSERT_SUBVECTOR, dl, WideOpVT,
                        Undef, Vec, ZeroIdx);
      NumElems = WideOpVT.getVectorNumElements();
      SDValue ShiftBits = DAG.getConstant(NumElems - IdxVal, dl, MVT::i8);
      Vec = DAG.getNode(X86ISD::KSHIFTL, dl, WideOpVT, Vec, ShiftBits);
      Vec = DAG.getNode(X86ISD::KSHIFTR, dl, WideOpVT, Vec, ShiftBits);
    }
    Op = DAG.getNode(ISD::OR, dl, WideOpVT, Vec, SubVec);
    return DAG.getNode(ISD::EXTRACT_SUBVECTOR, dl, OpVT, Op, ZeroIdx);
  }

  // Inserting into the middle is more complicated.

  NumElems = WideOpVT.getVectorNumElements();

  // Widen the vector if needed.
  Vec = DAG.getNode(ISD::INSERT_SUBVECTOR, dl, WideOpVT, Undef, Vec, ZeroIdx);
  // Move the current value of the bits to be replaced to the lsbs.
  Op = DAG.getNode(X86ISD::KSHIFTR, dl, WideOpVT, Vec,
                   DAG.getConstant(IdxVal, dl, MVT::i8));
  // Xor with the new bits.
  Op = DAG.getNode(ISD::XOR, dl, WideOpVT, Op, SubVec);
  // Shift to MSB, filling bottom bits with 0.
  unsigned ShiftLeft = NumElems - SubVecNumElems;
  Op = DAG.getNode(X86ISD::KSHIFTL, dl, WideOpVT, Op,
                   DAG.getConstant(ShiftLeft, dl, MVT::i8));
  // Shift to the final position, filling upper bits with 0.
  unsigned ShiftRight = NumElems - SubVecNumElems - IdxVal;
  Op = DAG.getNode(X86ISD::KSHIFTR, dl, WideOpVT, Op,
                   DAG.getConstant(ShiftRight, dl, MVT::i8));
  // Xor with original vector leaving the new value.
  Op = DAG.getNode(ISD::XOR, dl, WideOpVT, Vec, Op);
  // Reduce to original width if needed.
  return DAG.getNode(ISD::EXTRACT_SUBVECTOR, dl, OpVT, Op, ZeroIdx);
}
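// Concrete trace of the middle-insertion path (assuming DQI, so a v8i1 keeps
// WideOpVT == v8i1): inserting a v2i1 at IdxVal == 2. KSHIFTR brings Vec[3:2]
// down to bits [1:0]; the first XOR yields Vec[3:2] ^ SubVec; KSHIFTL by 6
// then KSHIFTR by 4 isolates that value at bits [3:2] with zeros elsewhere;
// the final XOR with Vec restores Vec everywhere else and leaves
// Vec[3:2] ^ (Vec[3:2] ^ SubVec) == SubVec in bits [3:2].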

static SDValue concatSubVectors(SDValue V1, SDValue V2, EVT VT,
                                unsigned NumElems, SelectionDAG &DAG,
                                const SDLoc &dl, unsigned VectorWidth) {
  SDValue V = insertSubVector(DAG.getUNDEF(VT), V1, 0, DAG, dl, VectorWidth);
  return insertSubVector(V, V2, NumElems / 2, DAG, dl, VectorWidth);
}

/// Returns a vector of specified type with all bits set.
/// Always build ones vectors as <4 x i32>, <8 x i32> or <16 x i32>.
/// Then bitcast to their original type, ensuring they get CSE'd.
static SDValue getOnesVector(EVT VT, SelectionDAG &DAG, const SDLoc &dl) {
  assert((VT.is128BitVector() || VT.is256BitVector() || VT.is512BitVector()) &&
         "Expected a 128/256/512-bit vector type");

  APInt Ones = APInt::getAllOnesValue(32);
  unsigned NumElts = VT.getSizeInBits() / 32;
  SDValue Vec = DAG.getConstant(Ones, dl, MVT::getVectorVT(MVT::i32, NumElts));
  return DAG.getBitcast(VT, Vec);
}
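// e.g. an all-ones v4i64 becomes a v8i32 splat of 0xFFFFFFFF bitcast back to
// v4i64; as with getZeroVector, funnelling through <N x i32> lets all-ones
// vectors of the same width share one constant node.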

static SDValue getExtendInVec(bool Signed, const SDLoc &DL, EVT VT, SDValue In,
                              SelectionDAG &DAG) {
  EVT InVT = In.getValueType();
  assert(VT.isVector() && InVT.isVector() && "Expected vector VTs.");

  // For 256-bit vectors, we only need the lower (128-bit) input half.
  // For 512-bit vectors, we only need the lower input half or quarter.
  if (InVT.getSizeInBits() > 128) {
    assert(VT.getSizeInBits() == InVT.getSizeInBits() &&
           "Expected VTs to be the same size!");
    unsigned Scale = VT.getScalarSizeInBits() / InVT.getScalarSizeInBits();
    In = extractSubVector(In, 0, DAG, DL,
                          std::max(128U, VT.getSizeInBits() / Scale));
    InVT = In.getValueType();
  }

  if (VT.getVectorNumElements() == InVT.getVectorNumElements())
    return DAG.getNode(Signed ? ISD::SIGN_EXTEND : ISD::ZERO_EXTEND,
                       DL, VT, In);

  return DAG.getNode(Signed ? ISD::SIGN_EXTEND_VECTOR_INREG
                            : ISD::ZERO_EXTEND_VECTOR_INREG,
                     DL, VT, In);
}
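// For instance, extending to v8i64 from a 512-bit In == v16i32: Scale is
// 64/32 == 2, so only the low 256 bits (a v8i32) are extracted first; the
// element counts then match and a plain SIGN_EXTEND/ZERO_EXTEND suffices.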

/// Returns a vector_shuffle node for an unpackl operation.
static SDValue getUnpackl(SelectionDAG &DAG, const SDLoc &dl, MVT VT,
                          SDValue V1, SDValue V2) {
  SmallVector<int, 8> Mask;
  createUnpackShuffleMask(VT, Mask, /* Lo = */ true, /* Unary = */ false);
  return DAG.getVectorShuffle(VT, dl, V1, V2, Mask);
}

/// Returns a vector_shuffle node for an unpackh operation.
static SDValue getUnpackh(SelectionDAG &DAG, const SDLoc &dl, MVT VT,
                          SDValue V1, SDValue V2) {
  SmallVector<int, 8> Mask;
  createUnpackShuffleMask(VT, Mask, /* Lo = */ false, /* Unary = */ false);
  return DAG.getVectorShuffle(VT, dl, V1, V2, Mask);
}
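// For v4i32 these produce the classic interleaving masks: unpackl yields
// <0,4,1,5> (low halves of V1 and V2 interleaved) and unpackh yields
// <2,6,3,7> (high halves), matching PUNPCKLDQ/PUNPCKHDQ.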

/// Return a vector_shuffle of the specified vector and a zero or undef
/// vector. This produces a shuffle where the low element of V2 is swizzled
/// into the zero/undef vector, landing at element Idx.
/// This produces a shuffle mask like 4,1,2,3 (idx=0) or 0,1,2,4 (idx=3).
static SDValue getShuffleVectorZeroOrUndef(SDValue V2, int Idx,
                                           bool IsZero,
                                           const X86Subtarget &Subtarget,
                                           SelectionDAG &DAG) {
  MVT VT = V2.getSimpleValueType();
  SDValue V1 = IsZero
    ? getZeroVector(VT, Subtarget, DAG, SDLoc(V2)) : DAG.getUNDEF(VT);
  int NumElems = VT.getVectorNumElements();
  SmallVector<int, 16> MaskVec(NumElems);
  for (int i = 0; i != NumElems; ++i)
    // If this is the insertion idx, put the low elt of V2 here.
    MaskVec[i] = (i == Idx) ? NumElems : i;
  return DAG.getVectorShuffle(VT, SDLoc(V2), V1, V2, MaskVec);
}

// Peek through EXTRACT_SUBVECTORs - typically used for AVX1 256-bit intops.
static SDValue peekThroughEXTRACT_SUBVECTORs(SDValue V) {
  while (V.getOpcode() == ISD::EXTRACT_SUBVECTOR)
    V = V.getOperand(0);
  return V;
}

static const Constant *getTargetConstantFromNode(SDValue Op) {
  Op = peekThroughBitcasts(Op);

  auto *Load = dyn_cast<LoadSDNode>(Op);
  if (!Load)
    return nullptr;

  SDValue Ptr = Load->getBasePtr();
  if (Ptr->getOpcode() == X86ISD::Wrapper ||
      Ptr->getOpcode() == X86ISD::WrapperRIP)
    Ptr = Ptr->getOperand(0);

  auto *CNode = dyn_cast<ConstantPoolSDNode>(Ptr);
  if (!CNode || CNode->isMachineConstantPoolEntry() || CNode->getOffset() != 0)
    return nullptr;

  return CNode->getConstVal();
}

// Extract raw constant bits from constant pools.
static bool getTargetConstantBitsFromNode(SDValue Op, unsigned EltSizeInBits,
                                          APInt &UndefElts,
                                          SmallVectorImpl<APInt> &EltBits,
                                          bool AllowWholeUndefs = true,
                                          bool AllowPartialUndefs = true) {
  assert(EltBits.empty() && "Expected an empty EltBits vector");

  Op = peekThroughBitcasts(Op);

  EVT VT = Op.getValueType();
  unsigned SizeInBits = VT.getSizeInBits();
  assert((SizeInBits % EltSizeInBits) == 0 && "Can't split constant!");
  unsigned NumElts = SizeInBits / EltSizeInBits;

  // Bitcast a source array of element bits to the target size.
  auto CastBitData = [&](APInt &UndefSrcElts, ArrayRef<APInt> SrcEltBits) {
    unsigned NumSrcElts = UndefSrcElts.getBitWidth();
    unsigned SrcEltSizeInBits = SrcEltBits[0].getBitWidth();
    assert((NumSrcElts * SrcEltSizeInBits) == SizeInBits &&
           "Constant bit sizes don't match");

    // Don't split if we don't allow undef bits.
    bool AllowUndefs = AllowWholeUndefs || AllowPartialUndefs;
    if (UndefSrcElts.getBoolValue() && !AllowUndefs)
      return false;

    // If we're already the right size, don't bother bitcasting.
    if (NumSrcElts == NumElts) {
      UndefElts = UndefSrcElts;
      EltBits.assign(SrcEltBits.begin(), SrcEltBits.end());
      return true;
    }

    // Extract all the undef/constant element data and pack into single
    // bitsets.
    APInt UndefBits(SizeInBits, 0);
    APInt MaskBits(SizeInBits, 0);

    for (unsigned i = 0; i != NumSrcElts; ++i) {
      unsigned BitOffset = i * SrcEltSizeInBits;
      if (UndefSrcElts[i])
        UndefBits.setBits(BitOffset, BitOffset + SrcEltSizeInBits);
      MaskBits.insertBits(SrcEltBits[i], BitOffset);
    }

    // Split the undef/constant single bitset data into the target elements.
    UndefElts = APInt(NumElts, 0);
    EltBits.resize(NumElts, APInt(EltSizeInBits, 0));

    for (unsigned i = 0; i != NumElts; ++i) {
      unsigned BitOffset = i * EltSizeInBits;
      APInt UndefEltBits = UndefBits.extractBits(EltSizeInBits, BitOffset);

      // Only treat an element as UNDEF if all bits are UNDEF.
      if (UndefEltBits.isAllOnesValue()) {
        if (!AllowWholeUndefs)
          return false;
        UndefElts.setBit(i);
        continue;
      }

      // If only some bits are UNDEF then treat them as zero (or bail if not
      // supported).
      if (UndefEltBits.getBoolValue() && !AllowPartialUndefs)
        return false;

      APInt Bits = MaskBits.extractBits(EltSizeInBits, BitOffset);
      EltBits[i] = Bits.getZExtValue();
    }
    return true;
  };

  // Collect constant bits and insert into mask/undef bit masks.
  auto CollectConstantBits = [](const Constant *Cst, APInt &Mask, APInt &Undefs,
                                unsigned UndefBitIndex) {
    if (!Cst)
      return false;
    if (isa<UndefValue>(Cst)) {
      Undefs.setBit(UndefBitIndex);
      return true;
    }
    if (auto *CInt = dyn_cast<ConstantInt>(Cst)) {
      Mask = CInt->getValue();
      return true;
    }
    if (auto *CFP = dyn_cast<ConstantFP>(Cst)) {
      Mask = CFP->getValueAPF().bitcastToAPInt();
      return true;
    }
    return false;
  };

  // Handle UNDEFs.
  if (Op.isUndef()) {
    APInt UndefSrcElts = APInt::getAllOnesValue(NumElts);
    SmallVector<APInt, 64> SrcEltBits(NumElts, APInt(EltSizeInBits, 0));
    return CastBitData(UndefSrcElts, SrcEltBits);
  }

  // Extract scalar constant bits.
  if (auto *Cst = dyn_cast<ConstantSDNode>(Op)) {
    APInt UndefSrcElts = APInt::getNullValue(1);
    SmallVector<APInt, 64> SrcEltBits(1, Cst->getAPIntValue());
    return CastBitData(UndefSrcElts, SrcEltBits);
  }
  if (auto *Cst = dyn_cast<ConstantFPSDNode>(Op)) {
    APInt UndefSrcElts = APInt::getNullValue(1);
    APInt RawBits = Cst->getValueAPF().bitcastToAPInt();
    SmallVector<APInt, 64> SrcEltBits(1, RawBits);
    return CastBitData(UndefSrcElts, SrcEltBits);
  }

  // Extract constant bits from build vector.
  if (ISD::isBuildVectorOfConstantSDNodes(Op.getNode())) {
    unsigned SrcEltSizeInBits = VT.getScalarSizeInBits();
    unsigned NumSrcElts = SizeInBits / SrcEltSizeInBits;

    APInt UndefSrcElts(NumSrcElts, 0);
    SmallVector<APInt, 64> SrcEltBits(NumSrcElts, APInt(SrcEltSizeInBits, 0));
    for (unsigned i = 0, e = Op.getNumOperands(); i != e; ++i) {
      const SDValue &Src = Op.getOperand(i);
      if (Src.isUndef()) {
        UndefSrcElts.setBit(i);
        continue;
      }
      auto *Cst = cast<ConstantSDNode>(Src);
      SrcEltBits[i] = Cst->getAPIntValue().zextOrTrunc(SrcEltSizeInBits);
    }
    return CastBitData(UndefSrcElts, SrcEltBits);
  }
  if (ISD::isBuildVectorOfConstantFPSDNodes(Op.getNode())) {
    unsigned SrcEltSizeInBits = VT.getScalarSizeInBits();
    unsigned NumSrcElts = SizeInBits / SrcEltSizeInBits;

    APInt UndefSrcElts(NumSrcElts, 0);
    SmallVector<APInt, 64> SrcEltBits(NumSrcElts, APInt(SrcEltSizeInBits, 0));
    for (unsigned i = 0, e = Op.getNumOperands(); i != e; ++i) {
      const SDValue &Src = Op.getOperand(i);
      if (Src.isUndef()) {
        UndefSrcElts.setBit(i);
        continue;
      }
      auto *Cst = cast<ConstantFPSDNode>(Src);
      APInt RawBits = Cst->getValueAPF().bitcastToAPInt();
      SrcEltBits[i] = RawBits.zextOrTrunc(SrcEltSizeInBits);
    }
    return CastBitData(UndefSrcElts, SrcEltBits);
  }

  // Extract constant bits from constant pool vector.
  if (auto *Cst = getTargetConstantFromNode(Op)) {
    Type *CstTy = Cst->getType();
    unsigned CstSizeInBits = CstTy->getPrimitiveSizeInBits();
    if (!CstTy->isVectorTy() || (CstSizeInBits % SizeInBits) != 0)
      return false;

    unsigned SrcEltSizeInBits = CstTy->getScalarSizeInBits();
    unsigned NumSrcElts = SizeInBits / SrcEltSizeInBits;

    APInt UndefSrcElts(NumSrcElts, 0);
    SmallVector<APInt, 64> SrcEltBits(NumSrcElts, APInt(SrcEltSizeInBits, 0));
    for (unsigned i = 0; i != NumSrcElts; ++i)
      if (!CollectConstantBits(Cst->getAggregateElement(i), SrcEltBits[i],
                               UndefSrcElts, i))
        return false;

    return CastBitData(UndefSrcElts, SrcEltBits);
  }

  // Extract constant bits from a broadcasted constant pool scalar.
  if (Op.getOpcode() == X86ISD::VBROADCAST &&
      EltSizeInBits <= VT.getScalarSizeInBits()) {
    if (auto *Broadcast = getTargetConstantFromNode(Op.getOperand(0))) {
      unsigned SrcEltSizeInBits = Broadcast->getType()->getScalarSizeInBits();
      unsigned NumSrcElts = SizeInBits / SrcEltSizeInBits;

      APInt UndefSrcElts(NumSrcElts, 0);
      SmallVector<APInt, 64> SrcEltBits(1, APInt(SrcEltSizeInBits, 0));
      if (CollectConstantBits(Broadcast, SrcEltBits[0], UndefSrcElts, 0)) {
        if (UndefSrcElts[0])
          UndefSrcElts.setBits(0, NumSrcElts);
        SrcEltBits.append(NumSrcElts - 1, SrcEltBits[0]);
        return CastBitData(UndefSrcElts, SrcEltBits);
      }
    }
  }

  // Extract a rematerialized scalar constant insertion.
  if (Op.getOpcode() == X86ISD::VZEXT_MOVL &&
      Op.getOperand(0).getOpcode() == ISD::SCALAR_TO_VECTOR &&
      isa<ConstantSDNode>(Op.getOperand(0).getOperand(0))) {
    unsigned SrcEltSizeInBits = VT.getScalarSizeInBits();
    unsigned NumSrcElts = SizeInBits / SrcEltSizeInBits;

    APInt UndefSrcElts(NumSrcElts, 0);
    SmallVector<APInt, 64> SrcEltBits;
    auto *CN = cast<ConstantSDNode>(Op.getOperand(0).getOperand(0));
    SrcEltBits.push_back(CN->getAPIntValue().zextOrTrunc(SrcEltSizeInBits));
    SrcEltBits.append(NumSrcElts - 1, APInt(SrcEltSizeInBits, 0));
    return CastBitData(UndefSrcElts, SrcEltBits);
  }

  // Extract constant bits from a subvector's source.
  if (Op.getOpcode() == ISD::EXTRACT_SUBVECTOR &&
      isa<ConstantSDNode>(Op.getOperand(1))) {
    // TODO - support extract_subvector through bitcasts.
    if (EltSizeInBits != VT.getScalarSizeInBits())
      return false;

    if (getTargetConstantBitsFromNode(Op.getOperand(0), EltSizeInBits,
                                      UndefElts, EltBits, AllowWholeUndefs,
                                      AllowPartialUndefs)) {
      EVT SrcVT = Op.getOperand(0).getValueType();
      unsigned NumSrcElts = SrcVT.getVectorNumElements();
      unsigned NumSubElts = VT.getVectorNumElements();
      unsigned BaseIdx = Op.getConstantOperandVal(1);
      UndefElts = UndefElts.extractBits(NumSubElts, BaseIdx);
      if ((BaseIdx + NumSubElts) != NumSrcElts)
        EltBits.erase(EltBits.begin() + BaseIdx + NumSubElts, EltBits.end());
      if (BaseIdx != 0)
        EltBits.erase(EltBits.begin(), EltBits.begin() + BaseIdx);
      return true;
    }
  }

  // Extract constant bits from shuffle node sources.
  if (auto *SVN = dyn_cast<ShuffleVectorSDNode>(Op)) {
    // TODO - support shuffle through bitcasts.
    if (EltSizeInBits != VT.getScalarSizeInBits())
      return false;

    ArrayRef<int> Mask = SVN->getMask();
    if ((!AllowWholeUndefs || !AllowPartialUndefs) &&
        llvm::any_of(Mask, [](int M) { return M < 0; }))
      return false;

    APInt UndefElts0, UndefElts1;
    SmallVector<APInt, 32> EltBits0, EltBits1;
    if (isAnyInRange(Mask, 0, NumElts) &&
        !getTargetConstantBitsFromNode(Op.getOperand(0), EltSizeInBits,
                                       UndefElts0, EltBits0, AllowWholeUndefs,
                                       AllowPartialUndefs))
      return false;
    if (isAnyInRange(Mask, NumElts, 2 * NumElts) &&
        !getTargetConstantBitsFromNode(Op.getOperand(1), EltSizeInBits,
                                       UndefElts1, EltBits1, AllowWholeUndefs,
                                       AllowPartialUndefs))
      return false;

    UndefElts = APInt::getNullValue(NumElts);
    for (int i = 0; i != (int)NumElts; ++i) {
      int M = Mask[i];
      if (M < 0) {
        UndefElts.setBit(i);
        EltBits.push_back(APInt::getNullValue(EltSizeInBits));
      } else if (M < (int)NumElts) {
        if (UndefElts0[M])
          UndefElts.setBit(i);
        EltBits.push_back(EltBits0[M]);
      } else {
        if (UndefElts1[M - NumElts])
          UndefElts.setBit(i);
        EltBits.push_back(EltBits1[M - NumElts]);
      }
    }
    return true;
  }

  return false;
}
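// Illustrative trace of the bitcasting path: querying a v4i32 build_vector
// <1, 2, undef, 4> at EltSizeInBits == 64 packs the i32 pairs, so EltBits
// becomes {0x0000000200000001, 0x0000000400000000}. The lone undef i32 only
// partially covers a 64-bit element, so (with AllowPartialUndefs) it reads as
// zero rather than marking the whole element undef.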

static bool isConstantSplat(SDValue Op, APInt &SplatVal) {
  APInt UndefElts;
  SmallVector<APInt, 16> EltBits;
  if (getTargetConstantBitsFromNode(Op, Op.getScalarValueSizeInBits(),
                                    UndefElts, EltBits, true, false)) {
    int SplatIndex = -1;
    for (int i = 0, e = EltBits.size(); i != e; ++i) {
      if (UndefElts[i])
        continue;
      if (0 <= SplatIndex && EltBits[i] != EltBits[SplatIndex]) {
        SplatIndex = -1;
        break;
      }
      SplatIndex = i;
    }
    if (0 <= SplatIndex) {
      SplatVal = EltBits[SplatIndex];
      return true;
    }
  }

  return false;
}
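// e.g. a constant <42, undef, 42, 42> splats to 42 (whole-undef lanes are
// skipped), while <1, 2, 2, 2> fails because two defined lanes disagree.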

static bool getTargetShuffleMaskIndices(SDValue MaskNode,
                                        unsigned MaskEltSizeInBits,
                                        SmallVectorImpl<uint64_t> &RawMask,
                                        APInt &UndefElts) {
  // Extract the raw target constant bits.
  SmallVector<APInt, 64> EltBits;
  if (!getTargetConstantBitsFromNode(MaskNode, MaskEltSizeInBits, UndefElts,
                                     EltBits, /* AllowWholeUndefs */ true,
                                     /* AllowPartialUndefs */ false))
    return false;

  // Insert the extracted elements into the mask.
  for (APInt Elt : EltBits)
    RawMask.push_back(Elt.getZExtValue());

  return true;
}

/// Create a shuffle mask that matches the PACKSS/PACKUS truncation.
/// Note: This ignores saturation, so inputs must be checked first.
static void createPackShuffleMask(MVT VT, SmallVectorImpl<int> &Mask,
                                  bool Unary) {
  assert(Mask.empty() && "Expected an empty shuffle mask vector");
  unsigned NumElts = VT.getVectorNumElements();
  unsigned NumLanes = VT.getSizeInBits() / 128;
  unsigned NumEltsPerLane = 128 / VT.getScalarSizeInBits();
  unsigned Offset = Unary ? 0 : NumElts;

  for (unsigned Lane = 0; Lane != NumLanes; ++Lane) {
    for (unsigned Elt = 0; Elt != NumEltsPerLane; Elt += 2)
      Mask.push_back(Elt + (Lane * NumEltsPerLane));
    for (unsigned Elt = 0; Elt != NumEltsPerLane; Elt += 2)
      Mask.push_back(Elt + (Lane * NumEltsPerLane) + Offset);
  }
}
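// For a binary v16i8 pack (a single 128-bit lane) this builds the mask
// <0,2,4,...,14, 16,18,...,30>: the even bytes of the first operand followed
// by the even bytes of the second, which is how PACKSSWB/PACKUSWB lay out
// their truncated results (saturation aside, as noted above).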

// Split the demanded elts of a PACKSS/PACKUS node between its operands.
static void getPackDemandedElts(EVT VT, const APInt &DemandedElts,
                                APInt &DemandedLHS, APInt &DemandedRHS) {
  int NumLanes = VT.getSizeInBits() / 128;
  int NumElts = DemandedElts.getBitWidth();
  int NumInnerElts = NumElts / 2;
  int NumEltsPerLane = NumElts / NumLanes;
  int NumInnerEltsPerLane = NumInnerElts / NumLanes;

  DemandedLHS = APInt::getNullValue(NumInnerElts);
  DemandedRHS = APInt::getNullValue(NumInnerElts);

  // Map DemandedElts to the packed operands.
  for (int Lane = 0; Lane != NumLanes; ++Lane) {
    for (int Elt = 0; Elt != NumInnerEltsPerLane; ++Elt) {
      int OuterIdx = (Lane * NumEltsPerLane) + Elt;
      int InnerIdx = (Lane * NumInnerEltsPerLane) + Elt;
      if (DemandedElts[OuterIdx])
        DemandedLHS.setBit(InnerIdx);
      if (DemandedElts[OuterIdx + NumInnerEltsPerLane])
        DemandedRHS.setBit(InnerIdx);
    }
  }
}
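// e.g. for a v16i8 pack, demanding result byte 9 (lane 0, second half) sets
// bit 1 of DemandedRHS: only inner element 1 of the RHS operand feeds that
// byte of the packed result.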

/// Calculates the shuffle mask corresponding to the target-specific opcode.
/// If the mask could be calculated, returns it in \p Mask, returns the shuffle
/// operands in \p Ops, and returns true.
/// Sets \p IsUnary to true if only one source is used. Note that this will set
/// IsUnary for shuffles which use a single input multiple times, and in those
/// cases it will adjust the mask to only have indices within that single input.
/// It is an error to call this with non-empty Mask/Ops vectors.
static bool getTargetShuffleMask(SDNode *N, MVT VT, bool AllowSentinelZero,
                                 SmallVectorImpl<SDValue> &Ops,
                                 SmallVectorImpl<int> &Mask, bool &IsUnary) {
  unsigned NumElems = VT.getVectorNumElements();
  unsigned MaskEltSize = VT.getScalarSizeInBits();
  SmallVector<uint64_t, 32> RawMask;
  APInt RawUndefs;
  SDValue ImmN;

  assert(Mask.empty() && "getTargetShuffleMask expects an empty Mask vector");
  assert(Ops.empty() && "getTargetShuffleMask expects an empty Ops vector");

6057 | IsUnary = false; | |||
6058 | bool IsFakeUnary = false; | |||
6059 | switch (N->getOpcode()) { | |||
6060 | case X86ISD::BLENDI: | |||
6061 | assert(N->getOperand(0).getValueType() == VT && "Unexpected value type")((N->getOperand(0).getValueType() == VT && "Unexpected value type" ) ? static_cast<void> (0) : __assert_fail ("N->getOperand(0).getValueType() == VT && \"Unexpected value type\"" , "/build/llvm-toolchain-snapshot-8~svn350071/lib/Target/X86/X86ISelLowering.cpp" , 6061, __PRETTY_FUNCTION__)); | |||
6062 | assert(N->getOperand(1).getValueType() == VT && "Unexpected value type")((N->getOperand(1).getValueType() == VT && "Unexpected value type" ) ? static_cast<void> (0) : __assert_fail ("N->getOperand(1).getValueType() == VT && \"Unexpected value type\"" , "/build/llvm-toolchain-snapshot-8~svn350071/lib/Target/X86/X86ISelLowering.cpp" , 6062, __PRETTY_FUNCTION__)); | |||
6063 | ImmN = N->getOperand(N->getNumOperands() - 1); | |||
6064 | DecodeBLENDMask(NumElems, cast<ConstantSDNode>(ImmN)->getZExtValue(), Mask); | |||
6065 | IsUnary = IsFakeUnary = N->getOperand(0) == N->getOperand(1); | |||
6066 | break; | |||
6067 | case X86ISD::SHUFP: | |||
6068 | assert(N->getOperand(0).getValueType() == VT && "Unexpected value type")((N->getOperand(0).getValueType() == VT && "Unexpected value type" ) ? static_cast<void> (0) : __assert_fail ("N->getOperand(0).getValueType() == VT && \"Unexpected value type\"" , "/build/llvm-toolchain-snapshot-8~svn350071/lib/Target/X86/X86ISelLowering.cpp" , 6068, __PRETTY_FUNCTION__)); | |||
6069 | assert(N->getOperand(1).getValueType() == VT && "Unexpected value type")((N->getOperand(1).getValueType() == VT && "Unexpected value type" ) ? static_cast<void> (0) : __assert_fail ("N->getOperand(1).getValueType() == VT && \"Unexpected value type\"" , "/build/llvm-toolchain-snapshot-8~svn350071/lib/Target/X86/X86ISelLowering.cpp" , 6069, __PRETTY_FUNCTION__)); | |||
6070 | ImmN = N->getOperand(N->getNumOperands() - 1); | |||
6071 | DecodeSHUFPMask(NumElems, MaskEltSize, | |||
6072 | cast<ConstantSDNode>(ImmN)->getZExtValue(), Mask); | |||
6073 | IsUnary = IsFakeUnary = N->getOperand(0) == N->getOperand(1); | |||
6074 | break; | |||
6075 | case X86ISD::INSERTPS: | |||
6076 | assert(N->getOperand(0).getValueType() == VT && "Unexpected value type")((N->getOperand(0).getValueType() == VT && "Unexpected value type" ) ? static_cast<void> (0) : __assert_fail ("N->getOperand(0).getValueType() == VT && \"Unexpected value type\"" , "/build/llvm-toolchain-snapshot-8~svn350071/lib/Target/X86/X86ISelLowering.cpp" , 6076, __PRETTY_FUNCTION__)); | |||
6077 | assert(N->getOperand(1).getValueType() == VT && "Unexpected value type")((N->getOperand(1).getValueType() == VT && "Unexpected value type" ) ? static_cast<void> (0) : __assert_fail ("N->getOperand(1).getValueType() == VT && \"Unexpected value type\"" , "/build/llvm-toolchain-snapshot-8~svn350071/lib/Target/X86/X86ISelLowering.cpp" , 6077, __PRETTY_FUNCTION__)); | |||
6078 | ImmN = N->getOperand(N->getNumOperands() - 1); | |||
6079 | DecodeINSERTPSMask(cast<ConstantSDNode>(ImmN)->getZExtValue(), Mask); | |||
6080 | IsUnary = IsFakeUnary = N->getOperand(0) == N->getOperand(1); | |||
6081 | break; | |||
6082 | case X86ISD::EXTRQI: | |||
6083 | assert(N->getOperand(0).getValueType() == VT && "Unexpected value type")((N->getOperand(0).getValueType() == VT && "Unexpected value type" ) ? static_cast<void> (0) : __assert_fail ("N->getOperand(0).getValueType() == VT && \"Unexpected value type\"" , "/build/llvm-toolchain-snapshot-8~svn350071/lib/Target/X86/X86ISelLowering.cpp" , 6083, __PRETTY_FUNCTION__)); | |||
6084 | if (isa<ConstantSDNode>(N->getOperand(1)) && | |||
6085 | isa<ConstantSDNode>(N->getOperand(2))) { | |||
6086 | int BitLen = N->getConstantOperandVal(1); | |||
6087 | int BitIdx = N->getConstantOperandVal(2); | |||
6088 | DecodeEXTRQIMask(NumElems, MaskEltSize, BitLen, BitIdx, Mask); | |||
6089 | IsUnary = true; | |||
6090 | } | |||
6091 | break; | |||
6092 | case X86ISD::INSERTQI: | |||
6093 | assert(N->getOperand(0).getValueType() == VT && "Unexpected value type")((N->getOperand(0).getValueType() == VT && "Unexpected value type" ) ? static_cast<void> (0) : __assert_fail ("N->getOperand(0).getValueType() == VT && \"Unexpected value type\"" , "/build/llvm-toolchain-snapshot-8~svn350071/lib/Target/X86/X86ISelLowering.cpp" , 6093, __PRETTY_FUNCTION__)); | |||
6094 | assert(N->getOperand(1).getValueType() == VT && "Unexpected value type")((N->getOperand(1).getValueType() == VT && "Unexpected value type" ) ? static_cast<void> (0) : __assert_fail ("N->getOperand(1).getValueType() == VT && \"Unexpected value type\"" , "/build/llvm-toolchain-snapshot-8~svn350071/lib/Target/X86/X86ISelLowering.cpp" , 6094, __PRETTY_FUNCTION__)); | |||
6095 | if (isa<ConstantSDNode>(N->getOperand(2)) && | |||
6096 | isa<ConstantSDNode>(N->getOperand(3))) { | |||
6097 | int BitLen = N->getConstantOperandVal(2); | |||
6098 | int BitIdx = N->getConstantOperandVal(3); | |||
6099 | DecodeINSERTQIMask(NumElems, MaskEltSize, BitLen, BitIdx, Mask); | |||
6100 | IsUnary = IsFakeUnary = N->getOperand(0) == N->getOperand(1); | |||
6101 | } | |||
6102 | break; | |||
6103 | case X86ISD::UNPCKH: | |||
6104 | assert(N->getOperand(0).getValueType() == VT && "Unexpected value type")((N->getOperand(0).getValueType() == VT && "Unexpected value type" ) ? static_cast<void> (0) : __assert_fail ("N->getOperand(0).getValueType() == VT && \"Unexpected value type\"" , "/build/llvm-toolchain-snapshot-8~svn350071/lib/Target/X86/X86ISelLowering.cpp" , 6104, __PRETTY_FUNCTION__)); | |||
6105 | assert(N->getOperand(1).getValueType() == VT && "Unexpected value type")((N->getOperand(1).getValueType() == VT && "Unexpected value type" ) ? static_cast<void> (0) : __assert_fail ("N->getOperand(1).getValueType() == VT && \"Unexpected value type\"" , "/build/llvm-toolchain-snapshot-8~svn350071/lib/Target/X86/X86ISelLowering.cpp" , 6105, __PRETTY_FUNCTION__)); | |||
6106 | DecodeUNPCKHMask(NumElems, MaskEltSize, Mask); | |||
6107 | IsUnary = IsFakeUnary = N->getOperand(0) == N->getOperand(1); | |||
6108 | break; | |||
6109 | case X86ISD::UNPCKL: | |||
6110 | assert(N->getOperand(0).getValueType() == VT && "Unexpected value type")((N->getOperand(0).getValueType() == VT && "Unexpected value type" ) ? static_cast<void> (0) : __assert_fail ("N->getOperand(0).getValueType() == VT && \"Unexpected value type\"" , "/build/llvm-toolchain-snapshot-8~svn350071/lib/Target/X86/X86ISelLowering.cpp" , 6110, __PRETTY_FUNCTION__)); | |||
6111 | assert(N->getOperand(1).getValueType() == VT && "Unexpected value type")((N->getOperand(1).getValueType() == VT && "Unexpected value type" ) ? static_cast<void> (0) : __assert_fail ("N->getOperand(1).getValueType() == VT && \"Unexpected value type\"" , "/build/llvm-toolchain-snapshot-8~svn350071/lib/Target/X86/X86ISelLowering.cpp" , 6111, __PRETTY_FUNCTION__)); | |||
6112 | DecodeUNPCKLMask(NumElems, MaskEltSize, Mask); | |||
6113 | IsUnary = IsFakeUnary = N->getOperand(0) == N->getOperand(1); | |||
6114 | break; | |||
6115 | case X86ISD::MOVHLPS: | |||
6116 | assert(N->getOperand(0).getValueType() == VT && "Unexpected value type")((N->getOperand(0).getValueType() == VT && "Unexpected value type" ) ? static_cast<void> (0) : __assert_fail ("N->getOperand(0).getValueType() == VT && \"Unexpected value type\"" , "/build/llvm-toolchain-snapshot-8~svn350071/lib/Target/X86/X86ISelLowering.cpp" , 6116, __PRETTY_FUNCTION__)); | |||
6117 | assert(N->getOperand(1).getValueType() == VT && "Unexpected value type")((N->getOperand(1).getValueType() == VT && "Unexpected value type" ) ? static_cast<void> (0) : __assert_fail ("N->getOperand(1).getValueType() == VT && \"Unexpected value type\"" , "/build/llvm-toolchain-snapshot-8~svn350071/lib/Target/X86/X86ISelLowering.cpp" , 6117, __PRETTY_FUNCTION__)); | |||
6118 | DecodeMOVHLPSMask(NumElems, Mask); | |||
6119 | IsUnary = IsFakeUnary = N->getOperand(0) == N->getOperand(1); | |||
6120 | break; | |||
6121 | case X86ISD::MOVLHPS: | |||
6122 | assert(N->getOperand(0).getValueType() == VT && "Unexpected value type")((N->getOperand(0).getValueType() == VT && "Unexpected value type" ) ? static_cast<void> (0) : __assert_fail ("N->getOperand(0).getValueType() == VT && \"Unexpected value type\"" , "/build/llvm-toolchain-snapshot-8~svn350071/lib/Target/X86/X86ISelLowering.cpp" , 6122, __PRETTY_FUNCTION__)); | |||
6123 | assert(N->getOperand(1).getValueType() == VT && "Unexpected value type")((N->getOperand(1).getValueType() == VT && "Unexpected value type" ) ? static_cast<void> (0) : __assert_fail ("N->getOperand(1).getValueType() == VT && \"Unexpected value type\"" , "/build/llvm-toolchain-snapshot-8~svn350071/lib/Target/X86/X86ISelLowering.cpp" , 6123, __PRETTY_FUNCTION__)); | |||
6124 | DecodeMOVLHPSMask(NumElems, Mask); | |||
6125 | IsUnary = IsFakeUnary = N->getOperand(0) == N->getOperand(1); | |||
6126 | break; | |||
6127 | case X86ISD::PALIGNR: | |||
6128 | assert(VT.getScalarType() == MVT::i8 && "Byte vector expected")((VT.getScalarType() == MVT::i8 && "Byte vector expected" ) ? static_cast<void> (0) : __assert_fail ("VT.getScalarType() == MVT::i8 && \"Byte vector expected\"" , "/build/llvm-toolchain-snapshot-8~svn350071/lib/Target/X86/X86ISelLowering.cpp" , 6128, __PRETTY_FUNCTION__)); | |||
6129 | assert(N->getOperand(0).getValueType() == VT && "Unexpected value type")((N->getOperand(0).getValueType() == VT && "Unexpected value type" ) ? static_cast<void> (0) : __assert_fail ("N->getOperand(0).getValueType() == VT && \"Unexpected value type\"" , "/build/llvm-toolchain-snapshot-8~svn350071/lib/Target/X86/X86ISelLowering.cpp" , 6129, __PRETTY_FUNCTION__)); | |||
6130 | assert(N->getOperand(1).getValueType() == VT && "Unexpected value type")((N->getOperand(1).getValueType() == VT && "Unexpected value type" ) ? static_cast<void> (0) : __assert_fail ("N->getOperand(1).getValueType() == VT && \"Unexpected value type\"" , "/build/llvm-toolchain-snapshot-8~svn350071/lib/Target/X86/X86ISelLowering.cpp" , 6130, __PRETTY_FUNCTION__)); | |||
6131 | ImmN = N->getOperand(N->getNumOperands() - 1); | |||
6132 | DecodePALIGNRMask(NumElems, cast<ConstantSDNode>(ImmN)->getZExtValue(), | |||
6133 | Mask); | |||
6134 | IsUnary = IsFakeUnary = N->getOperand(0) == N->getOperand(1); | |||
6135 | Ops.push_back(N->getOperand(1)); | |||
6136 | Ops.push_back(N->getOperand(0)); | |||
6137 | break; | |||
6138 | case X86ISD::VSHLDQ: | |||
6139 | assert(VT.getScalarType() == MVT::i8 && "Byte vector expected"); | |||
6140 | assert(N->getOperand(0).getValueType() == VT && "Unexpected value type"); | |||
6141 | ImmN = N->getOperand(N->getNumOperands() - 1); | |||
6142 | DecodePSLLDQMask(NumElems, cast<ConstantSDNode>(ImmN)->getZExtValue(), | |||
6143 | Mask); | |||
6144 | IsUnary = true; | |||
6145 | break; | |||
6146 | case X86ISD::VSRLDQ: | |||
6147 | assert(VT.getScalarType() == MVT::i8 && "Byte vector expected"); | |||
6148 | assert(N->getOperand(0).getValueType() == VT && "Unexpected value type"); | |||
6149 | ImmN = N->getOperand(N->getNumOperands() - 1); | |||
6150 | DecodePSRLDQMask(NumElems, cast<ConstantSDNode>(ImmN)->getZExtValue(), | |||
6151 | Mask); | |||
6152 | IsUnary = true; | |||
6153 | break; | |||
6154 | case X86ISD::PSHUFD: | |||
6155 | case X86ISD::VPERMILPI: | |||
6156 | assert(N->getOperand(0).getValueType() == VT && "Unexpected value type"); | |||
6157 | ImmN = N->getOperand(N->getNumOperands() - 1); | |||
6158 | DecodePSHUFMask(NumElems, MaskEltSize, | |||
6159 | cast<ConstantSDNode>(ImmN)->getZExtValue(), Mask); | |||
6160 | IsUnary = true; | |||
6161 | break; | |||
6162 | case X86ISD::PSHUFHW: | |||
6163 | assert(N->getOperand(0).getValueType() == VT && "Unexpected value type"); | |||
6164 | ImmN = N->getOperand(N->getNumOperands() - 1); | |||
6165 | DecodePSHUFHWMask(NumElems, cast<ConstantSDNode>(ImmN)->getZExtValue(), | |||
6166 | Mask); | |||
6167 | IsUnary = true; | |||
6168 | break; | |||
6169 | case X86ISD::PSHUFLW: | |||
6170 | assert(N->getOperand(0).getValueType() == VT && "Unexpected value type"); | |||
6171 | ImmN = N->getOperand(N->getNumOperands() - 1); | |||
6172 | DecodePSHUFLWMask(NumElems, cast<ConstantSDNode>(ImmN)->getZExtValue(), | |||
6173 | Mask); | |||
6174 | IsUnary = true; | |||
6175 | break; | |||
6176 | case X86ISD::VZEXT_MOVL: | |||
6177 | assert(N->getOperand(0).getValueType() == VT && "Unexpected value type"); | |||
6178 | DecodeZeroMoveLowMask(NumElems, Mask); | |||
6179 | IsUnary = true; | |||
6180 | break; | |||
6181 | case X86ISD::VBROADCAST: { | |||
6182 | SDValue N0 = N->getOperand(0); | |||
6183 | // See if we're broadcasting from index 0 of an EXTRACT_SUBVECTOR. If so, | |||
6184 | // add the pre-extracted value to the Ops vector. | |||
6185 | if (N0.getOpcode() == ISD::EXTRACT_SUBVECTOR && | |||
6186 | N0.getOperand(0).getValueType() == VT && | |||
6187 | N0.getConstantOperandVal(1) == 0) | |||
6188 | Ops.push_back(N0.getOperand(0)); | |||
6189 | ||||
6190 | // We only decode broadcasts of same-sized vectors, unless the broadcast | |||
6191 | // came from an extract from the original width. If we found one, we | |||
6192 | // pushed it onto the Ops vector above. | |||
6193 | if (N0.getValueType() == VT || !Ops.empty()) { | |||
6194 | DecodeVectorBroadcast(NumElems, Mask); | |||
6195 | IsUnary = true; | |||
6196 | break; | |||
6197 | } | |||
6198 | return false; | |||
6199 | } | |||
6200 | case X86ISD::VPERMILPV: { | |||
6201 | assert(N->getOperand(0).getValueType() == VT && "Unexpected value type"); | |||
6202 | IsUnary = true; | |||
6203 | SDValue MaskNode = N->getOperand(1); | |||
6204 | if (getTargetShuffleMaskIndices(MaskNode, MaskEltSize, RawMask, | |||
6205 | RawUndefs)) { | |||
6206 | DecodeVPERMILPMask(NumElems, MaskEltSize, RawMask, RawUndefs, Mask); | |||
6207 | break; | |||
6208 | } | |||
6209 | return false; | |||
6210 | } | |||
6211 | case X86ISD::PSHUFB: { | |||
6212 | assert(VT.getScalarType() == MVT::i8 && "Byte vector expected"); | |||
6213 | assert(N->getOperand(0).getValueType() == VT && "Unexpected value type"); | |||
6214 | assert(N->getOperand(1).getValueType() == VT && "Unexpected value type"); | |||
6215 | IsUnary = true; | |||
6216 | SDValue MaskNode = N->getOperand(1); | |||
6217 | if (getTargetShuffleMaskIndices(MaskNode, 8, RawMask, RawUndefs)) { | |||
6218 | DecodePSHUFBMask(RawMask, RawUndefs, Mask); | |||
6219 | break; | |||
6220 | } | |||
6221 | return false; | |||
6222 | } | |||
6223 | case X86ISD::VPERMI: | |||
6224 | assert(N->getOperand(0).getValueType() == VT && "Unexpected value type"); | |||
6225 | ImmN = N->getOperand(N->getNumOperands() - 1); | |||
6226 | DecodeVPERMMask(NumElems, cast<ConstantSDNode>(ImmN)->getZExtValue(), Mask); | |||
6227 | IsUnary = true; | |||
6228 | break; | |||
6229 | case X86ISD::MOVSS: | |||
6230 | case X86ISD::MOVSD: | |||
6231 | assert(N->getOperand(0).getValueType() == VT && "Unexpected value type"); | |||
6232 | assert(N->getOperand(1).getValueType() == VT && "Unexpected value type"); | |||
6233 | DecodeScalarMoveMask(NumElems, /* IsLoad */ false, Mask); | |||
6234 | break; | |||
6235 | case X86ISD::VPERM2X128: | |||
6236 | assert(N->getOperand(0).getValueType() == VT && "Unexpected value type"); | |||
6237 | assert(N->getOperand(1).getValueType() == VT && "Unexpected value type"); | |||
6238 | ImmN = N->getOperand(N->getNumOperands() - 1); | |||
6239 | DecodeVPERM2X128Mask(NumElems, cast<ConstantSDNode>(ImmN)->getZExtValue(), | |||
6240 | Mask); | |||
6241 | IsUnary = IsFakeUnary = N->getOperand(0) == N->getOperand(1); | |||
6242 | break; | |||
6243 | case X86ISD::SHUF128: | |||
6244 | assert(N->getOperand(0).getValueType() == VT && "Unexpected value type"); | |||
6245 | assert(N->getOperand(1).getValueType() == VT && "Unexpected value type"); | |||
6246 | ImmN = N->getOperand(N->getNumOperands() - 1); | |||
6247 | decodeVSHUF64x2FamilyMask(NumElems, MaskEltSize, | |||
6248 | cast<ConstantSDNode>(ImmN)->getZExtValue(), Mask); | |||
6249 | IsUnary = IsFakeUnary = N->getOperand(0) == N->getOperand(1); | |||
6250 | break; | |||
6251 | case X86ISD::MOVSLDUP: | |||
6252 | assert(N->getOperand(0).getValueType() == VT && "Unexpected value type"); | |||
6253 | DecodeMOVSLDUPMask(NumElems, Mask); | |||
6254 | IsUnary = true; | |||
6255 | break; | |||
6256 | case X86ISD::MOVSHDUP: | |||
6257 | assert(N->getOperand(0).getValueType() == VT && "Unexpected value type"); | |||
6258 | DecodeMOVSHDUPMask(NumElems, Mask); | |||
6259 | IsUnary = true; | |||
6260 | break; | |||
6261 | case X86ISD::MOVDDUP: | |||
6262 | assert(N->getOperand(0).getValueType() == VT && "Unexpected value type"); | |||
6263 | DecodeMOVDDUPMask(NumElems, Mask); | |||
6264 | IsUnary = true; | |||
6265 | break; | |||
6266 | case X86ISD::VPERMIL2: { | |||
6267 | assert(N->getOperand(0).getValueType() == VT && "Unexpected value type"); | |||
6268 | assert(N->getOperand(1).getValueType() == VT && "Unexpected value type"); | |||
6269 | IsUnary = IsFakeUnary = N->getOperand(0) == N->getOperand(1); | |||
6270 | SDValue MaskNode = N->getOperand(2); | |||
6271 | SDValue CtrlNode = N->getOperand(3); | |||
6272 | if (ConstantSDNode *CtrlOp = dyn_cast<ConstantSDNode>(CtrlNode)) { | |||
6273 | unsigned CtrlImm = CtrlOp->getZExtValue(); | |||
6274 | if (getTargetShuffleMaskIndices(MaskNode, MaskEltSize, RawMask, | |||
6275 | RawUndefs)) { | |||
6276 | DecodeVPERMIL2PMask(NumElems, MaskEltSize, CtrlImm, RawMask, RawUndefs, | |||
6277 | Mask); | |||
6278 | break; | |||
6279 | } | |||
6280 | } | |||
6281 | return false; | |||
6282 | } | |||
6283 | case X86ISD::VPPERM: { | |||
6284 | assert(N->getOperand(0).getValueType() == VT && "Unexpected value type"); | |||
6285 | assert(N->getOperand(1).getValueType() == VT && "Unexpected value type"); | |||
6286 | IsUnary = IsFakeUnary = N->getOperand(0) == N->getOperand(1); | |||
6287 | SDValue MaskNode = N->getOperand(2); | |||
6288 | if (getTargetShuffleMaskIndices(MaskNode, 8, RawMask, RawUndefs)) { | |||
6289 | DecodeVPPERMMask(RawMask, RawUndefs, Mask); | |||
6290 | break; | |||
6291 | } | |||
6292 | return false; | |||
6293 | } | |||
6294 | case X86ISD::VPERMV: { | |||
6295 | assert(N->getOperand(1).getValueType() == VT && "Unexpected value type"); | |||
6296 | IsUnary = true; | |||
6297 | // Unlike most shuffle nodes, VPERMV's mask operand is operand 0. | |||
6298 | Ops.push_back(N->getOperand(1)); | |||
6299 | SDValue MaskNode = N->getOperand(0); | |||
6300 | if (getTargetShuffleMaskIndices(MaskNode, MaskEltSize, RawMask, | |||
6301 | RawUndefs)) { | |||
6302 | DecodeVPERMVMask(RawMask, RawUndefs, Mask); | |||
6303 | break; | |||
6304 | } | |||
6305 | return false; | |||
6306 | } | |||
6307 | case X86ISD::VPERMV3: { | |||
6308 | assert(N->getOperand(0).getValueType() == VT && "Unexpected value type"); | |||
6309 | assert(N->getOperand(2).getValueType() == VT && "Unexpected value type"); | |||
6310 | IsUnary = IsFakeUnary = N->getOperand(0) == N->getOperand(2); | |||
6311 | // Unlike most shuffle nodes, VPERMV3's mask operand is the middle one. | |||
6312 | Ops.push_back(N->getOperand(0)); | |||
6313 | Ops.push_back(N->getOperand(2)); | |||
6314 | SDValue MaskNode = N->getOperand(1); | |||
6315 | if (getTargetShuffleMaskIndices(MaskNode, MaskEltSize, RawMask, | |||
6316 | RawUndefs)) { | |||
6317 | DecodeVPERMV3Mask(RawMask, RawUndefs, Mask); | |||
6318 | break; | |||
6319 | } | |||
6320 | return false; | |||
6321 | } | |||
6322 | default: llvm_unreachable("unknown target shuffle node"); | |||
6323 | } | |||
6324 | ||||
6325 | // Empty mask indicates the decode failed. | |||
6326 | if (Mask.empty()) | |||
6327 | return false; | |||
6328 | ||||
6329 | // Check if we're getting a shuffle mask with zero'd elements. | |||
6330 | if (!AllowSentinelZero) | |||
6331 | if (any_of(Mask, [](int M) { return M == SM_SentinelZero; })) | |||
6332 | return false; | |||
6333 | ||||
6334 | // If we have a fake unary shuffle, the shuffle mask is spread across two | |||
6335 | // inputs that are actually the same node. Re-map the mask to always point | |||
6336 | // into the first input. | |||
6337 | if (IsFakeUnary) | |||
6338 | for (int &M : Mask) | |||
6339 | if (M >= (int)Mask.size()) | |||
6340 | M -= Mask.size(); | |||
6341 | ||||
6342 | // If we didn't already add operands in the opcode-specific code, default to | |||
6343 | // adding 1 or 2 operands starting at 0. | |||
6344 | if (Ops.empty()) { | |||
6345 | Ops.push_back(N->getOperand(0)); | |||
6346 | if (!IsUnary || IsFakeUnary) | |||
6347 | Ops.push_back(N->getOperand(1)); | |||
6348 | } | |||
6349 | ||||
6350 | return true; | |||
6351 | } | |||
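    | // For example, X86ISD::UNPCKL on v4i32 decodes to the mask {0, 4, 1, 5}: | |||
    | // the low halves of the two inputs interleaved, with indices >= 4 denoting | |||
    | // the second operand. When both operands are the same node, IsFakeUnary is | |||
    | // set and the remap loop above folds indices 4-7 back into the first input. | |||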
6352 | ||||
6353 | /// Check a target shuffle mask's inputs to see if we can set any values to | |||
6354 | /// SM_SentinelZero - this is for elements that are known to be zero | |||
6355 | /// (not just zeroable) from their inputs. | |||
6356 | /// Returns true if the target shuffle mask was decoded. | |||
6357 | static bool setTargetShuffleZeroElements(SDValue N, | |||
6358 | SmallVectorImpl<int> &Mask, | |||
6359 | SmallVectorImpl<SDValue> &Ops) { | |||
6360 | bool IsUnary; | |||
6361 | if (!isTargetShuffle(N.getOpcode())) | |||
6362 | return false; | |||
6363 | ||||
6364 | MVT VT = N.getSimpleValueType(); | |||
6365 | if (!getTargetShuffleMask(N.getNode(), VT, true, Ops, Mask, IsUnary)) | |||
6366 | return false; | |||
6367 | ||||
6368 | SDValue V1 = Ops[0]; | |||
6369 | SDValue V2 = IsUnary ? V1 : Ops[1]; | |||
6370 | ||||
6371 | V1 = peekThroughBitcasts(V1); | |||
6372 | V2 = peekThroughBitcasts(V2); | |||
6373 | ||||
6374 | assert((VT.getSizeInBits() % Mask.size()) == 0 && | |||
6375 | "Illegal split of shuffle value type"); | |||
6376 | unsigned EltSizeInBits = VT.getSizeInBits() / Mask.size(); | |||
6377 | ||||
6378 | // Extract known constant input data. | |||
6379 | APInt UndefSrcElts[2]; | |||
6380 | SmallVector<APInt, 32> SrcEltBits[2]; | |||
6381 | bool IsSrcConstant[2] = { | |||
6382 | getTargetConstantBitsFromNode(V1, EltSizeInBits, UndefSrcElts[0], | |||
6383 | SrcEltBits[0], true, false), | |||
6384 | getTargetConstantBitsFromNode(V2, EltSizeInBits, UndefSrcElts[1], | |||
6385 | SrcEltBits[1], true, false)}; | |||
6386 | ||||
6387 | for (int i = 0, Size = Mask.size(); i < Size; ++i) { | |||
6388 | int M = Mask[i]; | |||
6389 | ||||
6390 | // Already decoded as SM_SentinelZero / SM_SentinelUndef. | |||
6391 | if (M < 0) | |||
6392 | continue; | |||
6393 | ||||
6394 | // Determine shuffle input and normalize the mask. | |||
6395 | unsigned SrcIdx = M / Size; | |||
6396 | SDValue V = M < Size ? V1 : V2; | |||
6397 | M %= Size; | |||
6398 | ||||
6399 | // We are referencing an UNDEF input. | |||
6400 | if (V.isUndef()) { | |||
6401 | Mask[i] = SM_SentinelUndef; | |||
6402 | continue; | |||
6403 | } | |||
6404 | ||||
6405 | // SCALAR_TO_VECTOR - only the first element is defined; the rest are UNDEF. | |||
6406 | // TODO: We currently only set UNDEF for integer types - floats use the same | |||
6407 | // registers as vectors and many of the scalar folded loads rely on the | |||
6408 | // SCALAR_TO_VECTOR pattern. | |||
6409 | if (V.getOpcode() == ISD::SCALAR_TO_VECTOR && | |||
6410 | (Size % V.getValueType().getVectorNumElements()) == 0) { | |||
6411 | int Scale = Size / V.getValueType().getVectorNumElements(); | |||
6412 | int Idx = M / Scale; | |||
6413 | if (Idx != 0 && !VT.isFloatingPoint()) | |||
6414 | Mask[i] = SM_SentinelUndef; | |||
6415 | else if (Idx == 0 && X86::isZeroNode(V.getOperand(0))) | |||
6416 | Mask[i] = SM_SentinelZero; | |||
6417 | continue; | |||
6418 | } | |||
6419 | ||||
6420 | // Attempt to extract from the source's constant bits. | |||
6421 | if (IsSrcConstant[SrcIdx]) { | |||
6422 | if (UndefSrcElts[SrcIdx][M]) | |||
6423 | Mask[i] = SM_SentinelUndef; | |||
6424 | else if (SrcEltBits[SrcIdx][M] == 0) | |||
6425 | Mask[i] = SM_SentinelZero; | |||
6426 | } | |||
6427 | } | |||
6428 | ||||
6429 | assert(VT.getVectorNumElements() == Mask.size() && | |||
6430 | "Different mask size from vector size!"); | |||
6431 | return true; | |||
6432 | } | |||
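    | // For instance, shuffling a v4i32 input X against the constant vector | |||
    | // {0, 0, undef, 7} with mask {0, 5, 6, 7} refines the mask to | |||
    | // {0, SM_SentinelZero, SM_SentinelUndef, 7}: elements 5 and 6 index a | |||
    | // known-zero and an undef constant respectively, while element 7 stays a | |||
    | // live (nonzero) input reference. | |||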
6433 | ||||
6434 | // Forward declaration (for getFauxShuffleMask recursive check). | |||
6435 | static bool resolveTargetShuffleInputs(SDValue Op, | |||
6436 | SmallVectorImpl<SDValue> &Inputs, | |||
6437 | SmallVectorImpl<int> &Mask, | |||
6438 | const SelectionDAG &DAG); | |||
6439 | ||||
6440 | // Attempt to decode ops that could be represented as a shuffle mask. | |||
6441 | // The decoded shuffle mask may contain a different number of elements from | |||
6442 | // the destination value type. | |||
6443 | static bool getFauxShuffleMask(SDValue N, SmallVectorImpl<int> &Mask, | |||
6444 | SmallVectorImpl<SDValue> &Ops, | |||
6445 | const SelectionDAG &DAG) { | |||
6446 | Mask.clear(); | |||
6447 | Ops.clear(); | |||
6448 | ||||
6449 | MVT VT = N.getSimpleValueType(); | |||
6450 | unsigned NumElts = VT.getVectorNumElements(); | |||
6451 | unsigned NumSizeInBits = VT.getSizeInBits(); | |||
6452 | unsigned NumBitsPerElt = VT.getScalarSizeInBits(); | |||
6453 | assert((NumBitsPerElt % 8) == 0 && (NumSizeInBits % 8) == 0 && | |||
6454 | "Expected byte aligned value types"); | |||
6455 | ||||
6456 | unsigned Opcode = N.getOpcode(); | |||
6457 | switch (Opcode) { | |||
6458 | case ISD::VECTOR_SHUFFLE: { | |||
6459 | // ISD::VECTOR_SHUFFLE is not treated as a target shuffle, so decode it here. | |||
6460 | ArrayRef<int> ShuffleMask = cast<ShuffleVectorSDNode>(N)->getMask(); | |||
6461 | if (isUndefOrInRange(ShuffleMask, 0, 2 * NumElts)) { | |||
6462 | Mask.append(ShuffleMask.begin(), ShuffleMask.end()); | |||
6463 | Ops.push_back(N.getOperand(0)); | |||
6464 | Ops.push_back(N.getOperand(1)); | |||
6465 | return true; | |||
6466 | } | |||
6467 | return false; | |||
6468 | } | |||
6469 | case ISD::AND: | |||
6470 | case X86ISD::ANDNP: { | |||
6471 | // Attempt to decode as a per-byte mask. | |||
6472 | APInt UndefElts; | |||
6473 | SmallVector<APInt, 32> EltBits; | |||
6474 | SDValue N0 = N.getOperand(0); | |||
6475 | SDValue N1 = N.getOperand(1); | |||
6476 | bool IsAndN = (X86ISD::ANDNP == Opcode); | |||
6477 | uint64_t ZeroMask = IsAndN ? 255 : 0; | |||
6478 | if (!getTargetConstantBitsFromNode(IsAndN ? N0 : N1, 8, UndefElts, EltBits)) | |||
6479 | return false; | |||
6480 | for (int i = 0, e = (int)EltBits.size(); i != e; ++i) { | |||
6481 | if (UndefElts[i]) { | |||
6482 | Mask.push_back(SM_SentinelUndef); | |||
6483 | continue; | |||
6484 | } | |||
6485 | uint64_t ByteBits = EltBits[i].getZExtValue(); | |||
6486 | if (ByteBits != 0 && ByteBits != 255) | |||
6487 | return false; | |||
6488 | Mask.push_back(ByteBits == ZeroMask ? SM_SentinelZero : i); | |||
6489 | } | |||
6490 | Ops.push_back(IsAndN ? N1 : N0); | |||
6491 | return true; | |||
6492 | } | |||
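    | // For example, AND(X, {0xFF, 0x00, 0xFF, 0xFF}) over bytes decodes to the | |||
    | // mask {0, SM_SentinelZero, 2, 3} - a blend of X's bytes with zero. | |||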
6493 | case ISD::OR: { | |||
6494 | // Handle the OR(SHUFFLE,SHUFFLE) case, where for each element one source | |||
6495 | // is zero and the other is a valid shuffle index. | |||
6496 | SDValue N0 = peekThroughOneUseBitcasts(N.getOperand(0)); | |||
6497 | SDValue N1 = peekThroughOneUseBitcasts(N.getOperand(1)); | |||
6498 | if (!N0.getValueType().isVector() || !N1.getValueType().isVector()) | |||
6499 | return false; | |||
6500 | SmallVector<int, 64> SrcMask0, SrcMask1; | |||
6501 | SmallVector<SDValue, 2> SrcInputs0, SrcInputs1; | |||
6502 | if (!resolveTargetShuffleInputs(N0, SrcInputs0, SrcMask0, DAG) || | |||
6503 | !resolveTargetShuffleInputs(N1, SrcInputs1, SrcMask1, DAG)) | |||
6504 | return false; | |||
6505 | int MaskSize = std::max(SrcMask0.size(), SrcMask1.size()); | |||
6506 | SmallVector<int, 64> Mask0, Mask1; | |||
6507 | scaleShuffleMask<int>(MaskSize / SrcMask0.size(), SrcMask0, Mask0); | |||
6508 | scaleShuffleMask<int>(MaskSize / SrcMask1.size(), SrcMask1, Mask1); | |||
6509 | for (int i = 0; i != MaskSize; ++i) { | |||
6510 | if (Mask0[i] == SM_SentinelUndef && Mask1[i] == SM_SentinelUndef) | |||
6511 | Mask.push_back(SM_SentinelUndef); | |||
6512 | else if (Mask0[i] == SM_SentinelZero && Mask1[i] == SM_SentinelZero) | |||
6513 | Mask.push_back(SM_SentinelZero); | |||
6514 | else if (Mask1[i] == SM_SentinelZero) | |||
6515 | Mask.push_back(Mask0[i]); | |||
6516 | else if (Mask0[i] == SM_SentinelZero) | |||
6517 | Mask.push_back(Mask1[i] + (MaskSize * SrcInputs0.size())); | |||
6518 | else | |||
6519 | return false; | |||
6520 | } | |||
6521 | for (SDValue &Op : SrcInputs0) | |||
6522 | Ops.push_back(Op); | |||
6523 | for (SDValue &Op : SrcInputs1) | |||
6524 | Ops.push_back(Op); | |||
6525 | return true; | |||
6526 | } | |||
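    | // For example, if the resolved masks are Mask0 = {0, SM_SentinelZero} and | |||
    | // Mask1 = {SM_SentinelZero, 1}, the merged two-input mask becomes | |||
    | // {0, 1 + MaskSize * SrcInputs0.size()}: lane 0 comes from the first | |||
    | // shuffle's inputs and lane 1 from the second's. | |||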
6527 | case ISD::INSERT_SUBVECTOR: { | |||
6528 | // Handle INSERT_SUBVECTOR(SRC0, SHUFFLE(EXTRACT_SUBVECTOR(SRC1))) where | |||
6529 | // SRC0/SRC1 are both of the same value type VT. | |||
6530 | // TODO - add peekThroughOneUseBitcasts support. | |||
6531 | SDValue Src = N.getOperand(0); | |||
6532 | SDValue Sub = N.getOperand(1); | |||
6533 | EVT SubVT = Sub.getValueType(); | |||
6534 | unsigned NumSubElts = SubVT.getVectorNumElements(); | |||
6535 | if (!isa<ConstantSDNode>(N.getOperand(2)) || | |||
6536 | !N->isOnlyUserOf(Sub.getNode())) | |||
6537 | return false; | |||
6538 | SmallVector<int, 64> SubMask; | |||
6539 | SmallVector<SDValue, 2> SubInputs; | |||
6540 | if (!resolveTargetShuffleInputs(Sub, SubInputs, SubMask, DAG) || | |||
6541 | SubMask.size() != NumSubElts) | |||
6542 | return false; | |||
6543 | Ops.push_back(Src); | |||
6544 | for (SDValue &SubInput : SubInputs) { | |||
6545 | if (SubInput.getOpcode() != ISD::EXTRACT_SUBVECTOR || | |||
6546 | SubInput.getOperand(0).getValueType() != VT || | |||
6547 | !isa<ConstantSDNode>(SubInput.getOperand(1))) | |||
6548 | return false; | |||
6549 | Ops.push_back(SubInput.getOperand(0)); | |||
6550 | } | |||
6551 | int InsertIdx = N.getConstantOperandVal(2); | |||
6552 | for (int i = 0; i != (int)NumElts; ++i) | |||
6553 | Mask.push_back(i); | |||
6554 | for (int i = 0; i != (int)NumSubElts; ++i) { | |||
6555 | int M = SubMask[i]; | |||
6556 | if (0 <= M) { | |||
6557 | int InputIdx = M / NumSubElts; | |||
6558 | int ExtractIdx = SubInputs[InputIdx].getConstantOperandVal(1); | |||
6559 | M = (NumElts * (1 + InputIdx)) + ExtractIdx + (M % NumSubElts); | |||
6560 | } | |||
6561 | Mask[i + InsertIdx] = M; | |||
6562 | } | |||
6563 | return true; | |||
6564 | } | |||
6565 | case ISD::SCALAR_TO_VECTOR: { | |||
6566 | // Match against a scalar_to_vector of an extract from a vector; for | |||
6567 | // PEXTRW/PEXTRB we must handle the implicit zext of the scalar. | |||
6568 | SDValue N0 = N.getOperand(0); | |||
6569 | SDValue SrcExtract; | |||
6570 | ||||
6571 | if ((N0.getOpcode() == ISD::EXTRACT_VECTOR_ELT && | |||
6572 | N0.getOperand(0).getValueType() == VT) || | |||
6573 | (N0.getOpcode() == X86ISD::PEXTRW && | |||
6574 | N0.getOperand(0).getValueType() == MVT::v8i16) || | |||
6575 | (N0.getOpcode() == X86ISD::PEXTRB && | |||
6576 | N0.getOperand(0).getValueType() == MVT::v16i8)) { | |||
6577 | SrcExtract = N0; | |||
6578 | } | |||
6579 | ||||
6580 | if (!SrcExtract || !isa<ConstantSDNode>(SrcExtract.getOperand(1))) | |||
6581 | return false; | |||
6582 | ||||
6583 | SDValue SrcVec = SrcExtract.getOperand(0); | |||
6584 | EVT SrcVT = SrcVec.getValueType(); | |||
6585 | unsigned NumSrcElts = SrcVT.getVectorNumElements(); | |||
6586 | unsigned NumZeros = (NumBitsPerElt / SrcVT.getScalarSizeInBits()) - 1; | |||
6587 | ||||
6588 | unsigned SrcIdx = SrcExtract.getConstantOperandVal(1); | |||
6589 | if (NumSrcElts <= SrcIdx) | |||
6590 | return false; | |||
6591 | ||||
6592 | Ops.push_back(SrcVec); | |||
6593 | Mask.push_back(SrcIdx); | |||
6594 | Mask.append(NumZeros, SM_SentinelZero); | |||
6595 | Mask.append(NumSrcElts - Mask.size(), SM_SentinelUndef); | |||
6596 | return true; | |||
6597 | } | |||
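    | // For example, scalar_to_vector(extract_vector_elt(v4i32 X, 2)) with a | |||
    | // v4i32 result gives Ops = {X} and Mask = {2, undef, undef, undef}; for | |||
    | // PEXTRW/PEXTRB sources the implicitly zero-extended high bits contribute | |||
    | // the SM_SentinelZero entries appended before the trailing undefs. | |||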
6598 | case X86ISD::PINSRB: | |||
6599 | case X86ISD::PINSRW: { | |||
6600 | SDValue InVec = N.getOperand(0); | |||
6601 | SDValue InScl = N.getOperand(1); | |||
6602 | SDValue InIndex = N.getOperand(2); | |||
6603 | if (!isa<ConstantSDNode>(InIndex) || | |||
6604 | cast<ConstantSDNode>(InIndex)->getAPIntValue().uge(NumElts)) | |||
6605 | return false; | |||
6606 | uint64_t InIdx = N.getConstantOperandVal(2); | |||
6607 | ||||
6608 | // Attempt to recognise a PINSR*(VEC, 0, Idx) shuffle pattern. | |||
6609 | if (X86::isZeroNode(InScl)) { | |||
6610 | Ops.push_back(InVec); | |||
6611 | for (unsigned i = 0; i != NumElts; ++i) | |||
6612 | Mask.push_back(i == InIdx ? SM_SentinelZero : (int)i); | |||
6613 | return true; | |||
6614 | } | |||
6615 | ||||
6616 | // Attempt to recognise a PINSR*(PEXTR*) shuffle pattern. | |||
6617 | // TODO: Expand this to support INSERT_VECTOR_ELT/etc. | |||
6618 | unsigned ExOp = | |||
6619 | (X86ISD::PINSRB == Opcode ? X86ISD::PEXTRB : X86ISD::PEXTRW); | |||
6620 | if (InScl.getOpcode() != ExOp) | |||
6621 | return false; | |||
6622 | ||||
6623 | SDValue ExVec = InScl.getOperand(0); | |||
6624 | SDValue ExIndex = InScl.getOperand(1); | |||
6625 | if (!isa<ConstantSDNode>(ExIndex) || | |||
6626 | cast<ConstantSDNode>(ExIndex)->getAPIntValue().uge(NumElts)) | |||
6627 | return false; | |||
6628 | uint64_t ExIdx = InScl.getConstantOperandVal(1); | |||
6629 | ||||
6630 | Ops.push_back(InVec); | |||
6631 | Ops.push_back(ExVec); | |||
6632 | for (unsigned i = 0; i != NumElts; ++i) | |||
6633 | Mask.push_back(i == InIdx ? NumElts + ExIdx : i); | |||
6634 | return true; | |||
6635 | } | |||
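    | // For example, on v8i16, PINSRW(V, 0, 3) decodes to {0, 1, 2, Z, 4, 5, 6, 7} | |||
    | // (Z = SM_SentinelZero), and PINSRW(V, PEXTRW(W, 5), 3) decodes to | |||
    | // {0, 1, 2, 8 + 5, 4, 5, 6, 7} with Ops = {V, W}. | |||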
6636 | case X86ISD::PACKSS: | |||
6637 | case X86ISD::PACKUS: { | |||
6638 | SDValue N0 = N.getOperand(0); | |||
6639 | SDValue N1 = N.getOperand(1); | |||
6640 | assert(N0.getValueType().getVectorNumElements() == (NumElts / 2) && | |||
6641 | N1.getValueType().getVectorNumElements() == (NumElts / 2) && | |||
6642 | "Unexpected input value type"); | |||
6643 | ||||
6644 | // If we know input saturation won't happen, we can treat this | |||
6645 | // as a truncation shuffle. | |||
6646 | if (Opcode == X86ISD::PACKSS) { | |||
6647 | if ((!N0.isUndef() && DAG.ComputeNumSignBits(N0) <= NumBitsPerElt) || | |||
6648 | (!N1.isUndef() && DAG.ComputeNumSignBits(N1) <= NumBitsPerElt)) | |||
6649 | return false; | |||
6650 | } else { | |||
6651 | APInt ZeroMask = APInt::getHighBitsSet(2 * NumBitsPerElt, NumBitsPerElt); | |||
6652 | if ((!N0.isUndef() && !DAG.MaskedValueIsZero(N0, ZeroMask)) || | |||
6653 | (!N1.isUndef() && !DAG.MaskedValueIsZero(N1, ZeroMask))) | |||
6654 | return false; | |||
6655 | } | |||
6656 | ||||
6657 | bool IsUnary = (N0 == N1); | |||
6658 | ||||
6659 | Ops.push_back(N0); | |||
6660 | if (!IsUnary) | |||
6661 | Ops.push_back(N1); | |||
6662 | ||||
6663 | createPackShuffleMask(VT, Mask, IsUnary); | |||
6664 | return true; | |||
6665 | } | |||
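    | // For example, a v16i8 PACKUS whose v8i16 inputs have known-zero high bytes | |||
    | // acts as the truncating byte shuffle {0, 2, ..., 14, 16, 18, ..., 30} over | |||
    | // the concatenated inputs (the even, little-endian low bytes of each word). | |||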
6666 | case X86ISD::VSHLI: | |||
6667 | case X86ISD::VSRLI: { | |||
6668 | uint64_t ShiftVal = N.getConstantOperandVal(1); | |||
6669 | // Out-of-range bit shifts are guaranteed to produce zero. | |||
6670 | if (NumBitsPerElt <= ShiftVal) { | |||
6671 | Mask.append(NumElts, SM_SentinelZero); | |||
6672 | return true; | |||
6673 | } | |||
6674 | ||||
6675 | // We can only decode 'whole byte' bit shifts as shuffles. | |||
6676 | if ((ShiftVal % 8) != 0) | |||
6677 | break; | |||
6678 | ||||
6679 | uint64_t ByteShift = ShiftVal / 8; | |||
6680 | unsigned NumBytes = NumSizeInBits / 8; | |||
6681 | unsigned NumBytesPerElt = NumBitsPerElt / 8; | |||
6682 | Ops.push_back(N.getOperand(0)); | |||
6683 | ||||
6684 | // Clear mask to all zeros and insert the shifted byte indices. | |||
6685 | Mask.append(NumBytes, SM_SentinelZero); | |||
6686 | ||||
6687 | if (X86ISD::VSHLI == Opcode) { | |||
6688 | for (unsigned i = 0; i != NumBytes; i += NumBytesPerElt) | |||
6689 | for (unsigned j = ByteShift; j != NumBytesPerElt; ++j) | |||
6690 | Mask[i + j] = i + j - ByteShift; | |||
6691 | } else { | |||
6692 | for (unsigned i = 0; i != NumBytes; i += NumBytesPerElt) | |||
6693 | for (unsigned j = ByteShift; j != NumBytesPerElt; ++j) | |||
6694 | Mask[i + j - ByteShift] = i + j; | |||
6695 | } | |||
6696 | return true; | |||
6697 | } | |||
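    | // For example, VSRLI v4i32 by 8 bits shifts each element right by one byte, | |||
    | // i.e. the per-element byte pattern {1, 2, 3, Z} (little endian) repeated | |||
    | // across all four 32-bit lanes of the 16-byte mask built above. | |||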
6698 | case ISD::ZERO_EXTEND_VECTOR_INREG: | |||
6699 | case ISD::ZERO_EXTEND: { | |||
6700 | // TODO - add support for VPMOVZX with smaller input vector types. | |||
6701 | SDValue Src = N.getOperand(0); | |||
6702 | MVT SrcVT = Src.getSimpleValueType(); | |||
6703 | if (NumSizeInBits != SrcVT.getSizeInBits()) | |||
6704 | break; | |||
6705 | DecodeZeroExtendMask(SrcVT.getScalarSizeInBits(), NumBitsPerElt, NumElts, | |||
6706 | Mask); | |||
6707 | Ops.push_back(Src); | |||
6708 | return true; | |||
6709 | } | |||
6710 | } | |||
6711 | ||||
6712 | return false; | |||
6713 | } | |||
6714 | ||||
6715 | /// Removes unused shuffle source inputs and adjusts the shuffle mask accordingly. | |||
6716 | static void resolveTargetShuffleInputsAndMask(SmallVectorImpl<SDValue> &Inputs, | |||
6717 | SmallVectorImpl<int> &Mask) { | |||
6718 | int MaskWidth = Mask.size(); | |||
6719 | SmallVector<SDValue, 16> UsedInputs; | |||
6720 | for (int i = 0, e = Inputs.size(); i < e; ++i) { | |||
6721 | int lo = UsedInputs.size() * MaskWidth; | |||
6722 | int hi = lo + MaskWidth; | |||
6723 | ||||
6724 | // Strip UNDEF input usage. | |||
6725 | if (Inputs[i].isUndef()) | |||
6726 | for (int &M : Mask) | |||
6727 | if ((lo <= M) && (M < hi)) | |||
6728 | M = SM_SentinelUndef; | |||
6729 | ||||
6730 | // Check for unused inputs. | |||
6731 | if (any_of(Mask, [lo, hi](int i) { return (lo <= i) && (i < hi); })) { | |||
6732 | UsedInputs.push_back(Inputs[i]); | |||
6733 | continue; | |||
6734 | } | |||
6735 | for (int &M : Mask) | |||
6736 | if (lo <= M) | |||
6737 | M -= MaskWidth; | |||
6738 | } | |||
6739 | Inputs = UsedInputs; | |||
6740 | } | |||
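    | // For example, with Inputs = {X, Y} and Mask = {0, 1, 2, 3} (width 4), no | |||
    | // index falls in Y's range [4, 8), so Y is dropped and the mask is left | |||
    | // referencing only X. | |||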
6741 | ||||
6742 | /// Calls setTargetShuffleZeroElements to resolve a target shuffle mask's inputs | |||
6743 | /// and set the SM_SentinelUndef and SM_SentinelZero values. Then check the | |||
6744 | /// remaining input indices in case we now have a unary shuffle and adjust the | |||
6745 | /// inputs accordingly. | |||
6746 | /// Returns true if the target shuffle mask was decoded. | |||
6747 | static bool resolveTargetShuffleInputs(SDValue Op, | |||
6748 | SmallVectorImpl<SDValue> &Inputs, | |||
6749 | SmallVectorImpl<int> &Mask, | |||
6750 | const SelectionDAG &DAG) { | |||
6751 | if (!setTargetShuffleZeroElements(Op, Mask, Inputs)) | |||
6752 | if (!getFauxShuffleMask(Op, Mask, Inputs, DAG)) | |||
6753 | return false; | |||
6754 | ||||
6755 | resolveTargetShuffleInputsAndMask(Inputs, Mask); | |||
6756 | return true; | |||
6757 | } | |||
6758 | ||||
6759 | /// Returns the scalar element that will make up the ith | |||
6760 | /// element of the result of the vector shuffle. | |||
6761 | static SDValue getShuffleScalarElt(SDNode *N, unsigned Index, SelectionDAG &DAG, | |||
6762 | unsigned Depth) { | |||
6763 | if (Depth == 6) | |||
6764 | return SDValue(); // Limit search depth. | |||
6765 | ||||
6766 | SDValue V = SDValue(N, 0); | |||
6767 | EVT VT = V.getValueType(); | |||
6768 | unsigned Opcode = V.getOpcode(); | |||
6769 | ||||
6770 | // Recurse into ISD::VECTOR_SHUFFLE node to find scalars. | |||
6771 | if (const ShuffleVectorSDNode *SV = dyn_cast<ShuffleVectorSDNode>(N)) { | |||
6772 | int Elt = SV->getMaskElt(Index); | |||
6773 | ||||
6774 | if (Elt < 0) | |||
6775 | return DAG.getUNDEF(VT.getVectorElementType()); | |||
6776 | ||||
6777 | unsigned NumElems = VT.getVectorNumElements(); | |||
6778 | SDValue NewV = (Elt < (int)NumElems) ? SV->getOperand(0) | |||
6779 | : SV->getOperand(1); | |||
6780 | return getShuffleScalarElt(NewV.getNode(), Elt % NumElems, DAG, Depth+1); | |||
6781 | } | |||
6782 | ||||
6783 | // Recurse into target specific vector shuffles to find scalars. | |||
6784 | if (isTargetShuffle(Opcode)) { | |||
6785 | MVT ShufVT = V.getSimpleValueType(); | |||
6786 | MVT ShufSVT = ShufVT.getVectorElementType(); | |||
6787 | int NumElems = (int)ShufVT.getVectorNumElements(); | |||
6788 | SmallVector<int, 16> ShuffleMask; | |||
6789 | SmallVector<SDValue, 16> ShuffleOps; | |||
6790 | bool IsUnary; | |||
6791 | ||||
6792 | if (!getTargetShuffleMask(N, ShufVT, true, ShuffleOps, ShuffleMask, IsUnary)) | |||
6793 | return SDValue(); | |||
6794 | ||||
6795 | int Elt = ShuffleMask[Index]; | |||
6796 | if (Elt == SM_SentinelZero) | |||
6797 | return ShufSVT.isInteger() ? DAG.getConstant(0, SDLoc(N), ShufSVT) | |||
6798 | : DAG.getConstantFP(+0.0, SDLoc(N), ShufSVT); | |||
6799 | if (Elt == SM_SentinelUndef) | |||
6800 | return DAG.getUNDEF(ShufSVT); | |||
6801 | ||||
6802 | assert(0 <= Elt && Elt < (2*NumElems) && "Shuffle index out of range"); | |||
6803 | SDValue NewV = (Elt < NumElems) ? ShuffleOps[0] : ShuffleOps[1]; | |||
6804 | return getShuffleScalarElt(NewV.getNode(), Elt % NumElems, DAG, | |||
6805 | Depth+1); | |||
6806 | } | |||
6807 | ||||
6808 | // Actual nodes that may contain scalar elements | |||
6809 | if (Opcode == ISD::BITCAST) { | |||
6810 | V = V.getOperand(0); | |||
6811 | EVT SrcVT = V.getValueType(); | |||
6812 | unsigned NumElems = VT.getVectorNumElements(); | |||
6813 | ||||
6814 | if (!SrcVT.isVector() || SrcVT.getVectorNumElements() != NumElems) | |||
6815 | return SDValue(); | |||
6816 | } | |||
6817 | ||||
6818 | if (V.getOpcode() == ISD::SCALAR_TO_VECTOR) | |||
6819 | return (Index == 0) ? V.getOperand(0) | |||
6820 | : DAG.getUNDEF(VT.getVectorElementType()); | |||
6821 | ||||
6822 | if (V.getOpcode() == ISD::BUILD_VECTOR) | |||
6823 | return V.getOperand(Index); | |||
6824 | ||||
6825 | return SDValue(); | |||
6826 | } | |||
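    | // For example, querying index 2 of shuffle(BUILD_VECTOR(a, b, c, d), undef) | |||
    | // with mask {3, 2, 1, 0} follows mask element 1 into the build_vector and | |||
    | // returns the scalar 'b'. | |||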
6827 | ||||
6828 | // Use PINSRB/PINSRW/PINSRD to create a build vector. | |||
6829 | static SDValue LowerBuildVectorAsInsert(SDValue Op, unsigned NonZeros, | |||
6830 | unsigned NumNonZero, unsigned NumZero, | |||
6831 | SelectionDAG &DAG, | |||
6832 | const X86Subtarget &Subtarget) { | |||
6833 | MVT VT = Op.getSimpleValueType(); | |||
6834 | unsigned NumElts = VT.getVectorNumElements(); | |||
6835 | assert(((VT == MVT::v8i16 && Subtarget.hasSSE2()) || | |||
6836 | ((VT == MVT::v16i8 || VT == MVT::v4i32) && Subtarget.hasSSE41())) && | |||
6837 | "Illegal vector insertion"); | |||
6838 | ||||
6839 | SDLoc dl(Op); | |||
6840 | SDValue V; | |||
6841 | bool First = true; | |||
6842 | ||||
6843 | for (unsigned i = 0; i < NumElts; ++i) { | |||
6844 | bool IsNonZero = (NonZeros & (1 << i)) != 0; | |||
6845 | if (!IsNonZero) | |||
6846 | continue; | |||
6847 | ||||
6848 | // If the build vector contains zeros or our first insertion is not at | |||
6849 | // index 0, insert into a zero vector to break any register dependency; | |||
6850 | // otherwise use SCALAR_TO_VECTOR/VZEXT_MOVL. | |||
6851 | if (First) { | |||
6852 | First = false; | |||
6853 | if (NumZero || 0 != i) | |||
6854 | V = getZeroVector(VT, Subtarget, DAG, dl); | |||
6855 | else { | |||
6856 | assert(0 == i && "Expected insertion into zero-index"); | |||
6857 | V = DAG.getAnyExtOrTrunc(Op.getOperand(i), dl, MVT::i32); | |||
6858 | V = DAG.getNode(ISD::SCALAR_TO_VECTOR, dl, MVT::v4i32, V); | |||
6859 | V = DAG.getNode(X86ISD::VZEXT_MOVL, dl, MVT::v4i32, V); | |||
6860 | V = DAG.getBitcast(VT, V); | |||
6861 | continue; | |||
6862 | } | |||
6863 | } | |||
6864 | V = DAG.getNode(ISD::INSERT_VECTOR_ELT, dl, VT, V, Op.getOperand(i), | |||
6865 | DAG.getIntPtrConstant(i, dl)); | |||
6866 | } | |||
6867 | ||||
6868 | return V; | |||
6869 | } | |||
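    | // For example, building (i16 x, i16 0, i16 y, i16 0, ...) starts from a | |||
    | // zero vector (NumZero != 0) and emits INSERT_VECTOR_ELTs only at the | |||
    | // non-zero indices 0 and 2, each later matched to a single PINSRW. | |||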
6870 | ||||
6871 | /// Custom lower build_vector of v16i8. | |||
6872 | static SDValue LowerBuildVectorv16i8(SDValue Op, unsigned NonZeros, | |||
6873 | unsigned NumNonZero, unsigned NumZero, | |||
6874 | SelectionDAG &DAG, | |||
6875 | const X86Subtarget &Subtarget) { | |||
6876 | if (NumNonZero > 8 && !Subtarget.hasSSE41()) | |||
6877 | return SDValue(); | |||
6878 | ||||
6879 | // SSE4.1 - use PINSRB to insert each byte directly. | |||
6880 | if (Subtarget.hasSSE41()) | |||
6881 | return LowerBuildVectorAsInsert(Op, NonZeros, NumNonZero, NumZero, DAG, | |||
6882 | Subtarget); | |||
6883 | ||||
6884 | SDLoc dl(Op); | |||
6885 | SDValue V; | |||
6886 | bool First = true; | |||
6887 | ||||
6888 | // Pre-SSE4.1 - merge byte pairs and insert with PINSRW. | |||
6889 | for (unsigned i = 0; i < 16; ++i) { | |||
6890 | bool ThisIsNonZero = (NonZeros & (1 << i)) != 0; | |||
6891 | if (ThisIsNonZero && First) { | |||
6892 | if (NumZero) | |||
6893 | V = getZeroVector(MVT::v8i16, Subtarget, DAG, dl); | |||
6894 | else | |||
6895 | V = DAG.getUNDEF(MVT::v8i16); | |||
6896 | First = false; | |||
6897 | } | |||
6898 | ||||
6899 | if ((i & 1) != 0) { | |||
6900 | // FIXME: Investigate extending to i32 instead of just i16. | |||
6901 | // FIXME: Investigate combining the first 4 bytes as an i32 instead. | |||
6902 | SDValue ThisElt, LastElt; | |||
6903 | bool LastIsNonZero = (NonZeros & (1 << (i - 1))) != 0; | |||
6904 | if (LastIsNonZero) { | |||
6905 | LastElt = | |||
6906 | DAG.getNode(ISD::ZERO_EXTEND, dl, MVT::i16, Op.getOperand(i - 1)); | |||
6907 | } | |||
6908 | if (ThisIsNonZero) { | |||
6909 | ThisElt = DAG.getNode(ISD::ZERO_EXTEND, dl, MVT::i16, Op.getOperand(i)); | |||
6910 | ThisElt = DAG.getNode(ISD::SHL, dl, MVT::i16, ThisElt, | |||
6911 | DAG.getConstant(8, dl, MVT::i8)); | |||
6912 | if (LastIsNonZero) | |||
6913 | ThisElt = DAG.getNode(ISD::OR, dl, MVT::i16, ThisElt, LastElt); | |||
6914 | } else | |||
6915 | ThisElt = LastElt; | |||
6916 | ||||
6917 | if (ThisElt) { | |||
6918 | if (1 == i) { | |||
6919 | V = NumZero ? DAG.getZExtOrTrunc(ThisElt, dl, MVT::i32) | |||
6920 | : DAG.getAnyExtOrTrunc(ThisElt, dl, MVT::i32); | |||
6921 | V = DAG.getNode(ISD::SCALAR_TO_VECTOR, dl, MVT::v4i32, V); | |||
6922 | V = DAG.getNode(X86ISD::VZEXT_MOVL, dl, MVT::v4i32, V); | |||
6923 | V = DAG.getBitcast(MVT::v8i16, V); | |||
6924 | } else { | |||
6925 | V = DAG.getNode(ISD::INSERT_VECTOR_ELT, dl, MVT::v8i16, V, ThisElt, | |||
6926 | DAG.getIntPtrConstant(i / 2, dl)); | |||
6927 | } | |||
6928 | } | |||
6929 | } | |||
6930 | } | |||
6931 | ||||
6932 | return DAG.getBitcast(MVT::v16i8, V); | |||
6933 | } | |||
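    | // Pre-SSE4.1, bytes 2*k and 2*k+1 are zero-extended to i16, the odd byte | |||
    | // is shifted left by 8 and OR'd with the even one, and the pair is inserted | |||
    | // at word index k - so 16 bytes cost at most 8 PINSRW-style insertions. | |||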
6934 | ||||
6935 | /// Custom lower build_vector of v8i16. | |||
6936 | static SDValue LowerBuildVectorv8i16(SDValue Op, unsigned NonZeros, | |||
6937 | unsigned NumNonZero, unsigned NumZero, | |||
6938 | SelectionDAG &DAG, | |||
6939 | const X86Subtarget &Subtarget) { | |||
6940 | if (NumNonZero > 4 && !Subtarget.hasSSE41()) | |||
6941 | return SDValue(); | |||
6942 | ||||
6943 | // Use PINSRW to insert each element directly. | |||
6944 | return LowerBuildVectorAsInsert(Op, NonZeros, NumNonZero, NumZero, DAG, | |||
6945 | Subtarget); | |||
6946 | } | |||
6947 | ||||
6948 | /// Custom lower build_vector of v4i32 or v4f32. | |||
6949 | static SDValue LowerBuildVectorv4x32(SDValue Op, SelectionDAG &DAG, | |||
6950 | const X86Subtarget &Subtarget) { | |||
6951 | // If this is a splat of a pair of elements, use MOVDDUP (unless the target | |||
6952 | // has XOP; in that case defer lowering to potentially use VPERMIL2PS). | |||
6953 | // Because we're creating a less complicated build vector here, we may enable | |||
6954 | // further folding of the MOVDDUP via shuffle transforms. | |||
6955 | if (Subtarget.hasSSE3() && !Subtarget.hasXOP() && | |||
6956 | Op.getOperand(0) == Op.getOperand(2) && | |||
6957 | Op.getOperand(1) == Op.getOperand(3) && | |||
6958 | Op.getOperand(0) != Op.getOperand(1)) { | |||
6959 | SDLoc DL(Op); | |||
6960 | MVT VT = Op.getSimpleValueType(); | |||
6961 | MVT EltVT = VT.getVectorElementType(); | |||
6962 | // Create a new build vector with the first 2 elements followed by undef | |||
6963 | // padding, bitcast to v2f64, duplicate, and bitcast back. | |||
6964 | SDValue Ops[4] = { Op.getOperand(0), Op.getOperand(1), | |||
6965 | DAG.getUNDEF(EltVT), DAG.getUNDEF(EltVT) }; | |||
6966 | SDValue NewBV = DAG.getBitcast(MVT::v2f64, DAG.getBuildVector(VT, DL, Ops)); | |||
6967 | SDValue Dup = DAG.getNode(X86ISD::MOVDDUP, DL, MVT::v2f64, NewBV); | |||
6968 | return DAG.getBitcast(VT, Dup); | |||
6969 | } | |||
6970 | ||||
6971 | // Find all zeroable elements. | |||
6972 | std::bitset<4> Zeroable; | |||
6973 | for (int i=0; i < 4; ++i) { | |||
6974 | SDValue Elt = Op->getOperand(i); | |||
6975 | Zeroable[i] = (Elt.isUndef() || X86::isZeroNode(Elt)); | |||
6976 | } | |||
6977 | assert(Zeroable.size() - Zeroable.count() > 1 && | |||
6978 | "We expect at least two non-zero elements!"); | |||
6979 | ||||
6980 | // We only know how to deal with build_vector nodes where elements are either | |||
6981 | // zeroable or extract_vector_elt with constant index. | |||
6982 | SDValue FirstNonZero; | |||
6983 | unsigned FirstNonZeroIdx; | |||
6984 | for (unsigned i=0; i < 4; ++i) { | |||
6985 | if (Zeroable[i]) | |||
6986 | continue; | |||
6987 | SDValue Elt = Op->getOperand(i); | |||
6988 | if (Elt.getOpcode() != ISD::EXTRACT_VECTOR_ELT || | |||
6989 | !isa<ConstantSDNode>(Elt.getOperand(1))) | |||
6990 | return SDValue(); | |||
6991 | // Make sure that this node is extracting from a 128-bit vector. | |||
6992 | MVT VT = Elt.getOperand(0).getSimpleValueType(); | |||
6993 | if (!VT.is128BitVector()) | |||
6994 | return SDValue(); | |||
6995 | if (!FirstNonZero.getNode()) { | |||
6996 | FirstNonZero = Elt; | |||
6997 | FirstNonZeroIdx = i; | |||
6998 | } | |||
6999 | } | |||
7000 | ||||
7001 | assert(FirstNonZero.getNode() && "Unexpected build vector of all zeros!"); | |||
7002 | SDValue V1 = FirstNonZero.getOperand(0); | |||
7003 | MVT VT = V1.getSimpleValueType(); | |||
7004 | ||||
7005 | // See if this build_vector can be lowered as a blend with zero. | |||
7006 | SDValue Elt; | |||
7007 | unsigned EltMaskIdx, EltIdx; | |||
7008 | int Mask[4]; | |||
7009 | for (EltIdx = 0; EltIdx < 4; ++EltIdx) { | |||
7010 | if (Zeroable[EltIdx]) { | |||
7011 | // The zero vector will be on the right hand side. | |||
7012 | Mask[EltIdx] = EltIdx+4; | |||
7013 | continue; | |||
7014 | } | |||
7015 | ||||
7016 | Elt = Op->getOperand(EltIdx); | |||
7017 | // By construction, Elt is an EXTRACT_VECTOR_ELT with constant index. | |||
7018 | EltMaskIdx = Elt.getConstantOperandVal(1); | |||
7019 | if (Elt.getOperand(0) != V1 || EltMaskIdx != EltIdx) | |||
7020 | break; | |||
7021 | Mask[EltIdx] = EltIdx; | |||
7022 | } | |||
7023 | ||||
7024 | if (EltIdx == 4) { | |||
7025 | // Let the shuffle legalizer deal with blend operations. | |||
7026 | SDValue VZero = getZeroVector(VT, Subtarget, DAG, SDLoc(Op)); | |||
7027 | if (V1.getSimpleValueType() != VT) | |||
7028 | V1 = DAG.getBitcast(VT, V1); | |||
7029 | return DAG.getVectorShuffle(VT, SDLoc(V1), V1, VZero, Mask); | |||
7030 | } | |||
7031 | ||||
7032 | // See if we can lower this build_vector to an INSERTPS. | |||
7033 | if (!Subtarget.hasSSE41()) | |||
7034 | return SDValue(); | |||
7035 | ||||
7036 | SDValue V2 = Elt.getOperand(0); | |||
7037 | if (Elt == FirstNonZero && EltIdx == FirstNonZeroIdx) | |||
7038 | V1 = SDValue(); | |||
7039 | ||||
7040 | bool CanFold = true; | |||
7041 | for (unsigned i = EltIdx + 1; i < 4 && CanFold; ++i) { | |||
7042 | if (Zeroable[i]) | |||
7043 | continue; | |||
7044 | ||||
7045 | SDValue Current = Op->getOperand(i); | |||
7046 | SDValue SrcVector = Current->getOperand(0); | |||
7047 | if (!V1.getNode()) | |||
7048 | V1 = SrcVector; | |||
7049 | CanFold = (SrcVector == V1) && (Current.getConstantOperandVal(1) == i); | |||
7050 | } | |||
7051 | ||||
7052 | if (!CanFold) | |||
7053 | return SDValue(); | |||
7054 | ||||
7055 | assert(V1.getNode() && "Expected at least two non-zero elements!"); | |||
7056 | if (V1.getSimpleValueType() != MVT::v4f32) | |||
7057 | V1 = DAG.getBitcast(MVT::v4f32, V1); | |||
7058 | if (V2.getSimpleValueType() != MVT::v4f32) | |||
7059 | V2 = DAG.getBitcast(MVT::v4f32, V2); | |||
7060 | ||||
7061 | // Ok, we can emit an INSERTPS instruction. | |||
7062 | unsigned ZMask = Zeroable.to_ulong(); | |||
7063 | ||||
7064 | unsigned InsertPSMask = EltMaskIdx << 6 | EltIdx << 4 | ZMask; | |||
7065 | assert((InsertPSMask & ~0xFFu) == 0 && "Invalid mask!"); | |||
7066 | SDLoc DL(Op); | |||
7067 | SDValue Result = DAG.getNode(X86ISD::INSERTPS, DL, MVT::v4f32, V1, V2, | |||
7068 | DAG.getIntPtrConstant(InsertPSMask, DL)); | |||
7069 | return DAG.getBitcast(VT, Result); | |||
7070 | } | |||
7071 | ||||
7072 | /// Return a vector logical shift node. | |||
7073 | static SDValue getVShift(bool isLeft, EVT VT, SDValue SrcOp, unsigned NumBits, | |||
7074 | SelectionDAG &DAG, const TargetLowering &TLI, | |||
7075 | const SDLoc &dl) { | |||
7076 | assert(VT.is128BitVector() && "Unknown type for VShift"); | |||
7077 | MVT ShVT = MVT::v16i8; | |||
7078 | unsigned Opc = isLeft ? X86ISD::VSHLDQ : X86ISD::VSRLDQ; | |||
7079 | SrcOp = DAG.getBitcast(ShVT, SrcOp); | |||
7080 | assert(NumBits % 8 == 0 && "Only support byte sized shifts"); | |||
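| // E.g. a 64-bit left shift of a v2i64 value is emitted as a byte shift: | |||
| // (v2i64 (bitcast (VSHLDQ (v16i8 (bitcast Src)), 8))). | |||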
7081 | SDValue ShiftVal = DAG.getConstant(NumBits/8, dl, MVT::i8); | |||
7082 | return DAG.getBitcast(VT, DAG.getNode(Opc, dl, ShVT, SrcOp, ShiftVal)); | |||
7083 | } | |||
7084 | ||||
7085 | static SDValue LowerAsSplatVectorLoad(SDValue SrcOp, MVT VT, const SDLoc &dl, | |||
7086 | SelectionDAG &DAG) { | |||
7087 | ||||
7088 | // Check if the scalar load can be widened into a vector load. And if | |||
7089 | // the address is "base + cst" see if the cst can be "absorbed" into | |||
7090 | // the shuffle mask. | |||
7091 | if (LoadSDNode *LD = dyn_cast<LoadSDNode>(SrcOp)) { | |||
7092 | SDValue Ptr = LD->getBasePtr(); | |||
7093 | if (!ISD::isNormalLoad(LD) || LD->isVolatile()) | |||
7094 | return SDValue(); | |||
7095 | EVT PVT = LD->getValueType(0); | |||
7096 | if (PVT != MVT::i32 && PVT != MVT::f32) | |||
7097 | return SDValue(); | |||
7098 | ||||
7099 | int FI = -1; | |||
7100 | int64_t Offset = 0; | |||
7101 | if (FrameIndexSDNode *FINode = dyn_cast<FrameIndexSDNode>(Ptr)) { | |||
7102 | FI = FINode->getIndex(); | |||
7103 | Offset = 0; | |||
7104 | } else if (DAG.isBaseWithConstantOffset(Ptr) && | |||
7105 | isa<FrameIndexSDNode>(Ptr.getOperand(0))) { | |||
7106 | FI = cast<FrameIndexSDNode>(Ptr.getOperand(0))->getIndex(); | |||
7107 | Offset = Ptr.getConstantOperandVal(1); | |||
7108 | Ptr = Ptr.getOperand(0); | |||
7109 | } else { | |||
7110 | return SDValue(); | |||
7111 | } | |||
7112 | ||||
7113 | // FIXME: 256-bit vector instructions don't require a strict alignment, | |||
7114 | // improve this code to support it better. | |||
7115 | unsigned RequiredAlign = VT.getSizeInBits()/8; | |||
7116 | SDValue Chain = LD->getChain(); | |||
7117 | // Make sure the stack object alignment is at least 16 or 32. | |||
7118 | MachineFrameInfo &MFI = DAG.getMachineFunction().getFrameInfo(); | |||
7119 | if (DAG.InferPtrAlignment(Ptr) < RequiredAlign) { | |||
7120 | if (MFI.isFixedObjectIndex(FI)) { | |||
7121 | // Can't change the alignment. FIXME: It's possible to compute | |||
7122 | // the exact stack offset and reference FI + adjust offset instead. | |||
7123 | // If someone *really* cares about this, that's the way to implement it. | |||
7124 | return SDValue(); | |||
7125 | } else { | |||
7126 | MFI.setObjectAlignment(FI, RequiredAlign); | |||
7127 | } | |||
7128 | } | |||
7129 | ||||
7130 | // (Offset % 16 or 32) must be a multiple of 4. The address is then | |||
7131 | // Ptr + (Offset & ~15). | |||
7132 | if (Offset < 0) | |||
7133 | return SDValue(); | |||
7134 | if ((Offset % RequiredAlign) & 3) | |||
7135 | return SDValue(); | |||
7136 | int64_t StartOffset = Offset & ~int64_t(RequiredAlign - 1); | |||
7137 | if (StartOffset) { | |||
7138 | SDLoc DL(Ptr); | |||
7139 | Ptr = DAG.getNode(ISD::ADD, DL, Ptr.getValueType(), Ptr, | |||
7140 | DAG.getConstant(StartOffset, DL, Ptr.getValueType())); | |||
7141 | } | |||
7142 | ||||
7143 | int EltNo = (Offset - StartOffset) >> 2; | |||
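| // E.g. for a 16-byte vector (RequiredAlign == 16) with Offset == 20, the | |||
| // widened load starts at StartOffset == 16 and the scalar lives in element | |||
| // (20 - 16) >> 2 == 1, which the splat shuffle below broadcasts to all lanes. | |||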
7144 | unsigned NumElems = VT.getVectorNumElements(); | |||
7145 | ||||
7146 | EVT NVT = EVT::getVectorVT(*DAG.getContext(), PVT, NumElems); | |||
7147 | SDValue V1 = DAG.getLoad(NVT, dl, Chain, Ptr, | |||
7148 | LD->getPointerInfo().getWithOffset(StartOffset)); | |||
7149 | ||||
7150 | SmallVector<int, 8> Mask(NumElems, EltNo); | |||
7151 | ||||
7152 | return DAG.getVectorShuffle(NVT, dl, V1, DAG.getUNDEF(NVT), Mask); | |||
7153 | } | |||
7154 | ||||
7155 | return SDValue(); | |||
7156 | } | |||
7157 | ||||
7158 | /// Given the initializing elements 'Elts' of a vector of type 'VT', see if the | |||
7159 | /// elements can be replaced by a single large load which has the same value as | |||
7160 | /// a build_vector or insert_subvector whose loaded operands are 'Elts'. | |||
7161 | /// | |||
7162 | /// Example: <load i32 *a, load i32 *a+4, zero, undef> -> zextload a | |||
7163 | static SDValue EltsFromConsecutiveLoads(EVT VT, ArrayRef<SDValue> Elts, | |||
7164 | const SDLoc &DL, SelectionDAG &DAG, | |||
7165 | const X86Subtarget &Subtarget, | |||
7166 | bool isAfterLegalize) { | |||
7167 | unsigned NumElems = Elts.size(); | |||
7168 | ||||
7169 | int LastLoadedElt = -1; | |||
7170 | SmallBitVector LoadMask(NumElems, false); | |||
7171 | SmallBitVector ZeroMask(NumElems, false); | |||
7172 | SmallBitVector UndefMask(NumElems, false); | |||
7173 | ||||
7174 | // For each element in the initializer, see if we've found a load, zero or an | |||
7175 | // undef. | |||
7176 | for (unsigned i = 0; i < NumElems; ++i) { | |||
7177 | SDValue Elt = peekThroughBitcasts(Elts[i]); | |||
7178 | if (!Elt.getNode()) | |||
7179 | return SDValue(); | |||
7180 | ||||
7181 | if (Elt.isUndef()) | |||
7182 | UndefMask[i] = true; | |||
7183 | else if (X86::isZeroNode(Elt) || ISD::isBuildVectorAllZeros(Elt.getNode())) | |||
7184 | ZeroMask[i] = true; | |||
7185 | else if (ISD::isNON_EXTLoad(Elt.getNode())) { | |||
7186 | LoadMask[i] = true; | |||
7187 | LastLoadedElt = i; | |||
7188 | // Each loaded element must be the correct fractional portion of the | |||
7189 | // requested vector load. | |||
7190 | if ((NumElems * Elt.getValueSizeInBits()) != VT.getSizeInBits()) | |||
7191 | return SDValue(); | |||
7192 | } else | |||
7193 | return SDValue(); | |||
7194 | } | |||
7195 | assert((ZeroMask | UndefMask | LoadMask).count() == NumElems && | |||
7196 | "Incomplete element masks"); | |||
7197 | ||||
7198 | // Handle Special Cases - all undef or undef/zero. | |||
7199 | if (UndefMask.count() == NumElems) | |||
7200 | return DAG.getUNDEF(VT); | |||
7201 | ||||
7202 | // FIXME: Should we return this as a BUILD_VECTOR instead? | |||
7203 | if ((ZeroMask | UndefMask).count() == NumElems) | |||
7204 | return VT.isInteger() ? DAG.getConstant(0, DL, VT) | |||
7205 | : DAG.getConstantFP(0.0, DL, VT); | |||
7206 | ||||
7207 | const TargetLowering &TLI = DAG.getTargetLoweringInfo(); | |||
7208 | int FirstLoadedElt = LoadMask.find_first(); | |||
7209 | SDValue EltBase = peekThroughBitcasts(Elts[FirstLoadedElt]); | |||
7210 | LoadSDNode *LDBase = cast<LoadSDNode>(EltBase); | |||
7211 | EVT LDBaseVT = EltBase.getValueType(); | |||
7212 | ||||
7213 | // Consecutive loads can contain UNDEFs but not ZERO elements. | |||
7214 | // Consecutive loads with UNDEF and ZERO elements require | |||
7215 | // an additional shuffle stage to clear the ZERO elements. | |||
7216 | bool IsConsecutiveLoad = true; | |||
7217 | bool IsConsecutiveLoadWithZeros = true; | |||
7218 | for (int i = FirstLoadedElt + 1; i <= LastLoadedElt; ++i) { | |||
7219 | if (LoadMask[i]) { | |||
7220 | SDValue Elt = peekThroughBitcasts(Elts[i]); | |||
7221 | LoadSDNode *LD = cast<LoadSDNode>(Elt); | |||
7222 | if (!DAG.areNonVolatileConsecutiveLoads( | |||
7223 | LD, LDBase, Elt.getValueType().getStoreSizeInBits() / 8, | |||
7224 | i - FirstLoadedElt)) { | |||
7225 | IsConsecutiveLoad = false; | |||
7226 | IsConsecutiveLoadWithZeros = false; | |||
7227 | break; | |||
7228 | } | |||
7229 | } else if (ZeroMask[i]) { | |||
7230 | IsConsecutiveLoad = false; | |||
7231 | } | |||
7232 | } | |||
7233 | ||||
7234 | SmallVector<LoadSDNode *, 8> Loads; | |||
7235 | for (int i = FirstLoadedElt; i <= LastLoadedElt; ++i) | |||
7236 | if (LoadMask[i]) | |||
7237 | Loads.push_back(cast<LoadSDNode>(peekThroughBitcasts(Elts[i]))); | |||
7238 | ||||
7239 | auto CreateLoad = [&DAG, &DL, &Loads](EVT VT, LoadSDNode *LDBase) { | |||
7240 | auto MMOFlags = LDBase->getMemOperand()->getFlags(); | |||
7241 | assert(!(MMOFlags & MachineMemOperand::MOVolatile) && | |||
7242 | "Cannot merge volatile loads."); | |||
7243 | SDValue NewLd = | |||
7244 | DAG.getLoad(VT, DL, LDBase->getChain(), LDBase->getBasePtr(), | |||
7245 | LDBase->getPointerInfo(), LDBase->getAlignment(), MMOFlags); | |||
7246 | for (auto *LD : Loads) | |||
7247 | DAG.makeEquivalentMemoryOrdering(LD, NewLd); | |||
7248 | return NewLd; | |||
7249 | }; | |||
7250 | ||||
7251 | // LOAD - all consecutive load/undefs (must start/end with a load). | |||
7252 | // If we have found an entire vector of loads and undefs, then return a large | |||
7253 | // load of the entire vector width starting at the base pointer. | |||
7254 | // If the vector contains zeros, then attempt to shuffle those elements. | |||
7255 | if (FirstLoadedElt == 0 && LastLoadedElt == (int)(NumElems - 1) && | |||
7256 | (IsConsecutiveLoad || IsConsecutiveLoadWithZeros)) { | |||
7257 | assert(LDBase && "Did not find base load for merging consecutive loads"); | |||
7258 | EVT EltVT = LDBase->getValueType(0); | |||
7259 | // Ensure that the input vector size for the merged loads matches the | |||
7260 | // cumulative size of the input elements. | |||
7261 | if (VT.getSizeInBits() != EltVT.getSizeInBits() * NumElems) | |||
7262 | return SDValue(); | |||
7263 | ||||
7264 | if (isAfterLegalize && !TLI.isOperationLegal(ISD::LOAD, VT)) | |||
7265 | return SDValue(); | |||
7266 | ||||
7267 | // Don't create 256-bit non-temporal aligned loads without AVX2 as these | |||
7268 | // will lower to regular temporal loads and use the cache. | |||
7269 | if (LDBase->isNonTemporal() && LDBase->getAlignment() >= 32 && | |||
7270 | VT.is256BitVector() && !Subtarget.hasInt256()) | |||
7271 | return SDValue(); | |||
7272 | ||||
7273 | if (IsConsecutiveLoad) | |||
7274 | return CreateLoad(VT, LDBase); | |||
7275 | ||||
7276 | // IsConsecutiveLoadWithZeros - we need to create a shuffle of the loaded | |||
7277 | // vector and a zero vector to clear out the zero elements. | |||
7278 | if (!isAfterLegalize && NumElems == VT.getVectorNumElements()) { | |||
7279 | SmallVector<int, 4> ClearMask(NumElems, -1); | |||
7280 | for (unsigned i = 0; i < NumElems; ++i) { | |||
7281 | if (ZeroMask[i]) | |||
7282 | ClearMask[i] = i + NumElems; | |||
7283 | else if (LoadMask[i]) | |||
7284 | ClearMask[i] = i; | |||
7285 | } | |||
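| // E.g. with loads in elements 0 and 3, a zero in element 1 and an undef in | |||
| // element 2, ClearMask is <0, 5, -1, 3>: lanes 0 and 3 come from the merged | |||
| // load, lane 1 from the zero vector, and lane 2 stays undefined. | |||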
7286 | SDValue V = CreateLoad(VT, LDBase); | |||
7287 | SDValue Z = VT.isInteger() ? DAG.getConstant(0, DL, VT) | |||
7288 | : DAG.getConstantFP(0.0, DL, VT); | |||
7289 | return DAG.getVectorShuffle(VT, DL, V, Z, ClearMask); | |||
7290 | } | |||
7291 | } | |||
7292 | ||||
7293 | int LoadSize = | |||
7294 | (1 + LastLoadedElt - FirstLoadedElt) * LDBaseVT.getStoreSizeInBits(); | |||
7295 | ||||
7296 | // VZEXT_LOAD - consecutive 32/64-bit load/undefs followed by zeros/undefs. | |||
7297 | if (IsConsecutiveLoad && FirstLoadedElt == 0 && | |||
7298 | (LoadSize == 32 || LoadSize == 64) && | |||
7299 | ((VT.is128BitVector() || VT.is256BitVector() || VT.is512BitVector()))) { | |||
7300 | MVT VecSVT = VT.isFloatingPoint() ? MVT::getFloatingPointVT(LoadSize) | |||
7301 | : MVT::getIntegerVT(LoadSize); | |||
7302 | MVT VecVT = MVT::getVectorVT(VecSVT, VT.getSizeInBits() / LoadSize); | |||
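| // E.g. a v4f32 whose first two elements are consecutive f32 loads and whose | |||
| // upper two elements are zero/undef gives LoadSize == 64, so it becomes a | |||
| // 64-bit VZEXT_LOAD into v2f64 that is bitcast back to v4f32. | |||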
7303 | if (TLI.isTypeLegal(VecVT)) { | |||
7304 | SDVTList Tys = DAG.getVTList(VecVT, MVT::Other); | |||
7305 | SDValue Ops[] = { LDBase->getChain(), LDBase->getBasePtr() }; | |||
7306 | SDValue ResNode = | |||
7307 | DAG.getMemIntrinsicNode(X86ISD::VZEXT_LOAD, DL, Tys, Ops, VecSVT, | |||
7308 | LDBase->getPointerInfo(), | |||
7309 | LDBase->getAlignment(), | |||
7310 | MachineMemOperand::MOLoad); | |||
7311 | for (auto *LD : Loads) | |||
7312 | DAG.makeEquivalentMemoryOrdering(LD, ResNode); | |||
7313 | return DAG.getBitcast(VT, ResNode); | |||
7314 | } | |||
7315 | } | |||
7316 | ||||
7317 | return SDValue(); | |||
7318 | } | |||
| ||||
7319 | ||||
7320 | static Constant *getConstantVector(MVT VT, const APInt &SplatValue, | |||
7321 | unsigned SplatBitSize, LLVMContext &C) { | |||
7322 | unsigned ScalarSize = VT.getScalarSizeInBits(); | |||
7323 | unsigned NumElm = SplatBitSize / ScalarSize; | |||
7324 | ||||
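| // E.g. splatting a 64-bit pattern into v8i32 (ScalarSize == 32) yields a | |||
| // two-element constant vector holding the low and high i32 halves of the | |||
| // pattern, which can then be broadcast as a subvector. | |||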
7325 | SmallVector<Constant *, 32> ConstantVec; | |||
7326 | for (unsigned i = 0; i < NumElm; i++) { | |||
7327 | APInt Val = SplatValue.extractBits(ScalarSize, ScalarSize * i); | |||
7328 | Constant *Const; | |||
7329 | if (VT.isFloatingPoint()) { | |||
7330 | if (ScalarSize == 32) { | |||
7331 | Const = ConstantFP::get(C, APFloat(APFloat::IEEEsingle(), Val)); | |||
7332 | } else { | |||
7333 | assert(ScalarSize == 64 && "Unsupported floating point scalar size"); | |||
7334 | Const = ConstantFP::get(C, APFloat(APFloat::IEEEdouble(), Val)); | |||
7335 | } | |||
7336 | } else | |||
7337 | Const = Constant::getIntegerValue(Type::getIntNTy(C, ScalarSize), Val); | |||
7338 | ConstantVec.push_back(Const); | |||
7339 | } | |||
7340 | return ConstantVector::get(ArrayRef<Constant *>(ConstantVec)); | |||
7341 | } | |||
7342 | ||||
7343 | static bool isUseOfShuffle(SDNode *N) { | |||
7344 | for (auto *U : N->uses()) { | |||
7345 | if (isTargetShuffle(U->getOpcode())) | |||
7346 | return true; | |||
7347 | if (U->getOpcode() == ISD::BITCAST) // Ignore bitcasts | |||
7348 | return isUseOfShuffle(U); | |||
7349 | } | |||
7350 | return false; | |||
7351 | } | |||
7352 | ||||
7353 | // Check if the current node of the build vector is a zero-extended vector. | |||
7354 | // If so, return the extended value. | |||
7355 | // For example: (0,0,0,a,0,0,0,a,0,0,0,a,0,0,0,a) returns a. | |||
7356 | // NumElt - return the number of zero-extended identical values. | |||
7357 | // EltType - return the type of the value including the zero extend. | |||
7358 | static SDValue isSplatZeroExtended(const BuildVectorSDNode *Op, | |||
7359 | unsigned &NumElt, MVT &EltType) { | |||
7360 | SDValue ExtValue = Op->getOperand(0); | |||
7361 | unsigned NumElts = Op->getNumOperands(); | |||
7362 | unsigned Delta = NumElts; | |||
7363 | ||||
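| // Delta is the distance between repeats of the extended value. E.g. for the | |||
| // v8i16 pattern (a,0,0,0,a,0,0,0), Delta == 4, so the splat is treated as | |||
| // two zero-extended i64 elements. | |||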
7364 | for (unsigned i = 1; i < NumElts; i++) { | |||
7365 | if (Op->getOperand(i) == ExtValue) { | |||
7366 | Delta = i; | |||
7367 | break; | |||
7368 | } | |||
7369 | if (!(Op->getOperand(i).isUndef() || isNullConstant(Op->getOperand(i)))) | |||
7370 | return SDValue(); | |||
7371 | } | |||
7372 | if (!isPowerOf2_32(Delta) || Delta == 1) | |||
7373 | return SDValue(); | |||
7374 | ||||
7375 | for (unsigned i = Delta; i < NumElts; i++) { | |||
7376 | if (i % Delta == 0) { | |||
7377 | if (Op->getOperand(i) != ExtValue) | |||
7378 | return SDValue(); | |||
7379 | } else if (!(isNullConstant(Op->getOperand(i)) || | |||
7380 | Op->getOperand(i).isUndef())) | |||
7381 | return SDValue(); | |||
7382 | } | |||
7383 | unsigned EltSize = Op->getSimpleValueType(0).getScalarSizeInBits(); | |||
7384 | unsigned ExtVTSize = EltSize * Delta; | |||
7385 | EltType = MVT::getIntegerVT(ExtVTSize); | |||
7386 | NumElt = NumElts / Delta; | |||
7387 | return ExtValue; | |||
7388 | } | |||
7389 | ||||
7390 | /// Attempt to use the vbroadcast instruction to generate a splat value | |||
7391 | /// from a splat BUILD_VECTOR which uses: | |||
7392 | /// a. A single scalar load, or a constant. | |||
7393 | /// b. Repeated pattern of constants (e.g. <0,1,0,1> or <0,1,2,3,0,1,2,3>). | |||
7394 | /// | |||
7395 | /// The VBROADCAST node is returned when a pattern is found, | |||
7396 | /// or SDValue() otherwise. | |||
7397 | static SDValue lowerBuildVectorAsBroadcast(BuildVectorSDNode *BVOp, | |||
7398 | const X86Subtarget &Subtarget, | |||
7399 | SelectionDAG &DAG) { | |||
7400 | // VBROADCAST requires AVX. | |||
7401 | // TODO: Splats could be generated for non-AVX CPUs using SSE | |||
7402 | // instructions, but there's less potential gain for only 128-bit vectors. | |||
7403 | if (!Subtarget.hasAVX()) | |||
7404 | return SDValue(); | |||
7405 | ||||
7406 | MVT VT = BVOp->getSimpleValueType(0); | |||
7407 | SDLoc dl(BVOp); | |||
7408 | ||||
7409 | assert((VT.is128BitVector() || VT.is256BitVector() || VT.is512BitVector()) && | |||
7410 | "Unsupported vector type for broadcast."); | |||
7411 | ||||
7412 | BitVector UndefElements; | |||
7413 | SDValue Ld = BVOp->getSplatValue(&UndefElements); | |||
7414 | ||||
7415 | // Attempt to use VBROADCASTM | |||
7416 | // From this pattern: | |||
7417 | // a. t0 = (zext_i64 (bitcast_i8 v2i1 X)) | |||
7418 | // b. t1 = (build_vector t0 t0) | |||
7419 | // | |||
7420 | // Create (VBROADCASTM v2i1 X) | |||
7421 | if (Subtarget.hasCDI() && (VT.is512BitVector() || Subtarget.hasVLX())) { | |||
7422 | MVT EltType = VT.getScalarType(); | |||
7423 | unsigned NumElts = VT.getVectorNumElements(); | |||
7424 | SDValue BOperand; | |||
7425 | SDValue ZeroExtended = isSplatZeroExtended(BVOp, NumElts, EltType); | |||
7426 | if ((ZeroExtended && ZeroExtended.getOpcode() == ISD::BITCAST) || | |||
7427 | (Ld && Ld.getOpcode() == ISD::ZERO_EXTEND && | |||
7428 | Ld.getOperand(0).getOpcode() == ISD::BITCAST)) { | |||
7429 | if (ZeroExtended) | |||
7430 | BOperand = ZeroExtended.getOperand(0); | |||
7431 | else | |||
7432 | BOperand = Ld.getOperand(0).getOperand(0); | |||
7433 | MVT MaskVT = BOperand.getSimpleValueType(); | |||
7434 | if ((EltType == MVT::i64 && MaskVT == MVT::v8i1) || // for broadcastmb2q | |||
7435 | (EltType == MVT::i32 && MaskVT == MVT::v16i1)) { // for broadcastmw2d | |||
7436 | SDValue Brdcst = | |||
7437 | DAG.getNode(X86ISD::VBROADCASTM, dl, | |||
7438 | MVT::getVectorVT(EltType, NumElts), BOperand); | |||
7439 | return DAG.getBitcast(VT, Brdcst); | |||
7440 | } | |||
7441 | } | |||
7442 | } | |||
7443 | ||||
7444 | unsigned NumElts = VT.getVectorNumElements(); | |||
7445 | unsigned NumUndefElts = UndefElements.count(); | |||
7446 | if (!Ld || (NumElts - NumUndefElts) <= 1) { | |||
7447 | APInt SplatValue, Undef; | |||
7448 | unsigned SplatBitSize; | |||
7449 | bool HasUndef; | |||
7450 | // Check if this is a repeated constant pattern suitable for broadcasting. | |||
7451 | if (BVOp->isConstantSplat(SplatValue, Undef, SplatBitSize, HasUndef) && | |||
7452 | SplatBitSize > VT.getScalarSizeInBits() && | |||
7453 | SplatBitSize < VT.getSizeInBits()) { | |||
7454 | // Avoid replacing the build_vector with a broadcast when it is used by a | |||
7455 | // shuffle instruction, to preserve the existing custom lowering of shuffles. | |||
7456 | if (isUseOfShuffle(BVOp) || BVOp->hasOneUse()) | |||
7457 | return SDValue(); | |||
7458 | // replace BUILD_VECTOR with broadcast of the repeated constants. | |||
7459 | const TargetLowering &TLI = DAG.getTargetLoweringInfo(); | |||
7460 | LLVMContext *Ctx = DAG.getContext(); | |||
7461 | MVT PVT = TLI.getPointerTy(DAG.getDataLayout()); | |||
7462 | if (Subtarget.hasAVX()) { | |||
7463 | if (SplatBitSize <= 64 && Subtarget.hasAVX2() && | |||
7464 | !(SplatBitSize == 64 && Subtarget.is32Bit())) { | |||
7465 | // Splatted value can fit in one INTEGER constant in constant pool. | |||
7466 | // Load the constant and broadcast it. | |||
7467 | MVT CVT = MVT::getIntegerVT(SplatBitSize); | |||
7468 | Type *ScalarTy = Type::getIntNTy(*Ctx, SplatBitSize); | |||
7469 | Constant *C = Constant::getIntegerValue(ScalarTy, SplatValue); | |||
7470 | SDValue CP = DAG.getConstantPool(C, PVT); | |||
7471 | unsigned Repeat = VT.getSizeInBits() / SplatBitSize; | |||
7472 | ||||
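| // E.g. on a 64-bit target with AVX2, a v8i32 splat of a repeated 64-bit | |||
| // pattern loads one i64 constant from the pool, broadcasts it as v4i64, | |||
| // and bitcasts the result back to v8i32. | |||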
7473 | unsigned Alignment = cast<ConstantPoolSDNode>(CP)->getAlignment(); | |||
7474 | Ld = DAG.getLoad( | |||
7475 | CVT, dl, DAG.getEntryNode(), CP, | |||
7476 | MachinePointerInfo::getConstantPool(DAG.getMachineFunction()), | |||
7477 | Alignment); | |||
7478 | SDValue Brdcst = DAG.getNode(X86ISD::VBROADCAST, dl, | |||
7479 | MVT::getVectorVT(CVT, Repeat), Ld); | |||
7480 | return DAG.getBitcast(VT, Brdcst); | |||
7481 | } else if (SplatBitSize == 32 || SplatBitSize == 64) { | |||
7482 | // Splatted value can fit in one FLOAT constant in constant pool. | |||
7483 | // Load the constant and broadcast it. | |||
7484 | // AVX has support for 32- and 64-bit broadcasts for floats only. | |||
7485 | // There is no 64-bit integer broadcast on a 32-bit subtarget. | |||
7486 | MVT CVT = MVT::getFloatingPointVT(SplatBitSize); | |||
7487 | // Lower the splat via APFloat directly, to avoid any conversion. | |||
7488 | Constant *C = | |||
7489 | SplatBitSize == 32 | |||
7490 | ? ConstantFP::get(*Ctx, | |||
7491 | APFloat(APFloat::IEEEsingle(), SplatValue)) | |||
7492 | : ConstantFP::get(*Ctx, | |||
7493 | APFloat(APFloat::IEEEdouble(), SplatValue)); | |||
7494 | SDValue CP = DAG.getConstantPool(C, PVT); | |||
7495 | unsigned Repeat = VT.getSizeInBits() / SplatBitSize; | |||
7496 | ||||
7497 | unsigned Alignment = cast<ConstantPoolSDNode>(CP)->getAlignment(); | |||
7498 | Ld = DAG.getLoad( | |||
7499 | CVT, dl, DAG.getEntryNode(), CP, | |||
7500 | MachinePointerInfo::getConstantPool(DAG.getMachineFunction()), | |||
7501 | Alignment); | |||
7502 | SDValue Brdcst = DAG.getNode(X86ISD::VBROADCAST, dl, | |||
7503 | MVT::getVectorVT(CVT, Repeat), Ld); | |||
7504 | return DAG.getBitcast(VT, Brdcst); | |||
7505 | } else if (SplatBitSize > 64) { | |||
7506 | // Load the vector of constants and broadcast it. | |||
7507 | MVT CVT = VT.getScalarType(); | |||
7508 | Constant *VecC = getConstantVector(VT, SplatValue, SplatBitSize, | |||
7509 | *Ctx); | |||
7510 | SDValue VCP = DAG.getConstantPool(VecC, PVT); | |||
7511 | unsigned NumElm = SplatBitSize / VT.getScalarSizeInBits(); | |||
7512 | unsigned Alignment = cast<ConstantPoolSDNode>(VCP)->getAlignment(); | |||
7513 | Ld = DAG.getLoad( | |||
7514 | MVT::getVectorVT(CVT, NumElm), dl, DAG.getEntryNode(), VCP, | |||
7515 | MachinePointerInfo::getConstantPool(DAG.getMachineFunction()), | |||
7516 | Alignment); | |||
7517 | SDValue Brdcst = DAG.getNode(X86ISD::SUBV_BROADCAST, dl, VT, Ld); | |||
7518 | return DAG.getBitcast(VT, Brdcst); | |||
7519 | } | |||
7520 | } | |||
7521 | } | |||
7522 | ||||
7523 | // If we are moving a scalar into a vector (Ld must be set and all elements | |||
7524 | // but 1 are undef) and that operation is not obviously supported by | |||
7525 | // vmovd/vmovq/vmovss/vmovsd, then keep trying to form a broadcast. | |||
7526 | // That's better than general shuffling and may eliminate a load to GPR and | |||
7527 | // move from scalar to vector register. | |||
7528 | if (!Ld || NumElts - NumUndefElts != 1) | |||
7529 | return SDValue(); | |||
7530 | unsigned ScalarSize = Ld.getValueSizeInBits(); | |||
7531 | if (!(UndefElements[0] || (ScalarSize != 32 && ScalarSize != 64))) | |||
7532 | return SDValue(); | |||
7533 | } | |||
7534 | ||||
7535 | bool ConstSplatVal = | |||
7536 | (Ld.getOpcode() == ISD::Constant || Ld.getOpcode() == ISD::ConstantFP); | |||
7537 | ||||
7538 | // Make sure that all of the users of a non-constant load are from the | |||
7539 | // BUILD_VECTOR node. | |||
7540 | if (!ConstSplatVal && !BVOp->isOnlyUserOf(Ld.getNode())) | |||
7541 | return SDValue(); | |||
7542 | ||||
7543 | unsigned ScalarSize = Ld.getValueSizeInBits(); | |||
7544 | bool IsGE256 = (VT.getSizeInBits() >= 256); | |||
7545 | ||||
7546 | // When optimizing for size, generate up to 5 extra bytes for a broadcast | |||
7547 | // instruction to save 8 or more bytes of constant pool data. | |||
7548 | // TODO: If multiple splats are generated to load the same constant, | |||
7549 | // it may be detrimental to overall size. There needs to be a way to detect | |||
7550 | // that condition to know if this is truly a size win. | |||
7551 | bool OptForSize = DAG.getMachineFunction().getFunction().optForSize(); | |||
7552 | ||||
7553 | // Handle broadcasting a single constant scalar from the constant pool | |||
7554 | // into a vector. | |||
7555 | // On Sandybridge (no AVX2), it is still better to load a constant vector | |||
7556 | // from the constant pool and not to broadcast it from a scalar. | |||
7557 | // But override that restriction when optimizing for size. | |||
7558 | // TODO: Check if splatting is recommended for other AVX-capable CPUs. | |||
7559 | if (ConstSplatVal && (Subtarget.hasAVX2() || OptForSize)) { | |||
7560 | EVT CVT = Ld.getValueType(); | |||
7561 | assert(!CVT.isVector() && "Must not broadcast a vector type"); | |||
7562 | ||||
7563 | // Splat f32, i32, v4f64, v4i64 in all cases with AVX2. | |||
7564 | // For size optimization, also splat v2f64 and v2i64, and for size opt | |||
7565 | // with AVX2, also splat i8 and i16. | |||
7566 | // With pattern matching, the VBROADCAST node may become a VMOVDDUP. | |||
7567 | if (ScalarSize == 32 || (IsGE256 && ScalarSize == 64) || | |||
7568 | (OptForSize && (ScalarSize == 64 || Subtarget.hasAVX2()))) { | |||
7569 | const Constant *C = nullptr; | |||
7570 | if (ConstantSDNode *CI = dyn_cast<ConstantSDNode>(Ld)) | |||
7571 | C = CI->getConstantIntValue(); | |||
7572 | else if (ConstantFPSDNode *CF = dyn_cast<ConstantFPSDNode>(Ld)) | |||
7573 | C = CF->getConstantFPValue(); | |||
7574 | ||||
7575 | assert(C && "Invalid constant type"); | |||
7576 | ||||
7577 | const TargetLowering &TLI = DAG.getTargetLoweringInfo(); | |||
7578 | SDValue CP = | |||
7579 | DAG.getConstantPool(C, TLI.getPointerTy(DAG.getDataLayout())); | |||
7580 | unsigned Alignment = cast<ConstantPoolSDNode>(CP)->getAlignment(); | |||
7581 | Ld = DAG.getLoad( | |||
7582 | CVT, dl, DAG.getEntryNode(), CP, | |||
7583 | MachinePointerInfo::getConstantPool(DAG.getMachineFunction()), | |||
7584 | Alignment); | |||
7585 | ||||
7586 | return DAG.getNode(X86ISD::VBROADCAST, dl, VT, Ld); | |||
7587 | } | |||
7588 | } | |||
7589 | ||||
7590 | bool IsLoad = ISD::isNormalLoad(Ld.getNode()); | |||
7591 | ||||
7592 | // Handle AVX2 in-register broadcasts. | |||
7593 | if (!IsLoad && Subtarget.hasInt256() && | |||
7594 | (ScalarSize == 32 || (IsGE256 && ScalarSize == 64))) | |||
7595 | return DAG.getNode(X86ISD::VBROADCAST, dl, VT, Ld); | |||
7596 | ||||
7597 | // The scalar source must be a normal load. | |||
7598 | if (!IsLoad) | |||
7599 | return SDValue(); | |||
7600 | ||||
7601 | if (ScalarSize == 32 || (IsGE256 && ScalarSize == 64) || | |||
7602 | (Subtarget.hasVLX() && ScalarSize == 64)) | |||
7603 | return DAG.getNode(X86ISD::VBROADCAST, dl, VT, Ld); | |||
7604 | ||||
7605 | // The integer check is needed for the 64-bit into 128-bit case, so it doesn't | |||
7606 | // match double, since there is no vbroadcastsd xmm instruction. | |||
7607 | if (Subtarget.hasInt256() && Ld.getValueType().isInteger()) { | |||
7608 | if (ScalarSize == 8 || ScalarSize == 16 || ScalarSize == 64) | |||
7609 | return DAG.getNode(X86ISD::VBROADCAST, dl, VT, Ld); | |||
7610 | } | |||
7611 | ||||
7612 | // Unsupported broadcast. | |||
7613 | return SDValue(); | |||
7614 | } | |||
7615 | ||||
7616 | /// For an EXTRACT_VECTOR_ELT with a constant index return the real | |||
7617 | /// underlying vector and index. | |||
7618 | /// | |||
7619 | /// Modifies \p ExtractedFromVec to the real vector and returns the real | |||
7620 | /// index. | |||
7621 | static int getUnderlyingExtractedFromVec(SDValue &ExtractedFromVec, | |||
7622 | SDValue ExtIdx) { | |||
7623 | int Idx = cast<ConstantSDNode>(ExtIdx)->getZExtValue(); | |||
7624 | if (!isa<ShuffleVectorSDNode>(ExtractedFromVec)) | |||
7625 | return Idx; | |||
7626 | ||||
7627 | // For 256-bit vectors, LowerEXTRACT_VECTOR_ELT_SSE4 may have already | |||
7628 | // lowered this: | |||
7629 | // (extract_vector_elt (v8f32 %1), Constant<6>) | |||
7630 | // to: | |||
7631 | // (extract_vector_elt (vector_shuffle<2,u,u,u> | |||
7632 | // (extract_subvector (v8f32 %0), Constant<4>), | |||
7633 | // undef) | |||
7634 | // Constant<0>) | |||
7635 | // In this case the vector is the extract_subvector expression and the index | |||
7636 | // is 2, as specified by the shuffle. | |||
7637 | ShuffleVectorSDNode *SVOp = cast<ShuffleVectorSDNode>(ExtractedFromVec); | |||
7638 | SDValue ShuffleVec = SVOp->getOperand(0); | |||
7639 | MVT ShuffleVecVT = ShuffleVec.getSimpleValueType(); | |||
7640 | assert(ShuffleVecVT.getVectorElementType() == | |||
7641 | ExtractedFromVec.getSimpleValueType().getVectorElementType()); | |||
7642 | ||||
7643 | int ShuffleIdx = SVOp->getMaskElt(Idx); | |||
7644 | if (isUndefOrInRange(ShuffleIdx, 0, ShuffleVecVT.getVectorNumElements())) { | |||
7645 | ExtractedFromVec = ShuffleVec; | |||
7646 | return ShuffleIdx; | |||
7647 | } | |||
7648 | return Idx; | |||
7649 | } | |||
7650 | ||||
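| // Try to express a BUILD_VECTOR whose operands are mostly extract_vector_elt | |||
| // nodes as a shuffle of at most two source vectors, followed by | |||
| // insert_vector_elt nodes for the few remaining elements. | |||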
7651 | static SDValue buildFromShuffleMostly(SDValue Op, SelectionDAG &DAG) { | |||
7652 | MVT VT = Op.getSimpleValueType(); | |||
7653 | ||||
7654 | // Skip if insert_vec_elt is not supported. | |||
7655 | const TargetLowering &TLI = DAG.getTargetLoweringInfo(); | |||
7656 | if (!TLI.isOperationLegalOrCustom(ISD::INSERT_VECTOR_ELT, VT)) | |||
7657 | return SDValue(); | |||
7658 | ||||
7659 | SDLoc DL(Op); | |||
7660 | unsigned NumElems = Op.getNumOperands(); | |||
7661 | ||||
7662 | SDValue VecIn1; | |||
7663 | SDValue VecIn2; | |||
7664 | SmallVector<unsigned, 4> InsertIndices; | |||
7665 | SmallVector<int, 8> Mask(NumElems, -1); | |||
7666 | ||||
7667 | for (unsigned i = 0; i != NumElems; ++i) { | |||
7668 | unsigned Opc = Op.getOperand(i).getOpcode(); | |||
7669 | ||||
7670 | if (Opc == ISD::UNDEF) | |||
7671 | continue; | |||
7672 | ||||
7673 | if (Opc != ISD::EXTRACT_VECTOR_ELT) { | |||
7674 | // Quit if more than 1 element needs inserting. | |||
7675 | if (InsertIndices.size() > 1) | |||
7676 | return SDValue(); | |||
7677 | ||||
7678 | InsertIndices.push_back(i); | |||
7679 | continue; | |||
7680 | } | |||
7681 | ||||
7682 | SDValue ExtractedFromVec = Op.getOperand(i).getOperand(0); | |||
7683 | SDValue ExtIdx = Op.getOperand(i).getOperand(1); | |||
7684 | ||||
7685 | // Quit if non-constant index. | |||
7686 | if (!isa<ConstantSDNode>(ExtIdx)) | |||
7687 | return SDValue(); | |||
7688 | int Idx = getUnderlyingExtractedFromVec(ExtractedFromVec, ExtIdx); | |||
7689 | ||||
7690 | // Quit if extracted from vector of different type. | |||
7691 | if (ExtractedFromVec.getValueType() != VT) | |||
7692 | return SDValue(); | |||
7693 | ||||
7694 | if (!VecIn1.getNode()) | |||
7695 | VecIn1 = ExtractedFromVec; | |||
7696 | else if (VecIn1 != ExtractedFromVec) { | |||
7697 | if (!VecIn2.getNode()) | |||
7698 | VecIn2 = ExtractedFromVec; | |||
7699 | else if (VecIn2 != ExtractedFromVec) | |||
7700 | // Quit if there are more than 2 vectors to shuffle. | |||
7701 | return SDValue(); | |||
7702 | } | |||
7703 | ||||
7704 | if (ExtractedFromVec == VecIn1) | |||
7705 | Mask[i] = Idx; | |||
7706 | else if (ExtractedFromVec == VecIn2) | |||
7707 | Mask[i] = Idx + NumElems; | |||
7708 | } | |||
7709 | ||||
7710 | if (!VecIn1.getNode()) | |||
7711 | return SDValue(); | |||
7712 | ||||
7713 | VecIn2 = VecIn2.getNode() ? VecIn2 : DAG.getUNDEF(VT); | |||
7714 | SDValue NV = DAG.getVectorShuffle(VT, DL, VecIn1, VecIn2, Mask); | |||
7715 | ||||
7716 | for (unsigned Idx : InsertIndices) | |||
7717 | NV = DAG.getNode(ISD::INSERT_VECTOR_ELT, DL, VT, NV, Op.getOperand(Idx), | |||
7718 | DAG.getIntPtrConstant(Idx, DL)); | |||
7719 | ||||
7720 | return NV; | |||
7721 | } | |||
7722 | ||||
7723 | static SDValue ConvertI1VectorToInteger(SDValue Op, SelectionDAG &DAG) { | |||
7724 | assert(ISD::isBuildVectorOfConstantSDNodes(Op.getNode()) && | |||
7725 | Op.getScalarValueSizeInBits() == 1 && | |||
7726 | "Can not convert non-constant vector"); | |||
7727 | uint64_t Immediate = 0; | |||
7728 | for (unsigned idx = 0, e = Op.getNumOperands(); idx < e; ++idx) { | |||
7729 | SDValue In = Op.getOperand(idx); | |||
7730 | if (!In.isUndef()) | |||
7731 | Immediate |= (cast<ConstantSDNode>(In)->getZExtValue() & 0x1) << idx; | |||
7732 | } | |||
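| // E.g. a v8i1 build vector <1,0,1,1,u,0,0,0> packs into the i8 constant | |||
| // 0b00001101 (13), with undef lanes contributing 0. | |||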
7733 | SDLoc dl(Op); | |||
7734 | MVT VT = MVT::getIntegerVT(std::max((int)Op.getValueSizeInBits(), 8)); | |||
7735 | return DAG.getConstant(Immediate, dl, VT); | |||
7736 | } | |||
7737 | // Lower BUILD_VECTOR operation for v8i1 and v16i1 types. | |||
7738 | static SDValue LowerBUILD_VECTORvXi1(SDValue Op, SelectionDAG &DAG, | |||
7739 | const X86Subtarget &Subtarget) { | |||
7740 | ||||
7741 | MVT VT = Op.getSimpleValueType(); | |||
7742 | assert((VT.getVectorElementType() == MVT::i1) && | |||
7743 | "Unexpected type in LowerBUILD_VECTORvXi1!"); | |||
7744 | ||||
7745 | SDLoc dl(Op); | |||
7746 | if (ISD::isBuildVectorAllZeros(Op.getNode())) | |||
7747 | return Op; | |||
7748 | ||||
7749 | if (ISD::isBuildVectorAllOnes(Op.getNode())) | |||
7750 | return Op; | |||
7751 | ||||
7752 | if (ISD::isBuildVectorOfConstantSDNodes(Op.getNode())) { | |||
7753 | if (VT == MVT::v64i1 && !Subtarget.is64Bit()) { | |||
7754 | // Split the pieces. | |||
7755 | SDValue Lower = | |||
7756 | DAG.getBuildVector(MVT::v32i1, dl, Op.getNode()->ops().slice(0, 32)); | |||
7757 | SDValue Upper = | |||
7758 | DAG.getBuildVector(MVT::v32i1, dl, Op.getNode()->ops().slice(32, 32)); | |||
7759 | // We have to manually lower both halves so getNode doesn't try to | |||
7760 | // reassemble the build_vector. | |||
7761 | Lower = LowerBUILD_VECTORvXi1(Lower, DAG, Subtarget); | |||
7762 | Upper = LowerBUILD_VECTORvXi1(Upper, DAG, Subtarget); | |||
7763 | return DAG.getNode(ISD::CONCAT_VECTORS, dl, MVT::v64i1, Lower, Upper); | |||
7764 | } | |||
7765 | SDValue Imm = ConvertI1VectorToInteger(Op, DAG); | |||
7766 | if (Imm.getValueSizeInBits() == VT.getSizeInBits()) | |||
7767 | return DAG.getBitcast(VT, Imm); | |||
7768 | SDValue ExtVec = DAG.getBitcast(MVT::v8i1, Imm); | |||
7769 | return DAG.getNode(ISD::EXTRACT_SUBVECTOR, dl, VT, ExtVec, | |||
7770 | DAG.getIntPtrConstant(0, dl)); | |||
7771 | } | |||
7772 | ||||
7773 | // Vector has one or more non-const elements | |||
7774 | uint64_t Immediate = 0; | |||
7775 | SmallVector<unsigned, 16> NonConstIdx; | |||
7776 | bool IsSplat = true; | |||
7777 | bool HasConstElts = false; | |||
7778 | int SplatIdx = -1; | |||
7779 | for (unsigned idx = 0, e = Op.getNumOperands(); idx < e; ++idx) { | |||
7780 | SDValue In = Op.getOperand(idx); | |||
7781 | if (In.isUndef()) | |||
7782 | continue; | |||
7783 | if (!isa<ConstantSDNode>(In)) | |||
7784 | NonConstIdx.push_back(idx); | |||
7785 | else { | |||
7786 | Immediate |= (cast<ConstantSDNode>(In)->getZExtValue() & 0x1) << idx; | |||
7787 | HasConstElts = true; | |||
7788 | } | |||
7789 | if (SplatIdx < 0) | |||
7790 | SplatIdx = idx; | |||
7791 | else if (In != Op.getOperand(SplatIdx)) | |||
7792 | IsSplat = false; | |||
7793 | } | |||
7794 | ||||
7795 | // For a splat, use (select i1 splat_elt, all-ones, all-zeroes). | |||
7796 | if (IsSplat) | |||
7797 | return DAG.getSelect(dl, VT, Op.getOperand(SplatIdx), | |||
7798 | DAG.getConstant(1, dl, VT), | |||
7799 | DAG.getConstant(0, dl, VT)); | |||
7800 | ||||
7801 | // Insert the non-constant elements one by one. | |||
7802 | SDValue DstVec; | |||
7803 | SDValue Imm; | |||
7804 | if (Immediate) { | |||
7805 | MVT ImmVT = MVT::getIntegerVT(std::max((int)VT.getSizeInBits(), 8)); | |||
7806 | Imm = DAG.getConstant(Immediate, dl, ImmVT); | |||
7807 | } | |||
7808 | else if (HasConstElts) | |||
7809 | Imm = DAG.getConstant(0, dl, VT); | |||
7810 | else | |||
7811 | Imm = DAG.getUNDEF(VT); | |||
7812 | if (Imm.getValueSizeInBits() == VT.getSizeInBits()) | |||
7813 | DstVec = DAG.getBitcast(VT, Imm); | |||
7814 | else { | |||
7815 | SDValue ExtVec = DAG.getBitcast(MVT::v8i1, Imm); | |||
7816 | DstVec = DAG.getNode(ISD::EXTRACT_SUBVECTOR, dl, VT, ExtVec, | |||
7817 | DAG.getIntPtrConstant(0, dl)); | |||
7818 | } | |||
7819 | ||||
7820 | for (unsigned i = 0, e = NonConstIdx.size(); i != e; ++i) { | |||
7821 | unsigned InsertIdx = NonConstIdx[i]; | |||
7822 | DstVec = DAG.getNode(ISD::INSERT_VECTOR_ELT, dl, VT, DstVec, | |||
7823 | Op.getOperand(InsertIdx), | |||
7824 | DAG.getIntPtrConstant(InsertIdx, dl)); | |||
7825 | } | |||
7826 | return DstVec; | |||
7827 | } | |||
7828 | ||||
7829 | /// Return true if \p N implements a horizontal binop and return the | |||
7830 | /// operands for the horizontal binop into V0 and V1. | |||
7831 | /// | |||
7832 | /// This is a helper function of LowerToHorizontalOp(). | |||
7833 | /// This function checks whether the input build_vector \p N implements a | |||
7834 | /// horizontal operation. Parameter \p Opcode defines the kind of horizontal | |||
7835 | /// operation to match. | |||
7836 | /// For example, if \p Opcode is equal to ISD::ADD, then this function | |||
7837 | /// checks if \p N implements a horizontal arithmetic add; if instead \p Opcode | |||
7838 | /// is equal to ISD::SUB, then this function checks if this is a horizontal | |||
7839 | /// arithmetic sub. | |||
7840 | /// | |||
7841 | /// This function only analyzes elements of \p N whose indices are | |||
7842 | /// in range [BaseIdx, LastIdx). | |||
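| /// | |||
| /// For example, the v4f32 build_vector (a0+a1, a2+a3, b0+b1, b2+b3) matches | |||
| /// a horizontal FADD with V0 == A and V1 == B. | |||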
7843 | static bool isHorizontalBinOp(const BuildVectorSDNode *N, unsigned Opcode, | |||
7844 | SelectionDAG &DAG, | |||
7845 | unsigned BaseIdx, unsigned LastIdx, | |||
7846 | SDValue &V0, SDValue &V1) { | |||
7847 | EVT VT = N->getValueType(0); | |||
7848 | ||||
7849 | assert(BaseIdx * 2 <= LastIdx && "Invalid Indices in input!"); | |||
7850 | assert(VT.isVector() && VT.getVectorNumElements() >= LastIdx && | |||
7851 | "Invalid Vector in input!"); | |||
7852 | ||||
7853 | bool IsCommutable = (Opcode == ISD::ADD || Opcode == ISD::FADD); | |||
7854 | bool CanFold = true; | |||
7855 | unsigned ExpectedVExtractIdx = BaseIdx; | |||
7856 | unsigned NumElts = LastIdx - BaseIdx; | |||
7857 | V0 = DAG.getUNDEF(VT); | |||
7858 | V1 = DAG.getUNDEF(VT); | |||
7859 | ||||
7860 | // Check if N implements a horizontal binop. | |||
7861 | for (unsigned i = 0, e = NumElts; i != e && CanFold; ++i) { | |||
7862 | SDValue Op = N->getOperand(i + BaseIdx); | |||
7863 | ||||
7864 | // Skip UNDEFs. | |||
7865 | if (Op->isUndef()) { | |||
7866 | // Update the expected vector extract index. | |||
7867 | if (i * 2 == NumElts) | |||
7868 | ExpectedVExtractIdx = BaseIdx; | |||
7869 | ExpectedVExtractIdx += 2; | |||
7870 | continue; | |||
7871 | } | |||
7872 | ||||
7873 | CanFold = Op->getOpcode() == Opcode && Op->hasOneUse(); | |||
7874 | ||||
7875 | if (!CanFold) | |||
7876 | break; | |||
7877 | ||||
7878 | SDValue Op0 = Op.getOperand(0); | |||
7879 | SDValue Op1 = Op.getOperand(1); | |||
7880 | ||||
7881 | // Try to match the following pattern: | |||
7882 | // (BINOP (extract_vector_elt A, I), (extract_vector_elt A, I+1)) | |||
7883 | CanFold = (Op0.getOpcode() == ISD::EXTRACT_VECTOR_ELT && | |||
7884 | Op1.getOpcode() == ISD::EXTRACT_VECTOR_ELT && | |||
7885 | Op0.getOperand(0) == Op1.getOperand(0) && | |||
7886 | isa<ConstantSDNode>(Op0.getOperand(1)) && | |||
7887 | isa<ConstantSDNode>(Op1.getOperand(1))); | |||
7888 | if (!CanFold) | |||
7889 | break; | |||
7890 | ||||
7891 | unsigned I0 = cast<ConstantSDNode>(Op0.getOperand(1))->getZExtValue(); | |||
7892 | unsigned I1 = cast<ConstantSDNode>(Op1.getOperand(1))->getZExtValue(); | |||
7893 | ||||
7894 | if (i * 2 < NumElts) { | |||
7895 | if (V0.isUndef()) { | |||
7896 | V0 = Op0.getOperand(0); | |||
7897 | if (V0.getValueType() != VT) | |||
7898 | return false; | |||
7899 | } | |||
7900 | } else { | |||
7901 | if (V1.isUndef()) { | |||
7902 | V1 = Op0.getOperand(0); | |||
7903 | if (V1.getValueType() != VT) | |||
7904 | return false; | |||
7905 | } | |||
7906 | if (i * 2 == NumElts) | |||
7907 | ExpectedVExtractIdx = BaseIdx; | |||
7908 | } | |||
7909 | ||||
7910 | SDValue Expected = (i * 2 < NumElts) ? V0 : V1; | |||
7911 | if (I0 == ExpectedVExtractIdx) | |||
7912 | CanFold = I1 == I0 + 1 && Op0.getOperand(0) == Expected; | |||
7913 | else if (IsCommutable && I1 == ExpectedVExtractIdx) { | |||
7914 | // Try to match the following dag sequence: | |||
7915 | // (BINOP (extract_vector_elt A, I+1), (extract_vector_elt A, I)) | |||
7916 | CanFold = I0 == I1 + 1 && Op1.getOperand(0) == Expected; | |||
7917 | } else | |||
7918 | CanFold = false; | |||
7919 | ||||
7920 | ExpectedVExtractIdx += 2; | |||
7921 | } | |||
7922 | ||||
7923 | return CanFold; | |||
7924 | } | |||
7925 | ||||
7926 | /// Emit a sequence of two 128-bit horizontal add/sub followed by | |||
7927 | /// a concat_vector. | |||
7928 | /// | |||
7929 | /// This is a helper function of LowerToHorizontalOp(). | |||
7930 | /// This function expects two 256-bit vectors called V0 and V1. | |||
7931 | /// At first, each vector is split into two separate 128-bit vectors. | |||
7932 | /// Then, the resulting 128-bit vectors are used to implement two | |||
7933 | /// horizontal binary operations. | |||
7934 | /// | |||
7935 | /// The kind of horizontal binary operation is defined by \p X86Opcode. | |||
7936 | /// | |||
7937 | /// \p Mode specifies how the 128-bit parts of V0 and V1 are passed as input to | |||
7938 | /// the two new horizontal binops. | |||
7939 | /// When Mode is set, the first horizontal binop dag node takes as input the | |||
7940 | /// lower 128 bits of V0 and the upper 128 bits of V0. The second horizontal | |||
7941 | /// binop dag node takes as input the lower 128 bits of V1 and the upper | |||
7942 | /// 128 bits of V1. | |||
7943 | /// Example: | |||
7944 | /// HADD V0_LO, V0_HI | |||
7945 | /// HADD V1_LO, V1_HI | |||
7946 | /// | |||
7947 | /// Otherwise, the first horizontal binop dag node takes as input the lower | |||
7948 | /// 128 bits of V0 and the lower 128 bits of V1, and the second horizontal | |||
7949 | /// binop dag node takes the upper 128 bits of V0 and the upper 128 bits of V1. | |||
7950 | /// Example: | |||
7951 | /// HADD V0_LO, V1_LO | |||
7952 | /// HADD V0_HI, V1_HI | |||
7953 | /// | |||
7954 | /// If \p isUndefLO is set, then the algorithm propagates UNDEF to the lower | |||
7955 | /// 128 bits of the result. If \p isUndefHI is set, then UNDEF is propagated to | |||
7956 | /// the upper 128 bits of the result. | |||
7957 | static SDValue ExpandHorizontalBinOp(const SDValue &V0, const SDValue &V1, | |||
7958 | const SDLoc &DL, SelectionDAG &DAG, | |||
7959 | unsigned X86Opcode, bool Mode, | |||
7960 | bool isUndefLO, bool isUndefHI) { | |||
7961 | MVT VT = V0.getSimpleValueType(); | |||
7962 | assert(VT.is256BitVector() && VT == V1.getSimpleValueType() && | |||
7963 | "Invalid nodes in input!"); | |||
7964 | ||||
7965 | unsigned NumElts = VT.getVectorNumElements(); | |||
7966 | SDValue V0_LO = extract128BitVector(V0, 0, DAG, DL); | |||
7967 | SDValue V0_HI = extract128BitVector(V0, NumElts/2, DAG, DL); | |||
7968 | SDValue V1_LO = extract128BitVector(V1, 0, DAG, DL); | |||
7969 | SDValue V1_HI = extract128BitVector(V1, NumElts/2, DAG, DL); | |||
7970 | MVT NewVT = V0_LO.getSimpleValueType(); | |||
7971 | ||||
7972 | SDValue LO = DAG.getUNDEF(NewVT); | |||
7973 | SDValue HI = DAG.getUNDEF(NewVT); | |||
7974 | ||||
7975 | if (Mode) { | |||
7976 | // Don't emit a horizontal binop if the result is expected to be UNDEF. | |||
7977 | if (!isUndefLO && !V0->isUndef()) | |||
7978 | LO = DAG.getNode(X86Opcode, DL, NewVT, V0_LO, V0_HI); | |||
7979 | if (!isUndefHI && !V1->isUndef()) | |||
7980 | HI = DAG.getNode(X86Opcode, DL, NewVT, V1_LO, V1_HI); | |||
7981 | } else { | |||
7982 | // Don't emit a horizontal binop if the result is expected to be UNDEF. | |||
7983 | if (!isUndefLO && (!V0_LO->isUndef() || !V1_LO->isUndef())) | |||
7984 | LO = DAG.getNode(X86Opcode, DL, NewVT, V0_LO, V1_LO); | |||
7985 | ||||
7986 | if (!isUndefHI && (!V0_HI->isUndef() || !V1_HI->isUndef())) | |||
7987 | HI = DAG.getNode(X86Opcode, DL, NewVT, V0_HI, V1_HI); | |||
7988 | } | |||
7989 | ||||
7990 | return DAG.getNode(ISD::CONCAT_VECTORS, DL, VT, LO, HI); | |||
7991 | } | |||
7992 | ||||
7993 | /// Returns true iff \p BV builds a vector with the result equivalent to | |||
7994 | /// the result of an ADDSUB/SUBADD operation. | |||
7995 | /// If true is returned then the operands of ADDSUB = Opnd0 +- Opnd1 | |||
7996 | /// (SUBADD = Opnd0 -+ Opnd1) operation are written to the parameters | |||
7997 | /// \p Opnd0 and \p Opnd1. | |||
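| /// | |||
| /// E.g. the v4f32 build_vector (a0-b0, a1+b1, a2-b2, a3+b3) is recognized as | |||
| /// ADDSUB(A, B), while (a0+b0, a1-b1, a2+b2, a3-b3) is the SUBADD form. | |||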
7998 | static bool isAddSubOrSubAdd(const BuildVectorSDNode *BV, | |||
7999 | const X86Subtarget &Subtarget, SelectionDAG &DAG, | |||
8000 | SDValue &Opnd0, SDValue &Opnd1, | |||
8001 | unsigned &NumExtracts, | |||
8002 | bool &IsSubAdd) { | |||
8003 | ||||
8004 | MVT VT = BV->getSimpleValueType(0); | |||
8005 | if (!Subtarget.hasSSE3() || !VT.isFloatingPoint()) | |||
8006 | return false; | |||
8007 | ||||
8008 | unsigned NumElts = VT.getVectorNumElements(); | |||
8009 | SDValue InVec0 = DAG.getUNDEF(VT); | |||
8010 | SDValue InVec1 = DAG.getUNDEF(VT); | |||
8011 | ||||
8012 | NumExtracts = 0; | |||
8013 | ||||
8014 | // Odd-numbered elements in the input build vector are obtained from | |||
8015 | // adding/subtracting two integer/float elements. | |||
8016 | // Even-numbered elements in the input build vector are obtained from | |||
8017 | // subtracting/adding two integer/float elements. | |||
8018 | unsigned Opc[2] = {0, 0}; | |||
8019 | for (unsigned i = 0, e = NumElts; i != e; ++i) { | |||
8020 | SDValue Op = BV->getOperand(i); | |||
8021 | ||||
8022 | // Skip 'undef' values. | |||
8023 | unsigned Opcode = Op.getOpcode(); | |||
8024 | if (Opcode == ISD::UNDEF) | |||
8025 | continue; | |||
8026 | ||||
8027 | // Early exit if we found an unexpected opcode. | |||
8028 | if (Opcode != ISD::FADD && Opcode != ISD::FSUB) | |||
8029 | return false; | |||
8030 | ||||
8031 | SDValue Op0 = Op.getOperand(0); | |||
8032 | SDValue Op1 = Op.getOperand(1); | |||
8033 | ||||
8034 | // Try to match the following pattern: | |||
8035 | // (BINOP (extract_vector_elt A, i), (extract_vector_elt B, i)) | |||
8036 | // Early exit if we cannot match that sequence. | |||
8037 | if (Op0.getOpcode() != ISD::EXTRACT_VECTOR_ELT || | |||
8038 | Op1.getOpcode() != ISD::EXTRACT_VECTOR_ELT || | |||
8039 | !isa<ConstantSDNode>(Op0.getOperand(1)) || | |||
8040 | !isa<ConstantSDNode>(Op1.getOperand(1)) || | |||
8041 | Op0.getOperand(1) != Op1.getOperand(1)) | |||
8042 | return false; | |||
8043 | ||||
8044 | unsigned I0 = cast<ConstantSDNode>(Op0.getOperand(1))->getZExtValue(); | |||
8045 | if (I0 != i) | |||
8046 | return false; | |||
8047 | ||||
8048 | // We found a valid add/sub node; make sure it's the same opcode as previous | |||
8049 | // elements of this parity. | |||
8050 | if (Opc[i % 2] != 0 && Opc[i % 2] != Opcode) | |||
8051 | return false; | |||
8052 | Opc[i % 2] = Opcode; | |||
8053 | ||||
8054 | // Update InVec0 and InVec1. | |||
8055 | if (InVec0.isUndef()) { | |||
8056 | InVec0 = Op0.getOperand(0); | |||
8057 | if (InVec0.getSimpleValueType() != VT) | |||
8058 | return false; | |||
8059 | } | |||
8060 | if (InVec1.isUndef()) { | |||
8061 | InVec1 = Op1.getOperand(0); | |||
8062 | if (InVec1.getSimpleValueType() != VT) | |||
8063 | return false; | |||
8064 | } | |||
8065 | ||||
8066 | // Make sure that the input operands of each add/sub node always | |||
8067 | // come from the same pair of vectors. | |||
8068 | if (InVec0 != Op0.getOperand(0)) { | |||
8069 | if (Opcode == ISD::FSUB) | |||
8070 | return false; | |||
8071 | ||||
8072 | // FADD is commutable. Try to commute the operands | |||
8073 | // and then test again. | |||
8074 | std::swap(Op0, Op1); | |||
8075 | if (InVec0 != Op0.getOperand(0)) | |||
8076 | return false; | |||
8077 | } | |||
8078 | ||||
8079 | if (InVec1 != Op1.getOperand(0)) | |||
8080 | return false; | |||
8081 | ||||
8082 | // Increment the number of extractions done. | |||
8083 | ++NumExtracts; | |||
8084 | } | |||
8085 | ||||
8086 | // Ensure we have found an opcode for both parities and that they are | |||
8087 | // different. Don't try to fold this build_vector into an ADDSUB/SUBADD if the | |||
8088 | // inputs are undef. | |||
8089 | if (!Opc[0] || !Opc[1] || Opc[0] == Opc[1] || | |||
8090 | InVec0.isUndef() || InVec1.isUndef()) | |||
8091 | return false; | |||
8092 | ||||
8093 | IsSubAdd = Opc[0] == ISD::FADD; | |||
8094 | ||||
8095 | Opnd0 = InVec0; | |||
8096 | Opnd1 = InVec1; | |||
8097 | return true; | |||
8098 | } | |||
8099 | ||||
8100 | /// Returns true if it is possible to fold MUL and an idiom that has already been | |||
8101 | /// recognized as ADDSUB/SUBADD(\p Opnd0, \p Opnd1) into | |||
8102 | /// FMADDSUB/FMSUBADD(x, y, \p Opnd1). If (and only if) true is returned, the | |||
8103 | /// operands of FMADDSUB/FMSUBADD are written to parameters \p Opnd0, \p Opnd1, \p Opnd2. | |||
8104 | /// | |||
8105 | /// Prior to calling this function it should be known that there is some | |||
8106 | /// SDNode that potentially can be replaced with an X86ISD::ADDSUB operation | |||
8107 | /// using \p Opnd0 and \p Opnd1 as operands. Also, this method is called | |||
8108 | /// before replacement of such SDNode with ADDSUB operation. Thus the number | |||
8109 | /// of \p Opnd0 uses is expected to be equal to 2. | |||
8110 | /// For example, this function may be called for the following IR: | |||
8111 | /// %AB = fmul fast <2 x double> %A, %B | |||
8112 | /// %Sub = fsub fast <2 x double> %AB, %C | |||
8113 | /// %Add = fadd fast <2 x double> %AB, %C | |||
8114 | /// %Addsub = shufflevector <2 x double> %Sub, <2 x double> %Add, | |||
8115 | /// <2 x i32> <i32 0, i32 3> | |||
8116 | /// There is a def for %Addsub here, which potentially can be replaced by | |||
8117 | /// X86ISD::ADDSUB operation: | |||
8118 | /// %Addsub = X86ISD::ADDSUB %AB, %C | |||
8119 | /// and such ADDSUB can further be replaced with FMADDSUB: | |||
8120 | /// %Addsub = FMADDSUB %A, %B, %C. | |||
8121 | /// | |||
8122 | /// The main reason why this method is called before the replacement of the | |||
8123 | /// recognized ADDSUB idiom with ADDSUB operation is that such replacement | |||
8124 | /// is illegal sometimes. E.g. 512-bit ADDSUB is not available, while 512-bit | |||
8125 | /// FMADDSUB is. | |||
8126 | static bool isFMAddSubOrFMSubAdd(const X86Subtarget &Subtarget, | |||
8127 | SelectionDAG &DAG, | |||
8128 | SDValue &Opnd0, SDValue &Opnd1, SDValue &Opnd2, | |||
8129 | unsigned ExpectedUses) { | |||
8130 | if (Opnd0.getOpcode() != ISD::FMUL || | |||
8131 | !Opnd0->hasNUsesOfValue(ExpectedUses, 0) || !Subtarget.hasAnyFMA()) | |||
8132 | return false; | |||
8133 | ||||
8134 | // FIXME: These checks must match the similar ones in | |||
8135 | // DAGCombiner::visitFADDForFMACombine. It would be good to have one | |||
8136 | // function that would answer if it is Ok to fuse MUL + ADD to FMADD | |||
8137 | // or MUL + ADDSUB to FMADDSUB. | |||
8138 | const TargetOptions &Options = DAG.getTarget().Options; | |||
8139 | bool AllowFusion = | |||
8140 | (Options.AllowFPOpFusion == FPOpFusion::Fast || Options.UnsafeFPMath); | |||
8141 | if (!AllowFusion) | |||
8142 | return false; | |||
8143 | ||||
8144 | Opnd2 = Opnd1; | |||
8145 | Opnd1 = Opnd0.getOperand(1); | |||
8146 | Opnd0 = Opnd0.getOperand(0); | |||
8147 | ||||
8148 | return true; | |||
8149 | } | |||
8150 | ||||
8151 | /// Try to fold a build_vector that performs an 'addsub', 'fmaddsub' or | |||
8152 | /// 'fsubadd' operation into an X86ISD::ADDSUB, X86ISD::FMADDSUB or | |||
8153 | /// X86ISD::FMSUBADD node accordingly. | |||
8154 | static SDValue lowerToAddSubOrFMAddSub(const BuildVectorSDNode *BV, | |||
8155 | const X86Subtarget &Subtarget, | |||
8156 | SelectionDAG &DAG) { | |||
8157 | SDValue Opnd0, Opnd1; | |||
8158 | unsigned NumExtracts; | |||
8159 | bool IsSubAdd; | |||
8160 | if (!isAddSubOrSubAdd(BV, Subtarget, DAG, Opnd0, Opnd1, NumExtracts, | |||
8161 | IsSubAdd)) | |||
8162 | return SDValue(); | |||
8163 | ||||
8164 | MVT VT = BV->getSimpleValueType(0); | |||
8165 | SDLoc DL(BV); | |||
8166 | ||||
8167 | // Try to generate X86ISD::FMADDSUB node here. | |||
8168 | SDValue Opnd2; | |||
8169 | if (isFMAddSubOrFMSubAdd(Subtarget, DAG, Opnd0, Opnd1, Opnd2, NumExtracts)) { | |||
8170 | unsigned Opc = IsSubAdd ? X86ISD::FMSUBADD : X86ISD::FMADDSUB; | |||
8171 | return DAG.getNode(Opc, DL, VT, Opnd0, Opnd1, Opnd2); | |||
8172 | } | |||
8173 | ||||
8174 | // We only support ADDSUB. | |||
8175 | if (IsSubAdd) | |||
8176 | return SDValue(); | |||
8177 | ||||
8178 | // Do not generate X86ISD::ADDSUB node for 512-bit types even though | |||
8179 | // the ADDSUB idiom has been successfully recognized. There are no known | |||
8180 | // X86 targets with 512-bit ADDSUB instructions! | |||
8181 | // 512-bit ADDSUB idiom recognition was needed only as part of FMADDSUB idiom | |||
8182 | // recognition. | |||
8183 | if (VT.is512BitVector()) | |||
8184 | return SDValue(); | |||
8185 | ||||
8186 | return DAG.getNode(X86ISD::ADDSUB, DL, VT, Opnd0, Opnd1); | |||
8187 | } | |||
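Illustrative outcomes of this lowering (operand names hypothetical): with FMA available and fast-math fusion enabled, an ADDSUB whose first operand is an FMUL with the expected use count is emitted fused; otherwise the plain node is used, except for 512-bit types:

// ADDSUB (fmul %A, %B), %C  -->  X86ISD::FMADDSUB %A, %B, %C
// ADDSUB %X, %Y             -->  addsubps/addsubpd (128/256-bit only)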
8188 | ||||
8189 | /// Lower BUILD_VECTOR to a horizontal add/sub operation if possible. | |||
8190 | static SDValue LowerToHorizontalOp(const BuildVectorSDNode *BV, | |||
8191 | const X86Subtarget &Subtarget, | |||
8192 | SelectionDAG &DAG) { | |||
8193 | MVT VT = BV->getSimpleValueType(0); | |||
8194 | unsigned NumElts = VT.getVectorNumElements(); | |||
8195 | unsigned NumUndefsLO = 0; | |||
8196 | unsigned NumUndefsHI = 0; | |||
8197 | unsigned Half = NumElts/2; | |||
8198 | ||||
8199 | // Count the number of UNDEF operands in the input build_vector. | |||
8200 | for (unsigned i = 0, e = Half; i != e; ++i) | |||
8201 | if (BV->getOperand(i)->isUndef()) | |||
8202 | NumUndefsLO++; | |||
8203 | ||||
8204 | for (unsigned i = Half, e = NumElts; i != e; ++i) | |||
8205 | if (BV->getOperand(i)->isUndef()) | |||
8206 | NumUndefsHI++; | |||
8207 | ||||
8208 | // Early exit if this is either a build_vector of all UNDEFs or one where all | |||
8209 | // the operands but one are UNDEF. | |||
8210 | if (NumUndefsLO + NumUndefsHI + 1 >= NumElts) | |||
8211 | return SDValue(); | |||
8212 | ||||
8213 | SDLoc DL(BV); | |||
8214 | SDValue InVec0, InVec1; | |||
8215 | if ((VT == MVT::v4f32 || VT == MVT::v2f64) && Subtarget.hasSSE3()) { | |||
8216 | // Try to match an SSE3 float HADD/HSUB. | |||
8217 | if (isHorizontalBinOp(BV, ISD::FADD, DAG, 0, NumElts, InVec0, InVec1)) | |||
8218 | return DAG.getNode(X86ISD::FHADD, DL, VT, InVec0, InVec1); | |||
8219 | ||||
8220 | if (isHorizontalBinOp(BV, ISD::FSUB, DAG, 0, NumElts, InVec0, InVec1)) | |||
8221 | return DAG.getNode(X86ISD::FHSUB, DL, VT, InVec0, InVec1); | |||
8222 | } else if ((VT == MVT::v4i32 || VT == MVT::v8i16) && Subtarget.hasSSSE3()) { | |||
8223 | // Try to match an SSSE3 integer HADD/HSUB. | |||
8224 | if (isHorizontalBinOp(BV, ISD::ADD, DAG, 0, NumElts, InVec0, InVec1)) | |||
8225 | return DAG.getNode(X86ISD::HADD, DL, VT, InVec0, InVec1); | |||
8226 | ||||
8227 | if (isHorizontalBinOp(BV, ISD::SUB, DAG, 0, NumElts, InVec0, InVec1)) | |||
8228 | return DAG.getNode(X86ISD::HSUB, DL, VT, InVec0, InVec1); | |||
8229 | } | |||
8230 | ||||
8231 | if (!Subtarget.hasAVX()) | |||
8232 | return SDValue(); | |||
8233 | ||||
8234 | if ((VT == MVT::v8f32 || VT == MVT::v4f64)) { | |||
8235 | // Try to match an AVX horizontal add/sub of packed single/double | |||
8236 | // precision floating point values from 256-bit vectors. | |||
8237 | SDValue InVec2, InVec3; | |||
8238 | if (isHorizontalBinOp(BV, ISD::FADD, DAG, 0, Half, InVec0, InVec1) && | |||
8239 | isHorizontalBinOp(BV, ISD::FADD, DAG, Half, NumElts, InVec2, InVec3) && | |||
8240 | ((InVec0.isUndef() || InVec2.isUndef()) || InVec0 == InVec2) && | |||
8241 | ((InVec1.isUndef() || InVec3.isUndef()) || InVec1 == InVec3)) | |||
8242 | return DAG.getNode(X86ISD::FHADD, DL, VT, InVec0, InVec1); | |||
8243 | ||||
8244 | if (isHorizontalBinOp(BV, ISD::FSUB, DAG, 0, Half, InVec0, InVec1) && | |||
8245 | isHorizontalBinOp(BV, ISD::FSUB, DAG, Half, NumElts, InVec2, InVec3) && | |||
8246 | ((InVec0.isUndef() || InVec2.isUndef()) || InVec0 == InVec2) && | |||
8247 | ((InVec1.isUndef() || InVec3.isUndef()) || InVec1 == InVec3)) | |||
8248 | return DAG.getNode(X86ISD::FHSUB, DL, VT, InVec0, InVec1); | |||
8249 | } else if (VT == MVT::v8i32 || VT == MVT::v16i16) { | |||
8250 | // Try to match an AVX2 horizontal add/sub of signed integers. | |||
8251 | SDValue InVec2, InVec3; | |||
8252 | unsigned X86Opcode; | |||
8253 | bool CanFold = true; | |||
8254 | ||||
8255 | if (isHorizontalBinOp(BV, ISD::ADD, DAG, 0, Half, InVec0, InVec1) && | |||
8256 | isHorizontalBinOp(BV, ISD::ADD, DAG, Half, NumElts, InVec2, InVec3) && | |||
8257 | ((InVec0.isUndef() || InVec2.isUndef()) || InVec0 == InVec2) && | |||
8258 | ((InVec1.isUndef() || InVec3.isUndef()) || InVec1 == InVec3)) | |||
8259 | X86Opcode = X86ISD::HADD; | |||
8260 | else if (isHorizontalBinOp(BV, ISD::SUB, DAG, 0, Half, InVec0, InVec1) && | |||
8261 | isHorizontalBinOp(BV, ISD::SUB, DAG, Half, NumElts, InVec2, InVec3) && | |||
8262 | ((InVec0.isUndef() || InVec2.isUndef()) || InVec0 == InVec2) && | |||
8263 | ((InVec1.isUndef() || InVec3.isUndef()) || InVec1 == InVec3)) | |||
8264 | X86Opcode = X86ISD::HSUB; | |||
8265 | else | |||
8266 | CanFold = false; | |||
8267 | ||||
8268 | if (CanFold) { | |||
8269 | // Fold this build_vector into a single horizontal add/sub. | |||
8270 | // Do this only if the target has AVX2. | |||
8271 | if (Subtarget.hasAVX2()) | |||
8272 | return DAG.getNode(X86Opcode, DL, VT, InVec0, InVec1); | |||
8273 | ||||
8274 | // Do not try to expand this build_vector into a pair of horizontal | |||
8275 | // add/sub if we can emit a pair of scalar add/sub. | |||
8276 | if (NumUndefsLO + 1 == Half || NumUndefsHI + 1 == Half) | |||
8277 | return SDValue(); | |||
8278 | ||||
8279 | // Convert this build_vector into a pair of horizontal binop followed by | |||
8280 | // a concat vector. | |||
8281 | bool isUndefLO = NumUndefsLO == Half; | |||
8282 | bool isUndefHI = NumUndefsHI == Half; | |||
8283 | return ExpandHorizontalBinOp(InVec0, InVec1, DL, DAG, X86Opcode, false, | |||
8284 | isUndefLO, isUndefHI); | |||
8285 | } | |||
8286 | } | |||
8287 | ||||
8288 | if ((VT == MVT::v8f32 || VT == MVT::v4f64 || VT == MVT::v8i32 || | |||
8289 | VT == MVT::v16i16) && Subtarget.hasAVX()) { | |||
8290 | unsigned X86Opcode; | |||
8291 | if (isHorizontalBinOp(BV, ISD::ADD, DAG, 0, NumElts, InVec0, InVec1)) | |||
8292 | X86Opcode = X86ISD::HADD; | |||
8293 | else if (isHorizontalBinOp(BV, ISD::SUB, DAG, 0, NumElts, InVec0, InVec1)) | |||
8294 | X86Opcode = X86ISD::HSUB; | |||
8295 | else if (isHorizontalBinOp(BV, ISD::FADD, DAG, 0, NumElts, InVec0, InVec1)) | |||
8296 | X86Opcode = X86ISD::FHADD; | |||
8297 | else if (isHorizontalBinOp(BV, ISD::FSUB, DAG, 0, NumElts, InVec0, InVec1)) | |||
8298 | X86Opcode = X86ISD::FHSUB; | |||
8299 | else | |||
8300 | return SDValue(); | |||
8301 | ||||
8302 | // Don't try to expand this build_vector into a pair of horizontal add/sub | |||
8303 | // if we can simply emit a pair of scalar add/sub. | |||
8304 | if (NumUndefsLO + 1 == Half || NumUndefsHI + 1 == Half) | |||
8305 | return SDValue(); | |||
8306 | ||||
8307 | // Convert this build_vector into two horizontal add/sub followed by | |||
8308 | // a concat vector. | |||
8309 | bool isUndefLO = NumUndefsLO == Half; | |||
8310 | bool isUndefHI = NumUndefsHI == Half; | |||
8311 | return ExpandHorizontalBinOp(InVec0, InVec1, DL, DAG, X86Opcode, true, | |||
8312 | isUndefLO, isUndefHI); | |||
8313 | } | |||
8314 | ||||
8315 | return SDValue(); | |||
8316 | } | |||
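A sketch of the v4f32 SSE3 case matched above (lane values hypothetical):

// bv = build_vector (fadd A0,A1), (fadd A2,A3), (fadd B0,B1), (fadd B2,B3)
//   -->  X86ISD::FHADD %A, %B   (selected as haddps)
// Lanes 0-1 reduce adjacent pairs of %A and lanes 2-3 pairs of %B, which is
// exactly the [0, NumElts) range handed to isHorizontalBinOp.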
8317 | ||||
8318 | /// If a BUILD_VECTOR's source elements all apply the same bit operation and | |||
8319 | /// one of their operands is constant, lower to a pair of BUILD_VECTORs and | |||
8320 | /// just apply the bit op to the vectors. | |||
8321 | /// NOTE: It's not in our interest to start making a general purpose vectorizer | |||
8322 | /// from this, but enough scalar bit operations are created from the later | |||
8323 | /// legalization + scalarization stages to need basic support. | |||
8324 | static SDValue lowerBuildVectorToBitOp(BuildVectorSDNode *Op, | |||
8325 | SelectionDAG &DAG) { | |||
8326 | SDLoc DL(Op); | |||
8327 | MVT VT = Op->getSimpleValueType(0); | |||
8328 | unsigned NumElems = VT.getVectorNumElements(); | |||
8329 | const TargetLowering &TLI = DAG.getTargetLoweringInfo(); | |||
8330 | ||||
8331 | // Check that all elements have the same opcode. | |||
8332 | // TODO: Should we allow UNDEFS and if so how many? | |||
8333 | unsigned Opcode = Op->getOperand(0).getOpcode(); | |||
8334 | for (unsigned i = 1; i < NumElems; ++i) | |||
8335 | if (Opcode != Op->getOperand(i).getOpcode()) | |||
8336 | return SDValue(); | |||
8337 | ||||
8338 | // TODO: We may be able to add support for other Ops (ADD/SUB + shifts). | |||
8339 | switch (Opcode) { | |||
8340 | default: | |||
8341 | return SDValue(); | |||
8342 | case ISD::AND: | |||
8343 | case ISD::XOR: | |||
8344 | case ISD::OR: | |||
8345 | // Don't do this if the buildvector is a splat - we'd replace one | |||
8346 | // constant with an entire vector. | |||
8347 | if (Op->getSplatValue()) | |||
8348 | return SDValue(); | |||
8349 | if (!TLI.isOperationLegalOrPromote(Opcode, VT)) | |||
8350 | return SDValue(); | |||
8351 | break; | |||
8352 | } | |||
8353 | ||||
8354 | SmallVector<SDValue, 4> LHSElts, RHSElts; | |||
8355 | for (SDValue Elt : Op->ops()) { | |||
8356 | SDValue LHS = Elt.getOperand(0); | |||
8357 | SDValue RHS = Elt.getOperand(1); | |||
8358 | ||||
8359 | // We expect the canonicalized RHS operand to be the constant. | |||
8360 | if (!isa<ConstantSDNode>(RHS)) | |||
8361 | return SDValue(); | |||
8362 | LHSElts.push_back(LHS); | |||
8363 | RHSElts.push_back(RHS); | |||
8364 | } | |||
8365 | ||||
8366 | SDValue LHS = DAG.getBuildVector(VT, DL, LHSElts); | |||
8367 | SDValue RHS = DAG.getBuildVector(VT, DL, RHSElts); | |||
8368 | return DAG.getNode(Opcode, DL, VT, LHS, RHS); | |||
8369 | } | |||
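A sketch of the rewrite this function performs (operands hypothetical):

// (build_vector (and %x, 1), (and %y, 2), (and %z, 4), (and %w, 8))
//   -->  (and (build_vector %x, %y, %z, %w), (build_vector 1, 2, 4, 8))
// The splat guard above rejects e.g. four copies of (xor %x, 5), where one
// scalar constant would be traded for an entire constant vector.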
8370 | ||||
8371 | /// Create a vector constant without a load. SSE/AVX provide the bare minimum | |||
8372 | /// functionality to do this, so it's all zeros, all ones, or some derivation | |||
8373 | /// that is cheap to calculate. | |||
8374 | static SDValue materializeVectorConstant(SDValue Op, SelectionDAG &DAG, | |||
8375 | const X86Subtarget &Subtarget) { | |||
8376 | SDLoc DL(Op); | |||
8377 | MVT VT = Op.getSimpleValueType(); | |||
8378 | ||||
8379 | // Vectors containing all zeros can be matched by pxor and xorps. | |||
8380 | if (ISD::isBuildVectorAllZeros(Op.getNode())) { | |||
8381 | // Canonicalize this to <4 x i32> to 1) ensure the zero vectors are CSE'd | |||
8382 | // and 2) ensure that i64 scalars are eliminated on x86-32 hosts. | |||
8383 | if (VT == MVT::v4i32 || VT == MVT::v8i32 || VT == MVT::v16i32) | |||
8384 | return Op; | |||
8385 | ||||
8386 | return getZeroVector(VT, Subtarget, DAG, DL); | |||
8387 | } | |||
8388 | ||||
8389 | // Vectors containing all ones can be matched by pcmpeqd on 128-bit width | |||
8390 | // vectors or broken into v4i32 operations on 256-bit vectors. AVX2 can use | |||
8391 | // vpcmpeqd on 256-bit vectors. | |||
8392 | if (Subtarget.hasSSE2() && ISD::isBuildVectorAllOnes(Op.getNode())) { | |||
8393 | if (VT == MVT::v4i32 || VT == MVT::v16i32 || | |||
8394 | (VT == MVT::v8i32 && Subtarget.hasInt256())) | |||
8395 | return Op; | |||
8396 | ||||
8397 | return getOnesVector(VT, DAG, DL); | |||
8398 | } | |||
8399 | ||||
8400 | return SDValue(); | |||
8401 | } | |||
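Illustrative effect of the two fast paths above; neither case emits a constant-pool load:

// build_vector <8 x i32> zeroinitializer  -->  getZeroVector, CSEs with every
//                                              other zero vector (pxor/xorps)
// build_vector <4 x i32> <-1,-1,-1,-1>    -->  kept as-is, matches pcmpeqd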
8402 | ||||
8403 | /// Look for opportunities to create a VPERMV/VPERMILPV/PSHUFB variable permute | |||
8404 | /// from a vector of source values and a vector of extraction indices. | |||
8405 | /// The vectors might be manipulated to match the type of the permute op. | |||
8406 | static SDValue createVariablePermute(MVT VT, SDValue SrcVec, SDValue IndicesVec, | |||
8407 | SDLoc &DL, SelectionDAG &DAG, | |||
8408 | const X86Subtarget &Subtarget) { | |||
8409 | MVT ShuffleVT = VT; | |||
8410 | EVT IndicesVT = EVT(VT).changeVectorElementTypeToInteger(); | |||
8411 | unsigned NumElts = VT.getVectorNumElements(); | |||
8412 | unsigned SizeInBits = VT.getSizeInBits(); | |||
8413 | ||||
8414 | // Adjust IndicesVec to match VT size. | |||
8415 | assert(IndicesVec.getValueType().getVectorNumElements() >= NumElts && | |||
8416 | "Illegal variable permute mask size"); | |||
8417 | if (IndicesVec.getValueType().getVectorNumElements() > NumElts) | |||
8418 | IndicesVec = extractSubVector(IndicesVec, 0, DAG, SDLoc(IndicesVec), | |||
8419 | NumElts * VT.getScalarSizeInBits()); | |||
8420 | IndicesVec = DAG.getZExtOrTrunc(IndicesVec, SDLoc(IndicesVec), IndicesVT); | |||
8421 | ||||
8422 | // Handle a SrcVec that doesn't match the VT type. | |||
8423 | if (SrcVec.getValueSizeInBits() != SizeInBits) { | |||
8424 | if ((SrcVec.getValueSizeInBits() % SizeInBits) == 0) { | |||
8425 | // Handle larger SrcVec by treating it as a larger permute. | |||
8426 | unsigned Scale = SrcVec.getValueSizeInBits() / SizeInBits; | |||
8427 | VT = MVT::getVectorVT(VT.getScalarType(), Scale * NumElts); | |||
8428 | IndicesVT = EVT(VT).changeVectorElementTypeToInteger(); | |||
8429 | IndicesVec = widenSubVector(IndicesVT.getSimpleVT(), IndicesVec, false, | |||
8430 | Subtarget, DAG, SDLoc(IndicesVec)); | |||
8431 | return extractSubVector( | |||
8432 | createVariablePermute(VT, SrcVec, IndicesVec, DL, DAG, Subtarget), 0, | |||
8433 | DAG, DL, SizeInBits); | |||
8434 | } else if (SrcVec.getValueSizeInBits() < SizeInBits) { | |||
8435 | // Widen smaller SrcVec to match VT. | |||
8436 | SrcVec = widenSubVector(VT, SrcVec, false, Subtarget, DAG, SDLoc(SrcVec)); | |||
8437 | } else | |||
8438 | return SDValue(); | |||
8439 | } | |||
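A worked example of the 'larger SrcVec' path, under assumed shapes: VT = v4i32 with SrcVec = v8i32 gives Scale = 2, so the operation is re-posed as a v8i32 permute whose index vector is widened to v8i32 (upper lanes zeroed), and the final result is the low 128 bits of that wider permute:

// permute v4i32 of a v8i32 source
//   -->  extract_subvector (createVariablePermute v8i32 ...), 0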
8440 | ||||
8441 | auto ScaleIndices = [&DAG](SDValue Idx, uint64_t Scale) { | |||
8442 | assert(isPowerOf2_64(Scale) && "Illegal variable permute shuffle scale"); | |||
8443 | EVT SrcVT = Idx.getValueType(); | |||
8444 | unsigned NumDstBits = SrcVT.getScalarSizeInBits() / Scale; | |||
8445 | uint64_t IndexScale = 0; | |||
8446 | uint64_t IndexOffset = 0; | |||
8447 | ||||
8448 | // If we're scaling a smaller permute op, then we need to repeat the | |||
8449 | // indices, scaling and offsetting them as well. | |||
8450 | // e.g. v4i32 -> v16i8 (Scale = 4) | |||
8451 | // IndexScale = v4i32 Splat(4 << 24 | 4 << 16 | 4 << 8 | 4) | |||
8452 | // IndexOffset = v4i32 Splat(3 << 24 | 2 << 16 | 1 << 8 | 0) | |||
8453 | for (uint64_t i = 0; i != Scale; ++i) { | |||
8454 | IndexScale |= Scale << (i * NumDstBits); | |||
8455 | IndexOffset |= i << (i * NumDstBits); | |||
8456 | } | |||
8457 | ||||
8458 | Idx = DAG.getNode(ISD::MUL, SDLoc(Idx), SrcVT, Idx, | |||
8459 | DAG.getConstant(IndexScale, SDLoc(Idx), SrcVT)); | |||
8460 | Idx = DAG.getNode(ISD::ADD, SDLoc(Idx), SrcVT, Idx, | |||
8461 | DAG.getConstant(IndexOffset, SDLoc(Idx), SrcVT)); | |||
8462 | return Idx; | |||
8463 | }; | |||
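A standalone sketch of the IndexScale/IndexOffset computation above, for the v4i32 -> v16i8 case from the comment (Scale = 4, NumDstBits = 8); illustrative only, not part of the original file:

#include <cstdint>
#include <cstdio>

int main() {
  const uint64_t Scale = 4;      // each i32 index expands into 4 byte indices
  const unsigned NumDstBits = 8; // 32-bit source index width / Scale
  uint64_t IndexScale = 0, IndexOffset = 0;
  for (uint64_t i = 0; i != Scale; ++i) {
    IndexScale |= Scale << (i * NumDstBits);  // replicate the multiplier
    IndexOffset |= i << (i * NumDstBits);     // per-byte lane offsets 0..3
  }
  // Prints IndexScale = 0x4040404, IndexOffset = 0x3020100: index k becomes
  // the byte indices 4*k+0 .. 4*k+3 after the MUL and ADD nodes above.
  std::printf("IndexScale = %#llx, IndexOffset = %#llx\n",
              (unsigned long long)IndexScale, (unsigned long long)IndexOffset);
  return 0;
}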
8464 | ||||
8465 | unsigned Opcode = 0; | |||
8466 | switch (VT.SimpleTy) { | |||
8467 | default: | |||
8468 | break; | |||
8469 | case MVT::v16i8: | |||
8470 | if (Subtarget.hasSSSE3()) | |||
8471 | Opcode = X86ISD::PSHUFB; | |||
8472 | break; | |||
8473 | case MVT::v8i16: | |||
8474 | if (Subtarget.hasVLX() && Subtarget.hasBWI()) | |||
8475 | Opcode = X86ISD::VPERMV; | |||
8476 | else if (Subtarget.hasSSSE3()) { | |||
8477 | Opcode = X86ISD::PSHUFB; | |||
8478 | ShuffleVT = MVT::v16i8; | |||
8479 | } | |||
8480 | break; | |||
8481 | case MVT::v4f32: | |||
8482 | case MVT::v4i32: | |||
8483 | if (Subtarget.hasAVX()) { | |||
8484 | Opcode = X86ISD::VPERMILPV; | |||
8485 | ShuffleVT = MVT::v4f32; | |||
8486 | } else if (Subtarget.hasSSSE3()) { | |||
8487 | Opcode = X86ISD::PSHUFB; | |||
8488 | ShuffleVT = MVT::v16i8; | |||
8489 | } | |||
8490 | break; | |||
8491 | case MVT::v2f64: | |||
8492 | case MVT::v2i64: | |||
8493 | if (Subtarget.hasAVX()) { | |||
8494 | // VPERMILPD selects using bit#1 of the index vector, so scale IndicesVec. | |||
8495 | IndicesVec = DAG.getNode(ISD::ADD, DL, IndicesVT, IndicesVec, IndicesVec); | |||
8496 | Opcode = X86ISD::VPERMILPV; | |||
8497 | ShuffleVT = MVT::v2f64; | |||
8498 | } else if (Subtarget.hasSSE41()) { | |||
8499 | // SSE41 can compare v2i64 - select between indices 0 and 1. | |||
8500 | return DAG.getSelectCC( | |||
8501 | DL, IndicesVec, | |||
8502 | getZeroVector(IndicesVT.getSimpleVT(), Subtarget, DAG, DL), | |||
8503 | DAG.getVectorShuffle(VT, DL, SrcVec, SrcVec, {0, 0}), | |||
8504 | DAG.getVectorShuffle(VT, DL, SrcVec, SrcVec, {1, 1}), | |||
8505 | ISD::CondCode::SETEQ); | |||
8506 | } | |||
8507 | break; | |||
8508 | case MVT::v32i8: | |||
8509 | if (Subtarget.hasVLX() && Subtarget.hasVBMI()) | |||
8510 | Opcode = X86ISD::VPERMV; | |||
8511 | else if (Subtarget.hasXOP()) { | |||
8512 | SDValue LoSrc = extract128BitVector(SrcVec, 0, DAG, DL); | |||
8513 | SDValue HiSrc = extract128BitVector(SrcVec, 16, DAG, DL); | |||
8514 | SDValue LoIdx = extract128BitVector(IndicesVec, 0, DAG, DL); | |||
8515 | SDValue HiIdx = extract128BitVector(IndicesVec, 16, DAG, DL); | |||
8516 | return DAG.getNode( | |||
8517 | ISD::CONCAT_VECTORS, DL, VT, | |||
8518 | DAG.getNode(X86ISD::VPPERM, DL, MVT::v16i8, LoSrc, HiSrc, LoIdx), | |||
8519 | DAG.getNode(X86ISD::VPPERM, DL, MVT::v16i8, LoSrc, HiSrc, HiIdx)); | |||
8520 | } else if (Subtarget.hasAVX()) { | |||
8521 | SDValue Lo = extract128BitVector(SrcVec, 0, DAG, DL); | |||
8522 | SDValue Hi = extract128BitVector(SrcVec, 16, DAG, DL); | |||
8523 | SDValue LoLo = DAG.getNode(ISD::CONCAT_VECTORS, DL, VT, Lo, Lo); | |||
8524 | SDValue HiHi = DAG.getNode(ISD::CONCAT_VECTORS, DL, VT, Hi, Hi); | |||
8525 | auto PSHUFBBuilder = [](SelectionDAG &DAG, const SDLoc &DL, | |||
8526 | ArrayRef<SDValue> Ops) { | |||
8527 | // Permute Lo and Hi and then select based on index range. | |||
8528 | // This works as PSHUFB uses bits[3:0] to permute elements and we don't | |||
8529 | // care about bit[7] as it's just an index vector. | |||
8530 | SDValue Idx = Ops[2]; | |||
8531 | EVT VT = Idx.getValueType(); | |||
8532 | return DAG.getSelectCC(DL, Idx, DAG.getConstant(15, DL, VT), | |||
8533 | DAG.getNode(X86ISD::PSHUFB, DL, VT, Ops[1], Idx), | |||
8534 | DAG.getNode(X86ISD::PSHUFB, DL, VT, Ops[0], Idx), | |||
8535 | ISD::CondCode::SETGT); | |||
8536 | }; | |||
8537 | SDValue Ops[] = {LoLo, HiHi, IndicesVec}; | |||
8538 | return SplitOpsAndApply(DAG, Subtarget, DL, MVT::v32i8, Ops, | |||
8539 | PSHUFBBuilder); | |||
8540 | } | |||
8541 | break; | |||
8542 | case MVT::v16i16: | |||
8543 | if (Subtarget.hasVLX() && Subtarget.hasBWI()) | |||
8544 | Opcode = X86ISD::VPERMV; | |||
8545 | else if (Subtarget.hasAVX()) { | |||
8546 | // Scale to v32i8 and perform as v32i8. | |||
8547 | IndicesVec = ScaleIndices(IndicesVec, 2); | |||
8548 | return DAG.getBitcast( | |||
8549 | VT, createVariablePermute( | |||
8550 | MVT::v32i8, DAG.getBitcast(MVT::v32i8, SrcVec), | |||
8551 | DAG.getBitcast(MVT::v32i8, IndicesVec), DL, DAG, Subtarget)); | |||
8552 | } | |||
8553 | break; | |||
8554 | case MVT::v8f32: | |||
8555 | case MVT::v8i32: | |||
8556 | if (Subtarget.hasAVX2()) | |||
8557 | Opcode = X86ISD::VPERMV; | |||
8558 | else if (Subtarget.hasAVX()) { | |||
8559 | SrcVec = DAG.getBitcast(MVT::v8f32, SrcVec); | |||
8560 | SDValue LoLo = DAG.getVectorShuffle(MVT::v8f32, DL, SrcVec, SrcVec, | |||
8561 | {0, 1, 2, 3, 0, 1, 2, 3}); | |||
8562 | SDValue HiHi = DAG.getVectorShuffle(MVT::v8f32, DL, SrcVec, SrcVec, | |||
8563 | {4, 5, 6, 7, 4, 5, 6, 7}); | |||
8564 | if (Subtarget.hasXOP()) | |||
8565 | return DAG.getBitcast(VT, DAG.getNode(X86ISD::VPERMIL2, DL, MVT::v8f32, | |||
8566 | LoLo, HiHi, IndicesVec, | |||
8567 | DAG.getConstant(0, DL, MVT::i8))); | |||
8568 | // Permute Lo and Hi and then select based on index range. | |||
8569 | // This works as VPERMILPS only uses index bits[0:1] to permute elements. | |||
8570 | SDValue Res = DAG.getSelectCC( | |||
8571 | DL, IndicesVec, DAG.getConstant(3, DL, MVT::v8i32), | |||
8572 | DAG.getNode(X86ISD::VPERMILPV, DL, MVT::v8f32, HiHi, IndicesVec), | |||
8573 | DAG.getNode(X86ISD::VPERMILPV, DL, MVT::v8f32, LoLo, IndicesVec), | |||
8574 | ISD::CondCode::SETGT); | |||
8575 | return DAG.getBitcast(VT, Res); | |||
8576 | } | |||
8577 | break; | |||
8578 | case MVT::v4i64: | |||
8579 | case MVT::v4f64: | |||
8580 | if (Subtarget.hasAVX512()) { | |||
8581 | if (!Subtarget.hasVLX()) { | |||
8582 | MVT WidenSrcVT = MVT::getVectorVT(VT.getScalarType(), 8); | |||
8583 | SrcVec = widenSubVector(WidenSrcVT, SrcVec, false, Subtarget, DAG, | |||
8584 | SDLoc(SrcVec)); | |||
8585 | IndicesVec = widenSubVector(MVT::v8i64, IndicesVec, false, Subtarget, | |||
8586 | DAG, SDLoc(IndicesVec)); | |||
8587 | SDValue Res = createVariablePermute(WidenSrcVT, SrcVec, IndicesVec, DL, | |||
8588 | DAG, Subtarget); | |||
8589 | return extract256BitVector(Res, 0, DAG, DL); | |||
8590 | } | |||
8591 | Opcode = X86ISD::VPERMV; | |||
8592 | } else if (Subtarget.hasAVX()) { | |||
8593 | SrcVec = DAG.getBitcast(MVT::v4f64, SrcVec); | |||
8594 | SDValue LoLo = | |||
8595 | DAG.getVectorShuffle(MVT::v4f64, DL, SrcVec, SrcVec, {0, 1, 0, 1}); | |||
8596 | SDValue HiHi = | |||
8597 | DAG.getVectorShuffle(MVT::v4f64, DL, SrcVec, SrcVec, {2, 3, 2, 3}); | |||
8598 | // VPERMIL2PD selects with bit#1 of the index vector, so scale IndicesVec. | |||
8599 | IndicesVec = DAG.getNode(ISD::ADD, DL, IndicesVT, IndicesVec, IndicesVec); | |||
8600 | if (Subtarget.hasXOP()) | |||
8601 | return DAG.getBitcast(VT, DAG.getNode(X86ISD::VPERMIL2, DL, MVT::v4f64, | |||
8602 | LoLo, HiHi, IndicesVec, | |||
8603 | DAG.getConstant(0, DL, MVT::i8))); | |||
8604 | // Permute Lo and Hi and then select based on index range. | |||
8605 | // This works as VPERMILPD only uses index bit[1] to permute elements. | |||
8606 | SDValue Res = DAG.getSelectCC( | |||
8607 | DL, IndicesVec, DAG.getConstant(2, DL, MVT::v4i64), | |||
8608 | DAG.getNode(X86ISD::VPERMILPV, DL, MVT::v4f64, HiHi, IndicesVec), | |||
8609 | DAG.getNode(X86ISD::VPERMILPV, DL, MVT::v4f64, LoLo, IndicesVec), | |||
8610 | ISD::CondCode::SETGT); | |||
8611 | return DAG.getBitcast(VT, Res); | |||
8612 | } | |||
8613 | break; | |||
8614 | case MVT::v64i8: | |||
8615 | if (Subtarget.hasVBMI()) | |||
8616 | Opcode = X86ISD::VPERMV; | |||
8617 | break; | |||
8618 | case MVT::v32i16: | |||
8619 | if (Subtarget.hasBWI()) | |||
8620 | Opcode = X86ISD::VPERMV; | |||
8621 | break; | |||
8622 | case MVT::v16f32: | |||
8623 | case MVT::v16i32: | |||
8624 | case MVT::v8f64: | |||
8625 | case MVT::v8i64: | |||
8626 | if (Subtarget.hasAVX512()) | |||
8627 | Opcode = X86ISD::VPERMV; | |||
8628 | break; | |||
8629 | } | |||
8630 | if (!Opcode) | |||
8631 | return SDValue(); | |||
8632 | ||||
8633 | assert((VT.getSizeInBits() == ShuffleVT.getSizeInBits()) && | |||
8634 | (VT.getScalarSizeInBits() % ShuffleVT.getScalarSizeInBits()) == 0 && | |||
8635 | "Illegal variable permute shuffle type"); | |||
8636 | ||||
8637 | uint64_t Scale = VT.getScalarSizeInBits() / ShuffleVT.getScalarSizeInBits(); | |||
8638 | if (Scale > 1) | |||
8639 | IndicesVec = ScaleIndices(IndicesVec, Scale); | |||
8640 | ||||
8641 | EVT ShuffleIdxVT = EVT(ShuffleVT).changeVectorElementTypeToInteger(); | |||
8642 | IndicesVec = DAG.getBitcast(ShuffleIdxVT, IndicesVec); | |||
8643 | ||||
8644 | SrcVec = DAG.getBitcast(ShuffleVT, SrcVec); | |||
8645 | SDValue Res = Opcode == X86ISD::VPERMV | |||
8646 | ? DAG.getNode(Opcode, DL, ShuffleVT, IndicesVec, SrcVec) | |||
8647 | : DAG.getNode(Opcode, DL, ShuffleVT, SrcVec, IndicesVec); | |||
8648 | return DAG.getBitcast(VT, Res); | |||
8649 | } | |||
8650 | ||||
8651 | // Tries to lower a BUILD_VECTOR composed of extract-extract chains that can be | |||
8652 | // reasoned to be a permutation of a vector by indices in a non-constant vector. | |||
8653 | // (build_vector (extract_elt V, (extract_elt I, 0)), | |||
8654 | // (extract_elt V, (extract_elt I, 1)), | |||
8655 | // ... | |||
8656 | // -> | |||
8657 | // (vpermv I, V) | |||
8658 | // | |||
8659 | // TODO: Handle undefs | |||
8660 | // TODO: Utilize pshufb and zero mask blending to support more efficient | |||
8661 | // construction of vectors with constant-0 elements. | |||
8662 | static SDValue | |||
8663 | LowerBUILD_VECTORAsVariablePermute(SDValue V, SelectionDAG &DAG, | |||
8664 | const X86Subtarget &Subtarget) { | |||
8665 | SDValue SrcVec, IndicesVec; | |||
8666 | // Check for a match of the permute source vector and permute index elements. | |||
8667 | // This is done by checking that the i-th build_vector operand is of the form: | |||
8668 | // (extract_elt SrcVec, (extract_elt IndicesVec, i)). | |||
8669 | for (unsigned Idx = 0, E = V.getNumOperands(); Idx != E; ++Idx) { | |||
8670 | SDValue Op = V.getOperand(Idx); | |||
8671 | if (Op.getOpcode() != ISD::EXTRACT_VECTOR_ELT) | |||
8672 | return SDValue(); | |||
8673 | ||||
8674 | // If this is the first extract encountered in V, set the source vector, | |||
8675 | // otherwise verify the extract is from the previously defined source | |||
8676 | // vector. | |||
8677 | if (!SrcVec) | |||
8678 | SrcVec = Op.getOperand(0); | |||
8679 | else if (SrcVec != Op.getOperand(0)) | |||
8680 | return SDValue(); | |||
8681 | SDValue ExtractedIndex = Op->getOperand(1); | |||
8682 | // Peek through extends. | |||
8683 | if (ExtractedIndex.getOpcode() == ISD::ZERO_EXTEND || | |||
8684 | ExtractedIndex.getOpcode() == ISD::SIGN_EXTEND) | |||
8685 | ExtractedIndex = ExtractedIndex.getOperand(0); | |||
8686 | if (ExtractedIndex.getOpcode() != ISD::EXTRACT_VECTOR_ELT) | |||
8687 | return SDValue(); | |||
8688 | ||||
8689 | // If this is the first extract from the index vector candidate, set the | |||
8690 | // indices vector, otherwise verify the extract is from the previously | |||
8691 | // defined indices vector. | |||
8692 | if (!IndicesVec) | |||
8693 | IndicesVec = ExtractedIndex.getOperand(0); | |||
8694 | else if (IndicesVec != ExtractedIndex.getOperand(0)) | |||
8695 | return SDValue(); | |||
8696 | ||||
8697 | auto *PermIdx = dyn_cast<ConstantSDNode>(ExtractedIndex.getOperand(1)); | |||
8698 | if (!PermIdx || PermIdx->getZExtValue() != Idx) | |||
8699 | return SDValue(); | |||
8700 | } | |||
8701 | ||||
8702 | SDLoc DL(V); | |||
8703 | MVT VT = V.getSimpleValueType(); | |||
8704 | return createVariablePermute(VT, SrcVec, IndicesVec, DL, DAG, Subtarget); | |||
8705 | } | |||
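A sketch of the source-level idiom this recognizer targets (the loop form and names are illustrative):

// for (unsigned i = 0; i != N; ++i)
//   dst[i] = src[idx[i]];
// i.e. a build_vector whose lane i is
//   (extract_elt SrcVec, (extract_elt IndicesVec, i)),
// which createVariablePermute then maps onto vpermv/vpermilpv/pshufb.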
8706 | ||||
8707 | SDValue | |||
8708 | X86TargetLowering::LowerBUILD_VECTOR(SDValue Op, SelectionDAG &DAG) const { | |||
8709 | SDLoc dl(Op); | |||
8710 | ||||
8711 | MVT VT = Op.getSimpleValueType(); | |||
8712 | MVT EltVT = VT.getVectorElementType(); | |||
8713 | unsigned NumElems = Op.getNumOperands(); | |||
8714 | ||||
8715 | // Generate vectors for predicate vectors. | |||
8716 | if (VT.getVectorElementType() == MVT::i1 && Subtarget.hasAVX512()) | |||
| ||||
8717 | return LowerBUILD_VECTORvXi1(Op, DAG, Subtarget); | |||
8718 | ||||
8719 | if (SDValue VectorConstant = materializeVectorConstant(Op, DAG, Subtarget)) | |||
8720 | return VectorConstant; | |||
8721 | ||||
8722 | BuildVectorSDNode *BV = cast<BuildVectorSDNode>(Op.getNode()); | |||
8723 | if (SDValue AddSub = lowerToAddSubOrFMAddSub(BV, Subtarget, DAG)) | |||
8724 | return AddSub; | |||
8725 | if (SDValue HorizontalOp = LowerToHorizontalOp(BV, Subtarget, DAG)) | |||
8726 | return HorizontalOp; | |||
8727 | if (SDValue Broadcast = lowerBuildVectorAsBroadcast(BV, Subtarget, DAG)) | |||
8728 | return Broadcast; | |||
8729 | if (SDValue BitOp = lowerBuildVectorToBitOp(BV, DAG)) | |||
8730 | return BitOp; | |||
8731 | ||||
8732 | unsigned EVTBits = EltVT.getSizeInBits(); | |||
8733 | ||||
8734 | unsigned NumZero = 0; | |||
8735 | unsigned NumNonZero = 0; | |||
8736 | uint64_t NonZeros = 0; | |||
8737 | bool IsAllConstants = true; | |||
8738 | SmallSet<SDValue, 8> Values; | |||
8739 | unsigned NumConstants = NumElems; | |||
8740 | for (unsigned i = 0; i < NumElems; ++i) { | |||
8741 | SDValue Elt = Op.getOperand(i); | |||
8742 | if (Elt.isUndef()) | |||
8743 | continue; | |||
8744 | Values.insert(Elt); | |||
8745 | if (!isa<ConstantSDNode>(Elt) && !isa<ConstantFPSDNode>(Elt)) { | |||
8746 | IsAllConstants = false; | |||
8747 | NumConstants--; | |||
8748 | } | |||
8749 | if (X86::isZeroNode(Elt)) | |||
8750 | NumZero++; | |||
8751 | else { | |||
8752 | assert(i < sizeof(NonZeros) * 8); // Make sure the shift is within range. | |||
8753 | NonZeros |= ((uint64_t)1 << i); | |||
8754 | NumNonZero++; | |||
8755 | } | |||
8756 | } | |||
8757 | ||||
8758 | // All undef vector. Return an UNDEF. All zero vectors were handled above. | |||
8759 | if (NumNonZero == 0) | |||
8760 | return DAG.getUNDEF(VT); | |||
8761 | ||||
8762 | // If we are inserting one variable into a vector of non-zero constants, try | |||
8763 | // to avoid loading each constant element as a scalar. Load the constants as a | |||
8764 | // vector and then insert the variable scalar element. If insertion is not | |||
8765 | // supported, fall back to a shuffle to get the scalar blended with the | |||
8766 | // constants. Insertion into a zero vector is handled as a special-case | |||
8767 | // somewhere below here. | |||
8768 | if (NumConstants == NumElems - 1 && NumNonZero != 1 && | |||
8769 | (isOperationLegalOrCustom(ISD::INSERT_VECTOR_ELT, VT) || | |||
8770 | isOperationLegalOrCustom(ISD::VECTOR_SHUFFLE, VT))) { | |||
8771 | // Create an all-constant vector. The variable element in the old | |||
8772 | // build vector is replaced by undef in the constant vector. Save the | |||
8773 | // variable scalar element and its index for use in the insertelement. | |||
8774 | LLVMContext &Context = *DAG.getContext(); | |||
8775 | Type *EltType = Op.getValueType().getScalarType().getTypeForEVT(Context); | |||
8776 | SmallVector<Constant *, 16> ConstVecOps(NumElems, UndefValue::get(EltType)); | |||
8777 | SDValue VarElt; | |||
8778 | SDValue InsIndex; | |||
8779 | for (unsigned i = 0; i != NumElems; ++i) { | |||
8780 | SDValue Elt = Op.getOperand(i); | |||
8781 | if (auto *C = dyn_cast<ConstantSDNode>(Elt)) | |||
8782 | ConstVecOps[i] = ConstantInt::get(Context, C->getAPIntValue()); | |||
8783 | else if (auto *C = dyn_cast<ConstantFPSDNode>(Elt)) | |||
8784 | ConstVecOps[i] = ConstantFP::get(Context, C->getValueAPF()); | |||
8785 | else if (!Elt.isUndef()) { | |||
8786 | assert(!VarElt.getNode() && !InsIndex.getNode() && | |||
8787 | "Expected one variable element in this vector"); | |||
8788 | VarElt = Elt; | |||
8789 | InsIndex = DAG.getConstant(i, dl, getVectorIdxTy(DAG.getDataLayout())); | |||
8790 | } | |||
8791 | } | |||
8792 | Constant *CV = ConstantVector::get(ConstVecOps); | |||
8793 | SDValue DAGConstVec = DAG.getConstantPool(CV, VT); | |||
8794 | ||||
8795 | // The constants we just created may not be legal (eg, floating point). We | |||
8796 | // must lower the vector right here because we cannot guarantee that we'll | |||
8797 | // legalize it before loading it. This is also why we could not just create | |||
8798 | // a new build vector here. If the build vector contains illegal constants, | |||
8799 | // it could get split back up into a series of insert elements. | |||
8800 | // TODO: Improve this by using shorter loads with broadcast/VZEXT_LOAD. | |||
8801 | SDValue LegalDAGConstVec = LowerConstantPool(DAGConstVec, DAG); | |||
8802 | MachineFunction &MF = DAG.getMachineFunction(); | |||
8803 | MachinePointerInfo MPI = MachinePointerInfo::getConstantPool(MF); | |||
8804 | SDValue Ld = DAG.getLoad(VT, dl, DAG.getEntryNode(), LegalDAGConstVec, MPI); | |||
8805 | unsigned InsertC = cast<ConstantSDNode>(InsIndex)->getZExtValue(); | |||
8806 | unsigned NumEltsInLow128Bits = 128 / VT.getScalarSizeInBits(); | |||
8807 | if (InsertC < NumEltsInLow128Bits) | |||
8808 | return DAG.getNode(ISD::INSERT_VECTOR_ELT, dl, VT, Ld, VarElt, InsIndex); | |||
8809 | ||||
8810 | // There's no good way to insert into the high elements of a >128-bit | |||
8811 | // vector, so use shuffles to avoid an extract/insert sequence. | |||
8812 | assert(VT.getSizeInBits() > 128 && "Invalid insertion index?"); | |||
8813 | assert(Subtarget.hasAVX() && "Must have AVX with >16-byte vector"); | |||
8814 | SmallVector<int, 8> ShuffleMask; | |||
8815 | unsigned NumElts = VT.getVectorNumElements(); | |||
8816 | for (unsigned i = 0; i != NumElts; ++i) | |||
8817 | ShuffleMask.push_back(i == InsertC ? NumElts : i); | |||
8818 | SDValue S2V = DAG.getNode(ISD::SCALAR_TO_VECTOR, dl, VT, VarElt); | |||
8819 | return DAG.getVectorShuffle(VT, dl, Ld, S2V, ShuffleMask); | |||
8820 | } | |||
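A worked example of this path (operands hypothetical): one vector load plus one insert replaces several scalar constant loads; a variable lane above the low 128 bits takes the shuffle path just above instead.

// (build_vector f32 1.0, 2.0, %x, 4.0)
//   -->  (insert_vector_elt (load <1.0, 2.0, undef, 4.0>), %x, 2)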
8821 | ||||
8822 | // Special case for single non-zero, non-undef, element. | |||
8823 | if (NumNonZero == 1) { | |||
8824 | unsigned Idx = countTrailingZeros(NonZeros); | |||
8825 | SDValue Item = Op.getOperand(Idx); | |||
8826 | ||||
8827 | // If we have a constant or non-constant insertion into the low element of | |||
8828 | // a vector, we can do this with SCALAR_TO_VECTOR + shuffle of zero into | |||
8829 | // the rest of the elements. This will be matched as movd/movq/movss/movsd | |||
8830 | // depending on what the source datatype is. | |||
8831 | if (Idx == 0) { | |||
8832 | if (NumZero == 0) | |||
8833 | return DAG.getNode(ISD::SCALAR_TO_VECTOR, dl, VT, Item); | |||
8834 | ||||
8835 | if (EltVT == MVT::i32 || EltVT == MVT::f32 || EltVT == MVT::f64 || | |||
8836 | (EltVT == MVT::i64 && Subtarget.is64Bit())) { | |||
8837 | assert((VT.is128BitVector() || VT.is256BitVector() || | |||
8838 | VT.is512BitVector()) && | |||
8839 | "Expected an SSE value type!"); | |||
8840 | Item = DAG.getNode(ISD::SCALAR_TO_VECTOR, dl, VT, Item); | |||
8841 | // Turn it into a MOVL (i.e. movss, movsd, or movd) to a zero vector. | |||
8842 | return getShuffleVectorZeroOrUndef(Item, 0, true, Subtarget, DAG); | |||
8843 | } | |||
8844 | ||||
8845 | // We can't directly insert an i8 or i16 into a vector, so zero extend | |||
8846 | // it to i32 first. | |||
8847 | if (EltVT == MVT::i16 || EltVT == MVT::i8) { | |||
8848 | Item = DAG.getNode(ISD::ZERO_EXTEND, dl, MVT::i32, Item); | |||
8849 | if (VT.getSizeInBits() >= 256) { | |||
8850 | MVT ShufVT = MVT::getVectorVT(MVT::i32, VT.getSizeInBits()/32); | |||
8851 | if (Subtarget.hasAVX()) { | |||
8852 | Item = DAG.getNode(ISD::SCALAR_TO_VECTOR, dl, ShufVT, Item); | |||
8853 | Item = getShuffleVectorZeroOrUndef(Item, 0, true, Subtarget, DAG); | |||
8854 | } else { | |||
8855 | // Without AVX, we need to extend to a 128-bit vector and then | |||
8856 | // insert into the 256-bit vector. | |||
8857 | Item = DAG.getNode(ISD::SCALAR_TO_VECTOR, dl, MVT::v4i32, Item); | |||
8858 | SDValue ZeroVec = getZeroVector(ShufVT, Subtarget, DAG, dl); | |||
8859 | Item = insert128BitVector(ZeroVec, Item, 0, DAG, dl); | |||
8860 | } | |||
8861 | } else { | |||
8862 | assert(VT.is128BitVector() && "Expected an SSE value type!"); | |||
8863 | Item = DAG.getNode(ISD::SCALAR_TO_VECTOR, dl, MVT::v4i32, Item); | |||
8864 | Item = getShuffleVectorZeroOrUndef(Item, 0, true, Subtarget, DAG); | |||
8865 | } | |||
8866 | return DAG.getBitcast(VT, Item); | |||
8867 | } | |||
8868 | } | |||
8869 | ||||
8870 | // Is it a vector logical left shift? | |||
8871 | if (NumElems == 2 && Idx == 1 && | |||
8872 | X86::isZeroNode(Op.getOperand(0)) && | |||
8873 | !X86::isZeroNode(Op.getOperand(1))) { | |||
8874 | unsigned NumBits = VT.getSizeInBits(); | |||
8875 | return getVShift(true, VT, | |||
8876 | DAG.getNode(ISD::SCALAR_TO_VECTOR, dl, | |||
8877 | VT, Op.getOperand(1)), | |||
8878 | NumBits/2, DAG, *this, dl); | |||
8879 | } | |||
8880 | ||||
8881 | if (IsAllConstants) // Otherwise, it's better to do a constpool load. | |||
8882 | return SDValue(); | |||
8883 | ||||
8884 | // Otherwise, if this is a vector with i32 or f32 elements, and the element | |||
8885 | // is a non-constant being inserted into an element other than the low one, | |||
8886 | // we can't use a constant pool load. Instead, use SCALAR_TO_VECTOR (aka | |||
8887 | // movd/movss) to move this into the low element, then shuffle it into | |||
8888 | // place. | |||
8889 | if (EVTBits == 32) { | |||
8890 | Item = DAG.getNode(ISD::SCALAR_TO_VECTOR, dl, VT, Item); | |||
8891 | return getShuffleVectorZeroOrUndef(Item, Idx, NumZero > 0, Subtarget, DAG); | |||
8892 | } | |||
8893 | } | |||
8894 | ||||
8895 | // Splat is obviously ok. Let legalizer expand it to a shuffle. | |||
8896 | if (Values.size() == 1) { | |||
8897 | if (EVTBits == 32) { | |||
8898 | // Instead of a shuffle like this: | |||
8899 | // shuffle (scalar_to_vector (load (ptr + 4))), undef, <0, 0, 0, 0> | |||
8900 | // Check if it's possible to issue this instead. | |||
8901 | // shuffle (vload ptr), undef, <1, 1, 1, 1> | |||
8902 | unsigned Idx = countTrailingZeros(NonZeros); | |||
8903 | SDValue Item = Op.getOperand(Idx); | |||
8904 | if (Op.getNode()->isOnlyUserOf(Item.getNode())) | |||
8905 | return LowerAsSplatVectorLoad(Item, VT, dl, DAG); | |||
8906 | } | |||
8907 | return SDValue(); | |||
8908 | } | |||
8909 | ||||
8910 | // A vector full of immediates; various special cases are already | |||
8911 | // handled, so this is best done with a single constant-pool load. | |||
8912 | if (IsAllConstants) | |||
8913 | return SDValue(); | |||
8914 | ||||
8915 | if (SDValue V = LowerBUILD_VECTORAsVariablePermute(Op, DAG, Subtarget)) | |||
8916 | return V; | |||
8917 | ||||
8918 | // See if we can use a vector load to get all of the elements. | |||
8919 | { | |||
8920 | SmallVector<SDValue, 64> Ops(Op->op_begin(), Op->op_begin() + NumElems); | |||
8921 | if (SDValue LD = | |||
8922 | EltsFromConsecutiveLoads(VT, Ops, dl, DAG, Subtarget, false)) | |||
8923 | return LD; | |||
8924 | } | |||
8925 | ||||
8926 | // If this is a splat of pairs of 32-bit elements, we can use a narrower | |||
8927 | // build_vector and broadcast it. | |||
8928 | // TODO: We could probably generalize this more. | |||
8929 | if (Subtarget.hasAVX2() && EVTBits == 32 && Values.size() == 2) { | |||
8930 | SDValue Ops[4] = { Op.getOperand(0), Op.getOperand(1), | |||
8931 | DAG.getUNDEF(EltVT), DAG.getUNDEF(EltVT) }; | |||
8932 | auto CanSplat = [](SDValue Op, unsigned NumElems, ArrayRef<SDValue> Ops) { | |||
8933 | // Make sure all the even/odd operands match. | |||
8934 | for (unsigned i = 2; i != NumElems; ++i) | |||
8935 | if (Ops[i % 2] != Op.getOperand(i)) | |||
8936 | return false; | |||
8937 | return true; | |||
8938 | }; | |||
8939 | if (CanSplat(Op, NumElems, Ops)) { | |||
8940 | MVT WideEltVT = VT.isFloatingPoint() ? MVT::f64 : MVT::i64; | |||
8941 | MVT NarrowVT = MVT::getVectorVT(EltVT, 4); | |||
8942 | // Create a new build vector and cast to v2i64/v2f64. | |||
8943 | SDValue NewBV = DAG.getBitcast(MVT::getVectorVT(WideEltVT, 2), | |||
8944 | DAG.getBuildVector(NarrowVT, dl, Ops)); | |||
8945 | // Broadcast from v2i64/v2f64 and cast to final VT. | |||
8946 | MVT BcastVT = MVT::getVectorVT(WideEltVT, NumElems/2); | |||
8947 | return DAG.getBitcast(VT, DAG.getNode(X86ISD::VBROADCAST, dl, BcastVT, | |||
8948 | NewBV)); | |||
8949 | } | |||
8950 | } | |||
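A worked example, assuming AVX2 and 32-bit elements:

// v8i32 (a, b, a, b, a, b, a, b):
//   Ops = {a, b, undef, undef}  -->  build v4i32, bitcast to v2i64,
//   X86ISD::VBROADCAST to v4i64, bitcast back to v8i32
// i.e. one narrow build plus a broadcast instead of an 8-element build.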
8951 | ||||
8952 | // For AVX-length vectors, build the individual 128-bit pieces and use | |||
8953 | // shuffles to put them in place. | |||
8954 | if (VT.getSizeInBits() > 128) { | |||
8955 | MVT HVT = MVT::getVectorVT(EltVT, NumElems/2); | |||
8956 | ||||
8957 | // Build both the lower and upper subvector. | |||
8958 | SDValue Lower = | |||
8959 | DAG.getBuildVector(HVT, dl, Op->ops().slice(0, NumElems / 2)); | |||
8960 | SDValue Upper = DAG.getBuildVector( | |||
8961 | HVT, dl, Op->ops().slice(NumElems / 2, NumElems /2)); | |||
8962 | ||||
8963 | // Recreate the wider vector with the lower and upper part. | |||
8964 | return concatSubVectors(Lower, Upper, VT, NumElems, DAG, dl, | |||
8965 | VT.getSizeInBits() / 2); | |||
8966 | } | |||
8967 | ||||
8968 | // Let legalizer expand 2-wide build_vectors. | |||
8969 | if (EVTBits == 64) { | |||
8970 | if (NumNonZero == 1) { | |||
8971 | // One half is zero or undef. | |||
8972 | unsigned Idx = countTrailingZeros(NonZeros); | |||
8973 | SDValue V2 = DAG.getNode(ISD::SCALAR_TO_VECTOR, dl, VT, | |||
8974 | Op.getOperand(Idx)); | |||
8975 | return getShuffleVectorZeroOrUndef(V2, Idx, true, Subtarget, DAG); | |||
8976 | } | |||
8977 | return SDValue(); | |||
8978 | } | |||
8979 | ||||
8980 | // If element VT is < 32 bits, convert it to inserts into a zero vector. | |||
8981 | if (EVTBits == 8 && NumElems == 16) | |||
8982 | if (SDValue V = LowerBuildVectorv16i8(Op, NonZeros, NumNonZero, NumZero, | |||
8983 | DAG, Subtarget)) | |||
8984 | return V; | |||
8985 | ||||
8986 | if (EVTBits == 16 && NumElems == 8) | |||
8987 | if (SDValue V = LowerBuildVectorv8i16(Op, NonZeros, NumNonZero, NumZero, | |||
8988 | DAG, Subtarget)) | |||
8989 | return V; | |||
8990 | ||||
8991 | // If element VT is == 32 bits and has 4 elems, try to generate an INSERTPS | |||
8992 | if (EVTBits == 32 && NumElems == 4) | |||
8993 | if (SDValue V = LowerBuildVectorv4x32(Op, DAG, Subtarget)) | |||
8994 | return V; | |||
8995 | ||||
8996 | // If element VT is == 32 bits, turn it into a number of shuffles. | |||
8997 | if (NumElems == 4 && NumZero > 0) { | |||
8998 | SmallVector<SDValue, 8> Ops(NumElems); | |||
8999 | for (unsigned i = 0; i < 4; ++i) { | |||
9000 | bool isZero = !(NonZeros & (1ULL << i)); | |||
9001 | if (isZero) | |||
9002 | Ops[i] = getZeroVector(VT, Subtarget, DAG, dl); | |||
9003 | else | |||
9004 | Ops[i] = DAG.getNode(ISD::SCALAR_TO_VECTOR, dl, VT, Op.getOperand(i)); | |||
9005 | } | |||
9006 | ||||
9007 | for (unsigned i = 0; i < 2; ++i) { | |||
9008 | switch ((NonZeros >> (i*2)) & 0x3) { | |||
9009 | default: llvm_unreachable("Unexpected NonZero count"); | |||
9010 | case 0: | |||
9011 | Ops[i] = Ops[i*2]; // Must be a zero vector. | |||
9012 | break; | |||
9013 | case 1: | |||
9014 | Ops[i] = getMOVL(DAG, dl, VT, Ops[i*2+1], Ops[i*2]); | |||
9015 | break; | |||
9016 | case 2: | |||
9017 | Ops[i] = getMOVL(DAG, dl, VT, Ops[i*2], Ops[i*2+1]); | |||
9018 | break; | |||
9019 | case 3: | |||
9020 | Ops[i] = getUnpackl(DAG, dl, VT, Ops[i*2], Ops[i*2+1]); | |||
9021 | break; | |||
9022 | } | |||
9023 | } | |||
9024 | ||||
9025 | bool Reverse1 = (NonZeros & 0x3) == 2; | |||
9026 | bool Reverse2 = ((NonZeros & (0x3 << 2)) >> 2) == 2; | |||
9027 | int MaskVec[] = { | |||
9028 | Reverse1 ? 1 : 0, | |||
9029 | Reverse1 ? 0 : 1, | |||
9030 | static_cast<int>(Reverse2 ? NumElems+1 : NumElems), | |||
9031 | static_cast<int>(Reverse2 ? NumElems : NumElems+1) | |||
9032 | }; | |||
9033 | return DAG.getVectorShuffle(VT, dl, Ops[0], Ops[1], MaskVec); | |||
9034 | } | |||
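A standalone sketch of the NonZeros bookkeeping above for a hypothetical v4f32 build_vector <0, x, y, 0> (lanes 1 and 2 non-zero); illustrative only, not part of the original file:

#include <cstdint>
#include <cstdio>

int main() {
  const uint64_t NonZeros = 0b0110;  // bit i set <=> lane i is non-zero
  const unsigned NumElems = 4;
  // Low pair is <zero, x> (case 2 -> MOVL); high pair is <y, zero> (case 1).
  bool Reverse1 = (NonZeros & 0x3) == 2;
  bool Reverse2 = ((NonZeros & (0x3 << 2)) >> 2) == 2;
  int MaskVec[] = {Reverse1 ? 1 : 0, Reverse1 ? 0 : 1,
                   (int)(Reverse2 ? NumElems + 1 : NumElems),
                   (int)(Reverse2 ? NumElems : NumElems + 1)};
  // Prints "1 0 4 5": the final shuffle swaps the low pair of Ops[0] and
  // keeps the high pair of Ops[1] in order.
  for (int M : MaskVec)
    std::printf("%d ", M);
  std::printf("\n");
  return 0;
}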
9035 | ||||
9036 | assert(Values.size() > 1 && "Expected non-undef and non-splat vector"); | |||
9037 | ||||
9038 | // Check for a build_vector formed mostly from a shuffle plus a few inserts. | |||
9039 | if (SDValue Sh = buildFromShuffleMostly(Op, DAG)) | |||
9040 | return Sh; | |||
9041 | ||||
9042 | // For SSE 4.1, use insertps to put the high elements into the low element. | |||
9043 | if (Subtarget.hasSSE41()) { | |||
9044 | SDValue Result; | |||
9045 | if (!Op.getOperand(0).isUndef()) | |||
9046 | Result = DAG.getNode(ISD::SCALAR_TO_VECTOR, dl, VT, Op.getOperand(0)); | |||
9047 | else | |||
9048 | Result = DAG.getUNDEF(VT); | |||
9049 | ||||
9050 | for (unsigned i = 1; i < NumElems; ++i) { | |||
9051 | if (Op.getOperand(i).isUndef()) continue; | |||
9052 | Result = DAG.getNode(ISD::INSERT_VECTOR_ELT, dl, VT, Result, | |||
9053 | Op.getOperand(i), DAG.getIntPtrConstant(i, dl)); | |||
9054 | } | |||
9055 | return Result; | |||
9056 | } | |||
9057 | ||||
9058 | // Otherwise, expand into a number of unpckl*, start by extending each of | |||
9059 | // our (non-undef) elements to the full vector width with the element in the | |||
9060 | // bottom slot of the vector (which generates no code for SSE). | |||
9061 | SmallVector<SDValue, 8> Ops(NumElems); | |||
9062 | for (unsigned i = 0; i < NumElems; ++i) { | |||
9063 | if (!Op.getOperand(i).isUndef()) | |||
9064 | Ops[i] = DAG.getNode(ISD::SCALAR_TO_VECTOR, dl, VT, Op.getOperand(i)); | |||
9065 | else | |||
9066 | Ops[i] = DAG.getUNDEF(VT); | |||
9067 | } | |||
9068 | ||||
9069 | // Next, we iteratively mix elements, e.g. for v4f32: | |||
9070 | // Step 1: unpcklps 0, 1 ==> X: <?, ?, 1, 0> | |||
9071 | // : unpcklps 2, 3 ==> Y: <?, ?, 3, 2> | |||
9072 | // Step 2: unpcklpd X, Y ==> <3, 2, 1, 0> | |||
9073 | for (unsigned Scale = 1; Scale < NumElems; Scale *= 2) { | |||
9074 | // Generate scaled UNPCKL shuffle mask. | |||
9075 | SmallVector<int, 16> Mask; | |||
9076 | for(unsigned i = 0; i != Scale; ++i) | |||
9077 | Mask.push_back(i); | |||
9078 | for (unsigned i = 0; i != Scale; ++i) | |||
9079 | Mask.push_back(NumElems+i); | |||
9080 | Mask.append(NumElems - Mask.size(), SM_SentinelUndef); | |||
9081 | ||||
9082 | for (unsigned i = 0, e = NumElems / (2 * Scale); i != e; ++i) | |||
9083 | Ops[i] = DAG.getVectorShuffle(VT, dl, Ops[2*i], Ops[(2*i)+1], Mask); | |||
9084 | } | |||
9085 | return Ops[0]; | |||
9086 | } | |||
9087 | ||||
9088 | // 256-bit AVX can use the vinsertf128 instruction | |||
9089 | // to create 256-bit vectors from two other 128-bit ones. | |||
9090 | // TODO: Detect subvector broadcast here instead of DAG combine? | |||
9091 | static SDValue LowerAVXCONCAT_VECTORS(SDValue Op, SelectionDAG &DAG, | |||
9092 | const X86Subtarget &Subtarget) { | |||
9093 | SDLoc dl(Op); | |||
9094 | MVT ResVT = Op.getSimpleValueType(); | |||
9095 | ||||
9096 | assert((ResVT.is256BitVector() || | |||
9097 | ResVT.is512BitVector()) && "Value type must be 256-/512-bit wide"); | |||
9098 | ||||
9099 | unsigned NumOperands = Op.getNumOperands(); | |||
9100 | unsigned NumZero = 0; | |||
9101 | unsigned NumNonZero = 0; | |||
9102 | unsigned NonZeros = 0; | |||
9103 | for (unsigned i = 0; i != NumOperands; ++i) { | |||
9104 | SDValue SubVec = Op.getOperand(i); | |||
9105 | if (SubVec.isUndef()) | |||
9106 | continue; | |||
9107 | if (ISD::isBuildVectorAllZeros(SubVec.getNode())) | |||
9108 | ++NumZero; | |||
9109 | else { | |||
9110 | assert(i < sizeof(NonZeros) * CHAR_BIT); // Ensure the shift is in range. | |||
9111 | NonZeros |= 1 << i; | |||
9112 | ++NumNonZero; | |||
9113 | } | |||
9114 | } | |||
9115 | ||||
9116 | // If we have more than 2 non-zeros, build each half separately. | |||
9117 | if (NumNonZero > 2) { | |||
9118 | MVT HalfVT = MVT::getVectorVT(ResVT.getVectorElementType(), | |||
9119 | ResVT.getVectorNumElements()/2); | |||
9120 | ArrayRef<SDUse> Ops = Op->ops(); | |||
9121 | SDValue Lo = DAG.getNode(ISD::CONCAT_VECTORS, dl, HalfVT, | |||
9122 | Ops.slice(0, NumOperands/2)); | |||
9123 | SDValue Hi = DAG.getNode(ISD::CONCAT_VECTORS, dl, HalfVT, | |||
9124 | Ops.slice(NumOperands/2)); | |||
9125 | return DAG.getNode(ISD::CONCAT_VECTORS, dl, ResVT, Lo, Hi); | |||
9126 | } | |||
9127 | ||||
9128 | // Otherwise, build it up through insert_subvectors. | |||
9129 | SDValue Vec = NumZero ? getZeroVector(ResVT, Subtarget, DAG, dl) | |||
9130 | : DAG.getUNDEF(ResVT); | |||
9131 | ||||
9132 | MVT SubVT = Op.getOperand(0).getSimpleValueType(); | |||
9133 | unsigned NumSubElems = SubVT.getVectorNumElements(); | |||
9134 | for (unsigned i = 0; i != NumOperands; ++i) { | |||
9135 | if ((NonZeros & (1 << i)) == 0) | |||
9136 | continue; | |||
9137 | ||||
9138 | Vec = DAG.getNode(ISD::INSERT_SUBVECTOR, dl, ResVT, Vec, | |||
9139 | Op.getOperand(i), | |||
9140 | DAG.getIntPtrConstant(i * NumSubElems, dl)); | |||
9141 | } | |||
9142 | ||||
9143 | return Vec; | |||
9144 | } | |||
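A small example of the insert_subvector path (operands hypothetical): for (concat_vectors v4f32 %lo, v4f32 zeroinitializer), only operand 0 is non-zero, so Vec starts as a 256-bit zero vector and a single insert places %lo at element 0:

// (concat_vectors %lo, zeros)
//   -->  (insert_subvector (zero v8f32), %lo, 0)
// which selects to a single vinsertf128 or a zero-extending move.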
9145 | ||||
9146 | // Return true if all the operands of the given CONCAT_VECTORS node are zeros | |||
9147 | // except for the first one. (CONCAT_VECTORS Op, 0, 0,...,0) | |||
9148 | static bool isExpandWithZeros(const SDValue &Op) { | |||
9149 | assert(Op.getOpcode() == ISD::CONCAT_VECTORS && | |||
9150 | "Expand with zeros only possible in CONCAT_VECTORS nodes!"); | |||
9151 | ||||
9152 | for (unsigned i = 1; i < Op.getNumOperands(); i++) | |||
9153 | if (!ISD::isBuildVectorAllZeros(Op.getOperand(i).getNode())) | |||
9154 | return false; | |||
9155 | ||||
9156 | return true; | |||
9157 | } | |||

// Returns the source node if the given node is a type promotion (by
// concatenating i1 zeros) of the result of a node that already zeros all
// upper bits of a k-register; otherwise returns an empty SDValue.
static SDValue isTypePromotionOfi1ZeroUpBits(SDValue Op) {
  unsigned Opc = Op.getOpcode();

  assert(Opc == ISD::CONCAT_VECTORS &&
         Op.getSimpleValueType().getVectorElementType() == MVT::i1 &&
         "Unexpected node to check for type promotion!");

  // As long as we are concatenating zeros to the upper part of a previous node
  // result, climb up the tree until a node with a different opcode is
  // encountered.
  while (Opc == ISD::INSERT_SUBVECTOR || Opc == ISD::CONCAT_VECTORS) {
    if (Opc == ISD::INSERT_SUBVECTOR) {
      if (ISD::isBuildVectorAllZeros(Op.getOperand(0).getNode()) &&
          Op.getConstantOperandVal(2) == 0)
        Op = Op.getOperand(1);
      else
        return SDValue();
    } else { // Opc == ISD::CONCAT_VECTORS
      if (isExpandWithZeros(Op))
        Op = Op.getOperand(0);
      else
        return SDValue();
    }
    Opc = Op.getOpcode();
  }

  // Check if the first inserted node zeroes the upper bits, or an 'and' result
  // of a node that zeros the upper bits (its masked version).
  if (isMaskedZeroUpperBitsvXi1(Op.getOpcode()) ||
      (Op.getOpcode() == ISD::AND &&
       (isMaskedZeroUpperBitsvXi1(Op.getOperand(0).getOpcode()) ||
        isMaskedZeroUpperBitsvXi1(Op.getOperand(1).getOpcode())))) {
    return Op;
  }

  return SDValue();
}

// TODO: Merge this with LowerAVXCONCAT_VECTORS?
static SDValue LowerCONCAT_VECTORSvXi1(SDValue Op,
                                       const X86Subtarget &Subtarget,
                                       SelectionDAG &DAG) {
  SDLoc dl(Op);
  MVT ResVT = Op.getSimpleValueType();
  unsigned NumOperands = Op.getNumOperands();

  assert(NumOperands > 1 && isPowerOf2_32(NumOperands) &&
         "Unexpected number of operands in CONCAT_VECTORS");

  // If this node promotes - by concatenating zeroes - the type of the result
  // of a node whose instruction already zeroes all upper (irrelevant) bits of
  // the output register, mark it as legal and catch the pattern in instruction
  // selection to avoid emitting extra instructions (for zeroing upper bits).
  if (SDValue Promoted = isTypePromotionOfi1ZeroUpBits(Op))
    return widenSubVector(ResVT, Promoted, true, Subtarget, DAG, dl);

  unsigned NumZero = 0;
  unsigned NumNonZero = 0;
  uint64_t NonZeros = 0;
  for (unsigned i = 0; i != NumOperands; ++i) {
    SDValue SubVec = Op.getOperand(i);
    if (SubVec.isUndef())
      continue;
    if (ISD::isBuildVectorAllZeros(SubVec.getNode()))
      ++NumZero;
    else {
      assert(i < sizeof(NonZeros) * CHAR_BIT); // Ensure the shift is in range.
      NonZeros |= (uint64_t)1 << i;
      ++NumNonZero;
    }
  }

  // If there are zero or one non-zeros we can handle this very simply.
  if (NumNonZero <= 1) {
    SDValue Vec = NumZero ? getZeroVector(ResVT, Subtarget, DAG, dl)
                          : DAG.getUNDEF(ResVT);
    if (!NumNonZero)
      return Vec;
    unsigned Idx = countTrailingZeros(NonZeros);
    SDValue SubVec = Op.getOperand(Idx);
    unsigned SubVecNumElts = SubVec.getSimpleValueType().getVectorNumElements();
    return DAG.getNode(ISD::INSERT_SUBVECTOR, dl, ResVT, Vec, SubVec,
                       DAG.getIntPtrConstant(Idx * SubVecNumElts, dl));
  }

  if (NumOperands > 2) {
    MVT HalfVT = MVT::getVectorVT(ResVT.getVectorElementType(),
                                  ResVT.getVectorNumElements()/2);
    ArrayRef<SDUse> Ops = Op->ops();
    SDValue Lo = DAG.getNode(ISD::CONCAT_VECTORS, dl, HalfVT,
                             Ops.slice(0, NumOperands/2));
    SDValue Hi = DAG.getNode(ISD::CONCAT_VECTORS, dl, HalfVT,
                             Ops.slice(NumOperands/2));
    return DAG.getNode(ISD::CONCAT_VECTORS, dl, ResVT, Lo, Hi);
  }

  assert(NumNonZero == 2 && "Simple cases not handled?");

  if (ResVT.getVectorNumElements() >= 16)
    return Op; // The operation is legal with KUNPCK

  SDValue Vec = DAG.getNode(ISD::INSERT_SUBVECTOR, dl, ResVT,
                            DAG.getUNDEF(ResVT), Op.getOperand(0),
                            DAG.getIntPtrConstant(0, dl));
  unsigned NumElems = ResVT.getVectorNumElements();
  return DAG.getNode(ISD::INSERT_SUBVECTOR, dl, ResVT, Vec, Op.getOperand(1),
                     DAG.getIntPtrConstant(NumElems/2, dl));
}

static SDValue LowerCONCAT_VECTORS(SDValue Op,
                                   const X86Subtarget &Subtarget,
                                   SelectionDAG &DAG) {
  MVT VT = Op.getSimpleValueType();
  if (VT.getVectorElementType() == MVT::i1)
    return LowerCONCAT_VECTORSvXi1(Op, Subtarget, DAG);

  assert((VT.is256BitVector() && Op.getNumOperands() == 2) ||
         (VT.is512BitVector() && (Op.getNumOperands() == 2 ||
          Op.getNumOperands() == 4)));

  // AVX can use the vinsertf128 instruction to create 256-bit vectors
  // from two other 128-bit ones.

  // A 512-bit vector may contain 2 256-bit vectors or 4 128-bit vectors.
  return LowerAVXCONCAT_VECTORS(Op, DAG, Subtarget);
}

//===----------------------------------------------------------------------===//
// Vector shuffle lowering
//
// This is an experimental code path for lowering vector shuffles on x86. It is
// designed to handle arbitrary vector shuffles and blends, gracefully
// degrading performance as necessary. It works hard to recognize idiomatic
// shuffles and lower them to optimal instruction patterns without leaving
// a framework that allows reasonably efficient handling of all vector shuffle
// patterns.
//===----------------------------------------------------------------------===//

/// Tiny helper function to identify a no-op mask.
///
/// This is a somewhat boring predicate function. It checks whether the mask
/// array input, which is assumed to be a single-input shuffle mask of the kind
/// used by the X86 shuffle instructions (not a fully general
/// ShuffleVectorSDNode mask), requires any shuffles to occur. Both undef and
/// an in-place shuffle are 'no-op's.
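///
/// For example (illustrative values): {-1, 1, 2, -1} is a no-op mask for a
/// 4-element vector, since every defined element stays in place, while
/// {1, 0, 2, 3} is not.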
static bool isNoopShuffleMask(ArrayRef<int> Mask) {
  for (int i = 0, Size = Mask.size(); i < Size; ++i) {
    assert(Mask[i] >= -1 && "Out of bound mask element!");
    if (Mask[i] >= 0 && Mask[i] != i)
      return false;
  }
  return true;
}

/// Test whether there are elements crossing 128-bit lanes in this
/// shuffle mask.
///
/// X86 divides up its shuffles into in-lane and cross-lane shuffle operations
/// and we routinely test for these.
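///
/// For example (illustrative values): for v8f32 the lane width is four
/// elements, so {4, 1, 2, 3, 0, 5, 6, 7} crosses lanes (elements 0 and 4
/// trade 128-bit halves) while {3, 2, 1, 0, 7, 6, 5, 4} stays in-lane.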
static bool is128BitLaneCrossingShuffleMask(MVT VT, ArrayRef<int> Mask) {
  int LaneSize = 128 / VT.getScalarSizeInBits();
  int Size = Mask.size();
  for (int i = 0; i < Size; ++i)
    if (Mask[i] >= 0 && (Mask[i] % Size) / LaneSize != i / LaneSize)
      return true;
  return false;
}

/// Test whether a shuffle mask is equivalent within each sub-lane.
///
/// This checks a shuffle mask to see if it is performing the same
/// lane-relative shuffle in each sub-lane. This trivially implies
/// that it is also not lane-crossing. It may however involve a blend from the
/// same lane of a second vector.
///
/// The specific repeated shuffle mask is populated in \p RepeatedMask, as it is
/// non-trivial to compute in the face of undef lanes. The representation is
/// suitable for use with existing 128-bit shuffles as entries from the second
/// vector have been remapped to [LaneSize, 2*LaneSize).
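///
/// For example (illustrative values): for v8f32 with 128-bit lanes (four
/// elements per lane), the unpack-style mask {0, 8, 1, 9, 4, 12, 5, 13}
/// repeats and yields RepeatedMask = {0, 4, 1, 5}, with second-vector
/// entries remapped into [4, 8).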
static bool isRepeatedShuffleMask(unsigned LaneSizeInBits, MVT VT,
                                  ArrayRef<int> Mask,
                                  SmallVectorImpl<int> &RepeatedMask) {
  auto LaneSize = LaneSizeInBits / VT.getScalarSizeInBits();
  RepeatedMask.assign(LaneSize, -1);
  int Size = Mask.size();
  for (int i = 0; i < Size; ++i) {
    assert(Mask[i] == SM_SentinelUndef || Mask[i] >= 0);
    if (Mask[i] < 0)
      continue;
    if ((Mask[i] % Size) / LaneSize != i / LaneSize)
      // This entry crosses lanes, so there is no way to model this shuffle.
      return false;

    // Ok, handle the in-lane shuffles by detecting if and when they repeat.
    // Adjust second vector indices to start at LaneSize instead of Size.
    int LocalM = Mask[i] < Size ? Mask[i] % LaneSize
                                : Mask[i] % LaneSize + LaneSize;
    if (RepeatedMask[i % LaneSize] < 0)
      // This is the first non-undef entry in this slot of a 128-bit lane.
      RepeatedMask[i % LaneSize] = LocalM;
    else if (RepeatedMask[i % LaneSize] != LocalM)
      // Found a mismatch with the repeated mask.
      return false;
  }
  return true;
}

/// Test whether a shuffle mask is equivalent within each 128-bit lane.
static bool
is128BitLaneRepeatedShuffleMask(MVT VT, ArrayRef<int> Mask,
                                SmallVectorImpl<int> &RepeatedMask) {
  return isRepeatedShuffleMask(128, VT, Mask, RepeatedMask);
}

static bool
is128BitLaneRepeatedShuffleMask(MVT VT, ArrayRef<int> Mask) {
  SmallVector<int, 32> RepeatedMask;
  return isRepeatedShuffleMask(128, VT, Mask, RepeatedMask);
}

/// Test whether a shuffle mask is equivalent within each 256-bit lane.
static bool
is256BitLaneRepeatedShuffleMask(MVT VT, ArrayRef<int> Mask,
                                SmallVectorImpl<int> &RepeatedMask) {
  return isRepeatedShuffleMask(256, VT, Mask, RepeatedMask);
}

/// Test whether a target shuffle mask is equivalent within each sub-lane.
/// Unlike isRepeatedShuffleMask we must respect SM_SentinelZero.
static bool isRepeatedTargetShuffleMask(unsigned LaneSizeInBits, MVT VT,
                                        ArrayRef<int> Mask,
                                        SmallVectorImpl<int> &RepeatedMask) {
  int LaneSize = LaneSizeInBits / VT.getScalarSizeInBits();
  RepeatedMask.assign(LaneSize, SM_SentinelUndef);
  int Size = Mask.size();
  for (int i = 0; i < Size; ++i) {
    assert(isUndefOrZero(Mask[i]) || (Mask[i] >= 0));
    if (Mask[i] == SM_SentinelUndef)
      continue;
    if (Mask[i] == SM_SentinelZero) {
      if (!isUndefOrZero(RepeatedMask[i % LaneSize]))
        return false;
      RepeatedMask[i % LaneSize] = SM_SentinelZero;
      continue;
    }
    if ((Mask[i] % Size) / LaneSize != i / LaneSize)
      // This entry crosses lanes, so there is no way to model this shuffle.
      return false;

    // Ok, handle the in-lane shuffles by detecting if and when they repeat.
    // Adjust second vector indices to start at LaneSize instead of Size.
    int LocalM =
        Mask[i] < Size ? Mask[i] % LaneSize : Mask[i] % LaneSize + LaneSize;
    if (RepeatedMask[i % LaneSize] == SM_SentinelUndef)
      // This is the first non-undef entry in this slot of a 128-bit lane.
      RepeatedMask[i % LaneSize] = LocalM;
    else if (RepeatedMask[i % LaneSize] != LocalM)
      // Found a mismatch with the repeated mask.
      return false;
  }
  return true;
}

/// Checks whether a shuffle mask is equivalent to an explicit list of
/// arguments.
///
/// This is a fast way to test a shuffle mask against a fixed pattern:
///
///   if (isShuffleEquivalent(V1, V2, Mask, {3, 2, 1, 0})) { ... }
///
/// It returns true if the mask is exactly as wide as the expected mask, and
/// each element of the mask is either -1 (signifying undef) or the value given
/// in the expected mask.
static bool isShuffleEquivalent(SDValue V1, SDValue V2, ArrayRef<int> Mask,
                                ArrayRef<int> ExpectedMask) {
  if (Mask.size() != ExpectedMask.size())
    return false;

  int Size = Mask.size();

  // If the values are build vectors, we can look through them to find
  // equivalent inputs that make the shuffles equivalent.
  auto *BV1 = dyn_cast<BuildVectorSDNode>(V1);
  auto *BV2 = dyn_cast<BuildVectorSDNode>(V2);

  for (int i = 0; i < Size; ++i) {
    assert(Mask[i] >= -1 && "Out of bound mask element!");
    if (Mask[i] >= 0 && Mask[i] != ExpectedMask[i]) {
      auto *MaskBV = Mask[i] < Size ? BV1 : BV2;
      auto *ExpectedBV = ExpectedMask[i] < Size ? BV1 : BV2;
      if (!MaskBV || !ExpectedBV ||
          MaskBV->getOperand(Mask[i] % Size) !=
              ExpectedBV->getOperand(ExpectedMask[i] % Size))
        return false;
    }
  }

  return true;
}

/// Checks whether a target shuffle mask is equivalent to an explicit pattern.
///
/// The masks must be exactly the same width.
///
/// If an element in Mask matches SM_SentinelUndef (-1) then the corresponding
/// value in ExpectedMask is always accepted. Otherwise the indices must match.
///
/// SM_SentinelZero is accepted as a valid negative index but must match in
/// both.
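///
/// For example (illustrative values): {SM_SentinelUndef, 1, SM_SentinelZero,
/// 3} matches {0, 1, SM_SentinelZero, 3}, but would fail if the
/// SM_SentinelZero entries disagreed.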
static bool isTargetShuffleEquivalent(ArrayRef<int> Mask,
                                      ArrayRef<int> ExpectedMask) {
  int Size = Mask.size();
  if (Size != (int)ExpectedMask.size())
    return false;

  for (int i = 0; i < Size; ++i)
    if (Mask[i] == SM_SentinelUndef)
      continue;
    else if (Mask[i] < 0 && Mask[i] != SM_SentinelZero)
      return false;
    else if (Mask[i] != ExpectedMask[i])
      return false;

  return true;
}

// Merges a general DAG shuffle mask and zeroable bit mask into a target shuffle
// mask.
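//
// Example (illustrative values): Mask = {0, 5, -1, 3} with Zeroable = 0b0010
// (element 1 zeroable) becomes {0, SM_SentinelZero, SM_SentinelUndef, 3}.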
static SmallVector<int, 64> createTargetShuffleMask(ArrayRef<int> Mask,
                                                    const APInt &Zeroable) {
  int NumElts = Mask.size();
  assert(NumElts == (int)Zeroable.getBitWidth() && "Mismatch mask sizes");

  SmallVector<int, 64> TargetMask(NumElts, SM_SentinelUndef);
  for (int i = 0; i != NumElts; ++i) {
    int M = Mask[i];
    if (M == SM_SentinelUndef)
      continue;
    assert(0 <= M && M < (2 * NumElts) && "Out of range shuffle index");
    TargetMask[i] = (Zeroable[i] ? SM_SentinelZero : M);
  }
  return TargetMask;
}

// Attempt to create a shuffle mask from a VSELECT condition mask.
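//
// Example (illustrative values): for a v4i32 VSELECT whose constant condition
// is <-1, 0, undef, -1>, the resulting mask is {0, 5, 6, 3}: true lanes pick
// from the first operand (indices 0..3), false or undef lanes from the second
// (indices 4..7).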
static bool createShuffleMaskFromVSELECT(SmallVectorImpl<int> &Mask,
                                         SDValue Cond) {
  if (!ISD::isBuildVectorOfConstantSDNodes(Cond.getNode()))
    return false;

  unsigned Size = Cond.getValueType().getVectorNumElements();
  Mask.resize(Size, SM_SentinelUndef);

  for (int i = 0; i != (int)Size; ++i) {
    SDValue CondElt = Cond.getOperand(i);
    Mask[i] = i;
    // Arbitrarily choose from the 2nd operand if the select condition element
    // is undef.
    // TODO: Can we do better by matching patterns such as even/odd?
    if (CondElt.isUndef() || isNullConstant(CondElt))
      Mask[i] += Size;
  }

  return true;
}

// Check if the shuffle mask is suitable for the AVX vpunpcklwd or vpunpckhwd
// instructions.
static bool isUnpackWdShuffleMask(ArrayRef<int> Mask, MVT VT) {
  if (VT != MVT::v8i32 && VT != MVT::v8f32)
    return false;

  SmallVector<int, 8> Unpcklwd;
  createUnpackShuffleMask(MVT::v8i16, Unpcklwd, /* Lo = */ true,
                          /* Unary = */ false);
  SmallVector<int, 8> Unpckhwd;
  createUnpackShuffleMask(MVT::v8i16, Unpckhwd, /* Lo = */ false,
                          /* Unary = */ false);
  bool IsUnpackwdMask = (isTargetShuffleEquivalent(Mask, Unpcklwd) ||
                         isTargetShuffleEquivalent(Mask, Unpckhwd));
  return IsUnpackwdMask;
}

/// Get a 4-lane 8-bit shuffle immediate for a mask.
///
/// This helper function produces an 8-bit shuffle immediate corresponding to
/// the ubiquitous shuffle encoding scheme used in x86 instructions for
/// shuffling 4 lanes. It can be used with most of the PSHUF instructions, for
/// example.
///
/// NB: We rely heavily on "undef" masks preserving the input lane.
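///
/// For example (illustrative values): Mask = {3, 1, 0, 2} encodes as
/// (3 << 0) | (1 << 2) | (0 << 4) | (2 << 6) = 0x87, and an undef lane such
/// as Mask = {-1, 1, 0, 2} keeps its identity index:
/// (0 << 0) | (1 << 2) | (0 << 4) | (2 << 6) = 0x84.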
static unsigned getV4X86ShuffleImm(ArrayRef<int> Mask) {
  assert(Mask.size() == 4 && "Only 4-lane shuffle masks");
  assert(Mask[0] >= -1 && Mask[0] < 4 && "Out of bound mask element!");
  assert(Mask[1] >= -1 && Mask[1] < 4 && "Out of bound mask element!");
  assert(Mask[2] >= -1 && Mask[2] < 4 && "Out of bound mask element!");
  assert(Mask[3] >= -1 && Mask[3] < 4 && "Out of bound mask element!");

  unsigned Imm = 0;
  Imm |= (Mask[0] < 0 ? 0 : Mask[0]) << 0;
  Imm |= (Mask[1] < 0 ? 1 : Mask[1]) << 2;
  Imm |= (Mask[2] < 0 ? 2 : Mask[2]) << 4;
  Imm |= (Mask[3] < 0 ? 3 : Mask[3]) << 6;
  return Imm;
}

static SDValue getV4X86ShuffleImm8ForMask(ArrayRef<int> Mask, const SDLoc &DL,
                                          SelectionDAG &DAG) {
  return DAG.getConstant(getV4X86ShuffleImm(Mask), DL, MVT::i8);
}

/// Compute whether each element of a shuffle is zeroable.
///
/// A "zeroable" vector shuffle element is one which can be lowered to zero.
/// Either it is an undef element in the shuffle mask, the element of the input
/// referenced is undef, or the element of the input referenced is known to be
/// zero. Many x86 shuffles can zero lanes cheaply and we often want to handle
/// as many lanes with this technique as possible to simplify the remaining
/// shuffle.
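///
/// For example (illustrative values): with Mask = {0, -1, 4, 5} and an
/// all-zeros V2, element 1 is zeroable because it is undef, and elements 2
/// and 3 are zeroable because they come from V2, giving Zeroable = 0b1110.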
static APInt computeZeroableShuffleElements(ArrayRef<int> Mask,
                                            SDValue V1, SDValue V2) {
  APInt Zeroable(Mask.size(), 0);
  V1 = peekThroughBitcasts(V1);
  V2 = peekThroughBitcasts(V2);

  bool V1IsZero = ISD::isBuildVectorAllZeros(V1.getNode());
  bool V2IsZero = ISD::isBuildVectorAllZeros(V2.getNode());

  int VectorSizeInBits = V1.getValueSizeInBits();
  int ScalarSizeInBits = VectorSizeInBits / Mask.size();
  assert(!(VectorSizeInBits % ScalarSizeInBits) && "Illegal shuffle mask size");

  for (int i = 0, Size = Mask.size(); i < Size; ++i) {
    int M = Mask[i];
    // Handle the easy cases.
    if (M < 0 || (M >= 0 && M < Size && V1IsZero) || (M >= Size && V2IsZero)) {
      Zeroable.setBit(i);
      continue;
    }

    // Determine shuffle input and normalize the mask.
    SDValue V = M < Size ? V1 : V2;
    M %= Size;

    // Currently we can only search BUILD_VECTOR for UNDEF/ZERO elements.
    if (V.getOpcode() != ISD::BUILD_VECTOR)
      continue;

    // If the BUILD_VECTOR has fewer elements, then the bitcasted portion of
    // the (larger) source element must be UNDEF/ZERO.
    if ((Size % V.getNumOperands()) == 0) {
      int Scale = Size / V->getNumOperands();
      SDValue Op = V.getOperand(M / Scale);
      if (Op.isUndef() || X86::isZeroNode(Op))
        Zeroable.setBit(i);
      else if (ConstantSDNode *Cst = dyn_cast<ConstantSDNode>(Op)) {
        APInt Val = Cst->getAPIntValue();
        Val.lshrInPlace((M % Scale) * ScalarSizeInBits);
        Val = Val.getLoBits(ScalarSizeInBits);
        if (Val == 0)
          Zeroable.setBit(i);
      } else if (ConstantFPSDNode *Cst = dyn_cast<ConstantFPSDNode>(Op)) {
        APInt Val = Cst->getValueAPF().bitcastToAPInt();
        Val.lshrInPlace((M % Scale) * ScalarSizeInBits);
        Val = Val.getLoBits(ScalarSizeInBits);
        if (Val == 0)
          Zeroable.setBit(i);
      }
      continue;
    }

    // If the BUILD_VECTOR has more elements, then all the (smaller) source
    // elements must be UNDEF or ZERO.
    if ((V.getNumOperands() % Size) == 0) {
      int Scale = V->getNumOperands() / Size;
      bool AllZeroable = true;
      for (int j = 0; j < Scale; ++j) {
        SDValue Op = V.getOperand((M * Scale) + j);
        AllZeroable &= (Op.isUndef() || X86::isZeroNode(Op));
      }
      if (AllZeroable)
        Zeroable.setBit(i);
      continue;
    }
  }

  return Zeroable;
}

// The shuffle result has the form:
// 0*a[0] 0*a[1] ... 0*a[n], n >= 0, where the a[i] elements appear in
// ascending order. Each element of Zeroable corresponds to a particular
// element of Mask, as described in computeZeroableShuffleElements.
//
// The function looks for a sub-mask whose non-zero elements are in
// increasing order. If such a sub-mask exists, the function returns true.
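//
// Example (illustrative values): for a v4i32 shuffle with Mask = {2, 4, 5, 6}
// and Zeroable = 0b0001, the non-zero elements {4, 5, 6} are consecutive and
// start at the vector width, so the zero side is on the left
// (IsZeroSideLeft == true).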
static bool isNonZeroElementsInOrder(const APInt &Zeroable,
                                     ArrayRef<int> Mask, const EVT &VectorType,
                                     bool &IsZeroSideLeft) {
  int NextElement = -1;
  // Check if the Mask's non-zero elements are in increasing order.
  for (int i = 0, e = Mask.size(); i < e; i++) {
    // Checks that the mask's zero elements are built from only zeros.
    assert(Mask[i] >= -1 && "Out of bound mask element!");
    if (Mask[i] < 0)
      return false;
    if (Zeroable[i])
      continue;
    // Find the lowest non-zero element.
    if (NextElement < 0) {
      NextElement = Mask[i] != 0 ? VectorType.getVectorNumElements() : 0;
      IsZeroSideLeft = NextElement != 0;
    }
    // Exit if the mask's non-zero elements are not in increasing order.
    if (NextElement != Mask[i])
      return false;
    NextElement++;
  }
  return true;
}

/// Try to lower a shuffle with a single PSHUFB of V1 or V2.
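///
/// For example (illustrative values): a v8i16 shuffle of V1 with
/// Mask = {0, 0, 2, 2, 4, 4, 6, 6} becomes a v16i8 PSHUFB with byte mask
/// {0,1, 0,1, 4,5, 4,5, 8,9, 8,9, 12,13, 12,13}; a zeroable element would
/// contribute 0x80 bytes instead.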
static SDValue lowerVectorShuffleWithPSHUFB(const SDLoc &DL, MVT VT,
                                            ArrayRef<int> Mask, SDValue V1,
                                            SDValue V2,
                                            const APInt &Zeroable,
                                            const X86Subtarget &Subtarget,
                                            SelectionDAG &DAG) {
  int Size = Mask.size();
  int LaneSize = 128 / VT.getScalarSizeInBits();
  const int NumBytes = VT.getSizeInBits() / 8;
  const int NumEltBytes = VT.getScalarSizeInBits() / 8;

  assert((Subtarget.hasSSSE3() && VT.is128BitVector()) ||
         (Subtarget.hasAVX2() && VT.is256BitVector()) ||
         (Subtarget.hasBWI() && VT.is512BitVector()));

  SmallVector<SDValue, 64> PSHUFBMask(NumBytes);
  // Sign bit set in i8 mask means zero element.
  SDValue ZeroMask = DAG.getConstant(0x80, DL, MVT::i8);

  SDValue V;
  for (int i = 0; i < NumBytes; ++i) {
    int M = Mask[i / NumEltBytes];
    if (M < 0) {
      PSHUFBMask[i] = DAG.getUNDEF(MVT::i8);
      continue;
    }
    if (Zeroable[i / NumEltBytes]) {
      PSHUFBMask[i] = ZeroMask;
      continue;
    }

    // We can only use a single input of V1 or V2.
    SDValue SrcV = (M >= Size ? V2 : V1);
    if (V && V != SrcV)
      return SDValue();
    V = SrcV;
    M %= Size;

    // PSHUFB can't cross lanes, ensure this doesn't happen.
    if ((M / LaneSize) != ((i / NumEltBytes) / LaneSize))
      return SDValue();

    M = M % LaneSize;
    M = M * NumEltBytes + (i % NumEltBytes);
    PSHUFBMask[i] = DAG.getConstant(M, DL, MVT::i8);
  }
  assert(V && "Failed to find a source input");

  MVT I8VT = MVT::getVectorVT(MVT::i8, NumBytes);
  return DAG.getBitcast(
      VT, DAG.getNode(X86ISD::PSHUFB, DL, I8VT, DAG.getBitcast(I8VT, V),
                      DAG.getBuildVector(I8VT, DL, PSHUFBMask)));
}

static SDValue getMaskNode(SDValue Mask, MVT MaskVT,
                           const X86Subtarget &Subtarget, SelectionDAG &DAG,
                           const SDLoc &dl);

// X86 has a dedicated shuffle pattern that can be lowered to VEXPAND.
static SDValue lowerVectorShuffleToEXPAND(const SDLoc &DL, MVT VT,
                                          const APInt &Zeroable,
                                          ArrayRef<int> Mask, SDValue &V1,
                                          SDValue &V2, SelectionDAG &DAG,
                                          const X86Subtarget &Subtarget) {
  bool IsLeftZeroSide = true;
  if (!isNonZeroElementsInOrder(Zeroable, Mask, V1.getValueType(),
                                IsLeftZeroSide))
    return SDValue();
  unsigned VEXPANDMask = (~Zeroable).getZExtValue();
  MVT IntegerType =
      MVT::getIntegerVT(std::max((int)VT.getVectorNumElements(), 8));
  SDValue MaskNode = DAG.getConstant(VEXPANDMask, DL, IntegerType);
  unsigned NumElts = VT.getVectorNumElements();
  assert((NumElts == 4 || NumElts == 8 || NumElts == 16) &&
         "Unexpected number of vector elements");
  SDValue VMask = getMaskNode(MaskNode, MVT::getVectorVT(MVT::i1, NumElts),
                              Subtarget, DAG, DL);
  SDValue ZeroVector = getZeroVector(VT, Subtarget, DAG, DL);
  SDValue ExpandedVector = IsLeftZeroSide ? V2 : V1;
  return DAG.getSelect(DL, VT, VMask,
                       DAG.getNode(X86ISD::EXPAND, DL, VT, ExpandedVector),
                       ZeroVector);
}

static bool matchVectorShuffleWithUNPCK(MVT VT, SDValue &V1, SDValue &V2,
                                        unsigned &UnpackOpcode, bool IsUnary,
                                        ArrayRef<int> TargetMask,
                                        const SDLoc &DL, SelectionDAG &DAG,
                                        const X86Subtarget &Subtarget) {
  int NumElts = VT.getVectorNumElements();

  bool Undef1 = true, Undef2 = true, Zero1 = true, Zero2 = true;
  for (int i = 0; i != NumElts; i += 2) {
    int M1 = TargetMask[i + 0];
    int M2 = TargetMask[i + 1];
    Undef1 &= (SM_SentinelUndef == M1);
    Undef2 &= (SM_SentinelUndef == M2);
    Zero1 &= isUndefOrZero(M1);
    Zero2 &= isUndefOrZero(M2);
  }
  assert(!((Undef1 || Zero1) && (Undef2 || Zero2)) &&
         "Zeroable shuffle detected");

  // Attempt to match the target mask against the unpack lo/hi mask patterns.
  SmallVector<int, 64> Unpckl, Unpckh;
  createUnpackShuffleMask(VT, Unpckl, /* Lo = */ true, IsUnary);
  if (isTargetShuffleEquivalent(TargetMask, Unpckl)) {
    UnpackOpcode = X86ISD::UNPCKL;
    V2 = (Undef2 ? DAG.getUNDEF(VT) : (IsUnary ? V1 : V2));
    V1 = (Undef1 ? DAG.getUNDEF(VT) : V1);
    return true;
  }

  createUnpackShuffleMask(VT, Unpckh, /* Lo = */ false, IsUnary);
  if (isTargetShuffleEquivalent(TargetMask, Unpckh)) {
    UnpackOpcode = X86ISD::UNPCKH;
    V2 = (Undef2 ? DAG.getUNDEF(VT) : (IsUnary ? V1 : V2));
    V1 = (Undef1 ? DAG.getUNDEF(VT) : V1);
    return true;
  }

  // If a unary shuffle, attempt to match as an unpack lo/hi with zero.
  if (IsUnary && (Zero1 || Zero2)) {
    // Don't bother if we can blend instead.
    if ((Subtarget.hasSSE41() || VT == MVT::v2i64 || VT == MVT::v2f64) &&
        isSequentialOrUndefOrZeroInRange(TargetMask, 0, NumElts, 0))
      return false;

    bool MatchLo = true, MatchHi = true;
    for (int i = 0; (i != NumElts) && (MatchLo || MatchHi); ++i) {
      int M = TargetMask[i];

      // Ignore if the input is known to be zero or the index is undef.
      if ((((i & 1) == 0) && Zero1) || (((i & 1) == 1) && Zero2) ||
          (M == SM_SentinelUndef))
        continue;

      MatchLo &= (M == Unpckl[i]);
      MatchHi &= (M == Unpckh[i]);
    }

    if (MatchLo || MatchHi) {
      UnpackOpcode = MatchLo ? X86ISD::UNPCKL : X86ISD::UNPCKH;
      V2 = Zero2 ? getZeroVector(VT, Subtarget, DAG, DL) : V1;
      V1 = Zero1 ? getZeroVector(VT, Subtarget, DAG, DL) : V1;
      return true;
    }
  }

  // If a binary shuffle, commute and try again.
  if (!IsUnary) {
    ShuffleVectorSDNode::commuteMask(Unpckl);
    if (isTargetShuffleEquivalent(TargetMask, Unpckl)) {
      UnpackOpcode = X86ISD::UNPCKL;
      std::swap(V1, V2);
      return true;
    }

    ShuffleVectorSDNode::commuteMask(Unpckh);
    if (isTargetShuffleEquivalent(TargetMask, Unpckh)) {
      UnpackOpcode = X86ISD::UNPCKH;
      std::swap(V1, V2);
      return true;
    }
  }

  return false;
}

// X86 has dedicated unpack instructions that can handle specific blend
// operations: UNPCKH and UNPCKL.
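//
// Example (illustrative values): a v4i32 shuffle with Mask = {0, 4, 1, 5}
// matches the unpack-lo pattern and lowers to UNPCKL(V1, V2), while
// {2, 6, 3, 7} matches unpack-hi.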
static SDValue lowerVectorShuffleWithUNPCK(const SDLoc &DL, MVT VT,
                                           ArrayRef<int> Mask, SDValue V1,
                                           SDValue V2, SelectionDAG &DAG) {
  SmallVector<int, 8> Unpckl;
  createUnpackShuffleMask(VT, Unpckl, /* Lo = */ true, /* Unary = */ false);
  if (isShuffleEquivalent(V1, V2, Mask, Unpckl))
    return DAG.getNode(X86ISD::UNPCKL, DL, VT, V1, V2);

  SmallVector<int, 8> Unpckh;
  createUnpackShuffleMask(VT, Unpckh, /* Lo = */ false, /* Unary = */ false);
  if (isShuffleEquivalent(V1, V2, Mask, Unpckh))
    return DAG.getNode(X86ISD::UNPCKH, DL, VT, V1, V2);

  // Commute and try again.
  ShuffleVectorSDNode::commuteMask(Unpckl);
  if (isShuffleEquivalent(V1, V2, Mask, Unpckl))
    return DAG.getNode(X86ISD::UNPCKL, DL, VT, V2, V1);

  ShuffleVectorSDNode::commuteMask(Unpckh);
  if (isShuffleEquivalent(V1, V2, Mask, Unpckh))
    return DAG.getNode(X86ISD::UNPCKH, DL, VT, V2, V1);

  return SDValue();
}

static bool matchVectorShuffleAsVPMOV(ArrayRef<int> Mask, bool SwappedOps,
                                      int Delta) {
  int Size = (int)Mask.size();
  int Split = Size / Delta;
  int TruncatedVectorStart = SwappedOps ? Size : 0;

  // Match for mask starting with e.g.: <8, 10, 12, 14,... or <0, 2, 4, 6,...
  if (!isSequentialOrUndefInRange(Mask, 0, Split, TruncatedVectorStart, Delta))
    return false;

  // The rest of the mask should not refer to the truncated vector's elements.
  if (isAnyInRange(Mask.slice(Split, Size - Split), TruncatedVectorStart,
                   TruncatedVectorStart + Size))
    return false;

  return true;
}

// Try to lower trunc+vector_shuffle to a vpmovdb or a vpmovdw instruction.
//
// An example is the following:
//
// t0: ch = EntryToken
//           t2: v4i64,ch = CopyFromReg t0, Register:v4i64 %0
//         t25: v4i32 = truncate t2
//       t41: v8i16 = bitcast t25
//       t21: v8i16 = BUILD_VECTOR undef:i16, undef:i16, undef:i16, undef:i16,
//       Constant:i16<0>, Constant:i16<0>, Constant:i16<0>, Constant:i16<0>
//     t51: v8i16 = vector_shuffle<0,2,4,6,12,13,14,15> t41, t21
//   t18: v2i64 = bitcast t51
//
// Without avx512vl, this is lowered to:
//
// vpmovqd %zmm0, %ymm0
// vpshufb {{.*#+}} xmm0 =
// xmm0[0,1,4,5,8,9,12,13],zero,zero,zero,zero,zero,zero,zero,zero
//
// But when avx512vl is available, one can just use a single vpmovdw
// instruction.
static SDValue lowerVectorShuffleWithVPMOV(const SDLoc &DL, ArrayRef<int> Mask,
                                           MVT VT, SDValue V1, SDValue V2,
                                           SelectionDAG &DAG,
                                           const X86Subtarget &Subtarget) {
  if (VT != MVT::v16i8 && VT != MVT::v8i16)
    return SDValue();

  if (Mask.size() != VT.getVectorNumElements())
    return SDValue();

  bool SwappedOps = false;

  if (!ISD::isBuildVectorAllZeros(V2.getNode())) {
    if (!ISD::isBuildVectorAllZeros(V1.getNode()))
      return SDValue();

    std::swap(V1, V2);
    SwappedOps = true;
  }

  // Look for:
  //
  // bitcast (truncate <8 x i32> %vec to <8 x i16>) to <16 x i8>
  // bitcast (truncate <4 x i64> %vec to <4 x i32>) to <8 x i16>
  //
  // and similar ones.
  if (V1.getOpcode() != ISD::BITCAST)
    return SDValue();
  if (V1.getOperand(0).getOpcode() != ISD::TRUNCATE)
    return SDValue();

  SDValue Src = V1.getOperand(0).getOperand(0);
  MVT SrcVT = Src.getSimpleValueType();

  // The vptrunc** instructions truncating 128-bit and 256-bit vectors
  // are only available with avx512vl.
  if (!SrcVT.is512BitVector() && !Subtarget.hasVLX())
    return SDValue();

  // Down-converting word to byte is only available with avx512bw. The case
  // with 256-bit output doesn't contain a shuffle and is therefore not
  // handled here.
  if (SrcVT.getVectorElementType() == MVT::i16 && VT == MVT::v16i8 &&
      !Subtarget.hasBWI())
    return SDValue();

  // The first half/quarter of the mask should refer to every second/fourth
  // element of the vector truncated and bitcasted.
  if (!matchVectorShuffleAsVPMOV(Mask, SwappedOps, 2) &&
      !matchVectorShuffleAsVPMOV(Mask, SwappedOps, 4))
    return SDValue();

  return DAG.getNode(X86ISD::VTRUNC, DL, VT, Src);
}

// X86 has dedicated pack instructions that can handle specific truncation
// operations: PACKSS and PACKUS.
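//
// Example (illustrative values): when every v8i16 lane already fits in a
// byte, a v16i8 shuffle with the binary pack mask
// {0, 2, 4, 6, 8, 10, 12, 14, 16, 18, 20, 22, 24, 26, 28, 30}
// (every even byte of the two concatenated inputs) can be matched as
// PACKUS(V1, V2).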
static bool matchVectorShuffleWithPACK(MVT VT, MVT &SrcVT, SDValue &V1,
                                       SDValue &V2, unsigned &PackOpcode,
                                       ArrayRef<int> TargetMask,
                                       SelectionDAG &DAG,
                                       const X86Subtarget &Subtarget) {
  unsigned NumElts = VT.getVectorNumElements();
  unsigned BitSize = VT.getScalarSizeInBits();
  MVT PackSVT = MVT::getIntegerVT(BitSize * 2);
  MVT PackVT = MVT::getVectorVT(PackSVT, NumElts / 2);

  auto MatchPACK = [&](SDValue N1, SDValue N2) {
    SDValue VV1 = DAG.getBitcast(PackVT, N1);
    SDValue VV2 = DAG.getBitcast(PackVT, N2);
    if (Subtarget.hasSSE41() || PackSVT == MVT::i16) {
      APInt ZeroMask = APInt::getHighBitsSet(BitSize * 2, BitSize);
      if ((N1.isUndef() || DAG.MaskedValueIsZero(VV1, ZeroMask)) &&
          (N2.isUndef() || DAG.MaskedValueIsZero(VV2, ZeroMask))) {
        V1 = VV1;
        V2 = VV2;
        SrcVT = PackVT;
        PackOpcode = X86ISD::PACKUS;
        return true;
      }
    }
    if ((N1.isUndef() || DAG.ComputeNumSignBits(VV1) > BitSize) &&
        (N2.isUndef() || DAG.ComputeNumSignBits(VV2) > BitSize)) {
      V1 = VV1;
      V2 = VV2;
      SrcVT = PackVT;
      PackOpcode = X86ISD::PACKSS;
      return true;
    }
    return false;
  };

  // Try binary shuffle.
  SmallVector<int, 32> BinaryMask;
  createPackShuffleMask(VT, BinaryMask, false);
  if (isTargetShuffleEquivalent(TargetMask, BinaryMask))
    if (MatchPACK(V1, V2))
      return true;

  // Try unary shuffle.
  SmallVector<int, 32> UnaryMask;
  createPackShuffleMask(VT, UnaryMask, true);
  if (isTargetShuffleEquivalent(TargetMask, UnaryMask))
    if (MatchPACK(V1, V1))
      return true;

  return false;
}

static SDValue lowerVectorShuffleWithPACK(const SDLoc &DL, MVT VT,
                                          ArrayRef<int> Mask, SDValue V1,
                                          SDValue V2, SelectionDAG &DAG,
                                          const X86Subtarget &Subtarget) {
  MVT PackVT;
  unsigned PackOpcode;
  if (matchVectorShuffleWithPACK(VT, PackVT, V1, V2, PackOpcode, Mask, DAG,
                                 Subtarget))
    return DAG.getNode(PackOpcode, DL, VT, DAG.getBitcast(PackVT, V1),
                       DAG.getBitcast(PackVT, V2));

  return SDValue();
}

/// Try to emit a bitmask instruction for a shuffle.
///
/// This handles cases where we can model a blend exactly as a bitmask due to
/// one of the inputs being zeroable.
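///
/// For example (illustrative values): a v4i32 shuffle with
/// Mask = {0, 1, -1, 7} where elements 2 and 3 are zeroable lowers to
/// V1 & <-1, -1, 0, 0>.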
static SDValue lowerVectorShuffleAsBitMask(const SDLoc &DL, MVT VT, SDValue V1,
                                           SDValue V2, ArrayRef<int> Mask,
                                           const APInt &Zeroable,
                                           SelectionDAG &DAG) {
  assert(!VT.isFloatingPoint() && "Floating point types are not supported");
  MVT EltVT = VT.getVectorElementType();
  SDValue Zero = DAG.getConstant(0, DL, EltVT);
  SDValue AllOnes = DAG.getAllOnesConstant(DL, EltVT);
  SmallVector<SDValue, 16> VMaskOps(Mask.size(), Zero);
  SDValue V;
  for (int i = 0, Size = Mask.size(); i < Size; ++i) {
    if (Zeroable[i])
      continue;
    if (Mask[i] % Size != i)
      return SDValue(); // Not a blend.
    if (!V)
      V = Mask[i] < Size ? V1 : V2;
    else if (V != (Mask[i] < Size ? V1 : V2))
      return SDValue(); // Can only let one input through the mask.

    VMaskOps[i] = AllOnes;
  }
  if (!V)
    return SDValue(); // No non-zeroable elements!

  SDValue VMask = DAG.getBuildVector(VT, DL, VMaskOps);
  return DAG.getNode(ISD::AND, DL, VT, V, VMask);
}

/// Try to emit a blend instruction for a shuffle using bit math.
///
/// This is used as a fallback approach when first class blend instructions are
/// unavailable. Currently it is only suitable for integer vectors, but could
/// be generalized for floating point vectors if desirable.
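///
/// For example (illustrative values): a v4i32 blend with Mask = {0, 5, 2, 7}
/// becomes (V1 & <-1, 0, -1, 0>) | ANDNP(<-1, 0, -1, 0>, V2), selecting V1
/// in lanes 0 and 2 and V2 in lanes 1 and 3.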
static SDValue lowerVectorShuffleAsBitBlend(const SDLoc &DL, MVT VT, SDValue V1,
                                            SDValue V2, ArrayRef<int> Mask,
                                            SelectionDAG &DAG) {
  assert(VT.isInteger() && "Only supports integer vector types!");
  MVT EltVT = VT.getVectorElementType();
  SDValue Zero = DAG.getConstant(0, DL, EltVT);
  SDValue AllOnes = DAG.getAllOnesConstant(DL, EltVT);
  SmallVector<SDValue, 16> MaskOps;
  for (int i = 0, Size = Mask.size(); i < Size; ++i) {
    if (Mask[i] >= 0 && Mask[i] != i && Mask[i] != i + Size)
      return SDValue(); // Shuffled input!
    MaskOps.push_back(Mask[i] < Size ? AllOnes : Zero);
  }

  SDValue V1Mask = DAG.getBuildVector(VT, DL, MaskOps);
  V1 = DAG.getNode(ISD::AND, DL, VT, V1, V1Mask);
  V2 = DAG.getNode(X86ISD::ANDNP, DL, VT, V1Mask, V2);
  return DAG.getNode(ISD::OR, DL, VT, V1, V2);
}

static SDValue getVectorMaskingNode(SDValue Op, SDValue Mask,
                                    SDValue PreservedSrc,
                                    const X86Subtarget &Subtarget,
                                    SelectionDAG &DAG);

static bool matchVectorShuffleAsBlend(SDValue V1, SDValue V2,
                                      MutableArrayRef<int> TargetMask,
                                      bool &ForceV1Zero, bool &ForceV2Zero,
                                      uint64_t &BlendMask) {
  bool V1IsZeroOrUndef =
      V1.isUndef() || ISD::isBuildVectorAllZeros(V1.getNode());
  bool V2IsZeroOrUndef =
      V2.isUndef() || ISD::isBuildVectorAllZeros(V2.getNode());

  BlendMask = 0;
  ForceV1Zero = false, ForceV2Zero = false;
  assert(TargetMask.size() <= 64 && "Shuffle mask too big for blend mask");

  // Attempt to generate the binary blend mask. If an input is zero then
  // we can use any lane.
  // TODO: generalize the zero matching to any scalar like isShuffleEquivalent.
  for (int i = 0, Size = TargetMask.size(); i < Size; ++i) {
    int M = TargetMask[i];
    if (M == SM_SentinelUndef)
      continue;
    if (M == i)
      continue;
    if (M == i + Size) {
      BlendMask |= 1ull << i;
      continue;
    }
    if (M == SM_SentinelZero) {
      if (V1IsZeroOrUndef) {
        ForceV1Zero = true;
        TargetMask[i] = i;
        continue;
      }
      if (V2IsZeroOrUndef) {
        ForceV2Zero = true;
        BlendMask |= 1ull << i;
        TargetMask[i] = i + Size;
        continue;
      }
    }
    return false;
  }
  return true;
}
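// Worked example (editorial illustration, not part of the original source):
// for a v4i32 mask <0, 5, SM_SentinelZero, 3> with V2 known zero/undef,
// lane 2 is rewritten to pull its zero from V2, so the mask becomes
// <0, 5, 6, 3>, ForceV2Zero is set, and BlendMask ends up 0b0110 (the bits
// for lanes 1 and 2, which read from V2).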

static uint64_t scaleVectorShuffleBlendMask(uint64_t BlendMask, int Size,
                                            int Scale) {
  uint64_t ScaledMask = 0;
  for (int i = 0; i != Size; ++i)
    if (BlendMask & (1ull << i))
      ScaledMask |= ((1ull << Scale) - 1) << (i * Scale);
  return ScaledMask;
}
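// Worked example (editorial illustration, not part of the original source):
// scaling the v2i64 blend mask 0b10 by Scale = 2 (two dwords per qword)
// yields 0b1100 -- each set bit expands into Scale consecutive set bits so
// the same blend can be expressed as a v4i32 VPBLENDD immediate.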

/// Try to emit a blend instruction for a shuffle.
///
/// This doesn't do any checks for the availability of instructions for blending
/// these values. It relies on the availability of the X86ISD::BLENDI pattern to
/// be matched in the backend with the type given. What it does check for is
/// that the shuffle mask is a blend, or convertible into a blend with zero.
static SDValue lowerVectorShuffleAsBlend(const SDLoc &DL, MVT VT, SDValue V1,
                                         SDValue V2, ArrayRef<int> Original,
                                         const APInt &Zeroable,
                                         const X86Subtarget &Subtarget,
                                         SelectionDAG &DAG) {
  SmallVector<int, 64> Mask = createTargetShuffleMask(Original, Zeroable);

  uint64_t BlendMask = 0;
  bool ForceV1Zero = false, ForceV2Zero = false;
  if (!matchVectorShuffleAsBlend(V1, V2, Mask, ForceV1Zero, ForceV2Zero,
                                 BlendMask))
    return SDValue();

  // Create a REAL zero vector - ISD::isBuildVectorAllZeros allows UNDEFs.
  if (ForceV1Zero)
    V1 = getZeroVector(VT, Subtarget, DAG, DL);
  if (ForceV2Zero)
    V2 = getZeroVector(VT, Subtarget, DAG, DL);

  switch (VT.SimpleTy) {
  case MVT::v2f64:
  case MVT::v4f32:
  case MVT::v4f64:
  case MVT::v8f32:
    return DAG.getNode(X86ISD::BLENDI, DL, VT, V1, V2,
                       DAG.getConstant(BlendMask, DL, MVT::i8));
  case MVT::v4i64:
  case MVT::v8i32:
    assert(Subtarget.hasAVX2() && "256-bit integer blends require AVX2!");
    LLVM_FALLTHROUGH;
  case MVT::v2i64:
  case MVT::v4i32:
    // If we have AVX2 it is faster to use VPBLENDD when the shuffle fits into
    // that instruction.
    if (Subtarget.hasAVX2()) {
      // Scale the blend by the number of 32-bit dwords per element.
      int Scale = VT.getScalarSizeInBits() / 32;
      BlendMask = scaleVectorShuffleBlendMask(BlendMask, Mask.size(), Scale);
      MVT BlendVT = VT.getSizeInBits() > 128 ? MVT::v8i32 : MVT::v4i32;
      V1 = DAG.getBitcast(BlendVT, V1);
      V2 = DAG.getBitcast(BlendVT, V2);
      return DAG.getBitcast(
          VT, DAG.getNode(X86ISD::BLENDI, DL, BlendVT, V1, V2,
                          DAG.getConstant(BlendMask, DL, MVT::i8)));
    }
    LLVM_FALLTHROUGH;
  case MVT::v8i16: {
    // For integer shuffles we need to expand the mask and cast the inputs to
    // v8i16s prior to blending.
    int Scale = 8 / VT.getVectorNumElements();
    BlendMask = scaleVectorShuffleBlendMask(BlendMask, Mask.size(), Scale);
    V1 = DAG.getBitcast(MVT::v8i16, V1);
    V2 = DAG.getBitcast(MVT::v8i16, V2);
    return DAG.getBitcast(VT,
                          DAG.getNode(X86ISD::BLENDI, DL, MVT::v8i16, V1, V2,
                                      DAG.getConstant(BlendMask, DL, MVT::i8)));
  }
  case MVT::v16i16: {
    assert(Subtarget.hasAVX2() && "256-bit integer blends require AVX2!");
    SmallVector<int, 8> RepeatedMask;
    if (is128BitLaneRepeatedShuffleMask(MVT::v16i16, Mask, RepeatedMask)) {
      // We can lower these with PBLENDW which is mirrored across 128-bit lanes.
      assert(RepeatedMask.size() == 8 && "Repeated mask size doesn't match!");
      BlendMask = 0;
      for (int i = 0; i < 8; ++i)
        if (RepeatedMask[i] >= 8)
          BlendMask |= 1ull << i;
      return DAG.getNode(X86ISD::BLENDI, DL, MVT::v16i16, V1, V2,
                         DAG.getConstant(BlendMask, DL, MVT::i8));
    }
    // Use PBLENDW for lower/upper lanes and then blend lanes.
    // TODO - we should allow 2 PBLENDW here and leave shuffle combine to
    // merge to VSELECT where useful.
    uint64_t LoMask = BlendMask & 0xFF;
    uint64_t HiMask = (BlendMask >> 8) & 0xFF;
    if (LoMask == 0 || LoMask == 255 || HiMask == 0 || HiMask == 255) {
      SDValue Lo = DAG.getNode(X86ISD::BLENDI, DL, MVT::v16i16, V1, V2,
                               DAG.getConstant(LoMask, DL, MVT::i8));
      SDValue Hi = DAG.getNode(X86ISD::BLENDI, DL, MVT::v16i16, V1, V2,
                               DAG.getConstant(HiMask, DL, MVT::i8));
      return DAG.getVectorShuffle(
          MVT::v16i16, DL, Lo, Hi,
          {0, 1, 2, 3, 4, 5, 6, 7, 24, 25, 26, 27, 28, 29, 30, 31});
    }
    LLVM_FALLTHROUGH;
  }
  case MVT::v16i8:
  case MVT::v32i8: {
    assert((VT.is128BitVector() || Subtarget.hasAVX2()) &&
           "256-bit byte-blends require AVX2 support!");

    // Attempt to lower to a bitmask if we can. VPAND is faster than VPBLENDVB.
    if (SDValue Masked =
            lowerVectorShuffleAsBitMask(DL, VT, V1, V2, Mask, Zeroable, DAG))
      return Masked;

    if (Subtarget.hasBWI() && Subtarget.hasVLX()) {
      MVT IntegerType =
          MVT::getIntegerVT(std::max((int)VT.getVectorNumElements(), 8));
      SDValue MaskNode = DAG.getConstant(BlendMask, DL, IntegerType);
      return getVectorMaskingNode(V2, MaskNode, V1, Subtarget, DAG);
    }

    // Scale the blend by the number of bytes per element.
    int Scale = VT.getScalarSizeInBits() / 8;

    // This form of blend is always done on bytes. Compute the byte vector
    // type.
    MVT BlendVT = MVT::getVectorVT(MVT::i8, VT.getSizeInBits() / 8);

    // x86 allows load folding with blendvb from the 2nd source operand. But
    // we are still using LLVM select here (see comment below), so that's V1.
    // If V2 can be load-folded and V1 cannot be load-folded, then commute to
    // allow that load-folding possibility.
    if (!ISD::isNormalLoad(V1.getNode()) && ISD::isNormalLoad(V2.getNode())) {
      ShuffleVectorSDNode::commuteMask(Mask);
      std::swap(V1, V2);
    }

    // Compute the VSELECT mask. Note that VSELECT is really confusing in the
    // mix of LLVM's code generator and the x86 backend. We tell the code
    // generator that boolean values in the elements of an x86 vector register
    // are -1 for true and 0 for false. We then use the LLVM semantics of 'true'
    // mapping a select to operand #1, and 'false' mapping to operand #2. The
    // reality in x86 is that vector masks (pre-AVX-512) use only the high bit
    // of the element (the remaining are ignored) and 0 in that high bit would
    // mean operand #1 while 1 in the high bit would mean operand #2. So while
    // the LLVM model for boolean values in vector elements gets the relevant
    // bit set, it is set backwards and over constrained relative to x86's
    // actual model.
    SmallVector<SDValue, 32> VSELECTMask;
    for (int i = 0, Size = Mask.size(); i < Size; ++i)
      for (int j = 0; j < Scale; ++j)
        VSELECTMask.push_back(
            Mask[i] < 0 ? DAG.getUNDEF(MVT::i8)
                        : DAG.getConstant(Mask[i] < Size ? -1 : 0, DL,
                                          MVT::i8));

    V1 = DAG.getBitcast(BlendVT, V1);
    V2 = DAG.getBitcast(BlendVT, V2);
    return DAG.getBitcast(
        VT,
        DAG.getSelect(DL, BlendVT, DAG.getBuildVector(BlendVT, DL, VSELECTMask),
                      V1, V2));
  }
  case MVT::v16f32:
  case MVT::v8f64:
  case MVT::v8i64:
  case MVT::v16i32:
  case MVT::v32i16:
  case MVT::v64i8: {
    MVT IntegerType =
        MVT::getIntegerVT(std::max((int)VT.getVectorNumElements(), 8));
    SDValue MaskNode = DAG.getConstant(BlendMask, DL, IntegerType);
    return getVectorMaskingNode(V2, MaskNode, V1, Subtarget, DAG);
  }
  default:
    llvm_unreachable("Not a supported integer vector type!");
  }
}
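// Worked example (editorial illustration, not part of the original source)
// of the dword-scaling path above: a v4i64 blend with BlendMask 0b0101 on an
// AVX2 target is rescaled by Scale = 2 to 0b00110011 and emitted as a v8i32
// VPBLENDD, with bitcasts wrapping the inputs and the result; the narrower
// VPBLENDD immediate encodes the same per-qword selection.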

/// Try to lower as a blend of elements from two inputs followed by
/// a single-input permutation.
///
/// This matches the pattern where we can blend elements from two inputs and
/// then reduce the shuffle to a single-input permutation.
static SDValue lowerVectorShuffleAsBlendAndPermute(const SDLoc &DL, MVT VT,
                                                   SDValue V1, SDValue V2,
                                                   ArrayRef<int> Mask,
                                                   SelectionDAG &DAG,
                                                   bool ImmBlends = false) {
  // We build up the blend mask while checking whether a blend is a viable way
  // to reduce the shuffle.
  SmallVector<int, 32> BlendMask(Mask.size(), -1);
  SmallVector<int, 32> PermuteMask(Mask.size(), -1);

  for (int i = 0, Size = Mask.size(); i < Size; ++i) {
    if (Mask[i] < 0)
      continue;

    assert(Mask[i] < Size * 2 && "Shuffle input is out of bounds.");

    if (BlendMask[Mask[i] % Size] < 0)
      BlendMask[Mask[i] % Size] = Mask[i];
    else if (BlendMask[Mask[i] % Size] != Mask[i])
      return SDValue(); // Can't blend in the needed input!

    PermuteMask[i] = Mask[i] % Size;
  }

  // If only immediate blends, then bail if the blend mask can't be widened to
  // i16.
  unsigned EltSize = VT.getScalarSizeInBits();
  if (ImmBlends && EltSize == 8 && !canWidenShuffleElements(BlendMask))
    return SDValue();

  SDValue V = DAG.getVectorShuffle(VT, DL, V1, V2, BlendMask);
  return DAG.getVectorShuffle(VT, DL, V, DAG.getUNDEF(VT), PermuteMask);
}
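// Worked example (editorial illustration, not part of the original source):
// for a v4i32 mask <5, 0, 6, 3> the routine first blends each requested
// element into its home lane (BlendMask <0, 5, 6, 3>, a pure per-lane blend)
// and then applies the single-input permute <1, 0, 2, 3> to swap lanes 0 and
// 1, turning one hard two-input shuffle into blend + permute.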

/// Try to lower as an unpack of elements from two inputs followed by
/// a single-input permutation.
///
/// This matches the pattern where we can unpack elements from two inputs and
/// then reduce the shuffle to a single-input (wider) permutation.
static SDValue lowerVectorShuffleAsUNPCKAndPermute(const SDLoc &DL, MVT VT,
                                                   SDValue V1, SDValue V2,
                                                   ArrayRef<int> Mask,
                                                   SelectionDAG &DAG) {
  int NumElts = Mask.size();
  int NumLanes = VT.getSizeInBits() / 128;
  int NumLaneElts = NumElts / NumLanes;
  int NumHalfLaneElts = NumLaneElts / 2;

  bool MatchLo = true, MatchHi = true;
  SDValue Ops[2] = {DAG.getUNDEF(VT), DAG.getUNDEF(VT)};

  // Determine UNPCKL/UNPCKH type and operand order.
  for (int Lane = 0; Lane != NumElts; Lane += NumLaneElts) {
    for (int Elt = 0; Elt != NumLaneElts; ++Elt) {
      int M = Mask[Lane + Elt];
      if (M < 0)
        continue;

      SDValue &Op = Ops[Elt & 1];
      if (M < NumElts && (Op.isUndef() || Op == V1))
        Op = V1;
      else if (NumElts <= M && (Op.isUndef() || Op == V2))
        Op = V2;
      else
        return SDValue();

      int Lo = Lane, Mid = Lane + NumHalfLaneElts, Hi = Lane + NumLaneElts;
      MatchLo &= isUndefOrInRange(M, Lo, Mid) ||
                 isUndefOrInRange(M, NumElts + Lo, NumElts + Mid);
      MatchHi &= isUndefOrInRange(M, Mid, Hi) ||
                 isUndefOrInRange(M, NumElts + Mid, NumElts + Hi);
      if (!MatchLo && !MatchHi)
        return SDValue();
    }
  }
  assert((MatchLo ^ MatchHi) && "Failed to match UNPCKLO/UNPCKHI");

  // Now check that each pair of elts come from the same unpack pair
  // and set the permute mask based on each pair.
  // TODO - Investigate cases where we permute individual elements.
  SmallVector<int, 32> PermuteMask(NumElts, -1);
  for (int Lane = 0; Lane != NumElts; Lane += NumLaneElts) {
    for (int Elt = 0; Elt != NumLaneElts; Elt += 2) {
      int M0 = Mask[Lane + Elt + 0];
      int M1 = Mask[Lane + Elt + 1];
      if (0 <= M0 && 0 <= M1 &&
          (M0 % NumHalfLaneElts) != (M1 % NumHalfLaneElts))
        return SDValue();
      if (0 <= M0)
        PermuteMask[Lane + Elt + 0] = Lane + (2 * (M0 % NumHalfLaneElts));
      if (0 <= M1)
        PermuteMask[Lane + Elt + 1] = Lane + (2 * (M1 % NumHalfLaneElts)) + 1;
    }
  }

  unsigned UnpckOp = MatchLo ? X86ISD::UNPCKL : X86ISD::UNPCKH;
  SDValue Unpck = DAG.getNode(UnpckOp, DL, VT, Ops);
  return DAG.getVectorShuffle(VT, DL, Unpck, DAG.getUNDEF(VT), PermuteMask);
}
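// Worked example (editorial illustration, not part of the original source):
// the v8i16 mask <1, 9, 0, 8, 3, 11, 2, 10> interleaves the low halves of V1
// and V2, so it matches UNPCKL(V1, V2); the leftover reordering collapses to
// the single-input permute <2, 3, 0, 1, 6, 7, 4, 5> on the unpacked result
// (swapping adjacent element pairs).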

/// Helper to form a PALIGNR-based rotate+permute, merging 2 inputs and then
/// permuting the elements of the result in place.
static SDValue lowerVectorShuffleAsByteRotateAndPermute(
    const SDLoc &DL, MVT VT, SDValue V1, SDValue V2, ArrayRef<int> Mask,
    const X86Subtarget &Subtarget, SelectionDAG &DAG) {
  if ((VT.is128BitVector() && !Subtarget.hasSSSE3()) ||
      (VT.is256BitVector() && !Subtarget.hasAVX2()) ||
      (VT.is512BitVector() && !Subtarget.hasBWI()))
    return SDValue();

  // We don't currently support lane crossing permutes.
  if (is128BitLaneCrossingShuffleMask(VT, Mask))
    return SDValue();

  int Scale = VT.getScalarSizeInBits() / 8;
  int NumLanes = VT.getSizeInBits() / 128;
  int NumElts = VT.getVectorNumElements();
  int NumEltsPerLane = NumElts / NumLanes;

  // Determine range of mask elts.
  bool Blend1 = true;
  bool Blend2 = true;
  std::pair<int, int> Range1 = std::make_pair(INT_MAX, INT_MIN);
  std::pair<int, int> Range2 = std::make_pair(INT_MAX, INT_MIN);
  for (int Lane = 0; Lane != NumElts; Lane += NumEltsPerLane) {
    for (int Elt = 0; Elt != NumEltsPerLane; ++Elt) {
      int M = Mask[Lane + Elt];
      if (M < 0)
        continue;
      if (M < NumElts) {
        Blend1 &= (M == (Lane + Elt));
        assert(Lane <= M && M < (Lane + NumEltsPerLane) && "Out of range mask");
        M = M % NumEltsPerLane;
        Range1.first = std::min(Range1.first, M);
        Range1.second = std::max(Range1.second, M);
      } else {
        M -= NumElts;
        Blend2 &= (M == (Lane + Elt));
        assert(Lane <= M && M < (Lane + NumEltsPerLane) && "Out of range mask");
        M = M % NumEltsPerLane;
        Range2.first = std::min(Range2.first, M);
        Range2.second = std::max(Range2.second, M);
      }
    }
  }

  // Bail if we don't need both elements.
  // TODO - it might be worth doing this for unary shuffles if the permute
  // can be widened.
  if (!(0 <= Range1.first && Range1.second < NumEltsPerLane) ||
      !(0 <= Range2.first && Range2.second < NumEltsPerLane))
    return SDValue();

  if (VT.getSizeInBits() > 128 && (Blend1 || Blend2))
    return SDValue();

  // Rotate the 2 ops so we can access both ranges, then permute the result.
  auto RotateAndPermute = [&](SDValue Lo, SDValue Hi, int RotAmt, int Ofs) {
    MVT ByteVT = MVT::getVectorVT(MVT::i8, VT.getSizeInBits() / 8);
    SDValue Rotate = DAG.getBitcast(
        VT, DAG.getNode(X86ISD::PALIGNR, DL, ByteVT, DAG.getBitcast(ByteVT, Hi),
                        DAG.getBitcast(ByteVT, Lo),
                        DAG.getConstant(Scale * RotAmt, DL, MVT::i8)));
    SmallVector<int, 64> PermMask(NumElts, SM_SentinelUndef);
    for (int Lane = 0; Lane != NumElts; Lane += NumEltsPerLane) {
      for (int Elt = 0; Elt != NumEltsPerLane; ++Elt) {
        int M = Mask[Lane + Elt];
        if (M < 0)
          continue;
        if (M < NumElts)
          PermMask[Lane + Elt] = Lane + ((M + Ofs - RotAmt) % NumEltsPerLane);
        else
          PermMask[Lane + Elt] = Lane + ((M - Ofs - RotAmt) % NumEltsPerLane);
      }
    }
    return DAG.getVectorShuffle(VT, DL, Rotate, DAG.getUNDEF(VT), PermMask);
  };

  // Check if the ranges are small enough to rotate from either direction.
  if (Range2.second < Range1.first)
    return RotateAndPermute(V1, V2, Range1.first, 0);
  if (Range1.second < Range2.first)
    return RotateAndPermute(V2, V1, Range2.first, NumElts);
  return SDValue();
}
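// Worked example (editorial illustration, not part of the original source):
// for a v16i8 shuffle whose V1 elements all lie in indices 4..15 and whose
// V2 elements lie in 0..3 (so Range2.second < Range1.first), PALIGNR with a
// 4-byte immediate forms <V1[4..15], V2[0..3]> in one register, after which
// the requested order is just a single-input permute of that rotated value.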

/// Generic routine to decompose a shuffle and blend into independent
/// blends and permutes.
///
/// This matches the extremely common pattern for handling combined
/// shuffle+blend operations on newer X86 ISAs where we have very fast blend
/// operations. It will try to pick the best arrangement of shuffles and
/// blends.
static SDValue lowerVectorShuffleAsDecomposedShuffleBlend(
    const SDLoc &DL, MVT VT, SDValue V1, SDValue V2, ArrayRef<int> Mask,
    const X86Subtarget &Subtarget, SelectionDAG &DAG) {
  // Shuffle the input elements into the desired positions in V1 and V2 and
  // blend them together.
  SmallVector<int, 32> V1Mask(Mask.size(), -1);
  SmallVector<int, 32> V2Mask(Mask.size(), -1);
  SmallVector<int, 32> BlendMask(Mask.size(), -1);
  for (int i = 0, Size = Mask.size(); i < Size; ++i)
    if (Mask[i] >= 0 && Mask[i] < Size) {
      V1Mask[i] = Mask[i];
      BlendMask[i] = i;
    } else if (Mask[i] >= Size) {
      V2Mask[i] = Mask[i] - Size;
      BlendMask[i] = i + Size;
    }

  // Try to lower with the simpler initial blend/unpack/rotate strategies unless
  // one of the input shuffles would be a no-op. We prefer to shuffle inputs as
  // the shuffle may be able to fold with a load or other benefit. However, when
  // we'll have to do 2x as many shuffles in order to achieve this, a 2-input
  // pre-shuffle first is a better strategy.
  if (!isNoopShuffleMask(V1Mask) && !isNoopShuffleMask(V2Mask)) {
    // Only prefer immediate blends to unpack/rotate.
    if (SDValue BlendPerm = lowerVectorShuffleAsBlendAndPermute(
            DL, VT, V1, V2, Mask, DAG, true))
      return BlendPerm;
    if (SDValue UnpackPerm =
            lowerVectorShuffleAsUNPCKAndPermute(DL, VT, V1, V2, Mask, DAG))
      return UnpackPerm;
    if (SDValue RotatePerm = lowerVectorShuffleAsByteRotateAndPermute(
            DL, VT, V1, V2, Mask, Subtarget, DAG))
      return RotatePerm;
    // Unpack/rotate failed - try again with variable blends.
    if (SDValue BlendPerm =
            lowerVectorShuffleAsBlendAndPermute(DL, VT, V1, V2, Mask, DAG))
      return BlendPerm;
  }

  V1 = DAG.getVectorShuffle(VT, DL, V1, DAG.getUNDEF(VT), V1Mask);
  V2 = DAG.getVectorShuffle(VT, DL, V2, DAG.getUNDEF(VT), V2Mask);
  return DAG.getVectorShuffle(VT, DL, V1, V2, BlendMask);
}
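// Worked example (editorial illustration, not part of the original source)
// of the fallback decomposition: a v4i32 mask <6, 1, 7, 3> splits into
// V1Mask <-1, 1, -1, 3>, V2Mask <2, -1, 3, -1> and BlendMask <4, 1, 6, 3>,
// i.e. permute each input into place independently and then blend lane by
// lane. Here V1Mask is already a no-op, so the pre-shuffle strategies above
// are skipped and the plain shuffle+shuffle+blend form is emitted.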

/// Try to lower a vector shuffle as a rotation.
///
/// This is used to support PALIGNR for SSSE3 or VALIGND/Q for AVX512.
static int matchVectorShuffleAsRotate(SDValue &V1, SDValue &V2,
                                      ArrayRef<int> Mask) {
  int NumElts = Mask.size();

  // We need to detect various ways of spelling a rotation:
  //   [11, 12, 13, 14, 15,  0,  1,  2]
  //   [-1, 12, 13, 14, -1, -1,  1, -1]
  //   [-1, -1, -1, -1, -1, -1,  1,  2]
  //   [ 3,  4,  5,  6,  7,  8,  9, 10]
  //   [-1,  4,  5,  6, -1, -1,  9, -1]
  //   [-1,  4,  5,  6, -1, -1, -1, -1]
  int Rotation = 0;
  SDValue Lo, Hi;
  for (int i = 0; i < NumElts; ++i) {
    int M = Mask[i];
    assert((M == SM_SentinelUndef || (0 <= M && M < (2*NumElts))) &&
           "Unexpected mask index.");
    if (M < 0)
      continue;

    // Determine where a rotated vector would have started.
    int StartIdx = i - (M % NumElts);
    if (StartIdx == 0)
      // The identity rotation isn't interesting, stop.
      return -1;

    // If we found the tail of a vector the rotation must be the missing
    // front. If we found the head of a vector, it must be how much of the
    // head.
    int CandidateRotation = StartIdx < 0 ? -StartIdx : NumElts - StartIdx;

    if (Rotation == 0)
      Rotation = CandidateRotation;
    else if (Rotation != CandidateRotation)
      // The rotations don't match, so we can't match this mask.
      return -1;

    // Compute which value this mask is pointing at.
    SDValue MaskV = M < NumElts ? V1 : V2;

    // Compute which of the two target values this index should be assigned
    // to. This reflects whether the high elements are remaining or the low
    // elements are remaining.
    SDValue &TargetV = StartIdx < 0 ? Hi : Lo;

    // Either set up this value if we've not encountered it before, or check
    // that it remains consistent.
    if (!TargetV)
      TargetV = MaskV;
    else if (TargetV != MaskV)
      // This may be a rotation, but it pulls from the inputs in some
      // unsupported interleaving.
      return -1;
  }

  // Check that we successfully analyzed the mask, and normalize the results.
  assert(Rotation != 0 && "Failed to locate a viable rotation!");
  assert((Lo || Hi) && "Failed to find a rotated input vector!");
  if (!Lo)
    Lo = Hi;
  else if (!Hi)
    Hi = Lo;

  V1 = Lo;
  V2 = Hi;

  return Rotation;
}
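// Worked example (editorial illustration, not part of the original source):
// for the v8i16 mask [11, 12, 13, 14, 15, 0, 1, 2], lane 0 wants input index
// 11, so StartIdx = 0 - (11 % 8) = -3 and the candidate rotation is 3; every
// other lane agrees (e.g. lane 5 wants index 0, giving 8 - 5 = 3). The
// negative-StartIdx lanes all read V2 and the rest read V1, so the match
// succeeds with Lo = V1, Hi = V2 and returns a rotation of 3 elements.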

/// Try to lower a vector shuffle as a byte rotation.
///
/// SSSE3 has a generic PALIGNR instruction in x86 that will do an arbitrary
/// byte-rotation of the concatenation of two vectors; pre-SSSE3 can use
/// a PSRLDQ/PSLLDQ/POR pattern to get a similar effect. This routine will
/// try to generically lower a vector shuffle through such a pattern. It
/// does not check for the profitability of lowering either as PALIGNR or
/// PSRLDQ/PSLLDQ/POR, only whether the mask is valid to lower in that form.
/// This matches shuffle vectors that look like:
///
///   v8i16 [11, 12, 13, 14, 15, 0, 1, 2]
///
/// Essentially it concatenates V1 and V2, shifts right by some number of
/// elements, and takes the low elements as the result. Note that while this is
/// specified as a *right shift* because x86 is little-endian, it is a *left
/// rotate* of the vector lanes.
static int matchVectorShuffleAsByteRotate(MVT VT, SDValue &V1, SDValue &V2,
                                          ArrayRef<int> Mask) {
  // Don't accept any shuffles with zero elements.
  if (any_of(Mask, [](int M) { return M == SM_SentinelZero; }))
    return -1;

  // PALIGNR works on 128-bit lanes.
  SmallVector<int, 16> RepeatedMask;
  if (!is128BitLaneRepeatedShuffleMask(VT, Mask, RepeatedMask))
    return -1;

  int Rotation = matchVectorShuffleAsRotate(V1, V2, RepeatedMask);
  if (Rotation <= 0)
    return -1;

  // PALIGNR rotates bytes, so we need to scale the
  // rotation based on how many bytes are in the vector lane.
  int NumElts = RepeatedMask.size();
  int Scale = 16 / NumElts;
  return Rotation * Scale;
}
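// Worked example (editorial illustration, not part of the original source)
// of the byte scaling: a v8i16 rotation of 3 elements becomes
// 3 * (16 / 8) = 6 bytes, which is the immediate PALIGNR expects; for v4i32
// the same element rotation would scale by 4 bytes per element instead.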

static SDValue lowerVectorShuffleAsByteRotate(const SDLoc &DL, MVT VT,
                                              SDValue V1, SDValue V2,
                                              ArrayRef<int> Mask,
                                              const X86Subtarget &Subtarget,
                                              SelectionDAG &DAG) {
  assert(!isNoopShuffleMask(Mask) && "We shouldn't lower no-op shuffles!");

  SDValue Lo = V1, Hi = V2;
  int ByteRotation = matchVectorShuffleAsByteRotate(VT, Lo, Hi, Mask);
  if (ByteRotation <= 0)
    return SDValue();

  // Cast the inputs to i8 vector of correct length to match PALIGNR or
  // PSLLDQ/PSRLDQ.
  MVT ByteVT = MVT::getVectorVT(MVT::i8, VT.getSizeInBits() / 8);
  Lo = DAG.getBitcast(ByteVT, Lo);
  Hi = DAG.getBitcast(ByteVT, Hi);

  // SSSE3 targets can use the palignr instruction.
  if (Subtarget.hasSSSE3()) {
    assert((!VT.is512BitVector() || Subtarget.hasBWI()) &&
           "512-bit PALIGNR requires BWI instructions");
    return DAG.getBitcast(
        VT, DAG.getNode(X86ISD::PALIGNR, DL, ByteVT, Lo, Hi,
                        DAG.getConstant(ByteRotation, DL, MVT::i8)));
  }

  assert(VT.is128BitVector() &&
         "Rotate-based lowering only supports 128-bit lowering!");
  assert(Mask.size() <= 16 &&
         "Can shuffle at most 16 bytes in a 128-bit vector!");
  assert(ByteVT == MVT::v16i8 &&
         "SSE2 rotate lowering only needed for v16i8!");

  // Default SSE2 implementation
  int LoByteShift = 16 - ByteRotation;
  int HiByteShift = ByteRotation;

  SDValue LoShift = DAG.getNode(X86ISD::VSHLDQ, DL, MVT::v16i8, Lo,
                                DAG.getConstant(LoByteShift, DL, MVT::i8));
  SDValue HiShift = DAG.getNode(X86ISD::VSRLDQ, DL, MVT::v16i8, Hi,
                                DAG.getConstant(HiByteShift, DL, MVT::i8));
  return DAG.getBitcast(VT,
                        DAG.getNode(ISD::OR, DL, MVT::v16i8, LoShift, HiShift));
}
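// Worked example (editorial illustration, not part of the original source)
// of the SSE2 fallback: for a byte rotation of 6, the low input is shifted
// left by 16 - 6 = 10 bytes (PSLLDQ), the high input is shifted right by 6
// bytes (PSRLDQ), and POR merges the two halves -- reproducing PALIGNR's
// concatenate-and-shift behavior without SSSE3.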

/// Try to lower a vector shuffle as a dword/qword rotation.
///
/// AVX512 has VALIGND/VALIGNQ instructions that will do an arbitrary
/// rotation of the concatenation of two vectors; this routine will
/// try to generically lower a vector shuffle through such a pattern.
///
/// Essentially it concatenates V1 and V2, shifts right by some number of
/// elements, and takes the low elements as the result. Note that while this is
/// specified as a *right shift* because x86 is little-endian, it is a *left
/// rotate* of the vector lanes.
static SDValue lowerVectorShuffleAsRotate(const SDLoc &DL, MVT VT,
                                          SDValue V1, SDValue V2,
                                          ArrayRef<int> Mask,
                                          const X86Subtarget &Subtarget,
                                          SelectionDAG &DAG) {
  assert((VT.getScalarType() == MVT::i32 || VT.getScalarType() == MVT::i64) &&
         "Only 32-bit and 64-bit elements are supported!");

  // 128/256-bit vectors are only supported with VLX.
  assert((Subtarget.hasVLX() || (!VT.is128BitVector() && !VT.is256BitVector()))
         && "VLX required for 128/256-bit vectors");

  SDValue Lo = V1, Hi = V2;
  int Rotation = matchVectorShuffleAsRotate(Lo, Hi, Mask);
  if (Rotation <= 0)
    return SDValue();

  return DAG.getNode(X86ISD::VALIGN, DL, VT, Lo, Hi,
                     DAG.getConstant(Rotation, DL, MVT::i8));
}

/// Try to lower a vector shuffle as a bit shift (shifts in zeros).
///
/// Attempts to match a shuffle mask against the PSLL(W/D/Q/DQ) and
/// PSRL(W/D/Q/DQ) SSE2 and AVX2 logical bit-shift instructions. The function
/// matches elements from one of the input vectors shuffled to the left or
/// right with zeroable elements 'shifted in'. It handles both the strictly
/// bit-wise element shifts and the byte shift across an entire 128-bit double
/// quad word lane.
///
/// PSLL : (little-endian) left bit shift.
/// [ zz,  0, zz,  2 ]
/// [ -1,  4, zz, -1 ]
/// PSRL : (little-endian) right bit shift.
/// [  1, zz,  3, zz]
/// [ -1, -1,  7, zz]
/// PSLLDQ : (little-endian) left byte shift
/// [ zz,  0,  1,  2,  3,  4,  5,  6]
/// [ zz, zz, -1, -1,  2,  3,  4, -1]
/// [ zz, zz, zz, zz, zz, zz, -1,  1]
/// PSRLDQ : (little-endian) right byte shift
/// [  5,  6,  7, zz, zz, zz, zz, zz]
/// [ -1,  5,  6,  7, zz, zz, zz, zz]
/// [  1,  2, -1, -1, -1, -1, zz, zz]
static int matchVectorShuffleAsShift(MVT &ShiftVT, unsigned &Opcode,
                                     unsigned ScalarSizeInBits,
                                     ArrayRef<int> Mask, int MaskOffset,
                                     const APInt &Zeroable,
                                     const X86Subtarget &Subtarget) {
  int Size = Mask.size();
  unsigned SizeInBits = Size * ScalarSizeInBits;

  auto CheckZeros = [&](int Shift, int Scale, bool Left) {
    for (int i = 0; i < Size; i += Scale)
      for (int j = 0; j < Shift; ++j)
        if (!Zeroable[i + j + (Left ? 0 : (Scale - Shift))])
          return false;

    return true;
  };

  auto MatchShift = [&](int Shift, int Scale, bool Left) {
    for (int i = 0; i != Size; i += Scale) {
      unsigned Pos = Left ? i + Shift : i;
      unsigned Low = Left ? i : i + Shift;
      unsigned Len = Scale - Shift;
      if (!isSequentialOrUndefInRange(Mask, Pos, Len, Low + MaskOffset))
        return -1;
    }

    int ShiftEltBits = ScalarSizeInBits * Scale;
    bool ByteShift = ShiftEltBits > 64;
    Opcode = Left ? (ByteShift ? X86ISD::VSHLDQ : X86ISD::VSHLI)
                  : (ByteShift ? X86ISD::VSRLDQ : X86ISD::VSRLI);
    int ShiftAmt = Shift * ScalarSizeInBits / (ByteShift ? 8 : 1);

    // Normalize the scale for byte shifts to still produce an i64 element
    // type.
    Scale = ByteShift ? Scale / 2 : Scale;

    // We need to round trip through the appropriate type for the shift.
    MVT ShiftSVT = MVT::getIntegerVT(ScalarSizeInBits * Scale);
    ShiftVT = ByteShift ? MVT::getVectorVT(MVT::i8, SizeInBits / 8)
                        : MVT::getVectorVT(ShiftSVT, Size / Scale);
    return (int)ShiftAmt;
  };

  // SSE/AVX supports logical shifts up to 64-bit integers - so we can just
  // keep doubling the size of the integer elements up to that. We can
  // then shift the elements of the integer vector by whole multiples of
  // their width within the elements of the larger integer vector. Test each
  // multiple to see if we can find a match with the moved element indices
  // and that the shifted in elements are all zeroable.
  unsigned MaxWidth = ((SizeInBits == 512) && !Subtarget.hasBWI() ? 64 : 128);
  for (int Scale = 2; Scale * ScalarSizeInBits <= MaxWidth; Scale *= 2)
    for (int Shift = 1; Shift != Scale; ++Shift)
      for (bool Left : {true, false})
        if (CheckZeros(Shift, Scale, Left)) {
          int ShiftAmt = MatchShift(Shift, Scale, Left);
          if (0 < ShiftAmt)
            return ShiftAmt;
        }

  // No match.
  return -1;
}
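// Worked example (editorial illustration, not part of the original source):
// for a v8i16 mask [zz, zz, zz, zz, 0, 1, 2, 3] the matcher succeeds at
// Scale = 8, Shift = 4, Left = true: the whole 128-bit lane moves left by
// four 16-bit elements, ShiftEltBits = 128 > 64 selects the byte-shift form,
// and the result is VSHLDQ (PSLLDQ) on v16i8 with ShiftAmt = 8 bytes.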

static SDValue lowerVectorShuffleAsShift(const SDLoc &DL, MVT VT, SDValue V1,
                                         SDValue V2, ArrayRef<int> Mask,
                                         const APInt &Zeroable,
                                         const X86Subtarget &Subtarget,
                                         SelectionDAG &DAG) {
  int Size = Mask.size();
  assert(Size == (int)VT.getVectorNumElements() && "Unexpected mask size");

  MVT ShiftVT;
  SDValue V = V1;
  unsigned Opcode;

  // Try to match shuffle against V1 shift.
  int ShiftAmt = matchVectorShuffleAsShift(
      ShiftVT, Opcode, VT.getScalarSizeInBits(), Mask, 0, Zeroable, Subtarget);

  // If V1 failed, try to match shuffle against V2 shift.
  if (ShiftAmt < 0) {
    ShiftAmt =
        matchVectorShuffleAsShift(ShiftVT, Opcode, VT.getScalarSizeInBits(),
                                  Mask, Size, Zeroable, Subtarget);
    V = V2;
  }

  if (ShiftAmt < 0)
    return SDValue();

  assert(DAG.getTargetLoweringInfo().isTypeLegal(ShiftVT) &&
         "Illegal integer vector type");
  V = DAG.getBitcast(ShiftVT, V);
  V = DAG.getNode(Opcode, DL, ShiftVT, V,
                  DAG.getConstant(ShiftAmt, DL, MVT::i8));
  return DAG.getBitcast(VT, V);
}

// EXTRQ: Extract Len elements from lower half of source, starting at Idx.
// Remainder of lower half result is zero and upper half is all undef.
static bool matchVectorShuffleAsEXTRQ(MVT VT, SDValue &V1, SDValue &V2,
                                      ArrayRef<int> Mask, uint64_t &BitLen,
                                      uint64_t &BitIdx, const APInt &Zeroable) {
  int Size = Mask.size();
  int HalfSize = Size / 2;
  assert(Size == (int)VT.getVectorNumElements() && "Unexpected mask size");
  assert(!Zeroable.isAllOnesValue() && "Fully zeroable shuffle mask");

  // Upper half must be undefined.
  if (!isUndefInRange(Mask, HalfSize, HalfSize))
    return false;

  // Determine the extraction length from the part of the
  // lower half that isn't zeroable.
  int Len = HalfSize;
  for (; Len > 0; --Len)
    if (!Zeroable[Len - 1])
      break;
  assert(Len > 0 && "Zeroable shuffle mask");

  // Attempt to match first Len sequential elements from the lower half.
  SDValue Src;
  int Idx = -1;
  for (int i = 0; i != Len; ++i) {
    int M = Mask[i];
    if (M == SM_SentinelUndef)
      continue;
    SDValue &V = (M < Size ? V1 : V2);
    M = M % Size;

    // The extracted elements must start at a valid index and all mask
    // elements must be in the lower half.
    if (i > M || M >= HalfSize)
      return false;

    if (Idx < 0 || (Src == V && Idx == (M - i))) {
      Src = V;
      Idx = M - i;
      continue;
    }
    return false;
  }

  if (!Src || Idx < 0)
    return false;

  assert((Idx + Len) <= HalfSize && "Illegal extraction mask");
  BitLen = (Len * VT.getScalarSizeInBits()) & 0x3f;
  BitIdx = (Idx * VT.getScalarSizeInBits()) & 0x3f;
  V1 = Src;
  return true;
}
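// Worked example (editorial illustration, not part of the original source):
// for a v16i8 mask whose lower half is [4, 5, 6, zz, zz, zz, zz, zz] and
// whose upper half is all undef, Len = 3 (the trailing lower-half lanes are
// zeroable) and lanes 0..2 read sequentially from index 4, so the match
// yields BitLen = 3 * 8 = 24 and BitIdx = 4 * 8 = 32 for EXTRQI.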

// INSERTQ: Extract lowest Len elements from lower half of second source and
// insert over first source, starting at Idx.
// { A[0], .., A[Idx-1], B[0], .., B[Len-1], A[Idx+Len], .., UNDEF, ... }
static bool matchVectorShuffleAsINSERTQ(MVT VT, SDValue &V1, SDValue &V2,
                                        ArrayRef<int> Mask, uint64_t &BitLen,
                                        uint64_t &BitIdx) {
  int Size = Mask.size();
  int HalfSize = Size / 2;
  assert(Size == (int)VT.getVectorNumElements() && "Unexpected mask size");

  // Upper half must be undefined.
  if (!isUndefInRange(Mask, HalfSize, HalfSize))
    return false;

  for (int Idx = 0; Idx != HalfSize; ++Idx) {
    SDValue Base;

    // Attempt to match first source from mask before insertion point.
    if (isUndefInRange(Mask, 0, Idx)) {
      /* EMPTY */
    } else if (isSequentialOrUndefInRange(Mask, 0, Idx, 0)) {
      Base = V1;
    } else if (isSequentialOrUndefInRange(Mask, 0, Idx, Size)) {
      Base = V2;
    } else {
      continue;
    }

    // Extend the extraction length looking to match both the insertion of
    // the second source and the remaining elements of the first.
    for (int Hi = Idx + 1; Hi <= HalfSize; ++Hi) {
      SDValue Insert;
      int Len = Hi - Idx;

      // Match insertion.
      if (isSequentialOrUndefInRange(Mask, Idx, Len, 0)) {
        Insert = V1;
      } else if (isSequentialOrUndefInRange(Mask, Idx, Len, Size)) {
        Insert = V2;
      } else {
        continue;
      }

      // Match the remaining elements of the lower half.
      if (isUndefInRange(Mask, Hi, HalfSize - Hi)) {
        /* EMPTY */
      } else if ((!Base || (Base == V1)) &&
                 isSequentialOrUndefInRange(Mask, Hi, HalfSize - Hi, Hi)) {
        Base = V1;
      } else if ((!Base || (Base == V2)) &&
                 isSequentialOrUndefInRange(Mask, Hi, HalfSize - Hi,
                                            Size + Hi)) {
        Base = V2;
      } else {
        continue;
      }

      BitLen = (Len * VT.getScalarSizeInBits()) & 0x3f;
      BitIdx = (Idx * VT.getScalarSizeInBits()) & 0x3f;
      V1 = Base;
      V2 = Insert;
      return true;
    }
  }

  return false;
}
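// Worked example (editorial illustration, not part of the original source):
// for a v16i8 mask whose lower half is [0, 1, 16, 17, 18, 5, 6, 7] with an
// undef upper half, Idx = 2 matches V1 as the base, Len = 3 matches V2's
// lowest elements as the insertion, and lanes 5..7 resume from V1, so
// INSERTQI gets BitLen = 3 * 8 = 24 and BitIdx = 2 * 8 = 16.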

/// Try to lower a vector shuffle using SSE4a EXTRQ/INSERTQ.
static SDValue lowerVectorShuffleWithSSE4A(const SDLoc &DL, MVT VT, SDValue V1,
                                           SDValue V2, ArrayRef<int> Mask,
                                           const APInt &Zeroable,
                                           SelectionDAG &DAG) {
  uint64_t BitLen, BitIdx;
  if (matchVectorShuffleAsEXTRQ(VT, V1, V2, Mask, BitLen, BitIdx, Zeroable))
    return DAG.getNode(X86ISD::EXTRQI, DL, VT, V1,
                       DAG.getConstant(BitLen, DL, MVT::i8),
                       DAG.getConstant(BitIdx, DL, MVT::i8));

  if (matchVectorShuffleAsINSERTQ(VT, V1, V2, Mask, BitLen, BitIdx))
    return DAG.getNode(X86ISD::INSERTQI, DL, VT, V1 ? V1 : DAG.getUNDEF(VT),
                       V2 ? V2 : DAG.getUNDEF(VT),
                       DAG.getConstant(BitLen, DL, MVT::i8),
                       DAG.getConstant(BitIdx, DL, MVT::i8));

  return SDValue();
}
11016 | ||||
11017 | /// Lower a vector shuffle as a zero or any extension. | |||
11018 | /// | |||
11019 | /// Given a specific number of elements, element bit width, and extension | |||
11020 | /// stride, produce either a zero or any extension based on the available | |||
11021 | /// features of the subtarget. The extended elements are consecutive and | |||
11022 | /// begin and can start from an offsetted element index in the input; to | |||
11023 | /// avoid excess shuffling the offset must either being in the bottom lane | |||
11024 | /// or at the start of a higher lane. All extended elements must be from | |||
11025 | /// the same lane. | |||
11026 | static SDValue lowerVectorShuffleAsSpecificZeroOrAnyExtend( | |||
11027 | const SDLoc &DL, MVT VT, int Scale, int Offset, bool AnyExt, SDValue InputV, | |||
11028 | ArrayRef<int> Mask, const X86Subtarget &Subtarget, SelectionDAG &DAG) { | |||
11029 | assert(Scale > 1 && "Need a scale to extend.")((Scale > 1 && "Need a scale to extend.") ? static_cast <void> (0) : __assert_fail ("Scale > 1 && \"Need a scale to extend.\"" , "/build/llvm-toolchain-snapshot-8~svn350071/lib/Target/X86/X86ISelLowering.cpp" , 11029, __PRETTY_FUNCTION__)); | |||
11030 | int EltBits = VT.getScalarSizeInBits(); | |||
11031 | int NumElements = VT.getVectorNumElements(); | |||
11032 | int NumEltsPerLane = 128 / EltBits; | |||
11033 | int OffsetLane = Offset / NumEltsPerLane; | |||
11034 | assert((EltBits == 8 || EltBits == 16 || EltBits == 32) &&(((EltBits == 8 || EltBits == 16 || EltBits == 32) && "Only 8, 16, and 32 bit elements can be extended.") ? static_cast <void> (0) : __assert_fail ("(EltBits == 8 || EltBits == 16 || EltBits == 32) && \"Only 8, 16, and 32 bit elements can be extended.\"" , "/build/llvm-toolchain-snapshot-8~svn350071/lib/Target/X86/X86ISelLowering.cpp" , 11035, __PRETTY_FUNCTION__)) | |||
11035 | "Only 8, 16, and 32 bit elements can be extended.")(((EltBits == 8 || EltBits == 16 || EltBits == 32) && "Only 8, 16, and 32 bit elements can be extended.") ? static_cast <void> (0) : __assert_fail ("(EltBits == 8 || EltBits == 16 || EltBits == 32) && \"Only 8, 16, and 32 bit elements can be extended.\"" , "/build/llvm-toolchain-snapshot-8~svn350071/lib/Target/X86/X86ISelLowering.cpp" , 11035, __PRETTY_FUNCTION__)); | |||
11036 | assert(Scale * EltBits <= 64 && "Cannot zero extend past 64 bits.")((Scale * EltBits <= 64 && "Cannot zero extend past 64 bits." ) ? static_cast<void> (0) : __assert_fail ("Scale * EltBits <= 64 && \"Cannot zero extend past 64 bits.\"" , "/build/llvm-toolchain-snapshot-8~svn350071/lib/Target/X86/X86ISelLowering.cpp" , 11036, __PRETTY_FUNCTION__)); | |||
11037 | assert(0 <= Offset && "Extension offset must be positive.")((0 <= Offset && "Extension offset must be positive." ) ? static_cast<void> (0) : __assert_fail ("0 <= Offset && \"Extension offset must be positive.\"" , "/build/llvm-toolchain-snapshot-8~svn350071/lib/Target/X86/X86ISelLowering.cpp" , 11037, __PRETTY_FUNCTION__)); | |||
11038 | assert((Offset < NumEltsPerLane || Offset % NumEltsPerLane == 0) &&(((Offset < NumEltsPerLane || Offset % NumEltsPerLane == 0 ) && "Extension offset must be in the first lane or start an upper lane." ) ? static_cast<void> (0) : __assert_fail ("(Offset < NumEltsPerLane || Offset % NumEltsPerLane == 0) && \"Extension offset must be in the first lane or start an upper lane.\"" , "/build/llvm-toolchain-snapshot-8~svn350071/lib/Target/X86/X86ISelLowering.cpp" , 11039, __PRETTY_FUNCTION__)) | |||
11039 | "Extension offset must be in the first lane or start an upper lane.")(((Offset < NumEltsPerLane || Offset % NumEltsPerLane == 0 ) && "Extension offset must be in the first lane or start an upper lane." ) ? static_cast<void> (0) : __assert_fail ("(Offset < NumEltsPerLane || Offset % NumEltsPerLane == 0) && \"Extension offset must be in the first lane or start an upper lane.\"" , "/build/llvm-toolchain-snapshot-8~svn350071/lib/Target/X86/X86ISelLowering.cpp" , 11039, __PRETTY_FUNCTION__)); | |||
  // Check that an index is in the same lane as the base offset.
  auto SafeOffset = [&](int Idx) {
    return OffsetLane == (Idx / NumEltsPerLane);
  };

  // Shift along an input so that the offset base moves to the first element.
  auto ShuffleOffset = [&](SDValue V) {
    if (!Offset)
      return V;

    SmallVector<int, 8> ShMask((unsigned)NumElements, -1);
    for (int i = 0; i * Scale < NumElements; ++i) {
      int SrcIdx = i + Offset;
      ShMask[i] = SafeOffset(SrcIdx) ? SrcIdx : -1;
    }
    return DAG.getVectorShuffle(VT, DL, V, DAG.getUNDEF(VT), ShMask);
  };

  // Found a valid zext mask! Try various lowering strategies based on the
  // input type and available ISA extensions.
  if (Subtarget.hasSSE41()) {
    // Not worth offsetting 128-bit vectors if scale == 2, a pattern using
    // PUNPCK will catch this in a later shuffle match.
    if (Offset && Scale == 2 && VT.is128BitVector())
      return SDValue();
    MVT ExtVT = MVT::getVectorVT(MVT::getIntegerVT(EltBits * Scale),
                                 NumElements / Scale);
    InputV = ShuffleOffset(InputV);
    InputV = getExtendInVec(/*Signed*/false, DL, ExtVT, InputV, DAG);
    return DAG.getBitcast(VT, InputV);
  }

  assert(VT.is128BitVector() && "Only 128-bit vectors can be extended.");

  // For any extends we can cheat for larger element sizes and use shuffle
  // instructions that can fold with a load and/or copy.
  if (AnyExt && EltBits == 32) {
    int PSHUFDMask[4] = {Offset, -1, SafeOffset(Offset + 1) ? Offset + 1 : -1,
                         -1};
    return DAG.getBitcast(
        VT, DAG.getNode(X86ISD::PSHUFD, DL, MVT::v4i32,
                        DAG.getBitcast(MVT::v4i32, InputV),
                        getV4X86ShuffleImm8ForMask(PSHUFDMask, DL, DAG)));
  }
  if (AnyExt && EltBits == 16 && Scale > 2) {
    int PSHUFDMask[4] = {Offset / 2, -1,
                         SafeOffset(Offset + 1) ? (Offset + 1) / 2 : -1, -1};
    InputV = DAG.getNode(X86ISD::PSHUFD, DL, MVT::v4i32,
                         DAG.getBitcast(MVT::v4i32, InputV),
                         getV4X86ShuffleImm8ForMask(PSHUFDMask, DL, DAG));
    int PSHUFWMask[4] = {1, -1, -1, -1};
    unsigned OddEvenOp = (Offset & 1 ? X86ISD::PSHUFLW : X86ISD::PSHUFHW);
    return DAG.getBitcast(
        VT, DAG.getNode(OddEvenOp, DL, MVT::v8i16,
                        DAG.getBitcast(MVT::v8i16, InputV),
                        getV4X86ShuffleImm8ForMask(PSHUFWMask, DL, DAG)));
  }

  // The SSE4A EXTRQ instruction can efficiently extend the first 2 lanes
  // to 64-bits.
  if ((Scale * EltBits) == 64 && EltBits < 32 && Subtarget.hasSSE4A()) {
    assert(NumElements == (int)Mask.size() && "Unexpected shuffle mask size!");
    assert(VT.is128BitVector() && "Unexpected vector width!");

    int LoIdx = Offset * EltBits;
    SDValue Lo = DAG.getBitcast(
        MVT::v2i64, DAG.getNode(X86ISD::EXTRQI, DL, VT, InputV,
                                DAG.getConstant(EltBits, DL, MVT::i8),
                                DAG.getConstant(LoIdx, DL, MVT::i8)));

    if (isUndefInRange(Mask, NumElements / 2, NumElements / 2) ||
        !SafeOffset(Offset + 1))
      return DAG.getBitcast(VT, Lo);

    int HiIdx = (Offset + 1) * EltBits;
    SDValue Hi = DAG.getBitcast(
        MVT::v2i64, DAG.getNode(X86ISD::EXTRQI, DL, VT, InputV,
                                DAG.getConstant(EltBits, DL, MVT::i8),
                                DAG.getConstant(HiIdx, DL, MVT::i8)));
    return DAG.getBitcast(VT,
                          DAG.getNode(X86ISD::UNPCKL, DL, MVT::v2i64, Lo, Hi));
  }

  // If this would require more than 2 unpack instructions to expand, use
  // pshufb when available. We can only use more than 2 unpack instructions
  // when zero extending i8 elements which also makes it easier to use pshufb.
  if (Scale > 4 && EltBits == 8 && Subtarget.hasSSSE3()) {
    assert(NumElements == 16 && "Unexpected byte vector width!");
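    // For example, with Scale == 8 and Offset == 0 the mask built below is
    // <0, 0x80 x 7, 1, 0x80 x 7>: source byte 0 lands in result byte 0 and
    // source byte 1 in result byte 8, while 0x80 directs PSHUFB to write a
    // zero into every other destination byte.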
    SDValue PSHUFBMask[16];
    for (int i = 0; i < 16; ++i) {
      int Idx = Offset + (i / Scale);
      PSHUFBMask[i] = DAG.getConstant(
          (i % Scale == 0 && SafeOffset(Idx)) ? Idx : 0x80, DL, MVT::i8);
    }
    InputV = DAG.getBitcast(MVT::v16i8, InputV);
    return DAG.getBitcast(
        VT, DAG.getNode(X86ISD::PSHUFB, DL, MVT::v16i8, InputV,
                        DAG.getBuildVector(MVT::v16i8, DL, PSHUFBMask)));
  }

  // If we are extending from an offset, ensure we start on a boundary that
  // we can unpack from.
  int AlignToUnpack = Offset % (NumElements / Scale);
  if (AlignToUnpack) {
    SmallVector<int, 8> ShMask((unsigned)NumElements, -1);
    for (int i = AlignToUnpack; i < NumElements; ++i)
      ShMask[i - AlignToUnpack] = i;
    InputV = DAG.getVectorShuffle(VT, DL, InputV, DAG.getUNDEF(VT), ShMask);
    Offset -= AlignToUnpack;
  }

  // Otherwise emit a sequence of unpacks.
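  // Each unpack interleaves the current elements with zeros (or undef for an
  // any-extend), doubling the element width; repeating until Scale reaches 1
  // widens the original elements to the full target width. When the base
  // offset lives in the high half, UNPCKH grabs that half instead.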
  do {
    unsigned UnpackLoHi = X86ISD::UNPCKL;
    if (Offset >= (NumElements / 2)) {
      UnpackLoHi = X86ISD::UNPCKH;
      Offset -= (NumElements / 2);
    }

    MVT InputVT = MVT::getVectorVT(MVT::getIntegerVT(EltBits), NumElements);
    SDValue Ext = AnyExt ? DAG.getUNDEF(InputVT)
                         : getZeroVector(InputVT, Subtarget, DAG, DL);
    InputV = DAG.getBitcast(InputVT, InputV);
    InputV = DAG.getNode(UnpackLoHi, DL, InputVT, InputV, Ext);
    Scale /= 2;
    EltBits *= 2;
    NumElements /= 2;
  } while (Scale > 1);
  return DAG.getBitcast(VT, InputV);
}

/// Try to lower a vector shuffle as a zero extension on any microarch.
///
/// This routine will try to do everything in its power to cleverly lower
/// a shuffle which happens to match the pattern of a zero extend. It doesn't
/// check for the profitability of this lowering; it tries to aggressively
/// match this pattern. It will use all of the micro-architectural details it
/// can to emit an efficient lowering. It handles both blends with all-zero
/// inputs to explicitly zero-extend and undef lanes (sometimes undef due to
/// masking out later).
///
/// The reason we have dedicated lowering for zext-style shuffles is that they
/// are both incredibly common and often quite performance sensitive.
static SDValue lowerVectorShuffleAsZeroOrAnyExtend(
    const SDLoc &DL, MVT VT, SDValue V1, SDValue V2, ArrayRef<int> Mask,
    const APInt &Zeroable, const X86Subtarget &Subtarget,
    SelectionDAG &DAG) {
  int Bits = VT.getSizeInBits();
  int NumLanes = Bits / 128;
  int NumElements = VT.getVectorNumElements();
  int NumEltsPerLane = NumElements / NumLanes;
  assert(VT.getScalarSizeInBits() <= 32 &&
         "Exceeds 32-bit integer zero extension limit");
  assert((int)Mask.size() == NumElements && "Unexpected shuffle mask size");

  // Define a helper function to check a particular ext-scale and lower to it
  // if valid.
  auto Lower = [&](int Scale) -> SDValue {
    SDValue InputV;
    bool AnyExt = true;
    int Offset = 0;
    int Matches = 0;
    for (int i = 0; i < NumElements; ++i) {
      int M = Mask[i];
      if (M < 0)
        continue; // Valid anywhere but doesn't tell us anything.
      if (i % Scale != 0) {
        // Each of the extended elements needs to be zeroable.
        if (!Zeroable[i])
          return SDValue();

        // We no longer are in the anyext case.
        AnyExt = false;
        continue;
      }

      // The base elements need to be consecutive indices into the same input
      // vector.
      SDValue V = M < NumElements ? V1 : V2;
      M = M % NumElements;
      if (!InputV) {
        InputV = V;
        Offset = M - (i / Scale);
      } else if (InputV != V)
        return SDValue(); // Flip-flopping inputs.

      // Offset must start in the lowest 128-bit lane or at the start of an
      // upper lane.
      // FIXME: Is it ever worth allowing a negative base offset?
      if (!((0 <= Offset && Offset < NumEltsPerLane) ||
            (Offset % NumEltsPerLane) == 0))
        return SDValue();

      // If we are offsetting, all referenced entries must come from the same
      // lane.
      if (Offset && (Offset / NumEltsPerLane) != (M / NumEltsPerLane))
        return SDValue();

      if ((M % NumElements) != (Offset + (i / Scale)))
        return SDValue(); // Non-consecutive strided elements.
      Matches++;
    }

    // If we fail to find an input, we have a zero-shuffle which should always
    // have already been handled.
    // FIXME: Maybe handle this here in case during blending we end up with one?
    if (!InputV)
      return SDValue();

    // If we are offsetting, don't extend if we only match a single input; we
    // can always do better by using a basic PSHUF or PUNPCK.
    if (Offset != 0 && Matches < 2)
      return SDValue();

    return lowerVectorShuffleAsSpecificZeroOrAnyExtend(
        DL, VT, Scale, Offset, AnyExt, InputV, Mask, Subtarget, DAG);
  };

  // The widest scale possible for extending is to a 64-bit integer.
  assert(Bits % 64 == 0 &&
         "The number of bits in a vector must be divisible by 64 on x86!");
  int NumExtElements = Bits / 64;
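
  // For a 128-bit v16i8 shuffle, for example, the loop below tries Scale == 8
  // (extend to v2i64), then 4 (v4i32), then 2 (v8i16).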

  // Each iteration, try extending the elements half as much, but into twice as
  // many elements.
  for (; NumExtElements < NumElements; NumExtElements *= 2) {
    assert(NumElements % NumExtElements == 0 &&
           "The input vector size must be divisible by the extended size.");
    if (SDValue V = Lower(NumElements / NumExtElements))
      return V;
  }

  // General extends failed, but 128-bit vectors may be able to use MOVQ.
  if (Bits != 128)
    return SDValue();

  // Returns one of the source operands if the shuffle can be reduced to a
  // MOVQ, copying the lower 64-bits and zero-extending to the upper 64-bits.
  auto CanZExtLowHalf = [&]() {
    for (int i = NumElements / 2; i != NumElements; ++i)
      if (!Zeroable[i])
        return SDValue();
    if (isSequentialOrUndefInRange(Mask, 0, NumElements / 2, 0))
      return V1;
    if (isSequentialOrUndefInRange(Mask, 0, NumElements / 2, NumElements))
      return V2;
    return SDValue();
  };

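  // VZEXT_MOVL is the MOVQ pattern: it copies the low 64 bits of the source
  // and zeroes the upper 64 bits of the result.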
  if (SDValue V = CanZExtLowHalf()) {
    V = DAG.getBitcast(MVT::v2i64, V);
    V = DAG.getNode(X86ISD::VZEXT_MOVL, DL, MVT::v2i64, V);
    return DAG.getBitcast(VT, V);
  }

  // No viable ext lowering found.
  return SDValue();
}

/// Try to get a scalar value for a specific element of a vector.
///
/// Looks through BUILD_VECTOR and SCALAR_TO_VECTOR nodes to find a scalar.
static SDValue getScalarValueForVectorElement(SDValue V, int Idx,
                                              SelectionDAG &DAG) {
  MVT VT = V.getSimpleValueType();
  MVT EltVT = VT.getVectorElementType();
  V = peekThroughBitcasts(V);

  // If the bitcasts changed the element size, we can't extract an equivalent
  // element from the result.
  MVT NewVT = V.getSimpleValueType();
  if (!NewVT.isVector() ||
      NewVT.getScalarSizeInBits() != VT.getScalarSizeInBits())
    return SDValue();

  if (V.getOpcode() == ISD::BUILD_VECTOR ||
      (Idx == 0 && V.getOpcode() == ISD::SCALAR_TO_VECTOR)) {
    // Ensure the scalar operand is the same size as the destination.
    // FIXME: Add support for scalar truncation where possible.
    SDValue S = V.getOperand(Idx);
    if (EltVT.getSizeInBits() == S.getSimpleValueType().getSizeInBits())
      return DAG.getBitcast(EltVT, S);
  }

  return SDValue();
}

/// Helper to test for a load that can be folded with x86 shuffles.
///
/// This is particularly important because the set of instructions varies
/// significantly based on whether the operand is a load or not.
static bool isShuffleFoldableLoad(SDValue V) {
  V = peekThroughBitcasts(V);
  return ISD::isNON_EXTLoad(V.getNode());
}

/// Try to lower insertion of a single element into a zero vector.
///
/// This is a common pattern for which we have especially efficient lowerings
/// across all subtarget feature sets.
static SDValue lowerVectorShuffleAsElementInsertion(
    const SDLoc &DL, MVT VT, SDValue V1, SDValue V2, ArrayRef<int> Mask,
    const APInt &Zeroable, const X86Subtarget &Subtarget,
    SelectionDAG &DAG) {
  MVT ExtVT = VT;
  MVT EltVT = VT.getVectorElementType();

  int V2Index =
      find_if(Mask, [&Mask](int M) { return M >= (int)Mask.size(); }) -
      Mask.begin();
  bool IsV1Zeroable = true;
  for (int i = 0, Size = Mask.size(); i < Size; ++i)
    if (i != V2Index && !Zeroable[i]) {
      IsV1Zeroable = false;
      break;
    }

  // Check for a single input from a SCALAR_TO_VECTOR node.
  // FIXME: All of this should be canonicalized into INSERT_VECTOR_ELT and
  // all the smarts here sunk into that routine. However, the current
  // lowering of BUILD_VECTOR makes that nearly impossible until the old
  // vector shuffle lowering is dead.
  SDValue V2S = getScalarValueForVectorElement(V2, Mask[V2Index] - Mask.size(),
                                               DAG);
  if (V2S && DAG.getTargetLoweringInfo().isTypeLegal(V2S.getValueType())) {
    // We need to zext the scalar if it is smaller than an i32.
    V2S = DAG.getBitcast(EltVT, V2S);
    if (EltVT == MVT::i8 || EltVT == MVT::i16) {
      // Using zext to expand a narrow element won't work for non-zero
      // insertions.
      if (!IsV1Zeroable)
        return SDValue();

      // Zero-extend directly to i32.
      ExtVT = MVT::getVectorVT(MVT::i32, ExtVT.getSizeInBits() / 32);
      V2S = DAG.getNode(ISD::ZERO_EXTEND, DL, MVT::i32, V2S);
    }
    V2 = DAG.getNode(ISD::SCALAR_TO_VECTOR, DL, ExtVT, V2S);
  } else if (Mask[V2Index] != (int)Mask.size() || EltVT == MVT::i8 ||
             EltVT == MVT::i16) {
    // Either not inserting from the low element of the input or the input
    // element size is too small to use VZEXT_MOVL to clear the high bits.
    return SDValue();
  }

  if (!IsV1Zeroable) {
    // If V1 can't be treated as a zero vector we have fewer options to lower
    // this. We can't support integer vectors or non-zero targets cheaply, and
    // the V1 elements can't be permuted in any way.
    assert(VT == ExtVT && "Cannot change extended type when non-zeroable!");
    if (!VT.isFloatingPoint() || V2Index != 0)
      return SDValue();
    SmallVector<int, 8> V1Mask(Mask.begin(), Mask.end());
    V1Mask[V2Index] = -1;
    if (!isNoopShuffleMask(V1Mask))
      return SDValue();
    if (!VT.is128BitVector())
      return SDValue();

    // Otherwise, use MOVSD or MOVSS.
    assert((EltVT == MVT::f32 || EltVT == MVT::f64) &&
           "Only two types of floating point element types to handle!");
    return DAG.getNode(EltVT == MVT::f32 ? X86ISD::MOVSS : X86ISD::MOVSD, DL,
                       ExtVT, V1, V2);
  }

  // This lowering only works for the low element with floating point vectors.
  if (VT.isFloatingPoint() && V2Index != 0)
    return SDValue();

  V2 = DAG.getNode(X86ISD::VZEXT_MOVL, DL, ExtVT, V2);
  if (ExtVT != VT)
    V2 = DAG.getBitcast(VT, V2);

  if (V2Index != 0) {
    // If we have 4 or fewer lanes we can cheaply shuffle the element into
    // the desired position. Otherwise it is more efficient to do a vector
    // shift left. We know that we can do a vector shift left because all
    // the inputs are zero.
    if (VT.isFloatingPoint() || VT.getVectorNumElements() <= 4) {
      SmallVector<int, 4> V2Shuffle(Mask.size(), 1);
      V2Shuffle[V2Index] = 0;
      V2 = DAG.getVectorShuffle(VT, DL, V2, DAG.getUNDEF(VT), V2Shuffle);
    } else {
      V2 = DAG.getBitcast(MVT::v16i8, V2);
      V2 = DAG.getNode(
          X86ISD::VSHLDQ, DL, MVT::v16i8, V2,
          DAG.getConstant(V2Index * EltVT.getSizeInBits() / 8, DL, MVT::i8));
      V2 = DAG.getBitcast(VT, V2);
    }
  }
  return V2;
}

/// Try to lower broadcast of a single - truncated - integer element,
/// coming from a scalar_to_vector/build_vector node \p V0 with larger elements.
///
/// This assumes we have AVX2.
static SDValue lowerVectorShuffleAsTruncBroadcast(const SDLoc &DL, MVT VT,
                                                  SDValue V0, int BroadcastIdx,
                                                  const X86Subtarget &Subtarget,
                                                  SelectionDAG &DAG) {
  assert(Subtarget.hasAVX2() &&
         "We can only lower integer broadcasts with AVX2!");

  EVT EltVT = VT.getVectorElementType();
  EVT V0VT = V0.getValueType();

  assert(VT.isInteger() && "Unexpected non-integer trunc broadcast!");
  assert(V0VT.isVector() && "Unexpected non-vector vector-sized value!");

  EVT V0EltVT = V0VT.getVectorElementType();
  if (!V0EltVT.isInteger())
    return SDValue();

  const unsigned EltSize = EltVT.getSizeInBits();
  const unsigned V0EltSize = V0EltVT.getSizeInBits();

  // This is only a truncation if the original element type is larger.
  if (V0EltSize <= EltSize)
    return SDValue();

  assert(((V0EltSize % EltSize) == 0) &&
         "Scalar type sizes must all be powers of 2 on x86!");

  const unsigned V0Opc = V0.getOpcode();
  const unsigned Scale = V0EltSize / EltSize;
  const unsigned V0BroadcastIdx = BroadcastIdx / Scale;

  if ((V0Opc != ISD::SCALAR_TO_VECTOR || V0BroadcastIdx != 0) &&
      V0Opc != ISD::BUILD_VECTOR)
    return SDValue();

  SDValue Scalar = V0.getOperand(V0BroadcastIdx);

  // If we're extracting non-least-significant bits, shift so we can truncate.
  // Hopefully, we can fold away the trunc/srl/load into the broadcast.
  // Even if we can't (and !isShuffleFoldableLoad(Scalar)), prefer
  // vpbroadcast+vmovd+shr to vpshufb(m)+vmovd.
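  // For example, broadcasting i32 element 3 of a v2i64 source gives
  // Scale == 2, so we take i64 element 1 and shift it right by 32 bits
  // (OffsetIdx == 1) before truncating.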
  if (const int OffsetIdx = BroadcastIdx % Scale)
    Scalar = DAG.getNode(ISD::SRL, DL, Scalar.getValueType(), Scalar,
                         DAG.getConstant(OffsetIdx * EltSize, DL, MVT::i8));

  return DAG.getNode(X86ISD::VBROADCAST, DL, VT,
                     DAG.getNode(ISD::TRUNCATE, DL, EltVT, Scalar));
}

/// Try to lower broadcast of a single element.
///
/// For convenience, this code also bundles all of the subtarget feature set
/// filtering. While a little annoying to re-dispatch on type here, there isn't
/// a convenient way to factor it out.
static SDValue lowerVectorShuffleAsBroadcast(const SDLoc &DL, MVT VT,
                                             SDValue V1, SDValue V2,
                                             ArrayRef<int> Mask,
                                             const X86Subtarget &Subtarget,
                                             SelectionDAG &DAG) {
  if (!((Subtarget.hasSSE3() && VT == MVT::v2f64) ||
        (Subtarget.hasAVX() && VT.isFloatingPoint()) ||
        (Subtarget.hasAVX2() && VT.isInteger())))
    return SDValue();

  // With MOVDDUP (v2f64) we can broadcast from a register or a load, otherwise
  // we can only broadcast from a register with AVX2.
  unsigned NumElts = Mask.size();
  unsigned Opcode = (VT == MVT::v2f64 && !Subtarget.hasAVX2())
                        ? X86ISD::MOVDDUP
                        : X86ISD::VBROADCAST;
  bool BroadcastFromReg = (Opcode == X86ISD::MOVDDUP) || Subtarget.hasAVX2();

  // Check that the mask is a broadcast.
  int BroadcastIdx = -1;
  for (int i = 0; i != (int)NumElts; ++i) {
    SmallVector<int, 8> BroadcastMask(NumElts, i);
    if (isShuffleEquivalent(V1, V2, Mask, BroadcastMask)) {
      BroadcastIdx = i;
      break;
    }
  }

  if (BroadcastIdx < 0)
    return SDValue();
  assert(BroadcastIdx < (int)Mask.size() && "We only expect to be called with "
                                            "a sorted mask where the broadcast "
                                            "comes from V1.");

  // Go up the chain of (vector) values to find a scalar load that we can
  // combine with the broadcast.
  SDValue V = V1;
  for (;;) {
    switch (V.getOpcode()) {
    case ISD::BITCAST: {
      // Peek through bitcasts as long as BroadcastIdx can be adjusted.
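      // For example, element 1 of a v2i64 is elements 2-3 of the same value
      // viewed as v4i32, so looking through such a bitcast scales the index
      // by the ratio of the element sizes (BroadcastIdx *= 2 here).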
      SDValue VSrc = V.getOperand(0);
      unsigned NumEltBits = V.getScalarValueSizeInBits();
      unsigned NumSrcBits = VSrc.getScalarValueSizeInBits();
      if ((NumEltBits % NumSrcBits) == 0)
        BroadcastIdx *= (NumEltBits / NumSrcBits);
      else if ((NumSrcBits % NumEltBits) == 0 &&
               (BroadcastIdx % (NumSrcBits / NumEltBits)) == 0)
        BroadcastIdx /= (NumSrcBits / NumEltBits);
      else
        break;
      V = VSrc;
      continue;
    }
    case ISD::CONCAT_VECTORS: {
      int OperandSize =
          V.getOperand(0).getSimpleValueType().getVectorNumElements();
      V = V.getOperand(BroadcastIdx / OperandSize);
      BroadcastIdx %= OperandSize;
      continue;
    }
    case ISD::INSERT_SUBVECTOR: {
      SDValue VOuter = V.getOperand(0), VInner = V.getOperand(1);
      auto ConstantIdx = dyn_cast<ConstantSDNode>(V.getOperand(2));
      if (!ConstantIdx)
        break;

      int BeginIdx = (int)ConstantIdx->getZExtValue();
      int EndIdx =
          BeginIdx + (int)VInner.getSimpleValueType().getVectorNumElements();
      if (BroadcastIdx >= BeginIdx && BroadcastIdx < EndIdx) {
        BroadcastIdx -= BeginIdx;
        V = VInner;
      } else {
        V = VOuter;
      }
      continue;
    }
    }
    break;
  }

  // Ensure the source vector and BroadcastIdx are for a suitable type.
  if (VT.getScalarSizeInBits() != V.getScalarValueSizeInBits()) {
    unsigned NumEltBits = VT.getScalarSizeInBits();
    unsigned NumSrcBits = V.getScalarValueSizeInBits();
    if ((NumSrcBits % NumEltBits) == 0)
      BroadcastIdx *= (NumSrcBits / NumEltBits);
    else if ((NumEltBits % NumSrcBits) == 0 &&
             (BroadcastIdx % (NumEltBits / NumSrcBits)) == 0)
      BroadcastIdx /= (NumEltBits / NumSrcBits);
    else
      return SDValue();

    unsigned NumSrcElts = V.getValueSizeInBits() / NumEltBits;
    MVT SrcVT = MVT::getVectorVT(VT.getScalarType(), NumSrcElts);
    V = DAG.getBitcast(SrcVT, V);
  }

  // Check if this is a broadcast of a scalar. We special case lowering
  // for scalars so that we can more effectively fold with loads.
  // First, look through bitcast: if the original value has a larger element
  // type than the shuffle, the broadcast element is in essence truncated.
  // Make that explicit to ease folding.
  if (V.getOpcode() == ISD::BITCAST && VT.isInteger())
    if (SDValue TruncBroadcast = lowerVectorShuffleAsTruncBroadcast(
            DL, VT, V.getOperand(0), BroadcastIdx, Subtarget, DAG))
      return TruncBroadcast;

  MVT BroadcastVT = VT;

  // Peek through any bitcast (only useful for loads).
  SDValue BC = peekThroughBitcasts(V);

  // Also check the simpler case, where we can directly reuse the scalar.
  if ((V.getOpcode() == ISD::BUILD_VECTOR && V.hasOneUse()) ||
      (V.getOpcode() == ISD::SCALAR_TO_VECTOR && BroadcastIdx == 0)) {
    V = V.getOperand(BroadcastIdx);

    // If we can't broadcast from a register, check that the input is a load.
    if (!BroadcastFromReg && !isShuffleFoldableLoad(V))
      return SDValue();
  } else if (MayFoldLoad(BC) && !cast<LoadSDNode>(BC)->isVolatile()) {
    // 32-bit targets need to load i64 as a f64 and then bitcast the result.
    if (!Subtarget.is64Bit() && VT.getScalarType() == MVT::i64) {
      BroadcastVT = MVT::getVectorVT(MVT::f64, VT.getVectorNumElements());
      Opcode = (BroadcastVT.is128BitVector() && !Subtarget.hasAVX2())
                   ? X86ISD::MOVDDUP
                   : Opcode;
    }

    // If we are broadcasting a load that is only used by the shuffle
    // then we can reduce the vector load to the broadcasted scalar load.
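    // makeEquivalentMemoryOrdering below gives the narrowed scalar load the
    // same chain dependencies as the original vector load, so other memory
    // operations keep their ordering relative to it.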
    LoadSDNode *Ld = cast<LoadSDNode>(BC);
    SDValue BaseAddr = Ld->getOperand(1);
    EVT SVT = BroadcastVT.getScalarType();
    unsigned Offset = BroadcastIdx * SVT.getStoreSize();
    SDValue NewAddr = DAG.getMemBasePlusOffset(BaseAddr, Offset, DL);
    V = DAG.getLoad(SVT, DL, Ld->getChain(), NewAddr,
                    DAG.getMachineFunction().getMachineMemOperand(
                        Ld->getMemOperand(), Offset, SVT.getStoreSize()));
    DAG.makeEquivalentMemoryOrdering(Ld, V);
  } else if (!BroadcastFromReg) {
    // We can't broadcast from a vector register.
    return SDValue();
  } else if (BroadcastIdx != 0) {
    // We can only broadcast from the zero-element of a vector register,
    // but it can be advantageous to broadcast from the zero-element of a
    // subvector.
    if (!VT.is256BitVector() && !VT.is512BitVector())
      return SDValue();

    // VPERMQ/VPERMPD can perform the cross-lane shuffle directly.
    if (VT == MVT::v4f64 || VT == MVT::v4i64)
      return SDValue();

    // Only broadcast the zero-element of a 128-bit subvector.
    unsigned EltSize = VT.getScalarSizeInBits();
    if (((BroadcastIdx * EltSize) % 128) != 0)
      return SDValue();

    // The shuffle input might have been a bitcast we looked through; look at
    // the original input vector. Emit an EXTRACT_SUBVECTOR of that type; we'll
    // later bitcast it to BroadcastVT.
    assert(V.getScalarValueSizeInBits() == BroadcastVT.getScalarSizeInBits() &&
           "Unexpected vector element size");
    assert((V.getValueSizeInBits() == 256 || V.getValueSizeInBits() == 512) &&
           "Unexpected vector size");
    V = extract128BitVector(V, BroadcastIdx, DAG, DL);
  }

  if (Opcode == X86ISD::MOVDDUP && !V.getValueType().isVector())
    V = DAG.getNode(ISD::SCALAR_TO_VECTOR, DL, MVT::v2f64,
                    DAG.getBitcast(MVT::f64, V));

  // Bitcast back to the same scalar type as BroadcastVT.
  MVT SrcVT = V.getSimpleValueType();
  if (SrcVT.getScalarType() != BroadcastVT.getScalarType()) {
    assert(SrcVT.getScalarSizeInBits() == BroadcastVT.getScalarSizeInBits() &&
           "Unexpected vector element size");
    if (SrcVT.isVector()) {
      unsigned NumSrcElts = SrcVT.getVectorNumElements();
      SrcVT = MVT::getVectorVT(BroadcastVT.getScalarType(), NumSrcElts);
    } else {
      SrcVT = BroadcastVT.getScalarType();
    }
    V = DAG.getBitcast(SrcVT, V);
  }

  // 32-bit targets need to load i64 as a f64 and then bitcast the result.
  if (!Subtarget.is64Bit() && SrcVT == MVT::i64) {
    V = DAG.getBitcast(MVT::f64, V);
    unsigned NumBroadcastElts = BroadcastVT.getVectorNumElements();
    BroadcastVT = MVT::getVectorVT(MVT::f64, NumBroadcastElts);
  }

  // We only support broadcasting from 128-bit vectors to minimize the
  // number of patterns we need to deal with in isel. So extract down to
  // 128-bits, removing as many bitcasts as possible.
  if (SrcVT.getSizeInBits() > 128) {
    MVT ExtVT = MVT::getVectorVT(SrcVT.getScalarType(),
                                 128 / SrcVT.getScalarSizeInBits());
    V = extract128BitVector(peekThroughBitcasts(V), 0, DAG, DL);
    V = DAG.getBitcast(ExtVT, V);
  }

  return DAG.getBitcast(VT, DAG.getNode(Opcode, DL, BroadcastVT, V));
}

// Check for whether we can use INSERTPS to perform the shuffle. We only use
// INSERTPS when the V1 elements are already in the correct locations
// because otherwise we can just always use two SHUFPS instructions which
// are much smaller to encode than a SHUFPS and an INSERTPS. We can also
// perform INSERTPS if a single V1 element is out of place and all V2
// elements are zeroable.
static bool matchVectorShuffleAsInsertPS(SDValue &V1, SDValue &V2,
                                         unsigned &InsertPSMask,
                                         const APInt &Zeroable,
                                         ArrayRef<int> Mask,
                                         SelectionDAG &DAG) {
  assert(V1.getSimpleValueType().is128BitVector() && "Bad operand type!");
  assert(V2.getSimpleValueType().is128BitVector() && "Bad operand type!");
  assert(Mask.size() == 4 && "Unexpected mask size for v4 shuffle!");

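  // The 8-bit INSERTPS immediate encodes, from high bits to low: a 2-bit
  // source element index (bits 7:6), a 2-bit destination element index
  // (bits 5:4), and a 4-bit zero mask (bits 3:0) of result elements to clear.
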
  // Attempt to match INSERTPS with one element from VA or VB being
  // inserted into VA (or undef). If successful, V1, V2 and InsertPSMask
  // are updated.
  auto matchAsInsertPS = [&](SDValue VA, SDValue VB,
                             ArrayRef<int> CandidateMask) {
    unsigned ZMask = 0;
    int VADstIndex = -1;
    int VBDstIndex = -1;
    bool VAUsedInPlace = false;

    for (int i = 0; i < 4; ++i) {
      // Synthesize a zero mask from the zeroable elements (includes undefs).
      if (Zeroable[i]) {
        ZMask |= 1 << i;
        continue;
      }

      // Flag if we use any VA inputs in place.
      if (i == CandidateMask[i]) {
        VAUsedInPlace = true;
        continue;
      }

      // We can only insert a single non-zeroable element.
      if (VADstIndex >= 0 || VBDstIndex >= 0)
        return false;

      if (CandidateMask[i] < 4) {
        // VA input out of place for insertion.
        VADstIndex = i;
      } else {
        // VB input for insertion.
        VBDstIndex = i;
      }
    }

    // Don't bother if we have no (non-zeroable) element for insertion.
    if (VADstIndex < 0 && VBDstIndex < 0)
      return false;

    // Determine element insertion src/dst indices. The src index is from the
    // start of the inserted vector, not the start of the concatenated vector.
    unsigned VBSrcIndex = 0;
    if (VADstIndex >= 0) {
      // If we have a VA input out of place, we use VA as the V2 element
      // insertion and don't use the original V2 at all.
      VBSrcIndex = CandidateMask[VADstIndex];
      VBDstIndex = VADstIndex;
      VB = VA;
    } else {
      VBSrcIndex = CandidateMask[VBDstIndex] - 4;
    }

    // If no V1 inputs are used in place, then the result is created only from
    // the zero mask and the V2 insertion - so remove V1 dependency.
    if (!VAUsedInPlace)
      VA = DAG.getUNDEF(MVT::v4f32);

    // Update V1, V2 and InsertPSMask accordingly.
    V1 = VA;
    V2 = VB;

    // Insert the V2 element into the desired position.
    InsertPSMask = VBSrcIndex << 6 | VBDstIndex << 4 | ZMask;
    assert((InsertPSMask & ~0xFFu) == 0 && "Invalid mask!");
    return true;
  };

  if (matchAsInsertPS(V1, V2, Mask))
    return true;

  // Commute and try again.
  SmallVector<int, 4> CommutedMask(Mask.begin(), Mask.end());
  ShuffleVectorSDNode::commuteMask(CommutedMask);
  if (matchAsInsertPS(V2, V1, CommutedMask))
    return true;

  return false;
}

static SDValue lowerVectorShuffleAsInsertPS(const SDLoc &DL, SDValue V1,
                                            SDValue V2, ArrayRef<int> Mask,
                                            const APInt &Zeroable,
                                            SelectionDAG &DAG) {
  assert(V1.getSimpleValueType() == MVT::v4f32 && "Bad operand type!");
  assert(V2.getSimpleValueType() == MVT::v4f32 && "Bad operand type!");

  // Attempt to match the insertps pattern.
  unsigned InsertPSMask;
  if (!matchVectorShuffleAsInsertPS(V1, V2, InsertPSMask, Zeroable, Mask, DAG))
    return SDValue();

  // Insert the V2 element into the desired position.
  return DAG.getNode(X86ISD::INSERTPS, DL, MVT::v4f32, V1, V2,
                     DAG.getConstant(InsertPSMask, DL, MVT::i8));
}

/// Try to lower a shuffle as a permute of the inputs followed by an
/// UNPCK instruction.
///
/// This specifically targets cases where we end up alternating between
/// the two inputs, and so can permute them into something that feeds a single
/// UNPCK instruction. Note that this routine only targets integer vectors
/// because for floating point vectors we have a generalized SHUFPS lowering
/// strategy that handles everything that doesn't *exactly* match an unpack,
/// making this clever lowering unnecessary.
static SDValue lowerVectorShuffleAsPermuteAndUnpack(const SDLoc &DL, MVT VT,
                                                    SDValue V1, SDValue V2,
                                                    ArrayRef<int> Mask,
                                                    SelectionDAG &DAG) {
  assert(!VT.isFloatingPoint() &&
         "This routine only supports integer vectors.");
  assert(VT.is128BitVector() &&
         "This routine only works on 128-bit vectors.");
  assert(!V2.isUndef() &&
         "This routine should only be used when blending two inputs.");
  assert(Mask.size() >= 2 && "Single element masks are invalid.");

  int Size = Mask.size();

  int NumLoInputs =
      count_if(Mask, [Size](int M) { return M >= 0 && M % Size < Size / 2; });
  int NumHiInputs =
      count_if(Mask, [Size](int M) { return M % Size >= Size / 2; });

  bool UnpackLo = NumLoInputs >= NumHiInputs;

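  // For example, for v4i32 and Mask <0, 4, 1, 5>, V1 feeds the even result
  // slots and V2 the odd ones, so TryUnpack below succeeds at Scale == 1 with
  // no-op permutes and a single UNPCKL.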
  auto TryUnpack = [&](int ScalarSize, int Scale) {
    SmallVector<int, 16> V1Mask((unsigned)Size, -1);
    SmallVector<int, 16> V2Mask((unsigned)Size, -1);

    for (int i = 0; i < Size; ++i) {
      if (Mask[i] < 0)
        continue;

      // Each element of the unpack contains Scale elements from this mask.
      int UnpackIdx = i / Scale;

      // We only handle the case where V1 feeds the first slots of the unpack.
      // We rely on canonicalization to ensure this is the case.
      if ((UnpackIdx % 2 == 0) != (Mask[i] < Size))
        return SDValue();

      // Setup the mask for this input. The indexing is tricky as we have to
      // handle the unpack stride.
      SmallVectorImpl<int> &VMask = (UnpackIdx % 2 == 0) ? V1Mask : V2Mask;
      VMask[(UnpackIdx / 2) * Scale + i % Scale + (UnpackLo ? 0 : Size / 2)] =
          Mask[i] % Size;
    }

    // If we will have to shuffle both inputs to use the unpack, check whether
    // we can just unpack first and shuffle the result. If so, skip this unpack.
    if ((NumLoInputs == 0 || NumHiInputs == 0) && !isNoopShuffleMask(V1Mask) &&
        !isNoopShuffleMask(V2Mask))
      return SDValue();

    // Shuffle the inputs into place.
    V1 = DAG.getVectorShuffle(VT, DL, V1, DAG.getUNDEF(VT), V1Mask);
    V2 = DAG.getVectorShuffle(VT, DL, V2, DAG.getUNDEF(VT), V2Mask);

    // Cast the inputs to the type we will use to unpack them.
    MVT UnpackVT =
        MVT::getVectorVT(MVT::getIntegerVT(ScalarSize), Size / Scale);
    V1 = DAG.getBitcast(UnpackVT, V1);
    V2 = DAG.getBitcast(UnpackVT, V2);

    // Unpack the inputs and cast the result back to the desired type.
    return DAG.getBitcast(
        VT, DAG.getNode(UnpackLo ? X86ISD::UNPCKL : X86ISD::UNPCKH, DL,
                        UnpackVT, V1, V2));
  };

  // We try each unpack from the largest to the smallest to try and find one
  // that fits this mask.
  int OrigScalarSize = VT.getScalarSizeInBits();
  for (int ScalarSize = 64; ScalarSize >= OrigScalarSize; ScalarSize /= 2)
    if (SDValue Unpack = TryUnpack(ScalarSize, ScalarSize / OrigScalarSize))
      return Unpack;

  // If none of the unpack-rooted lowerings worked (or were profitable) try an
  // initial unpack.
  if (NumLoInputs == 0 || NumHiInputs == 0) {
    assert((NumLoInputs > 0 || NumHiInputs > 0) &&
           "We have to have *some* inputs!");
    int HalfOffset = NumLoInputs == 0 ? Size / 2 : 0;

    // FIXME: We could consider the total complexity of the permute of each
    // possible unpacking. Or at the least we should consider how many
    // half-crossings are created.
    // FIXME: We could consider commuting the unpacks.

    SmallVector<int, 32> PermMask((unsigned)Size, -1);
    for (int i = 0; i < Size; ++i) {
      if (Mask[i] < 0)
        continue;

      assert(Mask[i] % Size >= HalfOffset && "Found input from wrong half!");

      PermMask[i] =
          2 * ((Mask[i] % Size) - HalfOffset) + (Mask[i] < Size ? 0 : 1);
    }
    return DAG.getVectorShuffle(
        VT, DL, DAG.getNode(NumLoInputs == 0 ? X86ISD::UNPCKH : X86ISD::UNPCKL,
                            DL, VT, V1, V2),
        DAG.getUNDEF(VT), PermMask);
  }

  return SDValue();
}

/// Handle lowering of 2-lane 64-bit floating point shuffles.
///
/// This is the basis function for the 2-lane 64-bit shuffles as we have full
/// support for floating point shuffles but not integer shuffles. These
/// instructions will incur a domain crossing penalty on some chips though so
/// it is better to avoid lowering through this for integer vectors where
/// possible.
static SDValue lowerV2F64VectorShuffle(const SDLoc &DL, ArrayRef<int> Mask,
                                       const APInt &Zeroable,
                                       SDValue V1, SDValue V2,
                                       const X86Subtarget &Subtarget,
                                       SelectionDAG &DAG) {
  assert(V1.getSimpleValueType() == MVT::v2f64 && "Bad operand type!");
  assert(V2.getSimpleValueType() == MVT::v2f64 && "Bad operand type!");
  assert(Mask.size() == 2 && "Unexpected mask size for v2 shuffle!");

  if (V2.isUndef()) {
    // Check for being able to broadcast a single element.
    if (SDValue Broadcast = lowerVectorShuffleAsBroadcast(
            DL, MVT::v2f64, V1, V2, Mask, Subtarget, DAG))
      return Broadcast;

    // Straight shuffle of a single input vector. Simulate this by using the
    // single input as both of the "inputs" to this instruction.
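    // For example, a mask of {1, 0} (swap) encodes as the immediate 0b01 and
    // {1, 1} (splat the high element) as 0b11.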
    unsigned SHUFPDMask = (Mask[0] == 1) | ((Mask[1] == 1) << 1);

    if (Subtarget.hasAVX()) {
      // If we have AVX, we can use VPERMILPD which will allow folding a load
      // into the shuffle.
      return DAG.getNode(X86ISD::VPERMILPI, DL, MVT::v2f64, V1,
                         DAG.getConstant(SHUFPDMask, DL, MVT::i8));
    }

    return DAG.getNode(
        X86ISD::SHUFP, DL, MVT::v2f64,
        Mask[0] == SM_SentinelUndef ? DAG.getUNDEF(MVT::v2f64) : V1,
        Mask[1] == SM_SentinelUndef ? DAG.getUNDEF(MVT::v2f64) : V1,
        DAG.getConstant(SHUFPDMask, DL, MVT::i8));
  }
  assert(Mask[0] >= 0 && "No undef lanes in multi-input v2 shuffles!");
  assert(Mask[1] >= 0 && "No undef lanes in multi-input v2 shuffles!");
  assert(Mask[0] < 2 && "We sort V1 to be the first input.");
  assert(Mask[1] >= 2 && "We sort V2 to be the second input.");

  // When loading a scalar and then shuffling it into a vector we can often do
  // the insertion cheaply.
  if (SDValue Insertion = lowerVectorShuffleAsElementInsertion(
          DL, MVT::v2f64, V1, V2, Mask, Zeroable, Subtarget, DAG))
    return Insertion;
  // Try inverting the insertion since for v2 masks it is easy to do and we
  // can't reliably sort the mask one way or the other.
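  // XOR-ing each index with 2 swaps which input it refers to; e.g. the mask
  // {0, 3} becomes {2, 1}.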
  int InverseMask[2] = {Mask[0] < 0 ? -1 : (Mask[0] ^ 2),
                        Mask[1] < 0 ? -1 : (Mask[1] ^ 2)};
  if (SDValue Insertion = lowerVectorShuffleAsElementInsertion(
          DL, MVT::v2f64, V2, V1, InverseMask, Zeroable, Subtarget, DAG))
    return Insertion;

  // Try to use one of the special instruction patterns to handle two common
  // blend patterns if a zero-blend above didn't work.
  if (isShuffleEquivalent(V1, V2, Mask, {0, 3}) ||
      isShuffleEquivalent(V1, V2, Mask, {1, 3}))
    if (SDValue V1S = getScalarValueForVectorElement(V1, Mask[0], DAG))
      // We can either use a special instruction to load over the low double or
      // to move just the low double.
      return DAG.getNode(
          X86ISD::MOVSD, DL, MVT::v2f64, V2,
          DAG.getNode(ISD::SCALAR_TO_VECTOR, DL, MVT::v2f64, V1S));

  if (Subtarget.hasSSE41())
    if (SDValue Blend = lowerVectorShuffleAsBlend(DL, MVT::v2f64, V1, V2, Mask,
                                                  Zeroable, Subtarget, DAG))
      return Blend;

  // Use dedicated unpack instructions for masks that match their pattern.
  if (SDValue V =
          lowerVectorShuffleWithUNPCK(DL, MVT::v2f64, Mask, V1, V2, DAG))
    return V;

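  // Bit 0 of the immediate selects the element of V1 and bit 1 the element of
  // V2; e.g. the mask {1, 2} encodes as 0b01 and {0, 3} as 0b10.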
  unsigned SHUFPDMask = (Mask[0] == 1) | (((Mask[1] - 2) == 1) << 1);
  return DAG.getNode(X86ISD::SHUFP, DL, MVT::v2f64, V1, V2,
                     DAG.getConstant(SHUFPDMask, DL, MVT::i8));
}

/// Handle lowering of 2-lane 64-bit integer shuffles.
///
/// Tries to lower a 2-lane 64-bit shuffle using shuffle operations provided by
/// the integer unit to minimize domain crossing penalties. However, for blends
/// it falls back to the floating point shuffle operation with appropriate bit
/// casting.
static SDValue lowerV2I64VectorShuffle(const SDLoc &DL, ArrayRef<int> Mask,
                                       const APInt &Zeroable,
                                       SDValue V1, SDValue V2,
                                       const X86Subtarget &Subtarget,
                                       SelectionDAG &DAG) {
  assert(V1.getSimpleValueType() == MVT::v2i64 && "Bad operand type!");
  assert(V2.getSimpleValueType() == MVT::v2i64 && "Bad operand type!");
  assert(Mask.size() == 2 && "Unexpected mask size for v2 shuffle!");

  if (V2.isUndef()) {
    // Check for being able to broadcast a single element.
    if (SDValue Broadcast = lowerVectorShuffleAsBroadcast(
            DL, MVT::v2i64, V1, V2, Mask, Subtarget, DAG))
      return Broadcast;

    // Straight shuffle of a single input vector. For everything from SSE2
    // onward this has a single fast instruction with no scary immediates.
    // We have to map the mask as it is actually a v4i32 shuffle instruction.
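    // For example, a v2i64 mask of {1, 0} widens to the v4i32 mask
    // {2, 3, 0, 1}.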
    V1 = DAG.getBitcast(MVT::v4i32, V1);
    int WidenedMask[4] = {
        std::max(Mask[0], 0) * 2, std::max(Mask[0], 0) * 2 + 1,
        std::max(Mask[1], 0) * 2, std::max(Mask[1], 0) * 2 + 1};
    return DAG.getBitcast(
        MVT::v2i64,
        DAG.getNode(X86ISD::PSHUFD, DL, MVT::v4i32, V1,
                    getV4X86ShuffleImm8ForMask(WidenedMask, DL, DAG)));
  }
  assert(Mask[0] != -1 && "No undef lanes in multi-input v2 shuffles!");
  assert(Mask[1] != -1 && "No undef lanes in multi-input v2 shuffles!");
  assert(Mask[0] < 2 && "We sort V1 to be the first input.");
  assert(Mask[1] >= 2 && "We sort V2 to be the second input.");

  // Try to use shift instructions.
  if (SDValue Shift = lowerVectorShuffleAsShift(DL, MVT::v2i64, V1, V2, Mask,
                                                Zeroable, Subtarget, DAG))
    return Shift;

  // When loading a scalar and then shuffling it into a vector we can often do
  // the insertion cheaply.
  if (SDValue Insertion = lowerVectorShuffleAsElementInsertion(
          DL, MVT::v2i64, V1, V2, Mask, Zeroable, Subtarget, DAG))
    return Insertion;
  // Try inverting the insertion since for v2 masks it is easy to do and we
  // can't reliably sort the mask one way or the other.
  int InverseMask[2] = {Mask[0] ^ 2, Mask[1] ^ 2};
  if (SDValue Insertion = lowerVectorShuffleAsElementInsertion(
          DL, MVT::v2i64, V2, V1, InverseMask, Zeroable, Subtarget, DAG))
    return Insertion;

  // We have different paths for blend lowering, but they all must use the
  // *exact* same predicate.
  bool IsBlendSupported = Subtarget.hasSSE41();
  if (IsBlendSupported)
    if (SDValue Blend = lowerVectorShuffleAsBlend(DL, MVT::v2i64, V1, V2, Mask,
                                                  Zeroable, Subtarget, DAG))
      return Blend;

  // Use dedicated unpack instructions for masks that match their pattern.
  if (SDValue V =
          lowerVectorShuffleWithUNPCK(DL, MVT::v2i64, Mask, V1, V2, DAG))
    return V;

  // Try to use byte rotation instructions.
  // It's more profitable for pre-SSSE3 to use shuffles/unpacks.
  if (Subtarget.hasSSSE3()) {
    if (Subtarget.hasVLX())
      if (SDValue Rotate = lowerVectorShuffleAsRotate(DL, MVT::v2i64, V1, V2,
                                                      Mask, Subtarget, DAG))
        return Rotate;

    if (SDValue Rotate = lowerVectorShuffleAsByteRotate(
            DL, MVT::v2i64, V1, V2, Mask, Subtarget, DAG))
      return Rotate;
  }

  // If we have direct support for blends, we should lower by decomposing into
  // a permute. That will be faster than the domain cross.
  if (IsBlendSupported)
    return lowerVectorShuffleAsDecomposedShuffleBlend(DL, MVT::v2i64, V1, V2,
                                                      Mask, Subtarget, DAG);

  // We implement this with SHUFPD which is pretty lame because it will likely
  // incur 2 cycles of stall for integer vectors on Nehalem and older chips.
  // However, all the alternatives are still more cycles and newer chips don't
  // have this problem. It would be really nice if x86 had better shuffles here.
  V1 = DAG.getBitcast(MVT::v2f64, V1);
  V2 = DAG.getBitcast(MVT::v2f64, V2);
  return DAG.getBitcast(MVT::v2i64,
                        DAG.getVectorShuffle(MVT::v2f64, DL, V1, V2, Mask));
}

/// Test whether this can be lowered with a single SHUFPS instruction.
///
/// This is used to disable more specialized lowerings when the shufps lowering
/// will happen to be efficient.
static bool isSingleSHUFPSMask(ArrayRef<int> Mask) {
  // This routine only handles 128-bit shufps.
  assert(Mask.size() == 4 && "Unsupported mask size!");
  assert(Mask[0] >= -1 && Mask[0] < 8 && "Out of bound mask element!");
  assert(Mask[1] >= -1 && Mask[1] < 8 && "Out of bound mask element!");
  assert(Mask[2] >= -1 && Mask[2] < 8 && "Out of bound mask element!");
  assert(Mask[3] >= -1 && Mask[3] < 8 && "Out of bound mask element!");

  // To lower with a single SHUFPS we need to have the low half and high half
  // each requiring a single input.
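  // For example, {0, 2, 4, 6} qualifies (the low half reads only V1 and the
  // high half only V2), but {0, 4, 1, 5} does not.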
  if (Mask[0] >= 0 && Mask[1] >= 0 && (Mask[0] < 4) != (Mask[1] < 4))
    return false;
  if (Mask[2] >= 0 && Mask[3] >= 0 && (Mask[2] < 4) != (Mask[3] < 4))
    return false;

  return true;
}

/// Lower a vector shuffle using the SHUFPS instruction.
///
/// This is a helper routine dedicated to lowering vector shuffles using SHUFPS.
/// It makes no assumptions about whether this is the *best* lowering, it simply
/// uses it.
static SDValue lowerVectorShuffleWithSHUFPS(const SDLoc &DL, MVT VT,
                                            ArrayRef<int> Mask, SDValue V1,
                                            SDValue V2, SelectionDAG &DAG) {
  SDValue LowV = V1, HighV = V2;
  int NewMask[4] = {Mask[0], Mask[1], Mask[2], Mask[3]};

  int NumV2Elements = count_if(Mask, [](int M) { return M >= 4; });

  if (NumV2Elements == 1) {
    int V2Index = find_if(Mask, [](int M) { return M >= 4; }) - Mask.begin();

    // Compute the index adjacent to V2Index and in the same half by toggling
    // the low bit.
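    // For example, V2Index == 2 pairs with index 3, and V2Index == 1 pairs
    // with index 0.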
    int V2AdjIndex = V2Index ^ 1;

    if (Mask[V2AdjIndex] < 0) {
      // Handles all the cases where we have a single V2 element and an undef.
      // This will only ever happen in the high lanes because we commute the
      // vector otherwise.
      if (V2Index < 2)
        std::swap(LowV, HighV);
      NewMask[V2Index] -= 4;
    } else {
      // Handle the case where the V2 element ends up adjacent to a V1 element.
      // To make this work, blend them together as the first step.
      int V1Index = V2AdjIndex;
      int BlendMask[4] = {Mask[V2Index] - 4, 0, Mask[V1Index], 0};
      V2 = DAG.getNode(X86ISD::SHUFP, DL, VT, V2, V1,
                       getV4X86ShuffleImm8ForMask(BlendMask, DL, DAG));

      // Now proceed to reconstruct the final blend as we have the necessary
      // high or low half formed.
      if (V2Index < 2) {
        LowV = V2;
        HighV = V1;
      } else {
        HighV = V2;
      }
      NewMask[V1Index] = 2; // We put the V1 element in V2[2].
      NewMask[V2Index] = 0; // We shifted the V2 element into V2[0].
    }
  } else if (NumV2Elements == 2) {
    if (Mask[0] < 4 && Mask[1] < 4) {
      // Handle the easy case where we have V1 in the low lanes and V2 in the
      // high lanes.
      NewMask[2] -= 4;
      NewMask[3] -= 4;
    } else if (Mask[2] < 4 && Mask[3] < 4) {
      // We also handle the reversed case because this utility may get called
      // when we detect a SHUFPS pattern but can't easily commute the shuffle to
      // arrange things in the right direction.
      NewMask[0] -= 4;
      NewMask[1] -= 4;
      HighV = V1;
      LowV = V2;
    } else {
      // We have a mixture of V1 and V2 in both low and high lanes. Rather than
      // trying to place elements directly, just blend them and set up the final
      // shuffle to place them.

      // The first two blend mask elements are for V1, the second two are for
      // V2.
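      // For example, for Mask = {0, 5, 2, 7} the blend computes
      // SHUFPS(V1, V2, {0, 2, 1, 3}) = [V1[0], V1[2], V2[1], V2[3]] and the
      // final shuffle {0, 2, 1, 3} then places the elements.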
      int BlendMask[4] = {Mask[0] < 4 ? Mask[0] : Mask[1],
                          Mask[2] < 4 ? Mask[2] : Mask[3],
                          (Mask[0] >= 4 ? Mask[0] : Mask[1]) - 4,
                          (Mask[2] >= 4 ? Mask[2] : Mask[3]) - 4};
      V1 = DAG.getNode(X86ISD::SHUFP, DL, VT, V1, V2,
                       getV4X86ShuffleImm8ForMask(BlendMask, DL, DAG));

      // Now we do a normal shuffle of V1 by giving V1 as both operands to
      // a blend.
      LowV = HighV = V1;
      NewMask[0] = Mask[0] < 4 ? 0 : 2;
      NewMask[1] = Mask[0] < 4 ? 2 : 0;
      NewMask[2] = Mask[2] < 4 ? 1 : 3;
      NewMask[3] = Mask[2] < 4 ? 3 : 1;
    }
  }
  return DAG.getNode(X86ISD::SHUFP, DL, VT, LowV, HighV,
                     getV4X86ShuffleImm8ForMask(NewMask, DL, DAG));
}

/// Lower 4-lane 32-bit floating point shuffles.
///
/// Uses instructions exclusively from the floating point unit to minimize
/// domain crossing penalties, as these are sufficient to implement all v4f32
/// shuffles.
static SDValue lowerV4F32VectorShuffle(const SDLoc &DL, ArrayRef<int> Mask,
                                       const APInt &Zeroable,
                                       SDValue V1, SDValue V2,
                                       const X86Subtarget &Subtarget,
                                       SelectionDAG &DAG) {
  assert(V1.getSimpleValueType() == MVT::v4f32 && "Bad operand type!");
  assert(V2.getSimpleValueType() == MVT::v4f32 && "Bad operand type!");
  assert(Mask.size() == 4 && "Unexpected mask size for v4 shuffle!");

  int NumV2Elements = count_if(Mask, [](int M) { return M >= 4; });

  if (NumV2Elements == 0) {
    // Check for being able to broadcast a single element.
    if (SDValue Broadcast = lowerVectorShuffleAsBroadcast(
            DL, MVT::v4f32, V1, V2, Mask, Subtarget, DAG))
      return Broadcast;

    // Use even/odd duplicate instructions for masks that match their pattern.
    if (Subtarget.hasSSE3()) {
      if (isShuffleEquivalent(V1, V2, Mask, {0, 0, 2, 2}))
        return DAG.getNode(X86ISD::MOVSLDUP, DL, MVT::v4f32, V1);
      if (isShuffleEquivalent(V1, V2, Mask, {1, 1, 3, 3}))
        return DAG.getNode(X86ISD::MOVSHDUP, DL, MVT::v4f32, V1);
    }

    if (Subtarget.hasAVX()) {
      // If we have AVX, we can use VPERMILPS which will allow folding a load
      // into the shuffle.
      return DAG.getNode(X86ISD::VPERMILPI, DL, MVT::v4f32, V1,
                         getV4X86ShuffleImm8ForMask(Mask, DL, DAG));
    }

    // Use MOVLHPS/MOVHLPS to simulate unary shuffles. These are only valid
    // in SSE1 because otherwise they are widened to v2f64 and never get here.
    if (!Subtarget.hasSSE2()) {
      if (isShuffleEquivalent(V1, V2, Mask, {0, 1, 0, 1}))
        return DAG.getNode(X86ISD::MOVLHPS, DL, MVT::v4f32, V1, V1);
      if (isShuffleEquivalent(V1, V2, Mask, {2, 3, 2, 3}))
        return DAG.getNode(X86ISD::MOVHLPS, DL, MVT::v4f32, V1, V1);
    }

    // Otherwise, use a straight shuffle of a single input vector. We pass the
    // input vector to both operands to simulate this with a SHUFPS.
    return DAG.getNode(X86ISD::SHUFP, DL, MVT::v4f32, V1, V1,
                       getV4X86ShuffleImm8ForMask(Mask, DL, DAG));
  }

  // There are special ways we can lower some single-element blends. However, we
  // have custom ways we can lower more complex single-element blends below that
  // we defer to if both this and BLENDPS fail to match, so restrict this to
  // when the V2 input is targeting element 0 of the mask -- that is the fast
  // case here.
  if (NumV2Elements == 1 && Mask[0] >= 4)
    if (SDValue V = lowerVectorShuffleAsElementInsertion(
            DL, MVT::v4f32, V1, V2, Mask, Zeroable, Subtarget, DAG))
      return V;

  if (Subtarget.hasSSE41()) {
    if (SDValue Blend = lowerVectorShuffleAsBlend(DL, MVT::v4f32, V1, V2, Mask,
                                                  Zeroable, Subtarget, DAG))
      return Blend;

    // Use INSERTPS if we can complete the shuffle efficiently.
    if (SDValue V =
            lowerVectorShuffleAsInsertPS(DL, V1, V2, Mask, Zeroable, DAG))
      return V;

    if (!isSingleSHUFPSMask(Mask))
      if (SDValue BlendPerm = lowerVectorShuffleAsBlendAndPermute(
              DL, MVT::v4f32, V1, V2, Mask, DAG))
        return BlendPerm;
  }

  // Use low/high mov instructions. These are only valid in SSE1 because
  // otherwise they are widened to v2f64 and never get here.
  if (!Subtarget.hasSSE2()) {
    if (isShuffleEquivalent(V1, V2, Mask, {0, 1, 4, 5}))
      return DAG.getNode(X86ISD::MOVLHPS, DL, MVT::v4f32, V1, V2);
    if (isShuffleEquivalent(V1, V2, Mask, {2, 3, 6, 7}))
      return DAG.getNode(X86ISD::MOVHLPS, DL, MVT::v4f32, V2, V1);
  }

  // Use dedicated unpack instructions for masks that match their pattern.
  if (SDValue V =
          lowerVectorShuffleWithUNPCK(DL, MVT::v4f32, Mask, V1, V2, DAG))
    return V;

  // Otherwise fall back to a SHUFPS lowering strategy.
  return lowerVectorShuffleWithSHUFPS(DL, MVT::v4f32, Mask, V1, V2, DAG);
}

/// Lower 4-lane i32 vector shuffles.
///
/// We try to handle these with integer-domain shuffles where we can, but for
/// blends we use the floating point domain blend instructions.
static SDValue lowerV4I32VectorShuffle(const SDLoc &DL, ArrayRef<int> Mask,
                                       const APInt &Zeroable,
                                       SDValue V1, SDValue V2,
                                       const X86Subtarget &Subtarget,
                                       SelectionDAG &DAG) {
  assert(V1.getSimpleValueType() == MVT::v4i32 && "Bad operand type!");
  assert(V2.getSimpleValueType() == MVT::v4i32 && "Bad operand type!");
  assert(Mask.size() == 4 && "Unexpected mask size for v4 shuffle!");

  // Whenever we can lower this as a zext, that instruction is strictly faster
  // than any alternative. It also allows us to fold memory operands into the
  // shuffle in many cases.
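  // For example, a mask of <0, Z, 1, Z> where the Z lanes are known to be zero
  // is just a zero extension of the low two dwords (PMOVZXDQ on SSE4.1 and
  // later).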
  if (SDValue ZExt = lowerVectorShuffleAsZeroOrAnyExtend(
          DL, MVT::v4i32, V1, V2, Mask, Zeroable, Subtarget, DAG))
    return ZExt;

  int NumV2Elements = count_if(Mask, [](int M) { return M >= 4; });

  if (NumV2Elements == 0) {
    // Check for being able to broadcast a single element.
    if (SDValue Broadcast = lowerVectorShuffleAsBroadcast(
            DL, MVT::v4i32, V1, V2, Mask, Subtarget, DAG))
      return Broadcast;

    // Straight shuffle of a single input vector. For everything from SSE2
    // onward this has a single fast instruction with no scary immediates.
    // We coerce the shuffle pattern to be compatible with UNPCK instructions
    // but we aren't actually going to use the UNPCK instruction because doing
    // so prevents folding a load into this instruction or making a copy.
    const int UnpackLoMask[] = {0, 0, 1, 1};
    const int UnpackHiMask[] = {2, 2, 3, 3};
    if (isShuffleEquivalent(V1, V2, Mask, {0, 0, 1, 1}))
      Mask = UnpackLoMask;
    else if (isShuffleEquivalent(V1, V2, Mask, {2, 2, 3, 3}))
      Mask = UnpackHiMask;

    return DAG.getNode(X86ISD::PSHUFD, DL, MVT::v4i32, V1,
                       getV4X86ShuffleImm8ForMask(Mask, DL, DAG));
  }

  // Try to use shift instructions.
  if (SDValue Shift = lowerVectorShuffleAsShift(DL, MVT::v4i32, V1, V2, Mask,
                                                Zeroable, Subtarget, DAG))
    return Shift;

  // There are special ways we can lower some single-element blends.
  if (NumV2Elements == 1)
    if (SDValue V = lowerVectorShuffleAsElementInsertion(
            DL, MVT::v4i32, V1, V2, Mask, Zeroable, Subtarget, DAG))
      return V;

  // We have different paths for blend lowering, but they all must use the
  // *exact* same predicate.
  bool IsBlendSupported = Subtarget.hasSSE41();
  if (IsBlendSupported)
    if (SDValue Blend = lowerVectorShuffleAsBlend(DL, MVT::v4i32, V1, V2, Mask,
                                                  Zeroable, Subtarget, DAG))
      return Blend;

  if (SDValue Masked = lowerVectorShuffleAsBitMask(DL, MVT::v4i32, V1, V2, Mask,
                                                   Zeroable, DAG))
    return Masked;

  // Use dedicated unpack instructions for masks that match their pattern.
  if (SDValue V =
          lowerVectorShuffleWithUNPCK(DL, MVT::v4i32, Mask, V1, V2, DAG))
    return V;

  // Try to use byte rotation instructions.
  // It's more profitable for pre-SSSE3 to use shuffles/unpacks.
  if (Subtarget.hasSSSE3()) {
    if (Subtarget.hasVLX())
      if (SDValue Rotate = lowerVectorShuffleAsRotate(DL, MVT::v4i32, V1, V2,
                                                      Mask, Subtarget, DAG))
        return Rotate;

    if (SDValue Rotate = lowerVectorShuffleAsByteRotate(
            DL, MVT::v4i32, V1, V2, Mask, Subtarget, DAG))
      return Rotate;
  }

  // Assume that a single SHUFPS is faster than an alternative sequence of
  // multiple instructions (even if the CPU has a domain penalty).
  // If some CPU is harmed by the domain switch, we can fix it in a later pass.
  if (!isSingleSHUFPSMask(Mask)) {
    // If we have direct support for blends, we should lower by decomposing into
    // a permute. That will be faster than the domain cross.
    if (IsBlendSupported)
      return lowerVectorShuffleAsDecomposedShuffleBlend(DL, MVT::v4i32, V1, V2,
                                                        Mask, Subtarget, DAG);

    // Try to lower by permuting the inputs into an unpack instruction.
    if (SDValue Unpack = lowerVectorShuffleAsPermuteAndUnpack(
            DL, MVT::v4i32, V1, V2, Mask, DAG))
      return Unpack;
  }

  // We implement this with SHUFPS because it can blend from two vectors.
  // Because we're going to eventually use SHUFPS, we use SHUFPS even to build
  // up the inputs, bypassing domain shift penalties that we would incur if we
  // directly used PSHUFD on Nehalem and older. For newer chips, this isn't
  // relevant.
  SDValue CastV1 = DAG.getBitcast(MVT::v4f32, V1);
  SDValue CastV2 = DAG.getBitcast(MVT::v4f32, V2);
  SDValue ShufPS = DAG.getVectorShuffle(MVT::v4f32, DL, CastV1, CastV2, Mask);
  return DAG.getBitcast(MVT::v4i32, ShufPS);
}

/// Lowering of single-input v8i16 shuffles is the cornerstone of SSE2
/// shuffle lowering, and the most complex part.
///
/// The lowering strategy is to try to form pairs of input lanes which are
/// targeted at the same half of the final vector, and then use a dword shuffle
/// to place them onto the right half, and finally unpack the paired lanes into
/// their final position.
///
/// The exact breakdown of how to form these dword pairs and align them on the
/// correct sides is really tricky. See the comments within the function for
/// more of the details.
///
/// This code also handles repeated 128-bit lanes of v8i16 shuffles, but each
/// lane must shuffle the *exact* same way. In fact, you must pass a v8 Mask to
/// this routine for it to work correctly. To shuffle a 256-bit or 512-bit i16
/// vector, form the analogous 128-bit 8-element Mask.
static SDValue lowerV8I16GeneralSingleInputVectorShuffle(
    const SDLoc &DL, MVT VT, SDValue V, MutableArrayRef<int> Mask,
    const X86Subtarget &Subtarget, SelectionDAG &DAG) {
  assert(VT.getVectorElementType() == MVT::i16 && "Bad input type!");
  MVT PSHUFDVT = MVT::getVectorVT(MVT::i32, VT.getVectorNumElements() / 2);

  assert(Mask.size() == 8 && "Shuffle mask length doesn't match!");
  MutableArrayRef<int> LoMask = Mask.slice(0, 4);
  MutableArrayRef<int> HiMask = Mask.slice(4, 4);

  // Attempt to directly match PSHUFLW or PSHUFHW.
  if (isUndefOrInRange(LoMask, 0, 4) &&
      isSequentialOrUndefInRange(HiMask, 0, 4, 4)) {
    return DAG.getNode(X86ISD::PSHUFLW, DL, VT, V,
                       getV4X86ShuffleImm8ForMask(LoMask, DL, DAG));
  }
  if (isUndefOrInRange(HiMask, 4, 8) &&
      isSequentialOrUndefInRange(LoMask, 0, 4, 0)) {
    for (int i = 0; i != 4; ++i)
      HiMask[i] = (HiMask[i] < 0 ? HiMask[i] : (HiMask[i] - 4));
    return DAG.getNode(X86ISD::PSHUFHW, DL, VT, V,
                       getV4X86ShuffleImm8ForMask(HiMask, DL, DAG));
  }

  SmallVector<int, 4> LoInputs;
  copy_if(LoMask, std::back_inserter(LoInputs), [](int M) { return M >= 0; });
  array_pod_sort(LoInputs.begin(), LoInputs.end());
  LoInputs.erase(std::unique(LoInputs.begin(), LoInputs.end()), LoInputs.end());
  SmallVector<int, 4> HiInputs;
  copy_if(HiMask, std::back_inserter(HiInputs), [](int M) { return M >= 0; });
  array_pod_sort(HiInputs.begin(), HiInputs.end());
  HiInputs.erase(std::unique(HiInputs.begin(), HiInputs.end()), HiInputs.end());
  int NumLToL =
      std::lower_bound(LoInputs.begin(), LoInputs.end(), 4) - LoInputs.begin();
  int NumHToL = LoInputs.size() - NumLToL;
  int NumLToH =
      std::lower_bound(HiInputs.begin(), HiInputs.end(), 4) - HiInputs.begin();
  int NumHToH = HiInputs.size() - NumLToH;
  MutableArrayRef<int> LToLInputs(LoInputs.data(), NumLToL);
  MutableArrayRef<int> LToHInputs(HiInputs.data(), NumLToH);
  MutableArrayRef<int> HToLInputs(LoInputs.data() + NumLToL, NumHToL);
  MutableArrayRef<int> HToHInputs(HiInputs.data() + NumLToH, NumHToH);

  // If we are shuffling values from one half, check how many different DWORD
  // pairs we need to create. If only 1 or 2 then we can perform this as a
  // PSHUFLW/PSHUFHW + PSHUFD instead of the PSHUFD+PSHUFLW+PSHUFHW chain below.
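  // For example, Mask = {0, 1, 0, 1, 2, 3, 2, 3} only needs the pairs (0, 1)
  // and (2, 3), so a PSHUFLW forming those pairs followed by a PSHUFD of
  // {0, 0, 1, 1} suffices.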
  auto ShuffleDWordPairs = [&](ArrayRef<int> PSHUFHalfMask,
                               ArrayRef<int> PSHUFDMask, unsigned ShufWOp) {
    V = DAG.getNode(ShufWOp, DL, VT, V,
                    getV4X86ShuffleImm8ForMask(PSHUFHalfMask, DL, DAG));
    V = DAG.getBitcast(PSHUFDVT, V);
    V = DAG.getNode(X86ISD::PSHUFD, DL, PSHUFDVT, V,
                    getV4X86ShuffleImm8ForMask(PSHUFDMask, DL, DAG));
    return DAG.getBitcast(VT, V);
  };

  if ((NumHToL + NumHToH) == 0 || (NumLToL + NumLToH) == 0) {
    int PSHUFDMask[4] = { -1, -1, -1, -1 };
    SmallVector<std::pair<int, int>, 4> DWordPairs;
    int DOffset = ((NumHToL + NumHToH) == 0 ? 0 : 2);

    // Collect the different DWORD pairs.
    for (int DWord = 0; DWord != 4; ++DWord) {
      int M0 = Mask[2 * DWord + 0];
      int M1 = Mask[2 * DWord + 1];
      M0 = (M0 >= 0 ? M0 % 4 : M0);
      M1 = (M1 >= 0 ? M1 % 4 : M1);
      if (M0 < 0 && M1 < 0)
        continue;

      bool Match = false;
      for (int j = 0, e = DWordPairs.size(); j < e; ++j) {
        auto &DWordPair = DWordPairs[j];
        if ((M0 < 0 || isUndefOrEqual(DWordPair.first, M0)) &&
            (M1 < 0 || isUndefOrEqual(DWordPair.second, M1))) {
          DWordPair.first = (M0 >= 0 ? M0 : DWordPair.first);
          DWordPair.second = (M1 >= 0 ? M1 : DWordPair.second);
          PSHUFDMask[DWord] = DOffset + j;
          Match = true;
          break;
        }
      }
      if (!Match) {
        PSHUFDMask[DWord] = DOffset + DWordPairs.size();
        DWordPairs.push_back(std::make_pair(M0, M1));
      }
    }

    if (DWordPairs.size() <= 2) {
      DWordPairs.resize(2, std::make_pair(-1, -1));
      int PSHUFHalfMask[4] = {DWordPairs[0].first, DWordPairs[0].second,
                              DWordPairs[1].first, DWordPairs[1].second};
      if ((NumHToL + NumHToH) == 0)
        return ShuffleDWordPairs(PSHUFHalfMask, PSHUFDMask, X86ISD::PSHUFLW);
      if ((NumLToL + NumLToH) == 0)
        return ShuffleDWordPairs(PSHUFHalfMask, PSHUFDMask, X86ISD::PSHUFHW);
    }
  }

  // Simplify the 1-into-3 and 3-into-1 cases with a single pshufd. For all
  // such inputs we can swap two of the dwords across the half mark and end up
  // with <=2 inputs to each half in each half. Once there, we can fall through
  // to the generic code below. For example:
  //
  // Input: [a, b, c, d, e, f, g, h] -PSHUFD[0,2,1,3]-> [a, b, e, f, c, d, g, h]
  // Mask:  [0, 1, 2, 7, 4, 5, 6, 3] -----------------> [0, 1, 4, 7, 2, 3, 6, 5]
  //
  // However in some very rare cases we have a 1-into-3 or 3-into-1 on one half
  // and an existing 2-into-2 on the other half. In this case we may have to
  // pre-shuffle the 2-into-2 half to avoid turning it into a 3-into-1 or
  // 1-into-3 which could cause us to cycle endlessly fixing each side in turn.
  // Fortunately, we don't have to handle anything but a 2-into-2 pattern
  // because any other situation (including a 3-into-1 or 1-into-3 in the other
  // half than the one we target for fixing) will be fixed when we re-enter this
  // path. Any sequence of PSHUFD instructions this produces will also be
  // combined into a single instruction later. Here is an example of the tricky
  // case:
  //
  // Input: [a, b, c, d, e, f, g, h] -PSHUFD[0,2,1,3]-> [a, b, e, f, c, d, g, h]
  // Mask:  [3, 7, 1, 0, 2, 7, 3, 5] -THIS-IS-BAD!!!!-> [5, 7, 1, 0, 4, 7, 5, 3]
  //
  // This now has a 1-into-3 in the high half! Instead, we do two shuffles:
  //
  // Input: [a, b, c, d, e, f, g, h] PSHUFHW[0,2,1,3]-> [a, b, c, d, e, g, f, h]
  // Mask:  [3, 7, 1, 0, 2, 7, 3, 5] -----------------> [3, 7, 1, 0, 2, 7, 3, 6]
  //
  // Input: [a, b, c, d, e, g, f, h] -PSHUFD[0,2,1,3]-> [a, b, e, g, c, d, f, h]
  // Mask:  [3, 7, 1, 0, 2, 7, 3, 6] -----------------> [5, 7, 1, 0, 4, 7, 5, 6]
  //
  // The result is fine to be handled by the generic logic.
  auto balanceSides = [&](ArrayRef<int> AToAInputs, ArrayRef<int> BToAInputs,
                          ArrayRef<int> BToBInputs, ArrayRef<int> AToBInputs,
                          int AOffset, int BOffset) {
    assert((AToAInputs.size() == 3 || AToAInputs.size() == 1) &&
           "Must call this with A having 3 or 1 inputs from the A half.");
    assert((BToAInputs.size() == 1 || BToAInputs.size() == 3) &&
           "Must call this with B having 1 or 3 inputs from the B half.");
    assert(AToAInputs.size() + BToAInputs.size() == 4 &&
           "Must call this with either 3:1 or 1:3 inputs (summing to 4).");

    bool ThreeAInputs = AToAInputs.size() == 3;

    // Compute the index of dword with only one word among the three inputs in
    // a half by taking the sum of the half with three inputs and subtracting
    // the sum of the actual three inputs. The difference is the remaining
    // slot.
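    // For example, if the three inputs are {0, 1, 3}, then 6 - (0 + 1 + 3)
    // leaves word 2 as the remaining slot, i.e. dword 1.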
    int ADWord, BDWord;
    int &TripleDWord = ThreeAInputs ? ADWord : BDWord;
    int &OneInputDWord = ThreeAInputs ? BDWord : ADWord;
    int TripleInputOffset = ThreeAInputs ? AOffset : BOffset;
    ArrayRef<int> TripleInputs = ThreeAInputs ? AToAInputs : BToAInputs;
    int OneInput = ThreeAInputs ? BToAInputs[0] : AToAInputs[0];
    int TripleInputSum = 0 + 1 + 2 + 3 + (4 * TripleInputOffset);
    int TripleNonInputIdx =
        TripleInputSum - std::accumulate(TripleInputs.begin(), TripleInputs.end(), 0);
    TripleDWord = TripleNonInputIdx / 2;

    // We use xor with one to compute the adjacent DWord to whichever one the
    // OneInput is in.
    OneInputDWord = (OneInput / 2) ^ 1;

    // Check for one tricky case: We're fixing a 3<-1 or a 1<-3 shuffle for AToA
    // and BToA inputs. If there is also such a problem with the BToB and AToB
    // inputs, we don't try to fix it necessarily -- we'll recurse and see it in
    // the next pass. However, if we have a 2<-2 in the BToB and AToB inputs, it
    // is essential that we don't *create* a 3<-1 as then we might oscillate.
    if (BToBInputs.size() == 2 && AToBInputs.size() == 2) {
      // Compute how many inputs will be flipped by swapping these DWords. We
      // need to balance this to ensure we don't form a 3-1 shuffle in the
      // other half.
      int NumFlippedAToBInputs =
          std::count(AToBInputs.begin(), AToBInputs.end(), 2 * ADWord) +
          std::count(AToBInputs.begin(), AToBInputs.end(), 2 * ADWord + 1);
      int NumFlippedBToBInputs =
          std::count(BToBInputs.begin(), BToBInputs.end(), 2 * BDWord) +
          std::count(BToBInputs.begin(), BToBInputs.end(), 2 * BDWord + 1);
      if ((NumFlippedAToBInputs == 1 &&
           (NumFlippedBToBInputs == 0 || NumFlippedBToBInputs == 2)) ||
          (NumFlippedBToBInputs == 1 &&
           (NumFlippedAToBInputs == 0 || NumFlippedAToBInputs == 2))) {
        // We choose whether to fix the A half or B half based on whether that
        // half has zero flipped inputs. At zero, we may not be able to fix it
        // with that half. We also bias towards fixing the B half because that
        // will more commonly be the high half, and we have to bias one way.
        auto FixFlippedInputs = [&V, &DL, &Mask, &DAG](int PinnedIdx, int DWord,
                                                       ArrayRef<int> Inputs) {
          int FixIdx = PinnedIdx ^ 1; // The adjacent slot to the pinned slot.
          bool IsFixIdxInput = is_contained(Inputs, PinnedIdx ^ 1);
          // Determine whether the free index is in the flipped dword or the
          // unflipped dword based on where the pinned index is. We use this bit
          // in an xor to conditionally select the adjacent dword.
          int FixFreeIdx = 2 * (DWord ^ (PinnedIdx / 2 == DWord));
          bool IsFixFreeIdxInput = is_contained(Inputs, FixFreeIdx);
          if (IsFixIdxInput == IsFixFreeIdxInput)
            FixFreeIdx += 1;
          IsFixFreeIdxInput = is_contained(Inputs, FixFreeIdx);
          assert(IsFixIdxInput != IsFixFreeIdxInput &&
                 "We need to be changing the number of flipped inputs!");
          int PSHUFHalfMask[] = {0, 1, 2, 3};
          std::swap(PSHUFHalfMask[FixFreeIdx % 4], PSHUFHalfMask[FixIdx % 4]);
          V = DAG.getNode(
              FixIdx < 4 ? X86ISD::PSHUFLW : X86ISD::PSHUFHW, DL,
              MVT::getVectorVT(MVT::i16, V.getValueSizeInBits() / 16), V,
              getV4X86ShuffleImm8ForMask(PSHUFHalfMask, DL, DAG));

          for (int &M : Mask)
            if (M >= 0 && M == FixIdx)
              M = FixFreeIdx;
            else if (M >= 0 && M == FixFreeIdx)
              M = FixIdx;
        };
        if (NumFlippedBToBInputs != 0) {
          int BPinnedIdx =
              BToAInputs.size() == 3 ? TripleNonInputIdx : OneInput;
          FixFlippedInputs(BPinnedIdx, BDWord, BToBInputs);
        } else {
          assert(NumFlippedAToBInputs != 0 && "Impossible given predicates!");
          int APinnedIdx = ThreeAInputs ? TripleNonInputIdx : OneInput;
          FixFlippedInputs(APinnedIdx, ADWord, AToBInputs);
        }
      }
    }

    int PSHUFDMask[] = {0, 1, 2, 3};
    PSHUFDMask[ADWord] = BDWord;
    PSHUFDMask[BDWord] = ADWord;
    V = DAG.getBitcast(
        VT,
        DAG.getNode(X86ISD::PSHUFD, DL, PSHUFDVT, DAG.getBitcast(PSHUFDVT, V),
                    getV4X86ShuffleImm8ForMask(PSHUFDMask, DL, DAG)));

    // Adjust the mask to match the new locations of A and B.
    for (int &M : Mask)
      if (M >= 0 && M/2 == ADWord)
        M = 2 * BDWord + M % 2;
      else if (M >= 0 && M/2 == BDWord)
        M = 2 * ADWord + M % 2;

    // Recurse back into this routine to re-compute state now that this isn't
    // a 3 and 1 problem.
    return lowerV8I16GeneralSingleInputVectorShuffle(DL, VT, V, Mask, Subtarget,
                                                     DAG);
  };
  if ((NumLToL == 3 && NumHToL == 1) || (NumLToL == 1 && NumHToL == 3))
    return balanceSides(LToLInputs, HToLInputs, HToHInputs, LToHInputs, 0, 4);
  if ((NumHToH == 3 && NumLToH == 1) || (NumHToH == 1 && NumLToH == 3))
    return balanceSides(HToHInputs, LToHInputs, LToLInputs, HToLInputs, 4, 0);

  // At this point there are at most two inputs to the low and high halves from
  // each half. That means the inputs can always be grouped into dwords and
  // those dwords can then be moved to the correct half with a dword shuffle.
  // We use at most one low and one high word shuffle to collect these paired
  // inputs into dwords, and finally a dword shuffle to place them.
  int PSHUFLMask[4] = {-1, -1, -1, -1};
  int PSHUFHMask[4] = {-1, -1, -1, -1};
  int PSHUFDMask[4] = {-1, -1, -1, -1};

  // First fix the masks for all the inputs that are staying in their
  // original halves. This will then dictate the targets of the cross-half
  // shuffles.
  auto fixInPlaceInputs =
      [&PSHUFDMask](ArrayRef<int> InPlaceInputs, ArrayRef<int> IncomingInputs,
                    MutableArrayRef<int> SourceHalfMask,
                    MutableArrayRef<int> HalfMask, int HalfOffset) {
    if (InPlaceInputs.empty())
      return;
    if (InPlaceInputs.size() == 1) {
      SourceHalfMask[InPlaceInputs[0] - HalfOffset] =
          InPlaceInputs[0] - HalfOffset;
      PSHUFDMask[InPlaceInputs[0] / 2] = InPlaceInputs[0] / 2;
      return;
    }
    if (IncomingInputs.empty()) {
      // Just fix all of the in-place inputs.
      for (int Input : InPlaceInputs) {
        SourceHalfMask[Input - HalfOffset] = Input - HalfOffset;
        PSHUFDMask[Input / 2] = Input / 2;
      }
      return;
    }

    assert(InPlaceInputs.size() == 2 && "Cannot handle 3 or 4 inputs!");
    SourceHalfMask[InPlaceInputs[0] - HalfOffset] =
        InPlaceInputs[0] - HalfOffset;
    // Put the second input next to the first so that they are packed into
    // a dword. We find the adjacent index by toggling the low bit.
    int AdjIndex = InPlaceInputs[0] ^ 1;
    SourceHalfMask[AdjIndex - HalfOffset] = InPlaceInputs[1] - HalfOffset;
    std::replace(HalfMask.begin(), HalfMask.end(), InPlaceInputs[1], AdjIndex);
    PSHUFDMask[AdjIndex / 2] = AdjIndex / 2;
  };
  fixInPlaceInputs(LToLInputs, HToLInputs, PSHUFLMask, LoMask, 0);
  fixInPlaceInputs(HToHInputs, LToHInputs, PSHUFHMask, HiMask, 4);
12731 | ||||
12732 | // Now gather the cross-half inputs and place them into a free dword of | |||
12733 | // their target half. | |||
12734 | // FIXME: This operation could almost certainly be simplified dramatically to | |||
12735 | // look more like the 3-1 fixing operation. | |||
12736 | auto moveInputsToRightHalf = [&PSHUFDMask]( | |||
12737 | MutableArrayRef<int> IncomingInputs, ArrayRef<int> ExistingInputs, | |||
12738 | MutableArrayRef<int> SourceHalfMask, MutableArrayRef<int> HalfMask, | |||
12739 | MutableArrayRef<int> FinalSourceHalfMask, int SourceOffset, | |||
12740 | int DestOffset) { | |||
12741 | auto isWordClobbered = [](ArrayRef<int> SourceHalfMask, int Word) { | |||
12742 | return SourceHalfMask[Word] >= 0 && SourceHalfMask[Word] != Word; | |||
12743 | }; | |||
12744 | auto isDWordClobbered = [&isWordClobbered](ArrayRef<int> SourceHalfMask, | |||
12745 | int Word) { | |||
12746 | int LowWord = Word & ~1; | |||
12747 | int HighWord = Word | 1; | |||
12748 | return isWordClobbered(SourceHalfMask, LowWord) || | |||
12749 | isWordClobbered(SourceHalfMask, HighWord); | |||
12750 | }; | |||
12751 | ||||
12752 | if (IncomingInputs.empty()) | |||
12753 | return; | |||
12754 | ||||
12755 | if (ExistingInputs.empty()) { | |||
12756 | // Map any dwords with inputs from them into the right half. | |||
12757 | for (int Input : IncomingInputs) { | |||
12758 | // If the source half mask maps over the inputs, turn those into | |||
12759 | // swaps and use the swapped lane. | |||
12760 | if (isWordClobbered(SourceHalfMask, Input - SourceOffset)) { | |||
12761 | if (SourceHalfMask[SourceHalfMask[Input - SourceOffset]] < 0) { | |||
12762 | SourceHalfMask[SourceHalfMask[Input - SourceOffset]] = | |||
12763 | Input - SourceOffset; | |||
12764 | // We have to swap the uses in our half mask in one sweep. | |||
12765 | for (int &M : HalfMask) | |||
12766 | if (M == SourceHalfMask[Input - SourceOffset] + SourceOffset) | |||
12767 | M = Input; | |||
12768 | else if (M == Input) | |||
12769 | M = SourceHalfMask[Input - SourceOffset] + SourceOffset; | |||
12770 | } else { | |||
12771 | assert(SourceHalfMask[SourceHalfMask[Input - SourceOffset]] == | |||
12772 | Input - SourceOffset && | |||
12773 | "Previous placement doesn't match!"); | |||
12774 | } | |||
12775 | // Note that this correctly re-maps both when we do a swap and when | |||
12776 | // we observe the other side of the swap above. We rely on that to | |||
12777 | // avoid swapping the members of the input list directly. | |||
12778 | Input = SourceHalfMask[Input - SourceOffset] + SourceOffset; | |||
12779 | } | |||
12780 | ||||
12781 | // Map the input's dword into the correct half. | |||
12782 | if (PSHUFDMask[(Input - SourceOffset + DestOffset) / 2] < 0) | |||
12783 | PSHUFDMask[(Input - SourceOffset + DestOffset) / 2] = Input / 2; | |||
12784 | else | |||
12785 | assert(PSHUFDMask[(Input - SourceOffset + DestOffset) / 2] == | |||
12786 | Input / 2 && | |||
12787 | "Previous placement doesn't match!"); | |||
12788 | } | |||
12789 | ||||
12790 | // And just directly shift any other-half mask elements to be same-half | |||
12791 | // as we will have mirrored the dword containing the element into the | |||
12792 | // same position within that half. | |||
12793 | for (int &M : HalfMask) | |||
12794 | if (M >= SourceOffset && M < SourceOffset + 4) { | |||
12795 | M = M - SourceOffset + DestOffset; | |||
12796 | assert(M >= 0 && "This should never wrap below zero!"); | |||
12797 | } | |||
12798 | return; | |||
12799 | } | |||
12800 | ||||
12801 | // Ensure we have the input in a viable dword of its current half. This | |||
12802 | // is particularly tricky because the original position may be clobbered | |||
12803 | // by inputs being moved and *staying* in that half. | |||
12804 | if (IncomingInputs.size() == 1) { | |||
12805 | if (isWordClobbered(SourceHalfMask, IncomingInputs[0] - SourceOffset)) { | |||
12806 | int InputFixed = find(SourceHalfMask, -1) - std::begin(SourceHalfMask) + | |||
12807 | SourceOffset; | |||
12808 | SourceHalfMask[InputFixed - SourceOffset] = | |||
12809 | IncomingInputs[0] - SourceOffset; | |||
12810 | std::replace(HalfMask.begin(), HalfMask.end(), IncomingInputs[0], | |||
12811 | InputFixed); | |||
12812 | IncomingInputs[0] = InputFixed; | |||
12813 | } | |||
12814 | } else if (IncomingInputs.size() == 2) { | |||
12815 | if (IncomingInputs[0] / 2 != IncomingInputs[1] / 2 || | |||
12816 | isDWordClobbered(SourceHalfMask, IncomingInputs[0] - SourceOffset)) { | |||
12817 | // We have two non-adjacent or clobbered inputs we need to extract from | |||
12818 | // the source half. To do this, we need to map them into some adjacent | |||
12819 | // dword slot in the source mask. | |||
12820 | int InputsFixed[2] = {IncomingInputs[0] - SourceOffset, | |||
12821 | IncomingInputs[1] - SourceOffset}; | |||
12822 | ||||
12823 | // If there is a free slot in the source half mask adjacent to one of | |||
12824 | // the inputs, place the other input in it. We use (Index XOR 1) to | |||
12825 | // compute an adjacent index. | |||
12826 | if (!isWordClobbered(SourceHalfMask, InputsFixed[0]) && | |||
12827 | SourceHalfMask[InputsFixed[0] ^ 1] < 0) { | |||
12828 | SourceHalfMask[InputsFixed[0]] = InputsFixed[0]; | |||
12829 | SourceHalfMask[InputsFixed[0] ^ 1] = InputsFixed[1]; | |||
12830 | InputsFixed[1] = InputsFixed[0] ^ 1; | |||
12831 | } else if (!isWordClobbered(SourceHalfMask, InputsFixed[1]) && | |||
12832 | SourceHalfMask[InputsFixed[1] ^ 1] < 0) { | |||
12833 | SourceHalfMask[InputsFixed[1]] = InputsFixed[1]; | |||
12834 | SourceHalfMask[InputsFixed[1] ^ 1] = InputsFixed[0]; | |||
12835 | InputsFixed[0] = InputsFixed[1] ^ 1; | |||
12836 | } else if (SourceHalfMask[2 * ((InputsFixed[0] / 2) ^ 1)] < 0 && | |||
12837 | SourceHalfMask[2 * ((InputsFixed[0] / 2) ^ 1) + 1] < 0) { | |||
12838 | // The two inputs are in the same DWord but it is clobbered and the | |||
12839 | // adjacent DWord isn't used at all. Move both inputs to the free | |||
12840 | // slot. | |||
12841 | SourceHalfMask[2 * ((InputsFixed[0] / 2) ^ 1)] = InputsFixed[0]; | |||
12842 | SourceHalfMask[2 * ((InputsFixed[0] / 2) ^ 1) + 1] = InputsFixed[1]; | |||
12843 | InputsFixed[0] = 2 * ((InputsFixed[0] / 2) ^ 1); | |||
12844 | InputsFixed[1] = 2 * ((InputsFixed[0] / 2) ^ 1) + 1; | |||
12845 | } else { | |||
12846 | // The only way we hit this point is if there is no clobbering | |||
12847 | // (because there are no off-half inputs to this half) and there is no | |||
12848 | // free slot adjacent to one of the inputs. In this case, we have to | |||
12849 | // swap an input with a non-input. | |||
12850 | for (int i = 0; i < 4; ++i) | |||
12851 | assert((SourceHalfMask[i] < 0 || SourceHalfMask[i] == i) && | |||
12852 | "We can't handle any clobbers here!"); | |||
12853 | assert(InputsFixed[1] != (InputsFixed[0] ^ 1) && | |||
12854 | "Cannot have adjacent inputs here!"); | |||
12855 | ||||
12856 | SourceHalfMask[InputsFixed[0] ^ 1] = InputsFixed[1]; | |||
12857 | SourceHalfMask[InputsFixed[1]] = InputsFixed[0] ^ 1; | |||
12858 | ||||
12859 | // We also have to update the final source mask in this case because | |||
12860 | // it may need to undo the above swap. | |||
12861 | for (int &M : FinalSourceHalfMask) | |||
12862 | if (M == (InputsFixed[0] ^ 1) + SourceOffset) | |||
12863 | M = InputsFixed[1] + SourceOffset; | |||
12864 | else if (M == InputsFixed[1] + SourceOffset) | |||
12865 | M = (InputsFixed[0] ^ 1) + SourceOffset; | |||
12866 | ||||
12867 | InputsFixed[1] = InputsFixed[0] ^ 1; | |||
12868 | } | |||
12869 | ||||
12870 | // Point everything at the fixed inputs. | |||
12871 | for (int &M : HalfMask) | |||
12872 | if (M == IncomingInputs[0]) | |||
12873 | M = InputsFixed[0] + SourceOffset; | |||
12874 | else if (M == IncomingInputs[1]) | |||
12875 | M = InputsFixed[1] + SourceOffset; | |||
12876 | ||||
12877 | IncomingInputs[0] = InputsFixed[0] + SourceOffset; | |||
12878 | IncomingInputs[1] = InputsFixed[1] + SourceOffset; | |||
12879 | } | |||
12880 | } else { | |||
12881 | llvm_unreachable("Unhandled input size!")::llvm::llvm_unreachable_internal("Unhandled input size!", "/build/llvm-toolchain-snapshot-8~svn350071/lib/Target/X86/X86ISelLowering.cpp" , 12881); | |||
12882 | } | |||
12883 | ||||
12884 | // Now hoist the DWord holding the inputs into the free DWord of the target half. | |||
12885 | int FreeDWord = (PSHUFDMask[DestOffset / 2] < 0 ? 0 : 1) + DestOffset / 2; | |||
12886 | assert(PSHUFDMask[FreeDWord] < 0 && "DWord not free"); | |||
12887 | PSHUFDMask[FreeDWord] = IncomingInputs[0] / 2; | |||
12888 | for (int &M : HalfMask) | |||
12889 | for (int Input : IncomingInputs) | |||
12890 | if (M == Input) | |||
12891 | M = FreeDWord * 2 + Input % 2; | |||
12892 | }; | |||
12893 | moveInputsToRightHalf(HToLInputs, LToLInputs, PSHUFHMask, LoMask, HiMask, | |||
12894 | /*SourceOffset*/ 4, /*DestOffset*/ 0); | |||
12895 | moveInputsToRightHalf(LToHInputs, HToHInputs, PSHUFLMask, HiMask, LoMask, | |||
12896 | /*SourceOffset*/ 0, /*DestOffset*/ 4); | |||
12897 | ||||
12898 | // Now enact all the shuffles we've computed to move the inputs into their | |||
12899 | // target half. | |||
12900 | if (!isNoopShuffleMask(PSHUFLMask)) | |||
12901 | V = DAG.getNode(X86ISD::PSHUFLW, DL, VT, V, | |||
12902 | getV4X86ShuffleImm8ForMask(PSHUFLMask, DL, DAG)); | |||
12903 | if (!isNoopShuffleMask(PSHUFHMask)) | |||
12904 | V = DAG.getNode(X86ISD::PSHUFHW, DL, VT, V, | |||
12905 | getV4X86ShuffleImm8ForMask(PSHUFHMask, DL, DAG)); | |||
12906 | if (!isNoopShuffleMask(PSHUFDMask)) | |||
12907 | V = DAG.getBitcast( | |||
12908 | VT, | |||
12909 | DAG.getNode(X86ISD::PSHUFD, DL, PSHUFDVT, DAG.getBitcast(PSHUFDVT, V), | |||
12910 | getV4X86ShuffleImm8ForMask(PSHUFDMask, DL, DAG))); | |||
12911 | ||||
12912 | // At this point, each half should contain all its inputs, and we can then | |||
12913 | // just shuffle them into their final position. | |||
12914 | assert(count_if(LoMask, [](int M) { return M >= 4; }) == 0 && | |||
12915 | "Failed to lift all the high half inputs to the low mask!"); | |||
12916 | assert(count_if(HiMask, [](int M) { return M >= 0 && M < 4; }) == 0 && | |||
12917 | "Failed to lift all the low half inputs to the high mask!"); | |||
12918 | ||||
12919 | // Do a half shuffle for the low mask. | |||
12920 | if (!isNoopShuffleMask(LoMask)) | |||
12921 | V = DAG.getNode(X86ISD::PSHUFLW, DL, VT, V, | |||
12922 | getV4X86ShuffleImm8ForMask(LoMask, DL, DAG)); | |||
12923 | ||||
12924 | // Do a half shuffle with the high mask after shifting its values down. | |||
12925 | for (int &M : HiMask) | |||
12926 | if (M >= 0) | |||
12927 | M -= 4; | |||
12928 | if (!isNoopShuffleMask(HiMask)) | |||
12929 | V = DAG.getNode(X86ISD::PSHUFHW, DL, VT, V, | |||
12930 | getV4X86ShuffleImm8ForMask(HiMask, DL, DAG)); | |||
12931 | ||||
12932 | return V; | |||
12933 | } | |||
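| // Note: the routine above emits at most five word shuffles: PSHUFLW and | |||
| // PSHUFHW to make cross-half inputs dword-adjacent, one PSHUFD to move whole | |||
| // dwords between halves, and a final PSHUFLW/PSHUFHW pair to place inputs | |||
| // within their halves, with each step skipped when its mask is a no-op. | |||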
12934 | ||||
12935 | /// Helper to form a PSHUFB-based shuffle+blend, opportunistically avoiding the | |||
12936 | /// blend if only one input is used. | |||
12937 | static SDValue lowerVectorShuffleAsBlendOfPSHUFBs( | |||
12938 | const SDLoc &DL, MVT VT, SDValue V1, SDValue V2, ArrayRef<int> Mask, | |||
12939 | const APInt &Zeroable, SelectionDAG &DAG, bool &V1InUse, bool &V2InUse) { | |||
12940 | assert(!is128BitLaneCrossingShuffleMask(VT, Mask) && | |||
12941 | "Lane crossing shuffle masks not supported"); | |||
12942 | ||||
12943 | int NumBytes = VT.getSizeInBits() / 8; | |||
12944 | int Size = Mask.size(); | |||
12945 | int Scale = NumBytes / Size; | |||
12946 | ||||
12947 | SmallVector<SDValue, 64> V1Mask(NumBytes, DAG.getUNDEF(MVT::i8)); | |||
12948 | SmallVector<SDValue, 64> V2Mask(NumBytes, DAG.getUNDEF(MVT::i8)); | |||
12949 | V1InUse = false; | |||
12950 | V2InUse = false; | |||
12951 | ||||
12952 | for (int i = 0; i < NumBytes; ++i) { | |||
12953 | int M = Mask[i / Scale]; | |||
12954 | if (M < 0) | |||
12955 | continue; | |||
12956 | ||||
12957 | const int ZeroMask = 0x80; | |||
12958 | int V1Idx = M < Size ? M * Scale + i % Scale : ZeroMask; | |||
12959 | int V2Idx = M < Size ? ZeroMask : (M - Size) * Scale + i % Scale; | |||
12960 | if (Zeroable[i / Scale]) | |||
12961 | V1Idx = V2Idx = ZeroMask; | |||
12962 | ||||
12963 | V1Mask[i] = DAG.getConstant(V1Idx, DL, MVT::i8); | |||
12964 | V2Mask[i] = DAG.getConstant(V2Idx, DL, MVT::i8); | |||
12965 | V1InUse |= (ZeroMask != V1Idx); | |||
12966 | V2InUse |= (ZeroMask != V2Idx); | |||
12967 | } | |||
12968 | ||||
12969 | MVT ShufVT = MVT::getVectorVT(MVT::i8, NumBytes); | |||
12970 | if (V1InUse) | |||
12971 | V1 = DAG.getNode(X86ISD::PSHUFB, DL, ShufVT, DAG.getBitcast(ShufVT, V1), | |||
12972 | DAG.getBuildVector(ShufVT, DL, V1Mask)); | |||
12973 | if (V2InUse) | |||
12974 | V2 = DAG.getNode(X86ISD::PSHUFB, DL, ShufVT, DAG.getBitcast(ShufVT, V2), | |||
12975 | DAG.getBuildVector(ShufVT, DL, V2Mask)); | |||
12976 | ||||
12977 | // If we need shuffled inputs from both, blend the two. | |||
12978 | SDValue V; | |||
12979 | if (V1InUse && V2InUse) | |||
12980 | V = DAG.getNode(ISD::OR, DL, ShufVT, V1, V2); | |||
12981 | else | |||
12982 | V = V1InUse ? V1 : V2; | |||
12983 | ||||
12984 | // Cast the result back to the correct type. | |||
12985 | return DAG.getBitcast(VT, V); | |||
12986 | } | |||
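| // Worked example (hypothetical mask): for MVT::v8i16 with Mask beginning | |||
| // <0, 8, ...>, Scale == 2, so bytes 0-1 select V1 element 0 and bytes 2-3 | |||
| // select V2 element 0, giving V1Mask = {0, 1, 0x80, 0x80, ...} and | |||
| // V2Mask = {0x80, 0x80, 0, 1, ...}; each input gets one PSHUFB, the 0x80 | |||
| // entries zero the unselected bytes, and a single OR forms the blend. | |||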
12987 | ||||
12988 | /// Generic lowering of 8-lane i16 shuffles. | |||
12989 | /// | |||
12990 | /// This handles both single-input shuffles and combined shuffle/blends with | |||
12991 | /// two inputs. The single input shuffles are immediately delegated to | |||
12992 | /// a dedicated lowering routine. | |||
12993 | /// | |||
12994 | /// The blends are lowered in one of three fundamental ways. If there are few | |||
12995 | /// enough inputs, it delegates to a basic UNPCK-based strategy. If the shuffle | |||
12996 | /// is significantly cheaper to lower as an interleaving of the two inputs, it | |||
12997 | /// interleaves them. Otherwise, it blends the low and high halves of the | |||
12998 | /// inputs separately (leaving each half with relatively few inputs) and then | |||
12999 | /// concatenates them. | |||
13000 | static SDValue lowerV8I16VectorShuffle(const SDLoc &DL, ArrayRef<int> Mask, | |||
13001 | const APInt &Zeroable, | |||
13002 | SDValue V1, SDValue V2, | |||
13003 | const X86Subtarget &Subtarget, | |||
13004 | SelectionDAG &DAG) { | |||
13005 | assert(V1.getSimpleValueType() == MVT::v8i16 && "Bad operand type!"); | |||
13006 | assert(V2.getSimpleValueType() == MVT::v8i16 && "Bad operand type!"); | |||
13007 | assert(Mask.size() == 8 && "Unexpected mask size for v8 shuffle!"); | |||
13008 | ||||
13009 | // Whenever we can lower this as a zext, that instruction is strictly faster | |||
13010 | // than any alternative. | |||
13011 | if (SDValue ZExt = lowerVectorShuffleAsZeroOrAnyExtend( | |||
13012 | DL, MVT::v8i16, V1, V2, Mask, Zeroable, Subtarget, DAG)) | |||
13013 | return ZExt; | |||
13014 | ||||
13015 | int NumV2Inputs = count_if(Mask, [](int M) { return M >= 8; }); | |||
13016 | ||||
13017 | if (NumV2Inputs == 0) { | |||
13018 | // Check for being able to broadcast a single element. | |||
13019 | if (SDValue Broadcast = lowerVectorShuffleAsBroadcast( | |||
13020 | DL, MVT::v8i16, V1, V2, Mask, Subtarget, DAG)) | |||
13021 | return Broadcast; | |||
13022 | ||||
13023 | // Try to use shift instructions. | |||
13024 | if (SDValue Shift = lowerVectorShuffleAsShift(DL, MVT::v8i16, V1, V1, Mask, | |||
13025 | Zeroable, Subtarget, DAG)) | |||
13026 | return Shift; | |||
13027 | ||||
13028 | // Use dedicated unpack instructions for masks that match their pattern. | |||
13029 | if (SDValue V = | |||
13030 | lowerVectorShuffleWithUNPCK(DL, MVT::v8i16, Mask, V1, V2, DAG)) | |||
13031 | return V; | |||
13032 | ||||
13033 | // Use dedicated pack instructions for masks that match their pattern. | |||
13034 | if (SDValue V = lowerVectorShuffleWithPACK(DL, MVT::v8i16, Mask, V1, V2, | |||
13035 | DAG, Subtarget)) | |||
13036 | return V; | |||
13037 | ||||
13038 | // Try to use byte rotation instructions. | |||
13039 | if (SDValue Rotate = lowerVectorShuffleAsByteRotate(DL, MVT::v8i16, V1, V1, | |||
13040 | Mask, Subtarget, DAG)) | |||
13041 | return Rotate; | |||
13042 | ||||
13043 | // Make a copy of the mask so it can be modified. | |||
13044 | SmallVector<int, 8> MutableMask(Mask.begin(), Mask.end()); | |||
13045 | return lowerV8I16GeneralSingleInputVectorShuffle(DL, MVT::v8i16, V1, | |||
13046 | MutableMask, Subtarget, | |||
13047 | DAG); | |||
13048 | } | |||
13049 | ||||
13050 | assert(llvm::any_of(Mask, [](int M) { return M >= 0 && M < 8; }) && | |||
13051 | "All single-input shuffles should be canonicalized to be V1-input " | |||
13052 | "shuffles."); | |||
13053 | ||||
13054 | // Try to use shift instructions. | |||
13055 | if (SDValue Shift = lowerVectorShuffleAsShift(DL, MVT::v8i16, V1, V2, Mask, | |||
13056 | Zeroable, Subtarget, DAG)) | |||
13057 | return Shift; | |||
13058 | ||||
13059 | // See if we can use SSE4A Extraction / Insertion. | |||
13060 | if (Subtarget.hasSSE4A()) | |||
13061 | if (SDValue V = lowerVectorShuffleWithSSE4A(DL, MVT::v8i16, V1, V2, Mask, | |||
13062 | Zeroable, DAG)) | |||
13063 | return V; | |||
13064 | ||||
13065 | // There are special ways we can lower some single-element blends. | |||
13066 | if (NumV2Inputs == 1) | |||
13067 | if (SDValue V = lowerVectorShuffleAsElementInsertion( | |||
13068 | DL, MVT::v8i16, V1, V2, Mask, Zeroable, Subtarget, DAG)) | |||
13069 | return V; | |||
13070 | ||||
13071 | // We have different paths for blend lowering, but they all must use the | |||
13072 | // *exact* same predicate. | |||
13073 | bool IsBlendSupported = Subtarget.hasSSE41(); | |||
13074 | if (IsBlendSupported) | |||
13075 | if (SDValue Blend = lowerVectorShuffleAsBlend(DL, MVT::v8i16, V1, V2, Mask, | |||
13076 | Zeroable, Subtarget, DAG)) | |||
13077 | return Blend; | |||
13078 | ||||
13079 | if (SDValue Masked = lowerVectorShuffleAsBitMask(DL, MVT::v8i16, V1, V2, Mask, | |||
13080 | Zeroable, DAG)) | |||
13081 | return Masked; | |||
13082 | ||||
13083 | // Use dedicated unpack instructions for masks that match their pattern. | |||
13084 | if (SDValue V = | |||
13085 | lowerVectorShuffleWithUNPCK(DL, MVT::v8i16, Mask, V1, V2, DAG)) | |||
13086 | return V; | |||
13087 | ||||
13088 | // Use dedicated pack instructions for masks that match their pattern. | |||
13089 | if (SDValue V = lowerVectorShuffleWithPACK(DL, MVT::v8i16, Mask, V1, V2, DAG, | |||
13090 | Subtarget)) | |||
13091 | return V; | |||
13092 | ||||
13093 | // Try to use byte rotation instructions. | |||
13094 | if (SDValue Rotate = lowerVectorShuffleAsByteRotate( | |||
13095 | DL, MVT::v8i16, V1, V2, Mask, Subtarget, DAG)) | |||
13096 | return Rotate; | |||
13097 | ||||
13098 | if (SDValue BitBlend = | |||
13099 | lowerVectorShuffleAsBitBlend(DL, MVT::v8i16, V1, V2, Mask, DAG)) | |||
13100 | return BitBlend; | |||
13101 | ||||
13102 | // Try to lower by permuting the inputs into an unpack instruction. | |||
13103 | if (SDValue Unpack = lowerVectorShuffleAsPermuteAndUnpack(DL, MVT::v8i16, V1, | |||
13104 | V2, Mask, DAG)) | |||
13105 | return Unpack; | |||
13106 | ||||
13107 | // If we can't blend directly but can use PSHUFB, that will be better: it | |||
13108 | // can both shuffle each input and set up the otherwise-inefficient blend. | |||
13109 | if (!IsBlendSupported && Subtarget.hasSSSE3()) { | |||
13110 | bool V1InUse, V2InUse; | |||
13111 | return lowerVectorShuffleAsBlendOfPSHUFBs(DL, MVT::v8i16, V1, V2, Mask, | |||
13112 | Zeroable, DAG, V1InUse, V2InUse); | |||
13113 | } | |||
13114 | ||||
13115 | // We can always bit-blend if we have to, so the fallback strategy is to | |||
13116 | // decompose into single-input permutes and blends. | |||
13117 | return lowerVectorShuffleAsDecomposedShuffleBlend(DL, MVT::v8i16, V1, V2, | |||
13118 | Mask, Subtarget, DAG); | |||
13119 | } | |||
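| // Concrete instance of the unpack case above (hypothetical mask): the | |||
| // two-input v8i16 mask <0, 8, 1, 9, 2, 10, 3, 11> is exactly PUNPCKLWD of | |||
| // V1 and V2, so lowerVectorShuffleWithUNPCK lowers it to that single | |||
| // instruction. | |||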
13120 | ||||
13121 | /// Check whether a compaction lowering can be done by dropping even | |||
13122 | /// elements and compute how many times even elements must be dropped. | |||
13123 | /// | |||
13124 | /// This handles shuffles which take every 2^N-th element for some N. Example | |||
13125 | /// shuffle masks: | |||
13126 | /// | |||
13127 | /// N = 1: 0, 2, 4, 6, 8, 10, 12, 14, 0, 2, 4, 6, 8, 10, 12, 14 | |||
13128 | /// N = 1: 0, 2, 4, 6, 8, 10, 12, 14, 16, 18, 20, 22, 24, 26, 28, 30 | |||
13129 | /// N = 2: 0, 4, 8, 12, 0, 4, 8, 12, 0, 4, 8, 12, 0, 4, 8, 12 | |||
13130 | /// N = 2: 0, 4, 8, 12, 16, 20, 24, 28, 0, 4, 8, 12, 16, 20, 24, 28 | |||
13131 | /// N = 3: 0, 8, 0, 8, 0, 8, 0, 8, 0, 8, 0, 8, 0, 8, 0, 8 | |||
13132 | /// N = 3: 0, 8, 16, 24, 0, 8, 16, 24, 0, 8, 16, 24, 0, 8, 16, 24 | |||
13133 | /// | |||
13134 | /// Any of these lanes can of course be undef. | |||
13135 | /// | |||
13136 | /// This routine only supports N <= 3. | |||
13137 | /// FIXME: Evaluate whether either AVX or AVX-512 has any opportunities here | |||
13138 | /// for larger N. | |||
13139 | /// | |||
13140 | /// \returns N above, or the number of times even elements must be dropped if | |||
13141 | /// there is such a number. Otherwise returns zero. | |||
13142 | static int canLowerByDroppingEvenElements(ArrayRef<int> Mask, | |||
13143 | bool IsSingleInput) { | |||
13144 | // The modulus for the shuffle vector entries is based on whether this is | |||
13145 | // a single input or not. | |||
13146 | int ShuffleModulus = Mask.size() * (IsSingleInput ? 1 : 2); | |||
13147 | assert(isPowerOf2_32((uint32_t)ShuffleModulus) && | |||
13148 | "We should only be called with masks with a power-of-2 size!"); | |||
13149 | ||||
13150 | uint64_t ModMask = (uint64_t)ShuffleModulus - 1; | |||
13151 | ||||
13152 | // We track whether the input is viable for all power-of-2 strides 2^1, 2^2, | |||
13153 | // and 2^3 simultaneously. This is because we may have ambiguity with | |||
13154 | // partially undef inputs. | |||
13155 | bool ViableForN[3] = {true, true, true}; | |||
13156 | ||||
13157 | for (int i = 0, e = Mask.size(); i < e; ++i) { | |||
13158 | // Ignore undef lanes, we'll optimistically collapse them to the pattern we | |||
13159 | // want. | |||
13160 | if (Mask[i] < 0) | |||
13161 | continue; | |||
13162 | ||||
13163 | bool IsAnyViable = false; | |||
13164 | for (unsigned j = 0; j != array_lengthof(ViableForN); ++j) | |||
13165 | if (ViableForN[j]) { | |||
13166 | uint64_t N = j + 1; | |||
13167 | ||||
13168 | // The shuffle mask must be equal to (i * 2^N) % M. | |||
13169 | if ((uint64_t)Mask[i] == (((uint64_t)i << N) & ModMask)) | |||
13170 | IsAnyViable = true; | |||
13171 | else | |||
13172 | ViableForN[j] = false; | |||
13173 | } | |||
13174 | // Early exit if we exhaust the possible powers of two. | |||
13175 | if (!IsAnyViable) | |||
13176 | break; | |||
13177 | } | |||
13178 | ||||
13179 | for (unsigned j = 0; j != array_lengthof(ViableForN); ++j) | |||
13180 | if (ViableForN[j]) | |||
13181 | return j + 1; | |||
13182 | ||||
13183 | // Return 0 as there is no viable power of two. | |||
13184 | return 0; | |||
13185 | } | |||
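| // Worked example (hypothetical mask): for the single-input, size-16 mask | |||
| // <0, 4, 8, 12, 0, 4, 8, 12, ...>, ShuffleModulus is 16 and at i == 1 only | |||
| // N == 2 satisfies Mask[1] == ((1 << N) & 15) == 4, so the routine returns | |||
| // 2: the even elements must be dropped twice. | |||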
13186 | ||||
13187 | static SDValue lowerVectorShuffleWithPERMV(const SDLoc &DL, MVT VT, | |||
13188 | ArrayRef<int> Mask, SDValue V1, | |||
13189 | SDValue V2, SelectionDAG &DAG) { | |||
13190 | MVT MaskEltVT = MVT::getIntegerVT(VT.getScalarSizeInBits()); | |||
13191 | MVT MaskVecVT = MVT::getVectorVT(MaskEltVT, VT.getVectorNumElements()); | |||
13192 | ||||
13193 | SDValue MaskNode = getConstVector(Mask, MaskVecVT, DAG, DL, true); | |||
13194 | if (V2.isUndef()) | |||
13195 | return DAG.getNode(X86ISD::VPERMV, DL, VT, MaskNode, V1); | |||
13196 | ||||
13197 | return DAG.getNode(X86ISD::VPERMV3, DL, VT, V1, MaskNode, V2); | |||
13198 | } | |||
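| // For instance, on a target with the VBMI and VLX features the callers | |||
| // check, a v16i8 single-source X86ISD::VPERMV typically selects to VPERMB | |||
| // and a two-source X86ISD::VPERMV3 to VPERMT2B, with the shuffle mask | |||
| // materialized as a constant vector. | |||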
13199 | ||||
13200 | /// Generic lowering of v16i8 shuffles. | |||
13201 | /// | |||
13202 | /// This is a hybrid strategy to lower v16i8 vectors. It first attempts to | |||
13203 | /// detect any complexity reducing interleaving. If that doesn't help, it uses | |||
13204 | /// UNPCK to spread the i8 elements across two i16-element vectors, and uses | |||
13205 | /// the existing lowering for v8i16 blends on each half, finally PACK-ing them | |||
13206 | /// back together. | |||
13207 | static SDValue lowerV16I8VectorShuffle(const SDLoc &DL, ArrayRef<int> Mask, | |||
13208 | const APInt &Zeroable, | |||
13209 | SDValue V1, SDValue V2, | |||
13210 | const X86Subtarget &Subtarget, | |||
13211 | SelectionDAG &DAG) { | |||
13212 | assert(V1.getSimpleValueType() == MVT::v16i8 && "Bad operand type!"); | |||
13213 | assert(V2.getSimpleValueType() == MVT::v16i8 && "Bad operand type!"); | |||
13214 | assert(Mask.size() == 16 && "Unexpected mask size for v16 shuffle!"); | |||
13215 | ||||
13216 | // Try to use shift instructions. | |||
13217 | if (SDValue Shift = lowerVectorShuffleAsShift(DL, MVT::v16i8, V1, V2, Mask, | |||
13218 | Zeroable, Subtarget, DAG)) | |||
13219 | return Shift; | |||
13220 | ||||
13221 | // Try to use byte rotation instructions. | |||
13222 | if (SDValue Rotate = lowerVectorShuffleAsByteRotate( | |||
13223 | DL, MVT::v16i8, V1, V2, Mask, Subtarget, DAG)) | |||
13224 | return Rotate; | |||
13225 | ||||
13226 | // Use dedicated pack instructions for masks that match their pattern. | |||
13227 | if (SDValue V = lowerVectorShuffleWithPACK(DL, MVT::v16i8, Mask, V1, V2, DAG, | |||
13228 | Subtarget)) | |||
13229 | return V; | |||
13230 | ||||
13231 | // Try to use a zext lowering. | |||
13232 | if (SDValue ZExt = lowerVectorShuffleAsZeroOrAnyExtend( | |||
13233 | DL, MVT::v16i8, V1, V2, Mask, Zeroable, Subtarget, DAG)) | |||
13234 | return ZExt; | |||
13235 | ||||
13236 | // See if we can use SSE4A Extraction / Insertion. | |||
13237 | if (Subtarget.hasSSE4A()) | |||
13238 | if (SDValue V = lowerVectorShuffleWithSSE4A(DL, MVT::v16i8, V1, V2, Mask, | |||
13239 | Zeroable, DAG)) | |||
13240 | return V; | |||
13241 | ||||
13242 | int NumV2Elements = count_if(Mask, [](int M) { return M >= 16; }); | |||
13243 | ||||
13244 | // For single-input shuffles, there are some nicer lowering tricks we can use. | |||
13245 | if (NumV2Elements == 0) { | |||
13246 | // Check for being able to broadcast a single element. | |||
13247 | if (SDValue Broadcast = lowerVectorShuffleAsBroadcast( | |||
13248 | DL, MVT::v16i8, V1, V2, Mask, Subtarget, DAG)) | |||
13249 | return Broadcast; | |||
13250 | ||||
13251 | if (SDValue V = | |||
13252 | lowerVectorShuffleWithUNPCK(DL, MVT::v16i8, Mask, V1, V2, DAG)) | |||
13253 | return V; | |||
13254 | ||||
13255 | // Check whether we can widen this to an i16 shuffle by duplicating bytes. | |||
13256 | // Notably, this handles splat and partial-splat shuffles more efficiently. | |||
13257 | // However, it only makes sense if the pre-duplication shuffle simplifies | |||
13258 | // things significantly. Currently, this means we need to be able to | |||
13259 | // express the pre-duplication shuffle as an i16 shuffle. | |||
13260 | // | |||
13261 | // FIXME: We should check for other patterns which can be widened into an | |||
13262 | // i16 shuffle as well. | |||
13263 | auto canWidenViaDuplication = [](ArrayRef<int> Mask) { | |||
13264 | for (int i = 0; i < 16; i += 2) | |||
13265 | if (Mask[i] >= 0 && Mask[i + 1] >= 0 && Mask[i] != Mask[i + 1]) | |||
13266 | return false; | |||
13267 | ||||
13268 | return true; | |||
13269 | }; | |||
13270 | auto tryToWidenViaDuplication = [&]() -> SDValue { | |||
13271 | if (!canWidenViaDuplication(Mask)) | |||
13272 | return SDValue(); | |||
13273 | SmallVector<int, 4> LoInputs; | |||
13274 | copy_if(Mask, std::back_inserter(LoInputs), | |||
13275 | [](int M) { return M >= 0 && M < 8; }); | |||
13276 | array_pod_sort(LoInputs.begin(), LoInputs.end()); | |||
13277 | LoInputs.erase(std::unique(LoInputs.begin(), LoInputs.end()), | |||
13278 | LoInputs.end()); | |||
13279 | SmallVector<int, 4> HiInputs; | |||
13280 | copy_if(Mask, std::back_inserter(HiInputs), [](int M) { return M >= 8; }); | |||
13281 | array_pod_sort(HiInputs.begin(), HiInputs.end()); | |||
13282 | HiInputs.erase(std::unique(HiInputs.begin(), HiInputs.end()), | |||
13283 | HiInputs.end()); | |||
13284 | ||||
13285 | bool TargetLo = LoInputs.size() >= HiInputs.size(); | |||
13286 | ArrayRef<int> InPlaceInputs = TargetLo ? LoInputs : HiInputs; | |||
13287 | ArrayRef<int> MovingInputs = TargetLo ? HiInputs : LoInputs; | |||
13288 | ||||
13289 | int PreDupI16Shuffle[] = {-1, -1, -1, -1, -1, -1, -1, -1}; | |||
13290 | SmallDenseMap<int, int, 8> LaneMap; | |||
13291 | for (int I : InPlaceInputs) { | |||
13292 | PreDupI16Shuffle[I/2] = I/2; | |||
13293 | LaneMap[I] = I; | |||
13294 | } | |||
13295 | int j = TargetLo ? 0 : 4, je = j + 4; | |||
13296 | for (int i = 0, ie = MovingInputs.size(); i < ie; ++i) { | |||
13297 | // Check if j is already a shuffle of this input. This happens when | |||
13298 | // there are two adjacent bytes after we move the low one. | |||
13299 | if (PreDupI16Shuffle[j] != MovingInputs[i] / 2) { | |||
13300 | // If we haven't yet mapped the input, search for a slot into which | |||
13301 | // we can map it. | |||
13302 | while (j < je && PreDupI16Shuffle[j] >= 0) | |||
13303 | ++j; | |||
13304 | ||||
13305 | if (j == je) | |||
13306 | // We can't place the inputs into a single half with a simple i16 shuffle, so bail. | |||
13307 | return SDValue(); | |||
13308 | ||||
13309 | // Map this input with the i16 shuffle. | |||
13310 | PreDupI16Shuffle[j] = MovingInputs[i] / 2; | |||
13311 | } | |||
13312 | ||||
13313 | // Update the lane map based on the mapping we ended up with. | |||
13314 | LaneMap[MovingInputs[i]] = 2 * j + MovingInputs[i] % 2; | |||
13315 | } | |||
13316 | V1 = DAG.getBitcast( | |||
13317 | MVT::v16i8, | |||
13318 | DAG.getVectorShuffle(MVT::v8i16, DL, DAG.getBitcast(MVT::v8i16, V1), | |||
13319 | DAG.getUNDEF(MVT::v8i16), PreDupI16Shuffle)); | |||
13320 | ||||
13321 | // Unpack the bytes to form the i16s that will be shuffled into place. | |||
13322 | V1 = DAG.getNode(TargetLo ? X86ISD::UNPCKL : X86ISD::UNPCKH, DL, | |||
13323 | MVT::v16i8, V1, V1); | |||
13324 | ||||
13325 | int PostDupI16Shuffle[8] = {-1, -1, -1, -1, -1, -1, -1, -1}; | |||
13326 | for (int i = 0; i < 16; ++i) | |||
13327 | if (Mask[i] >= 0) { | |||
13328 | int MappedMask = LaneMap[Mask[i]] - (TargetLo ? 0 : 8); | |||
13329 | assert(MappedMask < 8 && "Invalid v8 shuffle mask!"); | |||
13330 | if (PostDupI16Shuffle[i / 2] < 0) | |||
13331 | PostDupI16Shuffle[i / 2] = MappedMask; | |||
13332 | else | |||
13333 | assert(PostDupI16Shuffle[i / 2] == MappedMask && | |||
13334 | "Conflicting entries in the original shuffle!"); | |||
13335 | } | |||
13336 | return DAG.getBitcast( | |||
13337 | MVT::v16i8, | |||
13338 | DAG.getVectorShuffle(MVT::v8i16, DL, DAG.getBitcast(MVT::v8i16, V1), | |||
13339 | DAG.getUNDEF(MVT::v8i16), PostDupI16Shuffle)); | |||
13340 | }; | |||
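| // Sketch of the duplication trick above (hypothetical mask, assuming no | |||
| // earlier lowering fired): a byte splat such as <0, 0, 0, ..., 0> passes | |||
| // canWidenViaDuplication, the pre-dup i16 shuffle keeps word 0 in place, | |||
| // UNPCKL duplicates byte 0 into both bytes of its word, and the post-dup | |||
| // i16 shuffle <0, 0, 0, 0, 0, 0, 0, 0> splats that word, avoiding PSHUFB. | |||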
13341 | if (SDValue V = tryToWidenViaDuplication()) | |||
13342 | return V; | |||
13343 | } | |||
13344 | ||||
13345 | if (SDValue Masked = lowerVectorShuffleAsBitMask(DL, MVT::v16i8, V1, V2, Mask, | |||
13346 | Zeroable, DAG)) | |||
13347 | return Masked; | |||
13348 | ||||
13349 | // Use dedicated unpack instructions for masks that match their pattern. | |||
13350 | if (SDValue V = | |||
13351 | lowerVectorShuffleWithUNPCK(DL, MVT::v16i8, Mask, V1, V2, DAG)) | |||
13352 | return V; | |||
13353 | ||||
13354 | // Check for SSSE3 which lets us lower all v16i8 shuffles much more directly | |||
13355 | // with PSHUFB. It is important to do this before we attempt to generate any | |||
13356 | // blends but after all of the single-input lowerings. If the single input | |||
13357 | // lowerings can find an instruction sequence that is faster than a PSHUFB, we | |||
13358 | // want to preserve that and we can DAG combine any longer sequences into | |||
13359 | // a PSHUFB in the end. But once we start blending from multiple inputs, | |||
13360 | // the complexity of DAG combining bad patterns back into PSHUFB is too high, | |||
13361 | // and there are *very* few patterns that would actually be faster than the | |||
13362 | // PSHUFB approach because of its ability to zero lanes. | |||
13363 | // | |||
13364 | // FIXME: The only exceptions to the above are blends which are exact | |||
13365 | // interleavings with direct instructions supporting them. We currently don't | |||
13366 | // handle those well here. | |||
13367 | if (Subtarget.hasSSSE3()) { | |||
13368 | bool V1InUse = false; | |||
13369 | bool V2InUse = false; | |||
13370 | ||||
13371 | SDValue PSHUFB = lowerVectorShuffleAsBlendOfPSHUFBs( | |||
13372 | DL, MVT::v16i8, V1, V2, Mask, Zeroable, DAG, V1InUse, V2InUse); | |||
13373 | ||||
13374 | // If both V1 and V2 are in use and we can use a direct blend or an unpack, | |||
13375 | // do so. This avoids using them to handle blends-with-zero which is | |||
13376 | // important as a single pshufb is significantly faster for that. | |||
13377 | if (V1InUse && V2InUse) { | |||
13378 | if (Subtarget.hasSSE41()) | |||
13379 | if (SDValue Blend = lowerVectorShuffleAsBlend( | |||
13380 | DL, MVT::v16i8, V1, V2, Mask, Zeroable, Subtarget, DAG)) | |||
13381 | return Blend; | |||
13382 | ||||
13383 | // We can use an unpack to do the blending rather than an or in some | |||
13384 | // cases. Even though the OR may be (very minorly) more efficient, we | |||
13385 | // prefer this lowering because there are common cases where part of | |||
13386 | // the complexity of the shuffles goes away when we do the final blend as | |||
13387 | // an unpack. | |||
13388 | // FIXME: It might be worth trying to detect if the unpack-feeding | |||
13389 | // shuffles will both be pshufb, in which case we shouldn't bother with | |||
13390 | // this. | |||
13391 | if (SDValue Unpack = lowerVectorShuffleAsPermuteAndUnpack( | |||
13392 | DL, MVT::v16i8, V1, V2, Mask, DAG)) | |||
13393 | return Unpack; | |||
13394 | ||||
13395 | // If we have VBMI we can use one VPERM instead of multiple PSHUFBs. | |||
13396 | if (Subtarget.hasVBMI() && Subtarget.hasVLX()) | |||
13397 | return lowerVectorShuffleWithPERMV(DL, MVT::v16i8, Mask, V1, V2, DAG); | |||
13398 | ||||
13399 | // Use PALIGNR+Permute if possible - permute might become PSHUFB but the | |||
13400 | // PALIGNR will be cheaper than the second PSHUFB+OR. | |||
13401 | if (SDValue V = lowerVectorShuffleAsByteRotateAndPermute( | |||
13402 | DL, MVT::v16i8, V1, V2, Mask, Subtarget, DAG)) | |||
13403 | return V; | |||
13404 | } | |||
13405 | ||||
13406 | return PSHUFB; | |||
13407 | } | |||
13408 | ||||
13409 | // There are special ways we can lower some single-element blends. | |||
13410 | if (NumV2Elements == 1) | |||
13411 | if (SDValue V = lowerVectorShuffleAsElementInsertion( | |||
13412 | DL, MVT::v16i8, V1, V2, Mask, Zeroable, Subtarget, DAG)) | |||
13413 | return V; | |||
13414 | ||||
13415 | if (SDValue BitBlend = | |||
13416 | lowerVectorShuffleAsBitBlend(DL, MVT::v16i8, V1, V2, Mask, DAG)) | |||
13417 | return BitBlend; | |||
13418 | ||||
13419 | // Check whether a compaction lowering can be done. This handles shuffles | |||
13420 | // which take every Nth element for some even N. See the helper function for | |||
13421 | // details. | |||
13422 | // | |||
13423 | // We special case these as they can be particularly efficiently handled with | |||
13424 | // the PACKUSWB instruction on x86 and they show up in common patterns of | |||
13425 | // rearranging bytes to truncate wide elements. | |||
13426 | bool IsSingleInput = V2.isUndef(); | |||
13427 | if (int NumEvenDrops = canLowerByDroppingEvenElements(Mask, IsSingleInput)) { | |||
13428 | // NumEvenDrops is the log2 of the element stride. Another way of thinking | |||
13429 | // about it is that we need to drop the even elements this many times to | |||
13430 | // get the original input. | |||
13431 | ||||
13432 | // First we need to zero all the dropped bytes. | |||
13433 | assert(NumEvenDrops <= 3 && | |||
13434 | "No support for dropping even elements more than 3 times."); | |||
13435 | // We use the mask type to pick which bytes are preserved based on how many | |||
13436 | // elements are dropped. | |||
13437 | MVT MaskVTs[] = { MVT::v8i16, MVT::v4i32, MVT::v2i64 }; | |||
13438 | SDValue ByteClearMask = DAG.getBitcast( | |||
13439 | MVT::v16i8, DAG.getConstant(0xFF, DL, MaskVTs[NumEvenDrops - 1])); | |||
13440 | V1 = DAG.getNode(ISD::AND, DL, MVT::v16i8, V1, ByteClearMask); | |||
13441 | if (!IsSingleInput) | |||
13442 | V2 = DAG.getNode(ISD::AND, DL, MVT::v16i8, V2, ByteClearMask); | |||
13443 | ||||
13444 | // Now pack things back together. | |||
13445 | V1 = DAG.getBitcast(MVT::v8i16, V1); | |||
13446 | V2 = IsSingleInput ? V1 : DAG.getBitcast(MVT::v8i16, V2); | |||
13447 | SDValue Result = DAG.getNode(X86ISD::PACKUS, DL, MVT::v16i8, V1, V2); | |||
13448 | for (int i = 1; i < NumEvenDrops; ++i) { | |||
13449 | Result = DAG.getBitcast(MVT::v8i16, Result); | |||
13450 | Result = DAG.getNode(X86ISD::PACKUS, DL, MVT::v16i8, Result, Result); | |||
13451 | } | |||
13452 | ||||
13453 | return Result; | |||
13454 | } | |||
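| // Trace of the compaction path above (hypothetical truncation mask): the | |||
| // two-input mask <0, 2, 4, ..., 30> gives NumEvenDrops == 1, so each input | |||
| // is ANDed with the 0x00FF-per-word clear mask and one PACKUS produces the | |||
| // result; a stride of 4 (NumEvenDrops == 2) would run one extra PACKUS | |||
| // round on the packed value. | |||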
13455 | ||||
13456 | // Handle multi-input cases by blending single-input shuffles. | |||
13457 | if (NumV2Elements > 0) | |||
13458 | return lowerVectorShuffleAsDecomposedShuffleBlend(DL, MVT::v16i8, V1, V2, | |||
13459 | Mask, Subtarget, DAG); | |||
13460 | ||||
13461 | // The fallback path for single-input shuffles widens this into two v8i16 | |||
13462 | // vectors with unpacks, shuffles those, and then pulls them back together | |||
13463 | // with a pack. | |||
13464 | SDValue V = V1; | |||
13465 | ||||
13466 | std::array<int, 8> LoBlendMask = {{-1, -1, -1, -1, -1, -1, -1, -1}}; | |||
13467 | std::array<int, 8> HiBlendMask = {{-1, -1, -1, -1, -1, -1, -1, -1}}; | |||
13468 | for (int i = 0; i < 16; ++i) | |||
13469 | if (Mask[i] >= 0) | |||
13470 | (i < 8 ? LoBlendMask[i] : HiBlendMask[i % 8]) = Mask[i]; | |||
13471 | ||||
13472 | SDValue VLoHalf, VHiHalf; | |||
13473 | // Check if any of the odd lanes in the v16i8 are used. If not, we can mask | |||
13474 | // them out and avoid using UNPCK{L,H} to extract the elements of V as | |||
13475 | // i16s. | |||
13476 | if (none_of(LoBlendMask, [](int M) { return M >= 0 && M % 2 == 1; }) && | |||
13477 | none_of(HiBlendMask, [](int M) { return M >= 0 && M % 2 == 1; })) { | |||
13478 | // Use a mask to drop the high bytes. | |||
13479 | VLoHalf = DAG.getBitcast(MVT::v8i16, V); | |||
13480 | VLoHalf = DAG.getNode(ISD::AND, DL, MVT::v8i16, VLoHalf, | |||
13481 | DAG.getConstant(0x00FF, DL, MVT::v8i16)); | |||
13482 | ||||
13483 | // This will be a single vector shuffle instead of a blend so nuke VHiHalf. | |||
13484 | VHiHalf = DAG.getUNDEF(MVT::v8i16); | |||
13485 | ||||
13486 | // Squash the masks to point directly into VLoHalf. | |||
13487 | for (int &M : LoBlendMask) | |||
13488 | if (M >= 0) | |||
13489 | M /= 2; | |||
13490 | for (int &M : HiBlendMask) | |||
13491 | if (M >= 0) | |||
13492 | M /= 2; | |||
13493 | } else { | |||
13494 | // Otherwise just unpack the low half of V into VLoHalf and the high half into | |||
13495 | // VHiHalf so that we can blend them as i16s. | |||
13496 | SDValue Zero = getZeroVector(MVT::v16i8, Subtarget, DAG, DL); | |||
13497 | ||||
13498 | VLoHalf = DAG.getBitcast( | |||
13499 | MVT::v8i16, DAG.getNode(X86ISD::UNPCKL, DL, MVT::v16i8, V, Zero)); | |||
13500 | VHiHalf = DAG.getBitcast( | |||
13501 | MVT::v8i16, DAG.getNode(X86ISD::UNPCKH, DL, MVT::v16i8, V, Zero)); | |||
13502 | } | |||
13503 | ||||
13504 | SDValue LoV = DAG.getVectorShuffle(MVT::v8i16, DL, VLoHalf, VHiHalf, LoBlendMask); | |||
13505 | SDValue HiV = DAG.getVectorShuffle(MVT::v8i16, DL, VLoHalf, VHiHalf, HiBlendMask); | |||
13506 | ||||
13507 | return DAG.getNode(X86ISD::PACKUS, DL, MVT::v16i8, LoV, HiV); | |||
13508 | } | |||
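| // The fallback above thus needs at most: one AND (or two UNPCKs against | |||
| // zero) to widen the bytes to words, two v8i16 shuffles for the halves, and | |||
| // one PACKUS to repack the shuffled words into bytes. | |||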
13509 | ||||
13510 | /// Dispatching routine to lower various 128-bit x86 vector shuffles. | |||
13511 | /// | |||
13512 | /// This routine breaks down the specific type of 128-bit shuffle and | |||
13513 | /// dispatches to the lowering routines accordingly. | |||
13514 | static SDValue lower128BitVectorShuffle(const SDLoc &DL, ArrayRef<int> Mask, | |||
13515 | MVT VT, SDValue V1, SDValue V2, | |||
13516 | const APInt &Zeroable, | |||
13517 | const X86Subtarget &Subtarget, | |||
13518 | SelectionDAG &DAG) { | |||
13519 | switch (VT.SimpleTy) { | |||
13520 | case MVT::v2i64: | |||
13521 | return lowerV2I64VectorShuffle(DL, Mask, Zeroable, V1, V2, Subtarget, DAG); | |||
13522 | case MVT::v2f64: | |||
13523 | return lowerV2F64VectorShuffle(DL, Mask, Zeroable, V1, V2, Subtarget, DAG); | |||
13524 | case MVT::v4i32: | |||
13525 | return lowerV4I32VectorShuffle(DL, Mask, Zeroable, V1, V2, Subtarget, DAG); | |||
13526 | case MVT::v4f32: | |||
13527 | return lowerV4F32VectorShuffle(DL, Mask, Zeroable, V1, V2, Subtarget, DAG); | |||
13528 | case MVT::v8i16: | |||
13529 | return lowerV8I16VectorShuffle(DL, Mask, Zeroable, V1, V2, Subtarget, DAG); | |||
13530 | case MVT::v16i8: | |||
13531 | return lowerV16I8VectorShuffle(DL, Mask, Zeroable, V1, V2, Subtarget, DAG); | |||
13532 | ||||
13533 | default: | |||
13534 | llvm_unreachable("Unimplemented!")::llvm::llvm_unreachable_internal("Unimplemented!", "/build/llvm-toolchain-snapshot-8~svn350071/lib/Target/X86/X86ISelLowering.cpp" , 13534); | |||
13535 | } | |||
13536 | } | |||
13537 | ||||
13538 | /// Generic routine to split vector shuffle into half-sized shuffles. | |||
13539 | /// | |||
13540 | /// This routine just extracts two subvectors, shuffles them independently, and | |||
13541 | /// then concatenates them back together. This should work effectively with all | |||
13542 | /// AVX vector shuffle types. | |||
13543 | static SDValue splitAndLowerVectorShuffle(const SDLoc &DL, MVT VT, SDValue V1, | |||
13544 | SDValue V2, ArrayRef<int> Mask, | |||
13545 | SelectionDAG &DAG) { | |||
13546 | assert(VT.getSizeInBits() >= 256 && | |||
13547 | "Only for 256-bit or wider vector shuffles!"); | |||
13548 | assert(V1.getSimpleValueType() == VT && "Bad operand type!"); | |||
13549 | assert(V2.getSimpleValueType() == VT && "Bad operand type!"); | |||
13550 | ||||
13551 | ArrayRef<int> LoMask = Mask.slice(0, Mask.size() / 2); | |||
13552 | ArrayRef<int> HiMask = Mask.slice(Mask.size() / 2); | |||
13553 | ||||
13554 | int NumElements = VT.getVectorNumElements(); | |||
13555 | int SplitNumElements = NumElements / 2; | |||
13556 | MVT ScalarVT = VT.getVectorElementType(); | |||
13557 | MVT SplitVT = MVT::getVectorVT(ScalarVT, NumElements / 2); | |||
13558 | ||||
13559 | // Rather than splitting build-vectors, just build two narrower build | |||
13560 | // vectors. This helps shuffling with splats and zeros. | |||
13561 | auto SplitVector = [&](SDValue V) { | |||
13562 | V = peekThroughBitcasts(V); | |||
13563 | ||||
13564 | MVT OrigVT = V.getSimpleValueType(); | |||
13565 | int OrigNumElements = OrigVT.getVectorNumElements(); | |||
13566 | int OrigSplitNumElements = OrigNumElements / 2; | |||
13567 | MVT OrigScalarVT = OrigVT.getVectorElementType(); | |||
13568 | MVT OrigSplitVT = MVT::getVectorVT(OrigScalarVT, OrigNumElements / 2); | |||
13569 | ||||
13570 | SDValue LoV, HiV; | |||
13571 | ||||
13572 | auto *BV = dyn_cast<BuildVectorSDNode>(V); | |||
13573 | if (!BV) { | |||
13574 | LoV = DAG.getNode(ISD::EXTRACT_SUBVECTOR, DL, OrigSplitVT, V, | |||
13575 | DAG.getIntPtrConstant(0, DL)); | |||
13576 | HiV = DAG.getNode(ISD::EXTRACT_SUBVECTOR, DL, OrigSplitVT, V, | |||
13577 | DAG.getIntPtrConstant(OrigSplitNumElements, DL)); | |||
13578 | } else { | |||
13579 | ||||
13580 | SmallVector<SDValue, 16> LoOps, HiOps; | |||
13581 | for (int i = 0; i < OrigSplitNumElements; ++i) { | |||
13582 | LoOps.push_back(BV->getOperand(i)); | |||
13583 | HiOps.push_back(BV->getOperand(i + OrigSplitNumElements)); | |||
13584 | } | |||
13585 | LoV = DAG.getBuildVector(OrigSplitVT, DL, LoOps); | |||
13586 | HiV = DAG.getBuildVector(OrigSplitVT, DL, HiOps); | |||
13587 | } | |||
13588 | return std::make_pair(DAG.getBitcast(SplitVT, LoV), | |||
13589 | DAG.getBitcast(SplitVT, HiV)); | |||
13590 | }; | |||
13591 | ||||
13592 | SDValue LoV1, HiV1, LoV2, HiV2; | |||
13593 | std::tie(LoV1, HiV1) = SplitVector(V1); | |||
13594 | std::tie(LoV2, HiV2) = SplitVector(V2); | |||
13595 | ||||
13596 | // Now create two 4-way blends of these half-width vectors. | |||
13597 | auto HalfBlend = [&](ArrayRef<int> HalfMask) { | |||
13598 | bool UseLoV1 = false, UseHiV1 = false, UseLoV2 = false, UseHiV2 = false; | |||
13599 | SmallVector<int, 32> V1BlendMask((unsigned)SplitNumElements, -1); | |||
13600 | SmallVector<int, 32> V2BlendMask((unsigned)SplitNumElements, -1); | |||
13601 | SmallVector<int, 32> BlendMask((unsigned)SplitNumElements, -1); | |||
13602 | for (int i = 0; i < SplitNumElements; ++i) { | |||
13603 | int M = HalfMask[i]; | |||
13604 | if (M >= NumElements) { | |||
13605 | if (M >= NumElements + SplitNumElements) | |||
13606 | UseHiV2 = true; | |||
13607 | else | |||
13608 | UseLoV2 = true; | |||
13609 | V2BlendMask[i] = M - NumElements; | |||
13610 | BlendMask[i] = SplitNumElements + i; | |||
13611 | } else if (M >= 0) { | |||
13612 | if (M >= SplitNumElements) | |||
13613 | UseHiV1 = true; | |||
13614 | else | |||
13615 | UseLoV1 = true; | |||
13616 | V1BlendMask[i] = M; | |||
13617 | BlendMask[i] = i; | |||
13618 | } | |||
13619 | } | |||
13620 | ||||
13621 | // Because the lowering happens after all combining takes place, we need to | |||
13622 | // manually combine these blend masks as much as possible so that we create | |||
13623 | // a minimal number of high-level vector shuffle nodes. | |||
13624 | ||||
13625 | // First try just blending the halves of V1 or V2. | |||
13626 | if (!UseLoV1 && !UseHiV1 && !UseLoV2 && !UseHiV2) | |||
13627 | return DAG.getUNDEF(SplitVT); | |||
13628 | if (!UseLoV2 && !UseHiV2) | |||
13629 | return DAG.getVectorShuffle(SplitVT, DL, LoV1, HiV1, V1BlendMask); | |||
13630 | if (!UseLoV1 && !UseHiV1) | |||
13631 | return DAG.getVectorShuffle(SplitVT, DL, LoV2, HiV2, V2BlendMask); | |||
13632 | ||||
13633 | SDValue V1Blend, V2Blend; | |||
13634 | if (UseLoV1 && UseHiV1) { | |||
13635 | V1Blend = | |||
13636 | DAG.getVectorShuffle(SplitVT, DL, LoV1, HiV1, V1BlendMask); | |||
13637 | } else { | |||
13638 | // We only use half of V1 so map the usage down into the final blend mask. | |||
13639 | V1Blend = UseLoV1 ? LoV1 : HiV1; | |||
13640 | for (int i = 0; i < SplitNumElements; ++i) | |||
13641 | if (BlendMask[i] >= 0 && BlendMask[i] < SplitNumElements) | |||
13642 | BlendMask[i] = V1BlendMask[i] - (UseLoV1 ? 0 : SplitNumElements); | |||
13643 | } | |||
13644 | if (UseLoV2 && UseHiV2) { | |||
13645 | V2Blend = | |||
13646 | DAG.getVectorShuffle(SplitVT, DL, LoV2, HiV2, V2BlendMask); | |||
13647 | } else { | |||
13648 | // We only use half of V2 so map the usage down into the final blend mask. | |||
13649 | V2Blend = UseLoV2 ? LoV2 : HiV2; | |||
13650 | for (int i = 0; i < SplitNumElements; ++i) | |||
13651 | if (BlendMask[i] >= SplitNumElements) | |||
13652 | BlendMask[i] = V2BlendMask[i] + (UseLoV2 ? SplitNumElements : 0); | |||
13653 | } | |||
13654 | return DAG.getVectorShuffle(SplitVT, DL, V1Blend, V2Blend, BlendMask); | |||
13655 | }; | |||
13656 | SDValue Lo = HalfBlend(LoMask); | |||
13657 | SDValue Hi = HalfBlend(HiMask); | |||
13658 | return DAG.getNode(ISD::CONCAT_VECTORS, DL, VT, Lo, Hi); | |||
13659 | } | |||
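// A minimal standalone sketch of the HalfBlend index arithmetic above, in
// plain C++ with a hypothetical helper name (not part of this file). For a
// v8i32 shuffle with Mask = <0, 9, 2, 11, 4, 13, 6, 15>, the low half
// <0, 9, 2, 11> (NumElements = 8, SplitNumElements = 4) produces:
//   V1BlendMask = <0, -1, 2, -1>, V2BlendMask = <-1, 1, -1, 3>,
//   BlendMask   = <0, 5, 2, 7>.
static void sketchHalfBlendMasks(const int *HalfMask, int SplitNumElements,
                                 int NumElements, int *V1BlendMask,
                                 int *V2BlendMask, int *BlendMask) {
  for (int i = 0; i < SplitNumElements; ++i) {
    int M = HalfMask[i];
    V1BlendMask[i] = V2BlendMask[i] = BlendMask[i] = -1;
    if (M >= NumElements) {
      // Element comes from one of V2's halves.
      V2BlendMask[i] = M - NumElements;
      BlendMask[i] = SplitNumElements + i;
    } else if (M >= 0) {
      // Element comes from one of V1's halves.
      V1BlendMask[i] = M;
      BlendMask[i] = i;
    }
  }
}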
13660 | ||||
13661 | /// Either split a vector in halves or decompose the shuffles and the | |||
13662 | /// blend. | |||
13663 | /// | |||
13664 | /// This is provided as a good fallback for many lowerings of non-single-input | |||
13665 | /// shuffles with more than one 128-bit lane. In those cases, we want to select | |||
13666 | /// between splitting the shuffle into 128-bit components and stitching those | |||
13667 | /// back together vs. extracting the single-input shuffles and blending those | |||
13668 | /// results. | |||
13669 | static SDValue lowerVectorShuffleAsSplitOrBlend(const SDLoc &DL, MVT VT, | |||
13670 | SDValue V1, SDValue V2, | |||
13671 | ArrayRef<int> Mask, | |||
13672 | const X86Subtarget &Subtarget, | |||
13673 | SelectionDAG &DAG) { | |||
13674 | assert(!V2.isUndef() && "This routine must not be used to lower single-input " | |||
13675 | "shuffles as it could then recurse on itself."); | |||
13676 | int Size = Mask.size(); | |||
13677 | ||||
13678 | // If this can be modeled as a broadcast of two elements followed by a blend, | |||
13679 | // prefer that lowering. This is especially important because broadcasts can | |||
13680 | // often fold with memory operands. | |||
13681 | auto DoBothBroadcast = [&] { | |||
13682 | int V1BroadcastIdx = -1, V2BroadcastIdx = -1; | |||
13683 | for (int M : Mask) | |||
13684 | if (M >= Size) { | |||
13685 | if (V2BroadcastIdx < 0) | |||
13686 | V2BroadcastIdx = M - Size; | |||
13687 | else if (M - Size != V2BroadcastIdx) | |||
13688 | return false; | |||
13689 | } else if (M >= 0) { | |||
13690 | if (V1BroadcastIdx < 0) | |||
13691 | V1BroadcastIdx = M; | |||
13692 | else if (M != V1BroadcastIdx) | |||
13693 | return false; | |||
13694 | } | |||
13695 | return true; | |||
13696 | }; | |||
13697 | if (DoBothBroadcast()) | |||
13698 | return lowerVectorShuffleAsDecomposedShuffleBlend(DL, VT, V1, V2, Mask, | |||
13699 | Subtarget, DAG); | |||
13700 | ||||
13701 | // If the inputs all stem from a single 128-bit lane of each input, then we | |||
13702 | // split them rather than blending because the split will decompose to | |||
13703 | // unusually few instructions. | |||
13704 | int LaneCount = VT.getSizeInBits() / 128; | |||
13705 | int LaneSize = Size / LaneCount; | |||
13706 | SmallBitVector LaneInputs[2]; | |||
13707 | LaneInputs[0].resize(LaneCount, false); | |||
13708 | LaneInputs[1].resize(LaneCount, false); | |||
13709 | for (int i = 0; i < Size; ++i) | |||
13710 | if (Mask[i] >= 0) | |||
13711 | LaneInputs[Mask[i] / Size][(Mask[i] % Size) / LaneSize] = true; | |||
13712 | if (LaneInputs[0].count() <= 1 && LaneInputs[1].count() <= 1) | |||
13713 | return splitAndLowerVectorShuffle(DL, VT, V1, V2, Mask, DAG); | |||
13714 | ||||
13715 | // Otherwise, just fall back to decomposed shuffles and a blend. This requires | |||
13716 | // that the decomposed single-input shuffles don't end up here. | |||
13717 | return lowerVectorShuffleAsDecomposedShuffleBlend(DL, VT, V1, V2, Mask, | |||
13718 | Subtarget, DAG); | |||
13719 | } | |||
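// A standalone sketch of the DoBothBroadcast test above (hypothetical name,
// plain C++). For a v4f64 mask <2, 5, 2, 5> with Size = 4, every V1
// reference is element 2 and every V2 reference is element 1 (5 - 4), so
// both inputs are broadcasts and the decompose-and-blend path is taken.
static bool sketchDoBothBroadcast(const int *Mask, int Size) {
  int V1BroadcastIdx = -1, V2BroadcastIdx = -1;
  for (int i = 0; i < Size; ++i) {
    int M = Mask[i];
    if (M >= Size) {
      if (V2BroadcastIdx < 0)
        V2BroadcastIdx = M - Size;
      else if (M - Size != V2BroadcastIdx)
        return false;
    } else if (M >= 0) {
      if (V1BroadcastIdx < 0)
        V1BroadcastIdx = M;
      else if (M != V1BroadcastIdx)
        return false;
    }
  }
  return true;
}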
13720 | ||||
13721 | /// Lower a vector shuffle crossing multiple 128-bit lanes as | |||
13722 | /// a lane permutation followed by a per-lane permutation. | |||
13723 | /// | |||
13724 | /// This is mainly for cases where we can have non-repeating permutes | |||
13725 | /// in each lane. | |||
13726 | /// | |||
13727 | /// TODO: This is very similar to lowerVectorShuffleByMerging128BitLanes, | |||
13728 | /// we should investigate merging them. | |||
13729 | static SDValue lowerVectorShuffleAsLanePermuteAndPermute( | |||
13730 | const SDLoc &DL, MVT VT, SDValue V1, SDValue V2, ArrayRef<int> Mask, | |||
13731 | SelectionDAG &DAG, const X86Subtarget &Subtarget) { | |||
13732 | int NumElts = VT.getVectorNumElements(); | |||
13733 | int NumLanes = VT.getSizeInBits() / 128; | |||
13734 | int NumEltsPerLane = NumElts / NumLanes; | |||
13735 | ||||
13736 | SmallVector<int, 4> SrcLaneMask(NumLanes, SM_SentinelUndef); | |||
13737 | SmallVector<int, 16> LaneMask(NumElts, SM_SentinelUndef); | |||
13738 | SmallVector<int, 16> PermMask(NumElts, SM_SentinelUndef); | |||
13739 | ||||
13740 | for (int i = 0; i != NumElts; ++i) { | |||
13741 | int M = Mask[i]; | |||
13742 | if (M < 0) | |||
13743 | continue; | |||
13744 | ||||
13745 | // Ensure that each lane comes from a single source lane. | |||
13746 | int SrcLane = M / NumEltsPerLane; | |||
13747 | int DstLane = i / NumEltsPerLane; | |||
13748 | if (!isUndefOrEqual(SrcLaneMask[DstLane], SrcLane)) | |||
13749 | return SDValue(); | |||
13750 | SrcLaneMask[DstLane] = SrcLane; | |||
13751 | ||||
13752 | LaneMask[i] = (SrcLane * NumEltsPerLane) + (i % NumEltsPerLane); | |||
13753 | PermMask[i] = (DstLane * NumEltsPerLane) + (M % NumEltsPerLane); | |||
13754 | } | |||
13755 | ||||
13756 | // If we're only shuffling the lowest lane and the rest are identity lanes, | |||
13757 | // then don't bother. | |||
13758 | // TODO - isShuffleMaskInputInPlace could be extended to something like this. | |||
13759 | int NumIdentityLanes = 0; | |||
13760 | bool OnlyShuffleLowestLane = true; | |||
13761 | for (int i = 0; i != NumLanes; ++i) { | |||
13762 | if (isSequentialOrUndefInRange(PermMask, i * NumEltsPerLane, NumEltsPerLane, | |||
13763 | i * NumEltsPerLane)) | |||
13764 | NumIdentityLanes++; | |||
13765 | else if (SrcLaneMask[i] != 0 && SrcLaneMask[i] != NumLanes) | |||
13766 | OnlyShuffleLowestLane = false; | |||
13767 | } | |||
13768 | if (OnlyShuffleLowestLane && NumIdentityLanes == (NumLanes - 1)) | |||
13769 | return SDValue(); | |||
13770 | ||||
13771 | SDValue LanePermute = DAG.getVectorShuffle(VT, DL, V1, V2, LaneMask); | |||
13772 | return DAG.getVectorShuffle(VT, DL, LanePermute, DAG.getUNDEF(VT), PermMask); | |||
13773 | } | |||
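// Worked example of the two masks built above, under the assumption of a
// v8i32 single-input shuffle with Mask = <4, 5, 6, 4, 1, 0, 3, 2>
// (NumEltsPerLane = 4):
//   SrcLaneMask = <1, 0>  (dst lane 0 reads src lane 1 and vice versa)
//   LaneMask    = <4, 5, 6, 7, 0, 1, 2, 3>  (first move whole lanes)
//   PermMask    = <0, 1, 2, 0, 5, 4, 7, 6>  (then permute within lanes)
// A minimal re-derivation of that index split in plain C++:
static void sketchLanePermuteAndPermute(const int *Mask, int NumElts,
                                        int NumEltsPerLane, int *LaneMask,
                                        int *PermMask) {
  for (int i = 0; i < NumElts; ++i) {
    int M = Mask[i];
    if (M < 0) {
      LaneMask[i] = PermMask[i] = -1;
      continue;
    }
    int SrcLane = M / NumEltsPerLane;
    int DstLane = i / NumEltsPerLane;
    LaneMask[i] = SrcLane * NumEltsPerLane + (i % NumEltsPerLane);
    PermMask[i] = DstLane * NumEltsPerLane + (M % NumEltsPerLane);
  }
}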
13774 | ||||
13775 | /// Lower a vector shuffle crossing multiple 128-bit lanes as | |||
13776 | /// a permutation and blend of those lanes. | |||
13777 | /// | |||
13778 | /// This essentially blends the out-of-lane inputs to each lane into the lane | |||
13779 | /// from a permuted copy of the vector. This lowering strategy results in four | |||
13780 | /// instructions in the worst case for a single-input cross lane shuffle which | |||
13781 | /// is lower than any other fully general cross-lane shuffle strategy I'm aware | |||
13782 | /// of. Special cases for each particular shuffle pattern should be handled | |||
13783 | /// prior to trying this lowering. | |||
13784 | static SDValue lowerVectorShuffleAsLanePermuteAndBlend(const SDLoc &DL, MVT VT, | |||
13785 | SDValue V1, SDValue V2, | |||
13786 | ArrayRef<int> Mask, | |||
13787 | SelectionDAG &DAG, | |||
13788 | const X86Subtarget &Subtarget) { | |||
13789 | // FIXME: This should probably be generalized for 512-bit vectors as well. | |||
13790 | assert(VT.is256BitVector() && "Only for 256-bit vector shuffles!"); | |||
13791 | int Size = Mask.size(); | |||
13792 | int LaneSize = Size / 2; | |||
13793 | ||||
13794 | // If there are only inputs from one 128-bit lane, splitting will in fact be | |||
13795 | // less expensive. The flags track whether the given lane contains an element | |||
13796 | // that crosses to another lane. | |||
13797 | if (!Subtarget.hasAVX2()) { | |||
13798 | bool LaneCrossing[2] = {false, false}; | |||
13799 | for (int i = 0; i < Size; ++i) | |||
13800 | if (Mask[i] >= 0 && (Mask[i] % Size) / LaneSize != i / LaneSize) | |||
13801 | LaneCrossing[(Mask[i] % Size) / LaneSize] = true; | |||
13802 | if (!LaneCrossing[0] || !LaneCrossing[1]) | |||
13803 | return splitAndLowerVectorShuffle(DL, VT, V1, V2, Mask, DAG); | |||
13804 | } else { | |||
13805 | bool LaneUsed[2] = {false, false}; | |||
13806 | for (int i = 0; i < Size; ++i) | |||
13807 | if (Mask[i] >= 0) | |||
13808 | LaneUsed[(Mask[i] / LaneSize)] = true; | |||
13809 | if (!LaneUsed[0] || !LaneUsed[1]) | |||
13810 | return splitAndLowerVectorShuffle(DL, VT, V1, V2, Mask, DAG); | |||
13811 | } | |||
13812 | ||||
13813 | assert(V2.isUndef() && | |||
13814 | "This last part of this routine only works on single input shuffles"); | |||
13815 | ||||
13816 | SmallVector<int, 32> FlippedBlendMask(Size); | |||
13817 | for (int i = 0; i < Size; ++i) | |||
13818 | FlippedBlendMask[i] = | |||
13819 | Mask[i] < 0 ? -1 : (((Mask[i] % Size) / LaneSize == i / LaneSize) | |||
13820 | ? Mask[i] | |||
13821 | : Mask[i] % LaneSize + | |||
13822 | (i / LaneSize) * LaneSize + Size); | |||
13823 | ||||
13824 | // Flip the vector, and blend the results which should now be in-lane. | |||
13825 | MVT PVT = VT.isFloatingPoint() ? MVT::v4f64 : MVT::v4i64; | |||
13826 | SDValue Flipped = DAG.getBitcast(PVT, V1); | |||
13827 | Flipped = DAG.getVectorShuffle(PVT, DL, Flipped, DAG.getUNDEF(PVT), | |||
13828 | { 2, 3, 0, 1 }); | |||
13829 | Flipped = DAG.getBitcast(VT, Flipped); | |||
13830 | return DAG.getVectorShuffle(VT, DL, V1, Flipped, FlippedBlendMask); | |||
13831 | } | |||
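// Worked example of FlippedBlendMask above, assuming a v8f32 single-input
// shuffle with Mask = <4, 1, 6, 3, 0, 5, 2, 7> (Size = 8, LaneSize = 4).
// In-lane elements keep their index; cross-lane elements are redirected
// into the lane-flipped copy, i.e. offset by Size:
//   FlippedBlendMask = <8, 1, 10, 3, 12, 5, 14, 7>
// Minimal sketch of the per-element rule:
static int sketchFlippedBlendElt(int M, int i, int Size, int LaneSize) {
  if (M < 0)
    return -1;
  if ((M % Size) / LaneSize == i / LaneSize)
    return M;                                           // Already in-lane.
  return M % LaneSize + (i / LaneSize) * LaneSize + Size; // Flipped copy.
}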
13832 | ||||
13833 | /// Handle lowering 2-lane 128-bit shuffles. | |||
13834 | static SDValue lowerV2X128VectorShuffle(const SDLoc &DL, MVT VT, SDValue V1, | |||
13835 | SDValue V2, ArrayRef<int> Mask, | |||
13836 | const APInt &Zeroable, | |||
13837 | const X86Subtarget &Subtarget, | |||
13838 | SelectionDAG &DAG) { | |||
13839 | // With AVX2, use VPERMQ/VPERMPD for unary shuffles to allow memory folding. | |||
13840 | if (Subtarget.hasAVX2() && V2.isUndef()) | |||
13841 | return SDValue(); | |||
13842 | ||||
13843 | SmallVector<int, 4> WidenedMask; | |||
13844 | if (!canWidenShuffleElements(Mask, Zeroable, WidenedMask)) | |||
13845 | return SDValue(); | |||
13846 | ||||
13847 | bool IsLowZero = (Zeroable & 0x3) == 0x3; | |||
13848 | bool IsHighZero = (Zeroable & 0xc) == 0xc; | |||
13849 | ||||
13850 | // Try to use an insert into a zero vector. | |||
13851 | if (WidenedMask[0] == 0 && IsHighZero) { | |||
13852 | MVT SubVT = MVT::getVectorVT(VT.getVectorElementType(), 2); | |||
13853 | SDValue LoV = DAG.getNode(ISD::EXTRACT_SUBVECTOR, DL, SubVT, V1, | |||
13854 | DAG.getIntPtrConstant(0, DL)); | |||
13855 | return DAG.getNode(ISD::INSERT_SUBVECTOR, DL, VT, | |||
13856 | getZeroVector(VT, Subtarget, DAG, DL), LoV, | |||
13857 | DAG.getIntPtrConstant(0, DL)); | |||
13858 | } | |||
13859 | ||||
13860 | // TODO: If minimizing size and one of the inputs is a zero vector and the | |||
13861 | // zero vector has only one use, we could use a VPERM2X128 to save the | |||
13862 | // instruction bytes needed to explicitly generate the zero vector. | |||
13863 | ||||
13864 | // Blends are faster and handle all the non-lane-crossing cases. | |||
13865 | if (SDValue Blend = lowerVectorShuffleAsBlend(DL, VT, V1, V2, Mask, | |||
13866 | Zeroable, Subtarget, DAG)) | |||
13867 | return Blend; | |||
13868 | ||||
13869 | // If either input operand is a zero vector, use VPERM2X128 because its mask | |||
13870 | // allows us to replace the zero input with an implicit zero. | |||
13871 | if (!IsLowZero && !IsHighZero) { | |||
13872 | // Check for patterns which can be matched with a single insert of a 128-bit | |||
13873 | // subvector. | |||
13874 | bool OnlyUsesV1 = isShuffleEquivalent(V1, V2, Mask, {0, 1, 0, 1}); | |||
13875 | if (OnlyUsesV1 || isShuffleEquivalent(V1, V2, Mask, {0, 1, 4, 5})) { | |||
13876 | ||||
13877 | // With AVX1, use vperm2f128 (below) to allow load folding. Otherwise, | |||
13878 | // this will likely become vinsertf128 which can't fold a 256-bit memop. | |||
13879 | if (!isa<LoadSDNode>(peekThroughBitcasts(V1))) { | |||
13880 | MVT SubVT = MVT::getVectorVT(VT.getVectorElementType(), 2); | |||
13881 | SDValue SubVec = DAG.getNode(ISD::EXTRACT_SUBVECTOR, DL, SubVT, | |||
13882 | OnlyUsesV1 ? V1 : V2, | |||
13883 | DAG.getIntPtrConstant(0, DL)); | |||
13884 | return DAG.getNode(ISD::INSERT_SUBVECTOR, DL, VT, V1, SubVec, | |||
13885 | DAG.getIntPtrConstant(2, DL)); | |||
13886 | } | |||
13887 | } | |||
13888 | ||||
13889 | // Try to use SHUF128 if possible. | |||
13890 | if (Subtarget.hasVLX()) { | |||
13891 | if (WidenedMask[0] < 2 && WidenedMask[1] >= 2) { | |||
13892 | unsigned PermMask = ((WidenedMask[0] % 2) << 0) | | |||
13893 | ((WidenedMask[1] % 2) << 1); | |||
13894 | return DAG.getNode(X86ISD::SHUF128, DL, VT, V1, V2, | |||
13895 | DAG.getConstant(PermMask, DL, MVT::i8)); | |||
13896 | } | |||
13897 | } | |||
13898 | } | |||
13899 | ||||
13900 | // Otherwise form a 128-bit permutation. After accounting for undefs, | |||
13901 | // convert the 64-bit shuffle mask selection values into 128-bit | |||
13902 | // selection bits by dividing the indexes by 2 and shifting into positions | |||
13903 | // defined by a vperm2*128 instruction's immediate control byte. | |||
13904 | ||||
13905 | // The immediate permute control byte looks like this: | |||
13906 | // [1:0] - select 128 bits from sources for low half of destination | |||
13907 | // [2] - ignore | |||
13908 | // [3] - zero low half of destination | |||
13909 | // [5:4] - select 128 bits from sources for high half of destination | |||
13910 | // [6] - ignore | |||
13911 | // [7] - zero high half of destination | |||
13912 | ||||
13913 | assert((WidenedMask[0] >= 0 || IsLowZero) && | |||
13914 | (WidenedMask[1] >= 0 || IsHighZero) && "Undef half?"); | |||
13915 | ||||
13916 | unsigned PermMask = 0; | |||
13917 | PermMask |= IsLowZero ? 0x08 : (WidenedMask[0] << 0); | |||
13918 | PermMask |= IsHighZero ? 0x80 : (WidenedMask[1] << 4); | |||
13919 | ||||
13920 | // Check the immediate mask and replace unused sources with undef. | |||
13921 | if ((PermMask & 0x0a) != 0x00 && (PermMask & 0xa0) != 0x00) | |||
13922 | V1 = DAG.getUNDEF(VT); | |||
13923 | if ((PermMask & 0x0a) != 0x02 && (PermMask & 0xa0) != 0x20) | |||
13924 | V2 = DAG.getUNDEF(VT); | |||
13925 | ||||
13926 | return DAG.getNode(X86ISD::VPERM2X128, DL, VT, V1, V2, | |||
13927 | DAG.getConstant(PermMask, DL, MVT::i8)); | |||
13928 | } | |||
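// Sketch of the VPERM2X128 immediate construction above (hypothetical
// helper, plain C++). For WidenedMask = <1, 2> with no zeroable halves,
// PermMask = (1 << 0) | (2 << 4) = 0x21: low destination half from V1's
// high 128 bits, high destination half from V2's low 128 bits.
static unsigned sketchVPerm2X128Imm(int WidenedLo, int WidenedHi,
                                    bool IsLowZero, bool IsHighZero) {
  unsigned PermMask = 0;
  PermMask |= IsLowZero ? 0x08u : (unsigned)(WidenedLo << 0);
  PermMask |= IsHighZero ? 0x80u : (unsigned)(WidenedHi << 4);
  return PermMask;
}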
13929 | ||||
13930 | /// Lower a vector shuffle by first fixing the 128-bit lanes and then | |||
13931 | /// shuffling each lane. | |||
13932 | /// | |||
13933 | /// This attempts to create a repeated lane shuffle where each lane uses one | |||
13934 | /// or two of the lanes of the inputs. The lanes of the input vectors are | |||
13935 | /// shuffled in one or two independent shuffles to get the lanes into the | |||
13936 | /// position needed by the final shuffle. | |||
13937 | /// | |||
13938 | /// FIXME: This should be generalized to 512-bit shuffles. | |||
13939 | static SDValue lowerVectorShuffleByMerging128BitLanes( | |||
13940 | const SDLoc &DL, MVT VT, SDValue V1, SDValue V2, ArrayRef<int> Mask, | |||
13941 | const X86Subtarget &Subtarget, SelectionDAG &DAG) { | |||
13942 | assert(!V2.isUndef() && "This is only useful with multiple inputs."); | |||
13943 | ||||
13944 | if (is128BitLaneRepeatedShuffleMask(VT, Mask)) | |||
13945 | return SDValue(); | |||
13946 | ||||
13947 | int Size = Mask.size(); | |||
13948 | int LaneSize = 128 / VT.getScalarSizeInBits(); | |||
13949 | int NumLanes = Size / LaneSize; | |||
13950 | assert(NumLanes == 2 && "Only handles 256-bit shuffles."); | |||
13951 | ||||
13952 | SmallVector<int, 16> RepeatMask(LaneSize, -1); | |||
13953 | int LaneSrcs[2][2] = { { -1, -1 }, { -1 , -1 } }; | |||
13954 | ||||
13955 | // First pass will try to fill in the RepeatMask from lanes that need two | |||
13956 | // sources. | |||
13957 | for (int Lane = 0; Lane != NumLanes; ++Lane) { | |||
13958 | int Srcs[2] = { -1, -1 }; | |||
13959 | SmallVector<int, 16> InLaneMask(LaneSize, -1); | |||
13960 | for (int i = 0; i != LaneSize; ++i) { | |||
13961 | int M = Mask[(Lane * LaneSize) + i]; | |||
13962 | if (M < 0) | |||
13963 | continue; | |||
13964 | // Determine which of the 4 possible input lanes (2 from each source) | |||
13965 | // this element comes from. Assign that as one of the sources for this | |||
13966 | // lane. We can assign up to 2 sources for this lane. If we run out of | |||
13967 | // sources we can't do anything. | |||
13968 | int LaneSrc = M / LaneSize; | |||
13969 | int Src; | |||
13970 | if (Srcs[0] < 0 || Srcs[0] == LaneSrc) | |||
13971 | Src = 0; | |||
13972 | else if (Srcs[1] < 0 || Srcs[1] == LaneSrc) | |||
13973 | Src = 1; | |||
13974 | else | |||
13975 | return SDValue(); | |||
13976 | ||||
13977 | Srcs[Src] = LaneSrc; | |||
13978 | InLaneMask[i] = (M % LaneSize) + Src * Size; | |||
13979 | } | |||
13980 | ||||
13981 | // If this lane has two sources, see if it fits with the repeat mask so far. | |||
13982 | if (Srcs[1] < 0) | |||
13983 | continue; | |||
13984 | ||||
13985 | LaneSrcs[Lane][0] = Srcs[0]; | |||
13986 | LaneSrcs[Lane][1] = Srcs[1]; | |||
13987 | ||||
13988 | auto MatchMasks = [](ArrayRef<int> M1, ArrayRef<int> M2) { | |||
13989 | assert(M1.size() == M2.size() && "Unexpected mask size"); | |||
13990 | for (int i = 0, e = M1.size(); i != e; ++i) | |||
13991 | if (M1[i] >= 0 && M2[i] >= 0 && M1[i] != M2[i]) | |||
13992 | return false; | |||
13993 | return true; | |||
13994 | }; | |||
13995 | ||||
13996 | auto MergeMasks = [](ArrayRef<int> Mask, MutableArrayRef<int> MergedMask) { | |||
13997 | assert(Mask.size() == MergedMask.size() && "Unexpected mask size"); | |||
13998 | for (int i = 0, e = MergedMask.size(); i != e; ++i) { | |||
13999 | int M = Mask[i]; | |||
14000 | if (M < 0) | |||
14001 | continue; | |||
14002 | assert((MergedMask[i] < 0 || MergedMask[i] == M) && | |||
14003 | "Unexpected mask element"); | |||
14004 | MergedMask[i] = M; | |||
14005 | } | |||
14006 | }; | |||
14007 | ||||
14008 | if (MatchMasks(InLaneMask, RepeatMask)) { | |||
14009 | // Merge this lane mask into the final repeat mask. | |||
14010 | MergeMasks(InLaneMask, RepeatMask); | |||
14011 | continue; | |||
14012 | } | |||
14013 | ||||
14014 | // Didn't find a match. Swap the operands and try again. | |||
14015 | std::swap(LaneSrcs[Lane][0], LaneSrcs[Lane][1]); | |||
14016 | ShuffleVectorSDNode::commuteMask(InLaneMask); | |||
14017 | ||||
14018 | if (MatchMasks(InLaneMask, RepeatMask)) { | |||
14019 | // Merge this lane mask into the final repeat mask. | |||
14020 | MergeMasks(InLaneMask, RepeatMask); | |||
14021 | continue; | |||
14022 | } | |||
14023 | ||||
14024 | // Couldn't find a match with the operands in either order. | |||
14025 | return SDValue(); | |||
14026 | } | |||
14027 | ||||
14028 | // Now handle any lanes with only one source. | |||
14029 | for (int Lane = 0; Lane != NumLanes; ++Lane) { | |||
14030 | // If this lane has already been processed, skip it. | |||
14031 | if (LaneSrcs[Lane][0] >= 0) | |||
14032 | continue; | |||
14033 | ||||
14034 | for (int i = 0; i != LaneSize; ++i) { | |||
14035 | int M = Mask[(Lane * LaneSize) + i]; | |||
14036 | if (M < 0) | |||
14037 | continue; | |||
14038 | ||||
14039 | // If RepeatMask isn't defined yet we can define it ourselves. | |||
14040 | if (RepeatMask[i] < 0) | |||
14041 | RepeatMask[i] = M % LaneSize; | |||
14042 | ||||
14043 | if (RepeatMask[i] < Size) { | |||
14044 | if (RepeatMask[i] != M % LaneSize) | |||
14045 | return SDValue(); | |||
14046 | LaneSrcs[Lane][0] = M / LaneSize; | |||
14047 | } else { | |||
14048 | if (RepeatMask[i] != ((M % LaneSize) + Size)) | |||
14049 | return SDValue(); | |||
14050 | LaneSrcs[Lane][1] = M / LaneSize; | |||
14051 | } | |||
14052 | } | |||
14053 | ||||
14054 | if (LaneSrcs[Lane][0] < 0 && LaneSrcs[Lane][1] < 0) | |||
14055 | return SDValue(); | |||
14056 | } | |||
14057 | ||||
14058 | SmallVector<int, 16> NewMask(Size, -1); | |||
14059 | for (int Lane = 0; Lane != NumLanes; ++Lane) { | |||
14060 | int Src = LaneSrcs[Lane][0]; | |||
14061 | for (int i = 0; i != LaneSize; ++i) { | |||
14062 | int M = -1; | |||
14063 | if (Src >= 0) | |||
14064 | M = Src * LaneSize + i; | |||
14065 | NewMask[Lane * LaneSize + i] = M; | |||
14066 | } | |||
14067 | } | |||
14068 | SDValue NewV1 = DAG.getVectorShuffle(VT, DL, V1, V2, NewMask); | |||
14069 | // Ensure we didn't get back the shuffle we started with. | |||
14070 | // FIXME: This is a hack to make up for some splat handling code in | |||
14071 | // getVectorShuffle. | |||
14072 | if (isa<ShuffleVectorSDNode>(NewV1) && | |||
14073 | cast<ShuffleVectorSDNode>(NewV1)->getMask() == Mask) | |||
14074 | return SDValue(); | |||
14075 | ||||
14076 | for (int Lane = 0; Lane != NumLanes; ++Lane) { | |||
14077 | int Src = LaneSrcs[Lane][1]; | |||
14078 | for (int i = 0; i != LaneSize; ++i) { | |||
14079 | int M = -1; | |||
14080 | if (Src >= 0) | |||
14081 | M = Src * LaneSize + i; | |||
14082 | NewMask[Lane * LaneSize + i] = M; | |||
14083 | } | |||
14084 | } | |||
14085 | SDValue NewV2 = DAG.getVectorShuffle(VT, DL, V1, V2, NewMask); | |||
14086 | // Ensure we didn't get back the shuffle we started with. | |||
14087 | // FIXME: This is a hack to make up for some splat handling code in | |||
14088 | // getVectorShuffle. | |||
14089 | if (isa<ShuffleVectorSDNode>(NewV2) && | |||
14090 | cast<ShuffleVectorSDNode>(NewV2)->getMask() == Mask) | |||
14091 | return SDValue(); | |||
14092 | ||||
14093 | for (int i = 0; i != Size; ++i) { | |||
14094 | NewMask[i] = RepeatMask[i % LaneSize]; | |||
14095 | if (NewMask[i] < 0) | |||
14096 | continue; | |||
14097 | ||||
14098 | NewMask[i] += (i / LaneSize) * LaneSize; | |||
14099 | } | |||
14100 | return DAG.getVectorShuffle(VT, DL, NewV1, NewV2, NewMask); | |||
14101 | } | |||
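// Worked example for the merge above, assuming a v8i32 shuffle with
// Mask = <0, 8, 1, 9, 0, 8, 1, 9> (LaneSize = 4). Both destination lanes
// draw from V1 lane 0 and V2 lane 0, so RepeatMask = <0, 8, 1, 9>,
// LaneSrcs = {{0, 2}, {0, 2}}, NewV1 duplicates V1's low lane, NewV2
// duplicates V2's low lane, and the final mask re-applies the repeat
// pattern per lane, giving <0, 8, 1, 9, 4, 12, 5, 13> on NewV1/NewV2.
// Minimal sketch of that last step:
static void sketchBuildFinalRepeatMask(const int *RepeatMask, int LaneSize,
                                       int Size, int *NewMask) {
  for (int i = 0; i < Size; ++i) {
    NewMask[i] = RepeatMask[i % LaneSize];
    if (NewMask[i] >= 0)
      NewMask[i] += (i / LaneSize) * LaneSize;
  }
}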
14102 | ||||
14103 | /// Lower shuffles where an entire half of a 256 or 512-bit vector is UNDEF. | |||
14104 | /// This allows for fast cases such as subvector extraction/insertion | |||
14105 | /// or shuffling smaller vector types which can lower more efficiently. | |||
14106 | static SDValue lowerVectorShuffleWithUndefHalf(const SDLoc &DL, MVT VT, | |||
14107 | SDValue V1, SDValue V2, | |||
14108 | ArrayRef<int> Mask, | |||
14109 | const X86Subtarget &Subtarget, | |||
14110 | SelectionDAG &DAG) { | |||
14111 | assert((VT.is256BitVector() || VT.is512BitVector()) && | |||
14112 | "Expected 256-bit or 512-bit vector"); | |||
14113 | ||||
14114 | unsigned NumElts = VT.getVectorNumElements(); | |||
14115 | unsigned HalfNumElts = NumElts / 2; | |||
14116 | MVT HalfVT = MVT::getVectorVT(VT.getVectorElementType(), HalfNumElts); | |||
14117 | ||||
14118 | bool UndefLower = isUndefInRange(Mask, 0, HalfNumElts); | |||
14119 | bool UndefUpper = isUndefInRange(Mask, HalfNumElts, HalfNumElts); | |||
14120 | if (!UndefLower && !UndefUpper) | |||
14121 | return SDValue(); | |||
14122 | ||||
14123 | // Upper half is undef and lower half is whole upper subvector. | |||
14124 | // e.g. vector_shuffle <4, 5, 6, 7, u, u, u, u> or <2, 3, u, u> | |||
14125 | if (UndefUpper && | |||
14126 | isSequentialOrUndefInRange(Mask, 0, HalfNumElts, HalfNumElts)) { | |||
14127 | SDValue Hi = DAG.getNode(ISD::EXTRACT_SUBVECTOR, DL, HalfVT, V1, | |||
14128 | DAG.getIntPtrConstant(HalfNumElts, DL)); | |||
14129 | return DAG.getNode(ISD::INSERT_SUBVECTOR, DL, VT, DAG.getUNDEF(VT), Hi, | |||
14130 | DAG.getIntPtrConstant(0, DL)); | |||
14131 | } | |||
14132 | ||||
14133 | // Lower half is undef and upper half is whole lower subvector. | |||
14134 | // e.g. vector_shuffle <u, u, u, u, 0, 1, 2, 3> or <u, u, 0, 1> | |||
14135 | if (UndefLower && | |||
14136 | isSequentialOrUndefInRange(Mask, HalfNumElts, HalfNumElts, 0)) { | |||
14137 | SDValue Hi = DAG.getNode(ISD::EXTRACT_SUBVECTOR, DL, HalfVT, V1, | |||
14138 | DAG.getIntPtrConstant(0, DL)); | |||
14139 | return DAG.getNode(ISD::INSERT_SUBVECTOR, DL, VT, DAG.getUNDEF(VT), Hi, | |||
14140 | DAG.getIntPtrConstant(HalfNumElts, DL)); | |||
14141 | } | |||
14142 | ||||
14143 | // If the shuffle only uses two of the four halves of the input operands, | |||
14144 | // then extract them and perform the 'half' shuffle at half width. | |||
14145 | // e.g. vector_shuffle <X, X, X, X, u, u, u, u> or <X, X, u, u> | |||
14146 | int HalfIdx1 = -1, HalfIdx2 = -1; | |||
14147 | SmallVector<int, 8> HalfMask(HalfNumElts); | |||
14148 | unsigned Offset = UndefLower ? HalfNumElts : 0; | |||
14149 | for (unsigned i = 0; i != HalfNumElts; ++i) { | |||
14150 | int M = Mask[i + Offset]; | |||
14151 | if (M < 0) { | |||
14152 | HalfMask[i] = M; | |||
14153 | continue; | |||
14154 | } | |||
14155 | ||||
14156 | // Determine which of the 4 half vectors this element is from. | |||
14157 | // i.e. 0 = Lower V1, 1 = Upper V1, 2 = Lower V2, 3 = Upper V2. | |||
14158 | int HalfIdx = M / HalfNumElts; | |||
14159 | ||||
14160 | // Determine the element index into its half vector source. | |||
14161 | int HalfElt = M % HalfNumElts; | |||
14162 | ||||
14163 | // We can shuffle with up to 2 half vectors, set the new 'half' | |||
14164 | // shuffle mask accordingly. | |||
14165 | if (HalfIdx1 < 0 || HalfIdx1 == HalfIdx) { | |||
14166 | HalfMask[i] = HalfElt; | |||
14167 | HalfIdx1 = HalfIdx; | |||
14168 | continue; | |||
14169 | } | |||
14170 | if (HalfIdx2 < 0 || HalfIdx2 == HalfIdx) { | |||
14171 | HalfMask[i] = HalfElt + HalfNumElts; | |||
14172 | HalfIdx2 = HalfIdx; | |||
14173 | continue; | |||
14174 | } | |||
14175 | ||||
14176 | // Too many half vectors referenced. | |||
14177 | return SDValue(); | |||
14178 | } | |||
14179 | assert(HalfMask.size() == HalfNumElts && "Unexpected shuffle mask length"); | |||
14180 | ||||
14181 | // Only shuffle the halves of the inputs when useful. | |||
14182 | int NumLowerHalves = | |||
14183 | (HalfIdx1 == 0 || HalfIdx1 == 2) + (HalfIdx2 == 0 || HalfIdx2 == 2); | |||
14184 | int NumUpperHalves = | |||
14185 | (HalfIdx1 == 1 || HalfIdx1 == 3) + (HalfIdx2 == 1 || HalfIdx2 == 3); | |||
14186 | ||||
14187 | // uuuuXXXX - don't extract uppers just to insert again. | |||
14188 | if (UndefLower && NumUpperHalves != 0) | |||
14189 | return SDValue(); | |||
14190 | ||||
14191 | // XXXXuuuu - don't extract both uppers, instead shuffle and then extract. | |||
14192 | if (UndefUpper && NumUpperHalves == 2) | |||
14193 | return SDValue(); | |||
14194 | ||||
14195 | // AVX2 - XXXXuuuu - always extract lowers. | |||
14196 | if (Subtarget.hasAVX2() && !(UndefUpper && NumUpperHalves == 0)) { | |||
14197 | // AVX2 supports efficient immediate 64-bit element cross-lane shuffles. | |||
14198 | if (VT == MVT::v4f64 || VT == MVT::v4i64) | |||
14199 | return SDValue(); | |||
14200 | // AVX2 supports variable 32-bit element cross-lane shuffles. | |||
14201 | if (VT == MVT::v8f32 || VT == MVT::v8i32) { | |||
14202 | // XXXXuuuu - don't extract lowers and uppers. | |||
14203 | if (UndefUpper && NumLowerHalves != 0 && NumUpperHalves != 0) | |||
14204 | return SDValue(); | |||
14205 | } | |||
14206 | } | |||
14207 | ||||
14208 | // AVX512 - XXXXuuuu - always extract lowers. | |||
14209 | if (VT.is512BitVector() && !(UndefUpper && NumUpperHalves == 0)) | |||
14210 | return SDValue(); | |||
14211 | ||||
14212 | auto GetHalfVector = [&](int HalfIdx) { | |||
14213 | if (HalfIdx < 0) | |||
14214 | return DAG.getUNDEF(HalfVT); | |||
14215 | SDValue V = (HalfIdx < 2 ? V1 : V2); | |||
14216 | HalfIdx = (HalfIdx % 2) * HalfNumElts; | |||
14217 | return DAG.getNode(ISD::EXTRACT_SUBVECTOR, DL, HalfVT, V, | |||
14218 | DAG.getIntPtrConstant(HalfIdx, DL)); | |||
14219 | }; | |||
14220 | ||||
14221 | SDValue Half1 = GetHalfVector(HalfIdx1); | |||
14222 | SDValue Half2 = GetHalfVector(HalfIdx2); | |||
14223 | SDValue V = DAG.getVectorShuffle(HalfVT, DL, Half1, Half2, HalfMask); | |||
14224 | return DAG.getNode(ISD::INSERT_SUBVECTOR, DL, VT, DAG.getUNDEF(VT), V, | |||
14225 | DAG.getIntPtrConstant(Offset, DL)); | |||
14226 | } | |||
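// Standalone sketch of the half-index decomposition above (hypothetical
// name, plain C++). For a v8f32 mask <0, 10, 1, 11, u, u, u, u>
// (HalfNumElts = 4, upper half undef): HalfIdx1 = 0 (lower V1),
// HalfIdx2 = 2 (lower V2) and HalfMask = <0, 6, 1, 7>, so the shuffle is
// performed at v4f32 width and the result inserted at offset 0.
static bool sketchSplitToHalfMask(const int *Mask, int HalfNumElts,
                                  int Offset, int *HalfMask, int &HalfIdx1,
                                  int &HalfIdx2) {
  HalfIdx1 = HalfIdx2 = -1;
  for (int i = 0; i != HalfNumElts; ++i) {
    int M = Mask[i + Offset];
    if (M < 0) {
      HalfMask[i] = M;
      continue;
    }
    int HalfIdx = M / HalfNumElts; // 0=lo V1, 1=hi V1, 2=lo V2, 3=hi V2.
    int HalfElt = M % HalfNumElts; // Index within that half vector.
    if (HalfIdx1 < 0 || HalfIdx1 == HalfIdx) {
      HalfMask[i] = HalfElt;
      HalfIdx1 = HalfIdx;
    } else if (HalfIdx2 < 0 || HalfIdx2 == HalfIdx) {
      HalfMask[i] = HalfElt + HalfNumElts;
      HalfIdx2 = HalfIdx;
    } else {
      return false; // Too many half vectors referenced.
    }
  }
  return true;
}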
14227 | ||||
14228 | /// Test whether the specified input (0 or 1) is in-place blended by the | |||
14229 | /// given mask. | |||
14230 | /// | |||
14231 | /// This returns true if the elements from a particular input are already in the | |||
14232 | /// slot required by the given mask and require no permutation. | |||
14233 | static bool isShuffleMaskInputInPlace(int Input, ArrayRef<int> Mask) { | |||
14234 | assert((Input == 0 || Input == 1) && "Only two inputs to shuffles."); | |||
14235 | int Size = Mask.size(); | |||
14236 | for (int i = 0; i < Size; ++i) | |||
14237 | if (Mask[i] >= 0 && Mask[i] / Size == Input && Mask[i] % Size != i) | |||
14238 | return false; | |||
14239 | ||||
14240 | return true; | |||
14241 | } | |||
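// Usage sketch of the predicate above, with a hypothetical v4i64 mask
// (illustrative values only, not taken from the lowering code):
static inline bool sketchInPlaceExample() {
  const int M[4] = {0, 1, 7, 6};
  // Input 0 is in place (slots 0 and 1 unchanged); input 1 is not, since
  // Mask[2] = 7 maps to slot 3 of V2's numbering, not slot 2.
  return isShuffleMaskInputInPlace(0, M) && !isShuffleMaskInputInPlace(1, M);
}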
14242 | ||||
14243 | /// Handle case where shuffle sources are coming from the same 128-bit lane and | |||
14244 | /// every lane can be represented as the same repeating mask - allowing us to | |||
14245 | /// shuffle the sources with the repeating shuffle and then permute the result | |||
14246 | /// to the destination lanes. | |||
14247 | static SDValue lowerShuffleAsRepeatedMaskAndLanePermute( | |||
14248 | const SDLoc &DL, MVT VT, SDValue V1, SDValue V2, ArrayRef<int> Mask, | |||
14249 | const X86Subtarget &Subtarget, SelectionDAG &DAG) { | |||
14250 | int NumElts = VT.getVectorNumElements(); | |||
14251 | int NumLanes = VT.getSizeInBits() / 128; | |||
14252 | int NumLaneElts = NumElts / NumLanes; | |||
14253 | ||||
14254 | // On AVX2 we may be able to just shuffle the lowest elements and then | |||
14255 | // broadcast the result. | |||
14256 | if (Subtarget.hasAVX2()) { | |||
14257 | for (unsigned BroadcastSize : {16, 32, 64}) { | |||
14258 | if (BroadcastSize <= VT.getScalarSizeInBits()) | |||
14259 | continue; | |||
14260 | int NumBroadcastElts = BroadcastSize / VT.getScalarSizeInBits(); | |||
14261 | ||||
14262 | // Attempt to match a repeating pattern every NumBroadcastElts, | |||
14263 | // accounting for UNDEFs and only referencing the lowest 128-bit | |||
14264 | // lane of the inputs. | |||
14265 | auto FindRepeatingBroadcastMask = [&](SmallVectorImpl<int> &RepeatMask) { | |||
14266 | for (int i = 0; i != NumElts; i += NumBroadcastElts) | |||
14267 | for (int j = 0; j != NumBroadcastElts; ++j) { | |||
14268 | int M = Mask[i + j]; | |||
14269 | if (M < 0) | |||
14270 | continue; | |||
14271 | int &R = RepeatMask[j]; | |||
14272 | if (0 != ((M % NumElts) / NumLaneElts)) | |||
14273 | return false; | |||
14274 | if (0 <= R && R != M) | |||
14275 | return false; | |||
14276 | R = M; | |||
14277 | } | |||
14278 | return true; | |||
14279 | }; | |||
14280 | ||||
14281 | SmallVector<int, 8> RepeatMask((unsigned)NumElts, -1); | |||
14282 | if (!FindRepeatingBroadcastMask(RepeatMask)) | |||
14283 | continue; | |||
14284 | ||||
14285 | // Shuffle the (lowest) repeated elements in place for broadcast. | |||
14286 | SDValue RepeatShuf = DAG.getVectorShuffle(VT, DL, V1, V2, RepeatMask); | |||
14287 | ||||
14288 | // Shuffle the actual broadcast. | |||
14289 | SmallVector<int, 8> BroadcastMask((unsigned)NumElts, -1); | |||
14290 | for (int i = 0; i != NumElts; i += NumBroadcastElts) | |||
14291 | for (int j = 0; j != NumBroadcastElts; ++j) | |||
14292 | BroadcastMask[i + j] = j; | |||
14293 | return DAG.getVectorShuffle(VT, DL, RepeatShuf, DAG.getUNDEF(VT), | |||
14294 | BroadcastMask); | |||
14295 | } | |||
14296 | } | |||
14297 | ||||
14298 | // Bail if the shuffle mask doesn't cross 128-bit lanes. | |||
14299 | if (!is128BitLaneCrossingShuffleMask(VT, Mask)) | |||
14300 | return SDValue(); | |||
14301 | ||||
14302 | // Bail if we already have a repeated lane shuffle mask. | |||
14303 | SmallVector<int, 8> RepeatedShuffleMask; | |||
14304 | if (is128BitLaneRepeatedShuffleMask(VT, Mask, RepeatedShuffleMask)) | |||
14305 | return SDValue(); | |||
14306 | ||||
14307 | // On AVX2 targets we can permute 256-bit vectors as 64-bit sub-lanes | |||
14308 | // (with PERMQ/PERMPD), otherwise we can only permute whole 128-bit lanes. | |||
14309 | int SubLaneScale = Subtarget.hasAVX2() && VT.is256BitVector() ? 2 : 1; | |||
14310 | int NumSubLanes = NumLanes * SubLaneScale; | |||
14311 | int NumSubLaneElts = NumLaneElts / SubLaneScale; | |||
14312 | ||||
14313 | // Check that all the sources are coming from the same lane and see if we can | |||
14314 | // form a repeating shuffle mask (local to each sub-lane). At the same time, | |||
14315 | // determine the source sub-lane for each destination sub-lane. | |||
14316 | int TopSrcSubLane = -1; | |||
14317 | SmallVector<int, 8> Dst2SrcSubLanes((unsigned)NumSubLanes, -1); | |||
14318 | SmallVector<int, 8> RepeatedSubLaneMasks[2] = { | |||
14319 | SmallVector<int, 8>((unsigned)NumSubLaneElts, SM_SentinelUndef), | |||
14320 | SmallVector<int, 8>((unsigned)NumSubLaneElts, SM_SentinelUndef)}; | |||
14321 | ||||
14322 | for (int DstSubLane = 0; DstSubLane != NumSubLanes; ++DstSubLane) { | |||
14323 | // Extract the sub-lane mask, check that it all comes from the same lane | |||
14324 | // and normalize the mask entries to come from the first lane. | |||
14325 | int SrcLane = -1; | |||
14326 | SmallVector<int, 8> SubLaneMask((unsigned)NumSubLaneElts, -1); | |||
14327 | for (int Elt = 0; Elt != NumSubLaneElts; ++Elt) { | |||
14328 | int M = Mask[(DstSubLane * NumSubLaneElts) + Elt]; | |||
14329 | if (M < 0) | |||
14330 | continue; | |||
14331 | int Lane = (M % NumElts) / NumLaneElts; | |||
14332 | if ((0 <= SrcLane) && (SrcLane != Lane)) | |||
14333 | return SDValue(); | |||
14334 | SrcLane = Lane; | |||
14335 | int LocalM = (M % NumLaneElts) + (M < NumElts ? 0 : NumElts); | |||
14336 | SubLaneMask[Elt] = LocalM; | |||
14337 | } | |||
14338 | ||||
14339 | // Whole sub-lane is UNDEF. | |||
14340 | if (SrcLane < 0) | |||
14341 | continue; | |||
14342 | ||||
14343 | // Attempt to match against the candidate repeated sub-lane masks. | |||
14344 | for (int SubLane = 0; SubLane != SubLaneScale; ++SubLane) { | |||
14345 | auto MatchMasks = [NumSubLaneElts](ArrayRef<int> M1, ArrayRef<int> M2) { | |||
14346 | for (int i = 0; i != NumSubLaneElts; ++i) { | |||
14347 | if (M1[i] < 0 || M2[i] < 0) | |||
14348 | continue; | |||
14349 | if (M1[i] != M2[i]) | |||
14350 | return false; | |||
14351 | } | |||
14352 | return true; | |||
14353 | }; | |||
14354 | ||||
14355 | auto &RepeatedSubLaneMask = RepeatedSubLaneMasks[SubLane]; | |||
14356 | if (!MatchMasks(SubLaneMask, RepeatedSubLaneMask)) | |||
14357 | continue; | |||
14358 | ||||
14359 | // Merge the sub-lane mask into the matching repeated sub-lane mask. | |||
14360 | for (int i = 0; i != NumSubLaneElts; ++i) { | |||
14361 | int M = SubLaneMask[i]; | |||
14362 | if (M < 0) | |||
14363 | continue; | |||
14364 | assert((RepeatedSubLaneMask[i] < 0 || RepeatedSubLaneMask[i] == M) && | |||
14365 | "Unexpected mask element"); | |||
14366 | RepeatedSubLaneMask[i] = M; | |||
14367 | } | |||
14368 | ||||
14369 | // Track the top most source sub-lane - by setting the remaining to UNDEF | |||
14370 | // we can greatly simplify shuffle matching. | |||
14371 | int SrcSubLane = (SrcLane * SubLaneScale) + SubLane; | |||
14372 | TopSrcSubLane = std::max(TopSrcSubLane, SrcSubLane); | |||
14373 | Dst2SrcSubLanes[DstSubLane] = SrcSubLane; | |||
14374 | break; | |||
14375 | } | |||
14376 | ||||
14377 | // Bail if we failed to find a matching repeated sub-lane mask. | |||
14378 | if (Dst2SrcSubLanes[DstSubLane] < 0) | |||
14379 | return SDValue(); | |||
14380 | } | |||
14381 | assert(0 <= TopSrcSubLane && TopSrcSubLane < NumSubLanes && | |||
14382 | "Unexpected source lane"); | |||
14383 | ||||
14384 | // Create a repeating shuffle mask for the entire vector. | |||
14385 | SmallVector<int, 8> RepeatedMask((unsigned)NumElts, -1); | |||
14386 | for (int SubLane = 0; SubLane <= TopSrcSubLane; ++SubLane) { | |||
14387 | int Lane = SubLane / SubLaneScale; | |||
14388 | auto &RepeatedSubLaneMask = RepeatedSubLaneMasks[SubLane % SubLaneScale]; | |||
14389 | for (int Elt = 0; Elt != NumSubLaneElts; ++Elt) { | |||
14390 | int M = RepeatedSubLaneMask[Elt]; | |||
14391 | if (M < 0) | |||
14392 | continue; | |||
14393 | int Idx = (SubLane * NumSubLaneElts) + Elt; | |||
14394 | RepeatedMask[Idx] = M + (Lane * NumLaneElts); | |||
14395 | } | |||
14396 | } | |||
14397 | SDValue RepeatedShuffle = DAG.getVectorShuffle(VT, DL, V1, V2, RepeatedMask); | |||
14398 | ||||
14399 | // Shuffle each source sub-lane to its destination. | |||
14400 | SmallVector<int, 8> SubLaneMask((unsigned)NumElts, -1); | |||
14401 | for (int i = 0; i != NumElts; i += NumSubLaneElts) { | |||
14402 | int SrcSubLane = Dst2SrcSubLanes[i / NumSubLaneElts]; | |||
14403 | if (SrcSubLane < 0) | |||
14404 | continue; | |||
14405 | for (int j = 0; j != NumSubLaneElts; ++j) | |||
14406 | SubLaneMask[i + j] = j + (SrcSubLane * NumSubLaneElts); | |||
14407 | } | |||
14408 | ||||
14409 | return DAG.getVectorShuffle(VT, DL, RepeatedShuffle, DAG.getUNDEF(VT), | |||
14410 | SubLaneMask); | |||
14411 | } | |||
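// Worked example for the AVX2 broadcast path above, assuming a v8i32
// shuffle with Mask = <1, 0, 1, 0, 1, 0, 1, 0> and BroadcastSize = 64
// (NumBroadcastElts = 2): the pattern repeats every two elements and only
// references the lowest lane, so RepeatMask = <1, 0, u, u, u, u, u, u>
// shuffles the low elements in place and BroadcastMask = <0, 1, 0, 1, ...>
// broadcasts the result. Sketch of the broadcast-mask construction:
static void sketchBroadcastMask(int NumElts, int NumBroadcastElts,
                                int *BroadcastMask) {
  for (int i = 0; i != NumElts; i += NumBroadcastElts)
    for (int j = 0; j != NumBroadcastElts; ++j)
      BroadcastMask[i + j] = j;
}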
14412 | ||||
14413 | static bool matchVectorShuffleWithSHUFPD(MVT VT, SDValue &V1, SDValue &V2, | |||
14414 | unsigned &ShuffleImm, | |||
14415 | ArrayRef<int> Mask) { | |||
14416 | int NumElts = VT.getVectorNumElements(); | |||
14417 | assert(VT.getScalarSizeInBits() == 64 && | |||
14418 | (NumElts == 2 || NumElts == 4 || NumElts == 8) && | |||
14419 | "Unexpected data type for VSHUFPD"); | |||
14420 | ||||
14421 | // Mask for V8F64: 0/1, 8/9, 2/3, 10/11, 4/5, .. | |||
14422 | // Mask for V4F64: 0/1, 4/5, 2/3, 6/7, .. | |||
14423 | ShuffleImm = 0; | |||
14424 | bool ShufpdMask = true; | |||
14425 | bool CommutableMask = true; | |||
14426 | for (int i = 0; i < NumElts; ++i) { | |||
14427 | if (Mask[i] == SM_SentinelUndef) | |||
14428 | continue; | |||
14429 | if (Mask[i] < 0) | |||
14430 | return false; | |||
14431 | int Val = (i & 6) + NumElts * (i & 1); | |||
14432 | int CommutVal = (i & 0xe) + NumElts * ((i & 1) ^ 1); | |||
14433 | if (Mask[i] < Val || Mask[i] > Val + 1) | |||
14434 | ShufpdMask = false; | |||
14435 | if (Mask[i] < CommutVal || Mask[i] > CommutVal + 1) | |||
14436 | CommutableMask = false; | |||
14437 | ShuffleImm |= (Mask[i] % 2) << i; | |||
14438 | } | |||
14439 | ||||
14440 | if (ShufpdMask) | |||
14441 | return true; | |||
14442 | if (CommutableMask) { | |||
14443 | std::swap(V1, V2); | |||
14444 | return true; | |||
14445 | } | |||
14446 | ||||
14447 | return false; | |||
14448 | } | |||
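// Worked example for the SHUFPD immediate above, with a v4f64 mask
// <1, 5, 2, 7> (NumElts = 4). Element i may only pick from the pair
// starting at Val = (i & 6) + NumElts * (i & 1), and bit i of the
// immediate records the low/high choice within that pair:
//   i=0: Val = 0, Mask[0] = 1 -> bit 0 = 1
//   i=1: Val = 4, Mask[1] = 5 -> bit 1 = 1
//   i=2: Val = 2, Mask[2] = 2 -> bit 2 = 0
//   i=3: Val = 6, Mask[3] = 7 -> bit 3 = 1
//   ShuffleImm = 0b1011 = 0xB
// Minimal sketch (undef elements omitted for brevity):
static bool sketchShufpdImm(const int *Mask, int NumElts, unsigned &Imm) {
  Imm = 0;
  for (int i = 0; i < NumElts; ++i) {
    int Val = (i & 6) + NumElts * (i & 1);
    if (Mask[i] < Val || Mask[i] > Val + 1)
      return false;
    Imm |= (unsigned)(Mask[i] % 2) << i;
  }
  return true;
}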
14449 | ||||
14450 | static SDValue lowerVectorShuffleWithSHUFPD(const SDLoc &DL, MVT VT, | |||
14451 | ArrayRef<int> Mask, SDValue V1, | |||
14452 | SDValue V2, SelectionDAG &DAG) { | |||
14453 | assert((VT == MVT::v2f64 || VT == MVT::v4f64 || VT == MVT::v8f64) && | |||
14454 | "Unexpected data type for VSHUFPD"); | |||
14455 | ||||
14456 | unsigned Immediate = 0; | |||
14457 | if (!matchVectorShuffleWithSHUFPD(VT, V1, V2, Immediate, Mask)) | |||
14458 | return SDValue(); | |||
14459 | ||||
14460 | return DAG.getNode(X86ISD::SHUFP, DL, VT, V1, V2, | |||
14461 | DAG.getConstant(Immediate, DL, MVT::i8)); | |||
14462 | } | |||
14463 | ||||
14464 | /// Handle lowering of 4-lane 64-bit floating point shuffles. | |||
14465 | /// | |||
14466 | /// Also ends up handling lowering of 4-lane 64-bit integer shuffles when AVX2 | |||
14467 | /// isn't available. | |||
14468 | static SDValue lowerV4F64VectorShuffle(const SDLoc &DL, ArrayRef<int> Mask, | |||
14469 | const APInt &Zeroable, | |||
14470 | SDValue V1, SDValue V2, | |||
14471 | const X86Subtarget &Subtarget, | |||
14472 | SelectionDAG &DAG) { | |||
14473 | assert(V1.getSimpleValueType() == MVT::v4f64 && "Bad operand type!"); | |||
14474 | assert(V2.getSimpleValueType() == MVT::v4f64 && "Bad operand type!"); | |||
14475 | assert(Mask.size() == 4 && "Unexpected mask size for v4 shuffle!"); | |||
14476 | ||||
14477 | if (SDValue V = lowerV2X128VectorShuffle(DL, MVT::v4f64, V1, V2, Mask, | |||
14478 | Zeroable, Subtarget, DAG)) | |||
14479 | return V; | |||
14480 | ||||
14481 | if (V2.isUndef()) { | |||
14482 | // Check for being able to broadcast a single element. | |||
14483 | if (SDValue Broadcast = lowerVectorShuffleAsBroadcast( | |||
14484 | DL, MVT::v4f64, V1, V2, Mask, Subtarget, DAG)) | |||
14485 | return Broadcast; | |||
14486 | ||||
14487 | // Use low duplicate instructions for masks that match their pattern. | |||
14488 | if (isShuffleEquivalent(V1, V2, Mask, {0, 0, 2, 2})) | |||
14489 | return DAG.getNode(X86ISD::MOVDDUP, DL, MVT::v4f64, V1); | |||
14490 | ||||
14491 | if (!is128BitLaneCrossingShuffleMask(MVT::v4f64, Mask)) { | |||
14492 | // Non-half-crossing single input shuffles can be lowered with an | |||
14493 | // interleaved permutation. | |||
14494 | unsigned VPERMILPMask = (Mask[0] == 1) | ((Mask[1] == 1) << 1) | | |||
14495 | ((Mask[2] == 3) << 2) | ((Mask[3] == 3) << 3); | |||
14496 | return DAG.getNode(X86ISD::VPERMILPI, DL, MVT::v4f64, V1, | |||
14497 | DAG.getConstant(VPERMILPMask, DL, MVT::i8)); | |||
14498 | } | |||
14499 | ||||
14500 | // With AVX2 we have direct support for this permutation. | |||
14501 | if (Subtarget.hasAVX2()) | |||
14502 | return DAG.getNode(X86ISD::VPERMI, DL, MVT::v4f64, V1, | |||
14503 | getV4X86ShuffleImm8ForMask(Mask, DL, DAG)); | |||
14504 | ||||
14505 | // Try to create an in-lane repeating shuffle mask and then shuffle the | |||
14506 | // results into the target lanes. | |||
14507 | if (SDValue V = lowerShuffleAsRepeatedMaskAndLanePermute( | |||
14508 | DL, MVT::v4f64, V1, V2, Mask, Subtarget, DAG)) | |||
14509 | return V; | |||
14510 | ||||
14511 | // Try to permute the lanes and then use a per-lane permute. | |||
14512 | if (SDValue V = lowerVectorShuffleAsLanePermuteAndPermute( | |||
14513 | DL, MVT::v4f64, V1, V2, Mask, DAG, Subtarget)) | |||
14514 | return V; | |||
14515 | ||||
14516 | // Otherwise, fall back. | |||
14517 | return lowerVectorShuffleAsLanePermuteAndBlend(DL, MVT::v4f64, V1, V2, Mask, | |||
14518 | DAG, Subtarget); | |||
14519 | } | |||
14520 | ||||
14521 | // Use dedicated unpack instructions for masks that match their pattern. | |||
14522 | if (SDValue V = | |||
14523 | lowerVectorShuffleWithUNPCK(DL, MVT::v4f64, Mask, V1, V2, DAG)) | |||
14524 | return V; | |||
14525 | ||||
14526 | if (SDValue Blend = lowerVectorShuffleAsBlend(DL, MVT::v4f64, V1, V2, Mask, | |||
14527 | Zeroable, Subtarget, DAG)) | |||
14528 | return Blend; | |||
14529 | ||||
14530 | // Check if the blend happens to exactly fit that of SHUFPD. | |||
14531 | if (SDValue Op = | |||
14532 | lowerVectorShuffleWithSHUFPD(DL, MVT::v4f64, Mask, V1, V2, DAG)) | |||
14533 | return Op; | |||
14534 | ||||
14535 | // Try to create an in-lane repeating shuffle mask and then shuffle the | |||
14536 | // results into the target lanes. | |||
14537 | if (SDValue V = lowerShuffleAsRepeatedMaskAndLanePermute( | |||
14538 | DL, MVT::v4f64, V1, V2, Mask, Subtarget, DAG)) | |||
14539 | return V; | |||
14540 | ||||
14541 | // Try to simplify this by merging 128-bit lanes to enable a lane-based | |||
14542 | // shuffle. However, if we have AVX2 and either input is already in place, | |||
14543 | // we will be able to shuffle the other input even across lanes in a single | |||
14544 | // instruction, so skip this pattern. | |||
14545 | if (!(Subtarget.hasAVX2() && (isShuffleMaskInputInPlace(0, Mask) || | |||
14546 | isShuffleMaskInputInPlace(1, Mask)))) | |||
14547 | if (SDValue Result = lowerVectorShuffleByMerging128BitLanes( | |||
14548 | DL, MVT::v4f64, V1, V2, Mask, Subtarget, DAG)) | |||
14549 | return Result; | |||
14550 | ||||
14551 | // If we have VLX support, we can use VEXPAND. | |||
14552 | if (Subtarget.hasVLX()) | |||
14553 | if (SDValue V = lowerVectorShuffleToEXPAND(DL, MVT::v4f64, Zeroable, Mask, | |||
14554 | V1, V2, DAG, Subtarget)) | |||
14555 | return V; | |||
14556 | ||||
14557 | // If we have AVX2 then we always want to lower with a blend because with | |||
14558 | // v4f64 we can fully permute the elements. | |||
14559 | if (Subtarget.hasAVX2()) | |||
14560 | return lowerVectorShuffleAsDecomposedShuffleBlend(DL, MVT::v4f64, V1, V2, | |||
14561 | Mask, Subtarget, DAG); | |||
14562 | ||||
14563 | // Otherwise fall back on generic lowering. | |||
14564 | return lowerVectorShuffleAsSplitOrBlend(DL, MVT::v4f64, V1, V2, Mask, | |||
14565 | Subtarget, DAG); | |||
14566 | } | |||
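// Sketch of the VPERMILPD immediate from the single-input path above: each
// bit selects the high element of the corresponding in-lane pair, so for a
// non-lane-crossing v4f64 mask <1, 0, 2, 3> the immediate is
// 1 | (0 << 1) | (0 << 2) | (1 << 3) = 0x9. Hypothetical helper:
static unsigned sketchVPermilpdImm(const int *Mask) {
  return (unsigned)((Mask[0] == 1) | ((Mask[1] == 1) << 1) |
                    ((Mask[2] == 3) << 2) | ((Mask[3] == 3) << 3));
}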
14567 | ||||
14568 | /// Handle lowering of 4-lane 64-bit integer shuffles. | |||
14569 | /// | |||
14570 | /// This routine is only called when we have AVX2 and thus a reasonable | |||
14571 | /// instruction set for v4i64 shuffling. | |||
14572 | static SDValue lowerV4I64VectorShuffle(const SDLoc &DL, ArrayRef<int> Mask, | |||
14573 | const APInt &Zeroable, | |||
14574 | SDValue V1, SDValue V2, | |||
14575 | const X86Subtarget &Subtarget, | |||
14576 | SelectionDAG &DAG) { | |||
14577 | assert(V1.getSimpleValueType() == MVT::v4i64 && "Bad operand type!"); | |||
14578 | assert(V2.getSimpleValueType() == MVT::v4i64 && "Bad operand type!"); | |||
14579 | assert(Mask.size() == 4 && "Unexpected mask size for v4 shuffle!"); | |||
14580 | assert(Subtarget.hasAVX2() && "We can only lower v4i64 with AVX2!"); | |||
14581 | ||||
14582 | if (SDValue V = lowerV2X128VectorShuffle(DL, MVT::v4i64, V1, V2, Mask, | |||
14583 | Zeroable, Subtarget, DAG)) | |||
14584 | return V; | |||
14585 | ||||
14586 | if (SDValue Blend = lowerVectorShuffleAsBlend(DL, MVT::v4i64, V1, V2, Mask, | |||
14587 | Zeroable, Subtarget, DAG)) | |||
14588 | return Blend; | |||
14589 | ||||
14590 | // Check for being able to broadcast a single element. | |||
14591 | if (SDValue Broadcast = lowerVectorShuffleAsBroadcast(DL, MVT::v4i64, V1, V2, | |||
14592 | Mask, Subtarget, DAG)) | |||
14593 | return Broadcast; | |||
14594 | ||||
14595 | if (V2.isUndef()) { | |||
14596 | // When the shuffle is repeated identically in both 128-bit lanes, we can | |||
14597 | // use lower-latency instructions that operate on both lanes at once. | |||
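// (Illustrative: a v4i64 mask <1,0,3,2> repeats per lane as <1,0>; scaling
// by 2 gives the v8i32 PSHUFD mask <2,3,0,1>, i.e. immediate 0x4E.)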
14598 | SmallVector<int, 2> RepeatedMask; | |||
14599 | if (is128BitLaneRepeatedShuffleMask(MVT::v4i64, Mask, RepeatedMask)) { | |||
14600 | SmallVector<int, 4> PSHUFDMask; | |||
14601 | scaleShuffleMask<int>(2, RepeatedMask, PSHUFDMask); | |||
14602 | return DAG.getBitcast( | |||
14603 | MVT::v4i64, | |||
14604 | DAG.getNode(X86ISD::PSHUFD, DL, MVT::v8i32, | |||
14605 | DAG.getBitcast(MVT::v8i32, V1), | |||
14606 | getV4X86ShuffleImm8ForMask(PSHUFDMask, DL, DAG))); | |||
14607 | } | |||
14608 | ||||
14609 | // AVX2 provides a direct instruction for permuting a single input across | |||
14610 | // lanes. | |||
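// (Illustrative: mask <3,1,2,0> becomes VPERMQ with immediate
// 3 | (1 << 2) | (2 << 4) | (0 << 6) == 0x27.)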
14611 | return DAG.getNode(X86ISD::VPERMI, DL, MVT::v4i64, V1, | |||
14612 | getV4X86ShuffleImm8ForMask(Mask, DL, DAG)); | |||
14613 | } | |||
14614 | ||||
14615 | // Try to use shift instructions. | |||
14616 | if (SDValue Shift = lowerVectorShuffleAsShift(DL, MVT::v4i64, V1, V2, Mask, | |||
14617 | Zeroable, Subtarget, DAG)) | |||
14618 | return Shift; | |||
14619 | ||||
14620 | // If we have VLX support, we can use VALIGN or VEXPAND. | |||
14621 | if (Subtarget.hasVLX()) { | |||
14622 | if (SDValue Rotate = lowerVectorShuffleAsRotate(DL, MVT::v4i64, V1, V2, | |||
14623 | Mask, Subtarget, DAG)) | |||
14624 | return Rotate; | |||
14625 | ||||
14626 | if (SDValue V = lowerVectorShuffleToEXPAND(DL, MVT::v4i64, Zeroable, Mask, | |||
14627 | V1, V2, DAG, Subtarget)) | |||
14628 | return V; | |||
14629 | } | |||
14630 | ||||
14631 | // Try to use PALIGNR. | |||
14632 | if (SDValue Rotate = lowerVectorShuffleAsByteRotate(DL, MVT::v4i64, V1, V2, | |||
14633 | Mask, Subtarget, DAG)) | |||
14634 | return Rotate; | |||
14635 | ||||
14636 | // Use dedicated unpack instructions for masks that match their pattern. | |||
14637 | if (SDValue V = | |||
14638 | lowerVectorShuffleWithUNPCK(DL, MVT::v4i64, Mask, V1, V2, DAG)) | |||
14639 | return V; | |||
14640 | ||||
14641 | // Try to create an in-lane repeating shuffle mask and then shuffle the | |||
14642 | // results into the target lanes. | |||
14643 | if (SDValue V = lowerShuffleAsRepeatedMaskAndLanePermute( | |||
14644 | DL, MVT::v4i64, V1, V2, Mask, Subtarget, DAG)) | |||
14645 | return V; | |||
14646 | ||||
14647 | // Try to simplify this by merging 128-bit lanes to enable a lane-based | |||
14648 | // shuffle. However, if either input is already in place, we can shuffle | |||
14649 | // the other input across lanes with a single AVX2 instruction, so skip | |||
14650 | // this pattern in that case. | |||
14651 | if (!isShuffleMaskInputInPlace(0, Mask) && | |||
14652 | !isShuffleMaskInputInPlace(1, Mask)) | |||
14653 | if (SDValue Result = lowerVectorShuffleByMerging128BitLanes( | |||
14654 | DL, MVT::v4i64, V1, V2, Mask, Subtarget, DAG)) | |||
14655 | return Result; | |||
14656 | ||||
14657 | // Otherwise fall back on generic blend lowering. | |||
14658 | return lowerVectorShuffleAsDecomposedShuffleBlend(DL, MVT::v4i64, V1, V2, | |||
14659 | Mask, Subtarget, DAG); | |||
14660 | } | |||
14661 | ||||
14662 | /// Handle lowering of 8-lane 32-bit floating point shuffles. | |||
14663 | /// | |||
14664 | /// Also ends up handling lowering of 8-lane 32-bit integer shuffles when AVX2 | |||
14665 | /// isn't available. | |||
14666 | static SDValue lowerV8F32VectorShuffle(const SDLoc &DL, ArrayRef<int> Mask, | |||
14667 | const APInt &Zeroable, | |||
14668 | SDValue V1, SDValue V2, | |||
14669 | const X86Subtarget &Subtarget, | |||
14670 | SelectionDAG &DAG) { | |||
14671 | assert(V1.getSimpleValueType() == MVT::v8f32 && "Bad operand type!"); | |||
14672 | assert(V2.getSimpleValueType() == MVT::v8f32 && "Bad operand type!"); | |||
14673 | assert(Mask.size() == 8 && "Unexpected mask size for v8 shuffle!"); | |||
14674 | ||||
14675 | if (SDValue Blend = lowerVectorShuffleAsBlend(DL, MVT::v8f32, V1, V2, Mask, | |||
14676 | Zeroable, Subtarget, DAG)) | |||
14677 | return Blend; | |||
14678 | ||||
14679 | // Check for being able to broadcast a single element. | |||
14680 | if (SDValue Broadcast = lowerVectorShuffleAsBroadcast(DL, MVT::v8f32, V1, V2, | |||
14681 | Mask, Subtarget, DAG)) | |||
14682 | return Broadcast; | |||
14683 | ||||
14684 | // If the shuffle mask is repeated in each 128-bit lane, we have many more | |||
14685 | // options to efficiently lower the shuffle. | |||
14686 | SmallVector<int, 4> RepeatedMask; | |||
14687 | if (is128BitLaneRepeatedShuffleMask(MVT::v8f32, Mask, RepeatedMask)) { | |||
14688 | assert(RepeatedMask.size() == 4 && | |||
14689 |        "Repeated masks must be half the mask width!"); | |||
14690 | ||||
14691 | // Use even/odd duplicate instructions for masks that match their pattern. | |||
14692 | if (isShuffleEquivalent(V1, V2, RepeatedMask, {0, 0, 2, 2})) | |||
14693 | return DAG.getNode(X86ISD::MOVSLDUP, DL, MVT::v8f32, V1); | |||
14694 | if (isShuffleEquivalent(V1, V2, RepeatedMask, {1, 1, 3, 3})) | |||
14695 | return DAG.getNode(X86ISD::MOVSHDUP, DL, MVT::v8f32, V1); | |||
14696 | ||||
14697 | if (V2.isUndef()) | |||
14698 | return DAG.getNode(X86ISD::VPERMILPI, DL, MVT::v8f32, V1, | |||
14699 | getV4X86ShuffleImm8ForMask(RepeatedMask, DL, DAG)); | |||
14700 | ||||
14701 | // Use dedicated unpack instructions for masks that match their pattern. | |||
14702 | if (SDValue V = | |||
14703 | lowerVectorShuffleWithUNPCK(DL, MVT::v8f32, Mask, V1, V2, DAG)) | |||
14704 | return V; | |||
14705 | ||||
14706 | // Otherwise, fall back to a SHUFPS sequence. Here it is important that we | |||
14707 | // have already handled any direct blends. | |||
14708 | return lowerVectorShuffleWithSHUFPS(DL, MVT::v8f32, RepeatedMask, V1, V2, DAG); | |||
14709 | } | |||
14710 | ||||
14711 | // Try to create an in-lane repeating shuffle mask and then shuffle the | |||
14712 | // results into the target lanes. | |||
14713 | if (SDValue V = lowerShuffleAsRepeatedMaskAndLanePermute( | |||
14714 | DL, MVT::v8f32, V1, V2, Mask, Subtarget, DAG)) | |||
14715 | return V; | |||
14716 | ||||
14717 | // If we have a single-input shuffle with different shuffle patterns in the | |||
14718 | // two 128-bit lanes, use a variable mask with VPERMILPS. | |||
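// (VPERMILPV indexes only within each 128-bit lane, so it requires a
// non-lane-crossing mask; the lane-crossing case needs AVX2's VPERMPS or
// the lane-permute-and-blend fallback below.)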
14719 | if (V2.isUndef()) { | |||
14720 | SDValue VPermMask = getConstVector(Mask, MVT::v8i32, DAG, DL, true); | |||
14721 | if (!is128BitLaneCrossingShuffleMask(MVT::v8f32, Mask)) | |||
14722 | return DAG.getNode(X86ISD::VPERMILPV, DL, MVT::v8f32, V1, VPermMask); | |||
14723 | ||||
14724 | if (Subtarget.hasAVX2()) | |||
14725 | return DAG.getNode(X86ISD::VPERMV, DL, MVT::v8f32, VPermMask, V1); | |||
14726 | ||||
14727 | // Otherwise, fall back. | |||
14728 | return lowerVectorShuffleAsLanePermuteAndBlend(DL, MVT::v8f32, V1, V2, Mask, | |||
14729 | DAG, Subtarget); | |||
14730 | } | |||
14731 | ||||
14732 | // Try to simplify this by merging 128-bit lanes to enable a lane-based | |||
14733 | // shuffle. | |||
14734 | if (SDValue Result = lowerVectorShuffleByMerging128BitLanes( | |||
14735 | DL, MVT::v8f32, V1, V2, Mask, Subtarget, DAG)) | |||
14736 | return Result; | |||
14737 | // If we have VLX support, we can use VEXPAND. | |||
14738 | if (Subtarget.hasVLX()) | |||
14739 | if (SDValue V = lowerVectorShuffleToEXPAND(DL, MVT::v8f32, Zeroable, Mask, | |||
14740 | V1, V2, DAG, Subtarget)) | |||
14741 | return V; | |||
14742 | ||||
14743 | // Without AVX512, if the mask is an in-lane 16-bit unpack pattern, try to | |||
14744 | // split, since the split form gives more efficient code using vpunpcklwd | |||
14745 | // and vpunpckhwd than vblend would. | |||
14746 | if (!Subtarget.hasAVX512() && isUnpackWdShuffleMask(Mask, MVT::v8f32)) | |||
14747 | if (SDValue V = lowerVectorShuffleAsSplitOrBlend(DL, MVT::v8f32, V1, V2, | |||
14748 | Mask, Subtarget, DAG)) | |||
14749 | return V; | |||
14750 | ||||
14751 | // If we have AVX2 then we always want to lower with a blend because at v8 we | |||
14752 | // can fully permute the elements. | |||
14753 | if (Subtarget.hasAVX2()) | |||
14754 | return lowerVectorShuffleAsDecomposedShuffleBlend(DL, MVT::v8f32, V1, V2, | |||
14755 | Mask, Subtarget, DAG); | |||
14756 | ||||
14757 | // Otherwise fall back on generic lowering. | |||
14758 | return lowerVectorShuffleAsSplitOrBlend(DL, MVT::v8f32, V1, V2, Mask, | |||
14759 | Subtarget, DAG); | |||
14760 | } | |||
14761 | ||||
14762 | /// Handle lowering of 8-lane 32-bit integer shuffles. | |||
14763 | /// | |||
14764 | /// This routine is only called when we have AVX2 and thus a reasonable | |||
14765 | /// instruction set for v8i32 shuffling. | |||
14766 | static SDValue lowerV8I32VectorShuffle(const SDLoc &DL, ArrayRef<int> Mask, | |||
14767 | const APInt &Zeroable, | |||
14768 | SDValue V1, SDValue V2, | |||
14769 | const X86Subtarget &Subtarget, | |||
14770 | SelectionDAG &DAG) { | |||
14771 | assert(V1.getSimpleValueType() == MVT::v8i32 && "Bad operand type!"); | |||
14772 | assert(V2.getSimpleValueType() == MVT::v8i32 && "Bad operand type!"); | |||
14773 | assert(Mask.size() == 8 && "Unexpected mask size for v8 shuffle!"); | |||
14774 | assert(Subtarget.hasAVX2() && "We can only lower v8i32 with AVX2!"); | |||
14775 | ||||
14776 | // Whenever we can lower this as a zext, that instruction is strictly faster | |||
14777 | // than any alternative. It also allows us to fold memory operands into the | |||
14778 | // shuffle in many cases. | |||
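// (Illustrative: a v8i32 mask <0,Z,1,Z,2,Z,3,Z>, where Z denotes a
// known-zero element, matches a VPMOVZXDQ of the low four elements.)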
14779 | if (SDValue ZExt = lowerVectorShuffleAsZeroOrAnyExtend( | |||
14780 | DL, MVT::v8i32, V1, V2, Mask, Zeroable, Subtarget, DAG)) | |||
14781 | return ZExt; | |||
14782 | ||||
14783 | // Without AVX512, if the mask is an in-lane 16-bit unpack pattern, try to | |||
14784 | // split, since the split form gives more efficient code than vblend by | |||
14785 | // using vpunpcklwd and vpunpckhwd. | |||
14786 | if (isUnpackWdShuffleMask(Mask, MVT::v8i32) && !V2.isUndef() && | |||
14787 | !Subtarget.hasAVX512()) | |||
14788 | if (SDValue V = lowerVectorShuffleAsSplitOrBlend(DL, MVT::v8i32, V1, V2, | |||
14789 | Mask, Subtarget, DAG)) | |||
14790 | return V; | |||
14791 | ||||
14792 | if (SDValue Blend = lowerVectorShuffleAsBlend(DL, MVT::v8i32, V1, V2, Mask, | |||
14793 | Zeroable, Subtarget, DAG)) | |||
14794 | return Blend; | |||
14795 | ||||
14796 | // Check for being able to broadcast a single element. | |||
14797 | if (SDValue Broadcast = lowerVectorShuffleAsBroadcast(DL, MVT::v8i32, V1, V2, | |||
14798 | Mask, Subtarget, DAG)) | |||
14799 | return Broadcast; | |||
14800 | ||||
14801 | // If the shuffle mask is repeated in each 128-bit lane we can use more | |||
14802 | // efficient instructions that mirror the shuffles across the two 128-bit | |||
14803 | // lanes. | |||
14804 | SmallVector<int, 4> RepeatedMask; | |||
14805 | bool Is128BitLaneRepeatedShuffle = | |||
14806 | is128BitLaneRepeatedShuffleMask(MVT::v8i32, Mask, RepeatedMask); | |||
14807 | if (Is128BitLaneRepeatedShuffle) { | |||
14808 | assert(RepeatedMask.size() == 4 && "Unexpected repeated mask size!"); | |||
14809 | if (V2.isUndef()) | |||
14810 | return DAG.getNode(X86ISD::PSHUFD, DL, MVT::v8i32, V1, | |||
14811 | getV4X86ShuffleImm8ForMask(RepeatedMask, DL, DAG)); | |||
14812 | ||||
14813 | // Use dedicated unpack instructions for masks that match their pattern. | |||
14814 | if (SDValue V = | |||
14815 | lowerVectorShuffleWithUNPCK(DL, MVT::v8i32, Mask, V1, V2, DAG)) | |||
14816 | return V; | |||
14817 | } | |||
14818 | ||||
14819 | // Try to use shift instructions. | |||
14820 | if (SDValue Shift = lowerVectorShuffleAsShift(DL, MVT::v8i32, V1, V2, Mask, | |||
14821 | Zeroable, Subtarget, DAG)) | |||
14822 | return Shift; | |||
14823 | ||||
14824 | // If we have VLX support, we can use VALIGN or EXPAND. | |||
14825 | if (Subtarget.hasVLX()) { | |||
14826 | if (SDValue Rotate = lowerVectorShuffleAsRotate(DL, MVT::v8i32, V1, V2, | |||
14827 | Mask, Subtarget, DAG)) | |||
14828 | return Rotate; | |||
14829 | ||||
14830 | if (SDValue V = lowerVectorShuffleToEXPAND(DL, MVT::v8i32, Zeroable, Mask, | |||
14831 | V1, V2, DAG, Subtarget)) | |||
14832 | return V; | |||
14833 | } | |||
14834 | ||||
14835 | // Try to use byte rotation instructions. | |||
14836 | if (SDValue Rotate = lowerVectorShuffleAsByteRotate( | |||
14837 | DL, MVT::v8i32, V1, V2, Mask, Subtarget, DAG)) | |||
14838 | return Rotate; | |||
14839 | ||||
14840 | // Try to create an in-lane repeating shuffle mask and then shuffle the | |||
14841 | // results into the target lanes. | |||
14842 | if (SDValue V = lowerShuffleAsRepeatedMaskAndLanePermute( | |||
14843 | DL, MVT::v8i32, V1, V2, Mask, Subtarget, DAG)) | |||
14844 | return V; | |||
14845 | ||||
14846 | // If the shuffle patterns aren't repeated but this is a single-input | |||
14847 | // shuffle, directly generate a cross-lane VPERMD instruction. | |||
14848 | if (V2.isUndef()) { | |||
14849 | SDValue VPermMask = getConstVector(Mask, MVT::v8i32, DAG, DL, true); | |||
14850 | return DAG.getNode(X86ISD::VPERMV, DL, MVT::v8i32, VPermMask, V1); | |||
14851 | } | |||
14852 | ||||
14853 | // Assume that a single SHUFPS is faster than an alternative sequence of | |||
14854 | // multiple instructions (even if the CPU has a domain penalty). | |||
14855 | // If some CPU is harmed by the domain switch, we can fix it in a later pass. | |||
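// (The integer/FP bypass delay is typically a cycle or two on CPUs that
// charge it, usually cheaper than issuing an extra shuffle instruction.)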
14856 | if (Is128BitLaneRepeatedShuffle && isSingleSHUFPSMask(RepeatedMask)) { | |||
14857 | SDValue CastV1 = DAG.getBitcast(MVT::v8f32, V1); | |||
14858 | SDValue CastV2 = DAG.getBitcast(MVT::v8f32, V2); | |||
14859 | SDValue ShufPS = lowerVectorShuffleWithSHUFPS(DL, MVT::v8f32, RepeatedMask, | |||
14860 | CastV1, CastV2, DAG); | |||
14861 | return DAG.getBitcast(MVT::v8i32, ShufPS); | |||
14862 | } | |||
14863 | ||||
14864 | // Try to simplify this by merging 128-bit lanes to enable a lane-based | |||
14865 | // shuffle. | |||
14866 | if (SDValue Result = lowerVectorShuffleByMerging128BitLanes( | |||
14867 | DL, MVT::v8i32, V1, V2, Mask, Subtarget, DAG)) | |||
14868 | return Result; | |||
14869 | ||||
14870 | // Otherwise fall back on generic blend lowering. | |||
14871 | return lowerVectorShuffleAsDecomposedShuffleBlend(DL, MVT::v8i32, V1, V2, | |||
14872 | Mask, Subtarget, DAG); | |||
14873 | } | |||
14874 | ||||
14875 | /// Handle lowering of 16-lane 16-bit integer shuffles. | |||
14876 | /// | |||
14877 | /// This routine is only called when we have AVX2 and thus a reasonable | |||
14878 | /// instruction set for v16i16 shuffling. | |||
14879 | static SDValue lowerV16I16VectorShuffle(const SDLoc &DL, ArrayRef<int> Mask, | |||
14880 | const APInt &Zeroable, | |||
14881 | SDValue V1, SDValue V2, | |||
14882 | const X86Subtarget &Subtarget, | |||
14883 | SelectionDAG &DAG) { | |||
14884 | assert(V1.getSimpleValueType() == MVT::v16i16 && "Bad operand type!"); | |||
14885 | assert(V2.getSimpleValueType() == MVT::v16i16 && "Bad operand type!"); | |||
14886 | assert(Mask.size() == 16 && "Unexpected mask size for v16 shuffle!"); | |||
14887 | assert(Subtarget.hasAVX2() && "We can only lower v16i16 with AVX2!"); | |||
14888 | ||||
14889 | // Whenever we can lower this as a zext, that instruction is strictly faster | |||
14890 | // than any alternative. It also allows us to fold memory operands into the | |||
14891 | // shuffle in many cases. | |||
14892 | if (SDValue ZExt = lowerVectorShuffleAsZeroOrAnyExtend( | |||
14893 | DL, MVT::v16i16, V1, V2, Mask, Zeroable, Subtarget, DAG)) | |||
14894 | return ZExt; | |||
14895 | ||||
14896 | // Check for being able to broadcast a single element. | |||
14897 | if (SDValue Broadcast = lowerVectorShuffleAsBroadcast(DL, MVT::v16i16, V1, V2, | |||
14898 | Mask, Subtarget, DAG)) | |||
14899 | return Broadcast; | |||
14900 | ||||
14901 | if (SDValue Blend = lowerVectorShuffleAsBlend(DL, MVT::v16i16, V1, V2, Mask, | |||
14902 | Zeroable, Subtarget, DAG)) | |||
14903 | return Blend; | |||
14904 | ||||
14905 | // Use dedicated unpack instructions for masks that match their pattern. | |||
14906 | if (SDValue V = | |||
14907 | lowerVectorShuffleWithUNPCK(DL, MVT::v16i16, Mask, V1, V2, DAG)) | |||
14908 | return V; | |||
14909 | ||||
14910 | // Use dedicated pack instructions for masks that match their pattern. | |||
14911 | if (SDValue V = lowerVectorShuffleWithPACK(DL, MVT::v16i16, Mask, V1, V2, DAG, | |||
14912 | Subtarget)) | |||
14913 | return V; | |||
14914 | ||||
14915 | // Try to use shift instructions. | |||
14916 | if (SDValue Shift = lowerVectorShuffleAsShift(DL, MVT::v16i16, V1, V2, Mask, | |||
14917 | Zeroable, Subtarget, DAG)) | |||
14918 | return Shift; | |||
14919 | ||||
14920 | // Try to use byte rotation instructions. | |||
14921 | if (SDValue Rotate = lowerVectorShuffleAsByteRotate( | |||
14922 | DL, MVT::v16i16, V1, V2, Mask, Subtarget, DAG)) | |||
14923 | return Rotate; | |||
14924 | ||||
14925 | // Try to create an in-lane repeating shuffle mask and then shuffle the | |||
14926 | // results into the target lanes. | |||
14927 | if (SDValue V = lowerShuffleAsRepeatedMaskAndLanePermute( | |||
14928 | DL, MVT::v16i16, V1, V2, Mask, Subtarget, DAG)) | |||
14929 | return V; | |||
14930 | ||||
14931 | if (V2.isUndef()) { | |||
14932 | // There are no generalized cross-lane shuffle operations available on i16 | |||
14933 | // element types. | |||
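// (AVX2's cross-lane permutes, VPERMD/VPERMQ and their FP variants, only
// handle 32/64-bit elements; VPERMW needs AVX512BW+VL and is tried below.)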
14934 | if (is128BitLaneCrossingShuffleMask(MVT::v16i16, Mask)) { | |||
14935 | if (SDValue V = lowerVectorShuffleAsLanePermuteAndPermute( | |||
14936 | DL, MVT::v16i16, V1, V2, Mask, DAG, Subtarget)) | |||
14937 | return V; | |||
14938 | ||||
14939 | return lowerVectorShuffleAsLanePermuteAndBlend(DL, MVT::v16i16, V1, V2, | |||
14940 | Mask, DAG, Subtarget); | |||
14941 | } | |||
14942 | ||||
14943 | SmallVector<int, 8> RepeatedMask; | |||
14944 | if (is128BitLaneRepeatedShuffleMask(MVT::v16i16, Mask, RepeatedMask)) { | |||
14945 | // As this is a single-input shuffle, the repeated mask should be | |||
14946 | // a strictly valid v8i16 mask that we can pass through to the v8i16 | |||
14947 | // lowering to handle even the v16 case. | |||
14948 | return lowerV8I16GeneralSingleInputVectorShuffle( | |||
14949 | DL, MVT::v16i16, V1, RepeatedMask, Subtarget, DAG); | |||
14950 | } | |||
14951 | } | |||
14952 | ||||
14953 | if (SDValue PSHUFB = lowerVectorShuffleWithPSHUFB( | |||
14954 | DL, MVT::v16i16, Mask, V1, V2, Zeroable, Subtarget, DAG)) | |||
14955 | return PSHUFB; | |||
14956 | ||||
14957 | // AVX512BWVL can lower to VPERMW. | |||
14958 | if (Subtarget.hasBWI() && Subtarget.hasVLX()) | |||
14959 | return lowerVectorShuffleWithPERMV(DL, MVT::v16i16, Mask, V1, V2, DAG); | |||
14960 | ||||
14961 | // Try to simplify this by merging 128-bit lanes to enable a lane-based | |||
14962 | // shuffle. | |||
14963 | if (SDValue Result = lowerVectorShuffleByMerging128BitLanes( | |||
14964 | DL, MVT::v16i16, V1, V2, Mask, Subtarget, DAG)) | |||
14965 | return Result; | |||
14966 | ||||
14967 | // Try to permute the lanes and then use a per-lane permute. | |||
14968 | if (SDValue V = lowerVectorShuffleAsLanePermuteAndPermute( | |||
14969 | DL, MVT::v16i16, V1, V2, Mask, DAG, Subtarget)) | |||
14970 | return V; | |||
14971 | ||||
14972 | // Otherwise fall back on generic lowering. | |||
14973 | return lowerVectorShuffleAsSplitOrBlend(DL, MVT::v16i16, V1, V2, Mask, | |||
14974 | Subtarget, DAG); | |||
14975 | } | |||
14976 | ||||
14977 | /// Handle lowering of 32-lane 8-bit integer shuffles. | |||
14978 | /// | |||
14979 | /// This routine is only called when we have AVX2 and thus a reasonable | |||
14980 | /// instruction set for v32i8 shuffling. | |||
14981 | static SDValue lowerV32I8VectorShuffle(const SDLoc &DL, ArrayRef<int> Mask, | |||
14982 | const APInt &Zeroable, | |||
14983 | SDValue V1, SDValue V2, | |||
14984 | const X86Subtarget &Subtarget, | |||
14985 | SelectionDAG &DAG) { | |||
14986 | assert(V1.getSimpleValueType() == MVT::v32i8 && "Bad operand type!"); | |||
14987 | assert(V2.getSimpleValueType() == MVT::v32i8 && "Bad operand type!"); | |||
14988 | assert(Mask.size() == 32 && "Unexpected mask size for v32 shuffle!"); | |||
14989 | assert(Subtarget.hasAVX2() && "We can only lower v32i8 with AVX2!"); | |||
14990 | ||||
14991 | // Whenever we can lower this as a zext, that instruction is strictly faster | |||
14992 | // than any alternative. It also allows us to fold memory operands into the | |||
14993 | // shuffle in many cases. | |||
14994 | if (SDValue ZExt = lowerVectorShuffleAsZeroOrAnyExtend( | |||
14995 | DL, MVT::v32i8, V1, V2, Mask, Zeroable, Subtarget, DAG)) | |||
14996 | return ZExt; | |||
14997 | ||||
14998 | // Check for being able to broadcast a single element. | |||
14999 | if (SDValue Broadcast = lowerVectorShuffleAsBroadcast(DL, MVT::v32i8, V1, V2, | |||
15000 | Mask, Subtarget, DAG)) | |||
15001 | return Broadcast; | |||
15002 | ||||
15003 | if (SDValue Blend = lowerVectorShuffleAsBlend(DL, MVT::v32i8, V1, V2, Mask, | |||
15004 | Zeroable, Subtarget, DAG)) | |||
15005 | return Blend; | |||
15006 | ||||
15007 | // Use dedicated unpack instructions for masks that match their pattern. | |||
15008 | if (SDValue V = | |||
15009 | lowerVectorShuffleWithUNPCK(DL, MVT::v32i8, Mask, V1, V2, DAG)) | |||
15010 | return V; | |||
15011 | ||||
15012 | // Use dedicated pack instructions for masks that match their pattern. | |||
15013 | if (SDValue V = lowerVectorShuffleWithPACK(DL, MVT::v32i8, Mask, V1, V2, DAG, | |||
15014 | Subtarget)) | |||
15015 | return V; | |||
15016 | ||||
15017 | // Try to use shift instructions. | |||
15018 | if (SDValue Shift = lowerVectorShuffleAsShift(DL, MVT::v32i8, V1, V2, Mask, | |||
15019 | Zeroable, Subtarget, DAG)) | |||
15020 | return Shift; | |||
15021 | ||||
15022 | // Try to use byte rotation instructions. | |||
15023 | if (SDValue Rotate = lowerVectorShuffleAsByteRotate( | |||
15024 | DL, MVT::v32i8, V1, V2, Mask, Subtarget, DAG)) | |||
15025 | return Rotate; | |||
15026 | ||||
15027 | // Try to create an in-lane repeating shuffle mask and then shuffle the | |||
15028 | // results into the target lanes. | |||
15029 | if (SDValue V = lowerShuffleAsRepeatedMaskAndLanePermute( | |||
15030 | DL, MVT::v32i8, V1, V2, Mask, Subtarget, DAG)) | |||
15031 | return V; | |||
15032 | ||||
15033 | // There are no generalized cross-lane shuffle operations available on i8 | |||
15034 | // element types. | |||
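// (The byte cross-lane permute VPERMB needs AVX512VBMI+VL and is tried
// below.)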
15035 | if (V2.isUndef() && is128BitLaneCrossingShuffleMask(MVT::v32i8, Mask)) { | |||
15036 | if (SDValue V = lowerVectorShuffleAsLanePermuteAndPermute( | |||
15037 | DL, MVT::v32i8, V1, V2, Mask, DAG, Subtarget)) | |||
15038 | return V; | |||
15039 | ||||
15040 | return lowerVectorShuffleAsLanePermuteAndBlend(DL, MVT::v32i8, V1, V2, Mask, | |||
15041 | DAG, Subtarget); | |||
15042 | } | |||
15043 | ||||
15044 | if (SDValue PSHUFB = lowerVectorShuffleWithPSHUFB( | |||
15045 | DL, MVT::v32i8, Mask, V1, V2, Zeroable, Subtarget, DAG)) | |||
15046 | return PSHUFB; | |||
15047 | ||||
15048 | // AVX512VBMIVL can lower to VPERMB. | |||
15049 | if (Subtarget.hasVBMI() && Subtarget.hasVLX()) | |||
15050 | return lowerVectorShuffleWithPERMV(DL, MVT::v32i8, Mask, V1, V2, DAG); | |||
15051 | ||||
15052 | // Try to simplify this by merging 128-bit lanes to enable a lane-based | |||
15053 | // shuffle. | |||
15054 | if (SDValue Result = lowerVectorShuffleByMerging128BitLanes( | |||
15055 | DL, MVT::v32i8, V1, V2, Mask, Subtarget, DAG)) | |||
15056 | return Result; | |||
15057 | ||||
15058 | // Try to permute the lanes and then use a per-lane permute. | |||
15059 | if (SDValue V = lowerVectorShuffleAsLanePermuteAndPermute( | |||
15060 | DL, MVT::v32i8, V1, V2, Mask, DAG, Subtarget)) | |||
15061 | return V; | |||
15062 | ||||
15063 | // Otherwise fall back on generic lowering. | |||
15064 | return lowerVectorShuffleAsSplitOrBlend(DL, MVT::v32i8, V1, V2, Mask, | |||
15065 | Subtarget, DAG); | |||
15066 | } | |||
15067 | ||||
15068 | /// High-level routine to lower various 256-bit x86 vector shuffles. | |||
15069 | /// | |||
15070 | /// This routine either breaks down the specific type of a 256-bit x86 vector | |||
15071 | /// shuffle or splits it into two 128-bit shuffles and fuses the results back | |||
15072 | /// together based on the available instructions. | |||
15073 | static SDValue lower256BitVectorShuffle(const SDLoc &DL, ArrayRef<int> Mask, | |||
15074 | MVT VT, SDValue V1, SDValue V2, | |||
15075 | const APInt &Zeroable, | |||
15076 | const X86Subtarget &Subtarget, | |||
15077 | SelectionDAG &DAG) { | |||
15078 | // If we have a single input to the zero element, insert that into V1 if we | |||
15079 | // can do so cheaply. | |||
15080 | int NumElts = VT.getVectorNumElements(); | |||
15081 | int NumV2Elements = count_if(Mask, [NumElts](int M) { return M >= NumElts; }); | |||
15082 | ||||
15083 | if (NumV2Elements == 1 && Mask[0] >= NumElts) | |||
15084 | if (SDValue Insertion = lowerVectorShuffleAsElementInsertion( | |||
15085 | DL, VT, V1, V2, Mask, Zeroable, Subtarget, DAG)) | |||
15086 | return Insertion; | |||
15087 | ||||
15088 | // Handle special cases where the lower or upper half is UNDEF. | |||
15089 | if (SDValue V = | |||
15090 | lowerVectorShuffleWithUndefHalf(DL, VT, V1, V2, Mask, Subtarget, DAG)) | |||
15091 | return V; | |||
15092 | ||||
15093 | // There is a really nice hard cut-over between AVX1 and AVX2 that means we | |||
15094 | // can check for those subtargets here and avoid much of the subtarget | |||
15095 | // querying in the per-vector-type lowering routines. With AVX1 we have | |||
15096 | // essentially *zero* ability to manipulate a 256-bit vector with integer | |||
15097 | // types. Since we'll use floating point types there eventually, just | |||
15098 | // immediately cast everything to a float and operate entirely in that domain. | |||
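// (Illustrative: without AVX2 a v4i64 shuffle is bitcast to v4f64 and
// lowered in the FP domain, while sub-32-bit element types are instead
// split into two 128-bit halves below.)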
15099 | if (VT.isInteger() && !Subtarget.hasAVX2()) { | |||
15100 | int ElementBits = VT.getScalarSizeInBits(); | |||
15101 | if (ElementBits < 32) { | |||
15102 | // No floating point type available, if we can't use the bit operations | |||
15103 | // for masking/blending then decompose into 128-bit vectors. | |||
15104 | if (SDValue V = | |||
15105 | lowerVectorShuffleAsBitMask(DL, VT, V1, V2, Mask, Zeroable, DAG)) | |||
15106 | return V; | |||
15107 | if (SDValue V = lowerVectorShuffleAsBitBlend(DL, VT, V1, V2, Mask, DAG)) | |||
15108 | return V; | |||
15109 | return splitAndLowerVectorShuffle(DL, VT, V1, V2, Mask, DAG); | |||
15110 | } | |||
15111 | ||||
15112 | MVT FpVT = MVT::getVectorVT(MVT::getFloatingPointVT(ElementBits), | |||
15113 | VT.getVectorNumElements()); | |||
15114 | V1 = DAG.getBitcast(FpVT, V1); | |||
15115 | V2 = DAG.getBitcast(FpVT, V2); | |||
15116 | return DAG.getBitcast(VT, DAG.getVectorShuffle(FpVT, DL, V1, V2, Mask)); | |||
15117 | } | |||
15118 | ||||
15119 | switch (VT.SimpleTy) { | |||
15120 | case MVT::v4f64: | |||
15121 | return lowerV4F64VectorShuffle(DL, Mask, Zeroable, V1, V2, Subtarget, DAG); | |||
15122 | case MVT::v4i64: | |||
15123 | return lowerV4I64VectorShuffle(DL, Mask, Zeroable, V1, V2, Subtarget, DAG); | |||
15124 | case MVT::v8f32: | |||
15125 | return lowerV8F32VectorShuffle(DL, Mask, Zeroable, V1, V2, Subtarget, DAG); | |||
15126 | case MVT::v8i32: | |||
15127 | return lowerV8I32VectorShuffle(DL, Mask, Zeroable, V1, V2, Subtarget, DAG); | |||
15128 | case MVT::v16i16: | |||
15129 | return lowerV16I16VectorShuffle(DL, Mask, Zeroable, V1, V2, Subtarget, DAG); | |||
15130 | case MVT::v32i8: | |||
15131 | return lowerV32I8VectorShuffle(DL, Mask, Zeroable, V1, V2, Subtarget, DAG); | |||
15132 | ||||
15133 | default: | |||
15134 | llvm_unreachable("Not a valid 256-bit x86 vector type!"); | |||
15135 | } | |||
15136 | } | |||
15137 | ||||
15138 | /// Try to lower a vector shuffle as a shuffle of 128-bit subvectors. | |||
15139 | static SDValue lowerV4X128VectorShuffle(const SDLoc &DL, MVT VT, | |||
15140 | ArrayRef<int> Mask, | |||
15141 | const APInt &Zeroable, | |||
15142 | SDValue V1, SDValue V2, | |||
15143 | const X86Subtarget &Subtarget, | |||
15144 | SelectionDAG &DAG) { | |||
15145 | assert(VT.getScalarSizeInBits() == 64 && | |||
15146 |        "Unexpected element type size for 128bit shuffle."); | |||
15147 | ||||
15148 | // Handling a 256-bit vector requires VLX, and lowerV2X128VectorShuffle() | |||
15149 | // is most likely the better solution for that case. | |||
15150 | assert(VT.is512BitVector() && "Unexpected vector size for 512bit shuffle."); | |||
15151 | ||||
15152 | // TODO - use Zeroable like we do for lowerV2X128VectorShuffle? | |||
15153 | SmallVector<int, 4> WidenedMask; | |||
15154 | if (!canWidenShuffleElements(Mask, WidenedMask)) | |||
15155 | return SDValue(); | |||
15156 | ||||
15157 | // Try to use an insert into a zero vector. | |||
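// (Illustrative: for v8i64, Zeroable == 0xf0 means the upper four elements
// are known zero, so V1's low 256 bits can be inserted into a zero vector.)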
15158 | if (WidenedMask[0] == 0 && (Zeroable & 0xf0) == 0xf0 && | |||
15159 | (WidenedMask[1] == 1 || (Zeroable & 0x0c) == 0x0c)) { | |||
15160 | unsigned NumElts = ((Zeroable & 0x0c) == 0x0c) ? 2 : 4; | |||
15161 | MVT SubVT = MVT::getVectorVT(VT.getVectorElementType(), NumElts); | |||
15162 | SDValue LoV = DAG.getNode(ISD::EXTRACT_SUBVECTOR, DL, SubVT, V1, | |||
15163 | DAG.getIntPtrConstant(0, DL)); | |||
15164 | return DAG.getNode(ISD::INSERT_SUBVECTOR, DL, VT, | |||
15165 | getZeroVector(VT, Subtarget, DAG, DL), LoV, | |||
15166 | DAG.getIntPtrConstant(0, DL)); | |||
15167 | } | |||
15168 | ||||
15169 | // Check for patterns which can be matched with a single insert of a 256-bit | |||
15170 | // subvector. | |||
15171 | bool OnlyUsesV1 = isShuffleEquivalent(V1, V2, Mask, | |||
15172 | {0, 1, 2, 3, 0, 1, 2, 3}); | |||
15173 | if (OnlyUsesV1 || isShuffleEquivalent(V1, V2, Mask, | |||
15174 | {0, 1, 2, 3, 8, 9, 10, 11})) { | |||
15175 | MVT SubVT = MVT::getVectorVT(VT.getVectorElementType(), 4); | |||
15176 | SDValue SubVec = DAG.getNode(ISD::EXTRACT_SUBVECTOR, DL, SubVT, | |||
15177 | OnlyUsesV1 ? V1 : V2, | |||
15178 | DAG.getIntPtrConstant(0, DL)); | |||
15179 | return DAG.getNode(ISD::INSERT_SUBVECTOR, DL, VT, V1, SubVec, | |||
15180 | DAG.getIntPtrConstant(4, DL)); | |||
15181 | } | |||
15182 | ||||
15183 | assert(WidenedMask.size() == 4); | |||
15184 | ||||
15185 | // See if this is an insertion of the lower 128-bits of V2 into V1. | |||
15186 | bool IsInsert = true; | |||
15187 | int V2Index = -1; | |||
15188 | for (int i = 0; i < 4; ++i) { | |||
15189 | assert(WidenedMask[i] >= -1); | |||
15190 | if (WidenedMask[i] < 0) | |||
15191 | continue; | |||
15192 | ||||
15193 | // Make sure all V1 subvectors are in place. | |||
15194 | if (WidenedMask[i] < 4) { | |||
15195 | if (WidenedMask[i] != i) { | |||
15196 | IsInsert = false; | |||
15197 | break; | |||
15198 | } | |||
15199 | } else { | |||
15200 | // Make sure we only have a single V2 index and it's the lowest 128 bits. | |||
15201 | if (V2Index >= 0 || WidenedMask[i] != 4) { | |||
15202 | IsInsert = false; | |||
15203 | break; | |||
15204 | } | |||
15205 | V2Index = i; | |||
15206 | } | |||
15207 | } | |||
15208 | if (IsInsert && V2Index >= 0) { | |||
15209 | MVT SubVT = MVT::getVectorVT(VT.getVectorElementType(), 2); | |||
15210 | SDValue Subvec = DAG.getNode(ISD::EXTRACT_SUBVECTOR, DL, SubVT, V2, | |||
15211 | DAG.getIntPtrConstant(0, DL)); | |||
15212 | return insert128BitVector(V1, Subvec, V2Index * 2, DAG, DL); | |||
15213 | } | |||
15214 | ||||
15215 | // Try to lower to vshuf64x2/vshuf32x4. | |||
15216 | SDValue Ops[2] = {DAG.getUNDEF(VT), DAG.getUNDEF(VT)}; | |||
15217 | unsigned PermMask = 0; | |||
15218 | // Ensure elements came from the same Op. | |||
15219 | for (int i = 0; i < 4; ++i) { | |||
15220 | assert(WidenedMask[i] >= -1); | |||
15221 | if (WidenedMask[i] < 0) | |||
15222 | continue; | |||
15223 | ||||
15224 | SDValue Op = WidenedMask[i] >= 4 ? V2 : V1; | |||
15225 | unsigned OpIndex = i / 2; | |||
15226 | if (Ops[OpIndex].isUndef()) | |||
15227 | Ops[OpIndex] = Op; | |||
15228 | else if (Ops[OpIndex] != Op) | |||
15229 | return SDValue(); | |||
15230 | ||||
15231 | // Convert the 128-bit shuffle mask selection values into 128-bit selection | |||
15232 | // bits defined by a vshuf64x2 instruction's immediate control byte. | |||
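// (Illustrative: WidenedMask <2,3,6,7> picks the high 256 bits of each
// source, giving Ops = {V1, V2} and
// PermMask = 2 | (3 << 2) | (2 << 4) | (3 << 6) == 0xEE.)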
15233 | PermMask |= (WidenedMask[i] % 4) << (i * 2); | |||
15234 | } | |||
15235 | ||||
15236 | return DAG.getNode(X86ISD::SHUF128, DL, VT, Ops[0], Ops[1], | |||
15237 | DAG.getConstant(PermMask, DL, MVT::i8)); | |||
15238 | } | |||
15239 | ||||
15240 | /// Handle lowering of 8-lane 64-bit floating point shuffles. | |||
15241 | static SDValue lowerV8F64VectorShuffle(const SDLoc &DL, ArrayRef<int> Mask, | |||
15242 | const APInt &Zeroable, | |||
15243 | SDValue V1, SDValue V2, | |||
15244 | const X86Subtarget &Subtarget, | |||
15245 | SelectionDAG &DAG) { | |||
15246 | assert(V1.getSimpleValueType() == MVT::v8f64 && "Bad operand type!"); | |||
15247 | assert(V2.getSimpleValueType() == MVT::v8f64 && "Bad operand type!"); | |||
15248 | assert(Mask.size() == 8 && "Unexpected mask size for v8 shuffle!"); | |||
15249 | ||||
15250 | if (V2.isUndef()) { | |||
15251 | // Use low duplicate instructions for masks that match their pattern. | |||
15252 | if (isShuffleEquivalent(V1, V2, Mask, {0, 0, 2, 2, 4, 4, 6, 6})) | |||
15253 | return DAG.getNode(X86ISD::MOVDDUP, DL, MVT::v8f64, V1); | |||
15254 | ||||
15255 | if (!is128BitLaneCrossingShuffleMask(MVT::v8f64, Mask)) { | |||
15256 | // Non-half-crossing single input shuffles can be lowered with an | |||
15257 | // interleaved permutation. | |||
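// (Illustrative: mask <1,0,3,2,5,4,7,6>, swapping within each 64-bit
// pair, encodes as the VPERMILPD immediate 0b01010101 == 0x55.)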
15258 | unsigned VPERMILPMask = (Mask[0] == 1) | ((Mask[1] == 1) << 1) | | |||
15259 | ((Mask[2] == 3) << 2) | ((Mask[3] == 3) << 3) | | |||
15260 | ((Mask[4] == 5) << 4) | ((Mask[5] == 5) << 5) | | |||
15261 | ((Mask[6] == 7) << 6) | ((Mask[7] == 7) << 7); | |||
15262 | return DAG.getNode(X86ISD::VPERMILPI, DL, MVT::v8f64, V1, | |||
15263 | DAG.getConstant(VPERMILPMask, DL, MVT::i8)); | |||
15264 | } | |||
15265 | ||||
15266 | SmallVector<int, 4> RepeatedMask; | |||
15267 | if (is256BitLaneRepeatedShuffleMask(MVT::v8f64, Mask, RepeatedMask)) | |||
15268 | return DAG.getNode(X86ISD::VPERMI, DL, MVT::v8f64, V1, | |||
15269 | getV4X86ShuffleImm8ForMask(RepeatedMask, DL, DAG)); | |||
15270 | } | |||
15271 | ||||
15272 | if (SDValue Shuf128 = | |||
15273 | lowerV4X128VectorShuffle(DL, MVT::v8f64, Mask, Zeroable, V1, V2, | |||
15274 | Subtarget, DAG)) | |||
15275 | return Shuf128; | |||
15276 | ||||
15277 | if (SDValue Unpck = | |||
15278 | lowerVectorShuffleWithUNPCK(DL, MVT::v8f64, Mask, V1, V2, DAG)) | |||
15279 | return Unpck; | |||
15280 | ||||
15281 | // Check whether the blend happens to exactly fit the SHUFPD pattern. | |||
15282 | if (SDValue Op = | |||
15283 | lowerVectorShuffleWithSHUFPD(DL, MVT::v8f64, Mask, V1, V2, DAG)) | |||
15284 | return Op; | |||
15285 | ||||
15286 | if (SDValue V = lowerVectorShuffleToEXPAND(DL, MVT::v8f64, Zeroable, Mask, V1, | |||
15287 | V2, DAG, Subtarget)) | |||
15288 | return V; | |||
15289 | ||||
15290 | if (SDValue Blend = lowerVectorShuffleAsBlend(DL, MVT::v8f64, V1, V2, Mask, | |||
15291 | Zeroable, Subtarget, DAG)) | |||
15292 | return Blend; | |||
15293 | ||||
15294 | return lowerVectorShuffleWithPERMV(DL, MVT::v8f64, Mask, V1, V2, DAG); | |||
15295 | } | |||
15296 | ||||
15297 | /// Handle lowering of 16-lane 32-bit floating point shuffles. | |||
15298 | static SDValue lowerV16F32VectorShuffle(const SDLoc &DL, ArrayRef<int> Mask, | |||
15299 | const APInt &Zeroable, | |||
15300 | SDValue V1, SDValue V2, | |||
15301 | const X86Subtarget &Subtarget, | |||
15302 | SelectionDAG &DAG) { | |||
15303 | assert(V1.getSimpleValueType() == MVT::v16f32 && "Bad operand type!"); | |||
15304 | assert(V2.getSimpleValueType() == MVT::v16f32 && "Bad operand type!"); | |||
15305 | assert(Mask.size() == 16 && "Unexpected mask size for v16 shuffle!"); | |||
15306 | ||||
15307 | // If the shuffle mask is repeated in each 128-bit lane, we have many more | |||
15308 | // options to efficiently lower the shuffle. | |||
15309 | SmallVector<int, 4> RepeatedMask; | |||
15310 | if (is128BitLaneRepeatedShuffleMask(MVT::v16f32, Mask, RepeatedMask)) { | |||
15311 | assert(RepeatedMask.size() == 4 && "Unexpected repeated mask size!"); | |||
15312 | ||||
15313 | // Use even/odd duplicate instructions for masks that match their pattern. | |||
15314 | if (isShuffleEquivalent(V1, V2, RepeatedMask, {0, 0, 2, 2})) | |||
15315 | return DAG.getNode(X86ISD::MOVSLDUP, DL, MVT::v16f32, V1); | |||
15316 | if (isShuffleEquivalent(V1, V2, RepeatedMask, {1, 1, 3, 3})) | |||
15317 | return DAG.getNode(X86ISD::MOVSHDUP, DL, MVT::v16f32, V1); | |||
15318 | ||||
15319 | if (V2.isUndef()) | |||
15320 | return DAG.getNode(X86ISD::VPERMILPI, DL, MVT::v16f32, V1, | |||
15321 | getV4X86ShuffleImm8ForMask(RepeatedMask, DL, DAG)); | |||
15322 | ||||
15323 | // Use dedicated unpack instructions for masks that match their pattern. | |||
15324 | if (SDValue Unpck = | |||
15325 | lowerVectorShuffleWithUNPCK(DL, MVT::v16f32, Mask, V1, V2, DAG)) | |||
15326 | return Unpck; | |||
15327 | ||||
15328 | if (SDValue Blend = lowerVectorShuffleAsBlend(DL, MVT::v16f32, V1, V2, Mask, | |||
15329 | Zeroable, Subtarget, DAG)) | |||
15330 | return Blend; | |||
15331 | ||||
15332 | // Otherwise, fall back to a SHUFPS sequence. | |||
15333 | return lowerVectorShuffleWithSHUFPS(DL, MVT::v16f32, RepeatedMask, V1, V2, DAG); | |||
15334 | } | |||
15335 | ||||
15336 | // If we have a single-input shuffle with different shuffle patterns in the | |||
15337 | // 128-bit lanes that doesn't cross lanes, use a variable mask VPERMILPS. | |||
15338 | if (V2.isUndef() && | |||
15339 | !is128BitLaneCrossingShuffleMask(MVT::v16f32, Mask)) { | |||
15340 | SDValue VPermMask = getConstVector(Mask, MVT::v16i32, DAG, DL, true); | |||
15341 | return DAG.getNode(X86ISD::VPERMILPV, DL, MVT::v16f32, V1, VPermMask); | |||
15342 | } | |||
15343 | ||||
15344 | // If we have AVX512F support, we can use VEXPAND. | |||
15345 | if (SDValue V = lowerVectorShuffleToEXPAND(DL, MVT::v16f32, Zeroable, Mask, | |||
15346 | V1, V2, DAG, Subtarget)) | |||
15347 | return V; | |||
15348 | ||||
15349 | return lowerVectorShuffleWithPERMV(DL, MVT::v16f32, Mask, V1, V2, DAG); | |||
15350 | } | |||
15351 | ||||
15352 | /// Handle lowering of 8-lane 64-bit integer shuffles. | |||
15353 | static SDValue lowerV8I64VectorShuffle(const SDLoc &DL, ArrayRef<int> Mask, | |||
15354 | const APInt &Zeroable, | |||
15355 | SDValue V1, SDValue V2, | |||
15356 | const X86Subtarget &Subtarget, | |||
15357 | SelectionDAG &DAG) { | |||
15358 | assert(V1.getSimpleValueType() == MVT::v8i64 && "Bad operand type!"); | |||
15359 | assert(V2.getSimpleValueType() == MVT::v8i64 && "Bad operand type!"); | |||
15360 | assert(Mask.size() == 8 && "Unexpected mask size for v8 shuffle!"); | |||
15361 | ||||
15362 | if (V2.isUndef()) { | |||
15363 | // When the shuffle is repeated identically in each 128-bit lane, we can | |||
15364 | // use lower-latency instructions that operate on all four 128-bit lanes | |||
15365 | // at once. | |||
15366 | SmallVector<int, 2> Repeated128Mask; | |||
15367 | if (is128BitLaneRepeatedShuffleMask(MVT::v8i64, Mask, Repeated128Mask)) { | |||
15368 | SmallVector<int, 4> PSHUFDMask; | |||
15369 | scaleShuffleMask<int>(2, Repeated128Mask, PSHUFDMask); | |||
15370 | return DAG.getBitcast( | |||
15371 | MVT::v8i64, | |||
15372 | DAG.getNode(X86ISD::PSHUFD, DL, MVT::v16i32, | |||
15373 | DAG.getBitcast(MVT::v16i32, V1), | |||
15374 | getV4X86ShuffleImm8ForMask(PSHUFDMask, DL, DAG))); | |||
15375 | } | |||
15376 | ||||
15377 | SmallVector<int, 4> Repeated256Mask; | |||
15378 | if (is256BitLaneRepeatedShuffleMask(MVT::v8i64, Mask, Repeated256Mask)) | |||
15379 | return DAG.getNode(X86ISD::VPERMI, DL, MVT::v8i64, V1, | |||
15380 | getV4X86ShuffleImm8ForMask(Repeated256Mask, DL, DAG)); | |||
15381 | } | |||
15382 | ||||
15383 | if (SDValue Shuf128 = | |||
15384 | lowerV4X128VectorShuffle(DL, MVT::v8i64, Mask, Zeroable, | |||
15385 | V1, V2, Subtarget, DAG)) | |||
15386 | return Shuf128; | |||
15387 | ||||
15388 | // Try to use shift instructions. | |||
15389 | if (SDValue Shift = lowerVectorShuffleAsShift(DL, MVT::v8i64, V1, V2, Mask, | |||
15390 | Zeroable, Subtarget, DAG)) | |||
15391 | return Shift; | |||
15392 | ||||
15393 | // Try to use VALIGN. | |||
15394 | if (SDValue Rotate = lowerVectorShuffleAsRotate(DL, MVT::v8i64, V1, V2, | |||
15395 | Mask, Subtarget, DAG)) | |||
15396 | return Rotate; | |||
15397 | ||||
15398 | // Try to use PALIGNR. | |||
15399 | if (SDValue Rotate = lowerVectorShuffleAsByteRotate(DL, MVT::v8i64, V1, V2, | |||
15400 | Mask, Subtarget, DAG)) | |||
15401 | return Rotate; | |||
15402 | ||||
15403 | if (SDValue Unpck = | |||
15404 | lowerVectorShuffleWithUNPCK(DL, MVT::v8i64, Mask, V1, V2, DAG)) | |||
15405 | return Unpck; | |||
15406 | // If we have AVX512F support, we can use VEXPAND. | |||
15407 | if (SDValue V = lowerVectorShuffleToEXPAND(DL, MVT::v8i64, Zeroable, Mask, V1, | |||
15408 | V2, DAG, Subtarget)) | |||
15409 | return V; | |||
15410 | ||||
15411 | if (SDValue Blend = lowerVectorShuffleAsBlend(DL, MVT::v8i64, V1, V2, Mask, | |||
15412 | Zeroable, Subtarget, DAG)) | |||
15413 | return Blend; | |||
15414 | ||||
15415 | return lowerVectorShuffleWithPERMV(DL, MVT::v8i64, Mask, V1, V2, DAG); | |||
15416 | } | |||
15417 | ||||
15418 | /// Handle lowering of 16-lane 32-bit integer shuffles. | |||
15419 | static SDValue lowerV16I32VectorShuffle(const SDLoc &DL, ArrayRef<int> Mask, | |||
15420 | const APInt &Zeroable, | |||
15421 | SDValue V1, SDValue V2, | |||
15422 | const X86Subtarget &Subtarget, | |||
15423 | SelectionDAG &DAG) { | |||
15424 | assert(V1.getSimpleValueType() == MVT::v16i32 && "Bad operand type!"); | |||
15425 | assert(V2.getSimpleValueType() == MVT::v16i32 && "Bad operand type!"); | |||
15426 | assert(Mask.size() == 16 && "Unexpected mask size for v16 shuffle!"); | |||
15427 | ||||
15428 | // Whenever we can lower this as a zext, that instruction is strictly faster | |||
15429 | // than any alternative. It also allows us to fold memory operands into the | |||
15430 | // shuffle in many cases. | |||
15431 | if (SDValue ZExt = lowerVectorShuffleAsZeroOrAnyExtend( | |||
15432 | DL, MVT::v16i32, V1, V2, Mask, Zeroable, Subtarget, DAG)) | |||
15433 | return ZExt; | |||
15434 | ||||
15435 | // If the shuffle mask is repeated in each 128-bit lane we can use more | |||
15436 | // efficient instructions that mirror the shuffles across the four 128-bit | |||
15437 | // lanes. | |||
15438 | SmallVector<int, 4> RepeatedMask; | |||
15439 | bool Is128BitLaneRepeatedShuffle = | |||
15440 | is128BitLaneRepeatedShuffleMask(MVT::v16i32, Mask, RepeatedMask); | |||
15441 | if (Is128BitLaneRepeatedShuffle) { | |||
15442 | assert(RepeatedMask.size() == 4 && "Unexpected repeated mask size!"); | |||
15443 | if (V2.isUndef()) | |||
15444 | return DAG.getNode(X86ISD::PSHUFD, DL, MVT::v16i32, V1, | |||
15445 | getV4X86ShuffleImm8ForMask(RepeatedMask, DL, DAG)); | |||
15446 | ||||
15447 | // Use dedicated unpack instructions for masks that match their pattern. | |||
15448 | if (SDValue V = | |||
15449 | lowerVectorShuffleWithUNPCK(DL, MVT::v16i32, Mask, V1, V2, DAG)) | |||
15450 | return V; | |||
15451 | } | |||
15452 | ||||
15453 | // Try to use shift instructions. | |||
15454 | if (SDValue Shift = lowerVectorShuffleAsShift(DL, MVT::v16i32, V1, V2, Mask, | |||
15455 | Zeroable, Subtarget, DAG)) | |||
15456 | return Shift; | |||
15457 | ||||
15458 | // Try to use VALIGN. | |||
15459 | if (SDValue Rotate = lowerVectorShuffleAsRotate(DL, MVT::v16i32, V1, V2, | |||
15460 | Mask, Subtarget, DAG)) | |||
15461 | return Rotate; | |||
15462 | ||||
15463 | // Try to use byte rotation instructions. | |||
15464 | if (Subtarget.hasBWI()) | |||
15465 | if (SDValue Rotate = lowerVectorShuffleAsByteRotate( | |||
15466 | DL, MVT::v16i32, V1, V2, Mask, Subtarget, DAG)) | |||
15467 | return Rotate; | |||
15468 | ||||
15469 | // Assume that a single SHUFPS is faster than using a permv shuffle. | |||
15470 | // If some CPU is harmed by the domain switch, we can fix it in a later pass. | |||
15471 | if (Is128BitLaneRepeatedShuffle && isSingleSHUFPSMask(RepeatedMask)) { | |||
15472 | SDValue CastV1 = DAG.getBitcast(MVT::v16f32, V1); | |||
15473 | SDValue CastV2 = DAG.getBitcast(MVT::v16f32, V2); | |||
15474 | SDValue ShufPS = lowerVectorShuffleWithSHUFPS(DL, MVT::v16f32, RepeatedMask, | |||
15475 | CastV1, CastV2, DAG); | |||
15476 | return DAG.getBitcast(MVT::v16i32, ShufPS); | |||
15477 | } | |||
15478 | // If we have AVX512F support, we can use VEXPAND. | |||
15479 | if (SDValue V = lowerVectorShuffleToEXPAND(DL, MVT::v16i32, Zeroable, Mask, | |||
15480 | V1, V2, DAG, Subtarget)) | |||
15481 | return V; | |||
15482 | ||||
15483 | if (SDValue Blend = lowerVectorShuffleAsBlend(DL, MVT::v16i32, V1, V2, Mask, | |||
15484 | Zeroable, Subtarget, DAG)) | |||
15485 | return Blend; | |||
15486 | return lowerVectorShuffleWithPERMV(DL, MVT::v16i32, Mask, V1, V2, DAG); | |||
15487 | } | |||
15488 | ||||
15489 | /// Handle lowering of 32-lane 16-bit integer shuffles. | |||
15490 | static SDValue lowerV32I16VectorShuffle(const SDLoc &DL, ArrayRef<int> Mask, | |||
15491 | const APInt &Zeroable, | |||
15492 | SDValue V1, SDValue V2, | |||
15493 | const X86Subtarget &Subtarget, | |||
15494 | SelectionDAG &DAG) { | |||
15495 | assert(V1.getSimpleValueType() == MVT::v32i16 && "Bad operand type!"); | |||
15496 | assert(V2.getSimpleValueType() == MVT::v32i16 && "Bad operand type!"); | |||
15497 | assert(Mask.size() == 32 && "Unexpected mask size for v32 shuffle!"); | |||
15498 | assert(Subtarget.hasBWI() && "We can only lower v32i16 with AVX-512-BWI!"); | |||
15499 | ||||
15500 | // Whenever we can lower this as a zext, that instruction is strictly faster | |||
15501 | // than any alternative. It also allows us to fold memory operands into the | |||
15502 | // shuffle in many cases. | |||
15503 | if (SDValue ZExt = lowerVectorShuffleAsZeroOrAnyExtend( | |||
15504 | DL, MVT::v32i16, V1, V2, Mask, Zeroable, Subtarget, DAG)) | |||
15505 | return ZExt; | |||
15506 | ||||
15507 | // Use dedicated unpack instructions for masks that match their pattern. | |||
15508 | if (SDValue V = | |||
15509 | lowerVectorShuffleWithUNPCK(DL, MVT::v32i16, Mask, V1, V2, DAG)) | |||
15510 | return V; | |||
15511 | ||||
15512 | // Try to use shift instructions. | |||
15513 | if (SDValue Shift = lowerVectorShuffleAsShift(DL, MVT::v32i16, V1, V2, Mask, | |||
15514 | Zeroable, Subtarget, DAG)) | |||
15515 | return Shift; | |||
15516 | ||||
15517 | // Try to use byte rotation instructions. | |||
15518 | if (SDValue Rotate = lowerVectorShuffleAsByteRotate( | |||
15519 | DL, MVT::v32i16, V1, V2, Mask, Subtarget, DAG)) | |||
15520 | return Rotate; | |||
15521 | ||||
15522 | if (V2.isUndef()) { | |||
15523 | SmallVector<int, 8> RepeatedMask; | |||
15524 | if (is128BitLaneRepeatedShuffleMask(MVT::v32i16, Mask, RepeatedMask)) { | |||
15525 | // As this is a single-input shuffle, the repeated mask should be | |||
15526 | // a strictly valid v8i16 mask that we can pass through to the v8i16 | |||
15527 | // lowering to handle even the v32 case. | |||
15528 | return lowerV8I16GeneralSingleInputVectorShuffle( | |||
15529 | DL, MVT::v32i16, V1, RepeatedMask, Subtarget, DAG); | |||
15530 | } | |||
15531 | } | |||
15532 | ||||
15533 | if (SDValue Blend = lowerVectorShuffleAsBlend(DL, MVT::v32i16, V1, V2, Mask, | |||
15534 | Zeroable, Subtarget, DAG)) | |||
15535 | return Blend; | |||
15536 | ||||
15537 | if (SDValue PSHUFB = lowerVectorShuffleWithPSHUFB( | |||
15538 | DL, MVT::v32i16, Mask, V1, V2, Zeroable, Subtarget, DAG)) | |||
15539 | return PSHUFB; | |||
15540 | ||||
15541 | return lowerVectorShuffleWithPERMV(DL, MVT::v32i16, Mask, V1, V2, DAG); | |||
15542 | } | |||
15543 | ||||
15544 | /// Handle lowering of 64-lane 8-bit integer shuffles. | |||
15545 | static SDValue lowerV64I8VectorShuffle(const SDLoc &DL, ArrayRef<int> Mask, | |||
15546 | const APInt &Zeroable, | |||
15547 | SDValue V1, SDValue V2, | |||
15548 | const X86Subtarget &Subtarget, | |||
15549 | SelectionDAG &DAG) { | |||
15550 | assert(V1.getSimpleValueType() == MVT::v64i8 && "Bad operand type!")((V1.getSimpleValueType() == MVT::v64i8 && "Bad operand type!" ) ? static_cast<void> (0) : __assert_fail ("V1.getSimpleValueType() == MVT::v64i8 && \"Bad operand type!\"" , "/build/llvm-toolchain-snapshot-8~svn350071/lib/Target/X86/X86ISelLowering.cpp" , 15550, __PRETTY_FUNCTION__)); | |||
15551 | assert(V2.getSimpleValueType() == MVT::v64i8 && "Bad operand type!")((V2.getSimpleValueType() == MVT::v64i8 && "Bad operand type!" ) ? static_cast<void> (0) : __assert_fail ("V2.getSimpleValueType() == MVT::v64i8 && \"Bad operand type!\"" , "/build/llvm-toolchain-snapshot-8~svn350071/lib/Target/X86/X86ISelLowering.cpp" , 15551, __PRETTY_FUNCTION__)); | |||
15552 | assert(Mask.size() == 64 && "Unexpected mask size for v64 shuffle!")((Mask.size() == 64 && "Unexpected mask size for v64 shuffle!" ) ? static_cast<void> (0) : __assert_fail ("Mask.size() == 64 && \"Unexpected mask size for v64 shuffle!\"" , "/build/llvm-toolchain-snapshot-8~svn350071/lib/Target/X86/X86ISelLowering.cpp" , 15552, __PRETTY_FUNCTION__)); | |||
15553 | assert(Subtarget.hasBWI() && "We can only lower v64i8 with AVX-512-BWI!")((Subtarget.hasBWI() && "We can only lower v64i8 with AVX-512-BWI!" ) ? static_cast<void> (0) : __assert_fail ("Subtarget.hasBWI() && \"We can only lower v64i8 with AVX-512-BWI!\"" , "/build/llvm-toolchain-snapshot-8~svn350071/lib/Target/X86/X86ISelLowering.cpp" , 15553, __PRETTY_FUNCTION__)); | |||
15554 | ||||
15555 | // Whenever we can lower this as a zext, that instruction is strictly faster | |||
15556 | // than any alternative. It also allows us to fold memory operands into the | |||
15557 | // shuffle in many cases. | |||
15558 | if (SDValue ZExt = lowerVectorShuffleAsZeroOrAnyExtend( | |||
15559 | DL, MVT::v64i8, V1, V2, Mask, Zeroable, Subtarget, DAG)) | |||
15560 | return ZExt; | |||
15561 | ||||
15562 | // Use dedicated unpack instructions for masks that match their pattern. | |||
15563 | if (SDValue V = | |||
15564 | lowerVectorShuffleWithUNPCK(DL, MVT::v64i8, Mask, V1, V2, DAG)) | |||
15565 | return V; | |||
15566 | ||||
15567 | // Use dedicated pack instructions for masks that match their pattern. | |||
15568 | if (SDValue V = lowerVectorShuffleWithPACK(DL, MVT::v64i8, Mask, V1, V2, DAG, | |||
15569 | Subtarget)) | |||
15570 | return V; | |||
15571 | ||||
15572 | // Try to use shift instructions. | |||
15573 | if (SDValue Shift = lowerVectorShuffleAsShift(DL, MVT::v64i8, V1, V2, Mask, | |||
15574 | Zeroable, Subtarget, DAG)) | |||
15575 | return Shift; | |||
15576 | ||||
15577 | // Try to use byte rotation instructions. | |||
15578 | if (SDValue Rotate = lowerVectorShuffleAsByteRotate( | |||
15579 | DL, MVT::v64i8, V1, V2, Mask, Subtarget, DAG)) | |||
15580 | return Rotate; | |||
15581 | ||||
15582 | if (SDValue PSHUFB = lowerVectorShuffleWithPSHUFB( | |||
15583 | DL, MVT::v64i8, Mask, V1, V2, Zeroable, Subtarget, DAG)) | |||
15584 | return PSHUFB; | |||
15585 | ||||
15586 | // VBMI can use VPERMV/VPERMV3 byte shuffles. | |||
15587 | if (Subtarget.hasVBMI()) | |||
15588 | return lowerVectorShuffleWithPERMV(DL, MVT::v64i8, Mask, V1, V2, DAG); | |||
15589 | ||||
15590 | // Try to create an in-lane repeating shuffle mask and then shuffle the | |||
15591 | // results into the target lanes. | |||
15592 | if (SDValue V = lowerShuffleAsRepeatedMaskAndLanePermute( | |||
15593 | DL, MVT::v64i8, V1, V2, Mask, Subtarget, DAG)) | |||
15594 | return V; | |||
15595 | ||||
15596 | if (SDValue Blend = lowerVectorShuffleAsBlend(DL, MVT::v64i8, V1, V2, Mask, | |||
15597 | Zeroable, Subtarget, DAG)) | |||
15598 | return Blend; | |||
15599 | ||||
15600 | // FIXME: Implement direct support for this type! | |||
15601 | return splitAndLowerVectorShuffle(DL, MVT::v64i8, V1, V2, Mask, DAG); | |||
15602 | } | |||
15603 | ||||
15604 | /// High-level routine to lower various 512-bit x86 vector shuffles. | |||
15605 | /// | |||
15606 | /// This routine either breaks down the specific type of a 512-bit x86 vector | |||
15607 | /// shuffle or splits it into two 256-bit shuffles and fuses the results back | |||
15608 | /// together based on the available instructions. | |||
15609 | static SDValue lower512BitVectorShuffle(const SDLoc &DL, ArrayRef<int> Mask, | |||
15610 | MVT VT, SDValue V1, SDValue V2, | |||
15611 | const APInt &Zeroable, | |||
15612 | const X86Subtarget &Subtarget, | |||
15613 | SelectionDAG &DAG) { | |||
15614 | assert(Subtarget.hasAVX512() &&((Subtarget.hasAVX512() && "Cannot lower 512-bit vectors w/ basic ISA!" ) ? static_cast<void> (0) : __assert_fail ("Subtarget.hasAVX512() && \"Cannot lower 512-bit vectors w/ basic ISA!\"" , "/build/llvm-toolchain-snapshot-8~svn350071/lib/Target/X86/X86ISelLowering.cpp" , 15615, __PRETTY_FUNCTION__)) | |||
15615 | "Cannot lower 512-bit vectors w/ basic ISA!")((Subtarget.hasAVX512() && "Cannot lower 512-bit vectors w/ basic ISA!" ) ? static_cast<void> (0) : __assert_fail ("Subtarget.hasAVX512() && \"Cannot lower 512-bit vectors w/ basic ISA!\"" , "/build/llvm-toolchain-snapshot-8~svn350071/lib/Target/X86/X86ISelLowering.cpp" , 15615, __PRETTY_FUNCTION__)); | |||
15616 | ||||
15617 | // If we have a single input to the zero element, insert that into V1 if we | |||
15618 | // can do so cheaply. | |||
15619 | int NumElts = Mask.size(); | |||
15620 | int NumV2Elements = count_if(Mask, [NumElts](int M) { return M >= NumElts; }); | |||
15621 | ||||
15622 | if (NumV2Elements == 1 && Mask[0] >= NumElts) | |||
15623 | if (SDValue Insertion = lowerVectorShuffleAsElementInsertion( | |||
15624 | DL, VT, V1, V2, Mask, Zeroable, Subtarget, DAG)) | |||
15625 | return Insertion; | |||
15626 | ||||
15627 | // Handle special cases where the lower or upper half is UNDEF. | |||
15628 | if (SDValue V = | |||
15629 | lowerVectorShuffleWithUndefHalf(DL, VT, V1, V2, Mask, Subtarget, DAG)) | |||
15630 | return V; | |||
15631 | ||||
15632 | // Check for being able to broadcast a single element. | |||
15633 | if (SDValue Broadcast = | |||
15634 | lowerVectorShuffleAsBroadcast(DL, VT, V1, V2, Mask, Subtarget, DAG)) | |||
15635 | return Broadcast; | |||
15636 | ||||
15637 | // Dispatch to each element type for lowering. If we don't have support for | |||
15638 | // specific element type shuffles at 512 bits, immediately split them and | |||
15639 | // lower them. Each lowering routine of a given type is allowed to assume that | |||
15640 | // the requisite ISA extensions for that element type are available. | |||
15641 | switch (VT.SimpleTy) { | |||
15642 | case MVT::v8f64: | |||
15643 | return lowerV8F64VectorShuffle(DL, Mask, Zeroable, V1, V2, Subtarget, DAG); | |||
15644 | case MVT::v16f32: | |||
15645 | return lowerV16F32VectorShuffle(DL, Mask, Zeroable, V1, V2, Subtarget, DAG); | |||
15646 | case MVT::v8i64: | |||
15647 | return lowerV8I64VectorShuffle(DL, Mask, Zeroable, V1, V2, Subtarget, DAG); | |||
15648 | case MVT::v16i32: | |||
15649 | return lowerV16I32VectorShuffle(DL, Mask, Zeroable, V1, V2, Subtarget, DAG); | |||
15650 | case MVT::v32i16: | |||
15651 | return lowerV32I16VectorShuffle(DL, Mask, Zeroable, V1, V2, Subtarget, DAG); | |||
15652 | case MVT::v64i8: | |||
15653 | return lowerV64I8VectorShuffle(DL, Mask, Zeroable, V1, V2, Subtarget, DAG); | |||
15654 | ||||
15655 | default: | |||
15656 | llvm_unreachable("Not a valid 512-bit x86 vector type!")::llvm::llvm_unreachable_internal("Not a valid 512-bit x86 vector type!" , "/build/llvm-toolchain-snapshot-8~svn350071/lib/Target/X86/X86ISelLowering.cpp" , 15656); | |||
15657 | } | |||
15658 | } | |||
15659 | ||||
15660 | // Determine if this shuffle can be implemented with a KSHIFT instruction. | |||
15661 | // Returns the shift amount if possible or -1 if not. This is a simplified | |||
15662 | // version of matchVectorShuffleAsShift. | |||
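// A sketch of what this matches: for a v8i1 shuffle with MaskOffset 0, the
// mask {2,3,4,5,6,7,Z,Z} (Z = zeroable) is matched as a KSHIFTR by 2, and
// {Z,Z,0,1,2,3,4,5} as a KSHIFTL by 2.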
static int match1BitShuffleAsKSHIFT(unsigned &Opcode, ArrayRef<int> Mask,
                                    int MaskOffset, const APInt &Zeroable) {
  int Size = Mask.size();

  auto CheckZeros = [&](int Shift, bool Left) {
    for (int j = 0; j < Shift; ++j)
      if (!Zeroable[j + (Left ? 0 : (Size - Shift))])
        return false;

    return true;
  };

  auto MatchShift = [&](int Shift, bool Left) {
    unsigned Pos = Left ? Shift : 0;
    unsigned Low = Left ? 0 : Shift;
    unsigned Len = Size - Shift;
    return isSequentialOrUndefInRange(Mask, Pos, Len, Low + MaskOffset);
  };

  for (int Shift = 1; Shift != Size; ++Shift)
    for (bool Left : {true, false})
      if (CheckZeros(Shift, Left) && MatchShift(Shift, Left)) {
        Opcode = Left ? X86ISD::KSHIFTL : X86ISD::KSHIFTR;
        return Shift;
      }

  return -1;
}

// Lower vXi1 vector shuffles.
// There is no dedicated instruction on AVX-512 that shuffles the masks.
// The only way to shuffle bits is to sign-extend the mask vector to a SIMD
// vector, shuffle, and then truncate it back.
static SDValue lower1BitVectorShuffle(const SDLoc &DL, ArrayRef<int> Mask,
                                      MVT VT, SDValue V1, SDValue V2,
                                      const APInt &Zeroable,
                                      const X86Subtarget &Subtarget,
                                      SelectionDAG &DAG) {
  assert(Subtarget.hasAVX512() &&
         "Cannot lower 512-bit vectors w/o basic ISA!");

  unsigned NumElts = Mask.size();

  // Try to recognize shuffles that are just padding a subvector with zeros.
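  // E.g. a v8i1 mask {0,1,2,3,Z,Z,Z,Z} (Z = zeroable) keeps the low v4i1
  // subvector and zeros the rest, so it becomes an insert_subvector into a
  // zero vector (a sketch of the case handled below).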
  unsigned SubvecElts = 0;
  for (int i = 0; i != (int)NumElts; ++i) {
    if (Mask[i] >= 0 && Mask[i] != i)
      break;

    ++SubvecElts;
  }
  assert(SubvecElts != NumElts && "Identity shuffle?");

  // Clip to a power of 2.
  SubvecElts = PowerOf2Floor(SubvecElts);

  // Make sure the number of zeroable bits at the top at least covers the bits
  // not covered by the subvector.
  if (Zeroable.countLeadingOnes() >= (NumElts - SubvecElts)) {
    MVT ExtractVT = MVT::getVectorVT(MVT::i1, SubvecElts);
    SDValue Extract = DAG.getNode(ISD::EXTRACT_SUBVECTOR, DL, ExtractVT,
                                  V1, DAG.getIntPtrConstant(0, DL));
    return DAG.getNode(ISD::INSERT_SUBVECTOR, DL, VT,
                       getZeroVector(VT, Subtarget, DAG, DL),
                       Extract, DAG.getIntPtrConstant(0, DL));
  }

  // Try to match KSHIFTs.
  // TODO: Support narrower than legal shifts by widening and extracting.
  if (NumElts >= 16 || (Subtarget.hasDQI() && NumElts == 8)) {
    unsigned Offset = 0;
    for (SDValue V : { V1, V2 }) {
      unsigned Opcode;
      int ShiftAmt = match1BitShuffleAsKSHIFT(Opcode, Mask, Offset, Zeroable);
      if (ShiftAmt >= 0)
        return DAG.getNode(Opcode, DL, VT, V,
                           DAG.getConstant(ShiftAmt, DL, MVT::i8));
      Offset += NumElts; // Increment for next iteration.
    }
  }

  MVT ExtVT;
  switch (VT.SimpleTy) {
  default:
    llvm_unreachable("Expected a vector of i1 elements");
  case MVT::v2i1:
    ExtVT = MVT::v2i64;
    break;
  case MVT::v4i1:
    ExtVT = MVT::v4i32;
    break;
  case MVT::v8i1:
    // Take a 512-bit type so that more shuffle patterns are available on KNL;
    // if we have VLX, use a 256-bit shuffle instead.
    ExtVT = Subtarget.hasVLX() ? MVT::v8i32 : MVT::v8i64;
    break;
  case MVT::v16i1:
    // Take 512-bit type, unless we are avoiding 512-bit types and have the
    // 256-bit operation available.
    ExtVT = Subtarget.canExtendTo512DQ() ? MVT::v16i32 : MVT::v16i16;
    break;
  case MVT::v32i1:
    // Take 512-bit type, unless we are avoiding 512-bit types and have the
    // 256-bit operation available.
    assert(Subtarget.hasBWI() && "Expected AVX512BW support");
    ExtVT = Subtarget.canExtendTo512BW() ? MVT::v32i16 : MVT::v32i8;
    break;
  case MVT::v64i1:
    ExtVT = MVT::v64i8;
    break;
  }

  V1 = DAG.getNode(ISD::SIGN_EXTEND, DL, ExtVT, V1);
  V2 = DAG.getNode(ISD::SIGN_EXTEND, DL, ExtVT, V2);

  SDValue Shuffle = DAG.getVectorShuffle(ExtVT, DL, V1, V2, Mask);
  // The i1 elements were sign-extended, so we can recover the mask by testing
  // whether each element is negative.
  int NumElems = VT.getVectorNumElements();
  if ((Subtarget.hasBWI() && (NumElems >= 32)) ||
      (Subtarget.hasDQI() && (NumElems < 32)))
    return DAG.getSetCC(DL, VT, DAG.getConstant(0, DL, ExtVT),
                        Shuffle, ISD::SETGT);

  return DAG.getNode(ISD::TRUNCATE, DL, VT, Shuffle);
}

/// Helper function that returns true if the shuffle mask should be
/// commuted to improve canonicalization.
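// For instance (a sketch): the v4i32 mask {4,5,6,3} takes three elements from
// V2 and only one from V1, so this returns true and the commuted mask
// {0,1,2,7} is lowered instead.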
static bool canonicalizeShuffleMaskWithCommute(ArrayRef<int> Mask) {
  int NumElements = Mask.size();

  int NumV1Elements = 0, NumV2Elements = 0;
  for (int M : Mask)
    if (M < 0)
      continue;
    else if (M < NumElements)
      ++NumV1Elements;
    else
      ++NumV2Elements;

  // Commute the shuffle as needed such that more elements come from V1 than
  // V2. This allows us to match the shuffle pattern strictly on how many
  // elements come from V1 without handling the symmetric cases.
  if (NumV2Elements > NumV1Elements)
    return true;

  assert(NumV1Elements > 0 && "No V1 indices");

  if (NumV2Elements == 0)
    return false;

  // When the number of V1 and V2 elements are the same, try to minimize the
  // number of uses of V2 in the low half of the vector. When that is tied,
  // ensure that the sum of indices for V1 is equal to or lower than the sum
  // of indices for V2. When those are equal, try to ensure that the number of
  // odd indices for V1 is lower than the number of odd indices for V2.
  if (NumV1Elements == NumV2Elements) {
    int LowV1Elements = 0, LowV2Elements = 0;
    for (int M : Mask.slice(0, NumElements / 2))
      if (M >= NumElements)
        ++LowV2Elements;
      else if (M >= 0)
        ++LowV1Elements;
    if (LowV2Elements > LowV1Elements)
      return true;
    if (LowV2Elements == LowV1Elements) {
      int SumV1Indices = 0, SumV2Indices = 0;
      for (int i = 0, Size = Mask.size(); i < Size; ++i)
        if (Mask[i] >= NumElements)
          SumV2Indices += i;
        else if (Mask[i] >= 0)
          SumV1Indices += i;
      if (SumV2Indices < SumV1Indices)
        return true;
      if (SumV2Indices == SumV1Indices) {
        int NumV1OddIndices = 0, NumV2OddIndices = 0;
        for (int i = 0, Size = Mask.size(); i < Size; ++i)
          if (Mask[i] >= NumElements)
            NumV2OddIndices += i % 2;
          else if (Mask[i] >= 0)
            NumV1OddIndices += i % 2;
        if (NumV2OddIndices < NumV1OddIndices)
          return true;
      }
    }
  }

  return false;
}

/// Top-level lowering for x86 vector shuffles.
///
/// This handles decomposition, canonicalization, and lowering of all x86
/// vector shuffles. Most of the specific lowering strategies are encapsulated
/// above in helper routines. The canonicalization attempts to widen shuffles
/// to involve fewer lanes of wider elements, consolidate symmetric patterns
/// s.t. only one of the two inputs needs to be tested, etc.
static SDValue lowerVectorShuffle(SDValue Op, const X86Subtarget &Subtarget,
                                  SelectionDAG &DAG) {
  ShuffleVectorSDNode *SVOp = cast<ShuffleVectorSDNode>(Op);
  ArrayRef<int> Mask = SVOp->getMask();
  SDValue V1 = Op.getOperand(0);
  SDValue V2 = Op.getOperand(1);
  MVT VT = Op.getSimpleValueType();
  int NumElements = VT.getVectorNumElements();
  SDLoc DL(Op);
  bool Is1BitVector = (VT.getVectorElementType() == MVT::i1);

  assert((VT.getSizeInBits() != 64 || Is1BitVector) &&
         "Can't lower MMX shuffles");

  bool V1IsUndef = V1.isUndef();
  bool V2IsUndef = V2.isUndef();
  if (V1IsUndef && V2IsUndef)
    return DAG.getUNDEF(VT);

  // When we create a shuffle node we put the UNDEF node in the second
  // operand, but in some cases the first operand may be transformed to UNDEF.
  // In this case we should just commute the node.
  if (V1IsUndef)
    return DAG.getCommutedVectorShuffle(*SVOp);

  // Check for non-undef masks pointing at an undef vector and make the masks
  // undef as well. This makes it easier to match the shuffle based solely on
  // the mask.
  if (V2IsUndef)
    for (int M : Mask)
      if (M >= NumElements) {
        SmallVector<int, 8> NewMask(Mask.begin(), Mask.end());
        for (int &M : NewMask)
          if (M >= NumElements)
            M = -1;
        return DAG.getVectorShuffle(VT, DL, V1, V2, NewMask);
      }

  // Check for illegal shuffle mask element index values.
  int MaskUpperLimit = Mask.size() * (V2IsUndef ? 1 : 2); (void)MaskUpperLimit;
  assert(llvm::all_of(Mask,
                      [&](int M) { return -1 <= M && M < MaskUpperLimit; }) &&
         "Out of bounds shuffle index");

  // We actually see shuffles that are entirely re-arrangements of a set of
  // zero inputs. This mostly happens while decomposing complex shuffles into
  // simple ones. Directly lower these as a buildvector of zeros.
  APInt Zeroable = computeZeroableShuffleElements(Mask, V1, V2);
  if (Zeroable.isAllOnesValue())
    return getZeroVector(VT, Subtarget, DAG, DL);

  bool V2IsZero = !V2IsUndef && ISD::isBuildVectorAllZeros(V2.getNode());

  // Create an alternative mask with info about zeroable elements.
  // Here we do not set undef elements as zeroable.
  SmallVector<int, 64> ZeroableMask(Mask.begin(), Mask.end());
  if (V2IsZero) {
    assert(!Zeroable.isNullValue() && "V2's non-undef elements are used?!");
    for (int i = 0; i != NumElements; ++i)
      if (Mask[i] != SM_SentinelUndef && Zeroable[i])
        ZeroableMask[i] = SM_SentinelZero;
  }

  // Try to collapse shuffles into using a vector type with fewer elements but
  // wider element types. We cap this to not form integers or floating point
  // elements wider than 64 bits, but it might be interesting to form i128
  // integers to handle flipping the low and high halves of AVX 256-bit vectors.
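  // For instance (a sketch): the v4i32 mask {0,1,4,5} moves whole 64-bit
  // halves, so it widens to the v2i64 mask {0,2}.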
  SmallVector<int, 16> WidenedMask;
  if (VT.getScalarSizeInBits() < 64 && !Is1BitVector &&
      canWidenShuffleElements(ZeroableMask, WidenedMask)) {
    // Shuffle mask widening should not interfere with a broadcast opportunity
    // by obfuscating the operands with bitcasts.
    // TODO: Avoid lowering directly from this top-level function: make this
    // a query (canLowerAsBroadcast) and defer lowering to the type-based calls.
    if (SDValue Broadcast =
            lowerVectorShuffleAsBroadcast(DL, VT, V1, V2, Mask, Subtarget, DAG))
      return Broadcast;

    MVT NewEltVT = VT.isFloatingPoint()
                       ? MVT::getFloatingPointVT(VT.getScalarSizeInBits() * 2)
                       : MVT::getIntegerVT(VT.getScalarSizeInBits() * 2);
    int NewNumElts = NumElements / 2;
    MVT NewVT = MVT::getVectorVT(NewEltVT, NewNumElts);
    // Make sure that the new vector type is legal. For example, v2f64 isn't
    // legal on SSE1.
    if (DAG.getTargetLoweringInfo().isTypeLegal(NewVT)) {
      if (V2IsZero) {
        // Modify the new Mask to take all zeros from the all-zero vector.
        // Choose indices that are blend-friendly.
        bool UsedZeroVector = false;
        assert(find(WidenedMask, SM_SentinelZero) != WidenedMask.end() &&
               "V2's non-undef elements are used?!");
        for (int i = 0; i != NewNumElts; ++i)
          if (WidenedMask[i] == SM_SentinelZero) {
            WidenedMask[i] = i + NewNumElts;
            UsedZeroVector = true;
          }
        // Ensure all elements of V2 are zero - isBuildVectorAllZeros permits
        // some elements to be undef.
        if (UsedZeroVector)
          V2 = getZeroVector(NewVT, Subtarget, DAG, DL);
      }
      V1 = DAG.getBitcast(NewVT, V1);
      V2 = DAG.getBitcast(NewVT, V2);
      return DAG.getBitcast(
          VT, DAG.getVectorShuffle(NewVT, DL, V1, V2, WidenedMask));
    }
  }

  // Commute the shuffle if it will improve canonicalization.
  if (canonicalizeShuffleMaskWithCommute(Mask))
    return DAG.getCommutedVectorShuffle(*SVOp);

  if (SDValue V =
          lowerVectorShuffleWithVPMOV(DL, Mask, VT, V1, V2, DAG, Subtarget))
    return V;

  // For each vector width, delegate to a specialized lowering routine.
  if (VT.is128BitVector())
    return lower128BitVectorShuffle(DL, Mask, VT, V1, V2, Zeroable, Subtarget,
                                    DAG);

  if (VT.is256BitVector())
    return lower256BitVectorShuffle(DL, Mask, VT, V1, V2, Zeroable, Subtarget,
                                    DAG);

  if (VT.is512BitVector())
    return lower512BitVectorShuffle(DL, Mask, VT, V1, V2, Zeroable, Subtarget,
                                    DAG);

  if (Is1BitVector)
    return lower1BitVectorShuffle(DL, Mask, VT, V1, V2, Zeroable, Subtarget,
                                  DAG);

  llvm_unreachable("Unimplemented!");
}

/// Try to lower a VSELECT instruction to a vector shuffle.
static SDValue lowerVSELECTtoVectorShuffle(SDValue Op,
                                           const X86Subtarget &Subtarget,
                                           SelectionDAG &DAG) {
  SDValue Cond = Op.getOperand(0);
  SDValue LHS = Op.getOperand(1);
  SDValue RHS = Op.getOperand(2);
  MVT VT = Op.getSimpleValueType();

  // Only non-legal VSELECTs reach this lowering, convert those into generic
  // shuffles and re-use the shuffle lowering path for blends.
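  // E.g. a vselect with the constant v4i1 condition <1,0,0,1> becomes the
  // shuffle mask {0,5,6,3}: true lanes read LHS (indices 0..3) and false
  // lanes read RHS (indices 4..7). A sketch, assuming that convention in
  // createShuffleMaskFromVSELECT.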
  SmallVector<int, 32> Mask;
  if (createShuffleMaskFromVSELECT(Mask, Cond))
    return DAG.getVectorShuffle(VT, SDLoc(Op), LHS, RHS, Mask);

  return SDValue();
}

SDValue X86TargetLowering::LowerVSELECT(SDValue Op, SelectionDAG &DAG) const {
  SDValue Cond = Op.getOperand(0);
  SDValue LHS = Op.getOperand(1);
  SDValue RHS = Op.getOperand(2);

  // A vselect where all conditions and data are constants can be optimized into
  // a single vector load by SelectionDAGLegalize::ExpandBUILD_VECTOR().
  if (ISD::isBuildVectorOfConstantSDNodes(Cond.getNode()) &&
      ISD::isBuildVectorOfConstantSDNodes(LHS.getNode()) &&
      ISD::isBuildVectorOfConstantSDNodes(RHS.getNode()))
    return SDValue();

  // Try to lower this to a blend-style vector shuffle. This can handle all
  // constant condition cases.
  if (SDValue BlendOp = lowerVSELECTtoVectorShuffle(Op, Subtarget, DAG))
    return BlendOp;

  // If this VSELECT has a vector of i1 as a mask, it will be directly matched
  // with patterns on the mask registers on AVX-512.
  MVT CondVT = Cond.getSimpleValueType();
  unsigned CondEltSize = Cond.getScalarValueSizeInBits();
  if (CondEltSize == 1)
    return Op;

  // Variable blends are only legal from SSE4.1 onward.
  if (!Subtarget.hasSSE41())
    return SDValue();

  SDLoc dl(Op);
  MVT VT = Op.getSimpleValueType();
  unsigned EltSize = VT.getScalarSizeInBits();
  unsigned NumElts = VT.getVectorNumElements();

  // If the VSELECT is on a 512-bit type, we have to convert a non-i1 condition
  // into an i1 condition so that we can use the mask-based 512-bit blend
  // instructions.
  if (VT.getSizeInBits() == 512) {
    // Build a mask by testing the condition against zero.
    MVT MaskVT = MVT::getVectorVT(MVT::i1, NumElts);
    SDValue Mask = DAG.getSetCC(dl, MaskVT, Cond,
                                DAG.getConstant(0, dl, CondVT),
                                ISD::SETNE);
    // Now return a new VSELECT using the mask.
    return DAG.getSelect(dl, VT, Mask, LHS, RHS);
  }

  // SEXT/TRUNC cases where the mask doesn't match the destination size.
  if (CondEltSize != EltSize) {
    // If we don't have a sign splat, rely on the expansion.
    if (CondEltSize != DAG.ComputeNumSignBits(Cond))
      return SDValue();

    MVT NewCondSVT = MVT::getIntegerVT(EltSize);
    MVT NewCondVT = MVT::getVectorVT(NewCondSVT, NumElts);
    Cond = DAG.getSExtOrTrunc(Cond, dl, NewCondVT);
    return DAG.getNode(ISD::VSELECT, dl, VT, Cond, LHS, RHS);
  }

  // Only some types will be legal on some subtargets. If we can emit a legal
  // VSELECT-matching blend, return Op; if we need to expand, return a null
  // value.
  switch (VT.SimpleTy) {
  default:
    // Most of the vector types have blends past SSE4.1.
    return Op;

  case MVT::v32i8:
    // The byte blends for AVX vectors were introduced only in AVX2.
    if (Subtarget.hasAVX2())
      return Op;

    return SDValue();

  case MVT::v8i16:
  case MVT::v16i16: {
    // Bitcast everything to the vXi8 type and use a vXi8 vselect.
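    // Each i16 condition element is all-ones or all-zeros here, so after the
    // bitcast both of its i8 halves still select the same operand (a sketch
    // of why the cast preserves the blend semantics).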
    MVT CastVT = MVT::getVectorVT(MVT::i8, NumElts * 2);
    Cond = DAG.getBitcast(CastVT, Cond);
    LHS = DAG.getBitcast(CastVT, LHS);
    RHS = DAG.getBitcast(CastVT, RHS);
    SDValue Select = DAG.getNode(ISD::VSELECT, dl, CastVT, Cond, LHS, RHS);
    return DAG.getBitcast(VT, Select);
  }
  }
}

static SDValue LowerEXTRACT_VECTOR_ELT_SSE4(SDValue Op, SelectionDAG &DAG) {
  MVT VT = Op.getSimpleValueType();
  SDLoc dl(Op);

  if (!Op.getOperand(0).getSimpleValueType().is128BitVector())
    return SDValue();

  if (VT.getSizeInBits() == 8) {
    SDValue Extract = DAG.getNode(X86ISD::PEXTRB, dl, MVT::i32,
                                  Op.getOperand(0), Op.getOperand(1));
    return DAG.getNode(ISD::TRUNCATE, dl, VT, Extract);
  }

  if (VT == MVT::f32) {
    // EXTRACTPS outputs to a GPR32 register which will require a movd to copy
    // the result back to FR32 register. It's only worth matching if the
    // result has a single use which is a store or a bitcast to i32. And in
    // the case of a store, it's not worth it if the index is a constant 0,
    // because a MOVSSmr can be used instead, which is smaller and faster.
    if (!Op.hasOneUse())
      return SDValue();
    SDNode *User = *Op.getNode()->use_begin();
    if ((User->getOpcode() != ISD::STORE ||
         isNullConstant(Op.getOperand(1))) &&
        (User->getOpcode() != ISD::BITCAST ||
         User->getValueType(0) != MVT::i32))
      return SDValue();
    SDValue Extract = DAG.getNode(ISD::EXTRACT_VECTOR_ELT, dl, MVT::i32,
                                  DAG.getBitcast(MVT::v4i32, Op.getOperand(0)),
                                  Op.getOperand(1));
    return DAG.getBitcast(MVT::f32, Extract);
  }

  if (VT == MVT::i32 || VT == MVT::i64) {
    // EXTRACTPS/PEXTRQ work with a constant index.
    if (isa<ConstantSDNode>(Op.getOperand(1)))
      return Op;
  }

  return SDValue();
}

/// Extract one bit from a mask vector, like v16i1 or v8i1.
/// AVX-512 feature.
static SDValue ExtractBitFromMaskVector(SDValue Op, SelectionDAG &DAG,
                                        const X86Subtarget &Subtarget) {
  SDValue Vec = Op.getOperand(0);
  SDLoc dl(Vec);
  MVT VecVT = Vec.getSimpleValueType();
  SDValue Idx = Op.getOperand(1);
  MVT EltVT = Op.getSimpleValueType();

  assert((VecVT.getVectorNumElements() <= 16 || Subtarget.hasBWI()) &&
         "Unexpected vector type in ExtractBitFromMaskVector");

  // A variable index can't be handled in mask registers, so extend the
  // vector to a VR512/VR128 register.
  if (!isa<ConstantSDNode>(Idx)) {
    unsigned NumElts = VecVT.getVectorNumElements();
    // Extending v8i1/v16i1 to 512 bits gets better performance on KNL
    // than extending to 128/256 bits.
    MVT ExtEltVT = (NumElts <= 8) ? MVT::getIntegerVT(128 / NumElts) : MVT::i8;
    MVT ExtVecVT = MVT::getVectorVT(ExtEltVT, NumElts);
    SDValue Ext = DAG.getNode(ISD::SIGN_EXTEND, dl, ExtVecVT, Vec);
    SDValue Elt = DAG.getNode(ISD::EXTRACT_VECTOR_ELT, dl, ExtEltVT, Ext, Idx);
    return DAG.getNode(ISD::TRUNCATE, dl, EltVT, Elt);
  }

  unsigned IdxVal = cast<ConstantSDNode>(Idx)->getZExtValue();

  // If the kshift instructions of the correct width aren't natively supported
  // then we need to promote the vector to the native size to get the correct
  // zeroing behavior.
  if (VecVT.getVectorNumElements() < 16) {
    VecVT = MVT::v16i1;
    Vec = DAG.getNode(ISD::INSERT_SUBVECTOR, dl, MVT::v16i1,
                      DAG.getUNDEF(VecVT), Vec,
                      DAG.getIntPtrConstant(0, dl));
  }

  // Extracts from element 0 are always allowed.
  if (IdxVal != 0) {
    // Use kshiftr instruction to move to the lower element.
    Vec = DAG.getNode(X86ISD::KSHIFTR, dl, VecVT, Vec,
                      DAG.getConstant(IdxVal, dl, MVT::i8));
  }
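  // E.g. extracting bit 5 of a v16i1 mask becomes a KSHIFTR by 5 followed by
  // a read of element 0 below (a sketch of the sequence being built).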

  // Shrink to v16i1 since that's always legal.
  if (VecVT.getVectorNumElements() > 16) {
    VecVT = MVT::v16i1;
    Vec = DAG.getNode(ISD::EXTRACT_SUBVECTOR, dl, VecVT, Vec,
                      DAG.getIntPtrConstant(0, dl));
  }

  // Convert to a bitcast+aext/trunc.
  MVT CastVT = MVT::getIntegerVT(VecVT.getVectorNumElements());
  return DAG.getAnyExtOrTrunc(DAG.getBitcast(CastVT, Vec), dl, EltVT);
}

SDValue
X86TargetLowering::LowerEXTRACT_VECTOR_ELT(SDValue Op,
                                           SelectionDAG &DAG) const {
  SDLoc dl(Op);
  SDValue Vec = Op.getOperand(0);
  MVT VecVT = Vec.getSimpleValueType();
  SDValue Idx = Op.getOperand(1);

  if (VecVT.getVectorElementType() == MVT::i1)
    return ExtractBitFromMaskVector(Op, DAG, Subtarget);

  if (!isa<ConstantSDNode>(Idx)) {
    // It's more profitable to go through memory (1 cycle throughput)
    // than to use a VMOVD + VPERMV/PSHUFB sequence (2/3 cycles throughput).
    // The IACA tool was used to get these performance estimates
    // (https://software.intel.com/en-us/articles/intel-architecture-code-analyzer)
    //
    // example : extractelement <16 x i8> %a, i32 %i
    //
    // Block Throughput: 3.00 Cycles
    // Throughput Bottleneck: Port5
    //
    // | Num Of |   Ports pressure in cycles  |    |
    // |  Uops  |  0  - DV  |  5  |  6  |  7  |    |
    // ---------------------------------------------
    // |   1    |           | 1.0 |     |     | CP | vmovd xmm1, edi
    // |   1    |           | 1.0 |     |     | CP | vpshufb xmm0, xmm0, xmm1
    // |   2    |    1.0    | 1.0 |     |     | CP | vpextrb eax, xmm0, 0x0
    // Total Num Of Uops: 4
    //
    //
    // Block Throughput: 1.00 Cycles
    // Throughput Bottleneck: PORT2_AGU, PORT3_AGU, Port4
    //
    // |    |  Ports pressure in cycles   |  |
    // |Uops| 1 | 2 - D  |3 -  D  | 4 | 5 |  |
    // ---------------------------------------------------------
    // |2^  |   | 0.5    | 0.5    |1.0|   |CP| vmovaps xmmword ptr [rsp-0x18], xmm0
    // |1   |0.5|        |        |   |0.5|  | lea rax, ptr [rsp-0x18]
    // |1   |   |0.5, 0.5|0.5, 0.5|   |   |CP| mov al, byte ptr [rdi+rax*1]
    // Total Num Of Uops: 4

    return SDValue();
  }

  unsigned IdxVal = cast<ConstantSDNode>(Idx)->getZExtValue();

  // If this is a 256-bit or 512-bit vector result, first extract the 128-bit
  // vector and then extract the element from that 128-bit vector.
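  // E.g. extracting element 5 of a v8f32 pulls out the upper 128-bit half and
  // then reads element 5 & 3 == 1 from it (a sketch of the path below).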
  if (VecVT.is256BitVector() || VecVT.is512BitVector()) {
    // Get the 128-bit vector.
    Vec = extract128BitVector(Vec, IdxVal, DAG, dl);
    MVT EltVT = VecVT.getVectorElementType();

    unsigned ElemsPerChunk = 128 / EltVT.getSizeInBits();
    assert(isPowerOf2_32(ElemsPerChunk) && "Elements per chunk not power of 2");

    // Find IdxVal modulo ElemsPerChunk. Since ElemsPerChunk is a power of 2
    // this can be done with a mask.
    IdxVal &= ElemsPerChunk - 1;
    return DAG.getNode(ISD::EXTRACT_VECTOR_ELT, dl, Op.getValueType(), Vec,
                       DAG.getConstant(IdxVal, dl, MVT::i32));
  }

  assert(VecVT.is128BitVector() && "Unexpected vector length");

  MVT VT = Op.getSimpleValueType();

  if (VT.getSizeInBits() == 16) {
    // If IdxVal is 0, it's cheaper to do a move instead of a pextrw, unless
    // we're going to zero extend the register or fold the store (SSE41 only).
    if (IdxVal == 0 && !MayFoldIntoZeroExtend(Op) &&
        !(Subtarget.hasSSE41() && MayFoldIntoStore(Op)))
      return DAG.getNode(ISD::TRUNCATE, dl, MVT::i16,
                         DAG.getNode(ISD::EXTRACT_VECTOR_ELT, dl, MVT::i32,
                                     DAG.getBitcast(MVT::v4i32, Vec), Idx));

    // Transform it so it matches pextrw, which produces a 32-bit result.
    SDValue Extract = DAG.getNode(X86ISD::PEXTRW, dl, MVT::i32,
                                  Op.getOperand(0), Op.getOperand(1));
    return DAG.getNode(ISD::TRUNCATE, dl, VT, Extract);
  }

  if (Subtarget.hasSSE41())
    if (SDValue Res = LowerEXTRACT_VECTOR_ELT_SSE4(Op, DAG))
      return Res;

  // TODO: We only extract a single element from v16i8, we can probably afford
  // to be more aggressive here before using the default approach of spilling to
  // stack.
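  // E.g. extracting byte 3 of a v16i8 (a sketch of the path below) reads
  // dword 0 as an i32, shifts it right by 24 bits, and truncates to i8.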
  if (VT.getSizeInBits() == 8 && Op->isOnlyUserOf(Vec.getNode())) {
    // Extract either the lowest i32 or any i16, and extract the sub-byte.
    int DWordIdx = IdxVal / 4;
    if (DWordIdx == 0) {
      SDValue Res = DAG.getNode(ISD::EXTRACT_VECTOR_ELT, dl, MVT::i32,
                                DAG.getBitcast(MVT::v4i32, Vec),
                                DAG.getIntPtrConstant(DWordIdx, dl));
      int ShiftVal = (IdxVal % 4) * 8;
      if (ShiftVal != 0)
        Res = DAG.getNode(ISD::SRL, dl, MVT::i32, Res,
                          DAG.getConstant(ShiftVal, dl, MVT::i8));
      return DAG.getNode(ISD::TRUNCATE, dl, VT, Res);
    }

    int WordIdx = IdxVal / 2;
    SDValue Res = DAG.getNode(ISD::EXTRACT_VECTOR_ELT, dl, MVT::i16,
                              DAG.getBitcast(MVT::v8i16, Vec),
                              DAG.getIntPtrConstant(WordIdx, dl));
    int ShiftVal = (IdxVal % 2) * 8;
    if (ShiftVal != 0)
      Res = DAG.getNode(ISD::SRL, dl, MVT::i16, Res,
                        DAG.getConstant(ShiftVal, dl, MVT::i8));
    return DAG.getNode(ISD::TRUNCATE, dl, VT, Res);
  }

  if (VT.getSizeInBits() == 32) {
    if (IdxVal == 0)
      return Op;

    // SHUFPS the element to the lowest double word, then movss.
    int Mask[4] = { static_cast<int>(IdxVal), -1, -1, -1 };
    Vec = DAG.getVectorShuffle(VecVT, dl, Vec, DAG.getUNDEF(VecVT), Mask);
    return DAG.getNode(ISD::EXTRACT_VECTOR_ELT, dl, VT, Vec,
                       DAG.getIntPtrConstant(0, dl));
  }

  if (VT.getSizeInBits() == 64) {
    // FIXME: .td only matches this for <2 x f64>, not <2 x i64> on 32b
    // FIXME: seems like this should be unnecessary if mov{h,l}pd were taught
    // to match extract_elt for f64.
    if (IdxVal == 0)
      return Op;

    // UNPCKHPD the element to the lowest double word, then movsd.
    // Note if the lower 64 bits of the result of the UNPCKHPD is then stored
    // to a f64mem, the whole operation is folded into a single MOVHPDmr.
    int Mask[2] = { 1, -1 };
    Vec = DAG.getVectorShuffle(VecVT, dl, Vec, DAG.getUNDEF(VecVT), Mask);
    return DAG.getNode(ISD::EXTRACT_VECTOR_ELT, dl, VT, Vec,
                       DAG.getIntPtrConstant(0, dl));
  }

  return SDValue();
}

/// Insert one bit into a mask vector, like v16i1 or v8i1.
/// AVX-512 feature.
static SDValue InsertBitToMaskVector(SDValue Op, SelectionDAG &DAG,
                                     const X86Subtarget &Subtarget) {
  SDLoc dl(Op);
  SDValue Vec = Op.getOperand(0);
  SDValue Elt = Op.getOperand(1);
  SDValue Idx = Op.getOperand(2);
  MVT VecVT = Vec.getSimpleValueType();

  if (!isa<ConstantSDNode>(Idx)) {
    // Non-constant index. Extend the source and destination, insert the
    // element, and then truncate the result.
    unsigned NumElts = VecVT.getVectorNumElements();
    MVT ExtEltVT = (NumElts <= 8) ? MVT::getIntegerVT(128 / NumElts) : MVT::i8;
    MVT ExtVecVT = MVT::getVectorVT(ExtEltVT, NumElts);
    SDValue ExtOp = DAG.getNode(ISD::INSERT_VECTOR_ELT, dl, ExtVecVT,
                                DAG.getNode(ISD::SIGN_EXTEND, dl, ExtVecVT, Vec),
                                DAG.getNode(ISD::SIGN_EXTEND, dl, ExtEltVT, Elt),
                                Idx);
    return DAG.getNode(ISD::TRUNCATE, dl, VecVT, ExtOp);
  }

  // Copy into a k-register, extract to v1i1 and insert_subvector.
  SDValue EltInVec = DAG.getNode(ISD::SCALAR_TO_VECTOR, dl, MVT::v1i1, Elt);

  return DAG.getNode(ISD::INSERT_SUBVECTOR, dl, VecVT, Vec, EltInVec,
                     Op.getOperand(2));
}

SDValue X86TargetLowering::LowerINSERT_VECTOR_ELT(SDValue Op,
                                                  SelectionDAG &DAG) const {
  MVT VT = Op.getSimpleValueType();
  MVT EltVT = VT.getVectorElementType();
  unsigned NumElts = VT.getVectorNumElements();

  if (EltVT == MVT::i1)
    return InsertBitToMaskVector(Op, DAG, Subtarget);

  SDLoc dl(Op);
  SDValue N0 = Op.getOperand(0);
  SDValue N1 = Op.getOperand(1);
  SDValue N2 = Op.getOperand(2);
  if (!isa<ConstantSDNode>(N2))
    return SDValue();
  auto *N2C = cast<ConstantSDNode>(N2);
  unsigned IdxVal = N2C->getZExtValue();

  bool IsZeroElt = X86::isZeroNode(N1);
  bool IsAllOnesElt = VT.isInteger() && llvm::isAllOnesConstant(N1);

  // If we are inserting an element, see if we can do this more efficiently
  // with a blend shuffle with a rematerializable vector than a costly integer
  // insertion.
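  // E.g. inserting zero into element 2 of a v8i16 uses the blend mask
  // {0,1,10,3,4,5,6,7}, taking lane 2 from the rematerialized zero vector
  // (a sketch of the mask built below).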
  if ((IsZeroElt || IsAllOnesElt) && Subtarget.hasSSE41() &&
      16 <= EltVT.getSizeInBits()) {
    SmallVector<int, 8> BlendMask;
    for (unsigned i = 0; i != NumElts; ++i)
      BlendMask.push_back(i == IdxVal ? i + NumElts : i);
    SDValue CstVector = IsZeroElt ? getZeroVector(VT, Subtarget, DAG, dl)
                                  : getOnesVector(VT, DAG, dl);
    return DAG.getVectorShuffle(VT, dl, N0, CstVector, BlendMask);
  }

  // If the vector is wider than 128 bits, extract the 128-bit subvector, insert
  // into that, and then insert the subvector back into the result.
  if (VT.is256BitVector() || VT.is512BitVector()) {
    // With a 256-bit vector, we can insert into the zero element efficiently
    // using a blend if we have AVX or AVX2 and the right data type.
    if (VT.is256BitVector() && IdxVal == 0) {
      // TODO: It is worthwhile to cast integer to floating point and back
      // and incur a domain crossing penalty if that's what we'll end up
      // doing anyway after extracting to a 128-bit vector.
      if ((Subtarget.hasAVX() && (EltVT == MVT::f64 || EltVT == MVT::f32)) ||
          (Subtarget.hasAVX2() && EltVT == MVT::i32)) {
        SDValue N1Vec = DAG.getNode(ISD::SCALAR_TO_VECTOR, dl, VT, N1);
        N2 = DAG.getIntPtrConstant(1, dl);
        return DAG.getNode(X86ISD::BLENDI, dl, VT, N0, N1Vec, N2);
      }
    }

    // Get the desired 128-bit vector chunk.
    SDValue V = extract128BitVector(N0, IdxVal, DAG, dl);

    // Insert the element into the desired chunk.
    unsigned NumEltsIn128 = 128 / EltVT.getSizeInBits();
    assert(isPowerOf2_32(NumEltsIn128));
    // Since NumEltsIn128 is a power of 2 we can use mask instead of modulo.
    unsigned IdxIn128 = IdxVal & (NumEltsIn128 - 1);

    V = DAG.getNode(ISD::INSERT_VECTOR_ELT, dl, V.getValueType(), V, N1,
                    DAG.getConstant(IdxIn128, dl, MVT::i32));

    // Insert the changed part back into the bigger vector.
    return insert128BitVector(N0, V, IdxVal, DAG, dl);
  }
  assert(VT.is128BitVector() && "Only 128-bit vector types should be left!");

  // Transform it so it matches pinsr{b,w}, which expects a GR32 as its second
  // argument. SSE41 required for pinsrb.
  if (VT == MVT::v8i16 || (VT == MVT::v16i8 && Subtarget.hasSSE41())) {
    unsigned Opc;
    if (VT == MVT::v8i16) {
      assert(Subtarget.hasSSE2() && "SSE2 required for PINSRW");
      Opc = X86ISD::PINSRW;
    } else {
      assert(VT == MVT::v16i8 && "PINSRB requires v16i8 vector");
      assert(Subtarget.hasSSE41() && "SSE41 required for PINSRB");
      Opc = X86ISD::PINSRB;
    }

    if (N1.getValueType() != MVT::i32)
      N1 = DAG.getNode(ISD::ANY_EXTEND, dl, MVT::i32, N1);
    if (N2.getValueType() != MVT::i32)
      N2 = DAG.getIntPtrConstant(IdxVal, dl);
    return DAG.getNode(Opc, dl, VT, N0, N1, N2);
  }

  if (Subtarget.hasSSE41()) {
    if (EltVT == MVT::f32) {
      // Bits [7:6] of the constant are the source select. This will always be
      // zero here. The DAG Combiner may combine an extract_elt index into
      // these bits. For example (insert (extract, 3), 2) could be matched by
      // putting the '3' into bits [7:6] of X86ISD::INSERTPS.
      // Bits [5:4] of the constant are the destination select. This is the
      // value of the incoming immediate.
      // Bits [3:0] of the constant are the zero mask. The DAG Combiner may
      // combine either bitwise AND or insert of float 0.0 to set these bits.
16474 | ||||
16475 | bool MinSize = DAG.getMachineFunction().getFunction().optForMinSize(); | |||
16476 | if (IdxVal == 0 && (!MinSize || !MayFoldLoad(N1))) { | |||
16477 | // If this is an insertion of 32-bits into the low 32-bits of | |||
16478 | // a vector, we prefer to generate a blend with immediate rather | |||
16479 | // than an insertps. Blends are simpler operations in hardware and so | |||
16480 | // will always have equal or better performance than insertps. | |||
16481 | // But if optimizing for size and there's a load folding opportunity, | |||
16482 | // generate insertps because blendps does not have a 32-bit memory | |||
16483 | // operand form. | |||
16484 | N2 = DAG.getIntPtrConstant(1, dl); | |||
16485 | N1 = DAG.getNode(ISD::SCALAR_TO_VECTOR, dl, MVT::v4f32, N1); | |||
16486 | return DAG.getNode(X86ISD::BLENDI, dl, VT, N0, N1, N2); | |||
16487 | } | |||
16488 | N2 = DAG.getIntPtrConstant(IdxVal << 4, dl); | |||
16489 | // Create this as a scalar-to-vector. | |||
16490 | N1 = DAG.getNode(ISD::SCALAR_TO_VECTOR, dl, MVT::v4f32, N1); | |||
16491 | return DAG.getNode(X86ISD::INSERTPS, dl, VT, N0, N1, N2); | |||
16492 | } | |||
16493 | ||||
16494 | // PINSR* works with constant index. | |||
16495 | if (EltVT == MVT::i32 || EltVT == MVT::i64) | |||
16496 | return Op; | |||
16497 | } | |||
16498 | ||||
16499 | return SDValue(); | |||
16500 | } | |||
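// [Editorial sketch] To make the INSERTPS immediate layout described above
// concrete, here is a minimal standalone helper; buildInsertPSImm is a
// hypothetical name used only for illustration and is not part of this file.
static unsigned buildInsertPSImm(unsigned SrcIdx, unsigned DstIdx,
                                 unsigned ZeroMask) {
  // Bits [7:6]: source element select; bits [5:4]: destination element
  // select; bits [3:0]: lanes forced to zero.
  return ((SrcIdx & 0x3) << 6) | ((DstIdx & 0x3) << 4) | (ZeroMask & 0xF);
}
// For example, inserting into lane 2 with no zeroing gives
// buildInsertPSImm(0, 2, 0) == 0x20, matching the (IdxVal << 4) used above.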
16501 | ||||
16502 | static SDValue LowerSCALAR_TO_VECTOR(SDValue Op, const X86Subtarget &Subtarget, | |||
16503 | SelectionDAG &DAG) { | |||
16504 | SDLoc dl(Op); | |||
16505 | MVT OpVT = Op.getSimpleValueType(); | |||
16506 | ||||
16507 | // It's always cheaper to replace an xor+movd with xorps, and doing so | |||
16508 | // simplifies further combines. | |||
16509 | if (X86::isZeroNode(Op.getOperand(0))) | |||
16510 | return getZeroVector(OpVT, Subtarget, DAG, dl); | |||
16511 | ||||
16512 | // If this is a 256-bit vector result, first insert into a 128-bit | |||
16513 | // vector and then insert into the 256-bit vector. | |||
16514 | if (!OpVT.is128BitVector()) { | |||
16515 | // Insert into a 128-bit vector. | |||
16516 | unsigned SizeFactor = OpVT.getSizeInBits() / 128; | |||
16517 | MVT VT128 = MVT::getVectorVT(OpVT.getVectorElementType(), | |||
16518 | OpVT.getVectorNumElements() / SizeFactor); | |||
16519 | ||||
16520 | Op = DAG.getNode(ISD::SCALAR_TO_VECTOR, dl, VT128, Op.getOperand(0)); | |||
16521 | ||||
16522 | // Insert the 128-bit vector. | |||
16523 | return insert128BitVector(DAG.getUNDEF(OpVT), Op, 0, DAG, dl); | |||
16524 | } | |||
16525 | assert(OpVT.is128BitVector() && "Expected an SSE type!"); | |||
16526 | ||||
16527 | // Pass through a v4i32 SCALAR_TO_VECTOR as that's what we use in tblgen. | |||
16528 | if (OpVT == MVT::v4i32) | |||
16529 | return Op; | |||
16530 | ||||
16531 | SDValue AnyExt = DAG.getNode(ISD::ANY_EXTEND, dl, MVT::i32, Op.getOperand(0)); | |||
16532 | return DAG.getBitcast( | |||
16533 | OpVT, DAG.getNode(ISD::SCALAR_TO_VECTOR, dl, MVT::v4i32, AnyExt)); | |||
16534 | } | |||
16535 | ||||
16536 | // Lower a node with an INSERT_SUBVECTOR opcode. This may result in a | |||
16537 | // simple superregister reference or explicit instructions to insert | |||
16538 | // the upper bits of a vector. | |||
16539 | static SDValue LowerINSERT_SUBVECTOR(SDValue Op, const X86Subtarget &Subtarget, | |||
16540 | SelectionDAG &DAG) { | |||
16541 | assert(Op.getSimpleValueType().getVectorElementType() == MVT::i1); | |||
16542 | ||||
16543 | return insert1BitVector(Op, DAG, Subtarget); | |||
16544 | } | |||
16545 | ||||
16546 | static SDValue LowerEXTRACT_SUBVECTOR(SDValue Op, const X86Subtarget &Subtarget, | |||
16547 | SelectionDAG &DAG) { | |||
16548 | assert(Op.getSimpleValueType().getVectorElementType() == MVT::i1 && | |||
16549 | "Only vXi1 extract_subvectors need custom lowering"); | |||
16550 | ||||
16551 | SDLoc dl(Op); | |||
16552 | SDValue Vec = Op.getOperand(0); | |||
16553 | SDValue Idx = Op.getOperand(1); | |||
16554 | ||||
16555 | if (!isa<ConstantSDNode>(Idx)) | |||
16556 | return SDValue(); | |||
16557 | ||||
16558 | unsigned IdxVal = cast<ConstantSDNode>(Idx)->getZExtValue(); | |||
16559 | if (IdxVal == 0) // the operation is legal | |||
16560 | return Op; | |||
16561 | ||||
16562 | MVT VecVT = Vec.getSimpleValueType(); | |||
16563 | unsigned NumElems = VecVT.getVectorNumElements(); | |||
16564 | ||||
16565 | // Widen the vector to a kshift width that is natively supported. | |||
16566 | MVT WideVecVT = VecVT; | |||
16567 | if ((!Subtarget.hasDQI() && NumElems == 8) || NumElems < 8) { | |||
16568 | WideVecVT = Subtarget.hasDQI() ? MVT::v8i1 : MVT::v16i1; | |||
16569 | Vec = DAG.getNode(ISD::INSERT_SUBVECTOR, dl, WideVecVT, | |||
16570 | DAG.getUNDEF(WideVecVT), Vec, | |||
16571 | DAG.getIntPtrConstant(0, dl)); | |||
16572 | } | |||
16573 | ||||
16574 | // Shift to the LSB. | |||
16575 | Vec = DAG.getNode(X86ISD::KSHIFTR, dl, WideVecVT, Vec, | |||
16576 | DAG.getConstant(IdxVal, dl, MVT::i8)); | |||
16577 | ||||
16578 | return DAG.getNode(ISD::EXTRACT_SUBVECTOR, dl, Op.getValueType(), Vec, | |||
16579 | DAG.getIntPtrConstant(0, dl)); | |||
16580 | } | |||
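// [Editorial sketch] A scalar model of the KSHIFTR-based extraction above,
// treating the widened mask register as plain bits; extractSubMask is a
// hypothetical helper for illustration only (assumes <cstdint>).
static uint16_t extractSubMask(uint16_t Mask, unsigned IdxVal,
                               unsigned NumSubElts) {
  // KSHIFTR moves lane IdxVal down to bit 0; the EXTRACT_SUBVECTOR at index 0
  // then keeps only the low NumSubElts lanes.
  return (Mask >> IdxVal) & ((1u << NumSubElts) - 1);
}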
16581 | ||||
16582 | // Returns the appropriate wrapper opcode for a global reference. | |||
16583 | unsigned X86TargetLowering::getGlobalWrapperKind( | |||
16584 | const GlobalValue *GV, const unsigned char OpFlags) const { | |||
16585 | // References to absolute symbols are never PC-relative. | |||
16586 | if (GV && GV->isAbsoluteSymbolRef()) | |||
16587 | return X86ISD::Wrapper; | |||
16588 | ||||
16589 | CodeModel::Model M = getTargetMachine().getCodeModel(); | |||
16590 | if (Subtarget.isPICStyleRIPRel() && | |||
16591 | (M == CodeModel::Small || M == CodeModel::Kernel)) | |||
16592 | return X86ISD::WrapperRIP; | |||
16593 | ||||
16594 | // GOTPCREL references must always use RIP. | |||
16595 | if (OpFlags == X86II::MO_GOTPCREL) | |||
16596 | return X86ISD::WrapperRIP; | |||
16597 | ||||
16598 | return X86ISD::Wrapper; | |||
16599 | } | |||
16600 | ||||
16601 | // ConstantPool, JumpTable, GlobalAddress, and ExternalSymbol are lowered as | |||
16602 | // their target counterparts wrapped in the X86ISD::Wrapper node. Suppose N is | |||
16603 | // one of the above-mentioned nodes. It has to be wrapped because otherwise | |||
16604 | // Select(N) returns N. So the raw TargetGlobalAddress nodes, etc. can only | |||
16605 | // be used to form an addressing mode. These wrapped nodes will be selected | |||
16606 | // into MOV32ri. | |||
16607 | SDValue | |||
16608 | X86TargetLowering::LowerConstantPool(SDValue Op, SelectionDAG &DAG) const { | |||
16609 | ConstantPoolSDNode *CP = cast<ConstantPoolSDNode>(Op); | |||
16610 | ||||
16611 | // In PIC mode (unless we're in RIPRel PIC mode) we add an offset to the | |||
16612 | // global base reg. | |||
16613 | unsigned char OpFlag = Subtarget.classifyLocalReference(nullptr); | |||
16614 | ||||
16615 | auto PtrVT = getPointerTy(DAG.getDataLayout()); | |||
16616 | SDValue Result = DAG.getTargetConstantPool( | |||
16617 | CP->getConstVal(), PtrVT, CP->getAlignment(), CP->getOffset(), OpFlag); | |||
16618 | SDLoc DL(CP); | |||
16619 | Result = DAG.getNode(getGlobalWrapperKind(), DL, PtrVT, Result); | |||
16620 | // With PIC, the address is actually $g + Offset. | |||
16621 | if (OpFlag) { | |||
16622 | Result = | |||
16623 | DAG.getNode(ISD::ADD, DL, PtrVT, | |||
16624 | DAG.getNode(X86ISD::GlobalBaseReg, SDLoc(), PtrVT), Result); | |||
16625 | } | |||
16626 | ||||
16627 | return Result; | |||
16628 | } | |||
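// [Editorial note] In 32-bit ELF PIC code, the GlobalBaseReg + wrapped
// constant-pool address built above typically selects to something like
// (illustrative only, with the address folded into the consuming load):
//   movsd .LCPI0_0@GOTOFF(%ebx), %xmm0   ; $g (GlobalBaseReg) + offset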
16629 | ||||
16630 | SDValue X86TargetLowering::LowerJumpTable(SDValue Op, SelectionDAG &DAG) const { | |||
16631 | JumpTableSDNode *JT = cast<JumpTableSDNode>(Op); | |||
16632 | ||||
16633 | // In PIC mode (unless we're in RIPRel PIC mode) we add an offset to the | |||
16634 | // global base reg. | |||
16635 | unsigned char OpFlag = Subtarget.classifyLocalReference(nullptr); | |||
16636 | ||||
16637 | auto PtrVT = getPointerTy(DAG.getDataLayout()); | |||
16638 | SDValue Result = DAG.getTargetJumpTable(JT->getIndex(), PtrVT, OpFlag); | |||
16639 | SDLoc DL(JT); | |||
16640 | Result = DAG.getNode(getGlobalWrapperKind(), DL, PtrVT, Result); | |||
16641 | ||||
16642 | // With PIC, the address is actually $g + Offset. | |||
16643 | if (OpFlag) | |||
16644 | Result = | |||
16645 | DAG.getNode(ISD::ADD, DL, PtrVT, | |||
16646 | DAG.getNode(X86ISD::GlobalBaseReg, SDLoc(), PtrVT), Result); | |||
16647 | ||||
16648 | return Result; | |||
16649 | } | |||
16650 | ||||
16651 | SDValue | |||
16652 | X86TargetLowering::LowerExternalSymbol(SDValue Op, SelectionDAG &DAG) const { | |||
16653 | const char *Sym = cast<ExternalSymbolSDNode>(Op)->getSymbol(); | |||
16654 | ||||
16655 | // In PIC mode (unless we're in RIPRel PIC mode) we add an offset to the | |||
16656 | // global base reg. | |||
16657 | const Module *Mod = DAG.getMachineFunction().getFunction().getParent(); | |||
16658 | unsigned char OpFlag = Subtarget.classifyGlobalReference(nullptr, *Mod); | |||
16659 | ||||
16660 | auto PtrVT = getPointerTy(DAG.getDataLayout()); | |||
16661 | SDValue Result = DAG.getTargetExternalSymbol(Sym, PtrVT, OpFlag); | |||
16662 | ||||
16663 | SDLoc DL(Op); | |||
16664 | Result = DAG.getNode(getGlobalWrapperKind(), DL, PtrVT, Result); | |||
16665 | ||||
16666 | // With PIC, the address is actually $g + Offset. | |||
16667 | if (OpFlag) { | |||
16668 | Result = | |||
16669 | DAG.getNode(ISD::ADD, DL, PtrVT, | |||
16670 | DAG.getNode(X86ISD::GlobalBaseReg, SDLoc(), PtrVT), Result); | |||
16671 | } | |||
16672 | ||||
16673 | // For symbols that require a load from a stub to get the address, emit the | |||
16674 | // load. | |||
16675 | if (isGlobalStubReference(OpFlag)) | |||
16676 | Result = DAG.getLoad(PtrVT, DL, DAG.getEntryNode(), Result, | |||
16677 | MachinePointerInfo::getGOT(DAG.getMachineFunction())); | |||
16678 | ||||
16679 | return Result; | |||
16680 | } | |||
16681 | ||||
16682 | SDValue | |||
16683 | X86TargetLowering::LowerBlockAddress(SDValue Op, SelectionDAG &DAG) const { | |||
16684 | // Create the TargetBlockAddress node. | |||
16685 | unsigned char OpFlags = | |||
16686 | Subtarget.classifyBlockAddressReference(); | |||
16687 | const BlockAddress *BA = cast<BlockAddressSDNode>(Op)->getBlockAddress(); | |||
16688 | int64_t Offset = cast<BlockAddressSDNode>(Op)->getOffset(); | |||
16689 | SDLoc dl(Op); | |||
16690 | auto PtrVT = getPointerTy(DAG.getDataLayout()); | |||
16691 | SDValue Result = DAG.getTargetBlockAddress(BA, PtrVT, Offset, OpFlags); | |||
16692 | Result = DAG.getNode(getGlobalWrapperKind(), dl, PtrVT, Result); | |||
16693 | ||||
16694 | // With PIC, the address is actually $g + Offset. | |||
16695 | if (isGlobalRelativeToPICBase(OpFlags)) { | |||
16696 | Result = DAG.getNode(ISD::ADD, dl, PtrVT, | |||
16697 | DAG.getNode(X86ISD::GlobalBaseReg, dl, PtrVT), Result); | |||
16698 | } | |||
16699 | ||||
16700 | return Result; | |||
16701 | } | |||
16702 | ||||
16703 | SDValue X86TargetLowering::LowerGlobalAddress(const GlobalValue *GV, | |||
16704 | const SDLoc &dl, int64_t Offset, | |||
16705 | SelectionDAG &DAG) const { | |||
16706 | // Create the TargetGlobalAddress node, folding in the constant | |||
16707 | // offset if it is legal. | |||
16708 | unsigned char OpFlags = Subtarget.classifyGlobalReference(GV); | |||
16709 | CodeModel::Model M = DAG.getTarget().getCodeModel(); | |||
16710 | auto PtrVT = getPointerTy(DAG.getDataLayout()); | |||
16711 | SDValue Result; | |||
16712 | if (OpFlags == X86II::MO_NO_FLAG && | |||
16713 | X86::isOffsetSuitableForCodeModel(Offset, M)) { | |||
16714 | // A direct static reference to a global. | |||
16715 | Result = DAG.getTargetGlobalAddress(GV, dl, PtrVT, Offset); | |||
16716 | Offset = 0; | |||
16717 | } else { | |||
16718 | Result = DAG.getTargetGlobalAddress(GV, dl, PtrVT, 0, OpFlags); | |||
16719 | } | |||
16720 | ||||
16721 | Result = DAG.getNode(getGlobalWrapperKind(GV, OpFlags), dl, PtrVT, Result); | |||
16722 | ||||
16723 | // With PIC, the address is actually $g + Offset. | |||
16724 | if (isGlobalRelativeToPICBase(OpFlags)) { | |||
16725 | Result = DAG.getNode(ISD::ADD, dl, PtrVT, | |||
16726 | DAG.getNode(X86ISD::GlobalBaseReg, dl, PtrVT), Result); | |||
16727 | } | |||
16728 | ||||
16729 | // For globals that require a load from a stub to get the address, emit the | |||
16730 | // load. | |||
16731 | if (isGlobalStubReference(OpFlags)) | |||
16732 | Result = DAG.getLoad(PtrVT, dl, DAG.getEntryNode(), Result, | |||
16733 | MachinePointerInfo::getGOT(DAG.getMachineFunction())); | |||
16734 | ||||
16735 | // If there was a non-zero offset that we didn't fold, create an explicit | |||
16736 | // addition for it. | |||
16737 | if (Offset != 0) | |||
16738 | Result = DAG.getNode(ISD::ADD, dl, PtrVT, Result, | |||
16739 | DAG.getConstant(Offset, dl, PtrVT)); | |||
16740 | ||||
16741 | return Result; | |||
16742 | } | |||
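// [Editorial note] For a GOT-indirected global (isGlobalStubReference) in
// small-PIC x86-64 code, the wrapper plus stub load above typically selects
// to something like (illustrative only):
//   movq x@GOTPCREL(%rip), %rax   ; load &x from the GOT
// with any non-zero offset that couldn't be folded added afterwards as the
// explicit ISD::ADD.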
16743 | ||||
16744 | SDValue | |||
16745 | X86TargetLowering::LowerGlobalAddress(SDValue Op, SelectionDAG &DAG) const { | |||
16746 | const GlobalValue *GV = cast<GlobalAddressSDNode>(Op)->getGlobal(); | |||
16747 | int64_t Offset = cast<GlobalAddressSDNode>(Op)->getOffset(); | |||
16748 | return LowerGlobalAddress(GV, SDLoc(Op), Offset, DAG); | |||
16749 | } | |||
16750 | ||||
16751 | static SDValue | |||
16752 | GetTLSADDR(SelectionDAG &DAG, SDValue Chain, GlobalAddressSDNode *GA, | |||
16753 | SDValue *InFlag, const EVT PtrVT, unsigned ReturnReg, | |||
16754 | unsigned char OperandFlags, bool LocalDynamic = false) { | |||
16755 | MachineFrameInfo &MFI = DAG.getMachineFunction().getFrameInfo(); | |||
16756 | SDVTList NodeTys = DAG.getVTList(MVT::Other, MVT::Glue); | |||
16757 | SDLoc dl(GA); | |||
16758 | SDValue TGA = DAG.getTargetGlobalAddress(GA->getGlobal(), dl, | |||
16759 | GA->getValueType(0), | |||
16760 | GA->getOffset(), | |||
16761 | OperandFlags); | |||
16762 | ||||
16763 | X86ISD::NodeType CallType = LocalDynamic ? X86ISD::TLSBASEADDR | |||
16764 | : X86ISD::TLSADDR; | |||
16765 | ||||
16766 | if (InFlag) { | |||
16767 | SDValue Ops[] = { Chain, TGA, *InFlag }; | |||
16768 | Chain = DAG.getNode(CallType, dl, NodeTys, Ops); | |||
16769 | } else { | |||
16770 | SDValue Ops[] = { Chain, TGA }; | |||
16771 | Chain = DAG.getNode(CallType, dl, NodeTys, Ops); | |||
16772 | } | |||
16773 | ||||
16774 | // TLSADDR will be codegen'ed as a call. Inform MFI that the function has calls. | |||
16775 | MFI.setAdjustsStack(true); | |||
16776 | MFI.setHasCalls(true); | |||
16777 | ||||
16778 | SDValue Flag = Chain.getValue(1); | |||
16779 | return DAG.getCopyFromReg(Chain, dl, ReturnReg, PtrVT, Flag); | |||
16780 | } | |||
16781 | ||||
16782 | // Lower ISD::GlobalTLSAddress using the "general dynamic" model, 32 bit | |||
16783 | static SDValue | |||
16784 | LowerToTLSGeneralDynamicModel32(GlobalAddressSDNode *GA, SelectionDAG &DAG, | |||
16785 | const EVT PtrVT) { | |||
16786 | SDValue InFlag; | |||
16787 | SDLoc dl(GA); // ? function entry point might be better | |||
16788 | SDValue Chain = DAG.getCopyToReg(DAG.getEntryNode(), dl, X86::EBX, | |||
16789 | DAG.getNode(X86ISD::GlobalBaseReg, | |||
16790 | SDLoc(), PtrVT), InFlag); | |||
16791 | InFlag = Chain.getValue(1); | |||
16792 | ||||
16793 | return GetTLSADDR(DAG, Chain, GA, &InFlag, PtrVT, X86::EAX, X86II::MO_TLSGD); | |||
16794 | } | |||
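// [Editorial note] On ELF/i386 the TLSADDR node built here corresponds to the
// canonical general-dynamic sequence (illustrative only):
//   leal x@tlsgd(,%ebx,1), %eax
//   call ___tls_get_addr@PLT      ; variable address returned in %eax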
16795 | ||||
16796 | // Lower ISD::GlobalTLSAddress using the "general dynamic" model, 64 bit | |||
16797 | static SDValue | |||
16798 | LowerToTLSGeneralDynamicModel64(GlobalAddressSDNode *GA, SelectionDAG &DAG, | |||
16799 | const EVT PtrVT) { | |||
16800 | return GetTLSADDR(DAG, DAG.getEntryNode(), GA, nullptr, PtrVT, | |||
16801 | X86::RAX, X86II::MO_TLSGD); | |||
16802 | } | |||
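// [Editorial note] The x86-64 counterpart is the padded general-dynamic
// sequence from the ELF TLS ABI (illustrative only):
//   .byte 0x66
//   leaq x@tlsgd(%rip), %rdi
//   .word 0x6666
//   rex64
//   call __tls_get_addr@PLT       ; variable address returned in %rax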
16803 | ||||
16804 | static SDValue LowerToTLSLocalDynamicModel(GlobalAddressSDNode *GA, | |||
16805 | SelectionDAG &DAG, | |||
16806 | const EVT PtrVT, | |||
16807 | bool is64Bit) { | |||
16808 | SDLoc dl(GA); | |||
16809 | ||||
16810 | // Get the start address of the TLS block for this module. | |||
16811 | X86MachineFunctionInfo *MFI = DAG.getMachineFunction() | |||
16812 | .getInfo<X86MachineFunctionInfo>(); | |||
16813 | MFI->incNumLocalDynamicTLSAccesses(); | |||
16814 | ||||
16815 | SDValue Base; | |||
16816 | if (is64Bit) { | |||
16817 | Base = GetTLSADDR(DAG, DAG.getEntryNode(), GA, nullptr, PtrVT, X86::RAX, | |||
16818 | X86II::MO_TLSLD, /*LocalDynamic=*/true); | |||
16819 | } else { | |||
16820 | SDValue InFlag; | |||
16821 | SDValue Chain = DAG.getCopyToReg(DAG.getEntryNode(), dl, X86::EBX, | |||
16822 | DAG.getNode(X86ISD::GlobalBaseReg, SDLoc(), PtrVT), InFlag); | |||
16823 | InFlag = Chain.getValue(1); | |||
16824 | Base = GetTLSADDR(DAG, Chain, GA, &InFlag, PtrVT, X86::EAX, | |||
16825 | X86II::MO_TLSLDM, /*LocalDynamic=*/true); | |||
16826 | } | |||
16827 | ||||
16828 | // Note: the CleanupLocalDynamicTLSPass will remove redundant computations | |||
16829 | // of Base. | |||
16830 | ||||
16831 | // Build x@dtpoff. | |||
16832 | unsigned char OperandFlags = X86II::MO_DTPOFF; | |||
16833 | unsigned WrapperKind = X86ISD::Wrapper; | |||
16834 | SDValue TGA = DAG.getTargetGlobalAddress(GA->getGlobal(), dl, | |||
16835 | GA->getValueType(0), | |||
16836 | GA->getOffset(), OperandFlags); | |||
16837 | SDValue Offset = DAG.getNode(WrapperKind, dl, PtrVT, TGA); | |||
16838 | ||||
16839 | // Add x@dtpoff with the base. | |||
16840 | return DAG.getNode(ISD::ADD, dl, PtrVT, Offset, Base); | |||
16841 | } | |||
16842 | ||||
16843 | // Lower ISD::GlobalTLSAddress using the "initial exec" or "local exec" model. | |||
16844 | static SDValue LowerToTLSExecModel(GlobalAddressSDNode *GA, SelectionDAG &DAG, | |||
16845 | const EVT PtrVT, TLSModel::Model model, | |||
16846 | bool is64Bit, bool isPIC) { | |||
16847 | SDLoc dl(GA); | |||
16848 | ||||
16849 | // Get the Thread Pointer, which is %gs:0 (32-bit) or %fs:0 (64-bit). | |||
16850 | Value *Ptr = Constant::getNullValue(Type::getInt8PtrTy(*DAG.getContext(), | |||
16851 | is64Bit ? 257 : 256)); | |||
16852 | ||||
16853 | SDValue ThreadPointer = | |||
16854 | DAG.getLoad(PtrVT, dl, DAG.getEntryNode(), DAG.getIntPtrConstant(0, dl), | |||
16855 | MachinePointerInfo(Ptr)); | |||
16856 | ||||
16857 | unsigned char OperandFlags = 0; | |||
16858 | // Most TLS accesses are not RIP-relative, even on x86-64. One exception is | |||
16859 | // the initial-exec model. | |||
16860 | unsigned WrapperKind = X86ISD::Wrapper; | |||
16861 | if (model == TLSModel::LocalExec) { | |||
16862 | OperandFlags = is64Bit ? X86II::MO_TPOFF : X86II::MO_NTPOFF; | |||
16863 | } else if (model == TLSModel::InitialExec) { | |||
16864 | if (is64Bit) { | |||
16865 | OperandFlags = X86II::MO_GOTTPOFF; | |||
16866 | WrapperKind = X86ISD::WrapperRIP; | |||
16867 | } else { | |||
16868 | OperandFlags = isPIC ? X86II::MO_GOTNTPOFF : X86II::MO_INDNTPOFF; | |||
16869 | } | |||
16870 | } else { | |||
16871 | llvm_unreachable("Unexpected model")::llvm::llvm_unreachable_internal("Unexpected model", "/build/llvm-toolchain-snapshot-8~svn350071/lib/Target/X86/X86ISelLowering.cpp" , 16871); | |||
16872 | } | |||
16873 | ||||
16874 | // emit "addl x@ntpoff,%eax" (local exec) | |||
16875 | // or "addl x@indntpoff,%eax" (initial exec) | |||
16876 | // or "addl x@gotntpoff(%ebx) ,%eax" (initial exec, 32-bit pic) | |||
16877 | SDValue TGA = | |||
16878 | DAG.getTargetGlobalAddress(GA->getGlobal(), dl, GA->getValueType(0), | |||
16879 | GA->getOffset(), OperandFlags); | |||
16880 | SDValue Offset = DAG.getNode(WrapperKind, dl, PtrVT, TGA); | |||
16881 | ||||
16882 | if (model == TLSModel::InitialExec) { | |||
16883 | if (isPIC && !is64Bit) { | |||
16884 | Offset = DAG.getNode(ISD::ADD, dl, PtrVT, | |||
16885 | DAG.getNode(X86ISD::GlobalBaseReg, SDLoc(), PtrVT), | |||
16886 | Offset); | |||
16887 | } | |||
16888 | ||||
16889 | Offset = DAG.getLoad(PtrVT, dl, DAG.getEntryNode(), Offset, | |||
16890 | MachinePointerInfo::getGOT(DAG.getMachineFunction())); | |||
16891 | } | |||
16892 | ||||
16893 | // The address of the thread local variable is the add of the thread | |||
16894 | // pointer with the offset of the variable. | |||
16895 | return DAG.getNode(ISD::ADD, dl, PtrVT, ThreadPointer, Offset); | |||
16896 | } | |||
16897 | ||||
16898 | SDValue | |||
16899 | X86TargetLowering::LowerGlobalTLSAddress(SDValue Op, SelectionDAG &DAG) const { | |||
16900 | ||||
16901 | GlobalAddressSDNode *GA = cast<GlobalAddressSDNode>(Op); | |||
16902 | ||||
16903 | if (DAG.getTarget().useEmulatedTLS()) | |||
16904 | return LowerToTLSEmulatedModel(GA, DAG); | |||
16905 | ||||
16906 | const GlobalValue *GV = GA->getGlobal(); | |||
16907 | auto PtrVT = getPointerTy(DAG.getDataLayout()); | |||
16908 | bool PositionIndependent = isPositionIndependent(); | |||
16909 | ||||
16910 | if (Subtarget.isTargetELF()) { | |||
16911 | TLSModel::Model model = DAG.getTarget().getTLSModel(GV); | |||
16912 | switch (model) { | |||
16913 | case TLSModel::GeneralDynamic: | |||
16914 | if (Subtarget.is64Bit()) | |||
16915 | return LowerToTLSGeneralDynamicModel64(GA, DAG, PtrVT); | |||
16916 | return LowerToTLSGeneralDynamicModel32(GA, DAG, PtrVT); | |||
16917 | case TLSModel::LocalDynamic: | |||
16918 | return LowerToTLSLocalDynamicModel(GA, DAG, PtrVT, | |||
16919 | Subtarget.is64Bit()); | |||
16920 | case TLSModel::InitialExec: | |||
16921 | case TLSModel::LocalExec: | |||
16922 | return LowerToTLSExecModel(GA, DAG, PtrVT, model, Subtarget.is64Bit(), | |||
16923 | PositionIndependent); | |||
16924 | } | |||
16925 | llvm_unreachable("Unknown TLS model.")::llvm::llvm_unreachable_internal("Unknown TLS model.", "/build/llvm-toolchain-snapshot-8~svn350071/lib/Target/X86/X86ISelLowering.cpp" , 16925); | |||
16926 | } | |||
16927 | ||||
16928 | if (Subtarget.isTargetDarwin()) { | |||
16929 | // Darwin only has one model of TLS. Lower to that. | |||
16930 | unsigned char OpFlag = 0; | |||
16931 | unsigned WrapperKind = Subtarget.isPICStyleRIPRel() ? | |||
16932 | X86ISD::WrapperRIP : X86ISD::Wrapper; | |||
16933 | ||||
16934 | // In PIC mode (unless we're in RIPRel PIC mode) we add an offset to the | |||
16935 | // global base reg. | |||
16936 | bool PIC32 = PositionIndependent && !Subtarget.is64Bit(); | |||
16937 | if (PIC32) | |||
16938 | OpFlag = X86II::MO_TLVP_PIC_BASE; | |||
16939 | else | |||
16940 | OpFlag = X86II::MO_TLVP; | |||
16941 | SDLoc DL(Op); | |||
16942 | SDValue Result = DAG.getTargetGlobalAddress(GA->getGlobal(), DL, | |||
16943 | GA->getValueType(0), | |||
16944 | GA->getOffset(), OpFlag); | |||
16945 | SDValue Offset = DAG.getNode(WrapperKind, DL, PtrVT, Result); | |||
16946 | ||||
16947 | // With PIC32, the address is actually $g + Offset. | |||
16948 | if (PIC32) | |||
16949 | Offset = DAG.getNode(ISD::ADD, DL, PtrVT, | |||
16950 | DAG.getNode(X86ISD::GlobalBaseReg, SDLoc(), PtrVT), | |||
16951 | Offset); | |||
16952 | ||||
16953 | // Lowering the machine ISD node will make sure everything ends up in the | |||
16954 | // right location. | |||
16955 | SDValue Chain = DAG.getEntryNode(); | |||
16956 | SDVTList NodeTys = DAG.getVTList(MVT::Other, MVT::Glue); | |||
16957 | Chain = DAG.getCALLSEQ_START(Chain, 0, 0, DL); | |||
16958 | SDValue Args[] = { Chain, Offset }; | |||
16959 | Chain = DAG.getNode(X86ISD::TLSCALL, DL, NodeTys, Args); | |||
16960 | Chain = DAG.getCALLSEQ_END(Chain, DAG.getIntPtrConstant(0, DL, true), | |||
16961 | DAG.getIntPtrConstant(0, DL, true), | |||
16962 | Chain.getValue(1), DL); | |||
16963 | ||||
16964 | // TLSCALL will be codegen'ed as a call. Inform MFI that the function has calls. | |||
16965 | MachineFrameInfo &MFI = DAG.getMachineFunction().getFrameInfo(); | |||
16966 | MFI.setAdjustsStack(true); | |||
16967 | ||||
16968 | // And our return value (tls address) is in the standard call return value | |||
16969 | // location. | |||
16970 | unsigned Reg = Subtarget.is64Bit() ? X86::RAX : X86::EAX; | |||
16971 | return DAG.getCopyFromReg(Chain, DL, Reg, PtrVT, Chain.getValue(1)); | |||
16972 | } | |||
16973 | ||||
16974 | if (Subtarget.isTargetKnownWindowsMSVC() || | |||
16975 | Subtarget.isTargetWindowsItanium() || | |||
16976 | Subtarget.isTargetWindowsGNU()) { | |||
16977 | // Just use the implicit TLS architecture. | |||
16978 | // Need to generate something similar to: | |||
16979 | // mov rdx, qword [gs:abs 58H]; Load pointer to ThreadLocalStorage | |||
16980 | // ; from TEB | |||
16981 | // mov ecx, dword [rel _tls_index]; Load index (from C runtime) | |||
16982 | // mov rcx, qword [rdx+rcx*8] | |||
16983 | // mov eax, .tls$:tlsvar | |||
16984 | // [rax+rcx] contains the address | |||
16985 | // Windows 64bit: gs:0x58 | |||
16986 | // Windows 32bit: fs:__tls_array | |||
16987 | ||||
16988 | SDLoc dl(GA); | |||
16989 | SDValue Chain = DAG.getEntryNode(); | |||
16990 | ||||
16991 | // Get the Thread Pointer, which is %fs:__tls_array (32-bit) or | |||
16992 | // %gs:0x58 (64-bit). On MinGW, __tls_array is not available, so directly | |||
16993 | // use its literal value of 0x2C. | |||
16994 | Value *Ptr = Constant::getNullValue(Subtarget.is64Bit() | |||
16995 | ? Type::getInt8PtrTy(*DAG.getContext(), | |||
16996 | 256) | |||
16997 | : Type::getInt32PtrTy(*DAG.getContext(), | |||
16998 | 257)); | |||
16999 | ||||
17000 | SDValue TlsArray = Subtarget.is64Bit() | |||
17001 | ? DAG.getIntPtrConstant(0x58, dl) | |||
17002 | : (Subtarget.isTargetWindowsGNU() | |||
17003 | ? DAG.getIntPtrConstant(0x2C, dl) | |||
17004 | : DAG.getExternalSymbol("_tls_array", PtrVT)); | |||
17005 | ||||
17006 | SDValue ThreadPointer = | |||
17007 | DAG.getLoad(PtrVT, dl, Chain, TlsArray, MachinePointerInfo(Ptr)); | |||
17008 | ||||
17009 | SDValue res; | |||
17010 | if (GV->getThreadLocalMode() == GlobalVariable::LocalExecTLSModel) { | |||
17011 | res = ThreadPointer; | |||
17012 | } else { | |||
17013 | // Load the _tls_index variable | |||
17014 | SDValue IDX = DAG.getExternalSymbol("_tls_index", PtrVT); | |||
17015 | if (Subtarget.is64Bit()) | |||
17016 | IDX = DAG.getExtLoad(ISD::ZEXTLOAD, dl, PtrVT, Chain, IDX, | |||
17017 | MachinePointerInfo(), MVT::i32); | |||
17018 | else | |||
17019 | IDX = DAG.getLoad(PtrVT, dl, Chain, IDX, MachinePointerInfo()); | |||
17020 | ||||
17021 | auto &DL = DAG.getDataLayout(); | |||
17022 | SDValue Scale = | |||
17023 | DAG.getConstant(Log2_64_Ceil(DL.getPointerSize()), dl, MVT::i8); | |||
17024 | IDX = DAG.getNode(ISD::SHL, dl, PtrVT, IDX, Scale); | |||
17025 | ||||
17026 | res = DAG.getNode(ISD::ADD, dl, PtrVT, ThreadPointer, IDX); | |||
17027 | } | |||
17028 | ||||
17029 | res = DAG.getLoad(PtrVT, dl, Chain, res, MachinePointerInfo()); | |||
17030 | ||||
17031 | // Get the offset of start of .tls section | |||
17032 | SDValue TGA = DAG.getTargetGlobalAddress(GA->getGlobal(), dl, | |||
17033 | GA->getValueType(0), | |||
17034 | GA->getOffset(), X86II::MO_SECREL); | |||
17035 | SDValue Offset = DAG.getNode(X86ISD::Wrapper, dl, PtrVT, TGA); | |||
17036 | ||||
17037 | // The address of the thread local variable is the add of the thread | |||
17038 | // pointer with the offset of the variable. | |||
17039 | return DAG.getNode(ISD::ADD, dl, PtrVT, res, Offset); | |||
17040 | } | |||
17041 | ||||
17042 | llvm_unreachable("TLS not implemented for this target.")::llvm::llvm_unreachable_internal("TLS not implemented for this target." , "/build/llvm-toolchain-snapshot-8~svn350071/lib/Target/X86/X86ISelLowering.cpp" , 17042); | |||
17043 | } | |||
17044 | ||||
17045 | /// Lower SRA_PARTS and friends, which return two i32 values | |||
17046 | /// and take a 2 x i32 value to shift plus a shift amount. | |||
17047 | /// TODO: Can this be moved to general expansion code? | |||
17048 | static SDValue LowerShiftParts(SDValue Op, SelectionDAG &DAG) { | |||
17049 | assert(Op.getNumOperands() == 3 && "Not a double-shift!"); | |||
17050 | MVT VT = Op.getSimpleValueType(); | |||
17051 | unsigned VTBits = VT.getSizeInBits(); | |||
17052 | SDLoc dl(Op); | |||
17053 | bool isSRA = Op.getOpcode() == ISD::SRA_PARTS; | |||
17054 | SDValue ShOpLo = Op.getOperand(0); | |||
17055 | SDValue ShOpHi = Op.getOperand(1); | |||
17056 | SDValue ShAmt = Op.getOperand(2); | |||
17057 | // ISD::FSHL and ISD::FSHR have defined overflow behavior, but ISD::SHL and | |||
17058 | // ISD::SRA/SRL do not. Insert an AND to be safe; it's optimized away | |||
17059 | // during isel. | |||
17060 | SDValue SafeShAmt = DAG.getNode(ISD::AND, dl, MVT::i8, ShAmt, | |||
17061 | DAG.getConstant(VTBits - 1, dl, MVT::i8)); | |||
17062 | SDValue Tmp1 = isSRA ? DAG.getNode(ISD::SRA, dl, VT, ShOpHi, | |||
17063 | DAG.getConstant(VTBits - 1, dl, MVT::i8)) | |||
17064 | : DAG.getConstant(0, dl, VT); | |||
17065 | ||||
17066 | SDValue Tmp2, Tmp3; | |||
17067 | if (Op.getOpcode() == ISD::SHL_PARTS) { | |||
17068 | Tmp2 = DAG.getNode(ISD::FSHL, dl, VT, ShOpHi, ShOpLo, ShAmt); | |||
17069 | Tmp3 = DAG.getNode(ISD::SHL, dl, VT, ShOpLo, SafeShAmt); | |||
17070 | } else { | |||
17071 | Tmp2 = DAG.getNode(ISD::FSHR, dl, VT, ShOpHi, ShOpLo, ShAmt); | |||
17072 | Tmp3 = DAG.getNode(isSRA ? ISD::SRA : ISD::SRL, dl, VT, ShOpHi, SafeShAmt); | |||
17073 | } | |||
17074 | ||||
17075 | // If the shift amount is larger than or equal to the width of a part, we | |||
17076 | // can't rely on the results of shld/shrd. Insert a test and select the | |||
17077 | // appropriate values for large shift amounts. | |||
17078 | SDValue AndNode = DAG.getNode(ISD::AND, dl, MVT::i8, ShAmt, | |||
17079 | DAG.getConstant(VTBits, dl, MVT::i8)); | |||
17080 | SDValue Cond = DAG.getSetCC(dl, MVT::i8, AndNode, | |||
17081 | DAG.getConstant(0, dl, MVT::i8), ISD::SETNE); | |||
17082 | ||||
17083 | SDValue Hi, Lo; | |||
17084 | if (Op.getOpcode() == ISD::SHL_PARTS) { | |||
17085 | Hi = DAG.getNode(ISD::SELECT, dl, VT, Cond, Tmp3, Tmp2); | |||
17086 | Lo = DAG.getNode(ISD::SELECT, dl, VT, Cond, Tmp1, Tmp3); | |||
17087 | } else { | |||
17088 | Lo = DAG.getNode(ISD::SELECT, dl, VT, Cond, Tmp3, Tmp2); | |||
17089 | Hi = DAG.getNode(ISD::SELECT, dl, VT, Cond, Tmp1, Tmp3); | |||
17090 | } | |||
17091 | ||||
17092 | return DAG.getMergeValues({ Lo, Hi }, dl); | |||
17093 | } | |||
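// [Editorial sketch] A scalar model of the SHL_PARTS path above for
// VTBits == 32; shlParts32 is a hypothetical helper for illustration only
// (assumes <cstdint>).
static void shlParts32(uint32_t Lo, uint32_t Hi, unsigned Amt,
                       uint32_t &OutLo, uint32_t &OutHi) {
  unsigned Safe = Amt & 31;                                      // SafeShAmt
  uint32_t Tmp2 = (Hi << Safe) | (Safe ? Lo >> (32 - Safe) : 0); // ISD::FSHL
  uint32_t Tmp3 = Lo << Safe;
  bool Big = (Amt & 32) != 0; // the AND + SETNE test for large shift amounts
  OutHi = Big ? Tmp3 : Tmp2;
  OutLo = Big ? 0 : Tmp3;
}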
17094 | ||||
17095 | static SDValue LowerFunnelShift(SDValue Op, const X86Subtarget &Subtarget, | |||
17096 | SelectionDAG &DAG) { | |||
17097 | MVT VT = Op.getSimpleValueType(); | |||
17098 | assert((Op.getOpcode() == ISD::FSHL || Op.getOpcode() == ISD::FSHR) && | |||
17099 | "Unexpected funnel shift opcode!"); | |||
17100 | assert((VT == MVT::i16 || VT == MVT::i32 || VT == MVT::i64) && | |||
17101 | "Unexpected funnel shift type!"); | |||
17102 | ||||
17103 | SDLoc DL(Op); | |||
17104 | SDValue Op0 = Op.getOperand(0); | |||
17105 | SDValue Op1 = Op.getOperand(1); | |||
17106 | SDValue Amt = Op.getOperand(2); | |||
17107 | ||||
17108 | // Expand slow SHLD/SHRD cases if we are not optimizing for size. | |||
17109 | bool OptForSize = DAG.getMachineFunction().getFunction().optForSize(); | |||
17110 | if (!OptForSize && Subtarget.isSHLDSlow()) | |||
17111 | return SDValue(); | |||
17112 | ||||
17113 | bool IsFSHR = Op.getOpcode() == ISD::FSHR; | |||
17114 | if (IsFSHR) | |||
17115 | std::swap(Op0, Op1); | |||
17116 | ||||
17117 | // i16 needs an explicit modulo of the shift amount; i32/i64 get it implicitly. | |||
17118 | if (VT == MVT::i16) | |||
17119 | Amt = DAG.getNode(ISD::AND, DL, Amt.getValueType(), Amt, | |||
17120 | DAG.getConstant(15, DL, Amt.getValueType())); | |||
17121 | ||||
17122 | unsigned SHDOp = (IsFSHR ? X86ISD::SHRD : X86ISD::SHLD); | |||
17123 | return DAG.getNode(SHDOp, DL, VT, Op0, Op1, Amt); | |||
17124 | } | |||
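// [Editorial sketch] The FSHL semantics that SHLD implements, shown for the
// i16 case where the explicit AND above is needed; fshl16 is a hypothetical
// helper for illustration only (assumes <cstdint>).
static uint16_t fshl16(uint16_t X, uint16_t Y, unsigned Amt) {
  Amt &= 15; // i16 has no implicit modulo, hence the AND inserted above
  return Amt ? (uint16_t)((X << Amt) | (Y >> (16 - Amt))) : X;
}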
17125 | ||||
17126 | // Try to use a packed vector operation to handle i64 on 32-bit targets when | |||
17127 | // AVX512DQ is enabled. | |||
17128 | static SDValue LowerI64IntToFP_AVX512DQ(SDValue Op, SelectionDAG &DAG, | |||
17129 | const X86Subtarget &Subtarget) { | |||
17130 | assert((Op.getOpcode() == ISD::SINT_TO_FP || | |||
17131 | Op.getOpcode() == ISD::UINT_TO_FP) && "Unexpected opcode!"); | |||
17132 | SDValue Src = Op.getOperand(0); | |||
17133 | MVT SrcVT = Src.getSimpleValueType(); | |||
17134 | MVT VT = Op.getSimpleValueType(); | |||
17135 | ||||
17136 | if (!Subtarget.hasDQI() || SrcVT != MVT::i64 || Subtarget.is64Bit() || | |||
17137 | (VT != MVT::f32 && VT != MVT::f64)) | |||
17138 | return SDValue(); | |||
17139 | ||||
17140 | // Pack the i64 into a vector, do the operation and extract. | |||
17141 | ||||
17142 | // Use a 256-bit wide input to ensure the result is 128 bits in the f32 case. | |||
17143 | unsigned NumElts = Subtarget.hasVLX() ? 4 : 8; | |||
17144 | MVT VecInVT = MVT::getVectorVT(MVT::i64, NumElts); | |||
17145 | MVT VecVT = MVT::getVectorVT(VT, NumElts); | |||
17146 | ||||
17147 | SDLoc dl(Op); | |||
17148 | SDValue InVec = DAG.getNode(ISD::SCALAR_TO_VECTOR, dl, VecInVT, Src); | |||
17149 | SDValue CvtVec = DAG.getNode(Op.getOpcode(), dl, VecVT, InVec); | |||
17150 | return DAG.getNode(ISD::EXTRACT_VECTOR_ELT, dl, VT, CvtVec, | |||
17151 | DAG.getIntPtrConstant(0, dl)); | |||
17152 | } | |||
17153 | ||||
17154 | SDValue X86TargetLowering::LowerSINT_TO_FP(SDValue Op, | |||
17155 | SelectionDAG &DAG) const { | |||
17156 | SDValue Src = Op.getOperand(0); | |||
17157 | MVT SrcVT = Src.getSimpleValueType(); | |||
17158 | MVT VT = Op.getSimpleValueType(); | |||
17159 | SDLoc dl(Op); | |||
17160 | ||||
17161 | if (SrcVT.isVector()) { | |||
17162 | if (SrcVT == MVT::v2i32 && VT == MVT::v2f64) { | |||
17163 | return DAG.getNode(X86ISD::CVTSI2P, dl, VT, | |||
17164 | DAG.getNode(ISD::CONCAT_VECTORS, dl, MVT::v4i32, Src, | |||
17165 | DAG.getUNDEF(SrcVT))); | |||
17166 | } | |||
17167 | return SDValue(); | |||
17168 | } | |||
17169 | ||||
17170 | assert(SrcVT <= MVT::i64 && SrcVT >= MVT::i16 && | |||
17171 | "Unknown SINT_TO_FP to lower!"); | |||
17172 | ||||
17173 | // These are really Legal; return the operand so the caller accepts it as | |||
17174 | // Legal. | |||
17175 | if (SrcVT == MVT::i32 && isScalarFPTypeInSSEReg(VT)) | |||
17176 | return Op; | |||
17177 | if (SrcVT == MVT::i64 && isScalarFPTypeInSSEReg(VT) && Subtarget.is64Bit()) | |||
17178 | return Op; | |||
17179 | ||||
17180 | if (SDValue V = LowerI64IntToFP_AVX512DQ(Op, DAG, Subtarget)) | |||
17181 | return V; | |||
17182 | ||||
17183 | SDValue ValueToStore = Op.getOperand(0); | |||
17184 | if (SrcVT == MVT::i64 && isScalarFPTypeInSSEReg(VT) && | |||
17185 | !Subtarget.is64Bit()) | |||
17186 | // Bitcasting to f64 here allows us to do a single 64-bit store from | |||
17187 | // an SSE register, avoiding the store forwarding penalty that would come | |||
17188 | // with two 32-bit stores. | |||
17189 | ValueToStore = DAG.getBitcast(MVT::f64, ValueToStore); | |||
17190 | ||||
17191 | unsigned Size = SrcVT.getSizeInBits()/8; | |||
17192 | MachineFunction &MF = DAG.getMachineFunction(); | |||
17193 | auto PtrVT = getPointerTy(MF.getDataLayout()); | |||
17194 | int SSFI = MF.getFrameInfo().CreateStackObject(Size, Size, false); | |||
17195 | SDValue StackSlot = DAG.getFrameIndex(SSFI, PtrVT); | |||
17196 | SDValue Chain = DAG.getStore( | |||
17197 | DAG.getEntryNode(), dl, ValueToStore, StackSlot, | |||
17198 | MachinePointerInfo::getFixedStack(DAG.getMachineFunction(), SSFI)); | |||
17199 | return BuildFILD(Op, SrcVT, Chain, StackSlot, DAG); | |||
17200 | } | |||
17201 | ||||
17202 | SDValue X86TargetLowering::BuildFILD(SDValue Op, EVT SrcVT, SDValue Chain, | |||
17203 | SDValue StackSlot, | |||
17204 | SelectionDAG &DAG) const { | |||
17205 | // Build the FILD | |||
17206 | SDLoc DL(Op); | |||
17207 | SDVTList Tys; | |||
17208 | bool useSSE = isScalarFPTypeInSSEReg(Op.getValueType()); | |||
17209 | if (useSSE) | |||
17210 | Tys = DAG.getVTList(MVT::f64, MVT::Other, MVT::Glue); | |||
17211 | else | |||
17212 | Tys = DAG.getVTList(Op.getValueType(), MVT::Other); | |||
17213 | ||||
17214 | unsigned ByteSize = SrcVT.getSizeInBits()/8; | |||
17215 | ||||
17216 | FrameIndexSDNode *FI = dyn_cast<FrameIndexSDNode>(StackSlot); | |||
17217 | MachineMemOperand *MMO; | |||
17218 | if (FI) { | |||
17219 | int SSFI = FI->getIndex(); | |||
17220 | MMO = DAG.getMachineFunction().getMachineMemOperand( | |||
17221 | MachinePointerInfo::getFixedStack(DAG.getMachineFunction(), SSFI), | |||
17222 | MachineMemOperand::MOLoad, ByteSize, ByteSize); | |||
17223 | } else { | |||
17224 | MMO = cast<LoadSDNode>(StackSlot)->getMemOperand(); | |||
17225 | StackSlot = StackSlot.getOperand(1); | |||
17226 | } | |||
17227 | SDValue Ops[] = { Chain, StackSlot, DAG.getValueType(SrcVT) }; | |||
17228 | SDValue Result = DAG.getMemIntrinsicNode(useSSE ? X86ISD::FILD_FLAG : | |||
17229 | X86ISD::FILD, DL, | |||
17230 | Tys, Ops, SrcVT, MMO); | |||
17231 | ||||
17232 | if (useSSE) { | |||
17233 | Chain = Result.getValue(1); | |||
17234 | SDValue InFlag = Result.getValue(2); | |||
17235 | ||||
17236 | // FIXME: Currently the FST is glued to the FILD_FLAG. This | |||
17237 | // shouldn't be necessary except that RFP cannot be live across | |||
17238 | // multiple blocks. When stackifier is fixed, they can be uncoupled. | |||
17239 | MachineFunction &MF = DAG.getMachineFunction(); | |||
17240 | unsigned SSFISize = Op.getValueSizeInBits()/8; | |||
17241 | int SSFI = MF.getFrameInfo().CreateStackObject(SSFISize, SSFISize, false); | |||
17242 | auto PtrVT = getPointerTy(MF.getDataLayout()); | |||
17243 | SDValue StackSlot = DAG.getFrameIndex(SSFI, PtrVT); | |||
17244 | Tys = DAG.getVTList(MVT::Other); | |||
17245 | SDValue Ops[] = { | |||
17246 | Chain, Result, StackSlot, DAG.getValueType(Op.getValueType()), InFlag | |||
17247 | }; | |||
17248 | MachineMemOperand *MMO = DAG.getMachineFunction().getMachineMemOperand( | |||
17249 | MachinePointerInfo::getFixedStack(DAG.getMachineFunction(), SSFI), | |||
17250 | MachineMemOperand::MOStore, SSFISize, SSFISize); | |||
17251 | ||||
17252 | Chain = DAG.getMemIntrinsicNode(X86ISD::FST, DL, Tys, | |||
17253 | Ops, Op.getValueType(), MMO); | |||
17254 | Result = DAG.getLoad( | |||
17255 | Op.getValueType(), DL, Chain, StackSlot, | |||
17256 | MachinePointerInfo::getFixedStack(DAG.getMachineFunction(), SSFI)); | |||
17257 | } | |||
17258 | ||||
17259 | return Result; | |||
17260 | } | |||
17261 | ||||
17262 | /// 64-bit unsigned integer to double expansion. | |||
17263 | static SDValue LowerUINT_TO_FP_i64(SDValue Op, SelectionDAG &DAG, | |||
17264 | const X86Subtarget &Subtarget) { | |||
17265 | // This algorithm is not obvious. Here is what we're trying to output: | |||
17266 | /* | |||
17267 | movq %rax, %xmm0 | |||
17268 | punpckldq (c0), %xmm0 // c0: (uint4){ 0x43300000U, 0x45300000U, 0U, 0U } | |||
17269 | subpd (c1), %xmm0 // c1: (double2){ 0x1.0p52, 0x1.0p52 * 0x1.0p32 } | |||
17270 | #ifdef __SSE3__ | |||
17271 | haddpd %xmm0, %xmm0 | |||
17272 | #else | |||
17273 | pshufd $0x4e, %xmm0, %xmm1 | |||
17274 | addpd %xmm1, %xmm0 | |||
17275 | #endif | |||
17276 | */ | |||
17277 | ||||
17278 | SDLoc dl(Op); | |||
17279 | LLVMContext *Context = DAG.getContext(); | |||
17280 | ||||
17281 | // Build some magic constants. | |||
17282 | static const uint32_t CV0[] = { 0x43300000, 0x45300000, 0, 0 }; | |||
17283 | Constant *C0 = ConstantDataVector::get(*Context, CV0); | |||
17284 | auto PtrVT = DAG.getTargetLoweringInfo().getPointerTy(DAG.getDataLayout()); | |||
17285 | SDValue CPIdx0 = DAG.getConstantPool(C0, PtrVT, 16); | |||
17286 | ||||
17287 | SmallVector<Constant*,2> CV1; | |||
17288 | CV1.push_back( | |||
17289 | ConstantFP::get(*Context, APFloat(APFloat::IEEEdouble(), | |||
17290 | APInt(64, 0x4330000000000000ULL)))); | |||
17291 | CV1.push_back( | |||
17292 | ConstantFP::get(*Context, APFloat(APFloat::IEEEdouble(), | |||
17293 | APInt(64, 0x4530000000000000ULL)))); | |||
17294 | Constant *C1 = ConstantVector::get(CV1); | |||
17295 | SDValue CPIdx1 = DAG.getConstantPool(C1, PtrVT, 16); | |||
17296 | ||||
17297 | // Load the 64-bit value into an XMM register. | |||
17298 | SDValue XR1 = DAG.getNode(ISD::SCALAR_TO_VECTOR, dl, MVT::v2i64, | |||
17299 | Op.getOperand(0)); | |||
17300 | SDValue CLod0 = | |||
17301 | DAG.getLoad(MVT::v4i32, dl, DAG.getEntryNode(), CPIdx0, | |||
17302 | MachinePointerInfo::getConstantPool(DAG.getMachineFunction()), | |||
17303 | /* Alignment = */ 16); | |||
17304 | SDValue Unpck1 = | |||
17305 | getUnpackl(DAG, dl, MVT::v4i32, DAG.getBitcast(MVT::v4i32, XR1), CLod0); | |||
17306 | ||||
17307 | SDValue CLod1 = | |||
17308 | DAG.getLoad(MVT::v2f64, dl, CLod0.getValue(1), CPIdx1, | |||
17309 | MachinePointerInfo::getConstantPool(DAG.getMachineFunction()), | |||
17310 | /* Alignment = */ 16); | |||
17311 | SDValue XR2F = DAG.getBitcast(MVT::v2f64, Unpck1); | |||
17312 | // TODO: Are there any fast-math-flags to propagate here? | |||
17313 | SDValue Sub = DAG.getNode(ISD::FSUB, dl, MVT::v2f64, XR2F, CLod1); | |||
17314 | SDValue Result; | |||
17315 | ||||
17316 | if (Subtarget.hasSSE3()) { | |||
17317 | // FIXME: The 'haddpd' instruction may be slower than 'shuffle + addsd'. | |||
17318 | Result = DAG.getNode(X86ISD::FHADD, dl, MVT::v2f64, Sub, Sub); | |||
17319 | } else { | |||
17320 | SDValue Shuffle = DAG.getVectorShuffle(MVT::v2f64, dl, Sub, Sub, {1,-1}); | |||
17321 | Result = DAG.getNode(ISD::FADD, dl, MVT::v2f64, Shuffle, Sub); | |||
17322 | } | |||
17323 | ||||
17324 | return DAG.getNode(ISD::EXTRACT_VECTOR_ELT, dl, MVT::f64, Result, | |||
17325 | DAG.getIntPtrConstant(0, dl)); | |||
17326 | } | |||
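// [Editorial sketch] Scalar check of the magic constants above, assuming IEEE
// doubles; uint64ToDouble is a hypothetical helper for illustration only.
static double uint64ToDouble(uint64_t V) {
  // 0x4330000000000000 is 2^52 and 0x4530000000000000 is 2^84. Splicing the
  // low/high 32-bit halves of V into their mantissas (the punpckldq) yields
  // exactly 2^52 + lo and 2^84 + hi * 2^32.
  double LoD = BitsToDouble(0x4330000000000000ULL | (V & 0xffffffffULL));
  double HiD = BitsToDouble(0x4530000000000000ULL | (V >> 32));
  // Subtract the biases (the subpd) and sum the halves (the haddpd).
  return (LoD - 0x1.0p52) + (HiD - 0x1.0p84);
}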
17327 | ||||
17328 | /// 32-bit unsigned integer to float expansion. | |||
17329 | static SDValue LowerUINT_TO_FP_i32(SDValue Op, SelectionDAG &DAG, | |||
17330 | const X86Subtarget &Subtarget) { | |||
17331 | SDLoc dl(Op); | |||
17332 | // FP constant to bias correct the final result. | |||
17333 | SDValue Bias = DAG.getConstantFP(BitsToDouble(0x4330000000000000ULL), dl, | |||
17334 | MVT::f64); | |||
17335 | ||||
17336 | // Load the 32-bit value into an XMM register. | |||
17337 | SDValue Load = DAG.getNode(ISD::SCALAR_TO_VECTOR, dl, MVT::v4i32, | |||
17338 | Op.getOperand(0)); | |||
17339 | ||||
17340 | // Zero out the upper parts of the register. | |||
17341 | Load = getShuffleVectorZeroOrUndef(Load, 0, true, Subtarget, DAG); | |||
17342 | ||||
17343 | Load = DAG.getNode(ISD::EXTRACT_VECTOR_ELT, dl, MVT::f64, | |||
17344 | DAG.getBitcast(MVT::v2f64, Load), | |||
17345 | DAG.getIntPtrConstant(0, dl)); | |||
17346 | ||||
17347 | // Or the load with the bias. | |||
17348 | SDValue Or = DAG.getNode( | |||
17349 | ISD::OR, dl, MVT::v2i64, | |||
17350 | DAG.getBitcast(MVT::v2i64, | |||
17351 | DAG.getNode(ISD::SCALAR_TO_VECTOR, dl, MVT::v2f64, Load)), | |||
17352 | DAG.getBitcast(MVT::v2i64, | |||
17353 | DAG.getNode(ISD::SCALAR_TO_VECTOR, dl, MVT::v2f64, Bias))); | |||
17354 | Or = | |||
17355 | DAG.getNode(ISD::EXTRACT_VECTOR_ELT, dl, MVT::f64, | |||
17356 | DAG.getBitcast(MVT::v2f64, Or), DAG.getIntPtrConstant(0, dl)); | |||
17357 | ||||
17358 | // Subtract the bias. | |||
17359 | // TODO: Are there any fast-math-flags to propagate here? | |||
17360 | SDValue Sub = DAG.getNode(ISD::FSUB, dl, MVT::f64, Or, Bias); | |||
17361 | ||||
17362 | // Handle final rounding. | |||
17363 | return DAG.getFPExtendOrRound(Sub, dl, Op.getSimpleValueType()); | |||
17364 | } | |||
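// [Editorial sketch] The same bias trick in scalar form, assuming IEEE
// doubles; uint32ToDouble is a hypothetical helper for illustration only.
static double uint32ToDouble(uint32_t V) {
  // OR-ing V into the mantissa of 2^52 (the 0x4330000000000000 bias) gives
  // exactly 2^52 + V; subtracting the bias then recovers (double)V.
  return BitsToDouble(0x4330000000000000ULL | V) - 0x1.0p52;
}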
17365 | ||||
17366 | static SDValue lowerUINT_TO_FP_v2i32(SDValue Op, SelectionDAG &DAG, | |||
17367 | const X86Subtarget &Subtarget, | |||
17368 | const SDLoc &DL) { | |||
17369 | if (Op.getSimpleValueType() != MVT::v2f64) | |||
17370 | return SDValue(); | |||
17371 | ||||
17372 | SDValue N0 = Op.getOperand(0); | |||
17373 | assert(N0.getSimpleValueType() == MVT::v2i32 && "Unexpected input type"); | |||
17374 | ||||
17375 | // Legalize to v4i32 type. | |||
17376 | N0 = DAG.getNode(ISD::CONCAT_VECTORS, DL, MVT::v4i32, N0, | |||
17377 | DAG.getUNDEF(MVT::v2i32)); | |||
17378 | ||||
17379 | if (Subtarget.hasAVX512()) | |||
17380 | return DAG.getNode(X86ISD::CVTUI2P, DL, MVT::v2f64, N0); | |||
17381 | ||||
17382 | // Same implementation as VectorLegalizer::ExpandUINT_TO_FLOAT, | |||
17383 | // but using v2i32 to v2f64 with X86ISD::CVTSI2P. | |||
17384 | SDValue HalfWord = DAG.getConstant(16, DL, MVT::v4i32); | |||
17385 | SDValue HalfWordMask = DAG.getConstant(0x0000FFFF, DL, MVT::v4i32); | |||
17386 | ||||
17387 | // Two to the power of half-word-size. | |||
17388 | SDValue TWOHW = DAG.getConstantFP(1 << 16, DL, MVT::v2f64); | |||
17389 | ||||
17390 | // Clear upper part of LO, lower HI. | |||
17391 | SDValue HI = DAG.getNode(ISD::SRL, DL, MVT::v4i32, N0, HalfWord); | |||
17392 | SDValue LO = DAG.getNode(ISD::AND, DL, MVT::v4i32, N0, HalfWordMask); | |||
17393 | ||||
17394 | SDValue fHI = DAG.getNode(X86ISD::CVTSI2P, DL, MVT::v2f64, HI); | |||
17395 | fHI = DAG.getNode(ISD::FMUL, DL, MVT::v2f64, fHI, TWOHW); | |||
17396 | SDValue fLO = DAG.getNode(X86ISD::CVTSI2P, DL, MVT::v2f64, LO); | |||
17397 | ||||
17398 | // Add the two halves. | |||
17399 | return DAG.getNode(ISD::FADD, DL, MVT::v2f64, fHI, fLO); | |||
17400 | } | |||
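// [Editorial sketch] Scalar model of the half-word split above; both 16-bit
// halves are non-negative, so the signed CVTSI2P conversion is safe.
// uint32ViaHalves is a hypothetical helper for illustration only.
static double uint32ViaHalves(uint32_t V) {
  double FHi = (double)(V >> 16) * 65536.0; // fHI * TWOHW (2^16)
  double FLo = (double)(V & 0xFFFF);        // fLO
  return FHi + FLo;                         // the final FADD
}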
17401 | ||||
17402 | static SDValue lowerUINT_TO_FP_vXi32(SDValue Op, SelectionDAG &DAG, | |||
17403 | const X86Subtarget &Subtarget) { | |||
17404 | // The algorithm is the following: | |||
17405 | // #ifdef __SSE4_1__ | |||
17406 | // uint4 lo = _mm_blend_epi16( v, (uint4) 0x4b000000, 0xaa); | |||
17407 | // uint4 hi = _mm_blend_epi16( _mm_srli_epi32(v,16), | |||
17408 | // (uint4) 0x53000000, 0xaa); | |||
17409 | // #else | |||
17410 | // uint4 lo = (v & (uint4) 0xffff) | (uint4) 0x4b000000; | |||
17411 | // uint4 hi = (v >> 16) | (uint4) 0x53000000; | |||
17412 | // #endif | |||
17413 | // float4 fhi = (float4) hi - (0x1.0p39f + 0x1.0p23f); | |||
17414 | // return (float4) lo + fhi; | |||
17415 | ||||
17416 | // We shouldn't use it when unsafe-fp-math is enabled though: we might later | |||
17417 | // reassociate the two FADDs, and if we do that, the algorithm fails | |||
17418 | // spectacularly (PR24512). | |||
17419 | // FIXME: If we ever have some kind of Machine FMF, this should be marked | |||
17420 | // as non-fast and always be enabled. Why isn't SDAG FMF enough? Because | |||
17421 | // there's also the MachineCombiner reassociations happening on Machine IR. | |||
17422 | if (DAG.getTarget().Options.UnsafeFPMath) | |||
17423 | return SDValue(); | |||
17424 | ||||
17425 | SDLoc DL(Op); | |||
17426 | SDValue V = Op->getOperand(0); | |||
17427 | MVT VecIntVT = V.getSimpleValueType(); | |||
17428 | bool Is128 = VecIntVT == MVT::v4i32; | |||
17429 | MVT VecFloatVT = Is128 ? MVT::v4f32 : MVT::v8f32; | |||
17430 | // If we convert to something other than the supported type, e.g., to v4f64, | |||
17431 | // abort early. | |||
17432 | if (VecFloatVT != Op->getSimpleValueType(0)) | |||
17433 | return SDValue(); | |||
17434 | ||||
17435 | assert((VecIntVT == MVT::v4i32 || VecIntVT == MVT::v8i32) && | |||
17436 | "Unsupported custom type"); | |||
17437 | ||||
17438 | // In the #ifdef/#else code, we have in common: | |||
17439 | // - The vector of constants: | |||
17440 | // -- 0x4b000000 | |||
17441 | // -- 0x53000000 | |||
17442 | // - A shift: | |||
17443 | // -- v >> 16 | |||
17444 | ||||
17445 | // Create the splat vector for 0x4b000000. | |||
17446 | SDValue VecCstLow = DAG.getConstant(0x4b000000, DL, VecIntVT); | |||
17447 | // Create the splat vector for 0x53000000. | |||
17448 | SDValue VecCstHigh = DAG.getConstant(0x53000000, DL, VecIntVT); | |||
17449 | ||||
17450 | // Create the right shift. | |||
17451 | SDValue VecCstShift = DAG.getConstant(16, DL, VecIntVT); | |||
17452 | SDValue HighShift = DAG.getNode(ISD::SRL, DL, VecIntVT, V, VecCstShift); | |||
17453 | ||||
17454 | SDValue Low, High; | |||
17455 | if (Subtarget.hasSSE41()) { | |||
17456 | MVT VecI16VT = Is128 ? MVT::v8i16 : MVT::v16i16; | |||
17457 | // uint4 lo = _mm_blend_epi16( v, (uint4) 0x4b000000, 0xaa); | |||
17458 | SDValue VecCstLowBitcast = DAG.getBitcast(VecI16VT, VecCstLow); | |||
17459 | SDValue VecBitcast = DAG.getBitcast(VecI16VT, V); | |||
17460 | // Low will be bitcasted right away, so do not bother bitcasting back to its | |||
17461 | // original type. | |||
17462 | Low = DAG.getNode(X86ISD::BLENDI, DL, VecI16VT, VecBitcast, | |||
17463 | VecCstLowBitcast, DAG.getConstant(0xaa, DL, MVT::i32)); | |||
17464 | // uint4 hi = _mm_blend_epi16( _mm_srli_epi32(v,16), | |||
17465 | // (uint4) 0x53000000, 0xaa); | |||
17466 | SDValue VecCstHighBitcast = DAG.getBitcast(VecI16VT, VecCstHigh); | |||
17467 | SDValue VecShiftBitcast = DAG.getBitcast(VecI16VT, HighShift); | |||
17468 | // High will be bitcasted right away, so do not bother bitcasting back to | |||
17469 | // its original type. | |||
17470 | High = DAG.getNode(X86ISD::BLENDI, DL, VecI16VT, VecShiftBitcast, | |||
17471 | VecCstHighBitcast, DAG.getConstant(0xaa, DL, MVT::i32)); | |||
17472 | } else { | |||
17473 | SDValue VecCstMask = DAG.getConstant(0xffff, DL, VecIntVT); | |||
17474 | // uint4 lo = (v & (uint4) 0xffff) | (uint4) 0x4b000000; | |||
17475 | SDValue LowAnd = DAG.getNode(ISD::AND, DL, VecIntVT, V, VecCstMask); | |||
17476 | Low = DAG.getNode(ISD::OR, DL, VecIntVT, LowAnd, VecCstLow); | |||
17477 | ||||
17478 | // uint4 hi = (v >> 16) | (uint4) 0x53000000; | |||
17479 | High = DAG.getNode(ISD::OR, DL, VecIntVT, HighShift, VecCstHigh); | |||
17480 | } | |||
17481 | ||||
17482 | // Create the vector constant for -(0x1.0p39f + 0x1.0p23f). | |||
17483 | SDValue VecCstFAdd = DAG.getConstantFP( | |||
17484 | APFloat(APFloat::IEEEsingle(), APInt(32, 0xD3000080)), DL, VecFloatVT); | |||
17485 | ||||
17486 | // float4 fhi = (float4) hi - (0x1.0p39f + 0x1.0p23f); | |||
17487 | SDValue HighBitcast = DAG.getBitcast(VecFloatVT, High); | |||
17488 | // TODO: Are there any fast-math-flags to propagate here? | |||
17489 | SDValue FHigh = | |||
17490 | DAG.getNode(ISD::FADD, DL, VecFloatVT, HighBitcast, VecCstFAdd); | |||
17491 | // return (float4) lo + fhi; | |||
17492 | SDValue LowBitcast = DAG.getBitcast(VecFloatVT, Low); | |||
17493 | return DAG.getNode(ISD::FADD, DL, VecFloatVT, LowBitcast, FHigh); | |||
17494 | } | |||
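// [Editorial sketch] Scalar verification of the constants above, assuming
// IEEE floats; uint32ToFloatBlend is a hypothetical helper for illustration.
static float uint32ToFloatBlend(uint32_t V) {
  // lo = 2^23 + (V & 0xffff) and hi = 2^39 + (V >> 16) * 2^16, built by
  // OR-ing (or blending) the halves into the mantissas of 0x4b000000 and
  // 0x53000000 respectively.
  float Lo = BitsToFloat(0x4b000000u | (V & 0xffffu));
  float Hi = BitsToFloat(0x53000000u | (V >> 16));
  // 0xD3000080 is -(0x1.0p39f + 0x1.0p23f); adding it cancels both biases.
  return Lo + (Hi + BitsToFloat(0xD3000080u));
}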
17495 | ||||
17496 | static SDValue lowerUINT_TO_FP_vec(SDValue Op, SelectionDAG &DAG, | |||
17497 | const X86Subtarget &Subtarget) { | |||
17498 | SDValue N0 = Op.getOperand(0); | |||
17499 | MVT SrcVT = N0.getSimpleValueType(); | |||
17500 | SDLoc dl(Op); | |||
17501 | ||||
17502 | switch (SrcVT.SimpleTy) { | |||
17503 | default: | |||
17504 | llvm_unreachable("Custom UINT_TO_FP is not supported!")::llvm::llvm_unreachable_internal("Custom UINT_TO_FP is not supported!" , "/build/llvm-toolchain-snapshot-8~svn350071/lib/Target/X86/X86ISelLowering.cpp" , 17504); | |||
17505 | case MVT::v2i32: | |||
17506 | return lowerUINT_TO_FP_v2i32(Op, DAG, Subtarget, dl); | |||
17507 | case MVT::v4i32: | |||
17508 | case MVT::v8i32: | |||
17509 | assert(!Subtarget.hasAVX512()); | |||
17510 | return lowerUINT_TO_FP_vXi32(Op, DAG, Subtarget); | |||
17511 | } | |||
17512 | } | |||
17513 | ||||
17514 | SDValue X86TargetLowering::LowerUINT_TO_FP(SDValue Op, | |||
17515 | SelectionDAG &DAG) const { | |||
17516 | SDValue N0 = Op.getOperand(0); | |||
17517 | SDLoc dl(Op); | |||
17518 | auto PtrVT = getPointerTy(DAG.getDataLayout()); | |||
17519 | ||||
17520 | if (Op.getSimpleValueType().isVector()) | |||
17521 | return lowerUINT_TO_FP_vec(Op, DAG, Subtarget); | |||
17522 | ||||
17523 | MVT SrcVT = N0.getSimpleValueType(); | |||
17524 | MVT DstVT = Op.getSimpleValueType(); | |||
17525 | ||||
17526 | if (Subtarget.hasAVX512() && isScalarFPTypeInSSEReg(DstVT) && | |||
17527 | (SrcVT == MVT::i32 || (SrcVT == MVT::i64 && Subtarget.is64Bit()))) { | |||
17528 | // Conversions from unsigned i32 to f32/f64 are legal, | |||
17529 | // using VCVTUSI2SS/SD. Same for i64 in 64-bit mode. | |||
17530 | return Op; | |||
17531 | } | |||
17532 | ||||
17533 | if (SDValue V = LowerI64IntToFP_AVX512DQ(Op, DAG, Subtarget)) | |||
17534 | return V; | |||
17535 | ||||
17536 | if (SrcVT == MVT::i64 && DstVT == MVT::f64 && X86ScalarSSEf64) | |||
17537 | return LowerUINT_TO_FP_i64(Op, DAG, Subtarget); | |||
17538 | if (SrcVT == MVT::i32 && X86ScalarSSEf64) | |||
17539 | return LowerUINT_TO_FP_i32(Op, DAG, Subtarget); | |||
17540 | if (Subtarget.is64Bit() && SrcVT == MVT::i64 && DstVT == MVT::f32) | |||
17541 | return SDValue(); | |||
17542 | ||||
17543 | // Make a 64-bit buffer, and use it to build an FILD. | |||
17544 | SDValue StackSlot = DAG.CreateStackTemporary(MVT::i64); | |||
17545 | if (SrcVT == MVT::i32) { | |||
17546 | SDValue OffsetSlot = DAG.getMemBasePlusOffset(StackSlot, 4, dl); | |||
17547 | SDValue Store1 = DAG.getStore(DAG.getEntryNode(), dl, Op.getOperand(0), | |||
17548 | StackSlot, MachinePointerInfo()); | |||
17549 | SDValue Store2 = DAG.getStore(Store1, dl, DAG.getConstant(0, dl, MVT::i32), | |||
17550 | OffsetSlot, MachinePointerInfo()); | |||
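    // Together the two stores materialize Op zero-extended to i64 in the
    // stack slot (low word = Op, high word = 0), which FILD then reads back
    // as a non-negative signed integer.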
    SDValue Fild = BuildFILD(Op, MVT::i64, Store2, StackSlot, DAG);
    return Fild;
  }

  assert(SrcVT == MVT::i64 && "Unexpected type in UINT_TO_FP");
  SDValue ValueToStore = Op.getOperand(0);
  if (isScalarFPTypeInSSEReg(Op.getValueType()) && !Subtarget.is64Bit())
    // Bitcasting to f64 here allows us to do a single 64-bit store from
    // an SSE register, avoiding the store forwarding penalty that would come
    // with two 32-bit stores.
    ValueToStore = DAG.getBitcast(MVT::f64, ValueToStore);
  SDValue Store = DAG.getStore(DAG.getEntryNode(), dl, ValueToStore, StackSlot,
                               MachinePointerInfo());
  // For an i64 source, we need to add the appropriate power of 2 if the input
  // was negative. This is the same as the optimization in
  // DAGTypeLegalizer::ExpandIntOp_UINT_TO_FP, and for it to be safe here,
  // we must be careful to do the computation in x87 extended precision, not
  // in SSE. (The generic code can't know it's OK to do this, or how to.)
  int SSFI = cast<FrameIndexSDNode>(StackSlot)->getIndex();
  MachineMemOperand *MMO = DAG.getMachineFunction().getMachineMemOperand(
      MachinePointerInfo::getFixedStack(DAG.getMachineFunction(), SSFI),
      MachineMemOperand::MOLoad, 8, 8);

  SDVTList Tys = DAG.getVTList(MVT::f80, MVT::Other);
  SDValue Ops[] = { Store, StackSlot, DAG.getValueType(MVT::i64) };
  SDValue Fild = DAG.getMemIntrinsicNode(X86ISD::FILD, dl, Tys, Ops,
                                         MVT::i64, MMO);

  APInt FF(32, 0x5F800000ULL);
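  // 0x5F800000 is the IEEE-754 single encoding of 2^64. FILD interpreted the
  // i64 bits as signed, so when the sign bit was set the loaded value is
  // Input - 2^64; adding 2^64 back recovers the unsigned value.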

  // Check whether the sign bit is set.
  SDValue SignSet = DAG.getSetCC(
      dl, getSetCCResultType(DAG.getDataLayout(), *DAG.getContext(), MVT::i64),
      Op.getOperand(0), DAG.getConstant(0, dl, MVT::i64), ISD::SETLT);

  // Build a 64 bit pair (0, FF) in the constant pool, with FF in the lo bits.
  SDValue FudgePtr = DAG.getConstantPool(
      ConstantInt::get(*DAG.getContext(), FF.zext(64)), PtrVT);

  // Get a pointer to FF if the sign bit was set, or to 0 otherwise.
  SDValue Zero = DAG.getIntPtrConstant(0, dl);
  SDValue Four = DAG.getIntPtrConstant(4, dl);
  SDValue Offset = DAG.getSelect(dl, Zero.getValueType(), SignSet, Zero, Four);
  FudgePtr = DAG.getNode(ISD::ADD, dl, PtrVT, FudgePtr, Offset);

  // Load the value out, extending it from f32 to f80.
  // FIXME: Avoid the extend by constructing the right constant pool?
  SDValue Fudge = DAG.getExtLoad(
      ISD::EXTLOAD, dl, MVT::f80, DAG.getEntryNode(), FudgePtr,
      MachinePointerInfo::getConstantPool(DAG.getMachineFunction()), MVT::f32,
      /* Alignment = */ 4);
  // Extend everything to 80 bits to force it to be done on x87.
  // TODO: Are there any fast-math-flags to propagate here?
  SDValue Add = DAG.getNode(ISD::FADD, dl, MVT::f80, Fild, Fudge);
  return DAG.getNode(ISD::FP_ROUND, dl, DstVT, Add,
                     DAG.getIntPtrConstant(0, dl));
}

// If the given FP_TO_SINT (IsSigned) or FP_TO_UINT (!IsSigned) operation
// is legal, or has an fp128 or f16 source (which needs to be promoted to f32),
// just return an <SDValue(), SDValue()> pair.
// Otherwise it is assumed to be a conversion from one of f32, f64 or f80
// to i16, i32 or i64, and we lower it to a legal sequence.
// If lowered to the final integer result we return a <result, SDValue()> pair.
// Otherwise we lower it to a sequence ending with a FIST, return a
// <FIST, StackSlot> pair, and the caller is responsible for loading
// the final integer result from StackSlot.
std::pair<SDValue,SDValue>
X86TargetLowering::FP_TO_INTHelper(SDValue Op, SelectionDAG &DAG,
                                   bool IsSigned, bool IsReplace) const {
  SDLoc DL(Op);

  EVT DstTy = Op.getValueType();
  EVT TheVT = Op.getOperand(0).getValueType();
  auto PtrVT = getPointerTy(DAG.getDataLayout());

  if (TheVT != MVT::f32 && TheVT != MVT::f64 && TheVT != MVT::f80) {
    // f16 must be promoted before using the lowering in this routine.
    // fp128 does not use this lowering.
    return std::make_pair(SDValue(), SDValue());
  }

  // If using FIST to compute an unsigned i64, we'll need some fixup
  // to handle values above the maximum signed i64. A FIST is always
  // used for the 32-bit subtarget, but also for f80 on a 64-bit target.
  bool UnsignedFixup = !IsSigned &&
                       DstTy == MVT::i64 &&
                       (!Subtarget.is64Bit() ||
                        !isScalarFPTypeInSSEReg(TheVT));

  if (!IsSigned && DstTy != MVT::i64 && !Subtarget.hasAVX512()) {
    // Replace the fp-to-uint32 operation with an fp-to-sint64 FIST.
    // The low 32 bits of the fist result will have the correct uint32 result.
    assert(DstTy == MVT::i32 && "Unexpected FP_TO_UINT");
    DstTy = MVT::i64;
  }

  assert(DstTy.getSimpleVT() <= MVT::i64 &&
         DstTy.getSimpleVT() >= MVT::i16 &&
         "Unknown FP_TO_INT to lower!");

  // These are really Legal.
  if (DstTy == MVT::i32 &&
      isScalarFPTypeInSSEReg(Op.getOperand(0).getValueType()))
    return std::make_pair(SDValue(), SDValue());
  if (Subtarget.is64Bit() &&
      DstTy == MVT::i64 &&
      isScalarFPTypeInSSEReg(Op.getOperand(0).getValueType()))
    return std::make_pair(SDValue(), SDValue());

  // We lower FP->int64 into FISTP64 followed by a load from a temporary
  // stack slot.
  MachineFunction &MF = DAG.getMachineFunction();
  unsigned MemSize = DstTy.getSizeInBits()/8;
  int SSFI = MF.getFrameInfo().CreateStackObject(MemSize, MemSize, false);
  SDValue StackSlot = DAG.getFrameIndex(SSFI, PtrVT);

  unsigned Opc;
  switch (DstTy.getSimpleVT().SimpleTy) {
  default: llvm_unreachable("Invalid FP_TO_SINT to lower!");
  case MVT::i16: Opc = X86ISD::FP_TO_INT16_IN_MEM; break;
  case MVT::i32: Opc = X86ISD::FP_TO_INT32_IN_MEM; break;
  case MVT::i64: Opc = X86ISD::FP_TO_INT64_IN_MEM; break;
  }

  SDValue Chain = DAG.getEntryNode();
  SDValue Value = Op.getOperand(0);
  SDValue Adjust; // 0x0 or 0x80000000, for result sign bit adjustment.

  if (UnsignedFixup) {
    //
    // Conversion to unsigned i64 is implemented with a select,
    // depending on whether the source value fits in the range
    // of a signed i64. Let Thresh be the FP equivalent of
    // 0x8000000000000000ULL.
    //
    //    Adjust i32 = (Value < Thresh) ? 0 : 0x80000000;
    //    FistSrc    = (Value < Thresh) ? Value : (Value - Thresh);
    //    Fist-to-mem64 FistSrc
    //    Add 0 or 0x800...0ULL to the 64-bit result, which is equivalent
    //    to XOR'ing the high 32 bits with Adjust.
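    //
    // For example, for Value = 2^63 + 1024.0 the compare fails, so
    // Adjust = 0x80000000 and FistSrc = Value - Thresh = 1024.0; the FIST
    // stores 1024, and XOR'ing the high dword with Adjust yields
    // 0x8000000000000400, i.e. 2^63 + 1024 as an unsigned i64.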
    //
    // Being a power of 2, Thresh is exactly representable in all FP formats.
    // For X87 we'd like to use the smallest FP type for this constant, but
    // for DAG type consistency we have to match the FP operand type.

    APFloat Thresh(APFloat::IEEEsingle(), APInt(32, 0x5f000000));
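    // 0x5f000000 is the IEEE-754 single encoding of 2^63, the value of
    // 0x8000000000000000ULL; as a power of 2 it converts exactly to f64/f80
    // below.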
    LLVM_ATTRIBUTE_UNUSED APFloat::opStatus Status = APFloat::opOK;
    bool LosesInfo = false;
    if (TheVT == MVT::f64)
      // The rounding mode is irrelevant as the conversion should be exact.
      Status = Thresh.convert(APFloat::IEEEdouble(),
                              APFloat::rmNearestTiesToEven, &LosesInfo);
    else if (TheVT == MVT::f80)
      Status = Thresh.convert(APFloat::x87DoubleExtended(),
                              APFloat::rmNearestTiesToEven, &LosesInfo);

    assert(Status == APFloat::opOK && !LosesInfo &&
           "FP conversion should have been exact");

    SDValue ThreshVal = DAG.getConstantFP(Thresh, DL, TheVT);

    SDValue Cmp = DAG.getSetCC(DL,
                               getSetCCResultType(DAG.getDataLayout(),
                                                  *DAG.getContext(), TheVT),
                               Value, ThreshVal, ISD::SETLT);
    Adjust = DAG.getSelect(DL, MVT::i32, Cmp,
                           DAG.getConstant(0, DL, MVT::i32),
                           DAG.getConstant(0x80000000, DL, MVT::i32));
    SDValue Sub = DAG.getNode(ISD::FSUB, DL, TheVT, Value, ThreshVal);
    Cmp = DAG.getSetCC(DL, getSetCCResultType(DAG.getDataLayout(),
                                              *DAG.getContext(), TheVT),
                       Value, ThreshVal, ISD::SETLT);
    Value = DAG.getSelect(DL, TheVT, Cmp, Value, Sub);
  }

  // FIXME This causes a redundant load/store if the SSE-class value is already
  // in memory, such as if it is on the callstack.
  if (isScalarFPTypeInSSEReg(TheVT)) {
    assert(DstTy == MVT::i64 && "Invalid FP_TO_SINT to lower!");
    Chain = DAG.getStore(Chain, DL, Value, StackSlot,
                         MachinePointerInfo::getFixedStack(MF, SSFI));
    SDVTList Tys = DAG.getVTList(Op.getOperand(0).getValueType(), MVT::Other);
    SDValue Ops[] = {
      Chain, StackSlot, DAG.getValueType(TheVT)
    };

    MachineMemOperand *MMO =
        MF.getMachineMemOperand(MachinePointerInfo::getFixedStack(MF, SSFI),
                                MachineMemOperand::MOLoad, MemSize, MemSize);
    Value = DAG.getMemIntrinsicNode(X86ISD::FLD, DL, Tys, Ops, DstTy, MMO);
    Chain = Value.getValue(1);
    SSFI = MF.getFrameInfo().CreateStackObject(MemSize, MemSize, false);
    StackSlot = DAG.getFrameIndex(SSFI, PtrVT);
  }

  MachineMemOperand *MMO =
      MF.getMachineMemOperand(MachinePointerInfo::getFixedStack(MF, SSFI),
                              MachineMemOperand::MOStore, MemSize, MemSize);

  if (UnsignedFixup) {

    // Insert the FIST, load its result as two i32's,
    // and XOR the high i32 with Adjust.

    SDValue FistOps[] = { Chain, Value, StackSlot };
    SDValue FIST = DAG.getMemIntrinsicNode(Opc, DL, DAG.getVTList(MVT::Other),
                                           FistOps, DstTy, MMO);

    SDValue Low32 =
        DAG.getLoad(MVT::i32, DL, FIST, StackSlot, MachinePointerInfo());
    SDValue HighAddr = DAG.getMemBasePlusOffset(StackSlot, 4, DL);

    SDValue High32 =
        DAG.getLoad(MVT::i32, DL, FIST, HighAddr, MachinePointerInfo());
    High32 = DAG.getNode(ISD::XOR, DL, MVT::i32, High32, Adjust);

    if (Subtarget.is64Bit()) {
      // Join High32 and Low32 into a 64-bit result.
      // (High32 << 32) | Low32
      Low32 = DAG.getNode(ISD::ZERO_EXTEND, DL, MVT::i64, Low32);
      High32 = DAG.getNode(ISD::ANY_EXTEND, DL, MVT::i64, High32);
      High32 = DAG.getNode(ISD::SHL, DL, MVT::i64, High32,
                           DAG.getConstant(32, DL, MVT::i8));
      SDValue Result = DAG.getNode(ISD::OR, DL, MVT::i64, High32, Low32);
      return std::make_pair(Result, SDValue());
    }

    SDValue ResultOps[] = { Low32, High32 };

    SDValue pair = IsReplace
      ? DAG.getNode(ISD::BUILD_PAIR, DL, MVT::i64, ResultOps)
      : DAG.getMergeValues(ResultOps, DL);
    return std::make_pair(pair, SDValue());
  } else {
    // Build the FP_TO_INT*_IN_MEM
    SDValue Ops[] = { Chain, Value, StackSlot };
    SDValue FIST = DAG.getMemIntrinsicNode(Opc, DL, DAG.getVTList(MVT::Other),
                                           Ops, DstTy, MMO);
    return std::make_pair(FIST, StackSlot);
  }
}

static SDValue LowerAVXExtend(SDValue Op, SelectionDAG &DAG,
                              const X86Subtarget &Subtarget) {
  MVT VT = Op->getSimpleValueType(0);
  SDValue In = Op->getOperand(0);
  MVT InVT = In.getSimpleValueType();
  SDLoc dl(Op);

  assert(VT.isVector() && InVT.isVector() && "Expected vector type");
  assert(VT.getVectorNumElements() == InVT.getVectorNumElements() &&
         "Expected same number of elements");
  assert((VT.getVectorElementType() == MVT::i16 ||
          VT.getVectorElementType() == MVT::i32 ||
          VT.getVectorElementType() == MVT::i64) &&
         "Unexpected element type");
  assert((InVT.getVectorElementType() == MVT::i8 ||
          InVT.getVectorElementType() == MVT::i16 ||
          InVT.getVectorElementType() == MVT::i32) &&
         "Unexpected element type");

  // Custom legalize v8i8->v8i64 on CPUs without avx512bw.
  if (InVT == MVT::v8i8) {
    if (!ExperimentalVectorWideningLegalization || VT != MVT::v8i64)
      return SDValue();

    In = DAG.getNode(ISD::CONCAT_VECTORS, SDLoc(Op),
                     MVT::v16i8, In, DAG.getUNDEF(MVT::v8i8));
    // FIXME: This should be ANY_EXTEND_VECTOR_INREG for ANY_EXTEND input.
    return DAG.getNode(ISD::ZERO_EXTEND_VECTOR_INREG, dl, VT, In);
  }

  if (Subtarget.hasInt256())
    return Op;

  // Optimize vectors in AVX mode:
  //
  //   v8i16 -> v8i32
  //   Use vpmovzwd for 4 lower elements  v8i16 -> v4i32.
  //   Use vpunpckhwd for 4 upper elements  v8i16 -> v4i32.
  //   Concat upper and lower parts.
  //
  //   v4i32 -> v4i64
  //   Use vpmovzdq for 4 lower elements  v4i32 -> v2i64.
  //   Use vpunpckhdq for 4 upper elements  v4i32 -> v2i64.
  //   Concat upper and lower parts.
  //

  MVT HalfVT = MVT::getVectorVT(VT.getVectorElementType(),
                                VT.getVectorNumElements() / 2);

  SDValue OpLo = DAG.getNode(ISD::ZERO_EXTEND_VECTOR_INREG, dl, HalfVT, In);

  SDValue ZeroVec = DAG.getConstant(0, dl, InVT);
  SDValue Undef = DAG.getUNDEF(InVT);
  bool NeedZero = Op.getOpcode() == ISD::ZERO_EXTEND;
  SDValue OpHi = getUnpackh(DAG, dl, InVT, In, NeedZero ? ZeroVec : Undef);
  OpHi = DAG.getBitcast(HalfVT, OpHi);

  return DAG.getNode(ISD::CONCAT_VECTORS, dl, VT, OpLo, OpHi);
}

// Helper to split and extend a v16i1 mask to v16i8 or v16i16.
static SDValue SplitAndExtendv16i1(unsigned ExtOpc, MVT VT, SDValue In,
                                   const SDLoc &dl, SelectionDAG &DAG) {
  assert((VT == MVT::v16i8 || VT == MVT::v16i16) && "Unexpected VT.");
  SDValue Lo = DAG.getNode(ISD::EXTRACT_SUBVECTOR, dl, MVT::v8i1, In,
                           DAG.getIntPtrConstant(0, dl));
  SDValue Hi = DAG.getNode(ISD::EXTRACT_SUBVECTOR, dl, MVT::v8i1, In,
                           DAG.getIntPtrConstant(8, dl));
  Lo = DAG.getNode(ExtOpc, dl, MVT::v8i16, Lo);
  Hi = DAG.getNode(ExtOpc, dl, MVT::v8i16, Hi);
  SDValue Res = DAG.getNode(ISD::CONCAT_VECTORS, dl, MVT::v16i16, Lo, Hi);
  return DAG.getNode(ISD::TRUNCATE, dl, VT, Res);
}

static SDValue LowerZERO_EXTEND_Mask(SDValue Op,
                                     const X86Subtarget &Subtarget,
                                     SelectionDAG &DAG) {
  MVT VT = Op->getSimpleValueType(0);
  SDValue In = Op->getOperand(0);
  MVT InVT = In.getSimpleValueType();
  assert(InVT.getVectorElementType() == MVT::i1 && "Unexpected input type!");
  SDLoc DL(Op);
  unsigned NumElts = VT.getVectorNumElements();

  // For all vectors except vXi8 we can just emit a sign_extend and a shift.
  // This avoids a constant pool load.
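  // For example, zero-extending v16i1 to v16i32 becomes a v16i32 sign extend
  // (all-zeros or all-ones per lane) followed by a logical shift right by 31,
  // leaving 0 or 1 in each lane.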
  if (VT.getVectorElementType() != MVT::i8) {
    SDValue Extend = DAG.getNode(ISD::SIGN_EXTEND, DL, VT, In);
    return DAG.getNode(ISD::SRL, DL, VT, Extend,
                       DAG.getConstant(VT.getScalarSizeInBits() - 1, DL, VT));
  }

  // Extend VT if BWI is not supported.
  MVT ExtVT = VT;
  if (!Subtarget.hasBWI()) {
    // If v16i32 is to be avoided, we'll need to split and concatenate.
    if (NumElts == 16 && !Subtarget.canExtendTo512DQ())
      return SplitAndExtendv16i1(ISD::ZERO_EXTEND, VT, In, DL, DAG);

    ExtVT = MVT::getVectorVT(MVT::i32, NumElts);
  }

  // Widen to 512-bits if VLX is not supported.
  MVT WideVT = ExtVT;
  if (!ExtVT.is512BitVector() && !Subtarget.hasVLX()) {
    NumElts *= 512 / ExtVT.getSizeInBits();
    InVT = MVT::getVectorVT(MVT::i1, NumElts);
    In = DAG.getNode(ISD::INSERT_SUBVECTOR, DL, InVT, DAG.getUNDEF(InVT),
                     In, DAG.getIntPtrConstant(0, DL));
    WideVT = MVT::getVectorVT(ExtVT.getVectorElementType(),
                              NumElts);
  }

  SDValue One = DAG.getConstant(1, DL, WideVT);
  SDValue Zero = DAG.getConstant(0, DL, WideVT);

  SDValue SelectedVal = DAG.getSelect(DL, WideVT, In, One, Zero);
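  // Selecting between splat-1 and splat-0 with the i1 mask materializes the
  // zero-extended lanes directly.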

  // Truncate if we had to extend above.
  if (VT != ExtVT) {
    WideVT = MVT::getVectorVT(MVT::i8, NumElts);
    SelectedVal = DAG.getNode(ISD::TRUNCATE, DL, WideVT, SelectedVal);
  }

  // Extract back to 128/256-bit if we widened.
  if (WideVT != VT)
    SelectedVal = DAG.getNode(ISD::EXTRACT_SUBVECTOR, DL, VT, SelectedVal,
                              DAG.getIntPtrConstant(0, DL));

  return SelectedVal;
}

static SDValue LowerZERO_EXTEND(SDValue Op, const X86Subtarget &Subtarget,
                                SelectionDAG &DAG) {
  SDValue In = Op.getOperand(0);
  MVT SVT = In.getSimpleValueType();

  if (SVT.getVectorElementType() == MVT::i1)
    return LowerZERO_EXTEND_Mask(Op, Subtarget, DAG);

  assert(Subtarget.hasAVX() && "Expected AVX support");
  return LowerAVXExtend(Op, DAG, Subtarget);
}

/// Helper to recursively truncate vector elements in half with PACKSS/PACKUS.
/// It makes use of the fact that vectors with enough leading sign/zero bits
/// prevent the PACKSS/PACKUS from saturating the results.
/// AVX2 (Int256) sub-targets require extra shuffling as the PACK*S operates
/// within each 128-bit lane.
static SDValue truncateVectorWithPACK(unsigned Opcode, EVT DstVT, SDValue In,
                                      const SDLoc &DL, SelectionDAG &DAG,
                                      const X86Subtarget &Subtarget) {
  assert((Opcode == X86ISD::PACKSS || Opcode == X86ISD::PACKUS) &&
         "Unexpected PACK opcode");

  // Requires SSE2 but AVX512 has fast vector truncate.
  if (!Subtarget.hasSSE2() || Subtarget.hasAVX512() || !DstVT.isVector())
    return SDValue();

  EVT SrcVT = In.getValueType();

  // No truncation required, we might get here due to recursive calls.
  if (SrcVT == DstVT)
    return In;

  // We only support vector truncation to 64 bits or greater from a
  // 128 bits or greater source.
  unsigned DstSizeInBits = DstVT.getSizeInBits();
  unsigned SrcSizeInBits = SrcVT.getSizeInBits();
  if ((DstSizeInBits % 64) != 0 || (SrcSizeInBits % 128) != 0)
    return SDValue();

  unsigned NumElems = SrcVT.getVectorNumElements();
  if (!isPowerOf2_32(NumElems))
    return SDValue();

  LLVMContext &Ctx = *DAG.getContext();
  assert(DstVT.getVectorNumElements() == NumElems && "Illegal truncation");
  assert(SrcSizeInBits > DstSizeInBits && "Illegal truncation");

  EVT PackedSVT = EVT::getIntegerVT(Ctx, SrcVT.getScalarSizeInBits() / 2);

  // Pack to the largest type possible:
  // vXi64/vXi32 -> PACK*SDW and vXi16 -> PACK*SWB.
  EVT InVT = MVT::i16, OutVT = MVT::i8;
  if (SrcVT.getScalarSizeInBits() > 16 &&
      (Opcode == X86ISD::PACKSS || Subtarget.hasSSE41())) {
    InVT = MVT::i32;
    OutVT = MVT::i16;
  }

  // 128bit -> 64bit truncate - PACK 128-bit src in the lower subvector.
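  // For example, v4i32 -> v4i16 with PACKSS: emit PACKSSDW(In, In) to get a
  // v8i16 holding two copies of the truncated elements, then keep the low
  // 64 bits as the v4i16 result.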
  if (SrcVT.is128BitVector()) {
    InVT = EVT::getVectorVT(Ctx, InVT, 128 / InVT.getSizeInBits());
    OutVT = EVT::getVectorVT(Ctx, OutVT, 128 / OutVT.getSizeInBits());
    In = DAG.getBitcast(InVT, In);
    SDValue Res = DAG.getNode(Opcode, DL, OutVT, In, In);
    Res = extractSubVector(Res, 0, DAG, DL, 64);
    return DAG.getBitcast(DstVT, Res);
  }

  // Extract lower/upper subvectors.
  unsigned NumSubElts = NumElems / 2;
  SDValue Lo = extractSubVector(In, 0 * NumSubElts, DAG, DL, SrcSizeInBits / 2);
  SDValue Hi = extractSubVector(In, 1 * NumSubElts, DAG, DL, SrcSizeInBits / 2);

  unsigned SubSizeInBits = SrcSizeInBits / 2;
  InVT = EVT::getVectorVT(Ctx, InVT, SubSizeInBits / InVT.getSizeInBits());
  OutVT = EVT::getVectorVT(Ctx, OutVT, SubSizeInBits / OutVT.getSizeInBits());

  // 256bit -> 128bit truncate - PACK lower/upper 128-bit subvectors.
  if (SrcVT.is256BitVector() && DstVT.is128BitVector()) {
    Lo = DAG.getBitcast(InVT, Lo);
    Hi = DAG.getBitcast(InVT, Hi);
    SDValue Res = DAG.getNode(Opcode, DL, OutVT, Lo, Hi);
    return DAG.getBitcast(DstVT, Res);
  }

  // AVX2: 512bit -> 256bit truncate - PACK lower/upper 256-bit subvectors.
  // AVX2: 512bit -> 128bit truncate - PACK(PACK, PACK).
  if (SrcVT.is512BitVector() && Subtarget.hasInt256()) {
    Lo = DAG.getBitcast(InVT, Lo);
    Hi = DAG.getBitcast(InVT, Hi);
    SDValue Res = DAG.getNode(Opcode, DL, OutVT, Lo, Hi);

    // 256-bit PACK(ARG0, ARG1) leaves us with ((LO0,LO1),(HI0,HI1)),
    // so we need to shuffle to get ((LO0,HI0),(LO1,HI1)).
    Res = DAG.getBitcast(MVT::v4i64, Res);
    Res = DAG.getVectorShuffle(MVT::v4i64, DL, Res, Res, {0, 2, 1, 3});

    if (DstVT.is256BitVector())
      return DAG.getBitcast(DstVT, Res);

    // If 512bit -> 128bit truncate another stage.
    EVT PackedVT = EVT::getVectorVT(Ctx, PackedSVT, NumElems);
    Res = DAG.getBitcast(PackedVT, Res);
    return truncateVectorWithPACK(Opcode, DstVT, Res, DL, DAG, Subtarget);
  }

  // Recursively pack lower/upper subvectors, concat result and pack again.
  assert(SrcSizeInBits >= 256 && "Expected 256-bit vector or greater");
  EVT PackedVT = EVT::getVectorVT(Ctx, PackedSVT, NumSubElts);
  Lo = truncateVectorWithPACK(Opcode, PackedVT, Lo, DL, DAG, Subtarget);
  Hi = truncateVectorWithPACK(Opcode, PackedVT, Hi, DL, DAG, Subtarget);

  PackedVT = EVT::getVectorVT(Ctx, PackedSVT, NumElems);
  SDValue Res = DAG.getNode(ISD::CONCAT_VECTORS, DL, PackedVT, Lo, Hi);
  return truncateVectorWithPACK(Opcode, DstVT, Res, DL, DAG, Subtarget);
}

static SDValue LowerTruncateVecI1(SDValue Op, SelectionDAG &DAG,
                                  const X86Subtarget &Subtarget) {

  SDLoc DL(Op);
  MVT VT = Op.getSimpleValueType();
  SDValue In = Op.getOperand(0);
  MVT InVT = In.getSimpleValueType();

  assert(VT.getVectorElementType() == MVT::i1 && "Unexpected vector type.");

  // Shift LSB to MSB and use VPMOVB/W2M or TESTD/Q.
  unsigned ShiftInx = InVT.getScalarSizeInBits() - 1;
  if (InVT.getScalarSizeInBits() <= 16) {
    if (Subtarget.hasBWI()) {
      // legal, will go to VPMOVB2M, VPMOVW2M
      if (DAG.ComputeNumSignBits(In) < InVT.getScalarSizeInBits()) {
        // We need to shift to get the lsb into sign position.
        // Shift packed bytes not supported natively, bitcast to word
        MVT ExtVT = MVT::getVectorVT(MVT::i16, InVT.getSizeInBits()/16);
        In = DAG.getNode(ISD::SHL, DL, ExtVT,
                         DAG.getBitcast(ExtVT, In),
                         DAG.getConstant(ShiftInx, DL, ExtVT));
        In = DAG.getBitcast(InVT, In);
      }
      return DAG.getSetCC(DL, VT, DAG.getConstant(0, DL, InVT),
                          In, ISD::SETGT);
    }
    // Use TESTD/Q, extended vector to packed dword/qword.
    assert((InVT.is256BitVector() || InVT.is128BitVector()) &&
           "Unexpected vector type.");
    unsigned NumElts = InVT.getVectorNumElements();
    assert((NumElts == 8 || NumElts == 16) && "Unexpected number of elements");
    // We need to change to a wider element type that we have support for.
    // For 8 element vectors this is easy, we either extend to v8i32 or v8i64.
    // For 16 element vectors we extend to v16i32 unless we are explicitly
    // trying to avoid 512-bit vectors. If we are avoiding 512-bit vectors
    // we need to split into two 8 element vectors which we can extend to v8i32,
    // truncate and concat the results. There's an additional complication if
    // the original type is v16i8. In that case we can't split the v16i8 so
    // first we pre-extend it to v16i16 which we can split to v8i16, then extend
    // to v8i32, truncate that to v8i1 and concat the two halves.
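    // In short, for v16i8 when avoiding 512-bit vectors the pipeline is:
    //   v16i8 -> sext v16i16 -> split into 2 x v8i16 -> trunc each to v8i1
    //   (re-entering this function) -> concat into v16i1.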
    if (NumElts == 16 && !Subtarget.canExtendTo512DQ()) {
      if (InVT == MVT::v16i8) {
        // First we need to sign extend up to 256-bits so we can split that.
        InVT = MVT::v16i16;
        In = DAG.getNode(ISD::SIGN_EXTEND, DL, InVT, In);
      }
      SDValue Lo = extract128BitVector(In, 0, DAG, DL);
      SDValue Hi = extract128BitVector(In, 8, DAG, DL);
      // We're split now, just emit two truncates and a concat. The two
      // truncates will trigger legalization to come back to this function.
      Lo = DAG.getNode(ISD::TRUNCATE, DL, MVT::v8i1, Lo);
      Hi = DAG.getNode(ISD::TRUNCATE, DL, MVT::v8i1, Hi);
      return DAG.getNode(ISD::CONCAT_VECTORS, DL, VT, Lo, Hi);
    }
    // We either have 8 elements or we're allowed to use 512-bit vectors.
    // If we have VLX, we want to use the narrowest vector that can get the
    // job done so we use vXi32.
    MVT EltVT = Subtarget.hasVLX() ? MVT::i32 : MVT::getIntegerVT(512/NumElts);
    MVT ExtVT = MVT::getVectorVT(EltVT, NumElts);
    In = DAG.getNode(ISD::SIGN_EXTEND, DL, ExtVT, In);
    InVT = ExtVT;
    ShiftInx = InVT.getScalarSizeInBits() - 1;
  }

  if (DAG.ComputeNumSignBits(In) < InVT.getScalarSizeInBits()) {
    // We need to shift to get the lsb into sign position.
    In = DAG.getNode(ISD::SHL, DL, InVT, In,
                     DAG.getConstant(ShiftInx, DL, InVT));
  }
  // If we have DQI, emit a pattern that will be iseled as vpmovq2m/vpmovd2m.
  if (Subtarget.hasDQI())
    return DAG.getSetCC(DL, VT, DAG.getConstant(0, DL, InVT), In, ISD::SETGT);
  return DAG.getSetCC(DL, VT, In, DAG.getConstant(0, DL, InVT), ISD::SETNE);
}

SDValue X86TargetLowering::LowerTRUNCATE(SDValue Op, SelectionDAG &DAG) const {
  SDLoc DL(Op);
  MVT VT = Op.getSimpleValueType();
  SDValue In = Op.getOperand(0);
  MVT InVT = In.getSimpleValueType();
  unsigned InNumEltBits = InVT.getScalarSizeInBits();

  assert(VT.getVectorNumElements() == InVT.getVectorNumElements() &&
         "Invalid TRUNCATE operation");

  // If called by the legalizer just return.
  if (!DAG.getTargetLoweringInfo().isTypeLegal(InVT))
    return SDValue();

  if (VT.getVectorElementType() == MVT::i1)
    return LowerTruncateVecI1(Op, DAG, Subtarget);

  // vpmovqb/w/d, vpmovdb/w, vpmovwb
  if (Subtarget.hasAVX512()) {
    // Word to byte is only legal under BWI. Otherwise we have to promote to
    // v16i32 and then truncate that. But we should only do that if we haven't
    // been asked to avoid 512-bit vectors. The actual promotion to v16i32
    // will be handled by isel patterns.
    if (InVT != MVT::v16i16 || Subtarget.hasBWI() ||
        Subtarget.canExtendTo512DQ())
      return Op;
  }

  unsigned NumPackedSignBits = std::min<unsigned>(VT.getScalarSizeInBits(), 16);
  unsigned NumPackedZeroBits = Subtarget.hasSSE41() ? NumPackedSignBits : 8;
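  // (PACKUSWB is available from SSE2 but PACKUSDW only from SSE4.1, so
  // pre-SSE41 we can only rely on byte-level unsigned saturation.)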

  // Truncate with PACKUS if we are truncating a vector with leading zero bits
  // that extend all the way to the packed/truncated value.
  // Pre-SSE41 we can only use PACKUSWB.
  KnownBits Known = DAG.computeKnownBits(In);
  if ((InNumEltBits - NumPackedZeroBits) <= Known.countMinLeadingZeros())
    if (SDValue V =
            truncateVectorWithPACK(X86ISD::PACKUS, VT, In, DL, DAG, Subtarget))
      return V;

  // Truncate with PACKSS if we are truncating a vector with sign-bits that
  // extend all the way to the packed/truncated value.
  if ((InNumEltBits - NumPackedSignBits) < DAG.ComputeNumSignBits(In))
    if (SDValue V =
            truncateVectorWithPACK(X86ISD::PACKSS, VT, In, DL, DAG, Subtarget))
      return V;

  if ((VT == MVT::v4i32) && (InVT == MVT::v4i64)) {
    // On AVX2, v4i64 -> v4i32 becomes VPERMD.
    if (Subtarget.hasInt256()) {
      static const int ShufMask[] = {0, 2, 4, 6, -1, -1, -1, -1};
      In = DAG.getBitcast(MVT::v8i32, In);
      In = DAG.getVectorShuffle(MVT::v8i32, DL, In, In, ShufMask);
      return DAG.getNode(ISD::EXTRACT_SUBVECTOR, DL, VT, In,
                         DAG.getIntPtrConstant(0, DL));
    }

    SDValue OpLo = DAG.getNode(ISD::EXTRACT_SUBVECTOR, DL, MVT::v2i64, In,
                               DAG.getIntPtrConstant(0, DL));
    SDValue OpHi = DAG.getNode(ISD::EXTRACT_SUBVECTOR, DL, MVT::v2i64, In,
                               DAG.getIntPtrConstant(2, DL));
    OpLo = DAG.getBitcast(MVT::v4i32, OpLo);
    OpHi = DAG.getBitcast(MVT::v4i32, OpHi);
    static const int ShufMask[] = {0, 2, 4, 6};
    return DAG.getVectorShuffle(VT, DL, OpLo, OpHi, ShufMask);
  }

  if ((VT == MVT::v8i16) && (InVT == MVT::v8i32)) {
    // On AVX2, v8i32 -> v8i16 becomes PSHUFB.
    if (Subtarget.hasInt256()) {
      In = DAG.getBitcast(MVT::v32i8, In);

      // The PSHUFB mask:
      static const int ShufMask1[] = { 0,  1,  4,  5,  8,  9, 12, 13,
                                      -1, -1, -1, -1, -1, -1, -1, -1,
                                      16, 17, 20, 21, 24, 25, 28, 29,
                                      -1, -1, -1, -1, -1, -1, -1, -1 };
      In = DAG.getVectorShuffle(MVT::v32i8, DL, In, In, ShufMask1);
      In = DAG.getBitcast(MVT::v4i64, In);

      static const int ShufMask2[] = {0, 2, -1, -1};
      In = DAG.getVectorShuffle(MVT::v4i64, DL, In, In, ShufMask2);
      In = DAG.getNode(ISD::EXTRACT_SUBVECTOR, DL, MVT::v2i64, In,
                       DAG.getIntPtrConstant(0, DL));
      return DAG.getBitcast(VT, In);
    }

    SDValue OpLo = DAG.getNode(ISD::EXTRACT_SUBVECTOR, DL, MVT::v4i32, In,
                               DAG.getIntPtrConstant(0, DL));

    SDValue OpHi = DAG.getNode(ISD::EXTRACT_SUBVECTOR, DL, MVT::v4i32, In,
                               DAG.getIntPtrConstant(4, DL));

    OpLo = DAG.getBitcast(MVT::v16i8, OpLo);
    OpHi = DAG.getBitcast(MVT::v16i8, OpHi);

    // The PSHUFB mask:
    static const int ShufMask1[] = {0, 1, 4, 5, 8, 9, 12, 13,
                                    -1, -1, -1, -1, -1, -1, -1, -1};

    OpLo = DAG.getVectorShuffle(MVT::v16i8, DL, OpLo, OpLo, ShufMask1);
    OpHi = DAG.getVectorShuffle(MVT::v16i8, DL, OpHi, OpHi, ShufMask1);

    OpLo = DAG.getBitcast(MVT::v4i32, OpLo);
    OpHi = DAG.getBitcast(MVT::v4i32, OpHi);

    // The MOVLHPS Mask:
    static const int ShufMask2[] = {0, 1, 4, 5};
    SDValue res = DAG.getVectorShuffle(MVT::v4i32, DL, OpLo, OpHi, ShufMask2);
    return DAG.getBitcast(MVT::v8i16, res);
  }

  if (VT == MVT::v16i8 && InVT == MVT::v16i16) {
    // Use an AND to zero the upper bits for PACKUS.
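    // Masking with 255 clears bits 15:8 of every word, so PACKUSWB's unsigned
    // saturation passes each low byte through unchanged instead of clamping.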
    In = DAG.getNode(ISD::AND, DL, InVT, In, DAG.getConstant(255, DL, InVT));

    SDValue InLo = DAG.getNode(ISD::EXTRACT_SUBVECTOR, DL, MVT::v8i16, In,
                               DAG.getIntPtrConstant(0, DL));
    SDValue InHi = DAG.getNode(ISD::EXTRACT_SUBVECTOR, DL, MVT::v8i16, In,
                               DAG.getIntPtrConstant(8, DL));
    return DAG.getNode(X86ISD::PACKUS, DL, VT, InLo, InHi);
  }

  // Handle truncation of V256 to V128 using shuffles.
  assert(VT.is128BitVector() && InVT.is256BitVector() && "Unexpected types!");

  assert(Subtarget.hasAVX() && "256-bit vector without AVX!");

  unsigned NumElems = VT.getVectorNumElements();
  MVT NVT = MVT::getVectorVT(VT.getVectorElementType(), NumElems * 2);

  SmallVector<int, 16> MaskVec(NumElems * 2, -1);
  // Prepare truncation shuffle mask
  for (unsigned i = 0; i != NumElems; ++i)
    MaskVec[i] = i * 2;
  In = DAG.getBitcast(NVT, In);
  SDValue V = DAG.getVectorShuffle(NVT, DL, In, In, MaskVec);
  return DAG.getNode(ISD::EXTRACT_SUBVECTOR, DL, VT, V,
                     DAG.getIntPtrConstant(0, DL));
}

SDValue X86TargetLowering::LowerFP_TO_INT(SDValue Op, SelectionDAG &DAG) const {
  bool IsSigned = Op.getOpcode() == ISD::FP_TO_SINT;
  MVT VT = Op.getSimpleValueType();

  if (VT.isVector()) {
    SDValue Src = Op.getOperand(0);
    SDLoc dl(Op);

    if (VT == MVT::v2i1 && Src.getSimpleValueType() == MVT::v2f64) {
      MVT ResVT = MVT::v4i32;
      MVT TruncVT = MVT::v4i1;
      unsigned Opc = IsSigned ? X86ISD::CVTTP2SI : X86ISD::CVTTP2UI;
      if (!IsSigned && !Subtarget.hasVLX()) {
        // Widen to 512-bits.
        ResVT = MVT::v8i32;
        TruncVT = MVT::v8i1;
        Opc = ISD::FP_TO_UINT;
        Src = DAG.getNode(ISD::INSERT_SUBVECTOR, dl, MVT::v8f64,
                          DAG.getUNDEF(MVT::v8f64),
                          Src, DAG.getIntPtrConstant(0, dl));
      }
      SDValue Res = DAG.getNode(Opc, dl, ResVT, Src);
      Res = DAG.getNode(ISD::TRUNCATE, dl, TruncVT, Res);
      return DAG.getNode(ISD::EXTRACT_SUBVECTOR, dl, MVT::v2i1, Res,
                         DAG.getIntPtrConstant(0, dl));
    }

    assert(Subtarget.hasDQI() && Subtarget.hasVLX() && "Requires AVX512DQVL!");
    if (VT == MVT::v2i64 && Src.getSimpleValueType() == MVT::v2f32) {
      return DAG.getNode(IsSigned ? X86ISD::CVTTP2SI : X86ISD::CVTTP2UI, dl, VT,
                         DAG.getNode(ISD::CONCAT_VECTORS, dl, MVT::v4f32, Src,
                                     DAG.getUNDEF(MVT::v2f32)));
    }

    return SDValue();
  }

  assert(!VT.isVector());

  std::pair<SDValue,SDValue> Vals =
      FP_TO_INTHelper(Op, DAG, IsSigned, /*IsReplace=*/ false);
  SDValue FIST = Vals.first, StackSlot = Vals.second;
  // If FP_TO_INTHelper failed, the node is actually supposed to be Legal.
  if (!FIST.getNode())
    return Op;

  if (StackSlot.getNode())
    // Load the result.
    return DAG.getLoad(VT, SDLoc(Op), FIST, StackSlot, MachinePointerInfo());

  // The node is the result.
  return FIST;
}

static SDValue LowerFP_EXTEND(SDValue Op, SelectionDAG &DAG) {
  SDLoc DL(Op);
  MVT VT = Op.getSimpleValueType();
  SDValue In = Op.getOperand(0);
  MVT SVT = In.getSimpleValueType();

  assert(SVT == MVT::v2f32 && "Only customize MVT::v2f32 type legalization!");

  return DAG.getNode(X86ISD::VFPEXT, DL, VT,
                     DAG.getNode(ISD::CONCAT_VECTORS, DL, MVT::v4f32,
                                 In, DAG.getUNDEF(SVT)));
}

/// The only differences between FABS and FNEG are the mask and the logic op.
/// FNEG also has a folding opportunity for FNEG(FABS(x)).
static SDValue LowerFABSorFNEG(SDValue Op, SelectionDAG &DAG) {
  assert((Op.getOpcode() == ISD::FABS || Op.getOpcode() == ISD::FNEG) &&
         "Wrong opcode for lowering FABS or FNEG.");

  bool IsFABS = (Op.getOpcode() == ISD::FABS);

  // If this is a FABS and it has an FNEG user, bail out to fold the combination
  // into an FNABS. We'll lower the FABS after that if it is still in use.
  if (IsFABS)
    for (SDNode *User : Op->uses())
      if (User->getOpcode() == ISD::FNEG)
        return Op;

  SDLoc dl(Op);
  MVT VT = Op.getSimpleValueType();

  bool IsF128 = (VT == MVT::f128);
  assert((VT == MVT::f64 || VT == MVT::f32 || VT == MVT::f128 ||
          VT == MVT::v2f64 || VT == MVT::v4f64 || VT == MVT::v4f32 ||
          VT == MVT::v8f32 || VT == MVT::v8f64 || VT == MVT::v16f32) &&
         "Unexpected type in LowerFABSorFNEG");

  // FIXME: Use function attribute "OptimizeForSize" and/or CodeGenOpt::Level to
  // decide if we should generate a 16-byte constant mask when we only need 4 or
  // 8 bytes for the scalar case.

  // There are no scalar bitwise logical SSE/AVX instructions, so we
  // generate a 16-byte vector constant and logic op even for the scalar case.
  // Using a 16-byte mask allows folding the load of the mask with
  // the logic op, so it can save (~4 bytes) on code size.
  bool IsFakeVector = !VT.isVector() && !IsF128;
  MVT LogicVT = VT;
  if (IsFakeVector)
    LogicVT = (VT == MVT::f64) ? MVT::v2f64 : MVT::v4f32;

  unsigned EltBits = VT.getScalarSizeInBits();
  // For FABS, mask is 0x7f...; for FNEG, mask is 0x80...
  APInt MaskElt = IsFABS ? APInt::getSignedMaxValue(EltBits) :
                           APInt::getSignMask(EltBits);
  const fltSemantics &Sem = SelectionDAG::EVTToAPFloatSemantics(VT);
  SDValue Mask = DAG.getConstantFP(APFloat(Sem, MaskElt), dl, LogicVT);
18371 | ||||
18372 | SDValue Op0 = Op.getOperand(0); | |||
18373 | bool IsFNABS = !IsFABS && (Op0.getOpcode() == ISD::FABS); | |||
18374 | unsigned LogicOp = IsFABS ? X86ISD::FAND : | |||
18375 | IsFNABS ? X86ISD::FOR : | |||
18376 | X86ISD::FXOR; | |||
18377 | SDValue Operand = IsFNABS ? Op0.getOperand(0) : Op0; | |||
18378 | ||||
18379 | if (VT.isVector() || IsF128) | |||
18380 | return DAG.getNode(LogicOp, dl, LogicVT, Operand, Mask); | |||
18381 | ||||
18382 | // For the scalar case extend to a 128-bit vector, perform the logic op, | |||
18383 | // and extract the scalar result back out. | |||
18384 | Operand = DAG.getNode(ISD::SCALAR_TO_VECTOR, dl, LogicVT, Operand); | |||
18385 | SDValue LogicNode = DAG.getNode(LogicOp, dl, LogicVT, Operand, Mask); | |||
18386 | return DAG.getNode(ISD::EXTRACT_VECTOR_ELT, dl, VT, LogicNode, | |||
18387 | DAG.getIntPtrConstant(0, dl)); | |||
18388 | } | |||
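
// A worked example (illustrative only): for f32, the three sign-bit tricks
// above reduce to plain integer logic on the IEEE-754 bit pattern:
//   FABS(x)  = x AND 0x7fffffff   (clear the sign bit)
//   FNEG(x)  = x XOR 0x80000000   (flip the sign bit)
//   FNABS(x) = x OR  0x80000000   (force the sign bit; FNEG(FABS(x)))
// The same masks, splatted per element, drive the vector cases.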

static SDValue LowerFCOPYSIGN(SDValue Op, SelectionDAG &DAG) {
  SDValue Mag = Op.getOperand(0);
  SDValue Sign = Op.getOperand(1);
  SDLoc dl(Op);

  // If the sign operand is smaller, extend it first.
  MVT VT = Op.getSimpleValueType();
  if (Sign.getSimpleValueType().bitsLT(VT))
    Sign = DAG.getNode(ISD::FP_EXTEND, dl, VT, Sign);

  // And if it is bigger, shrink it first.
  if (Sign.getSimpleValueType().bitsGT(VT))
    Sign = DAG.getNode(ISD::FP_ROUND, dl, VT, Sign, DAG.getIntPtrConstant(1, dl));

  // At this point the operands and the result should have the same
  // type, and that won't be f80 since that is not custom lowered.
  bool IsF128 = (VT == MVT::f128);
  assert((VT == MVT::f64 || VT == MVT::f32 || VT == MVT::f128 ||
          VT == MVT::v2f64 || VT == MVT::v4f64 || VT == MVT::v4f32 ||
          VT == MVT::v8f32 || VT == MVT::v8f64 || VT == MVT::v16f32) &&
         "Unexpected type in LowerFCOPYSIGN");

  const fltSemantics &Sem = SelectionDAG::EVTToAPFloatSemantics(VT);

  // Perform all scalar logic operations as 16-byte vectors because there are no
  // scalar FP logic instructions in SSE.
  // TODO: This isn't necessary. If we used scalar types, we might avoid some
  // unnecessary splats, but we might miss load folding opportunities. Should
  // this decision be based on OptimizeForSize?
  bool IsFakeVector = !VT.isVector() && !IsF128;
  MVT LogicVT = VT;
  if (IsFakeVector)
    LogicVT = (VT == MVT::f64) ? MVT::v2f64 : MVT::v4f32;

  // The mask constants are automatically splatted for vector types.
  unsigned EltSizeInBits = VT.getScalarSizeInBits();
  SDValue SignMask = DAG.getConstantFP(
      APFloat(Sem, APInt::getSignMask(EltSizeInBits)), dl, LogicVT);
  SDValue MagMask = DAG.getConstantFP(
      APFloat(Sem, APInt::getSignedMaxValue(EltSizeInBits)), dl, LogicVT);

  // First, clear all bits but the sign bit from the second operand (sign).
  if (IsFakeVector)
    Sign = DAG.getNode(ISD::SCALAR_TO_VECTOR, dl, LogicVT, Sign);
  SDValue SignBit = DAG.getNode(X86ISD::FAND, dl, LogicVT, Sign, SignMask);

  // Next, clear the sign bit from the first operand (magnitude).
  // TODO: If we had general constant folding for FP logic ops, this check
  // wouldn't be necessary.
  SDValue MagBits;
  if (ConstantFPSDNode *Op0CN = isConstOrConstSplatFP(Mag)) {
    APFloat APF = Op0CN->getValueAPF();
    APF.clearSign();
    MagBits = DAG.getConstantFP(APF, dl, LogicVT);
  } else {
    // If the magnitude operand wasn't a constant, we need to AND out the sign.
    if (IsFakeVector)
      Mag = DAG.getNode(ISD::SCALAR_TO_VECTOR, dl, LogicVT, Mag);
    MagBits = DAG.getNode(X86ISD::FAND, dl, LogicVT, Mag, MagMask);
  }

  // OR the magnitude value with the sign bit.
  SDValue Or = DAG.getNode(X86ISD::FOR, dl, LogicVT, MagBits, SignBit);
  return !IsFakeVector ? Or : DAG.getNode(ISD::EXTRACT_VECTOR_ELT, dl, VT, Or,
                                          DAG.getIntPtrConstant(0, dl));
}
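
// Illustrative bit-level summary (not from the source): for f64 this computes
//   copysign(Mag, Sign) =
//     (Mag & 0x7fffffffffffffff) | (Sign & 0x8000000000000000)
// entirely with packed FP logic ops, so no FP arithmetic (and hence no
// exception or rounding behavior) is involved.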

static SDValue LowerFGETSIGN(SDValue Op, SelectionDAG &DAG) {
  SDValue N0 = Op.getOperand(0);
  SDLoc dl(Op);
  MVT VT = Op.getSimpleValueType();

  MVT OpVT = N0.getSimpleValueType();
  assert((OpVT == MVT::f32 || OpVT == MVT::f64) &&
         "Unexpected type for FGETSIGN");

  // Lower ISD::FGETSIGN to (AND (X86ISD::MOVMSK ...) 1).
  MVT VecVT = (OpVT == MVT::f32 ? MVT::v4f32 : MVT::v2f64);
  SDValue Res = DAG.getNode(ISD::SCALAR_TO_VECTOR, dl, VecVT, N0);
  Res = DAG.getNode(X86ISD::MOVMSK, dl, MVT::i32, Res);
  Res = DAG.getZExtOrTrunc(Res, dl, VT);
  Res = DAG.getNode(ISD::AND, dl, VT, Res, DAG.getConstant(1, dl, VT));
  return Res;
}
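
// Why the AND is needed (illustrative): MOVMSKPS/MOVMSKPD gather the sign bit
// of *every* lane into the low bits of a GPR. Only lane 0 holds the scalar
// input here, so masking with 1 keeps exactly that lane's sign bit.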

/// Helper for creating a X86ISD::SETCC node.
static SDValue getSETCC(X86::CondCode Cond, SDValue EFLAGS, const SDLoc &dl,
                        SelectionDAG &DAG) {
  return DAG.getNode(X86ISD::SETCC, dl, MVT::i8,
                     DAG.getConstant(Cond, dl, MVT::i8), EFLAGS);
}

// Check whether an OR'd tree is PTEST-able.
static SDValue LowerVectorAllZeroTest(SDValue Op, ISD::CondCode CC,
                                      const X86Subtarget &Subtarget,
                                      SelectionDAG &DAG) {
  assert(Op.getOpcode() == ISD::OR && "Only check OR'd tree.");

  if (!Subtarget.hasSSE41())
    return SDValue();

  if (!Op->hasOneUse())
    return SDValue();

  SDNode *N = Op.getNode();
  SDLoc DL(N);

  SmallVector<SDValue, 8> Opnds;
  DenseMap<SDValue, unsigned> VecInMap;
  SmallVector<SDValue, 8> VecIns;
  EVT VT = MVT::Other;

  // Recognize a special case where a vector is cast into a wide integer to
  // test all 0s.
  Opnds.push_back(N->getOperand(0));
  Opnds.push_back(N->getOperand(1));

  for (unsigned Slot = 0, e = Opnds.size(); Slot < e; ++Slot) {
    SmallVectorImpl<SDValue>::const_iterator I = Opnds.begin() + Slot;
    // BFS traverse all OR'd operands.
    if (I->getOpcode() == ISD::OR) {
      Opnds.push_back(I->getOperand(0));
      Opnds.push_back(I->getOperand(1));
      // Re-evaluate the number of nodes to be traversed.
      e += 2; // 2 more nodes (LHS and RHS) are pushed.
      continue;
    }

    // Quit if this is not an EXTRACT_VECTOR_ELT.
    if (I->getOpcode() != ISD::EXTRACT_VECTOR_ELT)
      return SDValue();

    // Quit if it lacks a constant index.
    SDValue Idx = I->getOperand(1);
    if (!isa<ConstantSDNode>(Idx))
      return SDValue();

    SDValue ExtractedFromVec = I->getOperand(0);
    DenseMap<SDValue, unsigned>::iterator M = VecInMap.find(ExtractedFromVec);
    if (M == VecInMap.end()) {
      VT = ExtractedFromVec.getValueType();
      // Quit if not 128/256-bit vector.
      if (!VT.is128BitVector() && !VT.is256BitVector())
        return SDValue();
      // Quit if not the same type.
      if (VecInMap.begin() != VecInMap.end() &&
          VT != VecInMap.begin()->first.getValueType())
        return SDValue();
      M = VecInMap.insert(std::make_pair(ExtractedFromVec, 0)).first;
      VecIns.push_back(ExtractedFromVec);
    }
    M->second |= 1U << cast<ConstantSDNode>(Idx)->getZExtValue();
  }

  assert((VT.is128BitVector() || VT.is256BitVector()) &&
         "Not extracted from 128-/256-bit vector.");

  unsigned FullMask = (1U << VT.getVectorNumElements()) - 1U;

  for (DenseMap<SDValue, unsigned>::const_iterator
        I = VecInMap.begin(), E = VecInMap.end(); I != E; ++I) {
    // Quit if not all elements are used.
    if (I->second != FullMask)
      return SDValue();
  }

  MVT TestVT = VT.is128BitVector() ? MVT::v2i64 : MVT::v4i64;

  // Cast all vectors into TestVT for PTEST.
  for (unsigned i = 0, e = VecIns.size(); i < e; ++i)
    VecIns[i] = DAG.getBitcast(TestVT, VecIns[i]);

  // If more than one full vector is evaluated, OR them first before PTEST.
  for (unsigned Slot = 0, e = VecIns.size(); e - Slot > 1; Slot += 2, e += 1) {
    // Each iteration will OR 2 nodes and append the result until there is only
    // 1 node left, i.e. the final OR'd value of all vectors.
    SDValue LHS = VecIns[Slot];
    SDValue RHS = VecIns[Slot + 1];
    VecIns.push_back(DAG.getNode(ISD::OR, DL, TestVT, LHS, RHS));
  }

  SDValue Res = DAG.getNode(X86ISD::PTEST, DL, MVT::i32,
                            VecIns.back(), VecIns.back());
  return getSETCC(CC == ISD::SETEQ ? X86::COND_E : X86::COND_NE, Res, DL, DAG);
}
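
// Illustrative input pattern (a sketch, not from the source): an "is the whole
// vector zero?" test that was scalarized by earlier passes, e.g.
//   (or (or (extractelt %v, 0), (extractelt %v, 1)),
//       (or (extractelt %v, 2), (extractelt %v, 3))) == 0
// collapses to a single PTEST %v, %v followed by SETE, because PTEST sets ZF
// when the AND of its two operands is all zero.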

/// Return true if \c Op has a use that doesn't just read flags.
static bool hasNonFlagsUse(SDValue Op) {
  for (SDNode::use_iterator UI = Op->use_begin(), UE = Op->use_end(); UI != UE;
       ++UI) {
    SDNode *User = *UI;
    unsigned UOpNo = UI.getOperandNo();
    if (User->getOpcode() == ISD::TRUNCATE && User->hasOneUse()) {
      // Look past the truncate.
      UOpNo = User->use_begin().getOperandNo();
      User = *User->use_begin();
    }

    if (User->getOpcode() != ISD::BRCOND && User->getOpcode() != ISD::SETCC &&
        !(User->getOpcode() == ISD::SELECT && UOpNo == 0))
      return true;
  }
  return false;
}

/// Emit nodes that will be selected as "test Op0,Op0", or something
/// equivalent.
static SDValue EmitTest(SDValue Op, unsigned X86CC, const SDLoc &dl,
                        SelectionDAG &DAG, const X86Subtarget &Subtarget) {
  // CF and OF aren't always set the way we want. Determine which
  // of these we need.
  bool NeedCF = false;
  bool NeedOF = false;
  switch (X86CC) {
  default: break;
  case X86::COND_A: case X86::COND_AE:
  case X86::COND_B: case X86::COND_BE:
    NeedCF = true;
    break;
  case X86::COND_G: case X86::COND_GE:
  case X86::COND_L: case X86::COND_LE:
  case X86::COND_O: case X86::COND_NO: {
    // Check if we really need to set the Overflow flag. If NoSignedWrap is
    // present, that is not actually needed.
    switch (Op->getOpcode()) {
    case ISD::ADD:
    case ISD::SUB:
    case ISD::MUL:
    case ISD::SHL:
      if (Op.getNode()->getFlags().hasNoSignedWrap())
        break;
      LLVM_FALLTHROUGH;
    default:
      NeedOF = true;
      break;
    }
    break;
  }
  }
  // See if we can use the EFLAGS value from the operand instead of
  // doing a separate TEST. TEST always sets OF and CF to 0, so unless
  // we prove that the arithmetic won't overflow, we can't use OF or CF.
  if (Op.getResNo() != 0 || NeedOF || NeedCF) {
    // Emit a CMP with 0, which is the TEST pattern.
    return DAG.getNode(X86ISD::CMP, dl, MVT::i32, Op,
                       DAG.getConstant(0, dl, Op.getValueType()));
  }
  unsigned Opcode = 0;
  unsigned NumOperands = 0;

  SDValue ArithOp = Op;

  // NOTICE: In the code below we use ArithOp to hold the arithmetic operation
  // which may be the result of a CAST. We use the variable 'Op', which is the
  // non-casted variable, when we check for possible users.
  switch (ArithOp.getOpcode()) {
  case ISD::ADD:
    // We only want to rewrite this as a target-specific node with attached
    // flags if there is a reasonable chance of either using that to do custom
    // instruction selection that can fold some of the memory operands, or if
    // only the flags are used. If there are other uses, leave the node alone
    // and emit a test instruction.
    for (SDNode::use_iterator UI = Op.getNode()->use_begin(),
         UE = Op.getNode()->use_end(); UI != UE; ++UI)
      if (UI->getOpcode() != ISD::CopyToReg &&
          UI->getOpcode() != ISD::SETCC &&
          UI->getOpcode() != ISD::STORE)
        goto default_case;

    if (auto *C = dyn_cast<ConstantSDNode>(ArithOp.getOperand(1))) {
      // An add of one will be selected as an INC.
      if (C->isOne() &&
          (!Subtarget.slowIncDec() ||
           DAG.getMachineFunction().getFunction().optForSize())) {
        Opcode = X86ISD::INC;
        NumOperands = 1;
        break;
      }

      // An add of negative one (subtract of one) will be selected as a DEC.
      if (C->isAllOnesValue() &&
          (!Subtarget.slowIncDec() ||
           DAG.getMachineFunction().getFunction().optForSize())) {
        Opcode = X86ISD::DEC;
        NumOperands = 1;
        break;
      }
    }

    // Otherwise use a regular EFLAGS-setting add.
    Opcode = X86ISD::ADD;
    NumOperands = 2;
    break;

  case ISD::AND:
    // If the primary 'and' result isn't used, don't bother using X86ISD::AND,
    // because a TEST instruction will be better.
    if (!hasNonFlagsUse(Op))
      break;

    LLVM_FALLTHROUGH;
  case ISD::SUB:
  case ISD::OR:
  case ISD::XOR:
    // Similar to ISD::ADD above, check if the uses will preclude useful
    // lowering of the target-specific node.
    for (SDNode::use_iterator UI = Op.getNode()->use_begin(),
         UE = Op.getNode()->use_end(); UI != UE; ++UI)
      if (UI->getOpcode() != ISD::CopyToReg &&
          UI->getOpcode() != ISD::SETCC &&
          UI->getOpcode() != ISD::STORE)
        goto default_case;

    // Otherwise use a regular EFLAGS-setting instruction.
    switch (ArithOp.getOpcode()) {
    default: llvm_unreachable("unexpected operator!");
    case ISD::SUB: Opcode = X86ISD::SUB; break;
    case ISD::XOR: Opcode = X86ISD::XOR; break;
    case ISD::AND: Opcode = X86ISD::AND; break;
    case ISD::OR:  Opcode = X86ISD::OR;  break;
    }

    NumOperands = 2;
    break;
  case X86ISD::ADD:
  case X86ISD::SUB:
  case X86ISD::INC:
  case X86ISD::DEC:
  case X86ISD::OR:
  case X86ISD::XOR:
  case X86ISD::AND:
    return SDValue(Op.getNode(), 1);
  default:
  default_case:
    break;
  }

  if (Opcode == 0) {
    // Emit a CMP with 0, which is the TEST pattern.
    return DAG.getNode(X86ISD::CMP, dl, MVT::i32, Op,
                       DAG.getConstant(0, dl, Op.getValueType()));
  }
  SDVTList VTs = DAG.getVTList(Op.getValueType(), MVT::i32);
  SmallVector<SDValue, 4> Ops(Op->op_begin(), Op->op_begin() + NumOperands);

  SDValue New = DAG.getNode(Opcode, dl, VTs, Ops);
  DAG.ReplaceAllUsesOfValueWith(SDValue(Op.getNode(), 0), New);
  return SDValue(New.getNode(), 1);
}
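
// Illustrative example (a sketch, not from the source): for
//   %s = add i32 %a, %b
//   %c = icmp eq i32 %s, 0
// the rewrite above replaces the ISD::ADD with an X86ISD::ADD that produces
// both the sum (result 0) and EFLAGS (result 1), so the SETCC can read the
// flags of the ADD directly and no separate TEST instruction is emitted.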

/// Emit nodes that will be selected as "cmp Op0,Op1", or something
/// equivalent.
SDValue X86TargetLowering::EmitCmp(SDValue Op0, SDValue Op1, unsigned X86CC,
                                   const SDLoc &dl, SelectionDAG &DAG) const {
  if (isNullConstant(Op1))
    return EmitTest(Op0, X86CC, dl, DAG, Subtarget);

  if ((Op0.getValueType() == MVT::i8 || Op0.getValueType() == MVT::i16 ||
       Op0.getValueType() == MVT::i32 || Op0.getValueType() == MVT::i64)) {
    // Only promote the compare up to i32 if it is a 16-bit operation
    // with an immediate. 16-bit immediates are to be avoided because they
    // need an operand-size prefix, which can stall the decoder.
    if (Op0.getValueType() == MVT::i16 &&
        ((isa<ConstantSDNode>(Op0) &&
          !cast<ConstantSDNode>(Op0)->getAPIntValue().isSignedIntN(8)) ||
         (isa<ConstantSDNode>(Op1) &&
          !cast<ConstantSDNode>(Op1)->getAPIntValue().isSignedIntN(8))) &&
        !DAG.getMachineFunction().getFunction().optForMinSize() &&
        !Subtarget.isAtom()) {
      unsigned ExtendOp =
          isX86CCUnsigned(X86CC) ? ISD::ZERO_EXTEND : ISD::SIGN_EXTEND;
      Op0 = DAG.getNode(ExtendOp, dl, MVT::i32, Op0);
      Op1 = DAG.getNode(ExtendOp, dl, MVT::i32, Op1);
    }
    // Use SUB instead of CMP to enable CSE between SUB and CMP.
    SDVTList VTs = DAG.getVTList(Op0.getValueType(), MVT::i32);
    SDValue Sub = DAG.getNode(X86ISD::SUB, dl, VTs, Op0, Op1);
    return SDValue(Sub.getNode(), 1);
  }
  assert(Op0.getValueType().isFloatingPoint() && "Unexpected VT!");
  return DAG.getNode(X86ISD::CMP, dl, MVT::i32, Op0, Op1);
}

/// Convert a comparison if required by the subtarget.
SDValue X86TargetLowering::ConvertCmpIfNecessary(SDValue Cmp,
                                                 SelectionDAG &DAG) const {
  // If the subtarget does not support the FUCOMI instruction, floating-point
  // comparisons have to be converted.
  if (Subtarget.hasCMov() ||
      Cmp.getOpcode() != X86ISD::CMP ||
      !Cmp.getOperand(0).getValueType().isFloatingPoint() ||
      !Cmp.getOperand(1).getValueType().isFloatingPoint())
    return Cmp;

  // The instruction selector will select an FUCOM instruction instead of
  // FUCOMI, which writes the comparison result to FPSW instead of EFLAGS. Hence
  // build an SDNode sequence that transfers the result from FPSW into EFLAGS:
  // (X86sahf (trunc (srl (X86fp_stsw (trunc (X86cmp ...)), 8))))
  SDLoc dl(Cmp);
  SDValue TruncFPSW = DAG.getNode(ISD::TRUNCATE, dl, MVT::i16, Cmp);
  SDValue FNStSW = DAG.getNode(X86ISD::FNSTSW16r, dl, MVT::i16, TruncFPSW);
  SDValue Srl = DAG.getNode(ISD::SRL, dl, MVT::i16, FNStSW,
                            DAG.getConstant(8, dl, MVT::i8));
  SDValue TruncSrl = DAG.getNode(ISD::TRUNCATE, dl, MVT::i8, Srl);

  // Some 64-bit targets lack SAHF support, but they do support FCOMI.
  assert(Subtarget.hasLAHFSAHF() && "Target doesn't support SAHF or FCOMI?");
  return DAG.getNode(X86ISD::SAHF, dl, MVT::i32, TruncSrl);
}
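
// For reference (an illustrative sketch, assuming a pre-CMOV target): the
// sequence built above typically selects to the classic x87 idiom
//   fucom            ; compare, result lands in FPSW condition bits
//   fnstsw %ax       ; copy FPSW into AX
//   sahf             ; load AH (FPSW bits 8-15) into EFLAGS
// after which the FP condition is branchable like any integer compare.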

/// Check if replacement of SQRT with RSQRT should be disabled.
bool X86TargetLowering::isFsqrtCheap(SDValue Op, SelectionDAG &DAG) const {
  EVT VT = Op.getValueType();

  // We never want to use both SQRT and RSQRT instructions for the same input.
  if (DAG.getNodeIfExists(X86ISD::FRSQRT, DAG.getVTList(VT), Op))
    return false;

  if (VT.isVector())
    return Subtarget.hasFastVectorFSQRT();
  return Subtarget.hasFastScalarFSQRT();
}

/// The minimum architected relative accuracy is 2^-12. We need one
/// Newton-Raphson step to have a good float result (24 bits of precision).
SDValue X86TargetLowering::getSqrtEstimate(SDValue Op,
                                           SelectionDAG &DAG, int Enabled,
                                           int &RefinementSteps,
                                           bool &UseOneConstNR,
                                           bool Reciprocal) const {
  EVT VT = Op.getValueType();

  // SSE1 has rsqrtss and rsqrtps. AVX adds a 256-bit variant for rsqrtps.
  // It is likely not profitable to do this for f64 because a double-precision
  // rsqrt estimate with refinement on x86 prior to FMA requires at least 16
  // instructions: convert to single, rsqrtss, convert back to double, refine
  // (3 steps = at least 13 insts). If an 'rsqrtsd' variant was added to the ISA
  // along with FMA, this could be a throughput win.
  // TODO: SQRT requires SSE2 to prevent the introduction of an illegal v4i32
  // after legalize types.
  if ((VT == MVT::f32 && Subtarget.hasSSE1()) ||
      (VT == MVT::v4f32 && Subtarget.hasSSE1() && Reciprocal) ||
      (VT == MVT::v4f32 && Subtarget.hasSSE2() && !Reciprocal) ||
      (VT == MVT::v8f32 && Subtarget.hasAVX()) ||
      (VT == MVT::v16f32 && Subtarget.useAVX512Regs())) {
    if (RefinementSteps == ReciprocalEstimate::Unspecified)
      RefinementSteps = 1;

    UseOneConstNR = false;
    // There is no FSQRT for 512-bits, but there is RSQRT14.
    unsigned Opcode = VT == MVT::v16f32 ? X86ISD::RSQRT14 : X86ISD::FRSQRT;
    return DAG.getNode(Opcode, SDLoc(Op), VT, Op);
  }
  return SDValue();
}
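
// For reference (illustrative; the generic combiner, not this hook, emits the
// refinement): the standard Newton-Raphson step for x ~= 1/sqrt(a) is
//   x1 = x0 * (1.5 - 0.5 * a * x0 * x0)
// Each step roughly doubles the accurate bits, so one step lifts the ~12-bit
// hardware estimate to the ~24 bits a float needs.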

/// The minimum architected relative accuracy is 2^-12. We need one
/// Newton-Raphson step to have a good float result (24 bits of precision).
SDValue X86TargetLowering::getRecipEstimate(SDValue Op, SelectionDAG &DAG,
                                            int Enabled,
                                            int &RefinementSteps) const {
  EVT VT = Op.getValueType();

  // SSE1 has rcpss and rcpps. AVX adds a 256-bit variant for rcpps.
  // It is likely not profitable to do this for f64 because a double-precision
  // reciprocal estimate with refinement on x86 prior to FMA requires
  // 15 instructions: convert to single, rcpss, convert back to double, refine
  // (3 steps = 12 insts). If an 'rcpsd' variant was added to the ISA
  // along with FMA, this could be a throughput win.

  if ((VT == MVT::f32 && Subtarget.hasSSE1()) ||
      (VT == MVT::v4f32 && Subtarget.hasSSE1()) ||
      (VT == MVT::v8f32 && Subtarget.hasAVX()) ||
      (VT == MVT::v16f32 && Subtarget.useAVX512Regs())) {
    // Enable estimate codegen with 1 refinement step for vector division.
    // Scalar division estimates are disabled because they break too much
    // real-world code. These defaults are intended to match GCC behavior.
    if (VT == MVT::f32 && Enabled == ReciprocalEstimate::Unspecified)
      return SDValue();

    if (RefinementSteps == ReciprocalEstimate::Unspecified)
      RefinementSteps = 1;

    // There is no FRCP for 512-bits, but there is RCP14.
    unsigned Opcode = VT == MVT::v16f32 ? X86ISD::RCP14 : X86ISD::FRCP;
    return DAG.getNode(Opcode, SDLoc(Op), VT, Op);
  }
  return SDValue();
}
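
// Likewise for reference (illustrative): the Newton-Raphson step used to
// refine a reciprocal estimate x0 of 1/a is
//   x1 = x0 * (2 - a * x0)
// which converges quadratically, so a single step suffices for float
// precision given the ~12-bit hardware estimate.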

/// If we have at least two divisions that use the same divisor, convert to
/// multiplication by a reciprocal. This may need to be adjusted for a given
/// CPU if a division's cost is not at least twice the cost of a multiplication.
/// This is because we still need one division to calculate the reciprocal and
/// then we need two multiplies by that reciprocal as replacements for the
/// original divisions.
unsigned X86TargetLowering::combineRepeatedFPDivisors() const {
  return 2;
}
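
// Illustrative transform (not from the source): with this hook returning 2,
//   %q1 = fdiv float %a, %d
//   %q2 = fdiv float %b, %d
// becomes, once two divisions share the divisor %d,
//   %r  = fdiv float 1.0, %d
//   %q1 = fmul float %a, %r
//   %q2 = fmul float %b, %r
// i.e. one division plus N multiplies instead of N divisions.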

/// Create a BT (Bit Test) node - Test bit \p BitNo in \p Src and set condition
/// according to equal/not-equal condition code \p CC.
static SDValue getBitTestCondition(SDValue Src, SDValue BitNo, ISD::CondCode CC,
                                   const SDLoc &dl, SelectionDAG &DAG) {
  // If Src is i8, promote it to i32 with any_extend. There is no i8 BT
  // instruction. Since the shift amount is in-range-or-undefined, we know
  // that doing a bittest on the i32 value is ok. We extend to i32 because
  // the encoding for the i16 version is larger than the i32 version.
  // Also promote i16 to i32 for performance / code size reasons.
  if (Src.getValueType() == MVT::i8 || Src.getValueType() == MVT::i16)
    Src = DAG.getNode(ISD::ANY_EXTEND, dl, MVT::i32, Src);

  // See if we can use the 32-bit instruction instead of the 64-bit one for a
  // shorter encoding. Since the former takes the modulo 32 of BitNo and the
  // latter takes the modulo 64, this is only valid if the 5th bit of BitNo is
  // known to be zero.
  if (Src.getValueType() == MVT::i64 &&
      DAG.MaskedValueIsZero(BitNo, APInt(BitNo.getValueSizeInBits(), 32)))
    Src = DAG.getNode(ISD::TRUNCATE, dl, MVT::i32, Src);

  // If the operand types disagree, extend the shift amount to match. Since
  // BT ignores high bits (like shifts) we can use anyextend.
  if (Src.getValueType() != BitNo.getValueType())
    BitNo = DAG.getNode(ISD::ANY_EXTEND, dl, Src.getValueType(), BitNo);

  SDValue BT = DAG.getNode(X86ISD::BT, dl, MVT::i32, Src, BitNo);
  X86::CondCode Cond = CC == ISD::SETEQ ? X86::COND_AE : X86::COND_B;
  return getSETCC(Cond, BT, dl, DAG);
}
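
// Illustrative end-to-end example (a sketch): for
//   %bit = and i32 %x, 32
//   %cmp = icmp ne i32 %bit, 0
// LowerAndToBT (below) feeds this helper Src = %x and BitNo = 5, producing
//   bt   $5, %x
//   setb %al
// BT copies the selected bit into CF, so SETB/SETAE read the result directly.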

/// Result of 'and' is compared against zero. Change to a BT node if possible.
static SDValue LowerAndToBT(SDValue And, ISD::CondCode CC,
                            const SDLoc &dl, SelectionDAG &DAG) {
  assert(And.getOpcode() == ISD::AND && "Expected AND node!");
  SDValue Op0 = And.getOperand(0);
  SDValue Op1 = And.getOperand(1);
  if (Op0.getOpcode() == ISD::TRUNCATE)
    Op0 = Op0.getOperand(0);
  if (Op1.getOpcode() == ISD::TRUNCATE)
    Op1 = Op1.getOperand(0);

  SDValue LHS, RHS;
  if (Op1.getOpcode() == ISD::SHL)
    std::swap(Op0, Op1);
  if (Op0.getOpcode() == ISD::SHL) {
    if (isOneConstant(Op0.getOperand(0))) {
      // If we looked past a truncate, check that it's only truncating away
      // known zeros.
      unsigned BitWidth = Op0.getValueSizeInBits();
      unsigned AndBitWidth = And.getValueSizeInBits();
      if (BitWidth > AndBitWidth) {
        KnownBits Known = DAG.computeKnownBits(Op0);
        if (Known.countMinLeadingZeros() < BitWidth - AndBitWidth)
          return SDValue();
      }
      LHS = Op1;
      RHS = Op0.getOperand(1);
    }
  } else if (Op1.getOpcode() == ISD::Constant) {
    ConstantSDNode *AndRHS = cast<ConstantSDNode>(Op1);
    uint64_t AndRHSVal = AndRHS->getZExtValue();
    SDValue AndLHS = Op0;

    if (AndRHSVal == 1 && AndLHS.getOpcode() == ISD::SRL) {
      LHS = AndLHS.getOperand(0);
      RHS = AndLHS.getOperand(1);
    } else {
      // Use BT if the immediate can't be encoded in a TEST instruction or we
      // are optimizing for size and the immediate won't fit in a byte.
      bool OptForSize = DAG.getMachineFunction().getFunction().optForSize();
      if ((!isUInt<32>(AndRHSVal) || (OptForSize && !isUInt<8>(AndRHSVal))) &&
          isPowerOf2_64(AndRHSVal)) {
        LHS = AndLHS;
        RHS = DAG.getConstant(Log2_64_Ceil(AndRHSVal), dl, LHS.getValueType());
      }
    }
  }

  if (LHS.getNode())
    return getBitTestCondition(LHS, RHS, CC, dl, DAG);

  return SDValue();
}

/// Turns an ISD::CondCode into a value suitable for SSE floating-point mask
/// CMPs.
static unsigned translateX86FSETCC(ISD::CondCode SetCCOpcode, SDValue &Op0,
                                   SDValue &Op1) {
  unsigned SSECC;
  bool Swap = false;

  // SSE Condition code mapping:
  //  0 - EQ
  //  1 - LT
  //  2 - LE
  //  3 - UNORD
  //  4 - NEQ
  //  5 - NLT
  //  6 - NLE
  //  7 - ORD
  switch (SetCCOpcode) {
  default: llvm_unreachable("Unexpected SETCC condition");
  case ISD::SETOEQ:
  case ISD::SETEQ:  SSECC = 0; break;
  case ISD::SETOGT:
  case ISD::SETGT:  Swap = true; LLVM_FALLTHROUGH;
  case ISD::SETLT:
  case ISD::SETOLT: SSECC = 1; break;
  case ISD::SETOGE:
  case ISD::SETGE:  Swap = true; LLVM_FALLTHROUGH;
  case ISD::SETLE:
  case ISD::SETOLE: SSECC = 2; break;
  case ISD::SETUO:  SSECC = 3; break;
  case ISD::SETUNE:
  case ISD::SETNE:  SSECC = 4; break;
  case ISD::SETULE: Swap = true; LLVM_FALLTHROUGH;
  case ISD::SETUGE: SSECC = 5; break;
  case ISD::SETULT: Swap = true; LLVM_FALLTHROUGH;
  case ISD::SETUGT: SSECC = 6; break;
  case ISD::SETO:   SSECC = 7; break;
  case ISD::SETUEQ: SSECC = 8; break;
  case ISD::SETONE: SSECC = 12; break;
  }
  if (Swap)
    std::swap(Op0, Op1);

  return SSECC;
}
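
// Example of the swap logic (illustrative): SSE has no "greater than"
// predicate, so a SETOGT compare of (a, b) is rewritten as SETOLT of (b, a):
//   a > b  ==>  cmpltps b, a   ; predicate 1 (LT) with swapped operands
// Values 8 (UEQ) and 12 (ONE) have no single SSE predicate; on pre-AVX
// targets the caller splits them into two compares plus a logic op, while AVX
// encodes them directly in the wider VCMP immediate space.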

/// Break a 256-bit integer VSETCC into two new 128-bit ones and then
/// concatenate the result back.
static SDValue Lower256IntVSETCC(SDValue Op, SelectionDAG &DAG) {
  MVT VT = Op.getSimpleValueType();

  assert(VT.is256BitVector() && Op.getOpcode() == ISD::SETCC &&
         "Unsupported value type for operation");

  unsigned NumElems = VT.getVectorNumElements();
  SDLoc dl(Op);
  SDValue CC = Op.getOperand(2);

  // Extract the LHS vectors
  SDValue LHS = Op.getOperand(0);
  SDValue LHS1 = extract128BitVector(LHS, 0, DAG, dl);
  SDValue LHS2 = extract128BitVector(LHS, NumElems / 2, DAG, dl);

  // Extract the RHS vectors
  SDValue RHS = Op.getOperand(1);
  SDValue RHS1 = extract128BitVector(RHS, 0, DAG, dl);
  SDValue RHS2 = extract128BitVector(RHS, NumElems / 2, DAG, dl);

  // Issue the operation on the smaller types and concatenate the result back
  MVT EltVT = VT.getVectorElementType();
  MVT NewVT = MVT::getVectorVT(EltVT, NumElems / 2);
  return DAG.getNode(ISD::CONCAT_VECTORS, dl, VT,
                     DAG.getNode(Op.getOpcode(), dl, NewVT, LHS1, RHS1, CC),
                     DAG.getNode(Op.getOpcode(), dl, NewVT, LHS2, RHS2, CC));
}

static SDValue LowerIntVSETCC_AVX512(SDValue Op, SelectionDAG &DAG) {
  SDValue Op0 = Op.getOperand(0);
  SDValue Op1 = Op.getOperand(1);
  SDValue CC = Op.getOperand(2);
  MVT VT = Op.getSimpleValueType();
  SDLoc dl(Op);

  assert(VT.getVectorElementType() == MVT::i1 &&
         "Cannot set masked compare for this operation");

  ISD::CondCode SetCCOpcode = cast<CondCodeSDNode>(CC)->get();

  // If this is a seteq make sure any build vectors of all zeros are on the RHS.
  // This helps with vptestm matching.
  // TODO: Should we just canonicalize the setcc during DAG combine?
  if ((SetCCOpcode == ISD::SETEQ || SetCCOpcode == ISD::SETNE) &&
      ISD::isBuildVectorAllZeros(Op0.getNode()))
    std::swap(Op0, Op1);

  // Prefer SETGT over SETLT.
  if (SetCCOpcode == ISD::SETLT) {
    SetCCOpcode = ISD::getSetCCSwappedOperands(SetCCOpcode);
    std::swap(Op0, Op1);
  }

  return DAG.getSetCC(dl, VT, Op0, Op1, SetCCOpcode);
}

/// Given a simple buildvector constant, return a new vector constant with each
/// element decremented. If decrementing would result in underflow or this
/// is not a simple vector constant, return an empty value.
static SDValue decrementVectorConstant(SDValue V, SelectionDAG &DAG) {
  auto *BV = dyn_cast<BuildVectorSDNode>(V.getNode());
  if (!BV)
    return SDValue();

  MVT VT = V.getSimpleValueType();
  MVT EltVT = VT.getVectorElementType();
  unsigned NumElts = VT.getVectorNumElements();
  SmallVector<SDValue, 8> NewVecC;
  SDLoc DL(V);
  for (unsigned i = 0; i < NumElts; ++i) {
    auto *Elt = dyn_cast<ConstantSDNode>(BV->getOperand(i));
    if (!Elt || Elt->isOpaque() || Elt->getSimpleValueType(0) != EltVT)
      return SDValue();

    // Avoid underflow.
    if (Elt->getAPIntValue().isNullValue())
      return SDValue();

    NewVecC.push_back(DAG.getConstant(Elt->getAPIntValue() - 1, DL, EltVT));
  }

  return DAG.getBuildVector(VT, DL, NewVecC);
}
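
// Example behavior (illustrative): <4 x i16> <1, 2, 3, 4> decrements to
// <0, 1, 2, 3>, while <4 x i16> <0, 2, 3, 4> yields an empty SDValue because
// decrementing the zero element would wrap around.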

/// As another special case, use PSUBUS[BW] when it's profitable. E.g. for
/// Op0 u<= Op1:
///   t = psubus Op0, Op1
///   pcmpeq t, <0..0>
static SDValue LowerVSETCCWithSUBUS(SDValue Op0, SDValue Op1, MVT VT,
                                    ISD::CondCode Cond, const SDLoc &dl,
                                    const X86Subtarget &Subtarget,
                                    SelectionDAG &DAG) {
  if (!Subtarget.hasSSE2())
    return SDValue();

  MVT VET = VT.getVectorElementType();
  if (VET != MVT::i8 && VET != MVT::i16)
    return SDValue();

  switch (Cond) {
  default:
    return SDValue();
  case ISD::SETULT: {
    // If the comparison is against a constant we can turn this into a
    // setule. With psubus, setule does not require a swap. This is
    // beneficial because the constant is then no longer clobbered as the
    // destination register, so it can be hoisted out of a loop.
    // Only do this pre-AVX since vpcmp* is no longer destructive.
    if (Subtarget.hasAVX())
      return SDValue();
    SDValue ULEOp1 = decrementVectorConstant(Op1, DAG);
    if (!ULEOp1)
      return SDValue();
    Op1 = ULEOp1;
    break;
  }
  // Psubus is better than flip-sign because it requires no inversion.
  case ISD::SETUGE:
    std::swap(Op0, Op1);
    break;
  case ISD::SETULE:
    break;
  }

  SDValue Result = DAG.getNode(ISD::USUBSAT, dl, VT, Op0, Op1);
  return DAG.getNode(X86ISD::PCMPEQ, dl, VT, Result,
                     DAG.getConstant(0, dl, VT));
}
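
// Worked example (illustrative): lowering x u< 16 on v16i8 rewrites the
// constant to 15 and tests x u<= 15 instead:
//   t = usubsat x, <15,...,15>   ; saturating subtract: 0 iff x[i] <= 15
//   r = pcmpeqb t, <0,...,0>     ; all-ones where the compare holds
// The unsigned subtract saturates at zero, so "u<=" maps exactly to
// "subtraction result is zero".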

static SDValue LowerVSETCC(SDValue Op, const X86Subtarget &Subtarget,
                           SelectionDAG &DAG) {
  SDValue Op0 = Op.getOperand(0);
  SDValue Op1 = Op.getOperand(1);
  SDValue CC = Op.getOperand(2);
  MVT VT = Op.getSimpleValueType();
  ISD::CondCode Cond = cast<CondCodeSDNode>(CC)->get();
  bool isFP = Op.getOperand(1).getSimpleValueType().isFloatingPoint();
  SDLoc dl(Op);

  if (isFP) {
#ifndef NDEBUG
    MVT EltVT = Op0.getSimpleValueType().getVectorElementType();
    assert(EltVT == MVT::f32 || EltVT == MVT::f64);
#endif

    unsigned Opc;
    if (Subtarget.hasAVX512() && VT.getVectorElementType() == MVT::i1) {
      assert(VT.getVectorNumElements() <= 16);
      Opc = X86ISD::CMPM;
    } else {
      Opc = X86ISD::CMPP;
      // The SSE/AVX packed FP comparison nodes are defined with a
      // floating-point vector result that matches the operand type. This allows
      // them to work with an SSE1 target (integer vector types are not legal).
      VT = Op0.getSimpleValueType();
    }

    // In the two cases not handled by SSE compare predicates (SETUEQ/SETONE),
    // emit two comparisons and a logic op to tie them together.
    SDValue Cmp;
    unsigned SSECC = translateX86FSETCC(Cond, Op0, Op1);
    if (SSECC >= 8 && !Subtarget.hasAVX()) {
      // LLVM predicate is SETUEQ or SETONE.
      unsigned CC0, CC1;
      unsigned CombineOpc;
      if (Cond == ISD::SETUEQ) {
        CC0 = 3; // UNORD
        CC1 = 0; // EQ
        CombineOpc = X86ISD::FOR;
      } else {
        assert(Cond == ISD::SETONE);
        CC0 = 7; // ORD
        CC1 = 4; // NEQ
        CombineOpc = X86ISD::FAND;
      }

      SDValue Cmp0 = DAG.getNode(Opc, dl, VT, Op0, Op1,
                                 DAG.getConstant(CC0, dl, MVT::i8));
      SDValue Cmp1 = DAG.getNode(Opc, dl, VT, Op0, Op1,
                                 DAG.getConstant(CC1, dl, MVT::i8));
      Cmp = DAG.getNode(CombineOpc, dl, VT, Cmp0, Cmp1);
    } else {
      // Handle all other FP comparisons here.
      Cmp = DAG.getNode(Opc, dl, VT, Op0, Op1,
                        DAG.getConstant(SSECC, dl, MVT::i8));
    }

    // If this is SSE/AVX CMPP, bitcast the result back to integer to match the
    // result type of SETCC. The bitcast is expected to be optimized away
    // during combining/isel.
    if (Opc == X86ISD::CMPP)
      Cmp = DAG.getBitcast(Op.getSimpleValueType(), Cmp);

    return Cmp;
  }
19217 | ||||
19218 | MVT VTOp0 = Op0.getSimpleValueType(); | |||
19219 | assert(VTOp0 == Op1.getSimpleValueType() &&((VTOp0 == Op1.getSimpleValueType() && "Expected operands with same type!" ) ? static_cast<void> (0) : __assert_fail ("VTOp0 == Op1.getSimpleValueType() && \"Expected operands with same type!\"" , "/build/llvm-toolchain-snapshot-8~svn350071/lib/Target/X86/X86ISelLowering.cpp" , 19220, __PRETTY_FUNCTION__)) | |||
19220 | "Expected operands with same type!")((VTOp0 == Op1.getSimpleValueType() && "Expected operands with same type!" ) ? static_cast<void> (0) : __assert_fail ("VTOp0 == Op1.getSimpleValueType() && \"Expected operands with same type!\"" , "/build/llvm-toolchain-snapshot-8~svn350071/lib/Target/X86/X86ISelLowering.cpp" , 19220, __PRETTY_FUNCTION__)); | |||
19221 | assert(VT.getVectorNumElements() == VTOp0.getVectorNumElements() &&((VT.getVectorNumElements() == VTOp0.getVectorNumElements() && "Invalid number of packed elements for source and destination!" ) ? static_cast<void> (0) : __assert_fail ("VT.getVectorNumElements() == VTOp0.getVectorNumElements() && \"Invalid number of packed elements for source and destination!\"" , "/build/llvm-toolchain-snapshot-8~svn350071/lib/Target/X86/X86ISelLowering.cpp" , 19222, __PRETTY_FUNCTION__)) | |||
19222 | "Invalid number of packed elements for source and destination!")((VT.getVectorNumElements() == VTOp0.getVectorNumElements() && "Invalid number of packed elements for source and destination!" ) ? static_cast<void> (0) : __assert_fail ("VT.getVectorNumElements() == VTOp0.getVectorNumElements() && \"Invalid number of packed elements for source and destination!\"" , "/build/llvm-toolchain-snapshot-8~svn350071/lib/Target/X86/X86ISelLowering.cpp" , 19222, __PRETTY_FUNCTION__)); | |||
19223 | ||||
19224 | // This is being called by type legalization because v2i32 is marked custom | |||
19225 | // for result type legalization for v2f32. | |||
19226 | if (VTOp0 == MVT::v2i32) | |||
19227 | return SDValue(); | |||
19228 | ||||
19229 | // The non-AVX512 code below works under the assumption that source and | |||
19230 | // destination types are the same. | |||
19231 | assert((Subtarget.hasAVX512() || (VT == VTOp0)) &&(((Subtarget.hasAVX512() || (VT == VTOp0)) && "Value types for source and destination must be the same!" ) ? static_cast<void> (0) : __assert_fail ("(Subtarget.hasAVX512() || (VT == VTOp0)) && \"Value types for source and destination must be the same!\"" , "/build/llvm-toolchain-snapshot-8~svn350071/lib/Target/X86/X86ISelLowering.cpp" , 19232, __PRETTY_FUNCTION__)) | |||
19232 | "Value types for source and destination must be the same!")(((Subtarget.hasAVX512() || (VT == VTOp0)) && "Value types for source and destination must be the same!" ) ? static_cast<void> (0) : __assert_fail ("(Subtarget.hasAVX512() || (VT == VTOp0)) && \"Value types for source and destination must be the same!\"" , "/build/llvm-toolchain-snapshot-8~svn350071/lib/Target/X86/X86ISelLowering.cpp" , 19232, __PRETTY_FUNCTION__)); | |||
19233 | ||||
19234 | // Break 256-bit integer vector compare into smaller ones. | |||
19235 | if (VT.is256BitVector() && !Subtarget.hasInt256()) | |||
19236 | return Lower256IntVSETCC(Op, DAG); | |||
19237 | ||||
19238 | // The result is boolean, but operands are int/float | |||
19239 | if (VT.getVectorElementType() == MVT::i1) { | |||
19240 | // In AVX-512 architecture setcc returns mask with i1 elements, | |||
19241 | // But there is no compare instruction for i8 and i16 elements in KNL. | |||
19242 | assert((VTOp0.getScalarSizeInBits() >= 32 || Subtarget.hasBWI()) &&(((VTOp0.getScalarSizeInBits() >= 32 || Subtarget.hasBWI() ) && "Unexpected operand type") ? static_cast<void > (0) : __assert_fail ("(VTOp0.getScalarSizeInBits() >= 32 || Subtarget.hasBWI()) && \"Unexpected operand type\"" , "/build/llvm-toolchain-snapshot-8~svn350071/lib/Target/X86/X86ISelLowering.cpp" , 19243, __PRETTY_FUNCTION__)) | |||
19243 | "Unexpected operand type")(((VTOp0.getScalarSizeInBits() >= 32 || Subtarget.hasBWI() ) && "Unexpected operand type") ? static_cast<void > (0) : __assert_fail ("(VTOp0.getScalarSizeInBits() >= 32 || Subtarget.hasBWI()) && \"Unexpected operand type\"" , "/build/llvm-toolchain-snapshot-8~svn350071/lib/Target/X86/X86ISelLowering.cpp" , 19243, __PRETTY_FUNCTION__)); | |||
19244 | return LowerIntVSETCC_AVX512(Op, DAG); | |||
19245 | } | |||

  // Lower using XOP integer comparisons.
  if (VT.is128BitVector() && Subtarget.hasXOP()) {
    // Translate compare code to XOP PCOM compare mode.
    unsigned CmpMode = 0;
    switch (Cond) {
    default: llvm_unreachable("Unexpected SETCC condition");
    case ISD::SETULT:
    case ISD::SETLT: CmpMode = 0x00; break;
    case ISD::SETULE:
    case ISD::SETLE: CmpMode = 0x01; break;
    case ISD::SETUGT:
    case ISD::SETGT: CmpMode = 0x02; break;
    case ISD::SETUGE:
    case ISD::SETGE: CmpMode = 0x03; break;
    case ISD::SETEQ: CmpMode = 0x04; break;
    case ISD::SETNE: CmpMode = 0x05; break;
    }

    // Are we comparing unsigned or signed integers?
    unsigned Opc =
        ISD::isUnsignedIntSetCC(Cond) ? X86ISD::VPCOMU : X86ISD::VPCOM;

    return DAG.getNode(Opc, dl, VT, Op0, Op1,
                       DAG.getConstant(CmpMode, dl, MVT::i8));
  }

  // (X & Y) != 0 --> (X & Y) == Y iff Y is power-of-2.
  // Revert part of the simplifySetCCWithAnd combine, to avoid an invert.
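  // With Y a power of 2, X & Y is either 0 or Y, so "(X & Y) != 0" and
  // "(X & Y) == Y" are equivalent and the latter needs no vector NOT.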
  if (Cond == ISD::SETNE && ISD::isBuildVectorAllZeros(Op1.getNode())) {
    SDValue BC0 = peekThroughBitcasts(Op0);
    if (BC0.getOpcode() == ISD::AND) {
      APInt UndefElts;
      SmallVector<APInt, 64> EltBits;
      if (getTargetConstantBitsFromNode(BC0.getOperand(1),
                                        VT.getScalarSizeInBits(), UndefElts,
                                        EltBits, false, false)) {
        if (llvm::all_of(EltBits, [](APInt &V) { return V.isPowerOf2(); })) {
          Cond = ISD::SETEQ;
          Op1 = DAG.getBitcast(VT, BC0.getOperand(1));
        }
      }
    }
  }

  // If this is a SETNE against the signed minimum value, change it to SETGT.
  // If this is a SETNE against the signed maximum value, change it to SETLT,
  // which will be swapped to SETGT.
  // Otherwise we use PCMPEQ+invert.
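  // E.g. for vXi8, X != -128 holds exactly when X >s -128, because -128 is
  // the smallest signed value; this saves the NOT that PCMPEQ would need.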
  APInt ConstValue;
  if (Cond == ISD::SETNE &&
      ISD::isConstantSplatVector(Op1.getNode(), ConstValue)) {
    if (ConstValue.isMinSignedValue())
      Cond = ISD::SETGT;
    else if (ConstValue.isMaxSignedValue())
      Cond = ISD::SETLT;
  }

  // If both operands are known non-negative, then an unsigned compare is the
  // same as a signed compare and there's no need to flip signbits.
  // TODO: We could check for more general simplifications here since we're
  // computing known bits.
  bool FlipSigns = ISD::isUnsignedIntSetCC(Cond) &&
                   !(DAG.SignBitIsZero(Op0) && DAG.SignBitIsZero(Op1));

  // Special case: Use min/max operations for unsigned compares.
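  // (X <=u Y) holds iff X == umin(X, Y), and (X >=u Y) holds iff
  // X == umax(X, Y), so UMIN/UMAX plus PCMPEQ covers these predicates.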
  const TargetLowering &TLI = DAG.getTargetLoweringInfo();
  if (ISD::isUnsignedIntSetCC(Cond) &&
      (FlipSigns || ISD::isTrueWhenEqual(Cond)) &&
      TLI.isOperationLegal(ISD::UMIN, VT)) {
    // If we have a constant operand, increment/decrement it and change the
    // condition to avoid an invert.
    // TODO: This could be extended to handle a non-splat constant by checking
    // that each element of the constant is not the max/null value.
    APInt C;
    if (Cond == ISD::SETUGT && isConstantSplat(Op1, C) && !C.isMaxValue()) {
      // X > C --> X >= (C+1) --> X == umax(X, C+1)
      Op1 = DAG.getConstant(C + 1, dl, VT);
      Cond = ISD::SETUGE;
    }
    if (Cond == ISD::SETULT && isConstantSplat(Op1, C) && !C.isNullValue()) {
      // X < C --> X <= (C-1) --> X == umin(X, C-1)
      Op1 = DAG.getConstant(C - 1, dl, VT);
      Cond = ISD::SETULE;
    }
    bool Invert = false;
    unsigned Opc;
    switch (Cond) {
    default: llvm_unreachable("Unexpected condition code");
    case ISD::SETUGT: Invert = true; LLVM_FALLTHROUGH;
    case ISD::SETULE: Opc = ISD::UMIN; break;
    case ISD::SETULT: Invert = true; LLVM_FALLTHROUGH;
    case ISD::SETUGE: Opc = ISD::UMAX; break;
    }

    SDValue Result = DAG.getNode(Opc, dl, VT, Op0, Op1);
    Result = DAG.getNode(X86ISD::PCMPEQ, dl, VT, Op0, Result);

    // If the logical-not of the result is required, perform that now.
    if (Invert)
      Result = DAG.getNOT(dl, Result, VT);

    return Result;
  }

  // Try to use SUBUS and PCMPEQ.
  if (SDValue V = LowerVSETCCWithSUBUS(Op0, Op1, VT, Cond, dl, Subtarget, DAG))
    return V;

  // We are handling one of the integer comparisons here. Since SSE only has
  // GT and EQ comparisons for integers, swapping operands and multiple
  // operations may be required for some comparisons.
  unsigned Opc = (Cond == ISD::SETEQ || Cond == ISD::SETNE) ? X86ISD::PCMPEQ
                                                            : X86ISD::PCMPGT;
  bool Swap = Cond == ISD::SETLT || Cond == ISD::SETULT ||
              Cond == ISD::SETGE || Cond == ISD::SETUGE;
  bool Invert = Cond == ISD::SETNE ||
                (Cond != ISD::SETEQ && ISD::isTrueWhenEqual(Cond));

  if (Swap)
    std::swap(Op0, Op1);

  // Check that the operation in question is available (most are plain SSE2,
  // but PCMPGTQ and PCMPEQQ have different requirements).
  if (VT == MVT::v2i64) {
    if (Opc == X86ISD::PCMPGT && !Subtarget.hasSSE42()) {
      assert(Subtarget.hasSSE2() && "Don't know how to lower!");

      // Since SSE has no unsigned integer comparisons, we need to flip the sign
      // bits of the inputs before performing those operations. The lower
      // compare is always unsigned.
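      // Flipping bit 31 of a dword makes signed PCMPGTD order it as if it
      // were unsigned. The low dwords always need that flip (the low half of
      // an i64 compares unsigned regardless); the high dwords are flipped
      // only when the overall i64 compare itself is unsigned (FlipSigns).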
      SDValue SB;
      if (FlipSigns) {
        SB = DAG.getConstant(0x8000000080000000ULL, dl, MVT::v2i64);
      } else {
        SB = DAG.getConstant(0x0000000080000000ULL, dl, MVT::v2i64);
      }
      Op0 = DAG.getNode(ISD::XOR, dl, MVT::v2i64, Op0, SB);
      Op1 = DAG.getNode(ISD::XOR, dl, MVT::v2i64, Op1, SB);

      // Cast everything to the right type.
      Op0 = DAG.getBitcast(MVT::v4i32, Op0);
      Op1 = DAG.getBitcast(MVT::v4i32, Op1);

      // Emulate PCMPGTQ with (hi1 > hi2) | ((hi1 == hi2) & (lo1 > lo2))
      SDValue GT = DAG.getNode(X86ISD::PCMPGT, dl, MVT::v4i32, Op0, Op1);
      SDValue EQ = DAG.getNode(X86ISD::PCMPEQ, dl, MVT::v4i32, Op0, Op1);

      // Create masks for only the low parts/high parts of the 64 bit integers.
      static const int MaskHi[] = { 1, 1, 3, 3 };
      static const int MaskLo[] = { 0, 0, 2, 2 };
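      // Each shuffle splats one dword of every 64-bit element into both of
      // its dword lanes, so the v4i32 compare results act as per-i64 masks.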
      SDValue EQHi = DAG.getVectorShuffle(MVT::v4i32, dl, EQ, EQ, MaskHi);
      SDValue GTLo = DAG.getVectorShuffle(MVT::v4i32, dl, GT, GT, MaskLo);
      SDValue GTHi = DAG.getVectorShuffle(MVT::v4i32, dl, GT, GT, MaskHi);

      SDValue Result = DAG.getNode(ISD::AND, dl, MVT::v4i32, EQHi, GTLo);
      Result = DAG.getNode(ISD::OR, dl, MVT::v4i32, Result, GTHi);

      if (Invert)
        Result = DAG.getNOT(dl, Result, MVT::v4i32);

      return DAG.getBitcast(VT, Result);
    }

    if (Opc == X86ISD::PCMPEQ && !Subtarget.hasSSE41()) {
      // If pcmpeqq is missing but pcmpeqd is available, synthesize pcmpeqq
      // with pcmpeqd + pshufd + pand.
      assert(Subtarget.hasSSE2() && !FlipSigns && "Don't know how to lower!");

      // First cast everything to the right type.
      Op0 = DAG.getBitcast(MVT::v4i32, Op0);
      Op1 = DAG.getBitcast(MVT::v4i32, Op1);

      // Do the compare.
      SDValue Result = DAG.getNode(Opc, dl, MVT::v4i32, Op0, Op1);

      // Make sure the lower and upper halves are both all-ones.
      static const int Mask[] = { 1, 0, 3, 2 };
      SDValue Shuf = DAG.getVectorShuffle(MVT::v4i32, dl, Result, Result, Mask);
      Result = DAG.getNode(ISD::AND, dl, MVT::v4i32, Result, Shuf);

      if (Invert)
        Result = DAG.getNOT(dl, Result, MVT::v4i32);

      return DAG.getBitcast(VT, Result);
    }
  }

  // Since SSE has no unsigned integer comparisons, we need to flip the sign
  // bits of the inputs before performing those operations.
  if (FlipSigns) {
    MVT EltVT = VT.getVectorElementType();
    SDValue SM = DAG.getConstant(APInt::getSignMask(EltVT.getSizeInBits()), dl,
                                 VT);
    Op0 = DAG.getNode(ISD::XOR, dl, VT, Op0, SM);
    Op1 = DAG.getNode(ISD::XOR, dl, VT, Op1, SM);
  }

  SDValue Result = DAG.getNode(Opc, dl, VT, Op0, Op1);

  // If the logical-not of the result is required, perform that now.
  if (Invert)
    Result = DAG.getNOT(dl, Result, VT);

  return Result;
}

// Try to select this as a KORTEST+SETCC if possible.
static SDValue EmitKORTEST(SDValue Op0, SDValue Op1, ISD::CondCode CC,
                           const SDLoc &dl, SelectionDAG &DAG,
                           const X86Subtarget &Subtarget) {
  // Only support equality comparisons.
  if (CC != ISD::SETEQ && CC != ISD::SETNE)
    return SDValue();

  // Must be a bitcast from vXi1.
  if (Op0.getOpcode() != ISD::BITCAST)
    return SDValue();

  Op0 = Op0.getOperand(0);
  MVT VT = Op0.getSimpleValueType();
  if (!(Subtarget.hasAVX512() && VT == MVT::v16i1) &&
      !(Subtarget.hasDQI() && VT == MVT::v8i1) &&
      !(Subtarget.hasBWI() && (VT == MVT::v32i1 || VT == MVT::v64i1)))
    return SDValue();

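  // KORTEST sets ZF when the OR of its two mask operands is zero and CF when
  // it is all ones, so a compare against 0 maps to E/NE and a compare against
  // all ones maps to B/AE.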
  X86::CondCode X86CC;
  if (isNullConstant(Op1)) {
    X86CC = CC == ISD::SETEQ ? X86::COND_E : X86::COND_NE;
  } else if (isAllOnesConstant(Op1)) {
    // C flag is set for all ones.
    X86CC = CC == ISD::SETEQ ? X86::COND_B : X86::COND_AE;
  } else
    return SDValue();

  // If the input is an OR, we can combine its operands into the KORTEST.
  SDValue LHS = Op0;
  SDValue RHS = Op0;
  if (Op0.getOpcode() == ISD::OR && Op0.hasOneUse()) {
    LHS = Op0.getOperand(0);
    RHS = Op0.getOperand(1);
  }

  SDValue KORTEST = DAG.getNode(X86ISD::KORTEST, dl, MVT::i32, LHS, RHS);
  return getSETCC(X86CC, KORTEST, dl, DAG);
}

SDValue X86TargetLowering::LowerSETCC(SDValue Op, SelectionDAG &DAG) const {

  MVT VT = Op.getSimpleValueType();

  if (VT.isVector()) return LowerVSETCC(Op, Subtarget, DAG);

  assert(VT == MVT::i8 && "SetCC type must be 8-bit integer");
  SDValue Op0 = Op.getOperand(0);
  SDValue Op1 = Op.getOperand(1);
  SDLoc dl(Op);
  ISD::CondCode CC = cast<CondCodeSDNode>(Op.getOperand(2))->get();

  // Optimize to BT if possible.
  // Lower (X & (1 << N)) == 0 to BT(X, N).
  // Lower ((X >>u N) & 1) != 0 to BT(X, N).
  // Lower ((X >>s N) & 1) != 0 to BT(X, N).
  if (Op0.getOpcode() == ISD::AND && Op0.hasOneUse() && isNullConstant(Op1) &&
      (CC == ISD::SETEQ || CC == ISD::SETNE)) {
    if (SDValue NewSetCC = LowerAndToBT(Op0, CC, dl, DAG))
      return NewSetCC;
  }

  // Try to use PTEST for a tree of ORs equality-compared with 0.
  // TODO: We could do AND tree with all 1s as well by using the C flag.
  if (Op0.getOpcode() == ISD::OR && isNullConstant(Op1) &&
      (CC == ISD::SETEQ || CC == ISD::SETNE)) {
    if (SDValue NewSetCC = LowerVectorAllZeroTest(Op0, CC, Subtarget, DAG))
      return NewSetCC;
  }

  // Try to lower using KORTEST.
  if (SDValue NewSetCC = EmitKORTEST(Op0, Op1, CC, dl, DAG, Subtarget))
    return NewSetCC;

  // Look for X == 0, X == 1, X != 0, or X != 1. We can simplify some forms of
  // these.
  if ((isOneConstant(Op1) || isNullConstant(Op1)) &&
      (CC == ISD::SETEQ || CC == ISD::SETNE)) {

    // If the input is a setcc, then reuse the input setcc or use a new one
    // with the inverted condition.
    if (Op0.getOpcode() == X86ISD::SETCC) {
      X86::CondCode CCode = (X86::CondCode)Op0.getConstantOperandVal(0);
      bool Invert = (CC == ISD::SETNE) ^ isNullConstant(Op1);
      if (!Invert)
        return Op0;

      CCode = X86::GetOppositeBranchCondition(CCode);
      return getSETCC(CCode, Op0.getOperand(1), dl, DAG);
    }
  }

  bool IsFP = Op1.getSimpleValueType().isFloatingPoint();
  X86::CondCode X86CC = TranslateX86CC(CC, dl, IsFP, Op0, Op1, DAG);
  if (X86CC == X86::COND_INVALID)
    return SDValue();

  SDValue EFLAGS = EmitCmp(Op0, Op1, X86CC, dl, DAG);
  EFLAGS = ConvertCmpIfNecessary(EFLAGS, DAG);
  return getSETCC(X86CC, EFLAGS, dl, DAG);
}

SDValue X86TargetLowering::LowerSETCCCARRY(SDValue Op, SelectionDAG &DAG) const {
  SDValue LHS = Op.getOperand(0);
  SDValue RHS = Op.getOperand(1);
  SDValue Carry = Op.getOperand(2);
  SDValue Cond = Op.getOperand(3);
  SDLoc DL(Op);

  assert(LHS.getSimpleValueType().isInteger() && "SETCCCARRY is integer only.");
  X86::CondCode CC = TranslateIntegerX86CC(cast<CondCodeSDNode>(Cond)->get());

  // Recreate the carry if needed.
  EVT CarryVT = Carry.getValueType();
  APInt NegOne = APInt::getAllOnesValue(CarryVT.getScalarSizeInBits());
  Carry = DAG.getNode(X86ISD::ADD, DL, DAG.getVTList(CarryVT, MVT::i32),
                      Carry, DAG.getConstant(NegOne, DL, CarryVT));
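  // Adding all ones sets CF exactly when the incoming carry value is nonzero,
  // rematerializing EFLAGS.CF from a boolean held in a register.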

  SDVTList VTs = DAG.getVTList(LHS.getValueType(), MVT::i32);
  SDValue Cmp = DAG.getNode(X86ISD::SBB, DL, VTs, LHS, RHS, Carry.getValue(1));
  return getSETCC(CC, Cmp.getValue(1), DL, DAG);
}

/// Return true if opcode is an X86 logical comparison.
static bool isX86LogicalCmp(SDValue Op) {
  unsigned Opc = Op.getOpcode();
  if (Opc == X86ISD::CMP || Opc == X86ISD::COMI || Opc == X86ISD::UCOMI ||
      Opc == X86ISD::SAHF)
    return true;
  if (Op.getResNo() == 1 &&
      (Opc == X86ISD::ADD || Opc == X86ISD::SUB || Opc == X86ISD::ADC ||
       Opc == X86ISD::SBB || Opc == X86ISD::SMUL ||
       Opc == X86ISD::INC || Opc == X86ISD::DEC || Opc == X86ISD::OR ||
       Opc == X86ISD::XOR || Opc == X86ISD::AND))
    return true;

  if (Op.getResNo() == 2 && Opc == X86ISD::UMUL)
    return true;

  return false;
}

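// Return true if the truncate only drops bits that are known to be zero, so
// the narrow value is a faithful copy of the wide one.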
static bool isTruncWithZeroHighBitsInput(SDValue V, SelectionDAG &DAG) {
  if (V.getOpcode() != ISD::TRUNCATE)
    return false;

  SDValue VOp0 = V.getOperand(0);
  unsigned InBits = VOp0.getValueSizeInBits();
  unsigned Bits = V.getValueSizeInBits();
  return DAG.MaskedValueIsZero(VOp0, APInt::getHighBitsSet(InBits,InBits-Bits));
}

SDValue X86TargetLowering::LowerSELECT(SDValue Op, SelectionDAG &DAG) const {
  bool AddTest = true;
  SDValue Cond = Op.getOperand(0);
  SDValue Op1 = Op.getOperand(1);
  SDValue Op2 = Op.getOperand(2);
  SDLoc DL(Op);
  MVT VT = Op1.getSimpleValueType();
  SDValue CC;

  // Lower FP selects into a CMP/AND/ANDN/OR sequence when the necessary SSE ops
  // are available or VBLENDV if AVX is available.
  // Otherwise FP cmovs get lowered into a less efficient branch sequence later.
  if (Cond.getOpcode() == ISD::SETCC &&
      ((Subtarget.hasSSE2() && VT == MVT::f64) ||
       (Subtarget.hasSSE1() && VT == MVT::f32)) &&
      VT == Cond.getOperand(0).getSimpleValueType() && Cond->hasOneUse()) {
    SDValue CondOp0 = Cond.getOperand(0), CondOp1 = Cond.getOperand(1);
    unsigned SSECC = translateX86FSETCC(
        cast<CondCodeSDNode>(Cond.getOperand(2))->get(), CondOp0, CondOp1);

    if (Subtarget.hasAVX512()) {
      SDValue Cmp = DAG.getNode(X86ISD::FSETCCM, DL, MVT::v1i1, CondOp0,
                                CondOp1, DAG.getConstant(SSECC, DL, MVT::i8));
      assert(!VT.isVector() && "Not a scalar type?");
      return DAG.getNode(X86ISD::SELECTS, DL, VT, Cmp, Op1, Op2);
    }

    if (SSECC < 8 || Subtarget.hasAVX()) {
      SDValue Cmp = DAG.getNode(X86ISD::FSETCC, DL, VT, CondOp0, CondOp1,
                                DAG.getConstant(SSECC, DL, MVT::i8));

      // If we have AVX, we can use a variable vector select (VBLENDV) instead
      // of 3 logic instructions for size savings and potentially speed.
      // Unfortunately, there is no scalar form of VBLENDV.

      // If either operand is a +0.0 constant, don't try this. We can expect to
      // optimize away at least one of the logic instructions later in that
      // case, so that sequence would be faster than a variable blend.

      // BLENDV was introduced with SSE 4.1, but the 2 register form implicitly
      // uses XMM0 as the selection register. That may need just as many
      // instructions as the AND/ANDN/OR sequence due to register moves, so
      // don't bother.
      if (Subtarget.hasAVX() && !isNullFPConstant(Op1) &&
          !isNullFPConstant(Op2)) {
        // Convert to vectors, do a VSELECT, and convert back to scalar.
        // All of the conversions should be optimized away.
        MVT VecVT = VT == MVT::f32 ? MVT::v4f32 : MVT::v2f64;
        SDValue VOp1 = DAG.getNode(ISD::SCALAR_TO_VECTOR, DL, VecVT, Op1);
        SDValue VOp2 = DAG.getNode(ISD::SCALAR_TO_VECTOR, DL, VecVT, Op2);
        SDValue VCmp = DAG.getNode(ISD::SCALAR_TO_VECTOR, DL, VecVT, Cmp);

        MVT VCmpVT = VT == MVT::f32 ? MVT::v4i32 : MVT::v2i64;
        VCmp = DAG.getBitcast(VCmpVT, VCmp);

        SDValue VSel = DAG.getSelect(DL, VecVT, VCmp, VOp1, VOp2);

        return DAG.getNode(ISD::EXTRACT_VECTOR_ELT, DL, VT,
                           VSel, DAG.getIntPtrConstant(0, DL));
      }
      SDValue AndN = DAG.getNode(X86ISD::FANDN, DL, VT, Cmp, Op2);
      SDValue And = DAG.getNode(X86ISD::FAND, DL, VT, Cmp, Op1);
      return DAG.getNode(X86ISD::FOR, DL, VT, AndN, And);
    }
  }

  // AVX512 fallback is to lower selects of scalar floats to masked moves.
  if ((VT == MVT::f64 || VT == MVT::f32) && Subtarget.hasAVX512()) {
    SDValue Cmp = DAG.getNode(ISD::SCALAR_TO_VECTOR, DL, MVT::v1i1, Cond);
    return DAG.getNode(X86ISD::SELECTS, DL, VT, Cmp, Op1, Op2);
  }

  // For v64i1 without 64-bit support we need to split and rejoin.
  if (VT == MVT::v64i1 && !Subtarget.is64Bit()) {
    assert(Subtarget.hasBWI() && "Expected BWI to be legal");
    SDValue Op1Lo = extractSubVector(Op1, 0, DAG, DL, 32);
    SDValue Op2Lo = extractSubVector(Op2, 0, DAG, DL, 32);
    SDValue Op1Hi = extractSubVector(Op1, 32, DAG, DL, 32);
    SDValue Op2Hi = extractSubVector(Op2, 32, DAG, DL, 32);
    SDValue Lo = DAG.getSelect(DL, MVT::v32i1, Cond, Op1Lo, Op2Lo);
    SDValue Hi = DAG.getSelect(DL, MVT::v32i1, Cond, Op1Hi, Op2Hi);
    return DAG.getNode(ISD::CONCAT_VECTORS, DL, VT, Lo, Hi);
  }

  if (VT.isVector() && VT.getVectorElementType() == MVT::i1) {
    SDValue Op1Scalar;
    if (ISD::isBuildVectorOfConstantSDNodes(Op1.getNode()))
      Op1Scalar = ConvertI1VectorToInteger(Op1, DAG);
    else if (Op1.getOpcode() == ISD::BITCAST && Op1.getOperand(0))
      Op1Scalar = Op1.getOperand(0);
    SDValue Op2Scalar;
    if (ISD::isBuildVectorOfConstantSDNodes(Op2.getNode()))
      Op2Scalar = ConvertI1VectorToInteger(Op2, DAG);
    else if (Op2.getOpcode() == ISD::BITCAST && Op2.getOperand(0))
      Op2Scalar = Op2.getOperand(0);
    if (Op1Scalar.getNode() && Op2Scalar.getNode()) {
      SDValue newSelect = DAG.getSelect(DL, Op1Scalar.getValueType(), Cond,
                                        Op1Scalar, Op2Scalar);
      if (newSelect.getValueSizeInBits() == VT.getSizeInBits())
        return DAG.getBitcast(VT, newSelect);
      SDValue ExtVec = DAG.getBitcast(MVT::v8i1, newSelect);
      return DAG.getNode(ISD::EXTRACT_SUBVECTOR, DL, VT, ExtVec,
                         DAG.getIntPtrConstant(0, DL));
    }
  }

  if (Cond.getOpcode() == ISD::SETCC) {
    if (SDValue NewCond = LowerSETCC(Cond, DAG)) {
      Cond = NewCond;
      // If the condition was updated, it's possible that the operands of the
      // select were also updated (for example, EmitTest has a RAUW). Refresh
      // the local references to the select operands in case they got stale.
      Op1 = Op.getOperand(1);
      Op2 = Op.getOperand(2);
    }
  }

  // (select (x == 0), -1, y) -> (sign_bit (x - 1)) | y
  // (select (x == 0), y, -1) -> ~(sign_bit (x - 1)) | y
  // (select (x != 0), y, -1) -> (sign_bit (x - 1)) | y
  // (select (x != 0), -1, y) -> ~(sign_bit (x - 1)) | y
  // (select (and (x , 0x1) == 0), y, (z ^ y) ) -> (-(and (x , 0x1)) & z ) ^ y
  // (select (and (x , 0x1) == 0), y, (z | y) ) -> (-(and (x , 0x1)) & z ) | y
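  // The 0/-1 mask comes from a borrow: x - 1 borrows exactly when x == 0
  // (and 0 - x borrows exactly when x != 0), so SBB of zero with itself
  // turns CF into the all-zeros/all-ones mask without a branch.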
  if (Cond.getOpcode() == X86ISD::SETCC &&
      Cond.getOperand(1).getOpcode() == X86ISD::CMP &&
      isNullConstant(Cond.getOperand(1).getOperand(1))) {
    SDValue Cmp = Cond.getOperand(1);
    unsigned CondCode =
        cast<ConstantSDNode>(Cond.getOperand(0))->getZExtValue();

    if ((isAllOnesConstant(Op1) || isAllOnesConstant(Op2)) &&
        (CondCode == X86::COND_E || CondCode == X86::COND_NE)) {
      SDValue Y = isAllOnesConstant(Op2) ? Op1 : Op2;
      SDValue CmpOp0 = Cmp.getOperand(0);

      // Apply further optimizations for special cases
      // (select (x != 0), -1, 0) -> neg & sbb
      // (select (x == 0), 0, -1) -> neg & sbb
      if (isNullConstant(Y) &&
          (isAllOnesConstant(Op1) == (CondCode == X86::COND_NE))) {
        SDValue Zero = DAG.getConstant(0, DL, CmpOp0.getValueType());
        SDValue Cmp = DAG.getNode(X86ISD::CMP, DL, MVT::i32, Zero, CmpOp0);
        SDVTList VTs = DAG.getVTList(Op.getValueType(), MVT::i32);
        Zero = DAG.getConstant(0, DL, Op.getValueType());
        return DAG.getNode(X86ISD::SBB, DL, VTs, Zero, Zero, Cmp);
      }

      Cmp = DAG.getNode(X86ISD::CMP, DL, MVT::i32,
                        CmpOp0, DAG.getConstant(1, DL, CmpOp0.getValueType()));
      Cmp = ConvertCmpIfNecessary(Cmp, DAG);

      SDVTList VTs = DAG.getVTList(Op.getValueType(), MVT::i32);
      SDValue Zero = DAG.getConstant(0, DL, Op.getValueType());
      SDValue Res =   // Res = 0 or -1.
          DAG.getNode(X86ISD::SBB, DL, VTs, Zero, Zero, Cmp);

      if (isAllOnesConstant(Op1) != (CondCode == X86::COND_E))
        Res = DAG.getNOT(DL, Res, Res.getValueType());

      if (!isNullConstant(Op2))
        Res = DAG.getNode(ISD::OR, DL, Res.getValueType(), Res, Y);
      return Res;
    } else if (!Subtarget.hasCMov() && CondCode == X86::COND_E &&
               Cmp.getOperand(0).getOpcode() == ISD::AND &&
               isOneConstant(Cmp.getOperand(0).getOperand(1))) {
      SDValue CmpOp0 = Cmp.getOperand(0);
      SDValue Src1, Src2;
      // True if Op2 is an XOR or OR operator and one of its operands
      // equals Op1:
      //   ( a , a op b) || ( b , a op b)
      auto isOrXorPattern = [&]() {
        if ((Op2.getOpcode() == ISD::XOR || Op2.getOpcode() == ISD::OR) &&
            (Op2.getOperand(0) == Op1 || Op2.getOperand(1) == Op1)) {
          Src1 =
              Op2.getOperand(0) == Op1 ? Op2.getOperand(1) : Op2.getOperand(0);
          Src2 = Op1;
          return true;
        }
        return false;
      };

      if (isOrXorPattern()) {
        SDValue Neg;
        unsigned int CmpSz = CmpOp0.getSimpleValueType().getSizeInBits();
        // We need a mask of all zeros or all ones with the same size as the
        // other operands.
        if (CmpSz > VT.getSizeInBits())
          Neg = DAG.getNode(ISD::TRUNCATE, DL, VT, CmpOp0);
        else if (CmpSz < VT.getSizeInBits())
          Neg = DAG.getNode(ISD::AND, DL, VT,
              DAG.getNode(ISD::ANY_EXTEND, DL, VT, CmpOp0.getOperand(0)),
              DAG.getConstant(1, DL, VT));
        else
          Neg = CmpOp0;
        SDValue Mask = DAG.getNode(ISD::SUB, DL, VT, DAG.getConstant(0, DL, VT),
                                   Neg); // -(and (x, 0x1))
        SDValue And = DAG.getNode(ISD::AND, DL, VT, Mask, Src1); // Mask & z
        return DAG.getNode(Op2.getOpcode(), DL, VT, And, Src2);  // And Op y
      }
    }
  }

  // Look past (and (setcc_carry (cmp ...)), 1).
  if (Cond.getOpcode() == ISD::AND &&
      Cond.getOperand(0).getOpcode() == X86ISD::SETCC_CARRY &&
      isOneConstant(Cond.getOperand(1)))
    Cond = Cond.getOperand(0);

  // If the condition flag is set by an X86ISD::CMP, then use it as the
  // condition setting operand in place of the X86ISD::SETCC.
  unsigned CondOpcode = Cond.getOpcode();
  if (CondOpcode == X86ISD::SETCC ||
      CondOpcode == X86ISD::SETCC_CARRY) {
    CC = Cond.getOperand(0);

    SDValue Cmp = Cond.getOperand(1);
    unsigned Opc = Cmp.getOpcode();
    MVT VT = Op.getSimpleValueType();

    bool IllegalFPCMov = false;
    if (VT.isFloatingPoint() && !VT.isVector() &&
        !isScalarFPTypeInSSEReg(VT)) // FPStack?
      IllegalFPCMov = !hasFPCMov(cast<ConstantSDNode>(CC)->getSExtValue());

    if ((isX86LogicalCmp(Cmp) && !IllegalFPCMov) ||
        Opc == X86ISD::BT) { // FIXME
      Cond = Cmp;
      AddTest = false;
    }
  } else if (CondOpcode == ISD::USUBO || CondOpcode == ISD::SSUBO ||
             CondOpcode == ISD::UADDO || CondOpcode == ISD::SADDO ||
             ((CondOpcode == ISD::UMULO || CondOpcode == ISD::SMULO) &&
              Cond.getOperand(0).getValueType() != MVT::i8)) {
    SDValue LHS = Cond.getOperand(0);
    SDValue RHS = Cond.getOperand(1);
    unsigned X86Opcode;
    unsigned X86Cond;
    SDVTList VTs;
    switch (CondOpcode) {
    case ISD::UADDO: X86Opcode = X86ISD::ADD; X86Cond = X86::COND_B; break;
    case ISD::SADDO: X86Opcode = X86ISD::ADD; X86Cond = X86::COND_O; break;
    case ISD::USUBO: X86Opcode = X86ISD::SUB; X86Cond = X86::COND_B; break;
    case ISD::SSUBO: X86Opcode = X86ISD::SUB; X86Cond = X86::COND_O; break;
    case ISD::UMULO: X86Opcode = X86ISD::UMUL; X86Cond = X86::COND_O; break;
    case ISD::SMULO: X86Opcode = X86ISD::SMUL; X86Cond = X86::COND_O; break;
    default: llvm_unreachable("unexpected overflowing operator");
    }
    if (CondOpcode == ISD::UMULO)
      VTs = DAG.getVTList(LHS.getValueType(), LHS.getValueType(),
                          MVT::i32);
    else
      VTs = DAG.getVTList(LHS.getValueType(), MVT::i32);

    SDValue X86Op = DAG.getNode(X86Opcode, DL, VTs, LHS, RHS);

    if (CondOpcode == ISD::UMULO)
      Cond = X86Op.getValue(2);
    else
      Cond = X86Op.getValue(1);

    CC = DAG.getConstant(X86Cond, DL, MVT::i8);
    AddTest = false;
  }

  if (AddTest) {
    // Look past the truncate if the high bits are known zero.
    if (isTruncWithZeroHighBitsInput(Cond, DAG))
      Cond = Cond.getOperand(0);

    // We know the result of AND is compared against zero. Try to match
    // it to BT.
    if (Cond.getOpcode() == ISD::AND && Cond.hasOneUse()) {
      if (SDValue NewSetCC = LowerAndToBT(Cond, ISD::SETNE, DL, DAG)) {
        CC = NewSetCC.getOperand(0);
        Cond = NewSetCC.getOperand(1);
        AddTest = false;
      }
    }
  }

  if (AddTest) {
    CC = DAG.getConstant(X86::COND_NE, DL, MVT::i8);
    Cond = EmitCmp(Cond, DAG.getConstant(0, DL, Cond.getValueType()),
                   X86::COND_NE, DL, DAG);
  }

  // a < b ? -1 : 0 -> RES = ~setcc_carry
  // a < b ? 0 : -1 -> RES = setcc_carry
  // a >= b ? -1 : 0 -> RES = setcc_carry
  // a >= b ? 0 : -1 -> RES = ~setcc_carry
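  // SETCC_CARRY is an SBB of a register with itself, broadcasting CF into
  // every bit, so these selects of 0/-1 need neither a CMOV nor a branch.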
  if (Cond.getOpcode() == X86ISD::SUB) {
    Cond = ConvertCmpIfNecessary(Cond, DAG);
    unsigned CondCode = cast<ConstantSDNode>(CC)->getZExtValue();

    if ((CondCode == X86::COND_AE || CondCode == X86::COND_B) &&
        (isAllOnesConstant(Op1) || isAllOnesConstant(Op2)) &&
        (isNullConstant(Op1) || isNullConstant(Op2))) {
      SDValue Res = DAG.getNode(X86ISD::SETCC_CARRY, DL, Op.getValueType(),
                                DAG.getConstant(X86::COND_B, DL, MVT::i8),
                                Cond);
      if (isAllOnesConstant(Op1) != (CondCode == X86::COND_B))
        return DAG.getNOT(DL, Res, Res.getValueType());
      return Res;
    }
  }

  // X86 doesn't have an i8 cmov. If both operands are the result of a
  // truncate, widen the cmov and push the truncate through. This avoids
  // introducing a new branch during isel and doesn't add any extensions.
  if (Op.getValueType() == MVT::i8 &&
      Op1.getOpcode() == ISD::TRUNCATE && Op2.getOpcode() == ISD::TRUNCATE) {
    SDValue T1 = Op1.getOperand(0), T2 = Op2.getOperand(0);
    if (T1.getValueType() == T2.getValueType() &&
        // Blacklist CopyFromReg to avoid partial register stalls.
        T1.getOpcode() != ISD::CopyFromReg && T2.getOpcode() != ISD::CopyFromReg) {
      SDValue Cmov = DAG.getNode(X86ISD::CMOV, DL, T1.getValueType(), T2, T1,
                                 CC, Cond);
      return DAG.getNode(ISD::TRUNCATE, DL, Op.getValueType(), Cmov);
    }
  }

  // Promote i16 cmovs if it won't prevent folding a load.
  if (Op.getValueType() == MVT::i16 && !MayFoldLoad(Op1) && !MayFoldLoad(Op2)) {
    Op1 = DAG.getNode(ISD::ANY_EXTEND, DL, MVT::i32, Op1);
    Op2 = DAG.getNode(ISD::ANY_EXTEND, DL, MVT::i32, Op2);
    SDValue Ops[] = { Op2, Op1, CC, Cond };
    SDValue Cmov = DAG.getNode(X86ISD::CMOV, DL, MVT::i32, Ops);
    return DAG.getNode(ISD::TRUNCATE, DL, Op.getValueType(), Cmov);
  }

  // X86ISD::CMOV means set the result (which is operand 1) to the RHS if
  // the condition is true.
  SDValue Ops[] = { Op2, Op1, CC, Cond };
  return DAG.getNode(X86ISD::CMOV, DL, Op.getValueType(), Ops);
}

static SDValue LowerSIGN_EXTEND_Mask(SDValue Op,
                                     const X86Subtarget &Subtarget,
                                     SelectionDAG &DAG) {
  MVT VT = Op->getSimpleValueType(0);
  SDValue In = Op->getOperand(0);
  MVT InVT = In.getSimpleValueType();
  assert(InVT.getVectorElementType() == MVT::i1 && "Unexpected input type!");
  MVT VTElt = VT.getVectorElementType();
  SDLoc dl(Op);

  unsigned NumElts = VT.getVectorNumElements();

  // Extend VT if the scalar type is i8/i16 and BWI is not supported.
  MVT ExtVT = VT;
  if (!Subtarget.hasBWI() && VTElt.getSizeInBits() <= 16) {
    // If v16i32 is to be avoided, we'll need to split and concatenate.
    if (NumElts == 16 && !Subtarget.canExtendTo512DQ())
      return SplitAndExtendv16i1(Op.getOpcode(), VT, In, dl, DAG);

    ExtVT = MVT::getVectorVT(MVT::i32, NumElts);
  }

  // Widen to 512-bits if VLX is not supported.
  MVT WideVT = ExtVT;
  if (!ExtVT.is512BitVector() && !Subtarget.hasVLX()) {
    NumElts *= 512 / ExtVT.getSizeInBits();
    InVT = MVT::getVectorVT(MVT::i1, NumElts);
    In = DAG.getNode(ISD::INSERT_SUBVECTOR, dl, InVT, DAG.getUNDEF(InVT),
                     In, DAG.getIntPtrConstant(0, dl));
    WideVT = MVT::getVectorVT(ExtVT.getVectorElementType(), NumElts);
  }

  SDValue V;
  MVT WideEltVT = WideVT.getVectorElementType();
  if ((Subtarget.hasDQI() && WideEltVT.getSizeInBits() >= 32) ||
      (Subtarget.hasBWI() && WideEltVT.getSizeInBits() <= 16)) {
    V = DAG.getNode(Op.getOpcode(), dl, WideVT, In);
  } else {
    SDValue NegOne = DAG.getConstant(-1, dl, WideVT);
    SDValue Zero = DAG.getConstant(0, dl, WideVT);
    V = DAG.getSelect(dl, WideVT, In, NegOne, Zero);
  }

  // Truncate if we had to extend i16/i8 above.
  if (VT != ExtVT) {
    WideVT = MVT::getVectorVT(VTElt, NumElts);
    V = DAG.getNode(ISD::TRUNCATE, dl, WideVT, V);
  }

  // Extract back to 128/256-bit if we widened.
  if (WideVT != VT)
    V = DAG.getNode(ISD::EXTRACT_SUBVECTOR, dl, VT, V,
                    DAG.getIntPtrConstant(0, dl));

  return V;
}

static SDValue LowerANY_EXTEND(SDValue Op, const X86Subtarget &Subtarget,
                               SelectionDAG &DAG) {
  SDValue In = Op->getOperand(0);
  MVT InVT = In.getSimpleValueType();

  if (InVT.getVectorElementType() == MVT::i1)
    return LowerSIGN_EXTEND_Mask(Op, Subtarget, DAG);

  assert(Subtarget.hasAVX() && "Expected AVX support");
  return LowerAVXExtend(Op, DAG, Subtarget);
}

// Lowering for SIGN_EXTEND_VECTOR_INREG and ZERO_EXTEND_VECTOR_INREG.
// For sign extend this needs to handle all vector sizes and SSE4.1 and
// non-SSE4.1 targets. For zero extend this should only handle inputs of
// MVT::v64i8 when BWI is not supported, but AVX512 is.
static SDValue LowerEXTEND_VECTOR_INREG(SDValue Op,
                                        const X86Subtarget &Subtarget,
                                        SelectionDAG &DAG) {
  SDValue In = Op->getOperand(0);
  MVT VT = Op->getSimpleValueType(0);
  MVT InVT = In.getSimpleValueType();

  MVT SVT = VT.getVectorElementType();
  MVT InSVT = InVT.getVectorElementType();
  assert(SVT.getSizeInBits() > InSVT.getSizeInBits());

  if (SVT != MVT::i64 && SVT != MVT::i32 && SVT != MVT::i16)
    return SDValue();
  if (InSVT != MVT::i32 && InSVT != MVT::i16 && InSVT != MVT::i8)
    return SDValue();
  if (!(VT.is128BitVector() && Subtarget.hasSSE2()) &&
      !(VT.is256BitVector() && Subtarget.hasAVX()) &&
      !(VT.is512BitVector() && Subtarget.hasAVX512()))
    return SDValue();

  SDLoc dl(Op);
  unsigned Opc = Op.getOpcode();
  unsigned NumElts = VT.getVectorNumElements();

  // For 256-bit vectors, we only need the lower (128-bit) half of the input.
  // For 512-bit vectors, we need 128-bits or 256-bits.
  if (InVT.getSizeInBits() > 128) {
    // Input needs to be at least the same number of elements as output, and
    // at least 128-bits.
    int InSize = InSVT.getSizeInBits() * NumElts;
    In = extractSubVector(In, 0, DAG, dl, std::max(InSize, 128));
    InVT = In.getSimpleValueType();
  }

  // SSE41 targets can use the pmov[sz]x* instructions directly for 128-bit
  // results, so those are legal and shouldn't occur here. AVX2/AVX512 pmovsx*
  // instructions still need to be handled here for 256/512-bit results.
  if (Subtarget.hasInt256()) {
    assert(VT.getSizeInBits() > 128 && "Unexpected 128-bit vector extension");

    if (InVT.getVectorNumElements() != NumElts)
      return DAG.getNode(Op.getOpcode(), dl, VT, In);

    // FIXME: Apparently we create inreg operations that could be regular
    // extends.
    unsigned ExtOpc =
        Opc == ISD::SIGN_EXTEND_VECTOR_INREG ? ISD::SIGN_EXTEND
                                             : ISD::ZERO_EXTEND;
    return DAG.getNode(ExtOpc, dl, VT, In);
  }

  // pre-AVX2 256-bit extensions need to be split into 128-bit instructions.
  if (Subtarget.hasAVX()) {
    assert(VT.is256BitVector() && "256-bit vector expected");
    int HalfNumElts = NumElts / 2;
    MVT HalfVT = MVT::getVectorVT(SVT, HalfNumElts);

    unsigned NumSrcElts = InVT.getVectorNumElements();
    SmallVector<int, 16> HiMask(NumSrcElts, SM_SentinelUndef);
    for (int i = 0; i != HalfNumElts; ++i)
      HiMask[i] = HalfNumElts + i;

    SDValue Lo = DAG.getNode(Opc, dl, HalfVT, In);
    SDValue Hi = DAG.getVectorShuffle(InVT, dl, In, DAG.getUNDEF(InVT), HiMask);
    Hi = DAG.getNode(Opc, dl, HalfVT, Hi);
    return DAG.getNode(ISD::CONCAT_VECTORS, dl, VT, Lo, Hi);
  }

  // We should only get here for sign extend.
  assert(Opc == ISD::SIGN_EXTEND_VECTOR_INREG && "Unexpected opcode!");
  assert(VT.is128BitVector() && InVT.is128BitVector() && "Unexpected VTs");

  // pre-SSE41 targets unpack lower lanes and then sign-extend using SRAI.
  SDValue Curr = In;
  SDValue SignExt = Curr;

  // As SRAI is only available on i16/i32 types, we expand only up to i32
  // and handle i64 separately.
  if (InVT != MVT::v4i32) {
    MVT DestVT = VT == MVT::v2i64 ? MVT::v4i32 : VT;

    unsigned DestWidth = DestVT.getScalarSizeInBits();
    unsigned Scale = DestWidth / InSVT.getSizeInBits();

    unsigned InNumElts = InVT.getVectorNumElements();
    unsigned DestElts = DestVT.getVectorNumElements();

    // Build a shuffle mask that takes each input element and places it in the
    // MSBs of the new element size.
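    // E.g. for v16i8 -> v4i32, Scale is 4 and input byte i lands in byte
    // 4*i+3 (the MSB) of dword i; the arithmetic shift right by 24 below
    // then smears that byte's sign bit across the rest of the element.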
    SmallVector<int, 16> Mask(InNumElts, SM_SentinelUndef);
    for (unsigned i = 0; i != DestElts; ++i)
      Mask[i * Scale + (Scale - 1)] = i;

    Curr = DAG.getVectorShuffle(InVT, dl, In, In, Mask);
    Curr = DAG.getBitcast(DestVT, Curr);

    unsigned SignExtShift = DestWidth - InSVT.getSizeInBits();
    SignExt = DAG.getNode(X86ISD::VSRAI, dl, DestVT, Curr,
                          DAG.getConstant(SignExtShift, dl, MVT::i8));
  }

  if (VT == MVT::v2i64) {
    assert(Curr.getValueType() == MVT::v4i32 && "Unexpected input VT");
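    // (0 >s x) is all ones exactly for negative dwords, yielding the i64
    // high halves; interleaving with mask {0, 4, 1, 5} pairs each low dword
    // with its sign word to form the final sign-extended v2i64.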
    SDValue Zero = DAG.getConstant(0, dl, MVT::v4i32);
    SDValue Sign = DAG.getSetCC(dl, MVT::v4i32, Zero, Curr, ISD::SETGT);
    SignExt = DAG.getVectorShuffle(MVT::v4i32, dl, SignExt, Sign, {0, 4, 1, 5});
    SignExt = DAG.getBitcast(VT, SignExt);
  }

  return SignExt;
}

static SDValue LowerSIGN_EXTEND(SDValue Op, const X86Subtarget &Subtarget,
                                SelectionDAG &DAG) {
  MVT VT = Op->getSimpleValueType(0);
  SDValue In = Op->getOperand(0);
  MVT InVT = In.getSimpleValueType();
  SDLoc dl(Op);

  if (InVT.getVectorElementType() == MVT::i1)
    return LowerSIGN_EXTEND_Mask(Op, Subtarget, DAG);

  assert(VT.isVector() && InVT.isVector() && "Expected vector type");
  assert(VT.getVectorNumElements() == InVT.getVectorNumElements() &&
         "Expected same number of elements");
  assert((VT.getVectorElementType() == MVT::i16 ||
          VT.getVectorElementType() == MVT::i32 ||
          VT.getVectorElementType() == MVT::i64) &&
         "Unexpected element type");
  assert((InVT.getVectorElementType() == MVT::i8 ||
          InVT.getVectorElementType() == MVT::i16 ||
          InVT.getVectorElementType() == MVT::i32) &&
         "Unexpected element type");

  // Custom legalize v8i8->v8i64 on CPUs without avx512bw.
  if (InVT == MVT::v8i8) {
    if (!ExperimentalVectorWideningLegalization || VT != MVT::v8i64)
      return SDValue();

    In = DAG.getNode(ISD::CONCAT_VECTORS, SDLoc(Op),
                     MVT::v16i8, In, DAG.getUNDEF(MVT::v8i8));
    return DAG.getNode(ISD::SIGN_EXTEND_VECTOR_INREG, dl, VT, In);
  }

  if (Subtarget.hasInt256())
    return Op;

  // Optimize vectors in AVX mode:
  // Sign extend v8i16 to v8i32 and v4i32 to v4i64.
  //
  // Divide the input vector into two parts; for v4i32 the high shuffle mask
  // will be {2, 3, -1, -1}. Use the vpmovsx instruction to extend
  // v4i32 -> v2i64 and v8i16 -> v4i32, then concat the halves back to the
  // original VT.

  MVT HalfVT = MVT::getVectorVT(VT.getVectorElementType(),
                                VT.getVectorNumElements() / 2);

  SDValue OpLo = DAG.getNode(ISD::SIGN_EXTEND_VECTOR_INREG, dl, HalfVT, In);

  unsigned NumElems = InVT.getVectorNumElements();
  SmallVector<int, 8> ShufMask(NumElems, -1);
  for (unsigned i = 0; i != NumElems / 2; ++i)
    ShufMask[i] = i + NumElems / 2;

  SDValue OpHi = DAG.getVectorShuffle(InVT, dl, In, In, ShufMask);
  OpHi = DAG.getNode(ISD::SIGN_EXTEND_VECTOR_INREG, dl, HalfVT, OpHi);

  return DAG.getNode(ISD::CONCAT_VECTORS, dl, VT, OpLo, OpHi);
}
20185 | ||||
20186 | static SDValue LowerStore(SDValue Op, const X86Subtarget &Subtarget, | |||
20187 | SelectionDAG &DAG) { | |||
20188 | StoreSDNode *St = cast<StoreSDNode>(Op.getNode()); | |||
20189 | SDLoc dl(St); | |||
20190 | SDValue StoredVal = St->getValue(); | |||
20191 | ||||
20192 | // Without AVX512DQ, we need to use a scalar type for v2i1/v4i1/v8i1 stores. | |||
20193 | if (StoredVal.getValueType().isVector() && | |||
20194 | StoredVal.getValueType().getVectorElementType() == MVT::i1) { | |||
20195 | assert(StoredVal.getValueType().getVectorNumElements() <= 8 &&((StoredVal.getValueType().getVectorNumElements() <= 8 && "Unexpected VT") ? static_cast<void> (0) : __assert_fail ("StoredVal.getValueType().getVectorNumElements() <= 8 && \"Unexpected VT\"" , "/build/llvm-toolchain-snapshot-8~svn350071/lib/Target/X86/X86ISelLowering.cpp" , 20196, __PRETTY_FUNCTION__)) | |||
20196 | "Unexpected VT")((StoredVal.getValueType().getVectorNumElements() <= 8 && "Unexpected VT") ? static_cast<void> (0) : __assert_fail ("StoredVal.getValueType().getVectorNumElements() <= 8 && \"Unexpected VT\"" , "/build/llvm-toolchain-snapshot-8~svn350071/lib/Target/X86/X86ISelLowering.cpp" , 20196, __PRETTY_FUNCTION__)); | |||
20197 | assert(!St->isTruncatingStore() && "Expected non-truncating store")((!St->isTruncatingStore() && "Expected non-truncating store" ) ? static_cast<void> (0) : __assert_fail ("!St->isTruncatingStore() && \"Expected non-truncating store\"" , "/build/llvm-toolchain-snapshot-8~svn350071/lib/Target/X86/X86ISelLowering.cpp" , 20197, __PRETTY_FUNCTION__)); | |||
20198 | assert(Subtarget.hasAVX512() && !Subtarget.hasDQI() &&((Subtarget.hasAVX512() && !Subtarget.hasDQI() && "Expected AVX512F without AVX512DQI") ? static_cast<void> (0) : __assert_fail ("Subtarget.hasAVX512() && !Subtarget.hasDQI() && \"Expected AVX512F without AVX512DQI\"" , "/build/llvm-toolchain-snapshot-8~svn350071/lib/Target/X86/X86ISelLowering.cpp" , 20199, __PRETTY_FUNCTION__)) | |||
20199 | "Expected AVX512F without AVX512DQI")((Subtarget.hasAVX512() && !Subtarget.hasDQI() && "Expected AVX512F without AVX512DQI") ? static_cast<void> (0) : __assert_fail ("Subtarget.hasAVX512() && !Subtarget.hasDQI() && \"Expected AVX512F without AVX512DQI\"" , "/build/llvm-toolchain-snapshot-8~svn350071/lib/Target/X86/X86ISelLowering.cpp" , 20199, __PRETTY_FUNCTION__)); | |||
20200 | ||||
20201 | StoredVal = DAG.getNode(ISD::INSERT_SUBVECTOR, dl, MVT::v8i1, | |||
20202 | DAG.getUNDEF(MVT::v8i1), StoredVal, | |||
20203 | DAG.getIntPtrConstant(0, dl)); | |||
20204 | StoredVal = DAG.getBitcast(MVT::i8, StoredVal); | |||
20205 | ||||
20206 | return DAG.getStore(St->getChain(), dl, StoredVal, St->getBasePtr(), | |||
20207 | St->getPointerInfo(), St->getAlignment(), | |||
20208 | St->getMemOperand()->getFlags()); | |||
20209 | } | |||
20210 | ||||
20211 | if (St->isTruncatingStore()) | |||
20212 | return SDValue(); | |||
20213 | ||||
20214 | MVT StoreVT = StoredVal.getSimpleValueType(); | |||
20215 | assert(StoreVT.isVector() && StoreVT.getSizeInBits() == 64 && | |||
20216 | "Unexpected VT"); | |||
20217 | if (DAG.getTargetLoweringInfo().getTypeAction(*DAG.getContext(), StoreVT) != | |||
20218 | TargetLowering::TypeWidenVector) | |||
20219 | return SDValue(); | |||
20220 | ||||
20221 | // Widen the vector, cast to a v2x64 type, extract the single 64-bit element | |||
20222 | // and store it. | |||
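// Illustrative example (types assumed): storing a v2i32 value %v becomes
//   %w = concat_vectors %v, undef        ; v4i32
//   %c = bitcast %w to v2i64             ; v2f64 on 32-bit targets
//   %e = extract_vector_elt %c, 0
//   store i64 %e                         ; or f64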
20223 | MVT WideVT = MVT::getVectorVT(StoreVT.getVectorElementType(), | |||
20224 | StoreVT.getVectorNumElements() * 2); | |||
20225 | StoredVal = DAG.getNode(ISD::CONCAT_VECTORS, dl, WideVT, StoredVal, | |||
20226 | DAG.getUNDEF(StoreVT)); | |||
20227 | MVT StVT = Subtarget.is64Bit() && StoreVT.isInteger() ? MVT::i64 : MVT::f64; | |||
20228 | MVT CastVT = MVT::getVectorVT(StVT, 2); | |||
20229 | StoredVal = DAG.getBitcast(CastVT, StoredVal); | |||
20230 | StoredVal = DAG.getNode(ISD::EXTRACT_VECTOR_ELT, dl, StVT, StoredVal, | |||
20231 | DAG.getIntPtrConstant(0, dl)); | |||
20232 | ||||
20233 | return DAG.getStore(St->getChain(), dl, StoredVal, St->getBasePtr(), | |||
20234 | St->getPointerInfo(), St->getAlignment(), | |||
20235 | St->getMemOperand()->getFlags()); | |||
20236 | } | |||
20237 | ||||
20238 | // Lower vector extended loads using a shuffle. If SSSE3 is not available we | |||
20239 | // may emit an illegal shuffle but the expansion is still better than scalar | |||
20240 | // code. We generate a sext/sext_invec for SEXTLOADs if it is available; | |||
20241 | // otherwise we emit a shuffle and an arithmetic shift. | |||
20242 | // FIXME: Is the expansion actually better than scalar code? It doesn't seem so. | |||
20243 | // TODO: It is possible to support ZExt by zeroing the undef values during | |||
20244 | // the shuffle phase or after the shuffle. | |||
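// Illustrative example (types assumed): an anyext load of v8i8 into v8i16
// is expanded below into a single scalar i64 load (on x86-64), a bitcast
// to v16i8, and a shuffle that places byte i in the low half of lane i,
// leaving the high bytes undefined.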
20245 | static SDValue LowerLoad(SDValue Op, const X86Subtarget &Subtarget, | |||
20246 | SelectionDAG &DAG) { | |||
20247 | MVT RegVT = Op.getSimpleValueType(); | |||
20248 | assert(RegVT.isVector() && "We only custom lower vector loads."); | |||
20249 | assert(RegVT.isInteger() && | |||
20250 | "We only custom lower integer vector loads."); | |||
20251 | ||||
20252 | LoadSDNode *Ld = cast<LoadSDNode>(Op.getNode()); | |||
20253 | SDLoc dl(Ld); | |||
20254 | EVT MemVT = Ld->getMemoryVT(); | |||
20255 | ||||
20256 | // Without AVX512DQ, we need to use a scalar type for v2i1/v4i1/v8i1 loads. | |||
20257 | if (RegVT.getVectorElementType() == MVT::i1) { | |||
20258 | assert(EVT(RegVT) == MemVT && "Expected non-extending load"); | |||
20259 | assert(RegVT.getVectorNumElements() <= 8 && "Unexpected VT"); | |||
20260 | assert(Subtarget.hasAVX512() && !Subtarget.hasDQI() && | |||
20261 | "Expected AVX512F without AVX512DQI"); | |||
20262 | ||||
20263 | SDValue NewLd = DAG.getLoad(MVT::i8, dl, Ld->getChain(), Ld->getBasePtr(), | |||
20264 | Ld->getPointerInfo(), Ld->getAlignment(), | |||
20265 | Ld->getMemOperand()->getFlags()); | |||
20266 | ||||
20267 | // Replace chain users with the new chain. | |||
20268 | assert(NewLd->getNumValues() == 2 && "Loads must carry a chain!"); | |||
20269 | ||||
20270 | SDValue Extract = DAG.getNode(ISD::EXTRACT_SUBVECTOR, dl, RegVT, | |||
20271 | DAG.getBitcast(MVT::v8i1, NewLd), | |||
20272 | DAG.getIntPtrConstant(0, dl)); | |||
20273 | return DAG.getMergeValues({Extract, NewLd.getValue(1)}, dl); | |||
20274 | } | |||
20275 | ||||
20276 | // Nothing useful we can do without SSE2 shuffles. | |||
20277 | assert(Subtarget.hasSSE2() && "We only custom lower sext loads with SSE2."); | |||
20278 | ||||
20279 | const TargetLowering &TLI = DAG.getTargetLoweringInfo(); | |||
20280 | unsigned RegSz = RegVT.getSizeInBits(); | |||
20281 | ||||
20282 | ISD::LoadExtType Ext = Ld->getExtensionType(); | |||
20283 | ||||
20284 | assert((Ext == ISD::EXTLOAD || Ext == ISD::SEXTLOAD) && | |||
20285 | "Only anyext and sext are currently implemented."); | |||
20286 | assert(MemVT != RegVT && "Cannot extend to the same type"); | |||
20287 | assert(MemVT.isVector() && "Must load a vector from memory"); | |||
20288 | ||||
20289 | unsigned NumElems = RegVT.getVectorNumElements(); | |||
20290 | unsigned MemSz = MemVT.getSizeInBits(); | |||
20291 | assert(RegSz > MemSz && "Register size must be greater than the mem size"); | |||
20292 | ||||
20293 | if (Ext == ISD::SEXTLOAD && RegSz == 256 && !Subtarget.hasInt256()) { | |||
20294 | // The only way in which we have a legal 256-bit vector result but not the | |||
20295 | // integer 256-bit operations needed to directly lower a sextload is if we | |||
20296 | // have AVX1 but not AVX2. In that case, we can always emit a sextload to | |||
20297 | // a 128-bit vector and a normal sign_extend to 256-bits that should get | |||
20298 | // correctly legalized. We do this late to allow the canonical form of | |||
20299 | // sextload to persist throughout the rest of the DAG combiner -- it wants | |||
20300 | // to fold together any extensions it can, and so will fuse a sign_extend | |||
20301 | // of an sextload into a sextload targeting a wider value. | |||
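// Illustrative example (types assumed): on AVX1, a sextload of v4i32 from
// memory into v4i64 becomes a plain v4i32 load followed by a sign_extend
// to v4i64.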
20302 | SDValue Load; | |||
20303 | if (MemSz == 128) { | |||
20304 | // Just switch this to a normal load. | |||
20305 | assert(TLI.isTypeLegal(MemVT) && "If the memory type is a 128-bit type, " | |||
20306 | "it must be a legal 128-bit vector " | |||
20307 | "type!"); | |||
20308 | Load = DAG.getLoad(MemVT, dl, Ld->getChain(), Ld->getBasePtr(), | |||
20309 | Ld->getPointerInfo(), Ld->getAlignment(), | |||
20310 | Ld->getMemOperand()->getFlags()); | |||
20311 | } else { | |||
20312 | assert(MemSz < 128 && | |||
20313 | "Can't extend a type wider than 128 bits to a 256 bit vector!"); | |||
20314 | // Do an sext load to a 128-bit vector type. We want to use the same | |||
20315 | // number of elements, but elements half as wide. This will end up being | |||
20316 | // recursively lowered by this routine, but will succeed as we definitely | |||
20317 | // have all the necessary features if we're using AVX1. | |||
20318 | EVT HalfEltVT = | |||
20319 | EVT::getIntegerVT(*DAG.getContext(), RegVT.getScalarSizeInBits() / 2); | |||
20320 | EVT HalfVecVT = EVT::getVectorVT(*DAG.getContext(), HalfEltVT, NumElems); | |||
20321 | Load = | |||
20322 | DAG.getExtLoad(Ext, dl, HalfVecVT, Ld->getChain(), Ld->getBasePtr(), | |||
20323 | Ld->getPointerInfo(), MemVT, Ld->getAlignment(), | |||
20324 | Ld->getMemOperand()->getFlags()); | |||
20325 | } | |||
20326 | ||||
20327 | // Replace chain users with the new chain. | |||
20328 | assert(Load->getNumValues() == 2 && "Loads must carry a chain!"); | |||
20329 | ||||
20330 | // Finally, do a normal sign-extend to the desired register. | |||
20331 | SDValue SExt = DAG.getSExtOrTrunc(Load, dl, RegVT); | |||
20332 | return DAG.getMergeValues({SExt, Load.getValue(1)}, dl); | |||
20333 | } | |||
20334 | ||||
20335 | // All sizes must be a power of two. | |||
20336 | assert(isPowerOf2_32(RegSz * MemSz * NumElems) && | |||
20337 | "Non-power-of-two elements are not custom lowered!"); | |||
20338 | ||||
20339 | // Attempt to load the original value using scalar loads. | |||
20340 | // Find the largest scalar type that divides the total loaded size. | |||
20341 | MVT SclrLoadTy = MVT::i8; | |||
20342 | for (MVT Tp : MVT::integer_valuetypes()) { | |||
20343 | if (TLI.isTypeLegal(Tp) && ((MemSz % Tp.getSizeInBits()) == 0)) { | |||
20344 | SclrLoadTy = Tp; | |||
20345 | } | |||
20346 | } | |||
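// E.g. (illustrative): for a 64-bit MemVT this picks i64 on x86-64; on a
// 32-bit target it picks i32 here, and possibly f64 just below.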
20347 | ||||
20348 | // On 32-bit systems, we can't use 64-bit integers. Try bitcasting to f64. | |||
20349 | if (TLI.isTypeLegal(MVT::f64) && SclrLoadTy.getSizeInBits() < 64 && | |||
20350 | (64 <= MemSz)) | |||
20351 | SclrLoadTy = MVT::f64; | |||
20352 | ||||
20353 | // Calculate the number of scalar loads that we need to perform | |||
20354 | // in order to load our vector from memory. | |||
20355 | unsigned NumLoads = MemSz / SclrLoadTy.getSizeInBits(); | |||
20356 | ||||
20357 | assert((Ext != ISD::SEXTLOAD || NumLoads == 1) && | |||
20358 | "Can only lower sext loads with a single scalar load!"); | |||
20359 | ||||
20360 | unsigned loadRegSize = RegSz; | |||
20361 | if (Ext == ISD::SEXTLOAD && RegSz >= 256) | |||
20362 | loadRegSize = 128; | |||
20363 | ||||
20364 | // If we don't have BWI we won't be able to create the shuffle needed for | |||
20365 | // v8i8->v8i64. | |||
20366 | if (Ext == ISD::EXTLOAD && !Subtarget.hasBWI() && RegVT == MVT::v8i64 && | |||
20367 | MemVT == MVT::v8i8) | |||
20368 | loadRegSize = 128; | |||
20369 | ||||
20370 | // Represent our vector as a sequence of elements, each of which is the | |||
20371 | // largest scalar type that we can load. | |||
20372 | EVT LoadUnitVecVT = EVT::getVectorVT( | |||
20373 | *DAG.getContext(), SclrLoadTy, loadRegSize / SclrLoadTy.getSizeInBits()); | |||
20374 | ||||
20375 | // Represent the data using the same element type that is stored in | |||
20376 | // memory. In practice, we "widen" MemVT. | |||
20377 | EVT WideVecVT = | |||
20378 | EVT::getVectorVT(*DAG.getContext(), MemVT.getScalarType(), | |||
20379 | loadRegSize / MemVT.getScalarSizeInBits()); | |||
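// Illustrative example (types assumed): for RegVT = v8i16 and MemVT = v8i8
// on x86-64 this gives LoadUnitVecVT = v2i64 (a single i64 scalar load) and
// WideVecVT = v16i8, so the shuffle below moves byte i to position 2 * i.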
20380 | ||||
20381 | assert(WideVecVT.getSizeInBits() == LoadUnitVecVT.getSizeInBits() && | |||
20382 | "Invalid vector type"); | |||
20383 | ||||
20384 | // We can't shuffle using an illegal type. | |||
20385 | assert(TLI.isTypeLegal(WideVecVT) && | |||
20386 | "We only lower types that form legal widened vector types"); | |||
20387 | ||||
20388 | SmallVector<SDValue, 8> Chains; | |||
20389 | SDValue Ptr = Ld->getBasePtr(); | |||
20390 | unsigned OffsetInc = SclrLoadTy.getSizeInBits() / 8; | |||
20391 | SDValue Increment = DAG.getConstant(OffsetInc, dl, | |||
20392 | TLI.getPointerTy(DAG.getDataLayout())); | |||
20393 | SDValue Res = DAG.getUNDEF(LoadUnitVecVT); | |||
20394 | ||||
20395 | unsigned Offset = 0; | |||
20396 | for (unsigned i = 0; i < NumLoads; ++i) { | |||
20397 | unsigned NewAlign = MinAlign(Ld->getAlignment(), Offset); | |||
20398 | ||||
20399 | // Perform a single load. | |||
20400 | SDValue ScalarLoad = | |||
20401 | DAG.getLoad(SclrLoadTy, dl, Ld->getChain(), Ptr, | |||
20402 | Ld->getPointerInfo().getWithOffset(Offset), | |||
20403 | NewAlign, Ld->getMemOperand()->getFlags()); | |||
20404 | Chains.push_back(ScalarLoad.getValue(1)); | |||
20405 | // Create the first element using SCALAR_TO_VECTOR in order to avoid | |||
20406 | // another round of DAG combining. | |||
20407 | if (i == 0) | |||
20408 | Res = DAG.getNode(ISD::SCALAR_TO_VECTOR, dl, LoadUnitVecVT, ScalarLoad); | |||
20409 | else | |||
20410 | Res = DAG.getNode(ISD::INSERT_VECTOR_ELT, dl, LoadUnitVecVT, Res, | |||
20411 | ScalarLoad, DAG.getIntPtrConstant(i, dl)); | |||
20412 | ||||
20413 | Ptr = DAG.getNode(ISD::ADD, dl, Ptr.getValueType(), Ptr, Increment); | |||
20414 | Offset += OffsetInc; | |||
20415 | } | |||
20416 | ||||
20417 | SDValue TF = DAG.getNode(ISD::TokenFactor, dl, MVT::Other, Chains); | |||
20418 | ||||
20419 | // Bitcast the loaded value to a vector of the original element type, | |||
20420 | // sized to match the target vector type. | |||
20421 | SDValue SlicedVec = DAG.getBitcast(WideVecVT, Res); | |||
20422 | unsigned SizeRatio = RegSz / MemSz; | |||
20423 | ||||
20424 | if (Ext == ISD::SEXTLOAD) { | |||
20425 | SDValue Sext = getExtendInVec(/*Signed*/true, dl, RegVT, SlicedVec, DAG); | |||
20426 | return DAG.getMergeValues({Sext, TF}, dl); | |||
20427 | } | |||
20428 | ||||
20429 | if (Ext == ISD::EXTLOAD && !Subtarget.hasBWI() && RegVT == MVT::v8i64 && | |||
20430 | MemVT == MVT::v8i8) { | |||
20431 | SDValue Sext = getExtendInVec(/*Signed*/false, dl, RegVT, SlicedVec, DAG); | |||
20432 | return DAG.getMergeValues({Sext, TF}, dl); | |||
20433 | } | |||
20434 | ||||
20435 | // Redistribute the loaded elements into the different locations. | |||
20436 | SmallVector<int, 16> ShuffleVec(NumElems * SizeRatio, -1); | |||
20437 | for (unsigned i = 0; i != NumElems; ++i) | |||
20438 | ShuffleVec[i * SizeRatio] = i; | |||
20439 | ||||
20440 | SDValue Shuff = DAG.getVectorShuffle(WideVecVT, dl, SlicedVec, | |||
20441 | DAG.getUNDEF(WideVecVT), ShuffleVec); | |||
20442 | ||||
20443 | // Bitcast to the requested type. | |||
20444 | Shuff = DAG.getBitcast(RegVT, Shuff); | |||
20445 | return DAG.getMergeValues({Shuff, TF}, dl); | |||
20446 | } | |||
20447 | ||||
20448 | /// Return true if node is an ISD::AND or ISD::OR of two X86ISD::SETCC nodes | |||
20449 | /// each of which has no other use apart from the AND / OR. | |||
20450 | static bool isAndOrOfSetCCs(SDValue Op, unsigned &Opc) { | |||
20451 | Opc = Op.getOpcode(); | |||
20452 | if (Opc != ISD::OR && Opc != ISD::AND) | |||
20453 | return false; | |||
20454 | return (Op.getOperand(0).getOpcode() == X86ISD::SETCC && | |||
20455 | Op.getOperand(0).hasOneUse() && | |||
20456 | Op.getOperand(1).getOpcode() == X86ISD::SETCC && | |||
20457 | Op.getOperand(1).hasOneUse()); | |||
20458 | } | |||
20459 | ||||
20460 | /// Return true if the node is an ISD::XOR of an X86ISD::SETCC and 1, where | |||
20461 | /// the SETCC node has a single use. | |||
20462 | static bool isXor1OfSetCC(SDValue Op) { | |||
20463 | if (Op.getOpcode() != ISD::XOR) | |||
20464 | return false; | |||
20465 | if (isOneConstant(Op.getOperand(1))) | |||
20466 | return Op.getOperand(0).getOpcode() == X86ISD::SETCC && | |||
20467 | Op.getOperand(0).hasOneUse(); | |||
20468 | return false; | |||
20469 | } | |||
20470 | ||||
20471 | SDValue X86TargetLowering::LowerBRCOND(SDValue Op, SelectionDAG &DAG) const { | |||
20472 | bool addTest = true; | |||
20473 | SDValue Chain = Op.getOperand(0); | |||
20474 | SDValue Cond = Op.getOperand(1); | |||
20475 | SDValue Dest = Op.getOperand(2); | |||
20476 | SDLoc dl(Op); | |||
20477 | SDValue CC; | |||
20478 | bool Inverted = false; | |||
20479 | ||||
20480 | if (Cond.getOpcode() == ISD::SETCC) { | |||
20481 | // Check for setcc([su]{add,sub,mul}o == 0). | |||
20482 | if (cast<CondCodeSDNode>(Cond.getOperand(2))->get() == ISD::SETEQ && | |||
20483 | isNullConstant(Cond.getOperand(1)) && | |||
20484 | Cond.getOperand(0).getResNo() == 1 && | |||
20485 | (Cond.getOperand(0).getOpcode() == ISD::SADDO || | |||
20486 | Cond.getOperand(0).getOpcode() == ISD::UADDO || | |||
20487 | Cond.getOperand(0).getOpcode() == ISD::SSUBO || | |||
20488 | Cond.getOperand(0).getOpcode() == ISD::USUBO || | |||
20489 | Cond.getOperand(0).getOpcode() == ISD::SMULO || | |||
20490 | Cond.getOperand(0).getOpcode() == ISD::UMULO)) { | |||
20491 | Inverted = true; | |||
20492 | Cond = Cond.getOperand(0); | |||
20493 | } else { | |||
20494 | if (SDValue NewCond = LowerSETCC(Cond, DAG)) | |||
20495 | Cond = NewCond; | |||
20496 | } | |||
20497 | } | |||
20498 | #if 0 | |||
20499 | // FIXME: LowerXALUO doesn't handle these!! | |||
20500 | else if (Cond.getOpcode() == X86ISD::ADD || | |||
20501 | Cond.getOpcode() == X86ISD::SUB || | |||
20502 | Cond.getOpcode() == X86ISD::SMUL || | |||
20503 | Cond.getOpcode() == X86ISD::UMUL) | |||
20504 | Cond = LowerXALUO(Cond, DAG); | |||
20505 | #endif | |||
20506 | ||||
20507 | // Look past (and (setcc_carry (cmp ...)), 1). | |||
20508 | if (Cond.getOpcode() == ISD::AND && | |||
20509 | Cond.getOperand(0).getOpcode() == X86ISD::SETCC_CARRY && | |||
20510 | isOneConstant(Cond.getOperand(1))) | |||
20511 | Cond = Cond.getOperand(0); | |||
20512 | ||||
20513 | // If the condition flag is set by an X86ISD::CMP, then use it as the | |||
20514 | // condition-setting operand in place of the X86ISD::SETCC. | |||
20515 | unsigned CondOpcode = Cond.getOpcode(); | |||
20516 | if (CondOpcode == X86ISD::SETCC || | |||
20517 | CondOpcode == X86ISD::SETCC_CARRY) { | |||
20518 | CC = Cond.getOperand(0); | |||
20519 | ||||
20520 | SDValue Cmp = Cond.getOperand(1); | |||
20521 | unsigned Opc = Cmp.getOpcode(); | |||
20522 | // FIXME: WHY THE SPECIAL CASING OF LogicalCmp?? | |||
20523 | if (isX86LogicalCmp(Cmp) || Opc == X86ISD::BT) { | |||
20524 | Cond = Cmp; | |||
20525 | addTest = false; | |||
20526 | } else { | |||
20527 | switch (cast<ConstantSDNode>(CC)->getZExtValue()) { | |||
20528 | default: break; | |||
20529 | case X86::COND_O: | |||
20530 | case X86::COND_B: | |||
20531 | // These can only come from an arithmetic instruction with overflow, | |||
20532 | // e.g. SADDO, UADDO. | |||
20533 | Cond = Cond.getOperand(1); | |||
20534 | addTest = false; | |||
20535 | break; | |||
20536 | } | |||
20537 | } | |||
20538 | } | |||
20539 | CondOpcode = Cond.getOpcode(); | |||
20540 | if (CondOpcode == ISD::UADDO || CondOpcode == ISD::SADDO || | |||
20541 | CondOpcode == ISD::USUBO || CondOpcode == ISD::SSUBO || | |||
20542 | ((CondOpcode == ISD::UMULO || CondOpcode == ISD::SMULO) && | |||
20543 | Cond.getOperand(0).getValueType() != MVT::i8)) { | |||
20544 | SDValue LHS = Cond.getOperand(0); | |||
20545 | SDValue RHS = Cond.getOperand(1); | |||
20546 | unsigned X86Opcode; | |||
20547 | unsigned X86Cond; | |||
20548 | SDVTList VTs; | |||
20549 | // Keep this in sync with LowerXALUO, otherwise we might create redundant | |||
20550 | // instructions that can't be removed afterwards (i.e. X86ISD::ADD and | |||
20551 | // X86ISD::INC). | |||
20552 | switch (CondOpcode) { | |||
20553 | case ISD::UADDO: X86Opcode = X86ISD::ADD; X86Cond = X86::COND_B; break; | |||
20554 | case ISD::SADDO: | |||
20555 | if (isOneConstant(RHS)) { | |||
20556 | X86Opcode = X86ISD::INC; X86Cond = X86::COND_O; | |||
20557 | break; | |||
20558 | } | |||
20559 | X86Opcode = X86ISD::ADD; X86Cond = X86::COND_O; break; | |||
20560 | case ISD::USUBO: X86Opcode = X86ISD::SUB; X86Cond = X86::COND_B; break; | |||
20561 | case ISD::SSUBO: | |||
20562 | if (isOneConstant(RHS)) { | |||
20563 | X86Opcode = X86ISD::DEC; X86Cond = X86::COND_O; | |||
20564 | break; | |||
20565 | } | |||
20566 | X86Opcode = X86ISD::SUB; X86Cond = X86::COND_O; break; | |||
20567 | case ISD::UMULO: X86Opcode = X86ISD::UMUL; X86Cond = X86::COND_O; break; | |||
20568 | case ISD::SMULO: X86Opcode = X86ISD::SMUL; X86Cond = X86::COND_O; break; | |||
20569 | default: llvm_unreachable("unexpected overflowing operator"); | |||
20570 | } | |||
20571 | if (Inverted) | |||
20572 | X86Cond = X86::GetOppositeBranchCondition((X86::CondCode)X86Cond); | |||
20573 | if (CondOpcode == ISD::UMULO) | |||
20574 | VTs = DAG.getVTList(LHS.getValueType(), LHS.getValueType(), | |||
20575 | MVT::i32); | |||
20576 | else | |||
20577 | VTs = DAG.getVTList(LHS.getValueType(), MVT::i32); | |||
20578 | ||||
20579 | SDValue X86Op = DAG.getNode(X86Opcode, dl, VTs, LHS, RHS); | |||
20580 | ||||
20581 | if (CondOpcode == ISD::UMULO) | |||
20582 | Cond = X86Op.getValue(2); | |||
20583 | else | |||
20584 | Cond = X86Op.getValue(1); | |||
20585 | ||||
20586 | CC = DAG.getConstant(X86Cond, dl, MVT::i8); | |||
20587 | addTest = false; | |||
20588 | } else { | |||
20589 | unsigned CondOpc; | |||
20590 | if (Cond.hasOneUse() && isAndOrOfSetCCs(Cond, CondOpc)) { | |||
20591 | SDValue Cmp = Cond.getOperand(0).getOperand(1); | |||
20592 | if (CondOpc == ISD::OR) { | |||
20593 | // Also, recognize the pattern generated by an FCMP_UNE. We can emit | |||
20594 | // two branches instead of an explicit OR instruction with a | |||
20595 | // separate test. | |||
20596 | if (Cmp == Cond.getOperand(1).getOperand(1) && | |||
20597 | isX86LogicalCmp(Cmp)) { | |||
20598 | CC = Cond.getOperand(0).getOperand(0); | |||
20599 | Chain = DAG.getNode(X86ISD::BRCOND, dl, Op.getValueType(), | |||
20600 | Chain, Dest, CC, Cmp); | |||
20601 | CC = Cond.getOperand(1).getOperand(0); | |||
20602 | Cond = Cmp; | |||
20603 | addTest = false; | |||
20604 | } | |||
20605 | } else { // ISD::AND | |||
20606 | // Also, recognize the pattern generated by an FCMP_OEQ. We can emit | |||
20607 | // two branches instead of an explicit AND instruction with a | |||
20608 | // separate test. However, we only do this if this block doesn't | |||
20609 | // have a fall-through edge, because this requires an explicit | |||
20610 | // jmp when the condition is false. | |||
20611 | if (Cmp == Cond.getOperand(1).getOperand(1) && | |||
20612 | isX86LogicalCmp(Cmp) && | |||
20613 | Op.getNode()->hasOneUse()) { | |||
20614 | X86::CondCode CCode = | |||
20615 | (X86::CondCode)Cond.getOperand(0).getConstantOperandVal(0); | |||
20616 | CCode = X86::GetOppositeBranchCondition(CCode); | |||
20617 | CC = DAG.getConstant(CCode, dl, MVT::i8); | |||
20618 | SDNode *User = *Op.getNode()->use_begin(); | |||
20619 | // Look for an unconditional branch following this conditional branch. | |||
20620 | // We need this because we need to reverse the successors in order | |||
20621 | // to implement FCMP_OEQ. | |||
20622 | if (User->getOpcode() == ISD::BR) { | |||
20623 | SDValue FalseBB = User->getOperand(1); | |||
20624 | SDNode *NewBR = | |||
20625 | DAG.UpdateNodeOperands(User, User->getOperand(0), Dest); | |||
20626 | assert(NewBR == User); | |||
20627 | (void)NewBR; | |||
20628 | Dest = FalseBB; | |||
20629 | ||||
20630 | Chain = DAG.getNode(X86ISD::BRCOND, dl, Op.getValueType(), | |||
20631 | Chain, Dest, CC, Cmp); | |||
20632 | X86::CondCode CCode = | |||
20633 | (X86::CondCode)Cond.getOperand(1).getConstantOperandVal(0); | |||
20634 | CCode = X86::GetOppositeBranchCondition(CCode); | |||
20635 | CC = DAG.getConstant(CCode, dl, MVT::i8); | |||
20636 | Cond = Cmp; | |||
20637 | addTest = false; | |||
20638 | } | |||
20639 | } | |||
20640 | } | |||
20641 | } else if (Cond.hasOneUse() && isXor1OfSetCC(Cond)) { | |||
20642 | // Recognize "xorb (setcc), 1" patterns. The xor inverts the condition. | |||
20643 | // It should be transformed by the DAG combiner except when the condition | |||
20644 | // is set by an arithmetic-with-overflow node. | |||
20645 | X86::CondCode CCode = | |||
20646 | (X86::CondCode)Cond.getOperand(0).getConstantOperandVal(0); | |||
20647 | CCode = X86::GetOppositeBranchCondition(CCode); | |||
20648 | CC = DAG.getConstant(CCode, dl, MVT::i8); | |||
20649 | Cond = Cond.getOperand(0).getOperand(1); | |||
20650 | addTest = false; | |||
20651 | } else if (Cond.getOpcode() == ISD::SETCC && | |||
20652 | cast<CondCodeSDNode>(Cond.getOperand(2))->get() == ISD::SETOEQ) { | |||
20653 | // For FCMP_OEQ, we can emit | |||
20654 | // two branches instead of an explicit AND instruction with a | |||
20655 | // separate test. However, we only do this if this block doesn't | |||
20656 | // have a fall-through edge, because this requires an explicit | |||
20657 | // jmp when the condition is false. | |||
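// Concretely (illustrative): ucomiss sets ZF=1 with PF=0 only for an
// ordered-equal compare, so OEQ lowers to "jne false; jp false; jmp true".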
20658 | if (Op.getNode()->hasOneUse()) { | |||
20659 | SDNode *User = *Op.getNode()->use_begin(); | |||
20660 | // Look for an unconditional branch following this conditional branch. | |||
20661 | // We need this because we need to reverse the successors in order | |||
20662 | // to implement FCMP_OEQ. | |||
20663 | if (User->getOpcode() == ISD::BR) { | |||
20664 | SDValue FalseBB = User->getOperand(1); | |||
20665 | SDNode *NewBR = | |||
20666 | DAG.UpdateNodeOperands(User, User->getOperand(0), Dest); | |||
20667 | assert(NewBR == User); | |||
20668 | (void)NewBR; | |||
20669 | Dest = FalseBB; | |||
20670 | ||||
20671 | SDValue Cmp = DAG.getNode(X86ISD::CMP, dl, MVT::i32, | |||
20672 | Cond.getOperand(0), Cond.getOperand(1)); | |||
20673 | Cmp = ConvertCmpIfNecessary(Cmp, DAG); | |||
20674 | CC = DAG.getConstant(X86::COND_NE, dl, MVT::i8); | |||
20675 | Chain = DAG.getNode(X86ISD::BRCOND, dl, Op.getValueType(), | |||
20676 | Chain, Dest, CC, Cmp); | |||
20677 | CC = DAG.getConstant(X86::COND_P, dl, MVT::i8); | |||
20678 | Cond = Cmp; | |||
20679 | addTest = false; | |||
20680 | } | |||
20681 | } | |||
20682 | } else if (Cond.getOpcode() == ISD::SETCC && | |||
20683 | cast<CondCodeSDNode>(Cond.getOperand(2))->get() == ISD::SETUNE) { | |||
20684 | // For FCMP_UNE, we can emit | |||
20685 | // two branches instead of an explicit AND instruction with a | |||
20686 | // separate test. However, we only do this if this block doesn't | |||
20687 | // have a fall-through edge, because this requires an explicit | |||
20688 | // jmp when the condition is false. | |||
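// Concretely (illustrative): UNE holds when ZF=0 or PF=1, so this lowers
// to "jne true; jnp false; jmp true".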
20689 | if (Op.getNode()->hasOneUse()) { | |||
20690 | SDNode *User = *Op.getNode()->use_begin(); | |||
20691 | // Look for an unconditional branch following this conditional branch. | |||
20692 | // We need this because we need to reverse the successors in order | |||
20693 | // to implement FCMP_UNE. | |||
20694 | if (User->getOpcode() == ISD::BR) { | |||
20695 | SDValue FalseBB = User->getOperand(1); | |||
20696 | SDNode *NewBR = | |||
20697 | DAG.UpdateNodeOperands(User, User->getOperand(0), Dest); | |||
20698 | assert(NewBR == User); | |||
20699 | (void)NewBR; | |||
20700 | ||||
20701 | SDValue Cmp = DAG.getNode(X86ISD::CMP, dl, MVT::i32, | |||
20702 | Cond.getOperand(0), Cond.getOperand(1)); | |||
20703 | Cmp = ConvertCmpIfNecessary(Cmp, DAG); | |||
20704 | CC = DAG.getConstant(X86::COND_NE, dl, MVT::i8); | |||
20705 | Chain = DAG.getNode(X86ISD::BRCOND, dl, Op.getValueType(), | |||
20706 | Chain, Dest, CC, Cmp); | |||
20707 | CC = DAG.getConstant(X86::COND_NP, dl, MVT::i8); | |||
20708 | Cond = Cmp; | |||
20709 | addTest = false; | |||
20710 | Dest = FalseBB; | |||
20711 | } | |||
20712 | } | |||
20713 | } | |||
20714 | } | |||
20715 | ||||
20716 | if (addTest) { | |||
20717 | // Look past the truncate if the high bits are known zero. | |||
20718 | if (isTruncWithZeroHighBitsInput(Cond, DAG)) | |||
20719 | Cond = Cond.getOperand(0); | |||
20720 | ||||
20721 | // We know the result of AND is compared against zero. Try to match | |||
20722 | // it to BT. | |||
20723 | if (Cond.getOpcode() == ISD::AND && Cond.hasOneUse()) { | |||
20724 | if (SDValue NewSetCC = LowerAndToBT(Cond, ISD::SETNE, dl, DAG)) { | |||
20725 | CC = NewSetCC.getOperand(0); | |||
20726 | Cond = NewSetCC.getOperand(1); | |||
20727 | addTest = false; | |||
20728 | } | |||
20729 | } | |||
20730 | } | |||
20731 | ||||
20732 | if (addTest) { | |||
20733 | X86::CondCode X86Cond = Inverted ? X86::COND_E : X86::COND_NE; | |||
20734 | CC = DAG.getConstant(X86Cond, dl, MVT::i8); | |||
20735 | Cond = EmitCmp(Cond, DAG.getConstant(0, dl, Cond.getValueType()), | |||
20736 | X86Cond, dl, DAG); | |||
20737 | } | |||
20738 | Cond = ConvertCmpIfNecessary(Cond, DAG); | |||
20739 | return DAG.getNode(X86ISD::BRCOND, dl, Op.getValueType(), | |||
20740 | Chain, Dest, CC, Cond); | |||
20741 | } | |||
20742 | ||||
20743 | // Lower dynamic stack allocation to an _alloca call for Cygwin/MinGW targets. | |||
20744 | // Calls to _alloca are needed to probe the stack when allocating more than 4k | |||
20745 | // bytes in one go. Touching the stack at 4K increments is necessary to ensure | |||
20746 | // that the guard pages used by the OS virtual memory manager are allocated in | |||
20747 | // correct sequence. | |||
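// E.g. (illustrative): allocating 16 KB in one go emits a call to the
// probe routine named by getStackProbeSymbolName, which is expected to
// touch each intervening 4 KB page before the stack pointer is adjusted.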
20748 | SDValue | |||
20749 | X86TargetLowering::LowerDYNAMIC_STACKALLOC(SDValue Op, | |||
20750 | SelectionDAG &DAG) const { | |||
20751 | MachineFunction &MF = DAG.getMachineFunction(); | |||
20752 | bool SplitStack = MF.shouldSplitStack(); | |||
20753 | bool EmitStackProbe = !getStackProbeSymbolName(MF).empty(); | |||
20754 | bool Lower = (Subtarget.isOSWindows() && !Subtarget.isTargetMachO()) || | |||
20755 | SplitStack || EmitStackProbe; | |||
20756 | SDLoc dl(Op); | |||
20757 | ||||
20758 | // Get the inputs. | |||
20759 | SDNode *Node = Op.getNode(); | |||
20760 | SDValue Chain = Op.getOperand(0); | |||
20761 | SDValue Size = Op.getOperand(1); | |||
20762 | unsigned Align = cast<ConstantSDNode>(Op.getOperand(2))->getZExtValue(); | |||
20763 | EVT VT = Node->getValueType(0); | |||
20764 | ||||
20765 | // Chain the dynamic stack allocation so that it doesn't modify the stack | |||
20766 | // pointer when other instructions are using the stack. | |||
20767 | Chain = DAG.getCALLSEQ_START(Chain, 0, 0, dl); | |||
20768 | ||||
20769 | bool Is64Bit = Subtarget.is64Bit(); | |||
20770 | MVT SPTy = getPointerTy(DAG.getDataLayout()); | |||
20771 | ||||
20772 | SDValue Result; | |||
20773 | if (!Lower) { | |||
20774 | const TargetLowering &TLI = DAG.getTargetLoweringInfo(); | |||
20775 | unsigned SPReg = TLI.getStackPointerRegisterToSaveRestore(); | |||
20776 | assert(SPReg && "Target cannot require DYNAMIC_STACKALLOC expansion and" | |||
20777 | " not tell us which reg is the stack pointer!"); | |||
20778 | ||||
20779 | SDValue SP = DAG.getCopyFromReg(Chain, dl, SPReg, VT); | |||
20780 | Chain = SP.getValue(1); | |||
20781 | const TargetFrameLowering &TFI = *Subtarget.getFrameLowering(); | |||
20782 | unsigned StackAlign = TFI.getStackAlignment(); | |||
20783 | Result = DAG.getNode(ISD::SUB, dl, VT, SP, Size); // Value | |||
20784 | if (Align > StackAlign) | |||
20785 | Result = DAG.getNode(ISD::AND, dl, VT, Result, | |||
20786 | DAG.getConstant(-(uint64_t)Align, dl, VT)); | |||
20787 | Chain = DAG.getCopyToReg(Chain, dl, SPReg, Result); // Output chain | |||
20788 | } else if (SplitStack) { | |||
20789 | MachineRegisterInfo &MRI = MF.getRegInfo(); | |||
20790 | ||||
20791 | if (Is64Bit) { | |||
20792 | // The 64-bit implementation of segmented stacks needs to clobber both r10 | |||
20793 | // and r11. This makes it impossible to use it along with nested parameters. | |||
20794 | const Function &F = MF.getFunction(); | |||
20795 | for (const auto &A : F.args()) { | |||
20796 | if (A.hasNestAttr()) | |||
20797 | report_fatal_error("Cannot use segmented stacks with functions that " | |||
20798 | "have nested arguments."); | |||
20799 | } | |||
20800 | } | |||
20801 | ||||
20802 | const TargetRegisterClass *AddrRegClass = getRegClassFor(SPTy); | |||
20803 | unsigned Vreg = MRI.createVirtualRegister(AddrRegClass); | |||
20804 | Chain = DAG.getCopyToReg(Chain, dl, Vreg, Size); | |||
20805 | Result = DAG.getNode(X86ISD::SEG_ALLOCA, dl, SPTy, Chain, | |||
20806 | DAG.getRegister(Vreg, SPTy)); | |||
20807 | } else { | |||
20808 | SDVTList NodeTys = DAG.getVTList(MVT::Other, MVT::Glue); | |||
20809 | Chain = DAG.getNode(X86ISD::WIN_ALLOCA, dl, NodeTys, Chain, Size); | |||
20810 | MF.getInfo<X86MachineFunctionInfo>()->setHasWinAlloca(true); | |||
20811 | ||||
20812 | const X86RegisterInfo *RegInfo = Subtarget.getRegisterInfo(); | |||
20813 | unsigned SPReg = RegInfo->getStackRegister(); | |||
20814 | SDValue SP = DAG.getCopyFromReg(Chain, dl, SPReg, SPTy); | |||
20815 | Chain = SP.getValue(1); | |||
20816 | ||||
20817 | if (Align) { | |||
20818 | SP = DAG.getNode(ISD::AND, dl, VT, SP.getValue(0), | |||
20819 | DAG.getConstant(-(uint64_t)Align, dl, VT)); | |||
20820 | Chain = DAG.getCopyToReg(Chain, dl, SPReg, SP); | |||
20821 | } | |||
20822 | ||||
20823 | Result = SP; | |||
20824 | } | |||
20825 | ||||
20826 | Chain = DAG.getCALLSEQ_END(Chain, DAG.getIntPtrConstant(0, dl, true), | |||
20827 | DAG.getIntPtrConstant(0, dl, true), SDValue(), dl); | |||
20828 | ||||
20829 | SDValue Ops[2] = {Result, Chain}; | |||
20830 | return DAG.getMergeValues(Ops, dl); | |||
20831 | } | |||
20832 | ||||
20833 | SDValue X86TargetLowering::LowerVASTART(SDValue Op, SelectionDAG &DAG) const { | |||
20834 | MachineFunction &MF = DAG.getMachineFunction(); | |||
20835 | auto PtrVT = getPointerTy(MF.getDataLayout()); | |||
20836 | X86MachineFunctionInfo *FuncInfo = MF.getInfo<X86MachineFunctionInfo>(); | |||
20837 | ||||
20838 | const Value *SV = cast<SrcValueSDNode>(Op.getOperand(2))->getValue(); | |||
20839 | SDLoc DL(Op); | |||
20840 | ||||
20841 | if (!Subtarget.is64Bit() || | |||
20842 | Subtarget.isCallingConvWin64(MF.getFunction().getCallingConv())) { | |||
20843 | // vastart just stores the address of the VarArgsFrameIndex slot into the | |||
20844 | // memory location argument. | |||
20845 | SDValue FR = DAG.getFrameIndex(FuncInfo->getVarArgsFrameIndex(), PtrVT); | |||
20846 | return DAG.getStore(Op.getOperand(0), DL, FR, Op.getOperand(1), | |||
20847 | MachinePointerInfo(SV)); | |||
20848 | } | |||
20849 | ||||
20850 | // __va_list_tag: | |||
20851 | // gp_offset (0 to 6 * 8) | |||
20852 | // fp_offset (48 to 48 + 8 * 16) | |||
20853 | // overflow_arg_area (point to parameters coming in memory). | |||
20854 | // reg_save_area | |||
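// In C terms the stores below populate (illustrative sketch):
//   struct __va_list_tag {
//     unsigned gp_offset;        // byte offset 0
//     unsigned fp_offset;        // byte offset 4
//     void *overflow_arg_area;   // byte offset 8
//     void *reg_save_area;       // byte offset 16 (12 on ILP32)
//   };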
20855 | SmallVector<SDValue, 8> MemOps; | |||
20856 | SDValue FIN = Op.getOperand(1); | |||
20857 | // Store gp_offset | |||
20858 | SDValue Store = DAG.getStore( | |||
20859 | Op.getOperand(0), DL, | |||
20860 | DAG.getConstant(FuncInfo->getVarArgsGPOffset(), DL, MVT::i32), FIN, | |||
20861 | MachinePointerInfo(SV)); | |||
20862 | MemOps.push_back(Store); | |||
20863 | ||||
20864 | // Store fp_offset | |||
20865 | FIN = DAG.getMemBasePlusOffset(FIN, 4, DL); | |||
20866 | Store = DAG.getStore( | |||
20867 | Op.getOperand(0), DL, | |||
20868 | DAG.getConstant(FuncInfo->getVarArgsFPOffset(), DL, MVT::i32), FIN, | |||
20869 | MachinePointerInfo(SV, 4)); | |||
20870 | MemOps.push_back(Store); | |||
20871 | ||||
20872 | // Store ptr to overflow_arg_area | |||
20873 | FIN = DAG.getNode(ISD::ADD, DL, PtrVT, FIN, DAG.getIntPtrConstant(4, DL)); | |||
20874 | SDValue OVFIN = DAG.getFrameIndex(FuncInfo->getVarArgsFrameIndex(), PtrVT); | |||
20875 | Store = | |||
20876 | DAG.getStore(Op.getOperand(0), DL, OVFIN, FIN, MachinePointerInfo(SV, 8)); | |||
20877 | MemOps.push_back(Store); | |||
20878 | ||||
20879 | // Store ptr to reg_save_area. | |||
20880 | FIN = DAG.getNode(ISD::ADD, DL, PtrVT, FIN, DAG.getIntPtrConstant( | |||
20881 | Subtarget.isTarget64BitLP64() ? 8 : 4, DL)); | |||
20882 | SDValue RSFIN = DAG.getFrameIndex(FuncInfo->getRegSaveFrameIndex(), PtrVT); | |||
20883 | Store = DAG.getStore( | |||
20884 | Op.getOperand(0), DL, RSFIN, FIN, | |||
20885 | MachinePointerInfo(SV, Subtarget.isTarget64BitLP64() ? 16 : 12)); | |||
20886 | MemOps.push_back(Store); | |||
20887 | return DAG.getNode(ISD::TokenFactor, DL, MVT::Other, MemOps); | |||
20888 | } | |||
20889 | ||||
20890 | SDValue X86TargetLowering::LowerVAARG(SDValue Op, SelectionDAG &DAG) const { | |||
20891 | assert(Subtarget.is64Bit() && | |||
20892 | "LowerVAARG only handles 64-bit va_arg!"); | |||
20893 | assert(Op.getNumOperands() == 4); | |||
20894 | ||||
20895 | MachineFunction &MF = DAG.getMachineFunction(); | |||
20896 | if (Subtarget.isCallingConvWin64(MF.getFunction().getCallingConv())) | |||
20897 | // The Win64 ABI uses char* instead of a structure. | |||
20898 | return DAG.expandVAArg(Op.getNode()); | |||
20899 | ||||
20900 | SDValue Chain = Op.getOperand(0); | |||
20901 | SDValue SrcPtr = Op.getOperand(1); | |||
20902 | const Value *SV = cast<SrcValueSDNode>(Op.getOperand(2))->getValue(); | |||
20903 | unsigned Align = Op.getConstantOperandVal(3); | |||
20904 | SDLoc dl(Op); | |||
20905 | ||||
20906 | EVT ArgVT = Op.getNode()->getValueType(0); | |||
20907 | Type *ArgTy = ArgVT.getTypeForEVT(*DAG.getContext()); | |||
20908 | uint32_t ArgSize = DAG.getDataLayout().getTypeAllocSize(ArgTy); | |||
20909 | uint8_t ArgMode; | |||
20910 | ||||
20911 | // Decide which area this value should be read from. | |||
20912 | // TODO: Implement the AMD64 ABI in its entirety. This simple | |||
20913 | // selection mechanism works only for the basic types. | |||
20914 | if (ArgVT == MVT::f80) { | |||
20915 | llvm_unreachable("va_arg for f80 not yet implemented"); | |||
20916 | } else if (ArgVT.isFloatingPoint() && ArgSize <= 16 /*bytes*/) { | |||
20917 | ArgMode = 2; // Argument passed in XMM register. Use fp_offset. | |||
20918 | } else if (ArgVT.isInteger() && ArgSize <= 32 /*bytes*/) { | |||
20919 | ArgMode = 1; // Argument passed in GPR64 register(s). Use gp_offset. | |||
20920 | } else { | |||
20921 | llvm_unreachable("Unhandled argument type in LowerVAARG"); | |||
20922 | } | |||
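// E.g. (illustrative): an i32 or i64 argument takes the gp_offset path
// (ArgMode 1), while a float or double argument takes the fp_offset path
// (ArgMode 2).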
20923 | ||||
20924 | if (ArgMode == 2) { | |||
20925 | // Sanity Check: Make sure using fp_offset makes sense. | |||
20926 | assert(!Subtarget.useSoftFloat() && | |||
20927 | !(MF.getFunction().hasFnAttribute(Attribute::NoImplicitFloat)) && | |||
20928 | Subtarget.hasSSE1()); | |||
20929 | } | |||
20930 | ||||
20931 | // Insert a VAARG_64 node into the DAG. | |||
20932 | // VAARG_64 returns two values: the variable argument address and the chain. | |||
20933 | SDValue InstOps[] = {Chain, SrcPtr, DAG.getConstant(ArgSize, dl, MVT::i32), | |||
20934 | DAG.getConstant(ArgMode, dl, MVT::i8), | |||
20935 | DAG.getConstant(Align, dl, MVT::i32)}; | |||
20936 | SDVTList VTs = DAG.getVTList(getPointerTy(DAG.getDataLayout()), MVT::Other); | |||
20937 | SDValue VAARG = DAG.getMemIntrinsicNode( | |||
20938 | X86ISD::VAARG_64, dl, | |||
20939 | VTs, InstOps, MVT::i64, | |||
20940 | MachinePointerInfo(SV), | |||
20941 | /*Align=*/0, | |||
20942 | MachineMemOperand::MOLoad | MachineMemOperand::MOStore); | |||
20943 | Chain = VAARG.getValue(1); | |||
20944 | ||||
20945 | // Load the next argument and return it | |||
20946 | return DAG.getLoad(ArgVT, dl, Chain, VAARG, MachinePointerInfo()); | |||
20947 | } | |||
20948 | ||||
20949 | static SDValue LowerVACOPY(SDValue Op, const X86Subtarget &Subtarget, | |||
20950 | SelectionDAG &DAG) { | |||
20951 | // X86-64 va_list is a struct { i32, i32, i8*, i8* }, except on Windows, | |||
20952 | // where a va_list is still an i8*. | |||
20953 | assert(Subtarget.is64Bit() && "This code only handles 64-bit va_copy!"); | |||
20954 | if (Subtarget.isCallingConvWin64( | |||
20955 | DAG.getMachineFunction().getFunction().getCallingConv())) | |||
20956 | // Probably a Win64 va_copy. | |||
20957 | return DAG.expandVACopy(Op.getNode()); | |||
20958 | ||||
20959 | SDValue Chain = Op.getOperand(0); | |||
20960 | SDValue DstPtr = Op.getOperand(1); | |||
20961 | SDValue SrcPtr = Op.getOperand(2); | |||
20962 | const Value *DstSV = cast<SrcValueSDNode>(Op.getOperand(3))->getValue(); | |||
20963 | const Value *SrcSV = cast<SrcValueSDNode>(Op.getOperand(4))->getValue(); | |||
20964 | SDLoc DL(Op); | |||
20965 | ||||
20966 | return DAG.getMemcpy(Chain, DL, DstPtr, SrcPtr, | |||
20967 | DAG.getIntPtrConstant(24, DL), 8, /*isVolatile*/false, | |||
20968 | false, false, | |||
20969 | MachinePointerInfo(DstSV), MachinePointerInfo(SrcSV)); | |||
20970 | } | |||
20971 | ||||
20972 | // Helper to get immediate/variable SSE shift opcode from other shift opcodes. | |||
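// E.g. (illustrative): ISD::SHL maps to X86ISD::VSHL for a variable amount
// and to X86ISD::VSHLI for an immediate amount.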
20973 | static unsigned getTargetVShiftUniformOpcode(unsigned Opc, bool IsVariable) { | |||
20974 | switch (Opc) { | |||
20975 | case ISD::SHL: | |||
20976 | case X86ISD::VSHL: | |||
20977 | case X86ISD::VSHLI: | |||
20978 | return IsVariable ? X86ISD::VSHL : X86ISD::VSHLI; | |||
20979 | case ISD::SRL: | |||
20980 | case X86ISD::VSRL: | |||
20981 | case X86ISD::VSRLI: | |||
20982 | return IsVariable ? X86ISD::VSRL : X86ISD::VSRLI; | |||
20983 | case ISD::SRA: | |||
20984 | case X86ISD::VSRA: | |||
20985 | case X86ISD::VSRAI: | |||
20986 | return IsVariable ? X86ISD::VSRA : X86ISD::VSRAI; | |||
20987 | } | |||
20988 | llvm_unreachable("Unknown target vector shift node"); | |||
20989 | } | |||
20990 | ||||
20991 | /// Handle vector element shifts where the shift amount is a constant. | |||
20992 | /// Takes the immediate version of the shift as input. | |||
20993 | static SDValue getTargetVShiftByConstNode(unsigned Opc, const SDLoc &dl, MVT VT, | |||
20994 | SDValue SrcOp, uint64_t ShiftAmt, | |||
20995 | SelectionDAG &DAG) { | |||
20996 | MVT ElementType = VT.getVectorElementType(); | |||
20997 | ||||
20998 | // Bitcast the source vector to the output type; this is mainly necessary for | |||
20999 | // vXi8/vXi64 shifts. | |||
21000 | if (VT != SrcOp.getSimpleValueType()) | |||
21001 | SrcOp = DAG.getBitcast(VT, SrcOp); | |||
21002 | ||||
21003 | // Fold this packed shift into its first operand if ShiftAmt is 0. | |||
21004 | if (ShiftAmt == 0) | |||
21005 | return SrcOp; | |||
21006 | ||||
21007 | // Check for ShiftAmt >= element width | |||
21008 | if (ShiftAmt >= ElementType.getSizeInBits()) { | |||
21009 | if (Opc == X86ISD::VSRAI) | |||
21010 | ShiftAmt = ElementType.getSizeInBits() - 1; | |||
21011 | else | |||
21012 | return DAG.getConstant(0, dl, VT); | |||
21013 | } | |||
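// E.g. (illustrative): shifting v4i32 left or logically right by 32 or
// more yields the zero vector, while an arithmetic right shift is clamped
// to 31 so each lane fills with its sign bit.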
21014 | ||||
21015 | assert((Opc == X86ISD::VSHLI || Opc == X86ISD::VSRLI || Opc == X86ISD::VSRAI) | |||
21016 | && "Unknown target vector shift-by-constant node"); | |||
21017 | ||||
21018 | // Fold this packed vector shift into a build vector if SrcOp is a | |||
21019 | // vector of Constants or UNDEFs. | |||
21020 | if (ISD::isBuildVectorOfConstantSDNodes(SrcOp.getNode())) { | |||
21021 | SmallVector<SDValue, 8> Elts; | |||
21022 | unsigned NumElts = SrcOp->getNumOperands(); | |||
21023 | ConstantSDNode *ND; | |||
21024 | ||||
21025 | switch(Opc) { | |||
21026 | default: llvm_unreachable("Unknown opcode!"); | |||
21027 | case X86ISD::VSHLI: | |||
21028 | for (unsigned i=0; i!=NumElts; ++i) { | |||
21029 | SDValue CurrentOp = SrcOp->getOperand(i); | |||
21030 | if (CurrentOp->isUndef()) { | |||
21031 | Elts.push_back(CurrentOp); | |||
21032 | continue; | |||
21033 | } | |||
21034 | ND = cast<ConstantSDNode>(CurrentOp); | |||
21035 | const APInt &C = ND->getAPIntValue(); | |||
21036 | Elts.push_back(DAG.getConstant(C.shl(ShiftAmt), dl, ElementType)); | |||
21037 | } | |||
21038 | break; | |||
21039 | case X86ISD::VSRLI: | |||
21040 | for (unsigned i=0; i!=NumElts; ++i) { | |||
21041 | SDValue CurrentOp = SrcOp->getOperand(i); | |||
21042 | if (CurrentOp->isUndef()) { | |||
21043 | Elts.push_back(CurrentOp); | |||
21044 | continue; | |||
21045 | } | |||
21046 | ND = cast<ConstantSDNode>(CurrentOp); | |||
21047 | const APInt &C = ND->getAPIntValue(); | |||
21048 | Elts.push_back(DAG.getConstant(C.lshr(ShiftAmt), dl, ElementType)); | |||
21049 | } | |||
21050 | break; | |||
21051 | case X86ISD::VSRAI: | |||
21052 | for (unsigned i=0; i!=NumElts; ++i) { | |||
21053 | SDValue CurrentOp = SrcOp->getOperand(i); | |||
21054 | if (CurrentOp->isUndef()) { | |||
21055 | Elts.push_back(CurrentOp); | |||
21056 | continue; | |||
21057 | } | |||
21058 | ND = cast<ConstantSDNode>(CurrentOp); | |||
21059 | const APInt &C = ND->getAPIntValue(); | |||
21060 | Elts.push_back(DAG.getConstant(C.ashr(ShiftAmt), dl, ElementType)); | |||
21061 | } | |||
21062 | break; | |||
21063 | } | |||
21064 | ||||
21065 | return DAG.getBuildVector(VT, dl, Elts); | |||
21066 | } | |||
21067 | ||||
21068 | return DAG.getNode(Opc, dl, VT, SrcOp, | |||
21069 | DAG.getConstant(ShiftAmt, dl, MVT::i8)); | |||
21070 | } | |||
21071 | ||||
21072 | /// Handle vector element shifts where the shift amount may or may not be a | |||
21073 | /// constant. Takes the immediate version of the shift as input. | |||
21074 | static SDValue getTargetVShiftNode(unsigned Opc, const SDLoc &dl, MVT VT, | |||
21075 | SDValue SrcOp, SDValue ShAmt, | |||
21076 | const X86Subtarget &Subtarget, | |||
21077 | SelectionDAG &DAG) { | |||
21078 | MVT SVT = ShAmt.getSimpleValueType(); | |||
21079 | assert((SVT == MVT::i32 || SVT == MVT::i64) && "Unexpected value type!"); | |||
21080 | ||||
21081 | // Catch shift-by-constant. | |||
21082 | if (ConstantSDNode *CShAmt = dyn_cast<ConstantSDNode>(ShAmt)) | |||
21083 | return getTargetVShiftByConstNode(Opc, dl, VT, SrcOp, | |||
21084 | CShAmt->getZExtValue(), DAG); | |||
21085 | ||||
21086 | // Change opcode to non-immediate version. | |||
21087 | Opc = getTargetVShiftUniformOpcode(Opc, true); | |||
21088 | ||||
21089 | // Need to build a vector containing shift amount. | |||
21090 | // SSE/AVX packed shifts only use the lower 64 bits of the shift count. | |||
21091 | // +====================+============+=======================================+ | |||
21092 | // | ShAmt is | HasSSE4.1? | Construct ShAmt vector as | | |||
21093 | // +====================+============+=======================================+ | |||
21094 | // | i64 | Yes, No | Use ShAmt as lowest elt | | |||
21095 | // | i32 | Yes | zero-extend in-reg | | |||
21096 | // | (i32 zext(i16/i8)) | Yes | zero-extend in-reg | | |||
21097 | // | (i32 zext(i16/i8)) | No | byte-shift-in-reg | | |||
21098 | // | i16/i32 | No | v4i32 build_vector(ShAmt, 0, ud, ud) | | |||
21099 | // +====================+============+=======================================+ | |||
21100 | ||||
21101 | if (SVT == MVT::i64) | |||
21102 | ShAmt = DAG.getNode(ISD::SCALAR_TO_VECTOR, SDLoc(ShAmt), MVT::v2i64, ShAmt); | |||
21103 | else if (ShAmt.getOpcode() == ISD::ZERO_EXTEND && | |||
21104 | ShAmt.getOperand(0).getOpcode() == ISD::EXTRACT_VECTOR_ELT && | |||
21105 | (ShAmt.getOperand(0).getSimpleValueType() == MVT::i16 || | |||
21106 | ShAmt.getOperand(0).getSimpleValueType() == MVT::i8)) { | |||
21107 | ShAmt = ShAmt.getOperand(0); | |||
21108 | MVT AmtTy = ShAmt.getSimpleValueType() == MVT::i8 ? MVT::v16i8 : MVT::v8i16; | |||
21109 | ShAmt = DAG.getNode(ISD::SCALAR_TO_VECTOR, SDLoc(ShAmt), AmtTy, ShAmt); | |||
21110 | if (Subtarget.hasSSE41()) | |||
21111 | ShAmt = DAG.getNode(ISD::ZERO_EXTEND_VECTOR_INREG, SDLoc(ShAmt), | |||
21112 | MVT::v2i64, ShAmt); | |||
21113 | else { | |||
21114 | SDValue ByteShift = DAG.getConstant( | |||
21115 | (128 - AmtTy.getScalarSizeInBits()) / 8, SDLoc(ShAmt), MVT::i8); | |||
21116 | ShAmt = DAG.getBitcast(MVT::v16i8, ShAmt); | |||
21117 | ShAmt = DAG.getNode(X86ISD::VSHLDQ, SDLoc(ShAmt), MVT::v16i8, ShAmt, | |||
21118 | ByteShift); | |||
21119 | ShAmt = DAG.getNode(X86ISD::VSRLDQ, SDLoc(ShAmt), MVT::v16i8, ShAmt, | |||
21120 | ByteShift); | |||
21121 | } | |||
21122 | } else if (Subtarget.hasSSE41() && | |||
21123 | ShAmt.getOpcode() == ISD::EXTRACT_VECTOR_ELT) { | |||
21124 | ShAmt = DAG.getNode(ISD::SCALAR_TO_VECTOR, SDLoc(ShAmt), MVT::v4i32, ShAmt); | |||
21125 | ShAmt = DAG.getNode(ISD::ZERO_EXTEND_VECTOR_INREG, SDLoc(ShAmt), | |||
21126 | MVT::v2i64, ShAmt); | |||
21127 | } else { | |||
21128 | SDValue ShOps[4] = {ShAmt, DAG.getConstant(0, dl, SVT), DAG.getUNDEF(SVT), | |||
21129 | DAG.getUNDEF(SVT)}; | |||
21130 | ShAmt = DAG.getBuildVector(MVT::v4i32, dl, ShOps); | |||
21131 | } | |||
21132 | ||||
21133 | // The return type has to be a 128-bit type with the same element | |||
21134 | // type as the input type. | |||
21135 | MVT EltVT = VT.getVectorElementType(); | |||
21136 | MVT ShVT = MVT::getVectorVT(EltVT, 128 / EltVT.getSizeInBits()); | |||
21137 | ||||
21138 | ShAmt = DAG.getBitcast(ShVT, ShAmt); | |||
21139 | return DAG.getNode(Opc, dl, VT, SrcOp, ShAmt); | |||
21140 | } | |||
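// Illustrative use, mirroring the VSHIFT intrinsic case in
// LowerINTRINSIC_WO_CHAIN below: the intrinsic's vector and shift-amount
// operands are handed straight to this helper, which then picks either the
// folded/immediate form or the register form of the shift.
//   return getTargetVShiftNode(IntrData->Opc0, dl, Op.getSimpleValueType(),
//                              Op.getOperand(1), Op.getOperand(2), Subtarget,
//                              DAG);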
21141 | ||||
21142 | /// Return \p Mask with the necessary casting or extending | |||
21143 | /// according to \p MaskVT when lowering masking intrinsics. | |||
21144 | static SDValue getMaskNode(SDValue Mask, MVT MaskVT, | |||
21145 | const X86Subtarget &Subtarget, SelectionDAG &DAG, | |||
21146 | const SDLoc &dl) { | |||
21147 | ||||
21148 | if (isAllOnesConstant(Mask)) | |||
21149 | return DAG.getConstant(1, dl, MaskVT); | |||
21150 | if (X86::isZeroNode(Mask)) | |||
21151 | return DAG.getConstant(0, dl, MaskVT); | |||
21152 | ||||
21153 | if (MaskVT.bitsGT(Mask.getSimpleValueType())) { | |||
21154 | // Mask should be extended | |||
21155 | Mask = DAG.getNode(ISD::ANY_EXTEND, dl, | |||
21156 | MVT::getIntegerVT(MaskVT.getSizeInBits()), Mask); | |||
21157 | } | |||
21158 | ||||
21159 | if (Mask.getSimpleValueType() == MVT::i64 && Subtarget.is32Bit()) { | |||
21160 | assert(MaskVT == MVT::v64i1 && "Expected v64i1 mask!"); | |||
21161 | assert(Subtarget.hasBWI() && "Expected AVX512BW target!"); | |||
21162 | // In 32-bit mode a bitcast of i64 is illegal, so extend/split it. | |||
21163 | SDValue Lo, Hi; | |||
21164 | Lo = DAG.getNode(ISD::EXTRACT_ELEMENT, dl, MVT::i32, Mask, | |||
21165 | DAG.getConstant(0, dl, MVT::i32)); | |||
21166 | Hi = DAG.getNode(ISD::EXTRACT_ELEMENT, dl, MVT::i32, Mask, | |||
21167 | DAG.getConstant(1, dl, MVT::i32)); | |||
21168 | ||||
21169 | Lo = DAG.getBitcast(MVT::v32i1, Lo); | |||
21170 | Hi = DAG.getBitcast(MVT::v32i1, Hi); | |||
21171 | ||||
21172 | return DAG.getNode(ISD::CONCAT_VECTORS, dl, MVT::v64i1, Lo, Hi); | |||
21173 | } else { | |||
21174 | MVT BitcastVT = MVT::getVectorVT(MVT::i1, | |||
21175 | Mask.getSimpleValueType().getSizeInBits()); | |||
21176 | // When MaskVT is v2i1 or v4i1, the low 2 or 4 elements | |||
21177 | // are extracted by EXTRACT_SUBVECTOR. | |||
21178 | return DAG.getNode(ISD::EXTRACT_SUBVECTOR, dl, MaskVT, | |||
21179 | DAG.getBitcast(BitcastVT, Mask), | |||
21180 | DAG.getIntPtrConstant(0, dl)); | |||
21181 | } | |||
21182 | } | |||
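// A sketch of the common (non-v64i1) path above, in pseudo-DAG form for a
// hypothetical i8 mask lowered to v4i1:
//   (v4i1 (extract_subvector (v8i1 (bitcast (i8 %mask))), 0))
// i.e. the scalar mask is reinterpreted as a bit vector and only the low
// lanes that MaskVT needs are kept.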
21183 | ||||
21184 | /// Return (and \p Op, \p Mask) for compare instructions or | |||
21185 | /// (vselect \p Mask, \p Op, \p PreservedSrc) for others along with the | |||
21186 | /// necessary casting or extending for \p Mask when lowering masking intrinsics | |||
21187 | static SDValue getVectorMaskingNode(SDValue Op, SDValue Mask, | |||
21188 | SDValue PreservedSrc, | |||
21189 | const X86Subtarget &Subtarget, | |||
21190 | SelectionDAG &DAG) { | |||
21191 | MVT VT = Op.getSimpleValueType(); | |||
21192 | MVT MaskVT = MVT::getVectorVT(MVT::i1, VT.getVectorNumElements()); | |||
21193 | unsigned OpcodeSelect = ISD::VSELECT; | |||
21194 | SDLoc dl(Op); | |||
21195 | ||||
21196 | if (isAllOnesConstant(Mask)) | |||
21197 | return Op; | |||
21198 | ||||
21199 | SDValue VMask = getMaskNode(Mask, MaskVT, Subtarget, DAG, dl); | |||
21200 | ||||
21201 | switch (Op.getOpcode()) { | |||
21202 | default: break; | |||
21203 | case X86ISD::CMPM: | |||
21204 | case X86ISD::CMPM_RND: | |||
21205 | case X86ISD::VPSHUFBITQMB: | |||
21206 | case X86ISD::VFPCLASS: | |||
21207 | return DAG.getNode(ISD::AND, dl, VT, Op, VMask); | |||
21208 | case ISD::TRUNCATE: | |||
21209 | case X86ISD::VTRUNC: | |||
21210 | case X86ISD::VTRUNCS: | |||
21211 | case X86ISD::VTRUNCUS: | |||
21212 | case X86ISD::CVTPS2PH: | |||
21213 | // We can't use ISD::VSELECT here because it is not always "Legal" | |||
21214 | // for the destination type. For example vpmovqb requires only AVX512, | |||
21215 | // while a vselect on byte elements requires BWI. | |||
21216 | OpcodeSelect = X86ISD::SELECT; | |||
21217 | break; | |||
21218 | } | |||
21219 | if (PreservedSrc.isUndef()) | |||
21220 | PreservedSrc = getZeroVector(VT, Subtarget, DAG, dl); | |||
21221 | return DAG.getNode(OpcodeSelect, dl, VT, VMask, Op, PreservedSrc); | |||
21222 | } | |||
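// Illustratively, the two shapes this helper produces are:
//   compare-style ops: (and %Op, %VMask)
//   everything else:   (vselect %VMask, %Op, %PreservedSrc)
// (or X86ISD::SELECT for the truncating cases), with an undef PreservedSrc
// first replaced by a zero vector.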
21223 | ||||
21224 | /// Creates an SDNode for a predicated scalar operation. | |||
21225 | /// \returns (X86vselect \p Mask, \p Op, \p PreservedSrc). | |||
21226 | /// The mask comes in as MVT::i8 and is transformed | |||
21227 | /// to MVT::v1i1 while lowering masking intrinsics. | |||
21228 | /// The main difference between ScalarMaskingNode and VectorMaskingNode is the | |||
21229 | /// use of "X86select" instead of "vselect": we simply cannot create a | |||
21230 | /// "vselect" node for a scalar instruction. | |||
21231 | static SDValue getScalarMaskingNode(SDValue Op, SDValue Mask, | |||
21232 | SDValue PreservedSrc, | |||
21233 | const X86Subtarget &Subtarget, | |||
21234 | SelectionDAG &DAG) { | |||
21235 | ||||
21236 | if (auto *MaskConst = dyn_cast<ConstantSDNode>(Mask)) | |||
21237 | if (MaskConst->getZExtValue() & 0x1) | |||
21238 | return Op; | |||
21239 | ||||
21240 | MVT VT = Op.getSimpleValueType(); | |||
21241 | SDLoc dl(Op); | |||
21242 | ||||
21243 | assert(Mask.getValueType() == MVT::i8 && "Unexpected type"); | |||
21244 | SDValue IMask = DAG.getNode(ISD::EXTRACT_SUBVECTOR, dl, MVT::v1i1, | |||
21245 | DAG.getBitcast(MVT::v8i1, Mask), | |||
21246 | DAG.getIntPtrConstant(0, dl)); | |||
21247 | if (Op.getOpcode() == X86ISD::FSETCCM || | |||
21248 | Op.getOpcode() == X86ISD::FSETCCM_RND || | |||
21249 | Op.getOpcode() == X86ISD::VFPCLASSS) | |||
21250 | return DAG.getNode(ISD::AND, dl, VT, Op, IMask); | |||
21251 | ||||
21252 | if (PreservedSrc.isUndef()) | |||
21253 | PreservedSrc = getZeroVector(VT, Subtarget, DAG, dl); | |||
21254 | return DAG.getNode(X86ISD::SELECTS, dl, VT, IMask, Op, PreservedSrc); | |||
21255 | } | |||
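// In pseudo-DAG form, a masked scalar op therefore becomes, illustratively:
//   (X86ISD::SELECTS (v1i1 (extract_subvector (v8i1 (bitcast %i8mask)), 0)),
//                    %Op, %PreservedSrc)
// so only bit 0 of the i8 mask decides between the new result and the
// preserved source.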
21256 | ||||
21257 | static int getSEHRegistrationNodeSize(const Function *Fn) { | |||
21258 | if (!Fn->hasPersonalityFn()) | |||
21259 | report_fatal_error( | |||
21260 | "querying registration node size for function without personality"); | |||
21261 | // The RegNodeSize is 6 32-bit words for SEH and 4 for C++ EH. See | |||
21262 | // WinEHStatePass for the full struct definition. | |||
21263 | switch (classifyEHPersonality(Fn->getPersonalityFn())) { | |||
21264 | case EHPersonality::MSVC_X86SEH: return 24; | |||
21265 | case EHPersonality::MSVC_CXX: return 16; | |||
21266 | default: break; | |||
21267 | } | |||
21268 | report_fatal_error( | |||
21269 | "can only recover FP for 32-bit MSVC EH personality functions"); | |||
21270 | } | |||
21271 | ||||
21272 | /// When the MSVC runtime transfers control to us, either to an outlined | |||
21273 | /// function or when returning to a parent frame after catching an exception, we | |||
21274 | /// recover the parent frame pointer by doing arithmetic on the incoming EBP. | |||
21275 | /// Here's the math: | |||
21276 | /// RegNodeBase = EntryEBP - RegNodeSize | |||
21277 | /// ParentFP = RegNodeBase - ParentFrameOffset | |||
21278 | /// Subtracting RegNodeSize takes us to the offset of the registration node, and | |||
21279 | /// subtracting the offset (negative on x86) takes us back to the parent FP. | |||
21280 | static SDValue recoverFramePointer(SelectionDAG &DAG, const Function *Fn, | |||
21281 | SDValue EntryEBP) { | |||
21282 | MachineFunction &MF = DAG.getMachineFunction(); | |||
21283 | SDLoc dl; | |||
21284 | ||||
21285 | const TargetLowering &TLI = DAG.getTargetLoweringInfo(); | |||
21286 | MVT PtrVT = TLI.getPointerTy(DAG.getDataLayout()); | |||
21287 | ||||
21288 | // It's possible that the parent function no longer has a personality function | |||
21289 | // if the exceptional code was optimized away, in which case we just return | |||
21290 | // the incoming EBP. | |||
21291 | if (!Fn->hasPersonalityFn()) | |||
21292 | return EntryEBP; | |||
21293 | ||||
21294 | // Get an MCSymbol that will ultimately resolve to the frame offset of the EH | |||
21295 | // registration, or the .set_setframe offset. | |||
21296 | MCSymbol *OffsetSym = | |||
21297 | MF.getMMI().getContext().getOrCreateParentFrameOffsetSymbol( | |||
21298 | GlobalValue::dropLLVMManglingEscape(Fn->getName())); | |||
21299 | SDValue OffsetSymVal = DAG.getMCSymbol(OffsetSym, PtrVT); | |||
21300 | SDValue ParentFrameOffset = | |||
21301 | DAG.getNode(ISD::LOCAL_RECOVER, dl, PtrVT, OffsetSymVal); | |||
21302 | ||||
21303 | // Return EntryEBP + ParentFrameOffset for x64. This adjusts from RSP after | |||
21304 | // prologue to RBP in the parent function. | |||
21305 | const X86Subtarget &Subtarget = | |||
21306 | static_cast<const X86Subtarget &>(DAG.getSubtarget()); | |||
21307 | if (Subtarget.is64Bit()) | |||
21308 | return DAG.getNode(ISD::ADD, dl, PtrVT, EntryEBP, ParentFrameOffset); | |||
21309 | ||||
21310 | int RegNodeSize = getSEHRegistrationNodeSize(Fn); | |||
21311 | // RegNodeBase = EntryEBP - RegNodeSize | |||
21312 | // ParentFP = RegNodeBase - ParentFrameOffset | |||
21313 | SDValue RegNodeBase = DAG.getNode(ISD::SUB, dl, PtrVT, EntryEBP, | |||
21314 | DAG.getConstant(RegNodeSize, dl, PtrVT)); | |||
21315 | return DAG.getNode(ISD::SUB, dl, PtrVT, RegNodeBase, ParentFrameOffset); | |||
21316 | } | |||
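// A worked example with made-up numbers for the 32-bit MSVC C++ personality:
// RegNodeSize is 16, and suppose ParentFrameOffset is -32. Then
//   RegNodeBase = EntryEBP - 16
//   ParentFP    = RegNodeBase - (-32) = EntryEBP + 16
// i.e. subtracting the negative offset steps back up from the registration
// node to the parent's frame pointer.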
21317 | ||||
21318 | SDValue X86TargetLowering::LowerINTRINSIC_WO_CHAIN(SDValue Op, | |||
21319 | SelectionDAG &DAG) const { | |||
21320 | // Helper to detect if the operand is CUR_DIRECTION rounding mode. | |||
21321 | auto isRoundModeCurDirection = [](SDValue Rnd) { | |||
21322 | if (!isa<ConstantSDNode>(Rnd)) | |||
21323 | return false; | |||
21324 | ||||
21325 | unsigned Round = cast<ConstantSDNode>(Rnd)->getZExtValue(); | |||
21326 | return Round == X86::STATIC_ROUNDING::CUR_DIRECTION; | |||
21327 | }; | |||
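// For reference, assuming the usual Intel immediate encoding:
// CUR_DIRECTION matches _MM_FROUND_CUR_DIRECTION (0x04), the value
// intrinsics such as _mm512_add_round_ps pass when no static rounding is
// requested, so such calls take the non-rounding paths below.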
21328 | ||||
21329 | SDLoc dl(Op); | |||
21330 | unsigned IntNo = cast<ConstantSDNode>(Op.getOperand(0))->getZExtValue(); | |||
21331 | MVT VT = Op.getSimpleValueType(); | |||
21332 | const IntrinsicData* IntrData = getIntrinsicWithoutChain(IntNo); | |||
21333 | if (IntrData) { | |||
21334 | switch (IntrData->Type) { | |||
21335 | case INTR_TYPE_1OP: { | |||
21336 | // We specify 2 possible opcodes for intrinsics with rounding modes. | |||
21337 | // First, we check if the intrinsic may have a non-default rounding mode | |||
21338 | // (IntrData->Opc1 != 0), then we check the rounding mode operand. | |||
21339 | unsigned IntrWithRoundingModeOpcode = IntrData->Opc1; | |||
21340 | if (IntrWithRoundingModeOpcode != 0) { | |||
21341 | SDValue Rnd = Op.getOperand(2); | |||
21342 | if (!isRoundModeCurDirection(Rnd)) { | |||
21343 | return DAG.getNode(IntrWithRoundingModeOpcode, dl, Op.getValueType(), | |||
21344 | Op.getOperand(1), Rnd); | |||
21345 | } | |||
21346 | } | |||
21347 | return DAG.getNode(IntrData->Opc0, dl, Op.getValueType(), Op.getOperand(1)); | |||
21348 | } | |||
21349 | case INTR_TYPE_2OP: | |||
21350 | case INTR_TYPE_2OP_IMM8: { | |||
21351 | SDValue Src2 = Op.getOperand(2); | |||
21352 | ||||
21353 | if (IntrData->Type == INTR_TYPE_2OP_IMM8) | |||
21354 | Src2 = DAG.getNode(ISD::TRUNCATE, dl, MVT::i8, Src2); | |||
21355 | ||||
21356 | // We specify 2 possible opcodes for intrinsics with rounding modes. | |||
21357 | // First, we check if the intrinsic may have a non-default rounding mode | |||
21358 | // (IntrData->Opc1 != 0), then we check the rounding mode operand. | |||
21359 | unsigned IntrWithRoundingModeOpcode = IntrData->Opc1; | |||
21360 | if (IntrWithRoundingModeOpcode != 0) { | |||
21361 | SDValue Rnd = Op.getOperand(3); | |||
21362 | if (!isRoundModeCurDirection(Rnd)) { | |||
21363 | return DAG.getNode(IntrWithRoundingModeOpcode, dl, Op.getValueType(), | |||
21364 | Op.getOperand(1), Src2, Rnd); | |||
21365 | } | |||
21366 | } | |||
21367 | ||||
21368 | return DAG.getNode(IntrData->Opc0, dl, Op.getValueType(), | |||
21369 | Op.getOperand(1), Src2); | |||
21370 | } | |||
21371 | case INTR_TYPE_3OP: | |||
21372 | case INTR_TYPE_3OP_IMM8: { | |||
21373 | SDValue Src1 = Op.getOperand(1); | |||
21374 | SDValue Src2 = Op.getOperand(2); | |||
21375 | SDValue Src3 = Op.getOperand(3); | |||
21376 | ||||
21377 | if (IntrData->Type == INTR_TYPE_3OP_IMM8) | |||
21378 | Src3 = DAG.getNode(ISD::TRUNCATE, dl, MVT::i8, Src3); | |||
21379 | ||||
21380 | // We specify 2 possible opcodes for intrinsics with rounding modes. | |||
21381 | // First, we check if the intrinsic may have a non-default rounding mode | |||
21382 | // (IntrData->Opc1 != 0), then we check the rounding mode operand. | |||
21383 | unsigned IntrWithRoundingModeOpcode = IntrData->Opc1; | |||
21384 | if (IntrWithRoundingModeOpcode != 0) { | |||
21385 | SDValue Rnd = Op.getOperand(4); | |||
21386 | if (!isRoundModeCurDirection(Rnd)) { | |||
21387 | return DAG.getNode(IntrWithRoundingModeOpcode, | |||
21388 | dl, Op.getValueType(), | |||
21389 | Src1, Src2, Src3, Rnd); | |||
21390 | } | |||
21391 | } | |||
21392 | ||||
21393 | return DAG.getNode(IntrData->Opc0, dl, Op.getValueType(), | |||
21394 | Src1, Src2, Src3); | |||
21395 | } | |||
21396 | case INTR_TYPE_4OP: | |||
21397 | return DAG.getNode(IntrData->Opc0, dl, Op.getValueType(), Op.getOperand(1), | |||
21398 | Op.getOperand(2), Op.getOperand(3), Op.getOperand(4)); | |||
21399 | case INTR_TYPE_1OP_MASK_RM: { | |||
21400 | SDValue Src = Op.getOperand(1); | |||
21401 | SDValue PassThru = Op.getOperand(2); | |||
21402 | SDValue Mask = Op.getOperand(3); | |||
21403 | SDValue RoundingMode; | |||
21404 | // We always add a rounding-mode operand to the node. | |||
21405 | // If the rounding mode is not specified, we add the | |||
21406 | // "current direction" mode. | |||
21407 | if (Op.getNumOperands() == 4) | |||
21408 | RoundingMode = | |||
21409 | DAG.getConstant(X86::STATIC_ROUNDING::CUR_DIRECTION, dl, MVT::i32); | |||
21410 | else | |||
21411 | RoundingMode = Op.getOperand(4); | |||
21412 | assert(IntrData->Opc1 == 0 && "Unexpected second opcode!"); | |||
21413 | return getVectorMaskingNode(DAG.getNode(IntrData->Opc0, dl, VT, Src, | |||
21414 | RoundingMode), | |||
21415 | Mask, PassThru, Subtarget, DAG); | |||
21416 | } | |||
21417 | case INTR_TYPE_1OP_MASK: { | |||
21418 | SDValue Src = Op.getOperand(1); | |||
21419 | SDValue PassThru = Op.getOperand(2); | |||
21420 | SDValue Mask = Op.getOperand(3); | |||
21421 | // We add a rounding mode to the node when | |||
21422 | // - RM Opcode is specified and | |||
21423 | // - RM is not "current direction". | |||
21424 | unsigned IntrWithRoundingModeOpcode = IntrData->Opc1; | |||
21425 | if (IntrWithRoundingModeOpcode != 0) { | |||
21426 | SDValue Rnd = Op.getOperand(4); | |||
21427 | if (!isRoundModeCurDirection(Rnd)) { | |||
21428 | return getVectorMaskingNode(DAG.getNode(IntrWithRoundingModeOpcode, | |||
21429 | dl, Op.getValueType(), | |||
21430 | Src, Rnd), | |||
21431 | Mask, PassThru, Subtarget, DAG); | |||
21432 | } | |||
21433 | } | |||
21434 | return getVectorMaskingNode(DAG.getNode(IntrData->Opc0, dl, VT, Src), | |||
21435 | Mask, PassThru, Subtarget, DAG); | |||
21436 | } | |||
21437 | case INTR_TYPE_SCALAR_MASK: { | |||
21438 | SDValue Src1 = Op.getOperand(1); | |||
21439 | SDValue Src2 = Op.getOperand(2); | |||
21440 | SDValue passThru = Op.getOperand(3); | |||
21441 | SDValue Mask = Op.getOperand(4); | |||
21442 | unsigned IntrWithRoundingModeOpcode = IntrData->Opc1; | |||
21443 | // There are 2 kinds of intrinsics in this group: | |||
21444 | // (1) With suppress-all-exceptions (sae) or rounding mode - 6 operands | |||
21445 | // (2) With rounding mode and sae - 7 operands. | |||
21446 | bool HasRounding = IntrWithRoundingModeOpcode != 0; | |||
21447 | if (Op.getNumOperands() == (5U + HasRounding)) { | |||
21448 | if (HasRounding) { | |||
21449 | SDValue Rnd = Op.getOperand(5); | |||
21450 | if (!isRoundModeCurDirection(Rnd)) | |||
21451 | return getScalarMaskingNode(DAG.getNode(IntrWithRoundingModeOpcode, | |||
21452 | dl, VT, Src1, Src2, Rnd), | |||
21453 | Mask, passThru, Subtarget, DAG); | |||
21454 | } | |||
21455 | return getScalarMaskingNode(DAG.getNode(IntrData->Opc0, dl, VT, Src1, | |||
21456 | Src2), | |||
21457 | Mask, passThru, Subtarget, DAG); | |||
21458 | } | |||
21459 | ||||
21460 | assert(Op.getNumOperands() == (6U + HasRounding) && | |||
21461 | "Unexpected intrinsic form"); | |||
21462 | SDValue RoundingMode = Op.getOperand(5); | |||
21463 | if (HasRounding) { | |||
21464 | SDValue Sae = Op.getOperand(6); | |||
21465 | if (!isRoundModeCurDirection(Sae)) | |||
21466 | return getScalarMaskingNode(DAG.getNode(IntrWithRoundingModeOpcode, | |||
21467 | dl, VT, Src1, Src2, | |||
21468 | RoundingMode, Sae), | |||
21469 | Mask, passThru, Subtarget, DAG); | |||
21470 | } | |||
21471 | return getScalarMaskingNode(DAG.getNode(IntrData->Opc0, dl, VT, Src1, | |||
21472 | Src2, RoundingMode), | |||
21473 | Mask, passThru, Subtarget, DAG); | |||
21474 | } | |||
21475 | case INTR_TYPE_SCALAR_MASK_RM: { | |||
21476 | SDValue Src1 = Op.getOperand(1); | |||
21477 | SDValue Src2 = Op.getOperand(2); | |||
21478 | SDValue Src0 = Op.getOperand(3); | |||
21479 | SDValue Mask = Op.getOperand(4); | |||
21480 | // There are 2 kinds of intrinsics in this group: | |||
21481 | // (1) With suppress-all-exceptions (sae) or rounding mode - 6 operands | |||
21482 | // (2) With rounding mode and sae - 7 operands. | |||
21483 | if (Op.getNumOperands() == 6) { | |||
21484 | SDValue Sae = Op.getOperand(5); | |||
21485 | return getScalarMaskingNode(DAG.getNode(IntrData->Opc0, dl, VT, Src1, Src2, | |||
21486 | Sae), | |||
21487 | Mask, Src0, Subtarget, DAG); | |||
21488 | } | |||
21489 | assert(Op.getNumOperands() == 7 && "Unexpected intrinsic form"); | |||
21490 | SDValue RoundingMode = Op.getOperand(5); | |||
21491 | SDValue Sae = Op.getOperand(6); | |||
21492 | return getScalarMaskingNode(DAG.getNode(IntrData->Opc0, dl, VT, Src1, Src2, | |||
21493 | RoundingMode, Sae), | |||
21494 | Mask, Src0, Subtarget, DAG); | |||
21495 | } | |||
21496 | case INTR_TYPE_2OP_MASK: { | |||
21497 | SDValue Src1 = Op.getOperand(1); | |||
21498 | SDValue Src2 = Op.getOperand(2); | |||
21499 | SDValue PassThru = Op.getOperand(3); | |||
21500 | SDValue Mask = Op.getOperand(4); | |||
21501 | ||||
21502 | // We specify 2 possible opcodes for intrinsics with rounding modes. | |||
21503 | // First, we check if the intrinsic may have a non-default rounding mode | |||
21504 | // (IntrData->Opc1 != 0), then we check the rounding mode operand. | |||
21505 | unsigned IntrWithRoundingModeOpcode = IntrData->Opc1; | |||
21506 | if (IntrWithRoundingModeOpcode != 0) { | |||
21507 | SDValue Rnd = Op.getOperand(5); | |||
21508 | if (!isRoundModeCurDirection(Rnd)) { | |||
21509 | return getVectorMaskingNode(DAG.getNode(IntrWithRoundingModeOpcode, | |||
21510 | dl, Op.getValueType(), | |||
21511 | Src1, Src2, Rnd), | |||
21512 | Mask, PassThru, Subtarget, DAG); | |||
21513 | } | |||
21514 | } | |||
21515 | // TODO: Intrinsics should have fast-math-flags to propagate. | |||
21516 | return getVectorMaskingNode(DAG.getNode(IntrData->Opc0, dl, VT, Src1, Src2), | |||
21517 | Mask, PassThru, Subtarget, DAG); | |||
21518 | } | |||
21519 | case INTR_TYPE_2OP_MASK_RM: { | |||
21520 | SDValue Src1 = Op.getOperand(1); | |||
21521 | SDValue Src2 = Op.getOperand(2); | |||
21522 | SDValue PassThru = Op.getOperand(3); | |||
21523 | SDValue Mask = Op.getOperand(4); | |||
21524 | // We specify 2 possible modes for intrinsics, with/without rounding | |||
21525 | // modes. | |||
21526 | // First, we check if the intrinsic has a rounding mode (6 operands); | |||
21527 | // if not, we set the rounding mode to "current". | |||
21528 | SDValue Rnd; | |||
21529 | if (Op.getNumOperands() == 6) | |||
21530 | Rnd = Op.getOperand(5); | |||
21531 | else | |||
21532 | Rnd = DAG.getConstant(X86::STATIC_ROUNDING::CUR_DIRECTION, dl, MVT::i32); | |||
21533 | return getVectorMaskingNode(DAG.getNode(IntrData->Opc0, dl, VT, | |||
21534 | Src1, Src2, Rnd), | |||
21535 | Mask, PassThru, Subtarget, DAG); | |||
21536 | } | |||
21537 | case INTR_TYPE_3OP_SCALAR_MASK: { | |||
21538 | SDValue Src1 = Op.getOperand(1); | |||
21539 | SDValue Src2 = Op.getOperand(2); | |||
21540 | SDValue Src3 = Op.getOperand(3); | |||
21541 | SDValue PassThru = Op.getOperand(4); | |||
21542 | SDValue Mask = Op.getOperand(5); | |||
21543 | ||||
21544 | unsigned IntrWithRoundingModeOpcode = IntrData->Opc1; | |||
21545 | if (IntrWithRoundingModeOpcode != 0) { | |||
21546 | SDValue Rnd = Op.getOperand(6); | |||
21547 | if (!isRoundModeCurDirection(Rnd)) | |||
21548 | return getScalarMaskingNode(DAG.getNode(IntrWithRoundingModeOpcode, | |||
21549 | dl, VT, Src1, Src2, Src3, Rnd), | |||
21550 | Mask, PassThru, Subtarget, DAG); | |||
21551 | } | |||
21552 | return getScalarMaskingNode(DAG.getNode(IntrData->Opc0, dl, VT, Src1, | |||
21553 | Src2, Src3), | |||
21554 | Mask, PassThru, Subtarget, DAG); | |||
21555 | } | |||
21556 | case INTR_TYPE_3OP_MASK: { | |||
21557 | SDValue Src1 = Op.getOperand(1); | |||
21558 | SDValue Src2 = Op.getOperand(2); | |||
21559 | SDValue Src3 = Op.getOperand(3); | |||
21560 | SDValue PassThru = Op.getOperand(4); | |||
21561 | SDValue Mask = Op.getOperand(5); | |||
21562 | ||||
21563 | // We specify 2 possible opcodes for intrinsics with rounding modes. | |||
21564 | // First, we check if the intrinsic may have a non-default rounding mode | |||
21565 | // (IntrData->Opc1 != 0), then we check the rounding mode operand. | |||
21566 | unsigned IntrWithRoundingModeOpcode = IntrData->Opc1; | |||
21567 | if (IntrWithRoundingModeOpcode != 0) { | |||
21568 | SDValue Rnd = Op.getOperand(6); | |||
21569 | if (!isRoundModeCurDirection(Rnd)) { | |||
21570 | return getVectorMaskingNode(DAG.getNode(IntrWithRoundingModeOpcode, | |||
21571 | dl, Op.getValueType(), | |||
21572 | Src1, Src2, Src3, Rnd), | |||
21573 | Mask, PassThru, Subtarget, DAG); | |||
21574 | } | |||
21575 | } | |||
21576 | return getVectorMaskingNode(DAG.getNode(IntrData->Opc0, dl, VT, | |||
21577 | Src1, Src2, Src3), | |||
21578 | Mask, PassThru, Subtarget, DAG); | |||
21579 | } | |||
21580 | case VPERM_2OP: { | |||
21581 | SDValue Src1 = Op.getOperand(1); | |||
21582 | SDValue Src2 = Op.getOperand(2); | |||
21583 | ||||
21584 | // Swap Src1 and Src2 in the node creation | |||
21585 | return DAG.getNode(IntrData->Opc0, dl, VT, Src2, Src1); | |||
21586 | } | |||
21587 | case FMA_OP_MASKZ: | |||
21588 | case FMA_OP_MASK: { | |||
21589 | SDValue Src1 = Op.getOperand(1); | |||
21590 | SDValue Src2 = Op.getOperand(2); | |||
21591 | SDValue Src3 = Op.getOperand(3); | |||
21592 | SDValue Mask = Op.getOperand(4); | |||
21593 | MVT VT = Op.getSimpleValueType(); | |||
21594 | SDValue PassThru = SDValue(); | |||
21595 | ||||
21596 | // set PassThru element | |||
21597 | if (IntrData->Type == FMA_OP_MASKZ) | |||
21598 | PassThru = getZeroVector(VT, Subtarget, DAG, dl); | |||
21599 | else | |||
21600 | PassThru = Src1; | |||
21601 | ||||
21602 | // We specify 2 possible opcodes for intrinsics with rounding modes. | |||
21603 | // First, we check if the intrinsic may have a non-default rounding mode | |||
21604 | // (IntrData->Opc1 != 0), then we check the rounding mode operand. | |||
21605 | unsigned IntrWithRoundingModeOpcode = IntrData->Opc1; | |||
21606 | if (IntrWithRoundingModeOpcode != 0) { | |||
21607 | SDValue Rnd = Op.getOperand(5); | |||
21608 | if (!isRoundModeCurDirection(Rnd)) | |||
21609 | return getVectorMaskingNode(DAG.getNode(IntrWithRoundingModeOpcode, | |||
21610 | dl, Op.getValueType(), | |||
21611 | Src1, Src2, Src3, Rnd), | |||
21612 | Mask, PassThru, Subtarget, DAG); | |||
21613 | } | |||
21614 | return getVectorMaskingNode(DAG.getNode(IntrData->Opc0, | |||
21615 | dl, Op.getValueType(), | |||
21616 | Src1, Src2, Src3), | |||
21617 | Mask, PassThru, Subtarget, DAG); | |||
21618 | } | |||
21619 | case IFMA_OP: | |||
21620 | // NOTE: We need to swizzle the operands to pass the multiply operands | |||
21621 | // first. | |||
21622 | return DAG.getNode(IntrData->Opc0, dl, Op.getValueType(), | |||
21623 | Op.getOperand(2), Op.getOperand(3), Op.getOperand(1)); | |||
21624 | case CVTPD2PS: | |||
21625 | // ISD::FP_ROUND has a second argument that indicates if the truncation | |||
21626 | // does not change the value. Set it to 0 since it can change. | |||
21627 | return DAG.getNode(IntrData->Opc0, dl, VT, Op.getOperand(1), | |||
21628 | DAG.getIntPtrConstant(0, dl)); | |||
21629 | case CVTPD2PS_MASK: { | |||
21630 | SDValue Src = Op.getOperand(1); | |||
21631 | SDValue PassThru = Op.getOperand(2); | |||
21632 | SDValue Mask = Op.getOperand(3); | |||
21633 | // We add a rounding mode to the node when | |||
21634 | // - RM Opcode is specified and | |||
21635 | // - RM is not "current direction". | |||
21636 | unsigned IntrWithRoundingModeOpcode = IntrData->Opc1; | |||
21637 | if (IntrWithRoundingModeOpcode != 0) { | |||
21638 | SDValue Rnd = Op.getOperand(4); | |||
21639 | if (!isRoundModeCurDirection(Rnd)) { | |||
21640 | return getVectorMaskingNode(DAG.getNode(IntrWithRoundingModeOpcode, | |||
21641 | dl, Op.getValueType(), | |||
21642 | Src, Rnd), | |||
21643 | Mask, PassThru, Subtarget, DAG); | |||
21644 | } | |||
21645 | } | |||
21646 | assert(IntrData->Opc0 == ISD::FP_ROUND && "Unexpected opcode!"); | |||
21647 | // ISD::FP_ROUND has a second argument that indicates if the truncation | |||
21648 | // does not change the value. Set it to 0 since it can change. | |||
21649 | return getVectorMaskingNode(DAG.getNode(IntrData->Opc0, dl, VT, Src, | |||
21650 | DAG.getIntPtrConstant(0, dl)), | |||
21651 | Mask, PassThru, Subtarget, DAG); | |||
21652 | } | |||
21653 | case FPCLASS: { | |||
21654 | // FPclass intrinsics | |||
21655 | SDValue Src1 = Op.getOperand(1); | |||
21656 | MVT MaskVT = Op.getSimpleValueType(); | |||
21657 | SDValue Imm = Op.getOperand(2); | |||
21658 | return DAG.getNode(IntrData->Opc0, dl, MaskVT, Src1, Imm); | |||
21659 | } | |||
21660 | case FPCLASSS: { | |||
21661 | SDValue Src1 = Op.getOperand(1); | |||
21662 | SDValue Imm = Op.getOperand(2); | |||
21663 | SDValue Mask = Op.getOperand(3); | |||
21664 | SDValue FPclass = DAG.getNode(IntrData->Opc0, dl, MVT::v1i1, Src1, Imm); | |||
21665 | SDValue FPclassMask = getScalarMaskingNode(FPclass, Mask, SDValue(), | |||
21666 | Subtarget, DAG); | |||
21667 | // Need to fill with zeros to ensure the bitcast will produce zeroes | |||
21668 | // for the upper bits. An EXTRACT_ELEMENT here wouldn't guarantee that. | |||
21669 | SDValue Ins = DAG.getNode(ISD::INSERT_SUBVECTOR, dl, MVT::v8i1, | |||
21670 | DAG.getConstant(0, dl, MVT::v8i1), | |||
21671 | FPclassMask, DAG.getIntPtrConstant(0, dl)); | |||
21672 | return DAG.getBitcast(MVT::i8, Ins); | |||
21673 | } | |||
21674 | case CMP_MASK: { | |||
21675 | // Comparison intrinsics with masks. | |||
21676 | // Example of transformation: | |||
21677 | // (i8 (int_x86_avx512_mask_pcmpeq_q_128 | |||
21678 | // (v2i64 %a), (v2i64 %b), (i8 %mask))) -> | |||
21679 | // (i8 (bitcast | |||
21680 | // (v8i1 (insert_subvector zero, | |||
21681 | // (v2i1 (and (PCMPEQM %a, %b), | |||
21682 | // (extract_subvector | |||
21683 | // (v8i1 (bitcast %mask)), 0))), 0)))) | |||
21684 | MVT VT = Op.getOperand(1).getSimpleValueType(); | |||
21685 | MVT MaskVT = MVT::getVectorVT(MVT::i1, VT.getVectorNumElements()); | |||
21686 | SDValue Mask = Op.getOperand((IntrData->Type == CMP_MASK_CC) ? 4 : 3); | |||
21687 | MVT BitcastVT = MVT::getVectorVT(MVT::i1, | |||
21688 | Mask.getSimpleValueType().getSizeInBits()); | |||
21689 | SDValue Cmp = DAG.getNode(IntrData->Opc0, dl, MaskVT, Op.getOperand(1), | |||
21690 | Op.getOperand(2)); | |||
21691 | SDValue CmpMask = getVectorMaskingNode(Cmp, Mask, SDValue(), | |||
21692 | Subtarget, DAG); | |||
21693 | // Need to fill with zeros to ensure the bitcast will produce zeroes | |||
21694 | // for the upper bits in the v2i1/v4i1 case. | |||
21695 | SDValue Res = DAG.getNode(ISD::INSERT_SUBVECTOR, dl, BitcastVT, | |||
21696 | DAG.getConstant(0, dl, BitcastVT), | |||
21697 | CmpMask, DAG.getIntPtrConstant(0, dl)); | |||
21698 | return DAG.getBitcast(Op.getValueType(), Res); | |||
21699 | } | |||
21700 | ||||
21701 | case CMP_MASK_CC: { | |||
21702 | MVT MaskVT = Op.getSimpleValueType(); | |||
21703 | SDValue Cmp; | |||
21704 | SDValue CC = Op.getOperand(3); | |||
21705 | CC = DAG.getNode(ISD::TRUNCATE, dl, MVT::i8, CC); | |||
21706 | // We specify 2 possible opcodes for intrinsics with rounding modes. | |||
21707 | // First, we check if the intrinsic may have a non-default rounding mode | |||
21708 | // (IntrData->Opc1 != 0), then we check the rounding mode operand. | |||
21709 | if (IntrData->Opc1 != 0) { | |||
21710 | SDValue Rnd = Op.getOperand(4); | |||
21711 | if (!isRoundModeCurDirection(Rnd)) | |||
21712 | Cmp = DAG.getNode(IntrData->Opc1, dl, MaskVT, Op.getOperand(1), | |||
21713 | Op.getOperand(2), CC, Rnd); | |||
21714 | } | |||
21715 | // Default rounding mode. | |||
21716 | if (!Cmp.getNode()) | |||
21717 | Cmp = DAG.getNode(IntrData->Opc0, dl, MaskVT, Op.getOperand(1), | |||
21718 | Op.getOperand(2), CC); | |||
21719 | ||||
21720 | return Cmp; | |||
21721 | } | |||
21722 | case CMP_MASK_SCALAR_CC: { | |||
21723 | SDValue Src1 = Op.getOperand(1); | |||
21724 | SDValue Src2 = Op.getOperand(2); | |||
21725 | SDValue CC = DAG.getNode(ISD::TRUNCATE, dl, MVT::i8, Op.getOperand(3)); | |||
21726 | SDValue Mask = Op.getOperand(4); | |||
21727 | ||||
21728 | SDValue Cmp; | |||
21729 | if (IntrData->Opc1 != 0) { | |||
21730 | SDValue Rnd = Op.getOperand(5); | |||
21731 | if (!isRoundModeCurDirection(Rnd)) | |||
21732 | Cmp = DAG.getNode(IntrData->Opc1, dl, MVT::v1i1, Src1, Src2, CC, Rnd); | |||
21733 | } | |||
21734 | // Default rounding mode. | |||
21735 | if (!Cmp.getNode()) | |||
21736 | Cmp = DAG.getNode(IntrData->Opc0, dl, MVT::v1i1, Src1, Src2, CC); | |||
21737 | ||||
21738 | SDValue CmpMask = getScalarMaskingNode(Cmp, Mask, SDValue(), | |||
21739 | Subtarget, DAG); | |||
21740 | // Need to fill with zeros to ensure the bitcast will produce zeroes | |||
21741 | // for the upper bits. An EXTRACT_ELEMENT here wouldn't guarantee that. | |||
21742 | SDValue Ins = DAG.getNode(ISD::INSERT_SUBVECTOR, dl, MVT::v8i1, | |||
21743 | DAG.getConstant(0, dl, MVT::v8i1), | |||
21744 | CmpMask, DAG.getIntPtrConstant(0, dl)); | |||
21745 | return DAG.getBitcast(MVT::i8, Ins); | |||
21746 | } | |||
21747 | case COMI: { // Comparison intrinsics | |||
21748 | ISD::CondCode CC = (ISD::CondCode)IntrData->Opc1; | |||
21749 | SDValue LHS = Op.getOperand(1); | |||
21750 | SDValue RHS = Op.getOperand(2); | |||
21751 | SDValue Comi = DAG.getNode(IntrData->Opc0, dl, MVT::i32, LHS, RHS); | |||
21752 | SDValue InvComi = DAG.getNode(IntrData->Opc0, dl, MVT::i32, RHS, LHS); | |||
21753 | SDValue SetCC; | |||
21754 | switch (CC) { | |||
21755 | case ISD::SETEQ: { // (ZF = 0 and PF = 0) | |||
21756 | SetCC = getSETCC(X86::COND_E, Comi, dl, DAG); | |||
21757 | SDValue SetNP = getSETCC(X86::COND_NP, Comi, dl, DAG); | |||
21758 | SetCC = DAG.getNode(ISD::AND, dl, MVT::i8, SetCC, SetNP); | |||
21759 | break; | |||
21760 | } | |||
21761 | case ISD::SETNE: { // (ZF = 1 or PF = 1) | |||
21762 | SetCC = getSETCC(X86::COND_NE, Comi, dl, DAG); | |||
21763 | SDValue SetP = getSETCC(X86::COND_P, Comi, dl, DAG); | |||
21764 | SetCC = DAG.getNode(ISD::OR, dl, MVT::i8, SetCC, SetP); | |||
21765 | break; | |||
21766 | } | |||
21767 | case ISD::SETGT: // (CF = 0 and ZF = 0) | |||
21768 | SetCC = getSETCC(X86::COND_A, Comi, dl, DAG); | |||
21769 | break; | |||
21770 | case ISD::SETLT: { // The condition is opposite to GT. Swap the operands. | |||
21771 | SetCC = getSETCC(X86::COND_A, InvComi, dl, DAG); | |||
21772 | break; | |||
21773 | } | |||
21774 | case ISD::SETGE: // CF = 0 | |||
21775 | SetCC = getSETCC(X86::COND_AE, Comi, dl, DAG); | |||
21776 | break; | |||
21777 | case ISD::SETLE: // The condition is opposite to GE. Swap the operands. | |||
21778 | SetCC = getSETCC(X86::COND_AE, InvComi, dl, DAG); | |||
21779 | break; | |||
21780 | default: | |||
21781 | llvm_unreachable("Unexpected illegal condition!")::llvm::llvm_unreachable_internal("Unexpected illegal condition!" , "/build/llvm-toolchain-snapshot-8~svn350071/lib/Target/X86/X86ISelLowering.cpp" , 21781); | |||
21782 | } | |||
21783 | return DAG.getNode(ISD::ZERO_EXTEND, dl, MVT::i32, SetCC); | |||
21784 | } | |||
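// Background for the PF handling above: (u)comis{s,d} reports an unordered
// compare (a NaN operand) as ZF = PF = CF = 1. A bare SETE would therefore
// claim equality for NaNs, so SETEQ also requires PF = 0 and SETNE accepts
// PF = 1; e.g. a hypothetical _mm_comieq_sd(NaN, NaN) must return 0.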
21785 | case COMI_RM: { // Comparison intrinsics with Sae | |||
21786 | SDValue LHS = Op.getOperand(1); | |||
21787 | SDValue RHS = Op.getOperand(2); | |||
21788 | unsigned CondVal = cast<ConstantSDNode>(Op.getOperand(3))->getZExtValue(); | |||
21789 | SDValue Sae = Op.getOperand(4); | |||
21790 | ||||
21791 | SDValue FCmp; | |||
21792 | if (isRoundModeCurDirection(Sae)) | |||
21793 | FCmp = DAG.getNode(X86ISD::FSETCCM, dl, MVT::v1i1, LHS, RHS, | |||
21794 | DAG.getConstant(CondVal, dl, MVT::i8)); | |||
21795 | else | |||
21796 | FCmp = DAG.getNode(X86ISD::FSETCCM_RND, dl, MVT::v1i1, LHS, RHS, | |||
21797 | DAG.getConstant(CondVal, dl, MVT::i8), Sae); | |||
21798 | // Need to fill with zeros to ensure the bitcast will produce zeroes | |||
21799 | // for the upper bits. An EXTRACT_ELEMENT here wouldn't guarantee that. | |||
21800 | SDValue Ins = DAG.getNode(ISD::INSERT_SUBVECTOR, dl, MVT::v16i1, | |||
21801 | DAG.getConstant(0, dl, MVT::v16i1), | |||
21802 | FCmp, DAG.getIntPtrConstant(0, dl)); | |||
21803 | return DAG.getNode(ISD::ZERO_EXTEND, dl, MVT::i32, | |||
21804 | DAG.getBitcast(MVT::i16, Ins)); | |||
21805 | } | |||
21806 | case VSHIFT: | |||
21807 | return getTargetVShiftNode(IntrData->Opc0, dl, Op.getSimpleValueType(), | |||
21808 | Op.getOperand(1), Op.getOperand(2), Subtarget, | |||
21809 | DAG); | |||
21810 | case COMPRESS_EXPAND_IN_REG: { | |||
21811 | SDValue Mask = Op.getOperand(3); | |||
21812 | SDValue DataToCompress = Op.getOperand(1); | |||
21813 | SDValue PassThru = Op.getOperand(2); | |||
21814 | if (isAllOnesConstant(Mask)) // return data as is | |||
21815 | return Op.getOperand(1); | |||
21816 | ||||
21817 | return getVectorMaskingNode(DAG.getNode(IntrData->Opc0, dl, VT, | |||
21818 | DataToCompress), | |||
21819 | Mask, PassThru, Subtarget, DAG); | |||
21820 | } | |||
21821 | case FIXUPIMMS: | |||
21822 | case FIXUPIMMS_MASKZ: | |||
21823 | case FIXUPIMM: | |||
21824 | case FIXUPIMM_MASKZ: { | |||
21825 | SDValue Src1 = Op.getOperand(1); | |||
21826 | SDValue Src2 = Op.getOperand(2); | |||
21827 | SDValue Src3 = Op.getOperand(3); | |||
21828 | SDValue Imm = Op.getOperand(4); | |||
21829 | SDValue Mask = Op.getOperand(5); | |||
21830 | SDValue Passthru = (IntrData->Type == FIXUPIMM || IntrData->Type == FIXUPIMMS) ? | |||
21831 | Src1 : getZeroVector(VT, Subtarget, DAG, dl); | |||
21832 | // We specify 2 possible modes for intrinsics, with/without rounding | |||
21833 | // modes. | |||
21834 | // First, we check if the intrinsic has a rounding mode (7 operands); | |||
21835 | // if not, we set the rounding mode to "current". | |||
21836 | SDValue Rnd; | |||
21837 | if (Op.getNumOperands() == 7) | |||
21838 | Rnd = Op.getOperand(6); | |||
21839 | else | |||
21840 | Rnd = DAG.getConstant(X86::STATIC_ROUNDING::CUR_DIRECTION, dl, MVT::i32); | |||
21841 | if (IntrData->Type == FIXUPIMM || IntrData->Type == FIXUPIMM_MASKZ) | |||
21842 | return getVectorMaskingNode(DAG.getNode(IntrData->Opc0, dl, VT, | |||
21843 | Src1, Src2, Src3, Imm, Rnd), | |||
21844 | Mask, Passthru, Subtarget, DAG); | |||
21845 | else // Scalar - FIXUPIMMS, FIXUPIMMS_MASKZ | |||
21846 | return getScalarMaskingNode(DAG.getNode(IntrData->Opc0, dl, VT, | |||
21847 | Src1, Src2, Src3, Imm, Rnd), | |||
21848 | Mask, Passthru, Subtarget, DAG); | |||
21849 | } | |||
21850 | case ROUNDP: { | |||
21851 | assert(IntrData->Opc0 == X86ISD::VRNDSCALE && "Unexpected opcode"); | |||
21852 | // Clear the upper bits of the rounding immediate so that the legacy | |||
21853 | // intrinsic can't trigger the scaling behavior of VRNDSCALE. | |||
21854 | SDValue RoundingMode = DAG.getNode(ISD::AND, dl, MVT::i32, | |||
21855 | Op.getOperand(2), | |||
21856 | DAG.getConstant(0xf, dl, MVT::i32)); | |||
21857 | return DAG.getNode(IntrData->Opc0, dl, Op.getValueType(), | |||
21858 | Op.getOperand(1), RoundingMode); | |||
21859 | } | |||
21860 | case ROUNDS: { | |||
21861 | assert(IntrData->Opc0 == X86ISD::VRNDSCALES && "Unexpected opcode"); | |||
21862 | // Clear the upper bits of the rounding immediate so that the legacy | |||
21863 | // intrinsic can't trigger the scaling behavior of VRNDSCALE. | |||
21864 | SDValue RoundingMode = DAG.getNode(ISD::AND, dl, MVT::i32, | |||
21865 | Op.getOperand(3), | |||
21866 | DAG.getConstant(0xf, dl, MVT::i32)); | |||
21867 | return DAG.getNode(IntrData->Opc0, dl, Op.getValueType(), | |||
21868 | Op.getOperand(1), Op.getOperand(2), RoundingMode); | |||
21869 | } | |||
21870 | // ADC/ADCX/SBB | |||
21871 | case ADX: { | |||
21872 | SDVTList CFVTs = DAG.getVTList(Op->getValueType(0), MVT::i32); | |||
21873 | SDVTList VTs = DAG.getVTList(Op.getOperand(2).getValueType(), MVT::i32); | |||
21874 | ||||
21875 | SDValue Res; | |||
21876 | // If the carry in is zero, then we should just use ADD/SUB instead of | |||
21877 | // ADC/SBB. | |||
21878 | if (isNullConstant(Op.getOperand(1))) { | |||
21879 | Res = DAG.getNode(IntrData->Opc1, dl, VTs, Op.getOperand(2), | |||
21880 | Op.getOperand(3)); | |||
21881 | } else { | |||
21882 | SDValue GenCF = DAG.getNode(X86ISD::ADD, dl, CFVTs, Op.getOperand(1), | |||
21883 | DAG.getConstant(-1, dl, MVT::i8)); | |||
21884 | Res = DAG.getNode(IntrData->Opc0, dl, VTs, Op.getOperand(2), | |||
21885 | Op.getOperand(3), GenCF.getValue(1)); | |||
21886 | } | |||
21887 | SDValue SetCC = getSETCC(X86::COND_B, Res.getValue(1), dl, DAG); | |||
21888 | SDValue Results[] = { SetCC, Res }; | |||
21889 | return DAG.getMergeValues(Results, dl); | |||
21890 | } | |||
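// Illustrative mapping, assuming the usual <immintrin.h> entry points: with
// a constant-zero carry-in, _addcarry_u32(0, a, b, &out) takes the
// isNullConstant branch above and lowers to a plain ADD, while a variable
// carry-in is first materialized into EFLAGS.CF via the (add %c, -1) trick
// and then feeds ADC.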
21891 | default: | |||
21892 | break; | |||
21893 | } | |||
21894 | } | |||
21895 | ||||
21896 | switch (IntNo) { | |||
21897 | default: return SDValue(); // Don't custom lower most intrinsics. | |||
21898 | ||||
21899 | // ptest and testp intrinsics. The intrinsics these come from are designed to | |||
21900 | // return an integer value, not just an instruction, so lower them to the | |||
21901 | // ptest or testp pattern and a setcc for the result. | |||
21902 | case Intrinsic::x86_avx512_ktestc_b: | |||
21903 | case Intrinsic::x86_avx512_ktestc_w: | |||
21904 | case Intrinsic::x86_avx512_ktestc_d: | |||
21905 | case Intrinsic::x86_avx512_ktestc_q: | |||
21906 | case Intrinsic::x86_avx512_ktestz_b: | |||
21907 | case Intrinsic::x86_avx512_ktestz_w: | |||
21908 | case Intrinsic::x86_avx512_ktestz_d: | |||
21909 | case Intrinsic::x86_avx512_ktestz_q: | |||
21910 | case Intrinsic::x86_sse41_ptestz: | |||
21911 | case Intrinsic::x86_sse41_ptestc: | |||
21912 | case Intrinsic::x86_sse41_ptestnzc: | |||
21913 | case Intrinsic::x86_avx_ptestz_256: | |||
21914 | case Intrinsic::x86_avx_ptestc_256: | |||
21915 | case Intrinsic::x86_avx_ptestnzc_256: | |||
21916 | case Intrinsic::x86_avx_vtestz_ps: | |||
21917 | case Intrinsic::x86_avx_vtestc_ps: | |||
21918 | case Intrinsic::x86_avx_vtestnzc_ps: | |||
21919 | case Intrinsic::x86_avx_vtestz_pd: | |||
21920 | case Intrinsic::x86_avx_vtestc_pd: | |||
21921 | case Intrinsic::x86_avx_vtestnzc_pd: | |||
21922 | case Intrinsic::x86_avx_vtestz_ps_256: | |||
21923 | case Intrinsic::x86_avx_vtestc_ps_256: | |||
21924 | case Intrinsic::x86_avx_vtestnzc_ps_256: | |||
21925 | case Intrinsic::x86_avx_vtestz_pd_256: | |||
21926 | case Intrinsic::x86_avx_vtestc_pd_256: | |||
21927 | case Intrinsic::x86_avx_vtestnzc_pd_256: { | |||
21928 | unsigned TestOpc = X86ISD::PTEST; | |||
21929 | X86::CondCode X86CC; | |||
21930 | switch (IntNo) { | |||
21931 | default: llvm_unreachable("Bad fallthrough in Intrinsic lowering.")::llvm::llvm_unreachable_internal("Bad fallthrough in Intrinsic lowering." , "/build/llvm-toolchain-snapshot-8~svn350071/lib/Target/X86/X86ISelLowering.cpp" , 21931); | |||
21932 | case Intrinsic::x86_avx512_ktestc_b: | |||
21933 | case Intrinsic::x86_avx512_ktestc_w: | |||
21934 | case Intrinsic::x86_avx512_ktestc_d: | |||
21935 | case Intrinsic::x86_avx512_ktestc_q: | |||
21936 | // CF = 1 | |||
21937 | TestOpc = X86ISD::KTEST; | |||
21938 | X86CC = X86::COND_B; | |||
21939 | break; | |||
21940 | case Intrinsic::x86_avx512_ktestz_b: | |||
21941 | case Intrinsic::x86_avx512_ktestz_w: | |||
21942 | case Intrinsic::x86_avx512_ktestz_d: | |||
21943 | case Intrinsic::x86_avx512_ktestz_q: | |||
21944 | TestOpc = X86ISD::KTEST; | |||
21945 | X86CC = X86::COND_E; | |||
21946 | break; | |||
21947 | case Intrinsic::x86_avx_vtestz_ps: | |||
21948 | case Intrinsic::x86_avx_vtestz_pd: | |||
21949 | case Intrinsic::x86_avx_vtestz_ps_256: | |||
21950 | case Intrinsic::x86_avx_vtestz_pd_256: | |||
21951 | TestOpc = X86ISD::TESTP; | |||
21952 | LLVM_FALLTHROUGH; | |||
21953 | case Intrinsic::x86_sse41_ptestz: | |||
21954 | case Intrinsic::x86_avx_ptestz_256: | |||
21955 | // ZF = 1 | |||
21956 | X86CC = X86::COND_E; | |||
21957 | break; | |||
21958 | case Intrinsic::x86_avx_vtestc_ps: | |||
21959 | case Intrinsic::x86_avx_vtestc_pd: | |||
21960 | case Intrinsic::x86_avx_vtestc_ps_256: | |||
21961 | case Intrinsic::x86_avx_vtestc_pd_256: | |||
21962 | TestOpc = X86ISD::TESTP; | |||
21963 | LLVM_FALLTHROUGH; | |||
21964 | case Intrinsic::x86_sse41_ptestc: | |||
21965 | case Intrinsic::x86_avx_ptestc_256: | |||
21966 | // CF = 1 | |||
21967 | X86CC = X86::COND_B; | |||
21968 | break; | |||
21969 | case Intrinsic::x86_avx_vtestnzc_ps: | |||
21970 | case Intrinsic::x86_avx_vtestnzc_pd: | |||
21971 | case Intrinsic::x86_avx_vtestnzc_ps_256: | |||
21972 | case Intrinsic::x86_avx_vtestnzc_pd_256: | |||
21973 | TestOpc = X86ISD::TESTP; | |||
21974 | LLVM_FALLTHROUGH; | |||
21975 | case Intrinsic::x86_sse41_ptestnzc: | |||
21976 | case Intrinsic::x86_avx_ptestnzc_256: | |||
21977 | // ZF and CF = 0 | |||
21978 | X86CC = X86::COND_A; | |||
21979 | break; | |||
21980 | } | |||
21981 | ||||
21982 | SDValue LHS = Op.getOperand(1); | |||
21983 | SDValue RHS = Op.getOperand(2); | |||
21984 | SDValue Test = DAG.getNode(TestOpc, dl, MVT::i32, LHS, RHS); | |||
21985 | SDValue SetCC = getSETCC(X86CC, Test, dl, DAG); | |||
21986 | return DAG.getNode(ISD::ZERO_EXTEND, dl, MVT::i32, SetCC); | |||
21987 | } | |||
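// Illustrative mapping, assuming the usual SSE4.1 intrinsic name: e.g.
// _mm_testz_si128(a, b) lowers to (X86ISD::PTEST a, b) followed by a
// COND_E setcc that is zero-extended to i32, matching the ZF case above.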
21988 | ||||
21989 | case Intrinsic::x86_sse42_pcmpistria128: | |||
21990 | case Intrinsic::x86_sse42_pcmpestria128: | |||
21991 | case Intrinsic::x86_sse42_pcmpistric128: | |||
21992 | case Intrinsic::x86_sse42_pcmpestric128: | |||
21993 | case Intrinsic::x86_sse42_pcmpistrio128: | |||
21994 | case Intrinsic::x86_sse42_pcmpestrio128: | |||
21995 | case Intrinsic::x86_sse42_pcmpistris128: | |||
21996 | case Intrinsic::x86_sse42_pcmpestris128: | |||
21997 | case Intrinsic::x86_sse42_pcmpistriz128: | |||
21998 | case Intrinsic::x86_sse42_pcmpestriz128: { | |||
21999 | unsigned Opcode; | |||
22000 | X86::CondCode X86CC; | |||
22001 | switch (IntNo) { | |||
22002 | default: llvm_unreachable("Impossible intrinsic")::llvm::llvm_unreachable_internal("Impossible intrinsic", "/build/llvm-toolchain-snapshot-8~svn350071/lib/Target/X86/X86ISelLowering.cpp" , 22002); // Can't reach here. | |||
22003 | case Intrinsic::x86_sse42_pcmpistria128: | |||
22004 | Opcode = X86ISD::PCMPISTR; | |||
22005 | X86CC = X86::COND_A; | |||
22006 | break; | |||
22007 | case Intrinsic::x86_sse42_pcmpestria128: | |||
22008 | Opcode = X86ISD::PCMPESTR; | |||
22009 | X86CC = X86::COND_A; | |||
22010 | break; | |||
22011 | case Intrinsic::x86_sse42_pcmpistric128: | |||
22012 | Opcode = X86ISD::PCMPISTR; | |||
22013 | X86CC = X86::COND_B; | |||
22014 | break; | |||
22015 | case Intrinsic::x86_sse42_pcmpestric128: | |||
22016 | Opcode = X86ISD::PCMPESTR; | |||
22017 | X86CC = X86::COND_B; | |||
22018 | break; | |||
22019 | case Intrinsic::x86_sse42_pcmpistrio128: | |||
22020 | Opcode = X86ISD::PCMPISTR; | |||
22021 | X86CC = X86::COND_O; | |||
22022 | break; | |||
22023 | case Intrinsic::x86_sse42_pcmpestrio128: | |||
22024 | Opcode = X86ISD::PCMPESTR; | |||
22025 | X86CC = X86::COND_O; | |||
22026 | break; | |||
22027 | case Intrinsic::x86_sse42_pcmpistris128: | |||
22028 | Opcode = X86ISD::PCMPISTR; | |||
22029 | X86CC = X86::COND_S; | |||
22030 | break; | |||
22031 | case Intrinsic::x86_sse42_pcmpestris128: | |||
22032 | Opcode = X86ISD::PCMPESTR; | |||
22033 | X86CC = X86::COND_S; | |||
22034 | break; | |||
22035 | case Intrinsic::x86_sse42_pcmpistriz128: | |||
22036 | Opcode = X86ISD::PCMPISTR; | |||
22037 | X86CC = X86::COND_E; | |||
22038 | break; | |||
22039 | case Intrinsic::x86_sse42_pcmpestriz128: | |||
22040 | Opcode = X86ISD::PCMPESTR; | |||
22041 | X86CC = X86::COND_E; | |||
22042 | break; | |||
22043 | } | |||
22044 | SmallVector<SDValue, 5> NewOps(Op->op_begin()+1, Op->op_end()); | |||
22045 | SDVTList VTs = DAG.getVTList(MVT::i32, MVT::v16i8, MVT::i32); | |||
22046 | SDValue PCMP = DAG.getNode(Opcode, dl, VTs, NewOps).getValue(2); | |||
22047 | SDValue SetCC = getSETCC(X86CC, PCMP, dl, DAG); | |||
22048 | return DAG.getNode(ISD::ZERO_EXTEND, dl, MVT::i32, SetCC); | |||
22049 | } | |||
22050 | ||||
22051 | case Intrinsic::x86_sse42_pcmpistri128: | |||
22052 | case Intrinsic::x86_sse42_pcmpestri128: { | |||
22053 | unsigned Opcode; | |||
22054 | if (IntNo == Intrinsic::x86_sse42_pcmpistri128) | |||
22055 | Opcode = X86ISD::PCMPISTR; | |||
22056 | else | |||
22057 | Opcode = X86ISD::PCMPESTR; | |||
22058 | ||||
22059 | SmallVector<SDValue, 5> NewOps(Op->op_begin()+1, Op->op_end()); | |||
22060 | SDVTList VTs = DAG.getVTList(MVT::i32, MVT::v16i8, MVT::i32); | |||
22061 | return DAG.getNode(Opcode, dl, VTs, NewOps); | |||
22062 | } | |||
22063 | ||||
22064 | case Intrinsic::x86_sse42_pcmpistrm128: | |||
22065 | case Intrinsic::x86_sse42_pcmpestrm128: { | |||
22066 | unsigned Opcode; | |||
22067 | if (IntNo == Intrinsic::x86_sse42_pcmpistrm128) | |||
22068 | Opcode = X86ISD::PCMPISTR; | |||
22069 | else | |||
22070 | Opcode = X86ISD::PCMPESTR; | |||
22071 | ||||
22072 | SmallVector<SDValue, 5> NewOps(Op->op_begin()+1, Op->op_end()); | |||
22073 | SDVTList VTs = DAG.getVTList(MVT::i32, MVT::v16i8, MVT::i32); | |||
22074 | return DAG.getNode(Opcode, dl, VTs, NewOps).getValue(1); | |||
22075 | } | |||
22076 | ||||
22077 | case Intrinsic::eh_sjlj_lsda: { | |||
22078 | MachineFunction &MF = DAG.getMachineFunction(); | |||
22079 | const TargetLowering &TLI = DAG.getTargetLoweringInfo(); | |||
22080 | MVT PtrVT = TLI.getPointerTy(DAG.getDataLayout()); | |||
22081 | auto &Context = MF.getMMI().getContext(); | |||
22082 | MCSymbol *S = Context.getOrCreateSymbol(Twine("GCC_except_table") + | |||
22083 | Twine(MF.getFunctionNumber())); | |||
22084 | return DAG.getNode(getGlobalWrapperKind(), dl, VT, | |||
22085 | DAG.getMCSymbol(S, PtrVT)); | |||
22086 | } | |||
22087 | ||||
22088 | case Intrinsic::x86_seh_lsda: { | |||
22089 | // Compute the symbol for the LSDA. We know it'll get emitted later. | |||
22090 | MachineFunction &MF = DAG.getMachineFunction(); | |||
22091 | SDValue Op1 = Op.getOperand(1); | |||
22092 | auto *Fn = cast<Function>(cast<GlobalAddressSDNode>(Op1)->getGlobal()); | |||
22093 | MCSymbol *LSDASym = MF.getMMI().getContext().getOrCreateLSDASymbol( | |||
22094 | GlobalValue::dropLLVMManglingEscape(Fn->getName())); | |||
22095 | ||||
22096 | // Generate a simple absolute symbol reference. This intrinsic is only | |||
22097 | // supported on 32-bit Windows, which isn't PIC. | |||
22098 | SDValue Result = DAG.getMCSymbol(LSDASym, VT); | |||
22099 | return DAG.getNode(X86ISD::Wrapper, dl, VT, Result); | |||
22100 | } | |||
22101 | ||||
22102 | case Intrinsic::x86_seh_recoverfp: { | |||
22103 | SDValue FnOp = Op.getOperand(1); | |||
22104 | SDValue IncomingFPOp = Op.getOperand(2); | |||
22105 | GlobalAddressSDNode *GSD = dyn_cast<GlobalAddressSDNode>(FnOp); | |||
22106 | auto *Fn = dyn_cast_or_null<Function>(GSD ? GSD->getGlobal() : nullptr); | |||
22107 | if (!Fn) | |||
22108 | report_fatal_error( | |||
22109 | "llvm.x86.seh.recoverfp must take a function as the first argument"); | |||
22110 | return recoverFramePointer(DAG, Fn, IncomingFPOp); | |||
22111 | } | |||
22112 | ||||
22113 | case Intrinsic::localaddress: { | |||
22114 | // Returns one of the stack, base, or frame pointer registers, depending on | |||
22115 | // which is used to reference local variables. | |||
22116 | MachineFunction &MF = DAG.getMachineFunction(); | |||
22117 | const X86RegisterInfo *RegInfo = Subtarget.getRegisterInfo(); | |||
22118 | unsigned Reg; | |||
22119 | if (RegInfo->hasBasePointer(MF)) | |||
22120 | Reg = RegInfo->getBaseRegister(); | |||
22121 | else // This function handles the SP or FP case. | |||
22122 | Reg = RegInfo->getPtrSizedFrameRegister(MF); | |||
22123 | return DAG.getCopyFromReg(DAG.getEntryNode(), dl, Reg, VT); | |||
22124 | } | |||
22125 | } | |||
22126 | } | |||
22127 | ||||
22128 | static SDValue getAVX2GatherNode(unsigned Opc, SDValue Op, SelectionDAG &DAG, | |||
22129 | SDValue Src, SDValue Mask, SDValue Base, | |||
22130 | SDValue Index, SDValue ScaleOp, SDValue Chain, | |||
22131 | const X86Subtarget &Subtarget) { | |||
22132 | SDLoc dl(Op); | |||
22133 | auto *C = dyn_cast<ConstantSDNode>(ScaleOp); | |||
22134 | // Scale must be constant. | |||
22135 | if (!C) | |||
22136 | return SDValue(); | |||
22137 | SDValue Scale = DAG.getTargetConstant(C->getZExtValue(), dl, MVT::i8); | |||
22138 | EVT MaskVT = Mask.getValueType(); | |||
22139 | SDVTList VTs = DAG.getVTList(Op.getValueType(), MaskVT, MVT::Other); | |||
22140 | SDValue Disp = DAG.getTargetConstant(0, dl, MVT::i32); | |||
22141 | SDValue Segment = DAG.getRegister(0, MVT::i32); | |||
22142 | // If source is undef or we know it won't be used, use a zero vector | |||
22143 | // to break register dependency. | |||
22144 | // TODO: use undef instead and let BreakFalseDeps deal with it? | |||
22145 | if (Src.isUndef() || ISD::isBuildVectorAllOnes(Mask.getNode())) | |||
22146 | Src = getZeroVector(Op.getSimpleValueType(), Subtarget, DAG, dl); | |||
22147 | SDValue Ops[] = {Src, Base, Scale, Index, Disp, Segment, Mask, Chain}; | |||
22148 | SDNode *Res = DAG.getMachineNode(Opc, dl, VTs, Ops); | |||
22149 | SDValue RetOps[] = { SDValue(Res, 0), SDValue(Res, 2) }; | |||
22150 | return DAG.getMergeValues(RetOps, dl); | |||
22151 | } | |||
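// The dependency-breaking above, illustrated with a hypothetical vpgatherdd
// whose mask is all-ones: every destination lane is overwritten, so the
// incoming Src is dead and can be replaced by a zero vector, keeping the
// gather from stalling on whatever last wrote that register.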
22152 | ||||
22153 | static SDValue getGatherNode(unsigned Opc, SDValue Op, SelectionDAG &DAG, | |||
22154 | SDValue Src, SDValue Mask, SDValue Base, | |||
22155 | SDValue Index, SDValue ScaleOp, SDValue Chain, | |||
22156 | const X86Subtarget &Subtarget) { | |||
22157 | SDLoc dl(Op); | |||
22158 | auto *C = dyn_cast<ConstantSDNode>(ScaleOp); | |||
22159 | // Scale must be constant. | |||
22160 | if (!C) | |||
22161 | return SDValue(); | |||
22162 | SDValue Scale = DAG.getTargetConstant(C->getZExtValue(), dl, MVT::i8); | |||
22163 | MVT MaskVT = MVT::getVectorVT(MVT::i1, | |||
22164 | Index.getSimpleValueType().getVectorNumElements()); | |||
22165 | ||||
22166 | SDValue VMask = getMaskNode(Mask, MaskVT, Subtarget, DAG, dl); | |||
22167 | SDVTList VTs = DAG.getVTList(Op.getValueType(), MaskVT, MVT::Other); | |||
22168 | SDValue Disp = DAG.getTargetConstant(0, dl, MVT::i32); | |||
22169 | SDValue Segment = DAG.getRegister(0, MVT::i32); | |||
22170 | // If source is undef or we know it won't be used, use a zero vector | |||
22171 | // to break register dependency. | |||
22172 | // TODO: use undef instead and let BreakFalseDeps deal with it? | |||
22173 | if (Src.isUndef() || ISD::isBuildVectorAllOnes(VMask.getNode())) | |||
22174 | Src = getZeroVector(Op.getSimpleValueType(), Subtarget, DAG, dl); | |||
22175 | SDValue Ops[] = {Src, VMask, Base, Scale, Index, Disp, Segment, Chain}; | |||
22176 | SDNode *Res = DAG.getMachineNode(Opc, dl, VTs, Ops); | |||
22177 | SDValue RetOps[] = { SDValue(Res, 0), SDValue(Res, 2) }; | |||
22178 | return DAG.getMergeValues(RetOps, dl); | |||
22179 | } | |||
22180 | ||||
22181 | static SDValue getScatterNode(unsigned Opc, SDValue Op, SelectionDAG &DAG, | |||
22182 | SDValue Src, SDValue Mask, SDValue Base, | |||
22183 | SDValue Index, SDValue ScaleOp, SDValue Chain, | |||
22184 | const X86Subtarget &Subtarget) { | |||
22185 | SDLoc dl(Op); | |||
22186 | auto *C = dyn_cast<ConstantSDNode>(ScaleOp); | |||
22187 | // Scale must be constant. | |||
22188 | if (!C) | |||
22189 | return SDValue(); | |||
22190 | SDValue Scale = DAG.getTargetConstant(C->getZExtValue(), dl, MVT::i8); | |||
22191 | SDValue Disp = DAG.getTargetConstant(0, dl, MVT::i32); | |||
22192 | SDValue Segment = DAG.getRegister(0, MVT::i32); | |||
22193 | MVT MaskVT = MVT::getVectorVT(MVT::i1, | |||
22194 | Index.getSimpleValueType().getVectorNumElements()); | |||
22195 | ||||
22196 | SDValue VMask = getMaskNode(Mask, MaskVT, Subtarget, DAG, dl); | |||
22197 | SDVTList VTs = DAG.getVTList(MaskVT, MVT::Other); | |||
22198 | SDValue Ops[] = {Base, Scale, Index, Disp, Segment, VMask, Src, Chain}; | |||
22199 | SDNode *Res = DAG.getMachineNode(Opc, dl, VTs, Ops); | |||
22200 | return SDValue(Res, 1); | |||
22201 | } | |||
22202 | ||||
22203 | static SDValue getPrefetchNode(unsigned Opc, SDValue Op, SelectionDAG &DAG, | |||
22204 | SDValue Mask, SDValue Base, SDValue Index, | |||
22205 | SDValue ScaleOp, SDValue Chain, | |||
22206 | const X86Subtarget &Subtarget) { | |||
22207 | SDLoc dl(Op); | |||
22208 | auto *C = dyn_cast<ConstantSDNode>(ScaleOp); | |||
22209 | // Scale must be constant. | |||
22210 | if (!C) | |||
22211 | return SDValue(); | |||
22212 | SDValue Scale = DAG.getTargetConstant(C->getZExtValue(), dl, MVT::i8); | |||
22213 | SDValue Disp = DAG.getTargetConstant(0, dl, MVT::i32); | |||
22214 | SDValue Segment = DAG.getRegister(0, MVT::i32); | |||
22215 | MVT MaskVT = | |||
22216 | MVT::getVectorVT(MVT::i1, Index.getSimpleValueType().getVectorNumElements()); | |||
22217 | SDValue VMask = getMaskNode(Mask, MaskVT, Subtarget, DAG, dl); | |||
22218 | SDValue Ops[] = {VMask, Base, Scale, Index, Disp, Segment, Chain}; | |||
22219 | SDNode *Res = DAG.getMachineNode(Opc, dl, MVT::Other, Ops); | |||
22220 | return SDValue(Res, 0); | |||
22221 | } | |||
22222 | ||||
22223 | /// Handles the lowering of builtin intrinsics that return the value | |||
22224 | /// of the extended control register. | |||
22225 | static void getExtendedControlRegister(SDNode *N, const SDLoc &DL, | |||
22226 | SelectionDAG &DAG, | |||
22227 | const X86Subtarget &Subtarget, | |||
22228 | SmallVectorImpl<SDValue> &Results) { | |||
22229 | assert(N->getNumOperands() == 3 && "Unexpected number of operands!"); | |||
22230 | SDVTList Tys = DAG.getVTList(MVT::Other, MVT::Glue); | |||
22231 | SDValue LO, HI; | |||
22232 | ||||
22233 | // The ECX register is used to select the index of the XCR register to | |||
22234 | // return. | |||
22235 | SDValue Chain = | |||
22236 | DAG.getCopyToReg(N->getOperand(0), DL, X86::ECX, N->getOperand(2)); | |||
22237 | SDNode *N1 = DAG.getMachineNode(X86::XGETBV, DL, Tys, Chain); | |||
22238 | Chain = SDValue(N1, 0); | |||
22239 | ||||
22240 | // Reads the content of XCR and returns it in registers EDX:EAX. | |||
22241 | if (Subtarget.is64Bit()) { | |||
22242 | LO = DAG.getCopyFromReg(Chain, DL, X86::RAX, MVT::i64, SDValue(N1, 1)); | |||
22243 | HI = DAG.getCopyFromReg(LO.getValue(1), DL, X86::RDX, MVT::i64, | |||
22244 | LO.getValue(2)); | |||
22245 | } else { | |||
22246 | LO = DAG.getCopyFromReg(Chain, DL, X86::EAX, MVT::i32, SDValue(N1, 1)); | |||
22247 | HI = DAG.getCopyFromReg(LO.getValue(1), DL, X86::EDX, MVT::i32, | |||
22248 | LO.getValue(2)); | |||
22249 | } | |||
22250 | Chain = HI.getValue(1); | |||
22251 | ||||
22252 | if (Subtarget.is64Bit()) { | |||
22253 | // Merge the two 32-bit values into a 64-bit one. | |||
22254 | SDValue Tmp = DAG.getNode(ISD::SHL, DL, MVT::i64, HI, | |||
22255 | DAG.getConstant(32, DL, MVT::i8)); | |||
22256 | Results.push_back(DAG.getNode(ISD::OR, DL, MVT::i64, LO, Tmp)); | |||
22257 | Results.push_back(Chain); | |||
22258 | return; | |||
22259 | } | |||
22260 | ||||
22261 | // Use a buildpair to merge the two 32-bit values into a 64-bit one. | |||
22262 | SDValue Ops[] = { LO, HI }; | |||
22263 | SDValue Pair = DAG.getNode(ISD::BUILD_PAIR, DL, MVT::i64, Ops); | |||
22264 | Results.push_back(Pair); | |||
22265 | Results.push_back(Chain); | |||
22266 | } | |||
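// Editorial sketch (not part of the original file): the scalar equivalent of
// the SHL/OR merge above, shared by the XGETBV, RDPMC and RDTSC lowerings to
// combine the EDX:EAX halves on 64-bit targets. 'mergeEDXEAX' is a
// hypothetical helper, assuming the <cstdint> typedefs are visible here.
LLVM_ATTRIBUTE_UNUSED static uint64_t mergeEDXEAX(uint32_t LO, uint32_t HI) {
  // HI lands in bits 63:32, LO in bits 31:0.
  return ((uint64_t)HI << 32) | LO;
}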
22267 | ||||
22268 | /// Handles the lowering of builtin intrinsics that read performance monitor | |||
22269 | /// counters (x86_rdpmc). | |||
22270 | static void getReadPerformanceCounter(SDNode *N, const SDLoc &DL, | |||
22271 | SelectionDAG &DAG, | |||
22272 | const X86Subtarget &Subtarget, | |||
22273 | SmallVectorImpl<SDValue> &Results) { | |||
22274 | assert(N->getNumOperands() == 3 && "Unexpected number of operands!"); | |||
22275 | SDVTList Tys = DAG.getVTList(MVT::Other, MVT::Glue); | |||
22276 | SDValue LO, HI; | |||
22277 | ||||
22278 | // The ECX register is used to select the index of the performance counter | |||
22279 | // to read. | |||
22280 | SDValue Chain = DAG.getCopyToReg(N->getOperand(0), DL, X86::ECX, | |||
22281 | N->getOperand(2)); | |||
22282 | SDValue rd = DAG.getNode(X86ISD::RDPMC_DAG, DL, Tys, Chain); | |||
22283 | ||||
22284 | // Reads the content of a 64-bit performance counter and returns it in the | |||
22285 | // registers EDX:EAX. | |||
22286 | if (Subtarget.is64Bit()) { | |||
22287 | LO = DAG.getCopyFromReg(rd, DL, X86::RAX, MVT::i64, rd.getValue(1)); | |||
22288 | HI = DAG.getCopyFromReg(LO.getValue(1), DL, X86::RDX, MVT::i64, | |||
22289 | LO.getValue(2)); | |||
22290 | } else { | |||
22291 | LO = DAG.getCopyFromReg(rd, DL, X86::EAX, MVT::i32, rd.getValue(1)); | |||
22292 | HI = DAG.getCopyFromReg(LO.getValue(1), DL, X86::EDX, MVT::i32, | |||
22293 | LO.getValue(2)); | |||
22294 | } | |||
22295 | Chain = HI.getValue(1); | |||
22296 | ||||
22297 | if (Subtarget.is64Bit()) { | |||
22298 | // The EAX register is loaded with the low-order 32 bits. The EDX register | |||
22299 | // is loaded with the supported high-order bits of the counter. | |||
22300 | SDValue Tmp = DAG.getNode(ISD::SHL, DL, MVT::i64, HI, | |||
22301 | DAG.getConstant(32, DL, MVT::i8)); | |||
22302 | Results.push_back(DAG.getNode(ISD::OR, DL, MVT::i64, LO, Tmp)); | |||
22303 | Results.push_back(Chain); | |||
22304 | return; | |||
22305 | } | |||
22306 | ||||
22307 | // Use a buildpair to merge the two 32-bit values into a 64-bit one. | |||
22308 | SDValue Ops[] = { LO, HI }; | |||
22309 | SDValue Pair = DAG.getNode(ISD::BUILD_PAIR, DL, MVT::i64, Ops); | |||
22310 | Results.push_back(Pair); | |||
22311 | Results.push_back(Chain); | |||
22312 | } | |||
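// Editorial note (not part of the original source; hardware detail hedged):
// at the ISA level RDPMC takes the counter index in ECX (with ECX bit 30
// commonly selecting the fixed-function counters) and returns the 64-bit
// counter in EDX:EAX, which is why this mirrors the XGETBV register dance.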
22313 | ||||
22314 | /// Handles the lowering of builtin intrinsics that read the time stamp counter | |||
22315 | /// (x86_rdtsc and x86_rdtscp). This function is also used to custom lower | |||
22316 | /// READCYCLECOUNTER nodes. | |||
22317 | static void getReadTimeStampCounter(SDNode *N, const SDLoc &DL, unsigned Opcode, | |||
22318 | SelectionDAG &DAG, | |||
22319 | const X86Subtarget &Subtarget, | |||
22320 | SmallVectorImpl<SDValue> &Results) { | |||
22321 | SDVTList Tys = DAG.getVTList(MVT::Other, MVT::Glue); | |||
22322 | SDValue rd = DAG.getNode(Opcode, DL, Tys, N->getOperand(0)); | |||
22323 | SDValue LO, HI; | |||
22324 | ||||
22325 | // The processor's time-stamp counter (a 64-bit MSR) is stored into the | |||
22326 | // EDX:EAX registers. EDX is loaded with the high-order 32 bits of the MSR | |||
22327 | // and the EAX register is loaded with the low-order 32 bits. | |||
22328 | if (Subtarget.is64Bit()) { | |||
22329 | LO = DAG.getCopyFromReg(rd, DL, X86::RAX, MVT::i64, rd.getValue(1)); | |||
22330 | HI = DAG.getCopyFromReg(LO.getValue(1), DL, X86::RDX, MVT::i64, | |||
22331 | LO.getValue(2)); | |||
22332 | } else { | |||
22333 | LO = DAG.getCopyFromReg(rd, DL, X86::EAX, MVT::i32, rd.getValue(1)); | |||
22334 | HI = DAG.getCopyFromReg(LO.getValue(1), DL, X86::EDX, MVT::i32, | |||
22335 | LO.getValue(2)); | |||
22336 | } | |||
22337 | SDValue Chain = HI.getValue(1); | |||
22338 | ||||
22339 | SDValue TSC; | |||
22340 | if (Subtarget.is64Bit()) { | |||
22341 | // The EDX register is loaded with the high-order 32 bits of the MSR, and | |||
22342 | // the EAX register is loaded with the low-order 32 bits. | |||
22343 | TSC = DAG.getNode(ISD::SHL, DL, MVT::i64, HI, | |||
22344 | DAG.getConstant(32, DL, MVT::i8)); | |||
22345 | TSC = DAG.getNode(ISD::OR, DL, MVT::i64, LO, TSC); | |||
22346 | } else { | |||
22347 | // Use a buildpair to merge the two 32-bit values into a 64-bit one. | |||
22348 | TSC = DAG.getNode(ISD::BUILD_PAIR, DL, MVT::i64, { LO, HI }); | |||
22349 | } | |||
22350 | ||||
22351 | if (Opcode == X86ISD::RDTSCP_DAG) { | |||
22352 | assert(N->getNumOperands() == 2 && "Unexpected number of operands!"); | |||
22353 | ||||
22354 | // Instruction RDTSCP loads the IA32:TSC_AUX_MSR (address C000_0103H) into | |||
22355 | // the ECX register. Add 'ecx' explicitly to the chain. | |||
22356 | SDValue ecx = DAG.getCopyFromReg(Chain, DL, X86::ECX, MVT::i32, | |||
22357 | HI.getValue(2)); | |||
22358 | ||||
22359 | Results.push_back(TSC); | |||
22360 | Results.push_back(ecx); | |||
22361 | Results.push_back(ecx.getValue(1)); | |||
22362 | return; | |||
22363 | } | |||
22364 | ||||
22365 | Results.push_back(TSC); | |||
22366 | Results.push_back(Chain); | |||
22367 | } | |||
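// Editorial sketch (not part of the original file): the C-level intrinsics
// this lowering serves, assuming GCC/Clang's <x86intrin.h>:
//   unsigned long long T0 = __rdtsc();       // X86ISD::RDTSC_DAG
//   unsigned Aux;
//   unsigned long long T1 = __rdtscp(&Aux);  // X86ISD::RDTSCP_DAG; Aux <- ECX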
22368 | ||||
22369 | static SDValue LowerREADCYCLECOUNTER(SDValue Op, const X86Subtarget &Subtarget, | |||
22370 | SelectionDAG &DAG) { | |||
22371 | SmallVector<SDValue, 3> Results; | |||
22372 | SDLoc DL(Op); | |||
22373 | getReadTimeStampCounter(Op.getNode(), DL, X86ISD::RDTSC_DAG, DAG, Subtarget, | |||
22374 | Results); | |||
22375 | return DAG.getMergeValues(Results, DL); | |||
22376 | } | |||
22377 | ||||
22378 | static SDValue MarkEHRegistrationNode(SDValue Op, SelectionDAG &DAG) { | |||
22379 | MachineFunction &MF = DAG.getMachineFunction(); | |||
22380 | SDValue Chain = Op.getOperand(0); | |||
22381 | SDValue RegNode = Op.getOperand(2); | |||
22382 | WinEHFuncInfo *EHInfo = MF.getWinEHFuncInfo(); | |||
22383 | if (!EHInfo) | |||
22384 | report_fatal_error("EH registrations only live in functions using WinEH"); | |||
22385 | ||||
22386 | // Cast the operand to an alloca, and remember the frame index. | |||
22387 | auto *FINode = dyn_cast<FrameIndexSDNode>(RegNode); | |||
22388 | if (!FINode) | |||
22389 | report_fatal_error("llvm.x86.seh.ehregnode expects a static alloca"); | |||
22390 | EHInfo->EHRegNodeFrameIndex = FINode->getIndex(); | |||
22391 | ||||
22392 | // Return the chain operand without making any DAG nodes. | |||
22393 | return Chain; | |||
22394 | } | |||
22395 | ||||
22396 | static SDValue MarkEHGuard(SDValue Op, SelectionDAG &DAG) { | |||
22397 | MachineFunction &MF = DAG.getMachineFunction(); | |||
22398 | SDValue Chain = Op.getOperand(0); | |||
22399 | SDValue EHGuard = Op.getOperand(2); | |||
22400 | WinEHFuncInfo *EHInfo = MF.getWinEHFuncInfo(); | |||
22401 | if (!EHInfo) | |||
22402 | report_fatal_error("EHGuard only live in functions using WinEH"); | |||
22403 | ||||
22404 | // Cast the operand to an alloca, and remember the frame index. | |||
22405 | auto *FINode = dyn_cast<FrameIndexSDNode>(EHGuard); | |||
22406 | if (!FINode) | |||
22407 | report_fatal_error("llvm.x86.seh.ehguard expects a static alloca"); | |||
22408 | EHInfo->EHGuardFrameIndex = FINode->getIndex(); | |||
22409 | ||||
22410 | // Return the chain operand without making any DAG nodes. | |||
22411 | return Chain; | |||
22412 | } | |||
22413 | ||||
22414 | /// Emit Truncating Store with signed or unsigned saturation. | |||
22415 | static SDValue | |||
22416 | EmitTruncSStore(bool SignedSat, SDValue Chain, const SDLoc &Dl, SDValue Val, | |||
22417 | SDValue Ptr, EVT MemVT, MachineMemOperand *MMO, | |||
22418 | SelectionDAG &DAG) { | |||
22419 | ||||
22420 | SDVTList VTs = DAG.getVTList(MVT::Other); | |||
22421 | SDValue Undef = DAG.getUNDEF(Ptr.getValueType()); | |||
22422 | SDValue Ops[] = { Chain, Val, Ptr, Undef }; | |||
22423 | return SignedSat ? | |||
22424 | DAG.getTargetMemSDNode<TruncSStoreSDNode>(VTs, Ops, Dl, MemVT, MMO) : | |||
22425 | DAG.getTargetMemSDNode<TruncUSStoreSDNode>(VTs, Ops, Dl, MemVT, MMO); | |||
22426 | } | |||
22427 | ||||
22428 | /// Emit Masked Truncating Store with signed or unsigned saturation. | |||
22429 | static SDValue | |||
22430 | EmitMaskedTruncSStore(bool SignedSat, SDValue Chain, const SDLoc &Dl, | |||
22431 | SDValue Val, SDValue Ptr, SDValue Mask, EVT MemVT, | |||
22432 | MachineMemOperand *MMO, SelectionDAG &DAG) { | |||
22433 | ||||
22434 | SDVTList VTs = DAG.getVTList(MVT::Other); | |||
22435 | SDValue Ops[] = { Chain, Val, Ptr, Mask }; | |||
22436 | return SignedSat ? | |||
22437 | DAG.getTargetMemSDNode<MaskedTruncSStoreSDNode>(VTs, Ops, Dl, MemVT, MMO) : | |||
22438 | DAG.getTargetMemSDNode<MaskedTruncUSStoreSDNode>(VTs, Ops, Dl, MemVT, MMO); | |||
22439 | } | |||
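// Editorial note (not part of the original source): a worked example of the
// saturation these emitters encode. Truncating an i16 lane holding 300
// (0x012C) to i8 gives:
//   VTRUNC   -> 0x2C (plain bit truncation)
//   VTRUNCS  -> 127  (signed saturation to [-128, 127])
//   VTRUNCUS -> 255  (unsigned saturation to [0, 255])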
22440 | ||||
22441 | static SDValue LowerINTRINSIC_W_CHAIN(SDValue Op, const X86Subtarget &Subtarget, | |||
22442 | SelectionDAG &DAG) { | |||
22443 | unsigned IntNo = cast<ConstantSDNode>(Op.getOperand(1))->getZExtValue(); | |||
22444 | ||||
22445 | const IntrinsicData *IntrData = getIntrinsicWithChain(IntNo); | |||
22446 | if (!IntrData) { | |||
22447 | switch (IntNo) { | |||
22448 | case llvm::Intrinsic::x86_seh_ehregnode: | |||
22449 | return MarkEHRegistrationNode(Op, DAG); | |||
22450 | case llvm::Intrinsic::x86_seh_ehguard: | |||
22451 | return MarkEHGuard(Op, DAG); | |||
22452 | case llvm::Intrinsic::x86_flags_read_u32: | |||
22453 | case llvm::Intrinsic::x86_flags_read_u64: | |||
22454 | case llvm::Intrinsic::x86_flags_write_u32: | |||
22455 | case llvm::Intrinsic::x86_flags_write_u64: { | |||
22456 | // We need a frame pointer because this will get lowered to a PUSH/POP | |||
22457 | // sequence. | |||
22458 | MachineFrameInfo &MFI = DAG.getMachineFunction().getFrameInfo(); | |||
22459 | MFI.setHasCopyImplyingStackAdjustment(true); | |||
22460 | // Don't do anything here; we will expand these intrinsics out later | |||
22461 | // during ExpandISelPseudos in EmitInstrWithCustomInserter. | |||
22462 | return SDValue(); | |||
22463 | } | |||
22464 | case Intrinsic::x86_lwpins32: | |||
22465 | case Intrinsic::x86_lwpins64: | |||
22466 | case Intrinsic::x86_umwait: | |||
22467 | case Intrinsic::x86_tpause: { | |||
22468 | SDLoc dl(Op); | |||
22469 | SDValue Chain = Op->getOperand(0); | |||
22470 | SDVTList VTs = DAG.getVTList(MVT::i32, MVT::Other); | |||
22471 | unsigned Opcode; | |||
22472 | ||||
22473 | switch (IntNo) { | |||
22474 | default: llvm_unreachable("Impossible intrinsic"); | |||
22475 | case Intrinsic::x86_umwait: | |||
22476 | Opcode = X86ISD::UMWAIT; | |||
22477 | break; | |||
22478 | case Intrinsic::x86_tpause: | |||
22479 | Opcode = X86ISD::TPAUSE; | |||
22480 | break; | |||
22481 | case Intrinsic::x86_lwpins32: | |||
22482 | case Intrinsic::x86_lwpins64: | |||
22483 | Opcode = X86ISD::LWPINS; | |||
22484 | break; | |||
22485 | } | |||
22486 | ||||
22487 | SDValue Operation = | |||
22488 | DAG.getNode(Opcode, dl, VTs, Chain, Op->getOperand(2), | |||
22489 | Op->getOperand(3), Op->getOperand(4)); | |||
22490 | SDValue SetCC = getSETCC(X86::COND_B, Operation.getValue(0), dl, DAG); | |||
22491 | SDValue Result = DAG.getNode(ISD::ZERO_EXTEND, dl, MVT::i8, SetCC); | |||
22492 | return DAG.getNode(ISD::MERGE_VALUES, dl, Op->getVTList(), Result, | |||
22493 | Operation.getValue(1)); | |||
22494 | } | |||
22495 | } | |||
22496 | return SDValue(); | |||
22497 | } | |||
22498 | ||||
22499 | SDLoc dl(Op); | |||
22500 | switch(IntrData->Type) { | |||
22501 | default: llvm_unreachable("Unknown Intrinsic Type"); | |||
22502 | case RDSEED: | |||
22503 | case RDRAND: { | |||
22504 | // Emit the node with the right value type. | |||
22505 | SDVTList VTs = DAG.getVTList(Op->getValueType(0), MVT::i32, MVT::Other); | |||
22506 | SDValue Result = DAG.getNode(IntrData->Opc0, dl, VTs, Op.getOperand(0)); | |||
22507 | ||||
22508 | // If the value returned by RDRAND/RDSEED was valid (CF=1), return 1. | |||
22509 | // Otherwise return the value from Rand, which is always 0, cast to i32. | |||
22510 | SDValue Ops[] = { DAG.getZExtOrTrunc(Result, dl, Op->getValueType(1)), | |||
22511 | DAG.getConstant(1, dl, Op->getValueType(1)), | |||
22512 | DAG.getConstant(X86::COND_B, dl, MVT::i8), | |||
22513 | SDValue(Result.getNode(), 1) }; | |||
22514 | SDValue isValid = DAG.getNode(X86ISD::CMOV, dl, Op->getValueType(1), Ops); | |||
22515 | ||||
22516 | // Return { result, isValid, chain }. | |||
22517 | return DAG.getNode(ISD::MERGE_VALUES, dl, Op->getVTList(), Result, isValid, | |||
22518 | SDValue(Result.getNode(), 2)); | |||
22519 | } | |||
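// Editorial note (not part of the original source): this matches the C-level
// pattern of e.g. _rdrand32_step(&R) from <immintrin.h>, whose int return
// value corresponds to the CF-derived isValid computed above.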
22520 | case GATHER_AVX2: { | |||
22521 | SDValue Chain = Op.getOperand(0); | |||
22522 | SDValue Src = Op.getOperand(2); | |||
22523 | SDValue Base = Op.getOperand(3); | |||
22524 | SDValue Index = Op.getOperand(4); | |||
22525 | SDValue Mask = Op.getOperand(5); | |||
22526 | SDValue Scale = Op.getOperand(6); | |||
22527 | return getAVX2GatherNode(IntrData->Opc0, Op, DAG, Src, Mask, Base, Index, | |||
22528 | Scale, Chain, Subtarget); | |||
22529 | } | |||
22530 | case GATHER: { | |||
22531 | // gather(v1, mask, index, base, scale); | |||
22532 | SDValue Chain = Op.getOperand(0); | |||
22533 | SDValue Src = Op.getOperand(2); | |||
22534 | SDValue Base = Op.getOperand(3); | |||
22535 | SDValue Index = Op.getOperand(4); | |||
22536 | SDValue Mask = Op.getOperand(5); | |||
22537 | SDValue Scale = Op.getOperand(6); | |||
22538 | return getGatherNode(IntrData->Opc0, Op, DAG, Src, Mask, Base, Index, Scale, | |||
22539 | Chain, Subtarget); | |||
22540 | } | |||
22541 | case SCATTER: { | |||
22542 | // scatter(base, mask, index, v1, scale); | |||
22543 | SDValue Chain = Op.getOperand(0); | |||
22544 | SDValue Base = Op.getOperand(2); | |||
22545 | SDValue Mask = Op.getOperand(3); | |||
22546 | SDValue Index = Op.getOperand(4); | |||
22547 | SDValue Src = Op.getOperand(5); | |||
22548 | SDValue Scale = Op.getOperand(6); | |||
22549 | return getScatterNode(IntrData->Opc0, Op, DAG, Src, Mask, Base, Index, | |||
22550 | Scale, Chain, Subtarget); | |||
22551 | } | |||
22552 | case PREFETCH: { | |||
22553 | SDValue Hint = Op.getOperand(6); | |||
22554 | unsigned HintVal = cast<ConstantSDNode>(Hint)->getZExtValue(); | |||
22555 | assert((HintVal == 2 || HintVal == 3) && | |||
22556 | "Wrong prefetch hint in intrinsic: should be 2 or 3"); | |||
22557 | unsigned Opcode = (HintVal == 2 ? IntrData->Opc1 : IntrData->Opc0); | |||
22558 | SDValue Chain = Op.getOperand(0); | |||
22559 | SDValue Mask = Op.getOperand(2); | |||
22560 | SDValue Index = Op.getOperand(3); | |||
22561 | SDValue Base = Op.getOperand(4); | |||
22562 | SDValue Scale = Op.getOperand(5); | |||
22563 | return getPrefetchNode(Opcode, Op, DAG, Mask, Base, Index, Scale, Chain, | |||
22564 | Subtarget); | |||
22565 | } | |||
22566 | // Read Time Stamp Counter (RDTSC) and Processor ID (RDTSCP). | |||
22567 | case RDTSC: { | |||
22568 | SmallVector<SDValue, 2> Results; | |||
22569 | getReadTimeStampCounter(Op.getNode(), dl, IntrData->Opc0, DAG, Subtarget, | |||
22570 | Results); | |||
22571 | return DAG.getMergeValues(Results, dl); | |||
22572 | } | |||
22573 | // Read Performance Monitoring Counters. | |||
22574 | case RDPMC: { | |||
22575 | SmallVector<SDValue, 2> Results; | |||
22576 | getReadPerformanceCounter(Op.getNode(), dl, DAG, Subtarget, Results); | |||
22577 | return DAG.getMergeValues(Results, dl); | |||
22578 | } | |||
22579 | // Get Extended Control Register. | |||
22580 | case XGETBV: { | |||
22581 | SmallVector<SDValue, 2> Results; | |||
22582 | getExtendedControlRegister(Op.getNode(), dl, DAG, Subtarget, Results); | |||
22583 | return DAG.getMergeValues(Results, dl); | |||
22584 | } | |||
22585 | // XTEST intrinsics. | |||
22586 | case XTEST: { | |||
22587 | SDVTList VTs = DAG.getVTList(Op->getValueType(0), MVT::Other); | |||
22588 | SDValue InTrans = DAG.getNode(IntrData->Opc0, dl, VTs, Op.getOperand(0)); | |||
22589 | ||||
22590 | SDValue SetCC = getSETCC(X86::COND_NE, InTrans, dl, DAG); | |||
22591 | SDValue Ret = DAG.getNode(ISD::ZERO_EXTEND, dl, Op->getValueType(0), SetCC); | |||
22592 | return DAG.getNode(ISD::MERGE_VALUES, dl, Op->getVTList(), | |||
22593 | Ret, SDValue(InTrans.getNode(), 1)); | |||
22594 | } | |||
22595 | case TRUNCATE_TO_MEM_VI8: | |||
22596 | case TRUNCATE_TO_MEM_VI16: | |||
22597 | case TRUNCATE_TO_MEM_VI32: { | |||
22598 | SDValue Mask = Op.getOperand(4); | |||
22599 | SDValue DataToTruncate = Op.getOperand(3); | |||
22600 | SDValue Addr = Op.getOperand(2); | |||
22601 | SDValue Chain = Op.getOperand(0); | |||
22602 | ||||
22603 | MemIntrinsicSDNode *MemIntr = dyn_cast<MemIntrinsicSDNode>(Op); | |||
22604 | assert(MemIntr && "Expected MemIntrinsicSDNode!"); | |||
22605 | ||||
22606 | EVT MemVT = MemIntr->getMemoryVT(); | |||
22607 | ||||
22608 | uint16_t TruncationOp = IntrData->Opc0; | |||
22609 | switch (TruncationOp) { | |||
22610 | case X86ISD::VTRUNC: { | |||
22611 | if (isAllOnesConstant(Mask)) // return just a truncate store | |||
22612 | return DAG.getTruncStore(Chain, dl, DataToTruncate, Addr, MemVT, | |||
22613 | MemIntr->getMemOperand()); | |||
22614 | ||||
22615 | MVT MaskVT = MVT::getVectorVT(MVT::i1, MemVT.getVectorNumElements()); | |||
22616 | SDValue VMask = getMaskNode(Mask, MaskVT, Subtarget, DAG, dl); | |||
22617 | ||||
22618 | return DAG.getMaskedStore(Chain, dl, DataToTruncate, Addr, VMask, MemVT, | |||
22619 | MemIntr->getMemOperand(), true /* truncating */); | |||
22620 | } | |||
22621 | case X86ISD::VTRUNCUS: | |||
22622 | case X86ISD::VTRUNCS: { | |||
22623 | bool IsSigned = (TruncationOp == X86ISD::VTRUNCS); | |||
22624 | if (isAllOnesConstant(Mask)) | |||
22625 | return EmitTruncSStore(IsSigned, Chain, dl, DataToTruncate, Addr, MemVT, | |||
22626 | MemIntr->getMemOperand(), DAG); | |||
22627 | ||||
22628 | MVT MaskVT = MVT::getVectorVT(MVT::i1, MemVT.getVectorNumElements()); | |||
22629 | SDValue VMask = getMaskNode(Mask, MaskVT, Subtarget, DAG, dl); | |||
22630 | ||||
22631 | return EmitMaskedTruncSStore(IsSigned, Chain, dl, DataToTruncate, Addr, | |||
22632 | VMask, MemVT, MemIntr->getMemOperand(), DAG); | |||
22633 | } | |||
22634 | default: | |||
22635 | llvm_unreachable("Unsupported truncstore intrinsic")::llvm::llvm_unreachable_internal("Unsupported truncstore intrinsic" , "/build/llvm-toolchain-snapshot-8~svn350071/lib/Target/X86/X86ISelLowering.cpp" , 22635); | |||
22636 | } | |||
22637 | } | |||
22638 | } | |||
22639 | } | |||
22640 | ||||
22641 | SDValue X86TargetLowering::LowerRETURNADDR(SDValue Op, | |||
22642 | SelectionDAG &DAG) const { | |||
22643 | MachineFrameInfo &MFI = DAG.getMachineFunction().getFrameInfo(); | |||
22644 | MFI.setReturnAddressIsTaken(true); | |||
22645 | ||||
22646 | if (verifyReturnAddressArgumentIsConstant(Op, DAG)) | |||
22647 | return SDValue(); | |||
22648 | ||||
22649 | unsigned Depth = cast<ConstantSDNode>(Op.getOperand(0))->getZExtValue(); | |||
22650 | SDLoc dl(Op); | |||
22651 | EVT PtrVT = getPointerTy(DAG.getDataLayout()); | |||
22652 | ||||
22653 | if (Depth > 0) { | |||
22654 | SDValue FrameAddr = LowerFRAMEADDR(Op, DAG); | |||
22655 | const X86RegisterInfo *RegInfo = Subtarget.getRegisterInfo(); | |||
22656 | SDValue Offset = DAG.getConstant(RegInfo->getSlotSize(), dl, PtrVT); | |||
22657 | return DAG.getLoad(PtrVT, dl, DAG.getEntryNode(), | |||
22658 | DAG.getNode(ISD::ADD, dl, PtrVT, FrameAddr, Offset), | |||
22659 | MachinePointerInfo()); | |||
22660 | } | |||
22661 | ||||
22662 | // Just load the return address. | |||
22663 | SDValue RetAddrFI = getReturnAddressFrameIndex(DAG); | |||
22664 | return DAG.getLoad(PtrVT, dl, DAG.getEntryNode(), RetAddrFI, | |||
22665 | MachinePointerInfo()); | |||
22666 | } | |||
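// Editorial sketch (not part of the original source): for Depth > 0 the code
// above computes, in pseudo-C,
//   RetAddr = *(void **)(FrameAddr(Depth) + SlotSize);
// i.e. the saved return address sits one pointer slot above the saved frame
// pointer that LowerFRAMEADDR recovered.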
22667 | ||||
22668 | SDValue X86TargetLowering::LowerADDROFRETURNADDR(SDValue Op, | |||
22669 | SelectionDAG &DAG) const { | |||
22670 | DAG.getMachineFunction().getFrameInfo().setReturnAddressIsTaken(true); | |||
22671 | return getReturnAddressFrameIndex(DAG); | |||
22672 | } | |||
22673 | ||||
22674 | SDValue X86TargetLowering::LowerFRAMEADDR(SDValue Op, SelectionDAG &DAG) const { | |||
22675 | MachineFunction &MF = DAG.getMachineFunction(); | |||
22676 | MachineFrameInfo &MFI = MF.getFrameInfo(); | |||
22677 | X86MachineFunctionInfo *FuncInfo = MF.getInfo<X86MachineFunctionInfo>(); | |||
22678 | const X86RegisterInfo *RegInfo = Subtarget.getRegisterInfo(); | |||
22679 | EVT VT = Op.getValueType(); | |||
22680 | ||||
22681 | MFI.setFrameAddressIsTaken(true); | |||
22682 | ||||
22683 | if (MF.getTarget().getMCAsmInfo()->usesWindowsCFI()) { | |||
22684 | // Depth > 0 makes no sense on targets which use Windows unwind codes. It | |||
22685 | // is not possible to crawl up the stack without looking at the unwind codes | |||
22686 | // simultaneously. | |||
22687 | int FrameAddrIndex = FuncInfo->getFAIndex(); | |||
22688 | if (!FrameAddrIndex) { | |||
22689 | // Set up a frame object for the return address. | |||
22690 | unsigned SlotSize = RegInfo->getSlotSize(); | |||
22691 | FrameAddrIndex = MF.getFrameInfo().CreateFixedObject( | |||
22692 | SlotSize, /*Offset=*/0, /*IsImmutable=*/false); | |||
22693 | FuncInfo->setFAIndex(FrameAddrIndex); | |||
22694 | } | |||
22695 | return DAG.getFrameIndex(FrameAddrIndex, VT); | |||
22696 | } | |||
22697 | ||||
22698 | unsigned FrameReg = | |||
22699 | RegInfo->getPtrSizedFrameRegister(DAG.getMachineFunction()); | |||
22700 | SDLoc dl(Op); // FIXME probably not meaningful | |||
22701 | unsigned Depth = cast<ConstantSDNode>(Op.getOperand(0))->getZExtValue(); | |||
22702 | assert(((FrameReg == X86::RBP && VT == MVT::i64) || | |||
22703 | (FrameReg == X86::EBP && VT == MVT::i32)) && | |||
22704 | "Invalid Frame Register!"); | |||
22705 | SDValue FrameAddr = DAG.getCopyFromReg(DAG.getEntryNode(), dl, FrameReg, VT); | |||
22706 | while (Depth--) | |||
22707 | FrameAddr = DAG.getLoad(VT, dl, DAG.getEntryNode(), FrameAddr, | |||
22708 | MachinePointerInfo()); | |||
22709 | return FrameAddr; | |||
22710 | } | |||
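// Editorial sketch (not part of the original source): the Depth loop above
// walks the saved-FP chain, FP(k+1) = *(void **)FP(k), relying on every
// frame having spilled its caller's frame pointer at offset 0 of its own.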
22711 | ||||
22712 | // FIXME? Maybe this could be a TableGen attribute on some registers and | |||
22713 | // this table could be generated automatically from RegInfo. | |||
22714 | unsigned X86TargetLowering::getRegisterByName(const char* RegName, EVT VT, | |||
22715 | SelectionDAG &DAG) const { | |||
22716 | const TargetFrameLowering &TFI = *Subtarget.getFrameLowering(); | |||
22717 | const MachineFunction &MF = DAG.getMachineFunction(); | |||
22718 | ||||
22719 | unsigned Reg = StringSwitch<unsigned>(RegName) | |||
22720 | .Case("esp", X86::ESP) | |||
22721 | .Case("rsp", X86::RSP) | |||
22722 | .Case("ebp", X86::EBP) | |||
22723 | .Case("rbp", X86::RBP) | |||
22724 | .Default(0); | |||
22725 | ||||
22726 | if (Reg == X86::EBP || Reg == X86::RBP) { | |||
22727 | if (!TFI.hasFP(MF)) | |||
22728 | report_fatal_error("register " + StringRef(RegName) + | |||
22729 | " is allocatable: function has no frame pointer"); | |||
22730 | #ifndef NDEBUG | |||
22731 | else { | |||
22732 | const X86RegisterInfo *RegInfo = Subtarget.getRegisterInfo(); | |||
22733 | unsigned FrameReg = | |||
22734 | RegInfo->getPtrSizedFrameRegister(DAG.getMachineFunction()); | |||
22735 | assert((FrameReg == X86::EBP || FrameReg == X86::RBP) && | |||
22736 | "Invalid Frame Register!"); | |||
22737 | } | |||
22738 | #endif | |||
22739 | } | |||
22740 | ||||
22741 | if (Reg) | |||
22742 | return Reg; | |||
22743 | ||||
22744 | report_fatal_error("Invalid register name global variable"); | |||
22745 | } | |||
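// Editorial sketch (not part of the original file): this hook backs the
// llvm.read_register / llvm.write_register intrinsics. At the C level the
// usual way to reach it is a named global register variable, e.g.
//   register unsigned long StackPtr __asm__("rsp");
// (hypothetical user code, shown only to illustrate the entry point).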
22746 | ||||
22747 | SDValue X86TargetLowering::LowerFRAME_TO_ARGS_OFFSET(SDValue Op, | |||
22748 | SelectionDAG &DAG) const { | |||
22749 | const X86RegisterInfo *RegInfo = Subtarget.getRegisterInfo(); | |||
22750 | return DAG.getIntPtrConstant(2 * RegInfo->getSlotSize(), SDLoc(Op)); | |||
22751 | } | |||
22752 | ||||
22753 | unsigned X86TargetLowering::getExceptionPointerRegister( | |||
22754 | const Constant *PersonalityFn) const { | |||
22755 | if (classifyEHPersonality(PersonalityFn) == EHPersonality::CoreCLR) | |||
22756 | return Subtarget.isTarget64BitLP64() ? X86::RDX : X86::EDX; | |||
22757 | ||||
22758 | return Subtarget.isTarget64BitLP64() ? X86::RAX : X86::EAX; | |||
22759 | } | |||
22760 | ||||
22761 | unsigned X86TargetLowering::getExceptionSelectorRegister( | |||
22762 | const Constant *PersonalityFn) const { | |||
22763 | // Funclet personalities don't use selectors (the runtime does the selection). | |||
22764 | assert(!isFuncletEHPersonality(classifyEHPersonality(PersonalityFn))); | |||
22765 | return Subtarget.isTarget64BitLP64() ? X86::RDX : X86::EDX; | |||
22766 | } | |||
22767 | ||||
22768 | bool X86TargetLowering::needsFixedCatchObjects() const { | |||
22769 | return Subtarget.isTargetWin64(); | |||
22770 | } | |||
22771 | ||||
22772 | SDValue X86TargetLowering::LowerEH_RETURN(SDValue Op, SelectionDAG &DAG) const { | |||
22773 | SDValue Chain = Op.getOperand(0); | |||
22774 | SDValue Offset = Op.getOperand(1); | |||
22775 | SDValue Handler = Op.getOperand(2); | |||
22776 | SDLoc dl (Op); | |||
22777 | ||||
22778 | EVT PtrVT = getPointerTy(DAG.getDataLayout()); | |||
22779 | const X86RegisterInfo *RegInfo = Subtarget.getRegisterInfo(); | |||
22780 | unsigned FrameReg = RegInfo->getFrameRegister(DAG.getMachineFunction()); | |||
22781 | assert(((FrameReg == X86::RBP && PtrVT == MVT::i64) || | |||
22782 | (FrameReg == X86::EBP && PtrVT == MVT::i32)) && | |||
22783 | "Invalid Frame Register!"); | |||
22784 | SDValue Frame = DAG.getCopyFromReg(DAG.getEntryNode(), dl, FrameReg, PtrVT); | |||
22785 | unsigned StoreAddrReg = (PtrVT == MVT::i64) ? X86::RCX : X86::ECX; | |||
22786 | ||||
22787 | SDValue StoreAddr = DAG.getNode(ISD::ADD, dl, PtrVT, Frame, | |||
22788 | DAG.getIntPtrConstant(RegInfo->getSlotSize(), | |||
22789 | dl)); | |||
22790 | StoreAddr = DAG.getNode(ISD::ADD, dl, PtrVT, StoreAddr, Offset); | |||
22791 | Chain = DAG.getStore(Chain, dl, Handler, StoreAddr, MachinePointerInfo()); | |||
22792 | Chain = DAG.getCopyToReg(Chain, dl, StoreAddrReg, StoreAddr); | |||
22793 | ||||
22794 | return DAG.getNode(X86ISD::EH_RETURN, dl, MVT::Other, Chain, | |||
22795 | DAG.getRegister(StoreAddrReg, PtrVT)); | |||
22796 | } | |||
22797 | ||||
22798 | SDValue X86TargetLowering::lowerEH_SJLJ_SETJMP(SDValue Op, | |||
22799 | SelectionDAG &DAG) const { | |||
22800 | SDLoc DL(Op); | |||
22801 | // If the subtarget is not 64-bit, we may need the global base reg | |||
22802 | // after isel expands the pseudo, i.e., after the CGBR pass has run. | |||
22803 | // Therefore, ask for the GlobalBaseReg now, so that the pass | |||
22804 | // inserts the code for us in case we need it. | |||
22805 | // Otherwise, we would end up referencing a virtual register | |||
22806 | // that is not defined! | |||
22807 | if (!Subtarget.is64Bit()) { | |||
22808 | const X86InstrInfo *TII = Subtarget.getInstrInfo(); | |||
22809 | (void)TII->getGlobalBaseReg(&DAG.getMachineFunction()); | |||
22810 | } | |||
22811 | return DAG.getNode(X86ISD::EH_SJLJ_SETJMP, DL, | |||
22812 | DAG.getVTList(MVT::i32, MVT::Other), | |||
22813 | Op.getOperand(0), Op.getOperand(1)); | |||
22814 | } | |||
22815 | ||||
22816 | SDValue X86TargetLowering::lowerEH_SJLJ_LONGJMP(SDValue Op, | |||
22817 | SelectionDAG &DAG) const { | |||
22818 | SDLoc DL(Op); | |||
22819 | return DAG.getNode(X86ISD::EH_SJLJ_LONGJMP, DL, MVT::Other, | |||
22820 | Op.getOperand(0), Op.getOperand(1)); | |||
22821 | } | |||
22822 | ||||
22823 | SDValue X86TargetLowering::lowerEH_SJLJ_SETUP_DISPATCH(SDValue Op, | |||
22824 | SelectionDAG &DAG) const { | |||
22825 | SDLoc DL(Op); | |||
22826 | return DAG.getNode(X86ISD::EH_SJLJ_SETUP_DISPATCH, DL, MVT::Other, | |||
22827 | Op.getOperand(0)); | |||
22828 | } | |||
22829 | ||||
22830 | static SDValue LowerADJUST_TRAMPOLINE(SDValue Op, SelectionDAG &DAG) { | |||
22831 | return Op.getOperand(0); | |||
22832 | } | |||
22833 | ||||
22834 | SDValue X86TargetLowering::LowerINIT_TRAMPOLINE(SDValue Op, | |||
22835 | SelectionDAG &DAG) const { | |||
22836 | SDValue Root = Op.getOperand(0); | |||
22837 | SDValue Trmp = Op.getOperand(1); // trampoline | |||
22838 | SDValue FPtr = Op.getOperand(2); // nested function | |||
22839 | SDValue Nest = Op.getOperand(3); // 'nest' parameter value | |||
22840 | SDLoc dl (Op); | |||
22841 | ||||
22842 | const Value *TrmpAddr = cast<SrcValueSDNode>(Op.getOperand(4))->getValue(); | |||
22843 | const TargetRegisterInfo *TRI = Subtarget.getRegisterInfo(); | |||
22844 | ||||
22845 | if (Subtarget.is64Bit()) { | |||
22846 | SDValue OutChains[6]; | |||
22847 | ||||
22848 | // Large code-model. | |||
22849 | const unsigned char JMP64r = 0xFF; // 64-bit jmp through register opcode. | |||
22850 | const unsigned char MOV64ri = 0xB8; // X86::MOV64ri opcode. | |||
22851 | ||||
22852 | const unsigned char N86R10 = TRI->getEncodingValue(X86::R10) & 0x7; | |||
22853 | const unsigned char N86R11 = TRI->getEncodingValue(X86::R11) & 0x7; | |||
22854 | ||||
22855 | const unsigned char REX_WB = 0x40 | 0x08 | 0x01; // REX prefix | |||
22856 | ||||
22857 | // Load the pointer to the nested function into R11. | |||
22858 | unsigned OpCode = ((MOV64ri | N86R11) << 8) | REX_WB; // movabsq r11 | |||
22859 | SDValue Addr = Trmp; | |||
22860 | OutChains[0] = DAG.getStore(Root, dl, DAG.getConstant(OpCode, dl, MVT::i16), | |||
22861 | Addr, MachinePointerInfo(TrmpAddr)); | |||
22862 | ||||
22863 | Addr = DAG.getNode(ISD::ADD, dl, MVT::i64, Trmp, | |||
22864 | DAG.getConstant(2, dl, MVT::i64)); | |||
22865 | OutChains[1] = | |||
22866 | DAG.getStore(Root, dl, FPtr, Addr, MachinePointerInfo(TrmpAddr, 2), | |||
22867 | /* Alignment = */ 2); | |||
22868 | ||||
22869 | // Load the 'nest' parameter value into R10. | |||
22870 | // R10 is specified in X86CallingConv.td | |||
22871 | OpCode = ((MOV64ri | N86R10) << 8) | REX_WB; // movabsq r10 | |||
22872 | Addr = DAG.getNode(ISD::ADD, dl, MVT::i64, Trmp, | |||
22873 | DAG.getConstant(10, dl, MVT::i64)); | |||
22874 | OutChains[2] = DAG.getStore(Root, dl, DAG.getConstant(OpCode, dl, MVT::i16), | |||
22875 | Addr, MachinePointerInfo(TrmpAddr, 10)); | |||
22876 | ||||
22877 | Addr = DAG.getNode(ISD::ADD, dl, MVT::i64, Trmp, | |||
22878 | DAG.getConstant(12, dl, MVT::i64)); | |||
22879 | OutChains[3] = | |||
22880 | DAG.getStore(Root, dl, Nest, Addr, MachinePointerInfo(TrmpAddr, 12), | |||
22881 | /* Alignment = */ 2); | |||
22882 | ||||
22883 | // Jump to the nested function. | |||
22884 | OpCode = (JMP64r << 8) | REX_WB; // jmpq *... | |||
22885 | Addr = DAG.getNode(ISD::ADD, dl, MVT::i64, Trmp, | |||
22886 | DAG.getConstant(20, dl, MVT::i64)); | |||
22887 | OutChains[4] = DAG.getStore(Root, dl, DAG.getConstant(OpCode, dl, MVT::i16), | |||
22888 | Addr, MachinePointerInfo(TrmpAddr, 20)); | |||
22889 | ||||
22890 | unsigned char ModRM = N86R11 | (4 << 3) | (3 << 6); // ...r11 | |||
22891 | Addr = DAG.getNode(ISD::ADD, dl, MVT::i64, Trmp, | |||
22892 | DAG.getConstant(22, dl, MVT::i64)); | |||
22893 | OutChains[5] = DAG.getStore(Root, dl, DAG.getConstant(ModRM, dl, MVT::i8), | |||
22894 | Addr, MachinePointerInfo(TrmpAddr, 22)); | |||
22895 | ||||
22896 | return DAG.getNode(ISD::TokenFactor, dl, MVT::Other, OutChains); | |||
22897 | } else { | |||
22898 | const Function *Func = | |||
22899 | cast<Function>(cast<SrcValueSDNode>(Op.getOperand(5))->getValue()); | |||
22900 | CallingConv::ID CC = Func->getCallingConv(); | |||
22901 | unsigned NestReg; | |||
22902 | ||||
22903 | switch (CC) { | |||
22904 | default: | |||
22905 | llvm_unreachable("Unsupported calling convention"); | |||
22906 | case CallingConv::C: | |||
22907 | case CallingConv::X86_StdCall: { | |||
22908 | // Pass 'nest' parameter in ECX. | |||
22909 | // Must be kept in sync with X86CallingConv.td | |||
22910 | NestReg = X86::ECX; | |||
22911 | ||||
22912 | // Check that ECX wasn't needed by an 'inreg' parameter. | |||
22913 | FunctionType *FTy = Func->getFunctionType(); | |||
22914 | const AttributeList &Attrs = Func->getAttributes(); | |||
22915 | ||||
22916 | if (!Attrs.isEmpty() && !Func->isVarArg()) { | |||
22917 | unsigned InRegCount = 0; | |||
22918 | unsigned Idx = 1; | |||
22919 | ||||
22920 | for (FunctionType::param_iterator I = FTy->param_begin(), | |||
22921 | E = FTy->param_end(); I != E; ++I, ++Idx) | |||
22922 | if (Attrs.hasAttribute(Idx, Attribute::InReg)) { | |||
22923 | auto &DL = DAG.getDataLayout(); | |||
22924 | // FIXME: should only count parameters that are lowered to integers. | |||
22925 | InRegCount += (DL.getTypeSizeInBits(*I) + 31) / 32; | |||
22926 | } | |||
22927 | ||||
22928 | if (InRegCount > 2) { | |||
22929 | report_fatal_error("Nest register in use - reduce number of inreg" | |||
22930 | " parameters!"); | |||
22931 | } | |||
22932 | } | |||
22933 | break; | |||
22934 | } | |||
22935 | case CallingConv::X86_FastCall: | |||
22936 | case CallingConv::X86_ThisCall: | |||
22937 | case CallingConv::Fast: | |||
22938 | // Pass 'nest' parameter in EAX. | |||
22939 | // Must be kept in sync with X86CallingConv.td | |||
22940 | NestReg = X86::EAX; | |||
22941 | break; | |||
22942 | } | |||
22943 | ||||
22944 | SDValue OutChains[4]; | |||
22945 | SDValue Addr, Disp; | |||
22946 | ||||
22947 | Addr = DAG.getNode(ISD::ADD, dl, MVT::i32, Trmp, | |||
22948 | DAG.getConstant(10, dl, MVT::i32)); | |||
22949 | Disp = DAG.getNode(ISD::SUB, dl, MVT::i32, FPtr, Addr); | |||
22950 | ||||
22951 | // This is storing the opcode for MOV32ri. | |||
22952 | const unsigned char MOV32ri = 0xB8; // X86::MOV32ri's opcode byte. | |||
22953 | const unsigned char N86Reg = TRI->getEncodingValue(NestReg) & 0x7; | |||
22954 | OutChains[0] = | |||
22955 | DAG.getStore(Root, dl, DAG.getConstant(MOV32ri | N86Reg, dl, MVT::i8), | |||
22956 | Trmp, MachinePointerInfo(TrmpAddr)); | |||
22957 | ||||
22958 | Addr = DAG.getNode(ISD::ADD, dl, MVT::i32, Trmp, | |||
22959 | DAG.getConstant(1, dl, MVT::i32)); | |||
22960 | OutChains[1] = | |||
22961 | DAG.getStore(Root, dl, Nest, Addr, MachinePointerInfo(TrmpAddr, 1), | |||
22962 | /* Alignment = */ 1); | |||
22963 | ||||
22964 | const unsigned char JMP = 0xE9; // jmp <32bit dst> opcode. | |||
22965 | Addr = DAG.getNode(ISD::ADD, dl, MVT::i32, Trmp, | |||
22966 | DAG.getConstant(5, dl, MVT::i32)); | |||
22967 | OutChains[2] = DAG.getStore(Root, dl, DAG.getConstant(JMP, dl, MVT::i8), | |||
22968 | Addr, MachinePointerInfo(TrmpAddr, 5), | |||
22969 | /* Alignment = */ 1); | |||
22970 | ||||
22971 | Addr = DAG.getNode(ISD::ADD, dl, MVT::i32, Trmp, | |||
22972 | DAG.getConstant(6, dl, MVT::i32)); | |||
22973 | OutChains[3] = | |||
22974 | DAG.getStore(Root, dl, Disp, Addr, MachinePointerInfo(TrmpAddr, 6), | |||
22975 | /* Alignment = */ 1); | |||
22976 | ||||
22977 | return DAG.getNode(ISD::TokenFactor, dl, MVT::Other, OutChains); | |||
22978 | } | |||
22979 | } | |||
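// Editorial sketch (not part of the original file): the 23 bytes of 64-bit
// trampoline emitted above, decoded (FPtr and Nest are 8-byte immediates,
// REX.WB == 0x49):
//   +0   49 BB <FPtr>   movabsq $FPtr, %r11
//   +10  49 BA <Nest>   movabsq $Nest, %r10
//   +20  49 FF E3       rex.WB jmpq *%r11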
22980 | ||||
22981 | SDValue X86TargetLowering::LowerFLT_ROUNDS_(SDValue Op, | |||
22982 | SelectionDAG &DAG) const { | |||
22983 | /* | |||
22984 | The rounding mode is in bits 11:10 of the FP control word (FPCW), and has | |||
22985 | the following settings: | |||
22986 | 00 Round to nearest | |||
22987 | 01 Round to -inf | |||
22988 | 10 Round to +inf | |||
22989 | 11 Round to 0 | |||
22990 | ||||
22991 | FLT_ROUNDS, on the other hand, expects the following: | |||
22992 | -1 Undefined | |||
22993 | 0 Round to 0 | |||
22994 | 1 Round to nearest | |||
22995 | 2 Round to +inf | |||
22996 | 3 Round to -inf | |||
22997 | ||||
22998 | To perform the conversion, we do: | |||
22999 | (((((FPCW & 0x800) >> 11) | ((FPCW & 0x400) >> 9)) + 1) & 3) | |||
23000 | */ | |||
23001 | ||||
23002 | MachineFunction &MF = DAG.getMachineFunction(); | |||
23003 | const TargetFrameLowering &TFI = *Subtarget.getFrameLowering(); | |||
23004 | unsigned StackAlignment = TFI.getStackAlignment(); | |||
23005 | MVT VT = Op.getSimpleValueType(); | |||
23006 | SDLoc DL(Op); | |||
23007 | ||||
23008 | // Save FP Control Word to stack slot | |||
23009 | int SSFI = MF.getFrameInfo().CreateStackObject(2, StackAlignment, false); | |||
23010 | SDValue StackSlot = | |||
23011 | DAG.getFrameIndex(SSFI, getPointerTy(DAG.getDataLayout())); | |||
23012 | ||||
23013 | MachineMemOperand *MMO = | |||
23014 | MF.getMachineMemOperand(MachinePointerInfo::getFixedStack(MF, SSFI), | |||
23015 | MachineMemOperand::MOStore, 2, 2); | |||
23016 | ||||
23017 | SDValue Ops[] = { DAG.getEntryNode(), StackSlot }; | |||
23018 | SDValue Chain = DAG.getMemIntrinsicNode(X86ISD::FNSTCW16m, DL, | |||
23019 | DAG.getVTList(MVT::Other), | |||
23020 | Ops, MVT::i16, MMO); | |||
23021 | ||||
23022 | // Load FP Control Word from stack slot | |||
23023 | SDValue CWD = | |||
23024 | DAG.getLoad(MVT::i16, DL, Chain, StackSlot, MachinePointerInfo()); | |||
23025 | ||||
23026 | // Transform as necessary | |||
23027 | SDValue CWD1 = | |||
23028 | DAG.getNode(ISD::SRL, DL, MVT::i16, | |||
23029 | DAG.getNode(ISD::AND, DL, MVT::i16, | |||
23030 | CWD, DAG.getConstant(0x800, DL, MVT::i16)), | |||
23031 | DAG.getConstant(11, DL, MVT::i8)); | |||
23032 | SDValue CWD2 = | |||
23033 | DAG.getNode(ISD::SRL, DL, MVT::i16, | |||
23034 | DAG.getNode(ISD::AND, DL, MVT::i16, | |||
23035 | CWD, DAG.getConstant(0x400, DL, MVT::i16)), | |||
23036 | DAG.getConstant(9, DL, MVT::i8)); | |||
23037 | ||||
23038 | SDValue RetVal = | |||
23039 | DAG.getNode(ISD::AND, DL, MVT::i16, | |||
23040 | DAG.getNode(ISD::ADD, DL, MVT::i16, | |||
23041 | DAG.getNode(ISD::OR, DL, MVT::i16, CWD1, CWD2), | |||
23042 | DAG.getConstant(1, DL, MVT::i16)), | |||
23043 | DAG.getConstant(3, DL, MVT::i16)); | |||
23044 | ||||
23045 | return DAG.getNode((VT.getSizeInBits() < 16 ? | |||
23046 | ISD::TRUNCATE : ISD::ZERO_EXTEND), DL, VT, RetVal); | |||
23047 | } | |||
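// Editorial sketch (not part of the original file): a compile-time check of
// the FPCW -> FLT_ROUNDS mapping computed above. 'FltRoundsFromFPCW' is a
// hypothetical helper; the four constants are the possible values of the
// rounding-control bits 11:10.
namespace {
constexpr int FltRoundsFromFPCW(unsigned CWD) {
  return ((((CWD & 0x800) >> 11) | ((CWD & 0x400) >> 9)) + 1) & 3;
}
static_assert(FltRoundsFromFPCW(0x000) == 1, "RC=00: round to nearest");
static_assert(FltRoundsFromFPCW(0x400) == 3, "RC=01: round to -inf");
static_assert(FltRoundsFromFPCW(0x800) == 2, "RC=10: round to +inf");
static_assert(FltRoundsFromFPCW(0xC00) == 0, "RC=11: round to zero");
} // end anonymous namespace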
23048 | ||||
23049 | // Split a unary integer op into 2 half-sized ops. | |||
23050 | static SDValue LowerVectorIntUnary(SDValue Op, SelectionDAG &DAG) { | |||
23051 | MVT VT = Op.getSimpleValueType(); | |||
23052 | unsigned NumElems = VT.getVectorNumElements(); | |||
23053 | unsigned SizeInBits = VT.getSizeInBits(); | |||
23054 | MVT EltVT = VT.getVectorElementType(); | |||
23055 | SDValue Src = Op.getOperand(0); | |||
23056 | assert(EltVT == Src.getSimpleValueType().getVectorElementType() && | |||
23057 | "Src and Op should have the same element type!"); | |||
23058 | ||||
23059 | // Extract the Lo/Hi vectors | |||
23060 | SDLoc dl(Op); | |||
23061 | SDValue Lo = extractSubVector(Src, 0, DAG, dl, SizeInBits / 2); | |||
23062 | SDValue Hi = extractSubVector(Src, NumElems / 2, DAG, dl, SizeInBits / 2); | |||
23063 | ||||
23064 | MVT NewVT = MVT::getVectorVT(EltVT, NumElems / 2); | |||
23065 | return DAG.getNode(ISD::CONCAT_VECTORS, dl, VT, | |||
23066 | DAG.getNode(Op.getOpcode(), dl, NewVT, Lo), | |||
23067 | DAG.getNode(Op.getOpcode(), dl, NewVT, Hi)); | |||
23068 | } | |||
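// Editorial note (not part of the original source): for instance, a CTPOP on
// v32i8 splits via the helper above into concat(ctpop(lo v16i8),
// ctpop(hi v16i8)).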
23069 | ||||
23070 | // Decompose 256-bit ops into smaller 128-bit ops. | |||
23071 | static SDValue Lower256IntUnary(SDValue Op, SelectionDAG &DAG) { | |||
23072 | assert(Op.getSimpleValueType().is256BitVector() && | |||
23073 | Op.getSimpleValueType().isInteger() && | |||
23074 | "Only handle AVX 256-bit vector integer operation"); | |||
23075 | return LowerVectorIntUnary(Op, DAG); | |||
23076 | } | |||
23077 | ||||
23078 | // Decompose 512-bit ops into smaller 256-bit ops. | |||
23079 | static SDValue Lower512IntUnary(SDValue Op, SelectionDAG &DAG) { | |||
23080 | assert(Op.getSimpleValueType().is512BitVector() && | |||
23081 | Op.getSimpleValueType().isInteger() && | |||
23082 | "Only handle AVX 512-bit vector integer operation"); | |||
23083 | return LowerVectorIntUnary(Op, DAG); | |||
23084 | } | |||
23085 | ||||
23086 | /// Lower a vector CTLZ using the natively supported vector CTLZ instruction. | |||
23087 | // | |||
23088 | // i8/i16 vectors are implemented using the dword LZCNT vector instruction | |||
23089 | // ( sub(trunc(lzcnt(zext32(x))), 32 - bitwidth(x)) ). If zext32(x) is | |||
23090 | // illegal, split the vector, perform the operation on its Lo and Hi parts | |||
23091 | // and concatenate the results. | |||
23092 | static SDValue LowerVectorCTLZ_AVX512CDI(SDValue Op, SelectionDAG &DAG, | |||
23093 | const X86Subtarget &Subtarget) { | |||
23094 | assert(Op.getOpcode() == ISD::CTLZ); | |||
23095 | SDLoc dl(Op); | |||
23096 | MVT VT = Op.getSimpleValueType(); | |||
23097 | MVT EltVT = VT.getVectorElementType(); | |||
23098 | unsigned NumElems = VT.getVectorNumElements(); | |||
23099 | ||||
23100 | assert((EltVT == MVT::i8 || EltVT == MVT::i16) && | |||
23101 | "Unsupported element type"); | |||
23102 | ||||
23103 | // Split the vector; its Lo and Hi parts will be handled in the next iteration. | |||
23104 | if (NumElems > 16 || | |||
23105 | (NumElems == 16 && !Subtarget.canExtendTo512DQ())) | |||
23106 | return LowerVectorIntUnary(Op, DAG); | |||
23107 | ||||
23108 | MVT NewVT = MVT::getVectorVT(MVT::i32, NumElems); | |||
23109 | assert((NewVT.is256BitVector() || NewVT.is512BitVector()) && | |||
23110 | "Unsupported value type for operation"); | |||
23111 | ||||
23112 | // Use the natively supported vector instruction vplzcntd. | |||
23113 | Op = DAG.getNode(ISD::ZERO_EXTEND, dl, NewVT, Op.getOperand(0)); | |||
23114 | SDValue CtlzNode = DAG.getNode(ISD::CTLZ, dl, NewVT, Op); | |||
23115 | SDValue TruncNode = DAG.getNode(ISD::TRUNCATE, dl, VT, CtlzNode); | |||
23116 | SDValue Delta = DAG.getConstant(32 - EltVT.getSizeInBits(), dl, VT); | |||
23117 | ||||
23118 | return DAG.getNode(ISD::SUB, dl, VT, TruncNode, Delta); | |||
23119 | } | |||

// Lower CTLZ using a PSHUFB lookup table implementation.
static SDValue LowerVectorCTLZInRegLUT(SDValue Op, const SDLoc &DL,
                                       const X86Subtarget &Subtarget,
                                       SelectionDAG &DAG) {
  MVT VT = Op.getSimpleValueType();
  int NumElts = VT.getVectorNumElements();
  int NumBytes = NumElts * (VT.getScalarSizeInBits() / 8);
  MVT CurrVT = MVT::getVectorVT(MVT::i8, NumBytes);

  // Per-nibble leading zero PSHUFB lookup table.
  const int LUT[16] = {/* 0 */ 4, /* 1 */ 3, /* 2 */ 2, /* 3 */ 2,
                       /* 4 */ 1, /* 5 */ 1, /* 6 */ 1, /* 7 */ 1,
                       /* 8 */ 0, /* 9 */ 0, /* a */ 0, /* b */ 0,
                       /* c */ 0, /* d */ 0, /* e */ 0, /* f */ 0};
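  // Worked example (illustration, not from the original source): ctlz of the
  // byte 0x05. Hi nibble = 0x0 -> LUT[0] = 4 and HiZ is true; lo nibble =
  // 0x5 -> LUT[5] = 1. Since the hi nibble is zero the results are added:
  // 4 + 1 = 5 leading zeros, matching ctlz8(0b00000101). For 0x1A the hi
  // nibble is 0x1, so the lo result is masked off and LUT[1] = 3 is used.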

  SmallVector<SDValue, 64> LUTVec;
  for (int i = 0; i < NumBytes; ++i)
    LUTVec.push_back(DAG.getConstant(LUT[i % 16], DL, MVT::i8));
  SDValue InRegLUT = DAG.getBuildVector(CurrVT, DL, LUTVec);

  // Begin by bitcasting the input to a byte vector, then split those bytes
  // into lo/hi nibbles and use the PSHUFB LUT to perform CTLZ on each of
  // them. If the hi input nibble is zero then we add both results together,
  // otherwise we just take the hi result (by masking the lo result to zero
  // before the add).
  SDValue Op0 = DAG.getBitcast(CurrVT, Op.getOperand(0));
  SDValue Zero = DAG.getConstant(0, DL, CurrVT);

  SDValue NibbleShift = DAG.getConstant(0x4, DL, CurrVT);
  SDValue Lo = Op0;
  SDValue Hi = DAG.getNode(ISD::SRL, DL, CurrVT, Op0, NibbleShift);
  SDValue HiZ;
  if (CurrVT.is512BitVector()) {
    MVT MaskVT = MVT::getVectorVT(MVT::i1, CurrVT.getVectorNumElements());
    HiZ = DAG.getSetCC(DL, MaskVT, Hi, Zero, ISD::SETEQ);
    HiZ = DAG.getNode(ISD::SIGN_EXTEND, DL, CurrVT, HiZ);
  } else {
    HiZ = DAG.getSetCC(DL, CurrVT, Hi, Zero, ISD::SETEQ);
  }

  Lo = DAG.getNode(X86ISD::PSHUFB, DL, CurrVT, InRegLUT, Lo);
  Hi = DAG.getNode(X86ISD::PSHUFB, DL, CurrVT, InRegLUT, Hi);
  Lo = DAG.getNode(ISD::AND, DL, CurrVT, Lo, HiZ);
  SDValue Res = DAG.getNode(ISD::ADD, DL, CurrVT, Lo, Hi);

  // Merge the result back from vXi8 to VT, working on the lo/hi halves
  // of the current vector width in the same way we did for the nibbles.
  // If the upper half of the input element is zero then add the halves'
  // leading zero counts together, otherwise just use the upper half's.
  // Double the width of the result until we are at the target width.
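  // Illustration (not from the original source): merging byte counts into an
  // i16 count for the element 0x001A. The upper byte is zero, so HiZ is true
  // and the result is ctlz8(hi) + ctlz8(lo) = 8 + 3 = 11, which equals
  // ctlz16(0x001A). For 0x1A00 the upper byte is nonzero, the lo count is
  // masked away, and ctlz8(0x1A) = 3 is used directly.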
  while (CurrVT != VT) {
    int CurrScalarSizeInBits = CurrVT.getScalarSizeInBits();
    int CurrNumElts = CurrVT.getVectorNumElements();
    MVT NextSVT = MVT::getIntegerVT(CurrScalarSizeInBits * 2);
    MVT NextVT = MVT::getVectorVT(NextSVT, CurrNumElts / 2);
    SDValue Shift = DAG.getConstant(CurrScalarSizeInBits, DL, NextVT);

    // Check if the upper half of the input element is zero.
    if (CurrVT.is512BitVector()) {
      MVT MaskVT = MVT::getVectorVT(MVT::i1, CurrVT.getVectorNumElements());
      HiZ = DAG.getSetCC(DL, MaskVT, DAG.getBitcast(CurrVT, Op0),
                         DAG.getBitcast(CurrVT, Zero), ISD::SETEQ);
      HiZ = DAG.getNode(ISD::SIGN_EXTEND, DL, CurrVT, HiZ);
    } else {
      HiZ = DAG.getSetCC(DL, CurrVT, DAG.getBitcast(CurrVT, Op0),
                         DAG.getBitcast(CurrVT, Zero), ISD::SETEQ);
    }
    HiZ = DAG.getBitcast(NextVT, HiZ);

    // Move the upper/lower halves to the lower bits as we'll be extending to
    // NextVT. Mask the lower result to zero if HiZ is true and add the
    // results together.
    SDValue ResNext = Res = DAG.getBitcast(NextVT, Res);
    SDValue R0 = DAG.getNode(ISD::SRL, DL, NextVT, ResNext, Shift);
    SDValue R1 = DAG.getNode(ISD::SRL, DL, NextVT, HiZ, Shift);
    R1 = DAG.getNode(ISD::AND, DL, NextVT, ResNext, R1);
    Res = DAG.getNode(ISD::ADD, DL, NextVT, R0, R1);
    CurrVT = NextVT;
  }

  return Res;
}

static SDValue LowerVectorCTLZ(SDValue Op, const SDLoc &DL,
                               const X86Subtarget &Subtarget,
                               SelectionDAG &DAG) {
  MVT VT = Op.getSimpleValueType();

  if (Subtarget.hasCDI() &&
      // vXi8 vectors need to be promoted to 512-bits for vXi32.
      (Subtarget.canExtendTo512DQ() || VT.getVectorElementType() != MVT::i8))
    return LowerVectorCTLZ_AVX512CDI(Op, DAG, Subtarget);

  // Decompose 256-bit ops into smaller 128-bit ops.
  if (VT.is256BitVector() && !Subtarget.hasInt256())
    return Lower256IntUnary(Op, DAG);

  // Decompose 512-bit ops into smaller 256-bit ops.
  if (VT.is512BitVector() && !Subtarget.hasBWI())
    return Lower512IntUnary(Op, DAG);

  assert(Subtarget.hasSSSE3() && "Expected SSSE3 support for PSHUFB");
  return LowerVectorCTLZInRegLUT(Op, DL, Subtarget, DAG);
}

static SDValue LowerCTLZ(SDValue Op, const X86Subtarget &Subtarget,
                         SelectionDAG &DAG) {
  MVT VT = Op.getSimpleValueType();
  MVT OpVT = VT;
  unsigned NumBits = VT.getSizeInBits();
  SDLoc dl(Op);
  unsigned Opc = Op.getOpcode();

  if (VT.isVector())
    return LowerVectorCTLZ(Op, dl, Subtarget, DAG);

  Op = Op.getOperand(0);
  if (VT == MVT::i8) {
    // Zero extend to i32 since there is no i8 bsr.
    OpVT = MVT::i32;
    Op = DAG.getNode(ISD::ZERO_EXTEND, dl, OpVT, Op);
  }

  // Issue a bsr (scan bits in reverse) which also sets EFLAGS.
  SDVTList VTs = DAG.getVTList(OpVT, MVT::i32);
  Op = DAG.getNode(X86ISD::BSR, dl, VTs, Op);

  if (Opc == ISD::CTLZ) {
    // If src is zero (i.e. bsr sets ZF), select NumBits + NumBits - 1 so
    // that the final xor below yields NumBits.
    SDValue Ops[] = {
      Op,
      DAG.getConstant(NumBits + NumBits - 1, dl, OpVT),
      DAG.getConstant(X86::COND_E, dl, MVT::i8),
      Op.getValue(1)
    };
    Op = DAG.getNode(X86ISD::CMOV, dl, OpVT, Ops);
  }

  // Finally xor with NumBits-1.
  Op = DAG.getNode(ISD::XOR, dl, OpVT, Op,
                   DAG.getConstant(NumBits - 1, dl, OpVT));
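  // Illustration (not from the original source): for nonzero x, bsr returns
  // the index of the highest set bit, so ctlz(x) = NumBits - 1 - bsr(x); and
  // since bsr(x) <= NumBits - 1 the subtraction equals xor with NumBits - 1.
  // E.g. i32 x = 0xF0: bsr = 7, 31 ^ 7 = 24 = ctlz32(0xF0). For x = 0 the
  // CMOV above substitutes 63, and 63 ^ 31 = 32 = NumBits.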

  if (VT == MVT::i8)
    Op = DAG.getNode(ISD::TRUNCATE, dl, MVT::i8, Op);
  return Op;
}

static SDValue LowerCTTZ(SDValue Op, const X86Subtarget &Subtarget,
                         SelectionDAG &DAG) {
  MVT VT = Op.getSimpleValueType();
  unsigned NumBits = VT.getScalarSizeInBits();
  SDValue N0 = Op.getOperand(0);
  SDLoc dl(Op);

  // Decompose 256-bit ops into smaller 128-bit ops.
  if (VT.is256BitVector() && !Subtarget.hasInt256())
    return Lower256IntUnary(Op, DAG);

  assert(!VT.isVector() && Op.getOpcode() == ISD::CTTZ &&
         "Only scalar CTTZ requires custom lowering");

  // Issue a bsf (scan bits forward) which also sets EFLAGS.
  SDVTList VTs = DAG.getVTList(VT, MVT::i32);
  Op = DAG.getNode(X86ISD::BSF, dl, VTs, N0);

  // If src is zero (i.e. bsf sets ZF), return NumBits.
  SDValue Ops[] = {
    Op,
    DAG.getConstant(NumBits, dl, VT),
    DAG.getConstant(X86::COND_E, dl, MVT::i8),
    Op.getValue(1)
  };
  return DAG.getNode(X86ISD::CMOV, dl, VT, Ops);
}

/// Break a 256-bit integer operation into two new 128-bit ones and then
/// concatenate the result back.
static SDValue split256IntArith(SDValue Op, SelectionDAG &DAG) {
  MVT VT = Op.getSimpleValueType();

  assert(VT.is256BitVector() && VT.isInteger() &&
         "Unsupported value type for operation");

  unsigned NumElems = VT.getVectorNumElements();
  SDLoc dl(Op);

  // Extract the LHS vectors.
  SDValue LHS = Op.getOperand(0);
  SDValue LHS1 = extract128BitVector(LHS, 0, DAG, dl);
  SDValue LHS2 = extract128BitVector(LHS, NumElems / 2, DAG, dl);

  // Extract the RHS vectors.
  SDValue RHS = Op.getOperand(1);
  SDValue RHS1 = extract128BitVector(RHS, 0, DAG, dl);
  SDValue RHS2 = extract128BitVector(RHS, NumElems / 2, DAG, dl);

  MVT EltVT = VT.getVectorElementType();
  MVT NewVT = MVT::getVectorVT(EltVT, NumElems / 2);

  return DAG.getNode(ISD::CONCAT_VECTORS, dl, VT,
                     DAG.getNode(Op.getOpcode(), dl, NewVT, LHS1, RHS1),
                     DAG.getNode(Op.getOpcode(), dl, NewVT, LHS2, RHS2));
}

/// Break a 512-bit integer operation into two new 256-bit ones and then
/// concatenate the result back.
static SDValue split512IntArith(SDValue Op, SelectionDAG &DAG) {
  MVT VT = Op.getSimpleValueType();

  assert(VT.is512BitVector() && VT.isInteger() &&
         "Unsupported value type for operation");

  unsigned NumElems = VT.getVectorNumElements();
  SDLoc dl(Op);

  // Extract the LHS vectors.
  SDValue LHS = Op.getOperand(0);
  SDValue LHS1 = extract256BitVector(LHS, 0, DAG, dl);
  SDValue LHS2 = extract256BitVector(LHS, NumElems / 2, DAG, dl);

  // Extract the RHS vectors.
  SDValue RHS = Op.getOperand(1);
  SDValue RHS1 = extract256BitVector(RHS, 0, DAG, dl);
  SDValue RHS2 = extract256BitVector(RHS, NumElems / 2, DAG, dl);

  MVT EltVT = VT.getVectorElementType();
  MVT NewVT = MVT::getVectorVT(EltVT, NumElems / 2);

  return DAG.getNode(ISD::CONCAT_VECTORS, dl, VT,
                     DAG.getNode(Op.getOpcode(), dl, NewVT, LHS1, RHS1),
                     DAG.getNode(Op.getOpcode(), dl, NewVT, LHS2, RHS2));
}

static SDValue LowerADD_SUB(SDValue Op, SelectionDAG &DAG) {
  MVT VT = Op.getSimpleValueType();
  if (VT.getScalarType() == MVT::i1)
    return DAG.getNode(ISD::XOR, SDLoc(Op), VT,
                       Op.getOperand(0), Op.getOperand(1));
  assert(Op.getSimpleValueType().is256BitVector() &&
         Op.getSimpleValueType().isInteger() &&
         "Only handle AVX 256-bit vector integer operation");
  return split256IntArith(Op, DAG);
}

static SDValue LowerADDSAT_SUBSAT(SDValue Op, SelectionDAG &DAG) {
  MVT VT = Op.getSimpleValueType();
  if (VT.getScalarType() == MVT::i1) {
    SDLoc dl(Op);
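    // For i1 lanes the saturating ops collapse to plain logic (illustration,
    // not from the original source): uaddsat(1, 1) clamps 2 to 1, i.e. OR;
    // usubsat(1, 1) = 0 and usubsat(0, 1) clamps to 0, i.e. AND-NOT. The
    // signed i1 cases (lane values 0 and -1) reduce to the same nodes.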
    switch (Op.getOpcode()) {
    default: llvm_unreachable("Expected saturated arithmetic opcode");
    case ISD::UADDSAT:
    case ISD::SADDSAT:
      return DAG.getNode(ISD::OR, dl, VT, Op.getOperand(0), Op.getOperand(1));
    case ISD::USUBSAT:
    case ISD::SSUBSAT:
      return DAG.getNode(ISD::AND, dl, VT, Op.getOperand(0),
                         DAG.getNOT(dl, Op.getOperand(1), VT));
    }
  }

  assert(Op.getSimpleValueType().is256BitVector() &&
         Op.getSimpleValueType().isInteger() &&
         "Only handle AVX 256-bit vector integer operation");
  return split256IntArith(Op, DAG);
}

static SDValue LowerABS(SDValue Op, SelectionDAG &DAG) {
  MVT VT = Op.getSimpleValueType();
  if (VT == MVT::i16 || VT == MVT::i32 || VT == MVT::i64) {
    // Since X86 does not have CMOV for 8-bit integers, we don't convert
    // 8-bit integer abs to NEG and CMOV.
    SDLoc DL(Op);
    SDValue N0 = Op.getOperand(0);
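    // Sketch of the pattern emitted below (not a comment in the original
    // source): Neg = 0 - x also sets EFLAGS, and the CMOV then computes
    // abs(x) = (0 >= x) ? Neg : x. E.g. x = -5: the SUB gives 5 with GE set,
    // so 5 is chosen; x = 7: GE is clear, so x itself is kept.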
    SDValue Neg = DAG.getNode(X86ISD::SUB, DL, DAG.getVTList(VT, MVT::i32),
                              DAG.getConstant(0, DL, VT), N0);
    SDValue Ops[] = {N0, Neg, DAG.getConstant(X86::COND_GE, DL, MVT::i8),
                     SDValue(Neg.getNode(), 1)};
    return DAG.getNode(X86ISD::CMOV, DL, VT, Ops);
  }

  assert(Op.getSimpleValueType().is256BitVector() &&
         Op.getSimpleValueType().isInteger() &&
         "Only handle AVX 256-bit vector integer operation");
  return Lower256IntUnary(Op, DAG);
}

static SDValue LowerMINMAX(SDValue Op, SelectionDAG &DAG) {
  MVT VT = Op.getSimpleValueType();

  // For AVX1 cases, split to use legal ops (everything but v4i64).
  if (VT.getScalarType() != MVT::i64 && VT.is256BitVector())
    return split256IntArith(Op, DAG);

  SDLoc DL(Op);
  unsigned Opcode = Op.getOpcode();
  SDValue N0 = Op.getOperand(0);
  SDValue N1 = Op.getOperand(1);

  // For pre-SSE41, we can perform UMIN/UMAX v8i16 by flipping the signbit,
  // using the SMIN/SMAX instructions and flipping the signbit back.
  if (VT == MVT::v8i16) {
    assert((Opcode == ISD::UMIN || Opcode == ISD::UMAX) &&
           "Unexpected MIN/MAX opcode");
    SDValue Sign = DAG.getConstant(APInt::getSignedMinValue(16), DL, VT);
    N0 = DAG.getNode(ISD::XOR, DL, VT, N0, Sign);
    N1 = DAG.getNode(ISD::XOR, DL, VT, N1, Sign);
    Opcode = (Opcode == ISD::UMIN ? ISD::SMIN : ISD::SMAX);
    SDValue Result = DAG.getNode(Opcode, DL, VT, N0, N1);
    return DAG.getNode(ISD::XOR, DL, VT, Result, Sign);
  }
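  // Illustration (not from the original source): xor with 0x8000 maps
  // unsigned i16 order onto signed order, so
  // umin(a, b) == smin(a ^ 0x8000, b ^ 0x8000) ^ 0x8000. E.g. a = 0xFFFF,
  // b = 0x0001: flipped to 0x7FFF and 0x8001, smin picks 0x8001 (negative),
  // and flipping back yields 0x0001, the unsigned minimum.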

  // Else, expand to a compare/select.
  ISD::CondCode CC;
  switch (Opcode) {
  case ISD::SMIN: CC = ISD::CondCode::SETLT; break;
  case ISD::SMAX: CC = ISD::CondCode::SETGT; break;
  case ISD::UMIN: CC = ISD::CondCode::SETULT; break;
  case ISD::UMAX: CC = ISD::CondCode::SETUGT; break;
  default: llvm_unreachable("Unknown MINMAX opcode");
  }

  SDValue Cond = DAG.getSetCC(DL, VT, N0, N1, CC);
  return DAG.getSelect(DL, VT, Cond, N0, N1);
}

static SDValue LowerMUL(SDValue Op, const X86Subtarget &Subtarget,
                        SelectionDAG &DAG) {
  SDLoc dl(Op);
  MVT VT = Op.getSimpleValueType();

  if (VT.getScalarType() == MVT::i1)
    return DAG.getNode(ISD::AND, dl, VT, Op.getOperand(0), Op.getOperand(1));

  // Decompose 256-bit ops into 128-bit ops.
  if (VT.is256BitVector() && !Subtarget.hasInt256())
    return split256IntArith(Op, DAG);

  SDValue A = Op.getOperand(0);
  SDValue B = Op.getOperand(1);

  // Lower v16i8/v32i8/v64i8 mul as extension to v8i16/v16i16/v32i16
  // vector pairs, multiply and truncate.
  if (VT == MVT::v16i8 || VT == MVT::v32i8 || VT == MVT::v64i8) {
    unsigned NumElts = VT.getVectorNumElements();

    if ((VT == MVT::v16i8 && Subtarget.hasInt256()) ||
        (VT == MVT::v32i8 && Subtarget.canExtendTo512BW())) {
      MVT ExVT = MVT::getVectorVT(MVT::i16, VT.getVectorNumElements());
      return DAG.getNode(
          ISD::TRUNCATE, dl, VT,
          DAG.getNode(ISD::MUL, dl, ExVT,
                      DAG.getNode(ISD::ANY_EXTEND, dl, ExVT, A),
                      DAG.getNode(ISD::ANY_EXTEND, dl, ExVT, B)));
    }

    MVT ExVT = MVT::getVectorVT(MVT::i16, NumElts / 2);

    // Extract the lo/hi parts and any-extend to i16.
    // We're only going to keep the low byte of each result element of the
    // pmullw, so it doesn't matter what's in the high byte of each 16-bit
    // element.
    SDValue Undef = DAG.getUNDEF(VT);
    SDValue ALo = DAG.getBitcast(ExVT, getUnpackl(DAG, dl, VT, A, Undef));
    SDValue AHi = DAG.getBitcast(ExVT, getUnpackh(DAG, dl, VT, A, Undef));

    SDValue BLo, BHi;
    if (ISD::isBuildVectorOfConstantSDNodes(B.getNode())) {
      // If the RHS is a constant, manually unpackl/unpackh.
      SmallVector<SDValue, 16> LoOps, HiOps;
      for (unsigned i = 0; i != NumElts; i += 16) {
        for (unsigned j = 0; j != 8; ++j) {
          LoOps.push_back(DAG.getAnyExtOrTrunc(B.getOperand(i + j), dl,
                                               MVT::i16));
          HiOps.push_back(DAG.getAnyExtOrTrunc(B.getOperand(i + j + 8), dl,
                                               MVT::i16));
        }
      }

      BLo = DAG.getBuildVector(ExVT, dl, LoOps);
      BHi = DAG.getBuildVector(ExVT, dl, HiOps);
    } else {
      BLo = DAG.getBitcast(ExVT, getUnpackl(DAG, dl, VT, B, Undef));
      BHi = DAG.getBitcast(ExVT, getUnpackh(DAG, dl, VT, B, Undef));
    }

    // Multiply, mask the lower 8 bits of the lo/hi results and pack.
    SDValue RLo = DAG.getNode(ISD::MUL, dl, ExVT, ALo, BLo);
    SDValue RHi = DAG.getNode(ISD::MUL, dl, ExVT, AHi, BHi);
    RLo = DAG.getNode(ISD::AND, dl, ExVT, RLo, DAG.getConstant(255, dl, ExVT));
    RHi = DAG.getNode(ISD::AND, dl, ExVT, RHi, DAG.getConstant(255, dl, ExVT));
    return DAG.getNode(X86ISD::PACKUS, dl, VT, RLo, RHi);
  }

  // Lower v4i32 mul as 2x shuffle, 2x pmuludq, 2x shuffle.
  if (VT == MVT::v4i32) {
    assert(Subtarget.hasSSE2() && !Subtarget.hasSSE41() &&
           "Should not custom lower when pmulld is available!");

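    // How the even/odd pmuludq trick works (illustration, not from the
    // original source): pmuludq <a|b|c|d>, <e|f|g|h> computes the two i64
    // products a*e and c*g. Since an i32 multiply only needs the low 32 bits
    // of each product, a second pmuludq on the odd elements (shuffled into
    // even positions) yields b*f and d*h, and a final shuffle of the low
    // dwords rebuilds <ae|bf|cg|dh>.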
    // Extract the odd parts.
    static const int UnpackMask[] = { 1, -1, 3, -1 };
    SDValue Aodds = DAG.getVectorShuffle(VT, dl, A, A, UnpackMask);
    SDValue Bodds = DAG.getVectorShuffle(VT, dl, B, B, UnpackMask);

    // Multiply the even parts.
    SDValue Evens = DAG.getNode(X86ISD::PMULUDQ, dl, MVT::v2i64,
                                DAG.getBitcast(MVT::v2i64, A),
                                DAG.getBitcast(MVT::v2i64, B));
    // Now multiply odd parts.
    SDValue Odds = DAG.getNode(X86ISD::PMULUDQ, dl, MVT::v2i64,
                               DAG.getBitcast(MVT::v2i64, Aodds),
                               DAG.getBitcast(MVT::v2i64, Bodds));

    Evens = DAG.getBitcast(VT, Evens);
    Odds = DAG.getBitcast(VT, Odds);

    // Merge the two vectors back together with a shuffle. This expands into 2
    // shuffles.
    static const int ShufMask[] = { 0, 4, 2, 6 };
    return DAG.getVectorShuffle(VT, dl, Evens, Odds, ShufMask);
  }

  assert((VT == MVT::v2i64 || VT == MVT::v4i64 || VT == MVT::v8i64) &&
         "Only know how to lower V2I64/V4I64/V8I64 multiply");
  assert(!Subtarget.hasDQI() && "DQI should use MULLQ");

  // Ahi = psrlqi(a, 32);
  // Bhi = psrlqi(b, 32);
  //
  // AloBlo = pmuludq(a, b);
  // AloBhi = pmuludq(a, Bhi);
  // AhiBlo = pmuludq(Ahi, b);
  //
  // Hi = psllqi(AloBhi + AhiBlo, 32);
  // return AloBlo + Hi;
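  //
  // Why this is correct (derivation, not from the original source): write
  // a = Ahi * 2^32 + Alo and b = Bhi * 2^32 + Blo. Then
  //   a * b = Alo*Blo + (Alo*Bhi + Ahi*Blo) * 2^32 + Ahi*Bhi * 2^64,
  // and the Ahi*Bhi term vanishes modulo 2^64, leaving exactly the three
  // pmuludq partial products above with the cross terms shifted left by 32.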
  KnownBits AKnown = DAG.computeKnownBits(A);
  KnownBits BKnown = DAG.computeKnownBits(B);

  APInt LowerBitsMask = APInt::getLowBitsSet(64, 32);
  bool ALoIsZero = LowerBitsMask.isSubsetOf(AKnown.Zero);
  bool BLoIsZero = LowerBitsMask.isSubsetOf(BKnown.Zero);

  APInt UpperBitsMask = APInt::getHighBitsSet(64, 32);
  bool AHiIsZero = UpperBitsMask.isSubsetOf(AKnown.Zero);
  bool BHiIsZero = UpperBitsMask.isSubsetOf(BKnown.Zero);

  SDValue Zero = DAG.getConstant(0, dl, VT);

  // Only multiply lo/hi halves that aren't known to be zero.
  SDValue AloBlo = Zero;
  if (!ALoIsZero && !BLoIsZero)
    AloBlo = DAG.getNode(X86ISD::PMULUDQ, dl, VT, A, B);

  SDValue AloBhi = Zero;
  if (!ALoIsZero && !BHiIsZero) {
    SDValue Bhi = getTargetVShiftByConstNode(X86ISD::VSRLI, dl, VT, B, 32, DAG);
    AloBhi = DAG.getNode(X86ISD::PMULUDQ, dl, VT, A, Bhi);
  }

  SDValue AhiBlo = Zero;
  if (!AHiIsZero && !BLoIsZero) {
    SDValue Ahi = getTargetVShiftByConstNode(X86ISD::VSRLI, dl, VT, A, 32, DAG);
    AhiBlo = DAG.getNode(X86ISD::PMULUDQ, dl, VT, Ahi, B);
  }

  SDValue Hi = DAG.getNode(ISD::ADD, dl, VT, AloBhi, AhiBlo);
  Hi = getTargetVShiftByConstNode(X86ISD::VSHLI, dl, VT, Hi, 32, DAG);

  return DAG.getNode(ISD::ADD, dl, VT, AloBlo, Hi);
}

static SDValue LowerMULH(SDValue Op, const X86Subtarget &Subtarget,
                         SelectionDAG &DAG) {
  SDLoc dl(Op);
  MVT VT = Op.getSimpleValueType();
  bool IsSigned = Op->getOpcode() == ISD::MULHS;
  unsigned NumElts = VT.getVectorNumElements();
  SDValue A = Op.getOperand(0);
  SDValue B = Op.getOperand(1);

  // Decompose 256-bit ops into 128-bit ops.
  if (VT.is256BitVector() && !Subtarget.hasInt256())
    return split256IntArith(Op, DAG);

  if (VT == MVT::v4i32 || VT == MVT::v8i32 || VT == MVT::v16i32) {
    assert((VT == MVT::v4i32 && Subtarget.hasSSE2()) ||
           (VT == MVT::v8i32 && Subtarget.hasInt256()) ||
           (VT == MVT::v16i32 && Subtarget.hasAVX512()));

    // PMULxD operations multiply each even value (starting at 0) of LHS with
    // the related value of RHS and produce a widened result.
    // E.g., PMULUDQ <4 x i32> <a|b|c|d>, <4 x i32> <e|f|g|h>
    // => <2 x i64> <ae|cg>
    //
    // In other words, to have all the results, we need to perform two
    // PMULxD:
    // 1. one with the even values.
    // 2. one with the odd values.
    // To achieve #2, we need to place the odd values at an even position.
    //
    // Place the odd value at an even position (basically, shift all values 1
    // step to the left):
    const int Mask[] = {1, -1,  3, -1,  5, -1,  7, -1,
                        9, -1, 11, -1, 13, -1, 15, -1};
    // <a|b|c|d> => <b|undef|d|undef>
    SDValue Odd0 = DAG.getVectorShuffle(VT, dl, A, A,
                                        makeArrayRef(&Mask[0], NumElts));
    // <e|f|g|h> => <f|undef|h|undef>
    SDValue Odd1 = DAG.getVectorShuffle(VT, dl, B, B,
                                        makeArrayRef(&Mask[0], NumElts));

    // Emit two multiplies, one for the lower 2 ints and one for the higher 2
    // ints.
    MVT MulVT = MVT::getVectorVT(MVT::i64, NumElts / 2);
    unsigned Opcode =
        (IsSigned && Subtarget.hasSSE41()) ? X86ISD::PMULDQ : X86ISD::PMULUDQ;
    // PMULUDQ <4 x i32> <a|b|c|d>, <4 x i32> <e|f|g|h>
    // => <2 x i64> <ae|cg>
    SDValue Mul1 = DAG.getBitcast(VT, DAG.getNode(Opcode, dl, MulVT,
                                                  DAG.getBitcast(MulVT, A),
                                                  DAG.getBitcast(MulVT, B)));
    // PMULUDQ <4 x i32> <b|undef|d|undef>, <4 x i32> <f|undef|h|undef>
    // => <2 x i64> <bf|dh>
    SDValue Mul2 = DAG.getBitcast(VT, DAG.getNode(Opcode, dl, MulVT,
                                                  DAG.getBitcast(MulVT, Odd0),
                                                  DAG.getBitcast(MulVT, Odd1)));

    // Shuffle it back into the right order.
    SmallVector<int, 16> ShufMask(NumElts);
    for (int i = 0; i != (int)NumElts; ++i)
      ShufMask[i] = (i / 2) * 2 + ((i % 2) * NumElts) + 1;

    SDValue Res = DAG.getVectorShuffle(VT, dl, Mul1, Mul2, ShufMask);

    // If we have a signed multiply but no PMULDQ, fix up the result of the
    // unsigned multiply.
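    // The fixup uses the standard identity (not stated in the original
    // source):
    //   mulhs(a, b) = mulhu(a, b) - (a < 0 ? b : 0) - (b < 0 ? a : 0),
    // which follows from a_signed = a_unsigned - 2^32 * (a < 0) for i32.
    // The setcc-produced all-ones masks select B and A for the two
    // correction terms below.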
    if (IsSigned && !Subtarget.hasSSE41()) {
      SDValue Zero = DAG.getConstant(0, dl, VT);
      SDValue T1 = DAG.getNode(ISD::AND, dl, VT,
                               DAG.getSetCC(dl, VT, Zero, A, ISD::SETGT), B);
      SDValue T2 = DAG.getNode(ISD::AND, dl, VT,
                               DAG.getSetCC(dl, VT, Zero, B, ISD::SETGT), A);

      SDValue Fixup = DAG.getNode(ISD::ADD, dl, VT, T1, T2);
      Res = DAG.getNode(ISD::SUB, dl, VT, Res, Fixup);
    }

    return Res;
  }

  // Only i8 vectors should need custom lowering after this.
  assert((VT == MVT::v16i8 || (VT == MVT::v32i8 && Subtarget.hasInt256()) ||
          (VT == MVT::v64i8 && Subtarget.hasBWI())) &&
         "Unsupported vector type");

  // Lower v16i8/v32i8 as extension to v8i16/v16i16 vector pairs, multiply,
  // logical shift down the upper half and pack back to i8.

  // With SSE41 we can use sign/zero extend, but for pre-SSE41 we unpack
  // and then ashr/lshr the upper bits down to the lower bits before multiply.
  unsigned ExAVX = IsSigned ? ISD::SIGN_EXTEND : ISD::ZERO_EXTEND;

  if ((VT == MVT::v16i8 && Subtarget.hasInt256()) ||
      (VT == MVT::v32i8 && Subtarget.canExtendTo512BW())) {
    MVT ExVT = MVT::getVectorVT(MVT::i16, NumElts);
    SDValue ExA = DAG.getNode(ExAVX, dl, ExVT, A);
    SDValue ExB = DAG.getNode(ExAVX, dl, ExVT, B);
    SDValue Mul = DAG.getNode(ISD::MUL, dl, ExVT, ExA, ExB);
    Mul = getTargetVShiftByConstNode(X86ISD::VSRLI, dl, ExVT, Mul, 8, DAG);
    return DAG.getNode(ISD::TRUNCATE, dl, VT, Mul);
  }

  // For signed 512-bit vectors, split into 256-bit vectors to allow the
  // sign-extension to occur.
  if (VT == MVT::v64i8 && IsSigned)
    return split512IntArith(Op, DAG);

  // Signed AVX2 implementation - extend xmm subvectors to ymm.
  if (VT == MVT::v32i8 && IsSigned) {
    SDValue Lo = DAG.getIntPtrConstant(0, dl);
    SDValue Hi = DAG.getIntPtrConstant(NumElts / 2, dl);

    MVT ExVT = MVT::v16i16;
    SDValue ALo = extract128BitVector(A, 0, DAG, dl);
    SDValue BLo = extract128BitVector(B, 0, DAG, dl);
    SDValue AHi = extract128BitVector(A, NumElts / 2, DAG, dl);
    SDValue BHi = extract128BitVector(B, NumElts / 2, DAG, dl);
    ALo = DAG.getNode(ExAVX, dl, ExVT, ALo);
    BLo = DAG.getNode(ExAVX, dl, ExVT, BLo);
    AHi = DAG.getNode(ExAVX, dl, ExVT, AHi);
    BHi = DAG.getNode(ExAVX, dl, ExVT, BHi);
    Lo = DAG.getNode(ISD::MUL, dl, ExVT, ALo, BLo);
    Hi = DAG.getNode(ISD::MUL, dl, ExVT, AHi, BHi);
    Lo = getTargetVShiftByConstNode(X86ISD::VSRLI, dl, ExVT, Lo, 8, DAG);
    Hi = getTargetVShiftByConstNode(X86ISD::VSRLI, dl, ExVT, Hi, 8, DAG);

    // Bitcast back to VT and then pack all the even elements from Lo and Hi.
    // Shuffle lowering should turn this into PACKUS+PERMQ.
    Lo = DAG.getBitcast(VT, Lo);
    Hi = DAG.getBitcast(VT, Hi);
    return DAG.getVectorShuffle(VT, dl, Lo, Hi,
                                { 0,  2,  4,  6,  8, 10, 12, 14,
                                 16, 18, 20, 22, 24, 26, 28, 30,
                                 32, 34, 36, 38, 40, 42, 44, 46,
                                 48, 50, 52, 54, 56, 58, 60, 62});
  }

  // For signed v16i8 and all unsigned vXi8 we will unpack the low and high
  // half of each 128-bit lane to widen to a vXi16 type. Do the multiplies,
  // shift the results and pack the half-lane results back together.

  MVT ExVT = MVT::getVectorVT(MVT::i16, NumElts / 2);

  static const int PSHUFDMask[] = { 8,  9, 10, 11, 12, 13, 14, 15,
                                   -1, -1, -1, -1, -1, -1, -1, -1};

  // Extract the lo parts and zero/sign extend to i16.
  // Only use SSE4.1 instructions for signed v16i8 where using unpack requires
  // shifts to sign extend. Using unpack for unsigned only requires an xor to
  // create zeros and a copy due to tied register constraints pre-AVX. But
  // using zero_extend_vector_inreg would require an additional pshufd for the
  // high part.
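  //
  // Note on the unpack trick (illustration, not from the original source):
  // unpacking with the input as the *second* operand places each byte in the
  // high byte of an i16 lane, and a VSRAI by 8 then shifts it down
  // arithmetically, reproducing sign extension (e.g. 0x80 -> 0x80xx ->
  // 0xFF80). Unpacking against zero instead puts the byte in the low half,
  // already zero-extended.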

  SDValue ALo, AHi;
  if (IsSigned && VT == MVT::v16i8 && Subtarget.hasSSE41()) {
    ALo = DAG.getNode(ISD::SIGN_EXTEND_VECTOR_INREG, dl, ExVT, A);

    AHi = DAG.getVectorShuffle(VT, dl, A, A, PSHUFDMask);
    AHi = DAG.getNode(ISD::SIGN_EXTEND_VECTOR_INREG, dl, ExVT, AHi);
  } else if (IsSigned) {
    ALo = DAG.getBitcast(ExVT, getUnpackl(DAG, dl, VT, DAG.getUNDEF(VT), A));
    AHi = DAG.getBitcast(ExVT, getUnpackh(DAG, dl, VT, DAG.getUNDEF(VT), A));

    ALo = getTargetVShiftByConstNode(X86ISD::VSRAI, dl, ExVT, ALo, 8, DAG);
    AHi = getTargetVShiftByConstNode(X86ISD::VSRAI, dl, ExVT, AHi, 8, DAG);
  } else {
    ALo = DAG.getBitcast(ExVT, getUnpackl(DAG, dl, VT, A,
                                          DAG.getConstant(0, dl, VT)));
    AHi = DAG.getBitcast(ExVT, getUnpackh(DAG, dl, VT, A,
                                          DAG.getConstant(0, dl, VT)));
  }

  SDValue BLo, BHi;
  if (ISD::isBuildVectorOfConstantSDNodes(B.getNode())) {
    // If the RHS is a constant, manually unpackl/unpackh and extend.
    SmallVector<SDValue, 16> LoOps, HiOps;
    for (unsigned i = 0; i != NumElts; i += 16) {
      for (unsigned j = 0; j != 8; ++j) {
        SDValue LoOp = B.getOperand(i + j);
        SDValue HiOp = B.getOperand(i + j + 8);

        if (IsSigned) {
          LoOp = DAG.getSExtOrTrunc(LoOp, dl, MVT::i16);
          HiOp = DAG.getSExtOrTrunc(HiOp, dl, MVT::i16);
        } else {
          LoOp = DAG.getZExtOrTrunc(LoOp, dl, MVT::i16);
          HiOp = DAG.getZExtOrTrunc(HiOp, dl, MVT::i16);
        }

        LoOps.push_back(LoOp);
        HiOps.push_back(HiOp);
      }
    }

    BLo = DAG.getBuildVector(ExVT, dl, LoOps);
    BHi = DAG.getBuildVector(ExVT, dl, HiOps);
  } else if (IsSigned && VT == MVT::v16i8 && Subtarget.hasSSE41()) {
    BLo = DAG.getNode(ISD::SIGN_EXTEND_VECTOR_INREG, dl, ExVT, B);

    BHi = DAG.getVectorShuffle(VT, dl, B, B, PSHUFDMask);
    BHi = DAG.getNode(ISD::SIGN_EXTEND_VECTOR_INREG, dl, ExVT, BHi);
  } else if (IsSigned) {
    BLo = DAG.getBitcast(ExVT, getUnpackl(DAG, dl, VT, DAG.getUNDEF(VT), B));
    BHi = DAG.getBitcast(ExVT, getUnpackh(DAG, dl, VT, DAG.getUNDEF(VT), B));

    BLo = getTargetVShiftByConstNode(X86ISD::VSRAI, dl, ExVT, BLo, 8, DAG);
    BHi = getTargetVShiftByConstNode(X86ISD::VSRAI, dl, ExVT, BHi, 8, DAG);
  } else {
    BLo = DAG.getBitcast(ExVT, getUnpackl(DAG, dl, VT, B,
                                          DAG.getConstant(0, dl, VT)));
    BHi = DAG.getBitcast(ExVT, getUnpackh(DAG, dl, VT, B,
                                          DAG.getConstant(0, dl, VT)));
  }

  // Multiply, lshr the upper 8 bits to the lower 8 bits of the lo/hi results
  // and pack back to vXi8.
  SDValue RLo = DAG.getNode(ISD::MUL, dl, ExVT, ALo, BLo);
  SDValue RHi = DAG.getNode(ISD::MUL, dl, ExVT, AHi, BHi);
  RLo = getTargetVShiftByConstNode(X86ISD::VSRLI, dl, ExVT, RLo, 8, DAG);
  RHi = getTargetVShiftByConstNode(X86ISD::VSRLI, dl, ExVT, RHi, 8, DAG);

  // Bitcast back to VT and then pack all the even elements from Lo and Hi.
  return DAG.getNode(X86ISD::PACKUS, dl, VT, RLo, RHi);
}

SDValue X86TargetLowering::LowerWin64_i128OP(SDValue Op,
                                             SelectionDAG &DAG) const {
  assert(Subtarget.isTargetWin64() && "Unexpected target");
  EVT VT = Op.getValueType();
  assert(VT.isInteger() && VT.getSizeInBits() == 128 &&
         "Unexpected return type for lowering");

  RTLIB::Libcall LC;
  bool isSigned;
  switch (Op->getOpcode()) {
  default: llvm_unreachable("Unexpected request for libcall!");
  case ISD::SDIV: isSigned = true; LC = RTLIB::SDIV_I128; break;
  case ISD::UDIV: isSigned = false; LC = RTLIB::UDIV_I128; break;
  case ISD::SREM: isSigned = true; LC = RTLIB::SREM_I128; break;
  case ISD::UREM: isSigned = false; LC = RTLIB::UREM_I128; break;
  case ISD::SDIVREM: isSigned = true; LC = RTLIB::SDIVREM_I128; break;
  case ISD::UDIVREM: isSigned = false; LC = RTLIB::UDIVREM_I128; break;
  }
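  // What follows (summary, not a comment in the original source): each i128
  // operand is spilled to a 16-byte-aligned stack slot and handed to the
  // runtime routine by pointer, and the 128-bit result is modelled as a
  // v2i64 register return that is bitcast back to the original i128 type.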

  SDLoc dl(Op);
  SDValue InChain = DAG.getEntryNode();

  TargetLowering::ArgListTy Args;
  TargetLowering::ArgListEntry Entry;
  for (unsigned i = 0, e = Op->getNumOperands(); i != e; ++i) {
    EVT ArgVT = Op->getOperand(i).getValueType();
    assert(ArgVT.isInteger() && ArgVT.getSizeInBits() == 128 &&
           "Unexpected argument type for lowering");
    SDValue StackPtr = DAG.CreateStackTemporary(ArgVT, 16);
    Entry.Node = StackPtr;
    InChain = DAG.getStore(InChain, dl, Op->getOperand(i), StackPtr,
                           MachinePointerInfo(), /* Alignment = */ 16);
    Type *ArgTy = ArgVT.getTypeForEVT(*DAG.getContext());
    Entry.Ty = PointerType::get(ArgTy, 0);
    Entry.IsSExt = false;
    Entry.IsZExt = false;
    Args.push_back(Entry);
  }

  SDValue Callee = DAG.getExternalSymbol(getLibcallName(LC),
                                         getPointerTy(DAG.getDataLayout()));

  TargetLowering::CallLoweringInfo CLI(DAG);
  CLI.setDebugLoc(dl)
      .setChain(InChain)
      .setLibCallee(
          getLibcallCallingConv(LC),
          static_cast<EVT>(MVT::v2i64).getTypeForEVT(*DAG.getContext()),
          Callee, std::move(Args))
      .setInRegister()
      .setSExtResult(isSigned)
      .setZExtResult(!isSigned);

  std::pair<SDValue, SDValue> CallInfo = LowerCallTo(CLI);
  return DAG.getBitcast(VT, CallInfo.first);
}

// Return true if the required (according to Opcode) shift-imm form is
// natively supported by the Subtarget.
static bool SupportedVectorShiftWithImm(MVT VT, const X86Subtarget &Subtarget,
                                        unsigned Opcode) {
  if (VT.getScalarSizeInBits() < 16)
    return false;

  if (VT.is512BitVector() && Subtarget.hasAVX512() &&
      (VT.getScalarSizeInBits() > 16 || Subtarget.hasBWI()))
    return true;

  bool LShift = (VT.is128BitVector() && Subtarget.hasSSE2()) ||
                (VT.is256BitVector() && Subtarget.hasInt256());

  bool AShift = LShift && (Subtarget.hasAVX512() ||
                           (VT != MVT::v2i64 && VT != MVT::v4i64));
  return (Opcode == ISD::SRA) ? AShift : LShift;
}

// The shift amount is a variable, but it is the same for all vector lanes.
// These instructions are defined together with shift-immediate.
static
bool SupportedVectorShiftWithBaseAmnt(MVT VT, const X86Subtarget &Subtarget,
                                      unsigned Opcode) {
  return SupportedVectorShiftWithImm(VT, Subtarget, Opcode);
}

// Return true if the required (according to Opcode) variable-shift form is
// natively supported by the Subtarget.
static bool SupportedVectorVarShift(MVT VT, const X86Subtarget &Subtarget,
                                    unsigned Opcode) {
  if (!Subtarget.hasInt256() || VT.getScalarSizeInBits() < 16)
    return false;

  // vXi16 is supported only on AVX-512 with BWI.
  if (VT.getScalarSizeInBits() == 16 && !Subtarget.hasBWI())
    return false;

  if (Subtarget.hasAVX512())
    return true;

  bool LShift = VT.is128BitVector() || VT.is256BitVector();
  bool AShift = LShift && VT != MVT::v2i64 && VT != MVT::v4i64;
  return (Opcode == ISD::SRA) ? AShift : LShift;
}

static SDValue LowerScalarImmediateShift(SDValue Op, SelectionDAG &DAG,
                                         const X86Subtarget &Subtarget) {
  MVT VT = Op.getSimpleValueType();
  SDLoc dl(Op);
  SDValue R = Op.getOperand(0);
  SDValue Amt = Op.getOperand(1);
  unsigned X86Opc = getTargetVShiftUniformOpcode(Op.getOpcode(), false);

  auto ArithmeticShiftRight64 = [&](uint64_t ShiftAmt) {
    assert((VT == MVT::v2i64 || VT == MVT::v4i64) && "Unexpected SRA type");
    MVT ExVT = MVT::getVectorVT(MVT::i32, VT.getVectorNumElements() * 2);
    SDValue Ex = DAG.getBitcast(ExVT, R);

    // ashr(R, 63) === cmp_slt(R, 0)
    if (ShiftAmt == 63 && Subtarget.hasSSE42()) {
      assert((VT != MVT::v4i64 || Subtarget.hasInt256()) &&
             "Unsupported PCMPGT op");
      return DAG.getNode(X86ISD::PCMPGT, dl, VT, DAG.getConstant(0, dl, VT), R);
    }

    if (ShiftAmt >= 32) {
      // Splat sign to upper i32 dst, and SRA upper i32 src to lower i32.
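      // In other words (illustration, not from the original source), for
      // s >= 32 the i64 result is { lo: ashr32(hi, s - 32), hi: ashr32(hi,
      // 31) }: the low dword comes entirely from the shifted high dword and
      // the high dword is pure sign splat, so two i32 VSRAIs plus a dword
      // shuffle reassemble the value.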
      SDValue Upper =
          getTargetVShiftByConstNode(X86ISD::VSRAI, dl, ExVT, Ex, 31, DAG);
      SDValue Lower = getTargetVShiftByConstNode(X86ISD::VSRAI, dl, ExVT, Ex,
                                                 ShiftAmt - 32, DAG);
      if (VT == MVT::v2i64)
        Ex = DAG.getVectorShuffle(ExVT, dl, Upper, Lower, {5, 1, 7, 3});
      if (VT == MVT::v4i64)
        Ex = DAG.getVectorShuffle(ExVT, dl, Upper, Lower,
                                  {9, 1, 11, 3, 13, 5, 15, 7});
    } else {
      // SRA upper i32, SRL whole i64 and select lower i32.
      SDValue Upper = getTargetVShiftByConstNode(X86ISD::VSRAI, dl, ExVT, Ex,
                                                 ShiftAmt, DAG);
      SDValue Lower =
          getTargetVShiftByConstNode(X86ISD::VSRLI, dl, VT, R, ShiftAmt, DAG);
      Lower = DAG.getBitcast(ExVT, Lower);
      if (VT == MVT::v2i64)
        Ex = DAG.getVectorShuffle(ExVT, dl, Upper, Lower, {4, 1, 6, 3});
      if (VT == MVT::v4i64)
        Ex = DAG.getVectorShuffle(ExVT, dl, Upper, Lower,
                                  {8, 1, 10, 3, 12, 5, 14, 7});
    }
    return DAG.getBitcast(VT, Ex);
  };

  // Optimize shl/srl/sra with constant shift amount.
  APInt APIntShiftAmt;
  if (!isConstantSplat(Amt, APIntShiftAmt))
    return SDValue();
23968 | uint64_t ShiftAmt = APIntShiftAmt.getZExtValue(); | |||
23969 | ||||
23970 | if (SupportedVectorShiftWithImm(VT, Subtarget, Op.getOpcode())) | |||
23971 | return getTargetVShiftByConstNode(X86Opc, dl, VT, R, ShiftAmt, DAG); | |||
23972 | ||||
23973 | // i64 SRA needs to be performed as partial shifts. | |||
23974 | if (((!Subtarget.hasXOP() && VT == MVT::v2i64) || | |||
23975 | (Subtarget.hasInt256() && VT == MVT::v4i64)) && | |||
23976 | Op.getOpcode() == ISD::SRA) | |||
23977 | return ArithmeticShiftRight64(ShiftAmt); | |||
23978 | ||||
23979 | if (VT == MVT::v16i8 || (Subtarget.hasInt256() && VT == MVT::v32i8) || | |||
23980 | VT == MVT::v64i8) { | |||
23981 | unsigned NumElts = VT.getVectorNumElements(); | |||
23982 | MVT ShiftVT = MVT::getVectorVT(MVT::i16, NumElts / 2); | |||
23983 | ||||
23984 | // Simple i8 add case | |||
23985 | if (Op.getOpcode() == ISD::SHL && ShiftAmt == 1) | |||
23986 | return DAG.getNode(ISD::ADD, dl, VT, R, R); | |||
23987 | ||||
23988 | // ashr(R, 7) === cmp_slt(R, 0) | |||
23989 | if (Op.getOpcode() == ISD::SRA && ShiftAmt == 7) { | |||
23990 | SDValue Zeros = DAG.getConstant(0, dl, VT); | |||
23991 | if (VT.is512BitVector()) { | |||
23992 | assert(VT == MVT::v64i8 && "Unexpected element type!"); | |||
23993 | SDValue CMP = DAG.getSetCC(dl, MVT::v64i1, Zeros, R, ISD::SETGT); | |||
23994 | return DAG.getNode(ISD::SIGN_EXTEND, dl, VT, CMP); | |||
23995 | } | |||
23996 | return DAG.getNode(X86ISD::PCMPGT, dl, VT, Zeros, R); | |||
23997 | } | |||
23998 | ||||
23999 | // XOP can shift v16i8 directly instead of as a v8i16 shift + mask. | |||
24000 | if (VT == MVT::v16i8 && Subtarget.hasXOP()) | |||
24001 | return SDValue(); | |||
24002 | ||||
24003 | if (Op.getOpcode() == ISD::SHL) { | |||
24004 | // Make a large shift. | |||
24005 | SDValue SHL = getTargetVShiftByConstNode(X86ISD::VSHLI, dl, ShiftVT, R, | |||
24006 | ShiftAmt, DAG); | |||
24007 | SHL = DAG.getBitcast(VT, SHL); | |||
24008 | // Zero out the rightmost bits. | |||
24009 | return DAG.getNode(ISD::AND, dl, VT, SHL, | |||
24010 | DAG.getConstant(uint8_t(-1U << ShiftAmt), dl, VT)); | |||
24011 | } | |||
24012 | if (Op.getOpcode() == ISD::SRL) { | |||
24013 | // Make a large shift. | |||
24014 | SDValue SRL = getTargetVShiftByConstNode(X86ISD::VSRLI, dl, ShiftVT, R, | |||
24015 | ShiftAmt, DAG); | |||
24016 | SRL = DAG.getBitcast(VT, SRL); | |||
24017 | // Zero out the leftmost bits. | |||
24018 | return DAG.getNode(ISD::AND, dl, VT, SRL, | |||
24019 | DAG.getConstant(uint8_t(-1U) >> ShiftAmt, dl, VT)); | |||
24020 | } | |||
24021 | if (Op.getOpcode() == ISD::SRA) { | |||
24022 | // ashr(R, Amt) === sub(xor(lshr(R, Amt), Mask), Mask) | |||
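// (E.g. for the i8 lane 0x80 (-128) with Amt = 2: lshr gives 0x20, the
// mask is 128 >> 2 = 0x20, the xor clears it to 0x00, and the sub
// sign-extends: 0x00 - 0x20 = 0xE0 (-32) == -128 >> 2 arithmetic.)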
24023 | SDValue Res = DAG.getNode(ISD::SRL, dl, VT, R, Amt); | |||
24024 | ||||
24025 | SDValue Mask = DAG.getConstant(128 >> ShiftAmt, dl, VT); | |||
24026 | Res = DAG.getNode(ISD::XOR, dl, VT, Res, Mask); | |||
24027 | Res = DAG.getNode(ISD::SUB, dl, VT, Res, Mask); | |||
24028 | return Res; | |||
24029 | } | |||
24030 | llvm_unreachable("Unknown shift opcode.")::llvm::llvm_unreachable_internal("Unknown shift opcode.", "/build/llvm-toolchain-snapshot-8~svn350071/lib/Target/X86/X86ISelLowering.cpp" , 24030); | |||
24031 | } | |||
24032 | ||||
24033 | return SDValue(); | |||
24034 | } | |||
24035 | ||||
24036 | // If V is a splat value, return the source vector and set the splat index. | |||
24037 | static SDValue IsSplatVector(SDValue V, int &SplatIdx, SelectionDAG &DAG) { | |||
24038 | V = peekThroughEXTRACT_SUBVECTORs(V); | |||
24039 | ||||
24040 | EVT VT = V.getValueType(); | |||
24041 | unsigned Opcode = V.getOpcode(); | |||
24042 | switch (Opcode) { | |||
24043 | default: { | |||
24044 | APInt UndefElts; | |||
24045 | APInt DemandedElts = APInt::getAllOnesValue(VT.getVectorNumElements()); | |||
24046 | if (DAG.isSplatValue(V, DemandedElts, UndefElts)) { | |||
24047 | // Handle case where all demanded elements are UNDEF. | |||
24048 | if (DemandedElts.isSubsetOf(UndefElts)) { | |||
24049 | SplatIdx = 0; | |||
24050 | return DAG.getUNDEF(VT); | |||
24051 | } | |||
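// The splat index is the first demanded lane that isn't undef: the
// count of trailing ones in (UndefElts & DemandedElts) is exactly the
// number of leading undef lanes. E.g. UndefElts = 0b0011 with all lanes
// demanded gives SplatIdx = 2.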
24052 | SplatIdx = (UndefElts & DemandedElts).countTrailingOnes(); | |||
24053 | return V; | |||
24054 | } | |||
24055 | break; | |||
24056 | } | |||
24057 | case ISD::VECTOR_SHUFFLE: { | |||
24058 | // Check if this is a shuffle node doing a splat. | |||
24059 | // TODO - remove this and rely purely on SelectionDAG::isSplatValue, | |||
24060 | // getTargetVShiftNode currently struggles without the splat source. | |||
24061 | auto *SVN = cast<ShuffleVectorSDNode>(V); | |||
24062 | if (!SVN->isSplat()) | |||
24063 | break; | |||
24064 | int Idx = SVN->getSplatIndex(); | |||
24065 | int NumElts = V.getValueType().getVectorNumElements(); | |||
24066 | SplatIdx = Idx % NumElts; | |||
24067 | return V.getOperand(Idx / NumElts); | |||
24068 | } | |||
24069 | } | |||
24070 | ||||
24071 | return SDValue(); | |||
24072 | } | |||
24073 | ||||
24074 | static SDValue GetSplatValue(SDValue V, const SDLoc &dl, | |||
24075 | SelectionDAG &DAG) { | |||
24076 | int SplatIdx; | |||
24077 | if (SDValue SrcVector = IsSplatVector(V, SplatIdx, DAG)) | |||
24078 | return DAG.getNode(ISD::EXTRACT_VECTOR_ELT, dl, | |||
24079 | SrcVector.getValueType().getScalarType(), SrcVector, | |||
24080 | DAG.getIntPtrConstant(SplatIdx, dl)); | |||
24081 | return SDValue(); | |||
24082 | } | |||
24083 | ||||
24084 | static SDValue LowerScalarVariableShift(SDValue Op, SelectionDAG &DAG, | |||
24085 | const X86Subtarget &Subtarget) { | |||
24086 | MVT VT = Op.getSimpleValueType(); | |||
24087 | SDLoc dl(Op); | |||
24088 | SDValue R = Op.getOperand(0); | |||
24089 | SDValue Amt = Op.getOperand(1); | |||
24090 | unsigned Opcode = Op.getOpcode(); | |||
24091 | unsigned X86OpcI = getTargetVShiftUniformOpcode(Opcode, false); | |||
24092 | unsigned X86OpcV = getTargetVShiftUniformOpcode(Opcode, true); | |||
24093 | ||||
24094 | if (SDValue BaseShAmt = GetSplatValue(Amt, dl, DAG)) { | |||
24095 | if (SupportedVectorShiftWithBaseAmnt(VT, Subtarget, Opcode)) { | |||
24096 | MVT EltVT = VT.getVectorElementType(); | |||
24097 | assert(EltVT.bitsLE(MVT::i64) && "Unexpected element type!"); | |||
24098 | if (EltVT != MVT::i64 && EltVT.bitsGT(MVT::i32)) | |||
24099 | BaseShAmt = DAG.getNode(ISD::ZERO_EXTEND, dl, MVT::i64, BaseShAmt); | |||
24100 | else if (EltVT.bitsLT(MVT::i32)) | |||
24101 | BaseShAmt = DAG.getNode(ISD::ZERO_EXTEND, dl, MVT::i32, BaseShAmt); | |||
24102 | ||||
24103 | return getTargetVShiftNode(X86OpcI, dl, VT, R, BaseShAmt, Subtarget, DAG); | |||
24104 | } | |||
24105 | ||||
24106 | // vXi8 shifts - shift as v8i16 + mask result. | |||
24107 | if (((VT == MVT::v16i8 && !Subtarget.canExtendTo512DQ()) || | |||
24108 | (VT == MVT::v32i8 && !Subtarget.canExtendTo512BW()) || | |||
24109 | VT == MVT::v64i8) && | |||
24110 | !Subtarget.hasXOP()) { | |||
24111 | unsigned NumElts = VT.getVectorNumElements(); | |||
24112 | MVT ExtVT = MVT::getVectorVT(MVT::i16, NumElts / 2); | |||
24113 | if (SupportedVectorShiftWithBaseAmnt(ExtVT, Subtarget, Opcode)) { | |||
24114 | unsigned LogicalOp = (Opcode == ISD::SHL ? ISD::SHL : ISD::SRL); | |||
24115 | unsigned LogicalX86Op = getTargetVShiftUniformOpcode(LogicalOp, false); | |||
24116 | BaseShAmt = DAG.getNode(ISD::ZERO_EXTEND, dl, MVT::i32, BaseShAmt); | |||
24117 | ||||
24118 | // Create the mask using vXi16 shifts. For shift-rights we need to move | |||
24119 | // the upper byte down before splatting the vXi8 mask. | |||
24120 | SDValue BitMask = DAG.getConstant(-1, dl, ExtVT); | |||
24121 | BitMask = getTargetVShiftNode(LogicalX86Op, dl, ExtVT, BitMask, | |||
24122 | BaseShAmt, Subtarget, DAG); | |||
24123 | if (Opcode != ISD::SHL) | |||
24124 | BitMask = getTargetVShiftByConstNode(LogicalX86Op, dl, ExtVT, BitMask, | |||
24125 | 8, DAG); | |||
24126 | BitMask = DAG.getBitcast(VT, BitMask); | |||
24127 | BitMask = DAG.getVectorShuffle(VT, dl, BitMask, BitMask, | |||
24128 | SmallVector<int, 64>(NumElts, 0)); | |||
24129 | ||||
24130 | SDValue Res = getTargetVShiftNode(LogicalX86Op, dl, ExtVT, | |||
24131 | DAG.getBitcast(ExtVT, R), BaseShAmt, | |||
24132 | Subtarget, DAG); | |||
24133 | Res = DAG.getBitcast(VT, Res); | |||
24134 | Res = DAG.getNode(ISD::AND, dl, VT, Res, BitMask); | |||
24135 | ||||
24136 | if (Opcode == ISD::SRA) { | |||
24137 | // ashr(R, Amt) === sub(xor(lshr(R, Amt), SignMask), SignMask) | |||
24138 | // SignMask = lshr(SignBit, Amt) - safe to do this with PSRLW. | |||
24139 | SDValue SignMask = DAG.getConstant(0x8080, dl, ExtVT); | |||
24140 | SignMask = getTargetVShiftNode(LogicalX86Op, dl, ExtVT, SignMask, | |||
24141 | BaseShAmt, Subtarget, DAG); | |||
24142 | SignMask = DAG.getBitcast(VT, SignMask); | |||
24143 | Res = DAG.getNode(ISD::XOR, dl, VT, Res, SignMask); | |||
24144 | Res = DAG.getNode(ISD::SUB, dl, VT, Res, SignMask); | |||
24145 | } | |||
24146 | return Res; | |||
24147 | } | |||
24148 | } | |||
24149 | } | |||
24150 | ||||
24151 | // Check cases (mainly 32-bit) where i64 is expanded into high and low parts. | |||
24152 | if (VT == MVT::v2i64 && Amt.getOpcode() == ISD::BITCAST && | |||
24153 | Amt.getOperand(0).getOpcode() == ISD::BUILD_VECTOR) { | |||
24154 | Amt = Amt.getOperand(0); | |||
24155 | unsigned Ratio = 64 / Amt.getScalarValueSizeInBits(); | |||
24156 | std::vector<SDValue> Vals(Ratio); | |||
24157 | for (unsigned i = 0; i != Ratio; ++i) | |||
24158 | Vals[i] = Amt.getOperand(i); | |||
24159 | for (unsigned i = Ratio, e = Amt.getNumOperands(); i != e; i += Ratio) { | |||
24160 | for (unsigned j = 0; j != Ratio; ++j) | |||
24161 | if (Vals[j] != Amt.getOperand(i + j)) | |||
24162 | return SDValue(); | |||
24163 | } | |||
24164 | ||||
24165 | if (SupportedVectorShiftWithBaseAmnt(VT, Subtarget, Op.getOpcode())) | |||
24166 | return DAG.getNode(X86OpcV, dl, VT, R, Op.getOperand(1)); | |||
24167 | } | |||
24168 | return SDValue(); | |||
24169 | } | |||
24170 | ||||
24171 | // Convert a shift/rotate left amount to a multiplication scale factor. | |||
24172 | static SDValue convertShiftLeftToScale(SDValue Amt, const SDLoc &dl, | |||
24173 | const X86Subtarget &Subtarget, | |||
24174 | SelectionDAG &DAG) { | |||
24175 | MVT VT = Amt.getSimpleValueType(); | |||
24176 | if (!(VT == MVT::v8i16 || VT == MVT::v4i32 || | |||
24177 | (Subtarget.hasInt256() && VT == MVT::v16i16) || | |||
24178 | (!Subtarget.hasAVX512() && VT == MVT::v16i8))) | |||
24179 | return SDValue(); | |||
24180 | ||||
24181 | if (ISD::isBuildVectorOfConstantSDNodes(Amt.getNode())) { | |||
24182 | SmallVector<SDValue, 8> Elts; | |||
24183 | MVT SVT = VT.getVectorElementType(); | |||
24184 | unsigned SVTBits = SVT.getSizeInBits(); | |||
24185 | APInt One(SVTBits, 1); | |||
24186 | unsigned NumElems = VT.getVectorNumElements(); | |||
24187 | ||||
24188 | for (unsigned i = 0; i != NumElems; ++i) { | |||
24189 | SDValue Op = Amt->getOperand(i); | |||
24190 | if (Op->isUndef()) { | |||
24191 | Elts.push_back(Op); | |||
24192 | continue; | |||
24193 | } | |||
24194 | ||||
24195 | ConstantSDNode *ND = cast<ConstantSDNode>(Op); | |||
24196 | APInt C(SVTBits, ND->getAPIntValue().getZExtValue()); | |||
24197 | uint64_t ShAmt = C.getZExtValue(); | |||
24198 | if (ShAmt >= SVTBits) { | |||
24199 | Elts.push_back(DAG.getUNDEF(SVT)); | |||
24200 | continue; | |||
24201 | } | |||
24202 | Elts.push_back(DAG.getConstant(One.shl(ShAmt), dl, SVT)); | |||
24203 | } | |||
24204 | return DAG.getBuildVector(VT, dl, Elts); | |||
24205 | } | |||
24206 | ||||
24207 | // If the target doesn't support variable shifts, use either FP conversion | |||
24208 | // or integer multiplication to avoid shifting each element individually. | |||
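// For the v4i32 path below, (Amt << 23) + 0x3f800000 builds an f32 with
// exponent 127 + Amt and a zero mantissa, i.e. the float 2^Amt, so
// FP_TO_SINT yields the scale 1 << Amt. E.g. Amt = 5 produces the f32
// 32.0 (0x42000000) and thus the multiplier 32. (Amt = 31 still works
// out on x86, since CVTTPS2DQ returns 0x80000000 for the out-of-range
// 2^31, which is the bit pattern the multiply needs.)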
24209 | if (VT == MVT::v4i32) { | |||
24210 | Amt = DAG.getNode(ISD::SHL, dl, VT, Amt, DAG.getConstant(23, dl, VT)); | |||
24211 | Amt = DAG.getNode(ISD::ADD, dl, VT, Amt, | |||
24212 | DAG.getConstant(0x3f800000U, dl, VT)); | |||
24213 | Amt = DAG.getBitcast(MVT::v4f32, Amt); | |||
24214 | return DAG.getNode(ISD::FP_TO_SINT, dl, VT, Amt); | |||
24215 | } | |||
24216 | ||||
24217 | // AVX2 can more effectively perform this as a zext/trunc to/from v8i32. | |||
24218 | if (VT == MVT::v8i16 && !Subtarget.hasAVX2()) { | |||
24219 | SDValue Z = DAG.getConstant(0, dl, VT); | |||
24220 | SDValue Lo = DAG.getBitcast(MVT::v4i32, getUnpackl(DAG, dl, VT, Amt, Z)); | |||
24221 | SDValue Hi = DAG.getBitcast(MVT::v4i32, getUnpackh(DAG, dl, VT, Amt, Z)); | |||
24222 | Lo = convertShiftLeftToScale(Lo, dl, Subtarget, DAG); | |||
24223 | Hi = convertShiftLeftToScale(Hi, dl, Subtarget, DAG); | |||
24224 | if (Subtarget.hasSSE41()) | |||
24225 | return DAG.getNode(X86ISD::PACKUS, dl, VT, Lo, Hi); | |||
24226 | ||||
24227 | return DAG.getVectorShuffle(VT, dl, DAG.getBitcast(VT, Lo), | |||
24228 | DAG.getBitcast(VT, Hi), | |||
24229 | {0, 2, 4, 6, 8, 10, 12, 14}); | |||
24230 | } | |||
24231 | ||||
24232 | return SDValue(); | |||
24233 | } | |||
24234 | ||||
24235 | static SDValue LowerShift(SDValue Op, const X86Subtarget &Subtarget, | |||
24236 | SelectionDAG &DAG) { | |||
24237 | MVT VT = Op.getSimpleValueType(); | |||
24238 | SDLoc dl(Op); | |||
24239 | SDValue R = Op.getOperand(0); | |||
24240 | SDValue Amt = Op.getOperand(1); | |||
24241 | unsigned EltSizeInBits = VT.getScalarSizeInBits(); | |||
24242 | bool ConstantAmt = ISD::isBuildVectorOfConstantSDNodes(Amt.getNode()); | |||
24243 | ||||
24244 | unsigned Opc = Op.getOpcode(); | |||
24245 | unsigned X86OpcV = getTargetVShiftUniformOpcode(Opc, true); | |||
24246 | unsigned X86OpcI = getTargetVShiftUniformOpcode(Opc, false); | |||
24247 | ||||
24248 | assert(VT.isVector() && "Custom lowering only for vector shifts!"); | |||
24249 | assert(Subtarget.hasSSE2() && "Only custom lower when we have SSE2!"); | |||
24250 | ||||
24251 | if (SDValue V = LowerScalarImmediateShift(Op, DAG, Subtarget)) | |||
24252 | return V; | |||
24253 | ||||
24254 | if (SDValue V = LowerScalarVariableShift(Op, DAG, Subtarget)) | |||
24255 | return V; | |||
24256 | ||||
24257 | if (SupportedVectorVarShift(VT, Subtarget, Opc)) | |||
24258 | return Op; | |||
24259 | ||||
24260 | // XOP has 128-bit variable logical/arithmetic shifts. | |||
24261 | // +ve/-ve Amt = shift left/right. | |||
24262 | if (Subtarget.hasXOP() && (VT == MVT::v2i64 || VT == MVT::v4i32 || | |||
24263 | VT == MVT::v8i16 || VT == MVT::v16i8)) { | |||
24264 | if (Opc == ISD::SRL || Opc == ISD::SRA) { | |||
24265 | SDValue Zero = DAG.getConstant(0, dl, VT); | |||
24266 | Amt = DAG.getNode(ISD::SUB, dl, VT, Zero, Amt); | |||
24267 | } | |||
24268 | if (Opc == ISD::SHL || Opc == ISD::SRL) | |||
24269 | return DAG.getNode(X86ISD::VPSHL, dl, VT, R, Amt); | |||
24270 | if (Opc == ISD::SRA) | |||
24271 | return DAG.getNode(X86ISD::VPSHA, dl, VT, R, Amt); | |||
24272 | } | |||
24273 | ||||
24274 | // v2i64 vector logical shifts can efficiently avoid scalarization - do the | |||
24275 | // shifts per-lane and then shuffle the partial results back together. | |||
24276 | if (VT == MVT::v2i64 && Opc != ISD::SRA) { | |||
24277 | // Splat the shift amounts so the scalar shifts above will catch it. | |||
24278 | SDValue Amt0 = DAG.getVectorShuffle(VT, dl, Amt, Amt, {0, 0}); | |||
24279 | SDValue Amt1 = DAG.getVectorShuffle(VT, dl, Amt, Amt, {1, 1}); | |||
24280 | SDValue R0 = DAG.getNode(Opc, dl, VT, R, Amt0); | |||
24281 | SDValue R1 = DAG.getNode(Opc, dl, VT, R, Amt1); | |||
24282 | return DAG.getVectorShuffle(VT, dl, R0, R1, {0, 3}); | |||
24283 | } | |||
24284 | ||||
24285 | // i64 vector arithmetic shift can be emulated with the transform: | |||
24286 | // M = lshr(SIGN_MASK, Amt) | |||
24287 | // ashr(R, Amt) === sub(xor(lshr(R, Amt), M), M) | |||
24288 | if ((VT == MVT::v2i64 || (VT == MVT::v4i64 && Subtarget.hasInt256())) && | |||
24289 | Opc == ISD::SRA) { | |||
24290 | SDValue S = DAG.getConstant(APInt::getSignMask(64), dl, VT); | |||
24291 | SDValue M = DAG.getNode(ISD::SRL, dl, VT, S, Amt); | |||
24292 | R = DAG.getNode(ISD::SRL, dl, VT, R, Amt); | |||
24293 | R = DAG.getNode(ISD::XOR, dl, VT, R, M); | |||
24294 | R = DAG.getNode(ISD::SUB, dl, VT, R, M); | |||
24295 | return R; | |||
24296 | } | |||
24297 | ||||
24298 | // If possible, lower this shift as a sequence of two shifts by | |||
24299 | // constant plus a BLENDing shuffle instead of scalarizing it. | |||
24300 | // Example: | |||
24301 | // (v4i32 (srl A, (build_vector < X, Y, Y, Y>))) | |||
24302 | // | |||
24303 | // Could be rewritten as: | |||
24304 | // (v4i32 (MOVSS (srl A, <Y,Y,Y,Y>), (srl A, <X,X,X,X>))) | |||
24305 | // | |||
24306 | // The advantage is that the two shifts from the example would be | |||
24307 | // lowered as X86ISD::VSRLI nodes in parallel before blending. | |||
24308 | if (ConstantAmt && (VT == MVT::v8i16 || VT == MVT::v4i32 || | |||
24309 | (VT == MVT::v16i16 && Subtarget.hasInt256()))) { | |||
24310 | SDValue Amt1, Amt2; | |||
24311 | unsigned NumElts = VT.getVectorNumElements(); | |||
24312 | SmallVector<int, 8> ShuffleMask; | |||
24313 | for (unsigned i = 0; i != NumElts; ++i) { | |||
24314 | SDValue A = Amt->getOperand(i); | |||
24315 | if (A.isUndef()) { | |||
24316 | ShuffleMask.push_back(SM_SentinelUndef); | |||
24317 | continue; | |||
24318 | } | |||
24319 | if (!Amt1 || Amt1 == A) { | |||
24320 | ShuffleMask.push_back(i); | |||
24321 | Amt1 = A; | |||
24322 | continue; | |||
24323 | } | |||
24324 | if (!Amt2 || Amt2 == A) { | |||
24325 | ShuffleMask.push_back(i + NumElts); | |||
24326 | Amt2 = A; | |||
24327 | continue; | |||
24328 | } | |||
24329 | break; | |||
24330 | } | |||
24331 | ||||
24332 | // Only perform this blend if we can perform it without loading a mask. | |||
24333 | if (ShuffleMask.size() == NumElts && Amt1 && Amt2 && | |||
24334 | (VT != MVT::v16i16 || | |||
24335 | is128BitLaneRepeatedShuffleMask(VT, ShuffleMask)) && | |||
24336 | (VT == MVT::v4i32 || Subtarget.hasSSE41() || Opc != ISD::SHL || | |||
24337 | canWidenShuffleElements(ShuffleMask))) { | |||
24338 | auto *Cst1 = dyn_cast<ConstantSDNode>(Amt1); | |||
24339 | auto *Cst2 = dyn_cast<ConstantSDNode>(Amt2); | |||
24340 | if (Cst1 && Cst2 && Cst1->getAPIntValue().ult(EltSizeInBits) && | |||
24341 | Cst2->getAPIntValue().ult(EltSizeInBits)) { | |||
24342 | SDValue Shift1 = getTargetVShiftByConstNode(X86OpcI, dl, VT, R, | |||
24343 | Cst1->getZExtValue(), DAG); | |||
24344 | SDValue Shift2 = getTargetVShiftByConstNode(X86OpcI, dl, VT, R, | |||
24345 | Cst2->getZExtValue(), DAG); | |||
24346 | return DAG.getVectorShuffle(VT, dl, Shift1, Shift2, ShuffleMask); | |||
24347 | } | |||
24348 | } | |||
24349 | } | |||
24350 | ||||
24351 | // If possible, lower this packed shift into a vector multiply instead of | |||
24352 | // expanding it into a sequence of scalar shifts. | |||
24353 | if (Opc == ISD::SHL) | |||
24354 | if (SDValue Scale = convertShiftLeftToScale(Amt, dl, Subtarget, DAG)) | |||
24355 | return DAG.getNode(ISD::MUL, dl, VT, R, Scale); | |||
24356 | ||||
24357 | // Constant ISD::SRL can be performed efficiently on vXi16 vectors as we | |||
24358 | // can replace it with ISD::MULHU, creating the scale factor from (NumEltBits - Amt). | |||
24359 | if (Opc == ISD::SRL && ConstantAmt && | |||
24360 | (VT == MVT::v8i16 || (VT == MVT::v16i16 && Subtarget.hasInt256()))) { | |||
24361 | SDValue EltBits = DAG.getConstant(EltSizeInBits, dl, VT); | |||
24362 | SDValue RAmt = DAG.getNode(ISD::SUB, dl, VT, EltBits, Amt); | |||
24363 | if (SDValue Scale = convertShiftLeftToScale(RAmt, dl, Subtarget, DAG)) { | |||
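// srl(R, Amt) == mulhu(R, 1 << (16 - Amt)): e.g. for Amt = 3 the scale
// is 0x2000 and mulhu(R, 0x2000) = (R * 8192) >> 16 = R >> 3. Amt == 0
// lanes are the exception (their scale would need 1 << 16), so the
// select below passes R through unchanged for them.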
24364 | SDValue Zero = DAG.getConstant(0, dl, VT); | |||
24365 | SDValue ZAmt = DAG.getSetCC(dl, VT, Amt, Zero, ISD::SETEQ); | |||
24366 | SDValue Res = DAG.getNode(ISD::MULHU, dl, VT, R, Scale); | |||
24367 | return DAG.getSelect(dl, VT, ZAmt, R, Res); | |||
24368 | } | |||
24369 | } | |||
24370 | ||||
24371 | // Constant ISD::SRA can be performed efficiently on vXi16 vectors as we | |||
24372 | // can replace it with ISD::MULHS, creating the scale factor from (NumEltBits - Amt). | |||
24373 | // TODO: Special case handling for shift by 0/1, really we can afford either | |||
24374 | // of these cases in pre-SSE41/XOP/AVX512 but not both. | |||
24375 | if (Opc == ISD::SRA && ConstantAmt && | |||
24376 | (VT == MVT::v8i16 || (VT == MVT::v16i16 && Subtarget.hasInt256())) && | |||
24377 | ((Subtarget.hasSSE41() && !Subtarget.hasXOP() && | |||
24378 | !Subtarget.hasAVX512()) || | |||
24379 | DAG.isKnownNeverZero(Amt))) { | |||
24380 | SDValue EltBits = DAG.getConstant(EltSizeInBits, dl, VT); | |||
24381 | SDValue RAmt = DAG.getNode(ISD::SUB, dl, VT, EltBits, Amt); | |||
24382 | if (SDValue Scale = convertShiftLeftToScale(RAmt, dl, Subtarget, DAG)) { | |||
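// The scale 1 << (16 - Amt) only acts as an unsigned power of two for
// Amt >= 2: Amt == 1 gives 0x8000, which MULHS treats as -32768, and
// Amt == 0 would need 1 << 16, which doesn't fit in i16 at all. The two
// selects below patch up exactly those lanes.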
24383 | SDValue Amt0 = | |||
24384 | DAG.getSetCC(dl, VT, Amt, DAG.getConstant(0, dl, VT), ISD::SETEQ); | |||
24385 | SDValue Amt1 = | |||
24386 | DAG.getSetCC(dl, VT, Amt, DAG.getConstant(1, dl, VT), ISD::SETEQ); | |||
24387 | SDValue Sra1 = | |||
24388 | getTargetVShiftByConstNode(X86ISD::VSRAI, dl, VT, R, 1, DAG); | |||
24389 | SDValue Res = DAG.getNode(ISD::MULHS, dl, VT, R, Scale); | |||
24390 | Res = DAG.getSelect(dl, VT, Amt0, R, Res); | |||
24391 | return DAG.getSelect(dl, VT, Amt1, Sra1, Res); | |||
24392 | } | |||
24393 | } | |||
24394 | ||||
24395 | // v4i32 Non Uniform Shifts. | |||
24396 | // If the shift amount is constant we can shift each lane using the SSE2 | |||
24397 | // immediate shifts, else we need to zero-extend each lane to the lower i64 | |||
24398 | // and shift using the SSE2 variable shifts. | |||
24399 | // The separate results can then be blended together. | |||
24400 | if (VT == MVT::v4i32) { | |||
24401 | SDValue Amt0, Amt1, Amt2, Amt3; | |||
24402 | if (ConstantAmt) { | |||
24403 | Amt0 = DAG.getVectorShuffle(VT, dl, Amt, DAG.getUNDEF(VT), {0, 0, 0, 0}); | |||
24404 | Amt1 = DAG.getVectorShuffle(VT, dl, Amt, DAG.getUNDEF(VT), {1, 1, 1, 1}); | |||
24405 | Amt2 = DAG.getVectorShuffle(VT, dl, Amt, DAG.getUNDEF(VT), {2, 2, 2, 2}); | |||
24406 | Amt3 = DAG.getVectorShuffle(VT, dl, Amt, DAG.getUNDEF(VT), {3, 3, 3, 3}); | |||
24407 | } else { | |||
24408 | // The SSE2 shifts use the lower i64 as the shift amount for all | |||
24409 | // lanes, and the upper i64 is ignored. On AVX we're better off just | |||
24410 | // zero-extending, but for SSE duplicating the top 16 bits is cheaper | |||
24411 | // and has the same effect for out-of-range values. | |||
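// (I.e. for an amount {lo16, hi16} the constructed i64 count is
// {lo16, hi16, hi16, hi16}: when hi16 == 0 this equals the amount
// exactly, and when hi16 != 0 both counts are >= 2^16 and therefore
// equally out of range.)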
24412 | if (Subtarget.hasAVX()) { | |||
24413 | SDValue Z = DAG.getConstant(0, dl, VT); | |||
24414 | Amt0 = DAG.getVectorShuffle(VT, dl, Amt, Z, {0, 4, -1, -1}); | |||
24415 | Amt1 = DAG.getVectorShuffle(VT, dl, Amt, Z, {1, 5, -1, -1}); | |||
24416 | Amt2 = DAG.getVectorShuffle(VT, dl, Amt, Z, {2, 6, -1, -1}); | |||
24417 | Amt3 = DAG.getVectorShuffle(VT, dl, Amt, Z, {3, 7, -1, -1}); | |||
24418 | } else { | |||
24419 | SDValue Amt01 = DAG.getBitcast(MVT::v8i16, Amt); | |||
24420 | SDValue Amt23 = DAG.getVectorShuffle(MVT::v8i16, dl, Amt01, Amt01, | |||
24421 | {4, 5, 6, 7, -1, -1, -1, -1}); | |||
24422 | Amt0 = DAG.getVectorShuffle(MVT::v8i16, dl, Amt01, Amt01, | |||
24423 | {0, 1, 1, 1, -1, -1, -1, -1}); | |||
24424 | Amt1 = DAG.getVectorShuffle(MVT::v8i16, dl, Amt01, Amt01, | |||
24425 | {2, 3, 3, 3, -1, -1, -1, -1}); | |||
24426 | Amt2 = DAG.getVectorShuffle(MVT::v8i16, dl, Amt23, Amt23, | |||
24427 | {0, 1, 1, 1, -1, -1, -1, -1}); | |||
24428 | Amt3 = DAG.getVectorShuffle(MVT::v8i16, dl, Amt23, Amt23, | |||
24429 | {2, 3, 3, 3, -1, -1, -1, -1}); | |||
24430 | } | |||
24431 | } | |||
24432 | ||||
24433 | unsigned ShOpc = ConstantAmt ? Opc : X86OpcV; | |||
24434 | SDValue R0 = DAG.getNode(ShOpc, dl, VT, R, DAG.getBitcast(VT, Amt0)); | |||
24435 | SDValue R1 = DAG.getNode(ShOpc, dl, VT, R, DAG.getBitcast(VT, Amt1)); | |||
24436 | SDValue R2 = DAG.getNode(ShOpc, dl, VT, R, DAG.getBitcast(VT, Amt2)); | |||
24437 | SDValue R3 = DAG.getNode(ShOpc, dl, VT, R, DAG.getBitcast(VT, Amt3)); | |||
24438 | ||||
24439 | // Merge the shifted lane results optimally with/without PBLENDW. | |||
24440 | // TODO - ideally shuffle combining would handle this. | |||
24441 | if (Subtarget.hasSSE41()) { | |||
24442 | SDValue R02 = DAG.getVectorShuffle(VT, dl, R0, R2, {0, -1, 6, -1}); | |||
24443 | SDValue R13 = DAG.getVectorShuffle(VT, dl, R1, R3, {-1, 1, -1, 7}); | |||
24444 | return DAG.getVectorShuffle(VT, dl, R02, R13, {0, 5, 2, 7}); | |||
24445 | } | |||
24446 | SDValue R01 = DAG.getVectorShuffle(VT, dl, R0, R1, {0, -1, -1, 5}); | |||
24447 | SDValue R23 = DAG.getVectorShuffle(VT, dl, R2, R3, {2, -1, -1, 7}); | |||
24448 | return DAG.getVectorShuffle(VT, dl, R01, R23, {0, 3, 4, 7}); | |||
24449 | } | |||
24450 | ||||
24451 | // It's worth extending once and using the vXi16/vXi32 shifts for smaller | |||
24452 | // types, but without AVX512 the extra overheads to get from vXi8 to vXi32 | |||
24453 | // make the existing SSE solution better. | |||
24454 | // NOTE: We honor the preferred vector width before promoting to 512 bits. | |||
24455 | if ((Subtarget.hasInt256() && VT == MVT::v8i16) || | |||
24456 | (Subtarget.canExtendTo512DQ() && VT == MVT::v16i16) || | |||
24457 | (Subtarget.canExtendTo512DQ() && VT == MVT::v16i8) || | |||
24458 | (Subtarget.canExtendTo512BW() && VT == MVT::v32i8) || | |||
24459 | (Subtarget.hasBWI() && Subtarget.hasVLX() && VT == MVT::v16i8)) { | |||
24460 | assert((!Subtarget.hasBWI() || VT == MVT::v32i8 || VT == MVT::v16i8) && | |||
24461 | "Unexpected vector type"); | |||
24462 | MVT EvtSVT = Subtarget.hasBWI() ? MVT::i16 : MVT::i32; | |||
24463 | MVT ExtVT = MVT::getVectorVT(EvtSVT, VT.getVectorNumElements()); | |||
24464 | unsigned ExtOpc = Opc == ISD::SRA ? ISD::SIGN_EXTEND : ISD::ZERO_EXTEND; | |||
24465 | R = DAG.getNode(ExtOpc, dl, ExtVT, R); | |||
24466 | Amt = DAG.getNode(ISD::ZERO_EXTEND, dl, ExtVT, Amt); | |||
24467 | return DAG.getNode(ISD::TRUNCATE, dl, VT, | |||
24468 | DAG.getNode(Opc, dl, ExtVT, R, Amt)); | |||
24469 | } | |||
24470 | ||||
24471 | // Constant ISD::SRA/SRL can be performed efficiently on vXi8 vectors as we | |||
24472 | // extend to vXi16 to perform a MUL scale effectively as a MUL_LOHI. | |||
24473 | if (ConstantAmt && (Opc == ISD::SRA || Opc == ISD::SRL) && | |||
24474 | (VT == MVT::v16i8 || VT == MVT::v64i8 || | |||
24475 | (VT == MVT::v32i8 && Subtarget.hasInt256())) && | |||
24476 | !Subtarget.hasXOP()) { | |||
24477 | int NumElts = VT.getVectorNumElements(); | |||
24478 | SDValue Cst8 = DAG.getConstant(8, dl, MVT::i8); | |||
24479 | ||||
24480 | // Extend constant shift amount to vXi16 (it doesn't matter if the type | |||
24481 | // isn't legal). | |||
24482 | MVT ExVT = MVT::getVectorVT(MVT::i16, NumElts); | |||
24483 | Amt = DAG.getZExtOrTrunc(Amt, dl, ExVT); | |||
24484 | Amt = DAG.getNode(ISD::SUB, dl, ExVT, DAG.getConstant(8, dl, ExVT), Amt); | |||
24485 | Amt = DAG.getNode(ISD::SHL, dl, ExVT, DAG.getConstant(1, dl, ExVT), Amt); | |||
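// Each Amt lane now holds 1 << (8 - OrigAmt), so multiplying the
// widened source and taking the high byte performs the shift: e.g. for
// a logical right shift by 3, (zext(R) * (1 << 5)) >> 8 == R >> 3.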
24486 | assert(ISD::isBuildVectorOfConstantSDNodes(Amt.getNode()) && | |||
24487 | "Constant build vector expected"); | |||
24488 | ||||
24489 | if (VT == MVT::v16i8 && Subtarget.hasInt256()) { | |||
24490 | R = Opc == ISD::SRA ? DAG.getSExtOrTrunc(R, dl, ExVT) | |||
24491 | : DAG.getZExtOrTrunc(R, dl, ExVT); | |||
24492 | R = DAG.getNode(ISD::MUL, dl, ExVT, R, Amt); | |||
24493 | R = DAG.getNode(X86ISD::VSRLI, dl, ExVT, R, Cst8); | |||
24494 | return DAG.getZExtOrTrunc(R, dl, VT); | |||
24495 | } | |||
24496 | ||||
24497 | SmallVector<SDValue, 16> LoAmt, HiAmt; | |||
24498 | for (int i = 0; i != NumElts; i += 16) { | |||
24499 | for (int j = 0; j != 8; ++j) { | |||
24500 | LoAmt.push_back(Amt.getOperand(i + j)); | |||
24501 | HiAmt.push_back(Amt.getOperand(i + j + 8)); | |||
24502 | } | |||
24503 | } | |||
24504 | ||||
24505 | MVT VT16 = MVT::getVectorVT(MVT::i16, NumElts / 2); | |||
24506 | SDValue LoA = DAG.getBuildVector(VT16, dl, LoAmt); | |||
24507 | SDValue HiA = DAG.getBuildVector(VT16, dl, HiAmt); | |||
24508 | ||||
24509 | SDValue LoR = DAG.getBitcast(VT16, getUnpackl(DAG, dl, VT, R, R)); | |||
24510 | SDValue HiR = DAG.getBitcast(VT16, getUnpackh(DAG, dl, VT, R, R)); | |||
24511 | LoR = DAG.getNode(X86OpcI, dl, VT16, LoR, Cst8); | |||
24512 | HiR = DAG.getNode(X86OpcI, dl, VT16, HiR, Cst8); | |||
24513 | LoR = DAG.getNode(ISD::MUL, dl, VT16, LoR, LoA); | |||
24514 | HiR = DAG.getNode(ISD::MUL, dl, VT16, HiR, HiA); | |||
24515 | LoR = DAG.getNode(X86ISD::VSRLI, dl, VT16, LoR, Cst8); | |||
24516 | HiR = DAG.getNode(X86ISD::VSRLI, dl, VT16, HiR, Cst8); | |||
24517 | return DAG.getNode(X86ISD::PACKUS, dl, VT, LoR, HiR); | |||
24518 | } | |||
24519 | ||||
24520 | if (VT == MVT::v16i8 || | |||
24521 | (VT == MVT::v32i8 && Subtarget.hasInt256() && !Subtarget.hasXOP()) || | |||
24522 | (VT == MVT::v64i8 && Subtarget.hasBWI())) { | |||
24523 | MVT ExtVT = MVT::getVectorVT(MVT::i16, VT.getVectorNumElements() / 2); | |||
24524 | ||||
24525 | auto SignBitSelect = [&](MVT SelVT, SDValue Sel, SDValue V0, SDValue V1) { | |||
24526 | if (VT.is512BitVector()) { | |||
24527 | // On AVX512BW targets we make use of the fact that VSELECT lowers | |||
24528 | // to a masked blend which selects bytes based just on the sign bit | |||
24529 | // extracted to a mask. | |||
24530 | MVT MaskVT = MVT::getVectorVT(MVT::i1, VT.getVectorNumElements()); | |||
24531 | V0 = DAG.getBitcast(VT, V0); | |||
24532 | V1 = DAG.getBitcast(VT, V1); | |||
24533 | Sel = DAG.getBitcast(VT, Sel); | |||
24534 | Sel = DAG.getSetCC(dl, MaskVT, DAG.getConstant(0, dl, VT), Sel, | |||
24535 | ISD::SETGT); | |||
24536 | return DAG.getBitcast(SelVT, DAG.getSelect(dl, VT, Sel, V0, V1)); | |||
24537 | } else if (Subtarget.hasSSE41()) { | |||
24538 | // On SSE41 targets we make use of the fact that VSELECT lowers | |||
24539 | // to PBLENDVB which selects bytes based just on the sign bit. | |||
24540 | V0 = DAG.getBitcast(VT, V0); | |||
24541 | V1 = DAG.getBitcast(VT, V1); | |||
24542 | Sel = DAG.getBitcast(VT, Sel); | |||
24543 | return DAG.getBitcast(SelVT, DAG.getSelect(dl, VT, Sel, V0, V1)); | |||
24544 | } | |||
24545 | // On pre-SSE41 targets we test for the sign bit by comparing to | |||
24546 | // zero - a negative value will set all bits of the lanes to true | |||
24547 | // and VSELECT uses that in its OR(AND(V0,C),AND(V1,~C)) lowering. | |||
24548 | SDValue Z = DAG.getConstant(0, dl, SelVT); | |||
24549 | SDValue C = DAG.getNode(X86ISD::PCMPGT, dl, SelVT, Z, Sel); | |||
24550 | return DAG.getSelect(dl, SelVT, C, V0, V1); | |||
24551 | }; | |||
24552 | ||||
24553 | // Turn 'a' into a mask suitable for VSELECT: a = a << 5; | |||
24554 | // We can safely do this using i16 shifts as we're only interested in | |||
24555 | // the 3 lower bits of each byte. | |||
24556 | Amt = DAG.getBitcast(ExtVT, Amt); | |||
24557 | Amt = getTargetVShiftByConstNode(X86ISD::VSHLI, dl, ExtVT, Amt, 5, DAG); | |||
24558 | Amt = DAG.getBitcast(VT, Amt); | |||
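// After the << 5, bit 2 of each byte's shift amount sits in that byte's
// sign bit, which is all PBLENDVB/PCMPGT consult; each "a += a" below
// then moves the next lower amount bit into place for the following
// round (shift by 4, then 2, then 1).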
24559 | ||||
24560 | if (Opc == ISD::SHL || Opc == ISD::SRL) { | |||
24561 | // r = VSELECT(r, shift(r, 4), a); | |||
24562 | SDValue M = DAG.getNode(Opc, dl, VT, R, DAG.getConstant(4, dl, VT)); | |||
24563 | R = SignBitSelect(VT, Amt, M, R); | |||
24564 | ||||
24565 | // a += a | |||
24566 | Amt = DAG.getNode(ISD::ADD, dl, VT, Amt, Amt); | |||
24567 | ||||
24568 | // r = VSELECT(r, shift(r, 2), a); | |||
24569 | M = DAG.getNode(Opc, dl, VT, R, DAG.getConstant(2, dl, VT)); | |||
24570 | R = SignBitSelect(VT, Amt, M, R); | |||
24571 | ||||
24572 | // a += a | |||
24573 | Amt = DAG.getNode(ISD::ADD, dl, VT, Amt, Amt); | |||
24574 | ||||
24575 | // return VSELECT(r, shift(r, 1), a); | |||
24576 | M = DAG.getNode(Opc, dl, VT, R, DAG.getConstant(1, dl, VT)); | |||
24577 | R = SignBitSelect(VT, Amt, M, R); | |||
24578 | return R; | |||
24579 | } | |||
24580 | ||||
24581 | if (Opc == ISD::SRA) { | |||
24582 | // For SRA we need to unpack each byte to the higher byte of a i16 vector | |||
24583 | // so we can correctly sign extend. We don't care what happens to the | |||
24584 | // lower byte. | |||
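// E.g. the byte 0x83 unpacked into the high half becomes the i16
// 0x83xx; an arithmetic i16 shift then fills from the real sign bit,
// and the final VSRLI/PACKUS below move the shifted byte back down.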
24585 | SDValue ALo = getUnpackl(DAG, dl, VT, DAG.getUNDEF(VT), Amt); | |||
24586 | SDValue AHi = getUnpackh(DAG, dl, VT, DAG.getUNDEF(VT), Amt); | |||
24587 | SDValue RLo = getUnpackl(DAG, dl, VT, DAG.getUNDEF(VT), R); | |||
24588 | SDValue RHi = getUnpackh(DAG, dl, VT, DAG.getUNDEF(VT), R); | |||
24589 | ALo = DAG.getBitcast(ExtVT, ALo); | |||
24590 | AHi = DAG.getBitcast(ExtVT, AHi); | |||
24591 | RLo = DAG.getBitcast(ExtVT, RLo); | |||
24592 | RHi = DAG.getBitcast(ExtVT, RHi); | |||
24593 | ||||
24594 | // r = VSELECT(r, shift(r, 4), a); | |||
24595 | SDValue MLo = getTargetVShiftByConstNode(X86OpcI, dl, ExtVT, RLo, 4, DAG); | |||
24596 | SDValue MHi = getTargetVShiftByConstNode(X86OpcI, dl, ExtVT, RHi, 4, DAG); | |||
24597 | RLo = SignBitSelect(ExtVT, ALo, MLo, RLo); | |||
24598 | RHi = SignBitSelect(ExtVT, AHi, MHi, RHi); | |||
24599 | ||||
24600 | // a += a | |||
24601 | ALo = DAG.getNode(ISD::ADD, dl, ExtVT, ALo, ALo); | |||
24602 | AHi = DAG.getNode(ISD::ADD, dl, ExtVT, AHi, AHi); | |||
24603 | ||||
24604 | // r = VSELECT(r, shift(r, 2), a); | |||
24605 | MLo = getTargetVShiftByConstNode(X86OpcI, dl, ExtVT, RLo, 2, DAG); | |||
24606 | MHi = getTargetVShiftByConstNode(X86OpcI, dl, ExtVT, RHi, 2, DAG); | |||
24607 | RLo = SignBitSelect(ExtVT, ALo, MLo, RLo); | |||
24608 | RHi = SignBitSelect(ExtVT, AHi, MHi, RHi); | |||
24609 | ||||
24610 | // a += a | |||
24611 | ALo = DAG.getNode(ISD::ADD, dl, ExtVT, ALo, ALo); | |||
24612 | AHi = DAG.getNode(ISD::ADD, dl, ExtVT, AHi, AHi); | |||
24613 | ||||
24614 | // r = VSELECT(r, shift(r, 1), a); | |||
24615 | MLo = getTargetVShiftByConstNode(X86OpcI, dl, ExtVT, RLo, 1, DAG); | |||
24616 | MHi = getTargetVShiftByConstNode(X86OpcI, dl, ExtVT, RHi, 1, DAG); | |||
24617 | RLo = SignBitSelect(ExtVT, ALo, MLo, RLo); | |||
24618 | RHi = SignBitSelect(ExtVT, AHi, MHi, RHi); | |||
24619 | ||||
24620 | // Logical shift the result back to the lower byte, leaving a zero upper | |||
24621 | // byte, meaning that we can safely pack with PACKUSWB. | |||
24622 | RLo = getTargetVShiftByConstNode(X86ISD::VSRLI, dl, ExtVT, RLo, 8, DAG); | |||
24623 | RHi = getTargetVShiftByConstNode(X86ISD::VSRLI, dl, ExtVT, RHi, 8, DAG); | |||
24624 | return DAG.getNode(X86ISD::PACKUS, dl, VT, RLo, RHi); | |||
24625 | } | |||
24626 | } | |||
24627 | ||||
24628 | if (Subtarget.hasInt256() && !Subtarget.hasXOP() && VT == MVT::v16i16) { | |||
24629 | MVT ExtVT = MVT::v8i32; | |||
24630 | SDValue Z = DAG.getConstant(0, dl, VT); | |||
24631 | SDValue ALo = getUnpackl(DAG, dl, VT, Amt, Z); | |||
24632 | SDValue AHi = getUnpackh(DAG, dl, VT, Amt, Z); | |||
24633 | SDValue RLo = getUnpackl(DAG, dl, VT, Z, R); | |||
24634 | SDValue RHi = getUnpackh(DAG, dl, VT, Z, R); | |||
24635 | ALo = DAG.getBitcast(ExtVT, ALo); | |||
24636 | AHi = DAG.getBitcast(ExtVT, AHi); | |||
24637 | RLo = DAG.getBitcast(ExtVT, RLo); | |||
24638 | RHi = DAG.getBitcast(ExtVT, RHi); | |||
24639 | SDValue Lo = DAG.getNode(Opc, dl, ExtVT, RLo, ALo); | |||
24640 | SDValue Hi = DAG.getNode(Opc, dl, ExtVT, RHi, AHi); | |||
24641 | Lo = getTargetVShiftByConstNode(X86ISD::VSRLI, dl, ExtVT, Lo, 16, DAG); | |||
24642 | Hi = getTargetVShiftByConstNode(X86ISD::VSRLI, dl, ExtVT, Hi, 16, DAG); | |||
24643 | return DAG.getNode(X86ISD::PACKUS, dl, VT, Lo, Hi); | |||
24644 | } | |||
24645 | ||||
24646 | if (VT == MVT::v8i16) { | |||
24647 | // If we have a constant shift amount, the non-SSE41 path is best, as | |||
24648 | // avoiding bitcasts makes it easier to constant fold and reduce to PBLENDW. | |||
24649 | bool UseSSE41 = Subtarget.hasSSE41() && | |||
24650 | !ISD::isBuildVectorOfConstantSDNodes(Amt.getNode()); | |||
24651 | ||||
24652 | auto SignBitSelect = [&](SDValue Sel, SDValue V0, SDValue V1) { | |||
24653 | // On SSE41 targets we make use of the fact that VSELECT lowers | |||
24654 | // to PBLENDVB which selects bytes based just on the sign bit. | |||
24655 | if (UseSSE41) { | |||
24656 | MVT ExtVT = MVT::getVectorVT(MVT::i8, VT.getVectorNumElements() * 2); | |||
24657 | V0 = DAG.getBitcast(ExtVT, V0); | |||
24658 | V1 = DAG.getBitcast(ExtVT, V1); | |||
24659 | Sel = DAG.getBitcast(ExtVT, Sel); | |||
24660 | return DAG.getBitcast(VT, DAG.getSelect(dl, ExtVT, Sel, V0, V1)); | |||
24661 | } | |||
24662 | // On pre-SSE41 targets we splat the sign bit - a negative value will | |||
24663 | // set all bits of the lanes to true and VSELECT uses that in | |||
24664 | // its OR(AND(V0,C),AND(V1,~C)) lowering. | |||
24665 | SDValue C = | |||
24666 | getTargetVShiftByConstNode(X86ISD::VSRAI, dl, VT, Sel, 15, DAG); | |||
24667 | return DAG.getSelect(dl, VT, C, V0, V1); | |||
24668 | }; | |||
24669 | ||||
24670 | // Turn 'a' into a mask suitable for VSELECT: a = a << 12; | |||
24671 | if (UseSSE41) { | |||
24672 | // On SSE41 targets we need to replicate the shift mask in both | |||
24673 | // bytes for PBLENDVB. | |||
24674 | Amt = DAG.getNode( | |||
24675 | ISD::OR, dl, VT, | |||
24676 | getTargetVShiftByConstNode(X86ISD::VSHLI, dl, VT, Amt, 4, DAG), | |||
24677 | getTargetVShiftByConstNode(X86ISD::VSHLI, dl, VT, Amt, 12, DAG)); | |||
24678 | } else { | |||
24679 | Amt = getTargetVShiftByConstNode(X86ISD::VSHLI, dl, VT, Amt, 12, DAG); | |||
24680 | } | |||
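// E.g. the amount 0x0008 becomes (0x0008 << 4) | (0x0008 << 12) =
// 0x8080: PBLENDVB selects per byte, so the current amount bit must be
// replicated into the sign bit of both bytes of each i16 lane. The
// pre-SSE41 path instead splats bit 15 with VSRAI inside SignBitSelect.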
24681 | ||||
24682 | // r = VSELECT(r, shift(r, 8), a); | |||
24683 | SDValue M = getTargetVShiftByConstNode(X86OpcI, dl, VT, R, 8, DAG); | |||
24684 | R = SignBitSelect(Amt, M, R); | |||
24685 | ||||
24686 | // a += a | |||
24687 | Amt = DAG.getNode(ISD::ADD, dl, VT, Amt, Amt); | |||
24688 | ||||
24689 | // r = VSELECT(r, shift(r, 4), a); | |||
24690 | M = getTargetVShiftByConstNode(X86OpcI, dl, VT, R, 4, DAG); | |||
24691 | R = SignBitSelect(Amt, M, R); | |||
24692 | ||||
24693 | // a += a | |||
24694 | Amt = DAG.getNode(ISD::ADD, dl, VT, Amt, Amt); | |||
24695 | ||||
24696 | // r = VSELECT(r, shift(r, 2), a); | |||
24697 | M = getTargetVShiftByConstNode(X86OpcI, dl, VT, R, 2, DAG); | |||
24698 | R = SignBitSelect(Amt, M, R); | |||
24699 | ||||
24700 | // a += a | |||
24701 | Amt = DAG.getNode(ISD::ADD, dl, VT, Amt, Amt); | |||
24702 | ||||
24703 | // return VSELECT(r, shift(r, 1), a); | |||
24704 | M = getTargetVShiftByConstNode(X86OpcI, dl, VT, R, 1, DAG); | |||
24705 | R = SignBitSelect(Amt, M, R); | |||
24706 | return R; | |||
24707 | } | |||
24708 | ||||
24709 | // Decompose 256-bit shifts into 128-bit shifts. | |||
24710 | if (VT.is256BitVector()) | |||
24711 | return split256IntArith(Op, DAG); | |||
24712 | ||||
24713 | return SDValue(); | |||
24714 | } | |||
24715 | ||||
24716 | static SDValue LowerRotate(SDValue Op, const X86Subtarget &Subtarget, | |||
24717 | SelectionDAG &DAG) { | |||
24718 | MVT VT = Op.getSimpleValueType(); | |||
24719 | assert(VT.isVector() && "Custom lowering only for vector rotates!"); | |||
24720 | ||||
24721 | SDLoc DL(Op); | |||
24722 | SDValue R = Op.getOperand(0); | |||
24723 | SDValue Amt = Op.getOperand(1); | |||
24724 | unsigned Opcode = Op.getOpcode(); | |||
24725 | unsigned EltSizeInBits = VT.getScalarSizeInBits(); | |||
24726 | int NumElts = VT.getVectorNumElements(); | |||
24727 | ||||
24728 | // Check for constant splat rotation amount. | |||
24729 | APInt UndefElts; | |||
24730 | SmallVector<APInt, 32> EltBits; | |||
24731 | int CstSplatIndex = -1; | |||
24732 | if (getTargetConstantBitsFromNode(Amt, EltSizeInBits, UndefElts, EltBits)) | |||
24733 | for (int i = 0; i != NumElts; ++i) | |||
24734 | if (!UndefElts[i]) { | |||
24735 | if (CstSplatIndex < 0 || EltBits[i] == EltBits[CstSplatIndex]) { | |||
24736 | CstSplatIndex = i; | |||
24737 | continue; | |||
24738 | } | |||
24739 | CstSplatIndex = -1; | |||
24740 | break; | |||
24741 | } | |||
24742 | ||||
24743 | // AVX512 implicitly uses modulo rotation amounts. | |||
24744 | if (Subtarget.hasAVX512() && 32 <= EltSizeInBits) { | |||
24745 | // Attempt to rotate by immediate. | |||
24746 | if (0 <= CstSplatIndex) { | |||
24747 | unsigned Op = (Opcode == ISD::ROTL ? X86ISD::VROTLI : X86ISD::VROTRI); | |||
24748 | uint64_t RotateAmt = EltBits[CstSplatIndex].urem(EltSizeInBits); | |||
24749 | return DAG.getNode(Op, DL, VT, R, | |||
24750 | DAG.getConstant(RotateAmt, DL, MVT::i8)); | |||
24751 | } | |||
24752 | ||||
24753 | // Else, fall back on VPROLV/VPRORV. | |||
24754 | return Op; | |||
24755 | } | |||
24756 | ||||
24757 | assert((Opcode == ISD::ROTL) && "Only ROTL supported"); | |||
24758 | ||||
24759 | // XOP has 128-bit vector variable + immediate rotates. | |||
24760 | // +ve/-ve Amt = rotate left/right - just need to handle ISD::ROTL. | |||
24761 | // XOP implicitly uses modulo rotation amounts. | |||
24762 | if (Subtarget.hasXOP()) { | |||
24763 | if (VT.is256BitVector()) | |||
24764 | return split256IntArith(Op, DAG); | |||
24765 | assert(VT.is128BitVector() && "Only rotate 128-bit vectors!"); | |||
24766 | ||||
24767 | // Attempt to rotate by immediate. | |||
24768 | if (0 <= CstSplatIndex) { | |||
24769 | uint64_t RotateAmt = EltBits[CstSplatIndex].urem(EltSizeInBits); | |||
24770 | return DAG.getNode(X86ISD::VROTLI, DL, VT, R, | |||
24771 | DAG.getConstant(RotateAmt, DL, MVT::i8)); | |||
24772 | } | |||
24773 | ||||
24774 | // Use general rotate by variable (per-element). | |||
24775 | return Op; | |||
24776 | } | |||
24777 | ||||
24778 | // Split 256-bit integers on pre-AVX2 targets. | |||
24779 | if (VT.is256BitVector() && !Subtarget.hasAVX2()) | |||
24780 | return split256IntArith(Op, DAG); | |||
24781 | ||||
24782 | assert((VT == MVT::v4i32 || VT == MVT::v8i16 || VT == MVT::v16i8 || | |||
24783 | ((VT == MVT::v8i32 || VT == MVT::v16i16 || VT == MVT::v32i8) && | |||
24784 | Subtarget.hasAVX2())) && | |||
24785 | "Only vXi32/vXi16/vXi8 vector rotates supported"); | |||
24786 | ||||
24787 | // Rotate by a uniform constant - expand back to shifts. | |||
24788 | if (0 <= CstSplatIndex) | |||
24789 | return SDValue(); | |||
24790 | ||||
24791 | bool IsSplatAmt = DAG.isSplatValue(Amt); | |||
24792 | ||||
24793 | // v16i8/v32i8: Split rotation into rot4/rot2/rot1 stages and select by | |||
24794 | // the amount bit. | |||
24795 | if (EltSizeInBits == 8 && !IsSplatAmt) { | |||
24796 | if (ISD::isBuildVectorOfConstantSDNodes(Amt.getNode())) | |||
24797 | return SDValue(); | |||
24798 | ||||
24799 | // We don't need ModuloAmt here as we just peek at individual bits. | |||
24800 | MVT ExtVT = MVT::getVectorVT(MVT::i16, NumElts / 2); | |||
24801 | ||||
24802 | auto SignBitSelect = [&](MVT SelVT, SDValue Sel, SDValue V0, SDValue V1) { | |||
24803 | if (Subtarget.hasSSE41()) { | |||
24804 | // On SSE41 targets we make use of the fact that VSELECT lowers | |||
24805 | // to PBLENDVB which selects bytes based just on the sign bit. | |||
24806 | V0 = DAG.getBitcast(VT, V0); | |||
24807 | V1 = DAG.getBitcast(VT, V1); | |||
24808 | Sel = DAG.getBitcast(VT, Sel); | |||
24809 | return DAG.getBitcast(SelVT, DAG.getSelect(DL, VT, Sel, V0, V1)); | |||
24810 | } | |||
24811 | // On pre-SSE41 targets we test for the sign bit by comparing to | |||
24812 | // zero - a negative value will set all bits of the lanes to true | |||
24813 | // and VSELECT uses that in its OR(AND(V0,C),AND(V1,~C)) lowering. | |||
24814 | SDValue Z = DAG.getConstant(0, DL, SelVT); | |||
24815 | SDValue C = DAG.getNode(X86ISD::PCMPGT, DL, SelVT, Z, Sel); | |||
24816 | return DAG.getSelect(DL, SelVT, C, V0, V1); | |||
24817 | }; | |||
24818 | ||||
24819 | // Turn 'a' into a mask suitable for VSELECT: a = a << 5; | |||
24820 | // We can safely do this using i16 shifts as we're only interested in | |||
24821 | // the 3 lower bits of each byte. | |||
24822 | Amt = DAG.getBitcast(ExtVT, Amt); | |||
24823 | Amt = DAG.getNode(ISD::SHL, DL, ExtVT, Amt, DAG.getConstant(5, DL, ExtVT)); | |||
24824 | Amt = DAG.getBitcast(VT, Amt); | |||
24825 | ||||
24826 | // r = VSELECT(r, rot(r, 4), a); | |||
24827 | SDValue M; | |||
24828 | M = DAG.getNode( | |||
24829 | ISD::OR, DL, VT, | |||
24830 | DAG.getNode(ISD::SHL, DL, VT, R, DAG.getConstant(4, DL, VT)), | |||
24831 | DAG.getNode(ISD::SRL, DL, VT, R, DAG.getConstant(4, DL, VT))); | |||
24832 | R = SignBitSelect(VT, Amt, M, R); | |||
24833 | ||||
24834 | // a += a | |||
24835 | Amt = DAG.getNode(ISD::ADD, DL, VT, Amt, Amt); | |||
24836 | ||||
24837 | // r = VSELECT(r, rot(r, 2), a); | |||
24838 | M = DAG.getNode( | |||
24839 | ISD::OR, DL, VT, | |||
24840 | DAG.getNode(ISD::SHL, DL, VT, R, DAG.getConstant(2, DL, VT)), | |||
24841 | DAG.getNode(ISD::SRL, DL, VT, R, DAG.getConstant(6, DL, VT))); | |||
24842 | R = SignBitSelect(VT, Amt, M, R); | |||
24843 | ||||
24844 | // a += a | |||
24845 | Amt = DAG.getNode(ISD::ADD, DL, VT, Amt, Amt); | |||
24846 | ||||
24847 | // return VSELECT(r, rot(r, 1), a); | |||
24848 | M = DAG.getNode( | |||
24849 | ISD::OR, DL, VT, | |||
24850 | DAG.getNode(ISD::SHL, DL, VT, R, DAG.getConstant(1, DL, VT)), | |||
24851 | DAG.getNode(ISD::SRL, DL, VT, R, DAG.getConstant(7, DL, VT))); | |||
24852 | return SignBitSelect(VT, Amt, M, R); | |||
24853 | } | |||
24854 | ||||
24855 | // ISD::ROT* uses modulo rotate amounts. | |||
24856 | Amt = DAG.getNode(ISD::AND, DL, VT, Amt, | |||
24857 | DAG.getConstant(EltSizeInBits - 1, DL, VT)); | |||
24858 | ||||
24859 | bool ConstantAmt = ISD::isBuildVectorOfConstantSDNodes(Amt.getNode()); | |||
24860 | bool LegalVarShifts = SupportedVectorVarShift(VT, Subtarget, ISD::SHL) && | |||
24861 | SupportedVectorVarShift(VT, Subtarget, ISD::SRL); | |||
24862 | ||||
24863 | // Fallback for splats + all supported variable shifts. | |||
24864 | // Fallback for non-constant AVX2 vXi16 as well. | |||
24865 | if (IsSplatAmt || LegalVarShifts || (Subtarget.hasAVX2() && !ConstantAmt)) { | |||
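// rotl(R, Amt) == (R << Amt) | (R >> (EltSize - Amt)). Amt was reduced
// modulo EltSize above, and the Amt == 0 lanes still come out right
// because the x86 lowerings of these shifts yield zero for counts >=
// EltSize, so the OR degenerates to R | 0.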
24866 | SDValue AmtR = DAG.getConstant(EltSizeInBits, DL, VT); | |||
24867 | AmtR = DAG.getNode(ISD::SUB, DL, VT, AmtR, Amt); | |||
24868 | SDValue SHL = DAG.getNode(ISD::SHL, DL, VT, R, Amt); | |||
24869 | SDValue SRL = DAG.getNode(ISD::SRL, DL, VT, R, AmtR); | |||
24870 | return DAG.getNode(ISD::OR, DL, VT, SHL, SRL); | |||
24871 | } | |||
24872 | ||||
24873 | // As with shifts, convert the rotation amount to a multiplication factor. | |||
24874 | SDValue Scale = convertShiftLeftToScale(Amt, DL, Subtarget, DAG); | |||
24875 | assert(Scale && "Failed to convert ROTL amount to scale"); | |||
24876 | ||||
24877 | // v8i16/v16i16: perform unsigned multiply hi/lo and OR the results. | |||
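// (E.g. rotl(0x8001, 1): MUL gives the low half 0x0002, MULHU recovers
// the wrapped-out top bit as 0x0001, and the OR reassembles 0x0003.)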
24878 | if (EltSizeInBits == 16) { | |||
24879 | SDValue Lo = DAG.getNode(ISD::MUL, DL, VT, R, Scale); | |||
24880 | SDValue Hi = DAG.getNode(ISD::MULHU, DL, VT, R, Scale); | |||
24881 | return DAG.getNode(ISD::OR, DL, VT, Lo, Hi); | |||
24882 | } | |||
24883 | ||||
24884 | // v4i32: make use of the PMULUDQ instruction to multiply 2 lanes of v4i32 | |||
24885 | // to v2i64 results at a time. The upper 32-bits contain the wrapped bits | |||
24886 | // that can then be OR'd with the lower 32-bits. | |||
24887 | assert(VT == MVT::v4i32 && "Only v4i32 vector rotate expected"); | |||
24888 | static const int OddMask[] = {1, -1, 3, -1}; | |||
24889 | SDValue R13 = DAG.getVectorShuffle(VT, DL, R, R, OddMask); | |||
24890 | SDValue Scale13 = DAG.getVectorShuffle(VT, DL, Scale, Scale, OddMask); | |||
24891 | ||||
24892 | SDValue Res02 = DAG.getNode(X86ISD::PMULUDQ, DL, MVT::v2i64, | |||
24893 | DAG.getBitcast(MVT::v2i64, R), | |||
24894 | DAG.getBitcast(MVT::v2i64, Scale)); | |||
24895 | SDValue Res13 = DAG.getNode(X86ISD::PMULUDQ, DL, MVT::v2i64, | |||
24896 | DAG.getBitcast(MVT::v2i64, R13), | |||
24897 | DAG.getBitcast(MVT::v2i64, Scale13)); | |||
24898 | Res02 = DAG.getBitcast(VT, Res02); | |||
24899 | Res13 = DAG.getBitcast(VT, Res13); | |||
24900 | ||||
24901 | return DAG.getNode(ISD::OR, DL, VT, | |||
24902 | DAG.getVectorShuffle(VT, DL, Res02, Res13, {0, 4, 2, 6}), | |||
24903 | DAG.getVectorShuffle(VT, DL, Res02, Res13, {1, 5, 3, 7})); | |||
24904 | } | |||
24905 | ||||
24906 | static SDValue LowerXALUO(SDValue Op, SelectionDAG &DAG) { | |||
24907 | // Lower the "add/sub/mul with overflow" instruction into a regular ins plus | |||
24908 | // a "setcc" instruction that checks the overflow flag. The "brcond" lowering | |||
24909 | // looks for this combo and may remove the "setcc" instruction if the "setcc" | |||
24910 | // has only one use. | |||
24911 | SDNode *N = Op.getNode(); | |||
24912 | SDValue LHS = N->getOperand(0); | |||
24913 | SDValue RHS = N->getOperand(1); | |||
24914 | unsigned BaseOp = 0; | |||
24915 | X86::CondCode Cond; | |||
24916 | SDLoc DL(Op); | |||
24917 | switch (Op.getOpcode()) { | |||
24918 | default: llvm_unreachable("Unknown ovf instruction!"); | |||
24919 | case ISD::SADDO: | |||
24920 | // An add of one will be selected as an INC. Note that INC doesn't | |||
24921 | // set CF, so we can't do this for UADDO. | |||
24922 | if (isOneConstant(RHS)) { | |||
24923 | BaseOp = X86ISD::INC; | |||
24924 | Cond = X86::COND_O; | |||
24925 | break; | |||
24926 | } | |||
24927 | BaseOp = X86ISD::ADD; | |||
24928 | Cond = X86::COND_O; | |||
24929 | break; | |||
24930 | case ISD::UADDO: | |||
24931 | BaseOp = X86ISD::ADD; | |||
24932 | Cond = X86::COND_B; | |||
24933 | break; | |||
24934 | case ISD::SSUBO: | |||
24935 | // A subtract of one will be selected as a DEC. Note that DEC doesn't | |||
24936 | // set CF, so we can't do this for USUBO. | |||
24937 | if (isOneConstant(RHS)) { | |||
24938 | BaseOp = X86ISD::DEC; | |||
24939 | Cond = X86::COND_O; | |||
24940 | break; | |||
24941 | } | |||
24942 | BaseOp = X86ISD::SUB; | |||
24943 | Cond = X86::COND_O; | |||
24944 | break; | |||
24945 | case ISD::USUBO: | |||
24946 | BaseOp = X86ISD::SUB; | |||
24947 | Cond = X86::COND_B; | |||
24948 | break; | |||
24949 | case ISD::SMULO: | |||
24950 | BaseOp = N->getValueType(0) == MVT::i8 ? X86ISD::SMUL8 : X86ISD::SMUL; | |||
24951 | Cond = X86::COND_O; | |||
24952 | break; | |||
24953 | case ISD::UMULO: { // i64, i8 = umulo lhs, rhs --> i64, i64, i32 umul lhs,rhs | |||
24954 | if (N->getValueType(0) == MVT::i8) { | |||
24955 | BaseOp = X86ISD::UMUL8; | |||
24956 | Cond = X86::COND_O; | |||
24957 | break; | |||
24958 | } | |||
24959 | SDVTList VTs = DAG.getVTList(N->getValueType(0), N->getValueType(0), | |||
24960 | MVT::i32); | |||
24961 | SDValue Sum = DAG.getNode(X86ISD::UMUL, DL, VTs, LHS, RHS); | |||
24962 | ||||
24963 | SDValue SetCC = getSETCC(X86::COND_O, SDValue(Sum.getNode(), 2), DL, DAG); | |||
24964 | ||||
24965 | return DAG.getNode(ISD::MERGE_VALUES, DL, N->getVTList(), Sum, SetCC); | |||
24966 | } | |||
24967 | } | |||
24968 | ||||
24969 | // Also sets EFLAGS. | |||
24970 | SDVTList VTs = DAG.getVTList(N->getValueType(0), MVT::i32); | |||
24971 | SDValue Sum = DAG.getNode(BaseOp, DL, VTs, LHS, RHS); | |||
24972 | ||||
24973 | SDValue SetCC = getSETCC(Cond, SDValue(Sum.getNode(), 1), DL, DAG); | |||
24974 | ||||
24975 | return DAG.getNode(ISD::MERGE_VALUES, DL, N->getVTList(), Sum, SetCC); | |||
24976 | } | |||
24977 | ||||
24978 | /// Returns true if the operand type is exactly twice the native width, and | |||
24979 | /// the corresponding cmpxchg8b or cmpxchg16b instruction is available. | |||
24980 | /// Used to know whether to use cmpxchg8/16b when expanding atomic operations | |||
24981 | /// (otherwise we leave them alone to become __sync_fetch_and_... calls). | |||
24982 | bool X86TargetLowering::needsCmpXchgNb(Type *MemType) const { | |||
24983 | unsigned OpWidth = MemType->getPrimitiveSizeInBits(); | |||
24984 | ||||
24985 | if (OpWidth == 64) | |||
24986 | return !Subtarget.is64Bit(); // FIXME this should be Subtarget.hasCmpxchg8b | |||
24987 | else if (OpWidth == 128) | |||
24988 | return Subtarget.hasCmpxchg16b(); | |||
24989 | else | |||
24990 | return false; | |||
24991 | } | |||
24992 | ||||
24993 | bool X86TargetLowering::shouldExpandAtomicStoreInIR(StoreInst *SI) const { | |||
24994 | return needsCmpXchgNb(SI->getValueOperand()->getType()); | |||
24995 | } | |||
24996 | ||||
24997 | // Note: this turns large loads into lock cmpxchg8b/16b. | |||
24998 | // FIXME: On 32-bit x86, fild/movq might be faster than lock cmpxchg8b. | |||
24999 | TargetLowering::AtomicExpansionKind | |||
25000 | X86TargetLowering::shouldExpandAtomicLoadInIR(LoadInst *LI) const { | |||
25001 | auto PTy = cast<PointerType>(LI->getPointerOperandType()); | |||
25002 | return needsCmpXchgNb(PTy->getElementType()) ? AtomicExpansionKind::CmpXChg | |||
25003 | : AtomicExpansionKind::None; | |||
25004 | } | |||
25005 | ||||
25006 | TargetLowering::AtomicExpansionKind | |||
25007 | X86TargetLowering::shouldExpandAtomicRMWInIR(AtomicRMWInst *AI) const { | |||
25008 | unsigned NativeWidth = Subtarget.is64Bit() ? 64 : 32; | |||
25009 | Type *MemType = AI->getType(); | |||
25010 | ||||
25011 | // If the operand is too big, we must see if cmpxchg8/16b is available | |||
25012 | // and default to library calls otherwise. | |||
25013 | if (MemType->getPrimitiveSizeInBits() > NativeWidth) { | |||
25014 | return needsCmpXchgNb(MemType) ? AtomicExpansionKind::CmpXChg | |||
25015 | : AtomicExpansionKind::None; | |||
25016 | } | |||
25017 | ||||
25018 | AtomicRMWInst::BinOp Op = AI->getOperation(); | |||
25019 | switch (Op) { | |||
25020 | default: | |||
25021 | llvm_unreachable("Unknown atomic operation"); | |||
25022 | case AtomicRMWInst::Xchg: | |||
25023 | case AtomicRMWInst::Add: | |||
25024 | case AtomicRMWInst::Sub: | |||
25025 | // It's better to use xadd, xsub or xchg for these in all cases. | |||
25026 | return AtomicExpansionKind::None; | |||
25027 | case AtomicRMWInst::Or: | |||
25028 | case AtomicRMWInst::And: | |||
25029 | case AtomicRMWInst::Xor: | |||
25030 | // If the atomicrmw's result isn't actually used, we can just add a "lock" | |||
25031 | // prefix to a normal instruction for these operations. | |||
25032 | return !AI->use_empty() ? AtomicExpansionKind::CmpXChg | |||
25033 | : AtomicExpansionKind::None; | |||
25034 | case AtomicRMWInst::Nand: | |||
25035 | case AtomicRMWInst::Max: | |||
25036 | case AtomicRMWInst::Min: | |||
25037 | case AtomicRMWInst::UMax: | |||
25038 | case AtomicRMWInst::UMin: | |||
25039 | // These always require a non-trivial set of data operations on x86. We must | |||
25040 | // use a cmpxchg loop. | |||
25041 | return AtomicExpansionKind::CmpXChg; | |||
25042 | } | |||
25043 | } | |||
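// Illustrative consequences of the switch above (sketch, not exhaustive):
//   atomicrmw xchg/add/sub            -> None    (xchg / lock xadd)
//   atomicrmw and/or/xor, result used -> CmpXChg (cmpxchg loop)
//   atomicrmw and/or/xor, unused      -> None    (becomes a lock-prefixed op)
//   atomicrmw nand/max/min/umax/umin  -> CmpXChg (always a cmpxchg loop)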
25044 | ||||
25045 | LoadInst * | |||
25046 | X86TargetLowering::lowerIdempotentRMWIntoFencedLoad(AtomicRMWInst *AI) const { | |||
25047 | unsigned NativeWidth = Subtarget.is64Bit() ? 64 : 32; | |||
25048 | Type *MemType = AI->getType(); | |||
25049 | // Accesses larger than the native width are turned into cmpxchg/libcalls, so | |||
25050 | // there is no benefit in turning such RMWs into loads, and it is actually | |||
25051 | // harmful as it introduces an mfence. | |||
25052 | if (MemType->getPrimitiveSizeInBits() > NativeWidth) | |||
25053 | return nullptr; | |||
25054 | ||||
25055 | auto Builder = IRBuilder<>(AI); | |||
25056 | Module *M = Builder.GetInsertBlock()->getParent()->getParent(); | |||
25057 | auto SSID = AI->getSyncScopeID(); | |||
25058 | // We must restrict the ordering to avoid generating loads with Release or | |||
25059 | // ReleaseAcquire orderings. | |||
25060 | auto Order = AtomicCmpXchgInst::getStrongestFailureOrdering(AI->getOrdering()); | |||
25061 | auto Ptr = AI->getPointerOperand(); | |||
25062 | ||||
25063 | // Before the load we need a fence. Here is an example lifted from | |||
25064 | // http://www.hpl.hp.com/techreports/2012/HPL-2012-68.pdf showing why a fence | |||
25065 | // is required: | |||
25066 | // Thread 0: | |||
25067 | // x.store(1, relaxed); | |||
25068 | // r1 = y.fetch_add(0, release); | |||
25069 | // Thread 1: | |||
25070 | // y.fetch_add(42, acquire); | |||
25071 | // r2 = x.load(relaxed); | |||
25072 | // r1 = r2 = 0 is impossible, but becomes possible if the idempotent rmw is | |||
25073 | // lowered to just a load without a fence. An mfence flushes the store buffer, | |||
25074 | // making the optimization clearly correct. | |||
25075 | // FIXME: the fence is required if isReleaseOrStronger(Order), but it is not | |||
25076 | // clear otherwise; we might be able to be more aggressive on relaxed | |||
25077 | // idempotent RMWs. In practice, they do not look useful, so we don't try to | |||
25078 | // be especially clever. | |||
25079 | if (SSID == SyncScope::SingleThread) | |||
25080 | // FIXME: we could just insert an X86ISD::MEMBARRIER here, except we are at | |||
25081 | // the IR level, so we must wrap it in an intrinsic. | |||
25082 | return nullptr; | |||
25083 | ||||
25084 | if (!Subtarget.hasMFence()) | |||
25085 | // FIXME: it might make sense to use a locked operation here but on a | |||
25086 | // different cache-line to prevent cache-line bouncing. In practice it | |||
25087 | // is probably a small win, and x86 processors without mfence are rare | |||
25088 | // enough that we do not bother. | |||
25089 | return nullptr; | |||
25090 | ||||
25091 | Function *MFence = | |||
25092 | llvm::Intrinsic::getDeclaration(M, Intrinsic::x86_sse2_mfence); | |||
25093 | Builder.CreateCall(MFence, {}); | |||
25094 | ||||
25095 | // Finally we can emit the atomic load. | |||
25096 | LoadInst *Loaded = Builder.CreateAlignedLoad(Ptr, | |||
25097 | AI->getType()->getPrimitiveSizeInBits()); | |||
25098 | Loaded->setAtomic(Order, SSID); | |||
25099 | AI->replaceAllUsesWith(Loaded); | |||
25100 | AI->eraseFromParent(); | |||
25101 | return Loaded; | |||
25102 | } | |||
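// Illustrative IR-level rewrite performed above (sketch), for a cross-thread
// idempotent RMW on a target with MFENCE:
//   %old = atomicrmw add i32* %p, i32 0 seq_cst
// becomes
//   call void @llvm.x86.sse2.mfence()
//   %old = load atomic i32, i32* %p seq_cst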
25103 | ||||
25104 | static SDValue LowerATOMIC_FENCE(SDValue Op, const X86Subtarget &Subtarget, | |||
25105 | SelectionDAG &DAG) { | |||
25106 | SDLoc dl(Op); | |||
25107 | AtomicOrdering FenceOrdering = static_cast<AtomicOrdering>( | |||
25108 | cast<ConstantSDNode>(Op.getOperand(1))->getZExtValue()); | |||
25109 | SyncScope::ID FenceSSID = static_cast<SyncScope::ID>( | |||
25110 | cast<ConstantSDNode>(Op.getOperand(2))->getZExtValue()); | |||
25111 | ||||
25112 | // The only fence that needs an instruction is a sequentially-consistent | |||
25113 | // cross-thread fence. | |||
25114 | if (FenceOrdering == AtomicOrdering::SequentiallyConsistent && | |||
25115 | FenceSSID == SyncScope::System) { | |||
25116 | if (Subtarget.hasMFence()) | |||
25117 | return DAG.getNode(X86ISD::MFENCE, dl, MVT::Other, Op.getOperand(0)); | |||
25118 | ||||
25119 | SDValue Chain = Op.getOperand(0); | |||
25120 | SDValue Zero = DAG.getTargetConstant(0, dl, MVT::i32); | |||
25121 | SDValue Ops[] = { | |||
25122 | DAG.getRegister(X86::ESP, MVT::i32), // Base | |||
25123 | DAG.getTargetConstant(1, dl, MVT::i8), // Scale | |||
25124 | DAG.getRegister(0, MVT::i32), // Index | |||
25125 | DAG.getTargetConstant(0, dl, MVT::i32), // Disp | |||
25126 | DAG.getRegister(0, MVT::i32), // Segment. | |||
25127 | Zero, | |||
25128 | Chain | |||
25129 | }; | |||
25130 | SDNode *Res = DAG.getMachineNode(X86::OR32mi8Locked, dl, MVT::Other, Ops); | |||
25131 | return SDValue(Res, 0); | |||
25132 | } | |||
25133 | ||||
25134 | // MEMBARRIER is a compiler barrier; it codegens to a no-op. | |||
25135 | return DAG.getNode(X86ISD::MEMBARRIER, dl, MVT::Other, Op.getOperand(0)); | |||
25136 | } | |||
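// Illustrative assembly for the three paths above (sketch):
//   fence seq_cst, system scope, MFENCE available:  mfence
//   fence seq_cst, system scope, no MFENCE:         lock orl $0, (%esp)
//   any weaker or single-thread fence:              MEMBARRIER, i.e. no code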
25137 | ||||
25138 | static SDValue LowerCMP_SWAP(SDValue Op, const X86Subtarget &Subtarget, | |||
25139 | SelectionDAG &DAG) { | |||
25140 | MVT T = Op.getSimpleValueType(); | |||
25141 | SDLoc DL(Op); | |||
25142 | unsigned Reg = 0; | |||
25143 | unsigned size = 0; | |||
25144 | switch(T.SimpleTy) { | |||
25145 | default: llvm_unreachable("Invalid value type!"); | |||
25146 | case MVT::i8: Reg = X86::AL; size = 1; break; | |||
25147 | case MVT::i16: Reg = X86::AX; size = 2; break; | |||
25148 | case MVT::i32: Reg = X86::EAX; size = 4; break; | |||
25149 | case MVT::i64: | |||
25150 | assert(Subtarget.is64Bit() && "Node not type legal!"); | |||
25151 | Reg = X86::RAX; size = 8; | |||
25152 | break; | |||
25153 | } | |||
25154 | SDValue cpIn = DAG.getCopyToReg(Op.getOperand(0), DL, Reg, | |||
25155 | Op.getOperand(2), SDValue()); | |||
25156 | SDValue Ops[] = { cpIn.getValue(0), | |||
25157 | Op.getOperand(1), | |||
25158 | Op.getOperand(3), | |||
25159 | DAG.getTargetConstant(size, DL, MVT::i8), | |||
25160 | cpIn.getValue(1) }; | |||
25161 | SDVTList Tys = DAG.getVTList(MVT::Other, MVT::Glue); | |||
25162 | MachineMemOperand *MMO = cast<AtomicSDNode>(Op)->getMemOperand(); | |||
25163 | SDValue Result = DAG.getMemIntrinsicNode(X86ISD::LCMPXCHG_DAG, DL, Tys, | |||
25164 | Ops, T, MMO); | |||
25165 | ||||
25166 | SDValue cpOut = | |||
25167 | DAG.getCopyFromReg(Result.getValue(0), DL, Reg, T, Result.getValue(1)); | |||
25168 | SDValue EFLAGS = DAG.getCopyFromReg(cpOut.getValue(1), DL, X86::EFLAGS, | |||
25169 | MVT::i32, cpOut.getValue(2)); | |||
25170 | SDValue Success = getSETCC(X86::COND_E, EFLAGS, DL, DAG); | |||
25171 | ||||
25172 | DAG.ReplaceAllUsesOfValueWith(Op.getValue(0), cpOut); | |||
25173 | DAG.ReplaceAllUsesOfValueWith(Op.getValue(1), Success); | |||
25174 | DAG.ReplaceAllUsesOfValueWith(Op.getValue(2), EFLAGS.getValue(1)); | |||
25175 | return SDValue(); | |||
25176 | } | |||
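// Sketch of the node sequence built above for an i32 cmpxchg (illustrative):
//   copy the expected value into EAX
//   LCMPXCHG_DAG on the memory operand       // selects lock cmpxchg
//   copy EAX out as the old value
//   copy EFLAGS out; getSETCC(X86::COND_E, ...) yields the success bit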
25177 | ||||
25178 | // Create MOVMSKB, taking into account whether we need to split for AVX1. | |||
25179 | static SDValue getPMOVMSKB(const SDLoc &DL, SDValue V, SelectionDAG &DAG, | |||
25180 | const X86Subtarget &Subtarget) { | |||
25181 | MVT InVT = V.getSimpleValueType(); | |||
25182 | ||||
25183 | if (InVT == MVT::v32i8 && !Subtarget.hasInt256()) { | |||
25184 | SDValue Lo, Hi; | |||
25185 | std::tie(Lo, Hi) = DAG.SplitVector(V, DL); | |||
25186 | Lo = DAG.getNode(X86ISD::MOVMSK, DL, MVT::i32, Lo); | |||
25187 | Hi = DAG.getNode(X86ISD::MOVMSK, DL, MVT::i32, Hi); | |||
25188 | Hi = DAG.getNode(ISD::SHL, DL, MVT::i32, Hi, | |||
25189 | DAG.getConstant(16, DL, MVT::i8)); | |||
25190 | return DAG.getNode(ISD::OR, DL, MVT::i32, Lo, Hi); | |||
25191 | } | |||
25192 | ||||
25193 | return DAG.getNode(X86ISD::MOVMSK, DL, MVT::i32, V); | |||
25194 | } | |||
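// Illustrative AVX1 split performed above for v32i8 (sketch):
//   mask = movmsk(lo 16 bytes) | (movmsk(hi 16 bytes) << 16)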
25195 | ||||
25196 | static SDValue LowerBITCAST(SDValue Op, const X86Subtarget &Subtarget, | |||
25197 | SelectionDAG &DAG) { | |||
25198 | SDValue Src = Op.getOperand(0); | |||
25199 | MVT SrcVT = Src.getSimpleValueType(); | |||
25200 | MVT DstVT = Op.getSimpleValueType(); | |||
25201 | ||||
25202 | // Legalize (v64i1 (bitcast i64 (X))) by splitting the i64, bitcasting each | |||
25203 | // half to v32i1 and concatenating the result. | |||
25204 | if (SrcVT == MVT::i64 && DstVT == MVT::v64i1) { | |||
25205 | assert(!Subtarget.is64Bit() && "Expected 32-bit mode"); | |||
25206 | assert(Subtarget.hasBWI() && "Expected BWI target"); | |||
25207 | SDLoc dl(Op); | |||
25208 | SDValue Lo = DAG.getNode(ISD::EXTRACT_ELEMENT, dl, MVT::i32, Src, | |||
25209 | DAG.getIntPtrConstant(0, dl)); | |||
25210 | Lo = DAG.getBitcast(MVT::v32i1, Lo); | |||
25211 | SDValue Hi = DAG.getNode(ISD::EXTRACT_ELEMENT, dl, MVT::i32, Src, | |||
25212 | DAG.getIntPtrConstant(1, dl)); | |||
25213 | Hi = DAG.getBitcast(MVT::v32i1, Hi); | |||
25214 | return DAG.getNode(ISD::CONCAT_VECTORS, dl, MVT::v64i1, Lo, Hi); | |||
25215 | } | |||
25216 | ||||
25217 | // Custom splitting for BWI types when AVX512F is available but BWI isn't. | |||
25218 | if ((SrcVT == MVT::v32i16 || SrcVT == MVT::v64i8) && DstVT.isVector() && | |||
25219 | DAG.getTargetLoweringInfo().isTypeLegal(DstVT)) { | |||
25220 | SDLoc dl(Op); | |||
25221 | SDValue Lo, Hi; | |||
25222 | std::tie(Lo, Hi) = DAG.SplitVector(Op.getOperand(0), dl); | |||
25223 | EVT CastVT = MVT::getVectorVT(DstVT.getVectorElementType(), | |||
25224 | DstVT.getVectorNumElements() / 2); | |||
25225 | Lo = DAG.getBitcast(CastVT, Lo); | |||
25226 | Hi = DAG.getBitcast(CastVT, Hi); | |||
25227 | return DAG.getNode(ISD::CONCAT_VECTORS, dl, DstVT, Lo, Hi); | |||
25228 | } | |||
25229 | ||||
25230 | // Use MOVMSK for vector to scalar conversion to prevent scalarization. | |||
25231 | if ((SrcVT == MVT::v16i1 || SrcVT == MVT::v32i1) && DstVT.isScalarInteger()) { | |||
25232 | assert(!Subtarget.hasAVX512() && "Should use K-registers with AVX512"); | |||
25233 | MVT SExtVT = SrcVT == MVT::v16i1 ? MVT::v16i8 : MVT::v32i8; | |||
25234 | SDLoc DL(Op); | |||
25235 | SDValue V = DAG.getSExtOrTrunc(Src, DL, SExtVT); | |||
25236 | V = getPMOVMSKB(DL, V, DAG, Subtarget); | |||
25237 | return DAG.getZExtOrTrunc(V, DL, DstVT); | |||
25238 | } | |||
25239 | ||||
25240 | if (SrcVT == MVT::v2i32 || SrcVT == MVT::v4i16 || SrcVT == MVT::v8i8 || | |||
25241 | SrcVT == MVT::i64) { | |||
25242 | assert(Subtarget.hasSSE2() && "Requires at least SSE2!"); | |||
25243 | if (DstVT != MVT::f64 && DstVT != MVT::i64 && | |||
25244 | !(DstVT == MVT::x86mmx && SrcVT.isVector())) | |||
25245 | // This conversion needs to be expanded. | |||
25246 | return SDValue(); | |||
25247 | ||||
25248 | SDLoc dl(Op); | |||
25249 | if (SrcVT.isVector()) { | |||
25250 | // Widen the input vector in the case of MVT::v2i32. | |||
25251 | // Example: from MVT::v2i32 to MVT::v4i32. | |||
25252 | MVT NewVT = MVT::getVectorVT(SrcVT.getVectorElementType(), | |||
25253 | SrcVT.getVectorNumElements() * 2); | |||
25254 | Src = DAG.getNode(ISD::CONCAT_VECTORS, dl, NewVT, Src, | |||
25255 | DAG.getUNDEF(SrcVT)); | |||
25256 | } else { | |||
25257 | assert(SrcVT == MVT::i64 && !Subtarget.is64Bit() && | |||
25258 |        "Unexpected source type in LowerBITCAST"); | |||
25259 | Src = DAG.getNode(ISD::SCALAR_TO_VECTOR, dl, MVT::v2i64, Src); | |||
25260 | } | |||
25261 | ||||
25262 | MVT V2X64VT = DstVT == MVT::f64 ? MVT::v2f64 : MVT::v2i64; | |||
25263 | Src = DAG.getNode(ISD::BITCAST, dl, V2X64VT, Src); | |||
25264 | ||||
25265 | if (DstVT == MVT::x86mmx) | |||
25266 | return DAG.getNode(X86ISD::MOVDQ2Q, dl, DstVT, Src); | |||
25267 | ||||
25268 | return DAG.getNode(ISD::EXTRACT_VECTOR_ELT, dl, DstVT, Src, | |||
25269 | DAG.getIntPtrConstant(0, dl)); | |||
25270 | } | |||
25271 | ||||
25272 | assert(Subtarget.is64Bit() && !Subtarget.hasSSE2() && | |||
25273 |        Subtarget.hasMMX() && "Unexpected custom BITCAST"); | |||
25274 | assert((DstVT == MVT::i64 || | |||
25275 |         (DstVT.isVector() && DstVT.getSizeInBits()==64)) && | |||
25276 |        "Unexpected custom BITCAST"); | |||
25277 | // i64 <=> MMX conversions are Legal. | |||
25278 | if (SrcVT==MVT::i64 && DstVT.isVector()) | |||
25279 | return Op; | |||
25280 | if (DstVT==MVT::i64 && SrcVT.isVector()) | |||
25281 | return Op; | |||
25282 | // MMX <=> MMX conversions are Legal. | |||
25283 | if (SrcVT.isVector() && DstVT.isVector()) | |||
25284 | return Op; | |||
25285 | // All other conversions need to be expanded. | |||
25286 | return SDValue(); | |||
25287 | } | |||
25288 | ||||
25289 | /// Compute the horizontal sum of bytes in V for the elements of VT. | |||
25290 | /// | |||
25291 | /// Requires V to be a byte vector and VT to be an integer vector type with | |||
25292 | /// wider elements than V's type. The width of the elements of VT determines | |||
25293 | /// how many bytes of V are summed horizontally to produce each element of the | |||
25294 | /// result. | |||
25295 | static SDValue LowerHorizontalByteSum(SDValue V, MVT VT, | |||
25296 | const X86Subtarget &Subtarget, | |||
25297 | SelectionDAG &DAG) { | |||
25298 | SDLoc DL(V); | |||
25299 | MVT ByteVecVT = V.getSimpleValueType(); | |||
25300 | MVT EltVT = VT.getVectorElementType(); | |||
25301 | assert(ByteVecVT.getVectorElementType() == MVT::i8 && | |||
25302 |        "Expected value to have byte element type."); | |||
25303 | assert(EltVT != MVT::i8 && | |||
25304 |        "Horizontal byte sum only makes sense for wider elements!"); | |||
25305 | unsigned VecSize = VT.getSizeInBits(); | |||
25306 | assert(ByteVecVT.getSizeInBits() == VecSize && "Cannot change vector size!"); | |||
25307 | ||||
25308 | // The PSADBW instruction horizontally adds all bytes and leaves the result | |||
25309 | // in i64 chunks, thus directly computing the pop count for v2i64 and v4i64. | |||
25310 | if (EltVT == MVT::i64) { | |||
25311 | SDValue Zeros = DAG.getConstant(0, DL, ByteVecVT); | |||
25312 | MVT SadVecVT = MVT::getVectorVT(MVT::i64, VecSize / 64); | |||
25313 | V = DAG.getNode(X86ISD::PSADBW, DL, SadVecVT, V, Zeros); | |||
25314 | return DAG.getBitcast(VT, V); | |||
25315 | } | |||
25316 | ||||
25317 | if (EltVT == MVT::i32) { | |||
25318 | // We unpack the low half and high half into i32s interleaved with zeros so | |||
25319 | // that we can use PSADBW to horizontally sum them. The most useful part of | |||
25320 | // this is that it lines up the results of two PSADBW instructions to be | |||
25321 | // two v2i64 vectors which concatenated are the 4 population counts. We can | |||
25322 | // then use PACKUSWB to shrink and concatenate them into a v4i32 again. | |||
25323 | SDValue Zeros = DAG.getConstant(0, DL, VT); | |||
25324 | SDValue V32 = DAG.getBitcast(VT, V); | |||
25325 | SDValue Low = getUnpackl(DAG, DL, VT, V32, Zeros); | |||
25326 | SDValue High = getUnpackh(DAG, DL, VT, V32, Zeros); | |||
25327 | ||||
25328 | // Do the horizontal sums into two v2i64s. | |||
25329 | Zeros = DAG.getConstant(0, DL, ByteVecVT); | |||
25330 | MVT SadVecVT = MVT::getVectorVT(MVT::i64, VecSize / 64); | |||
25331 | Low = DAG.getNode(X86ISD::PSADBW, DL, SadVecVT, | |||
25332 | DAG.getBitcast(ByteVecVT, Low), Zeros); | |||
25333 | High = DAG.getNode(X86ISD::PSADBW, DL, SadVecVT, | |||
25334 | DAG.getBitcast(ByteVecVT, High), Zeros); | |||
25335 | ||||
25336 | // Merge them together. | |||
25337 | MVT ShortVecVT = MVT::getVectorVT(MVT::i16, VecSize / 16); | |||
25338 | V = DAG.getNode(X86ISD::PACKUS, DL, ByteVecVT, | |||
25339 | DAG.getBitcast(ShortVecVT, Low), | |||
25340 | DAG.getBitcast(ShortVecVT, High)); | |||
25341 | ||||
25342 | return DAG.getBitcast(VT, V); | |||
25343 | } | |||
25344 | ||||
25345 | // The only element type left is i16. | |||
25346 | assert(EltVT == MVT::i16 && "Unknown how to handle type"); | |||
25347 | ||||
25348 | // To obtain pop count for each i16 element starting from the pop count for | |||
25349 | // i8 elements, shift the i16s left by 8, sum as i8s, and then shift as i16s | |||
25350 | // right by 8. It is important to shift as i16s, as an i8 vector shift isn't | |||
25351 | // directly supported. | |||
25352 | SDValue ShifterV = DAG.getConstant(8, DL, VT); | |||
25353 | SDValue Shl = DAG.getNode(ISD::SHL, DL, VT, DAG.getBitcast(VT, V), ShifterV); | |||
25354 | V = DAG.getNode(ISD::ADD, DL, ByteVecVT, DAG.getBitcast(ByteVecVT, Shl), | |||
25355 | DAG.getBitcast(ByteVecVT, V)); | |||
25356 | return DAG.getNode(ISD::SRL, DL, VT, DAG.getBitcast(VT, V), ShifterV); | |||
25357 | } | |||
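// Worked example for the i16 tail above (illustrative). For one i16 lane
// holding per-byte pop counts [b1:b0]:
//   shl i16 by 8:  [b0 : 0]
//   add as i8s:    [b0+b1 : b0]   // no cross-byte carry; counts <= 8
//   srl i16 by 8:  [0 : b0+b1]    // the i16 pop count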
25358 | ||||
25359 | static SDValue LowerVectorCTPOPInRegLUT(SDValue Op, const SDLoc &DL, | |||
25360 | const X86Subtarget &Subtarget, | |||
25361 | SelectionDAG &DAG) { | |||
25362 | MVT VT = Op.getSimpleValueType(); | |||
25363 | MVT EltVT = VT.getVectorElementType(); | |||
25364 | int NumElts = VT.getVectorNumElements(); | |||
25365 | (void)EltVT; | |||
25366 | assert(EltVT == MVT::i8 && "Only vXi8 vector CTPOP lowering supported."); | |||
25367 | ||||
25368 | // Implement a lookup table in register by using an algorithm based on: | |||
25369 | // http://wm.ite.pl/articles/sse-popcount.html | |||
25370 | // | |||
25371 | // The general idea is that every lower byte nibble in the input vector is an | |||
25372 | // index into an in-register pre-computed pop count table. We then split the | |||
25373 | // input vector into two new ones: (1) a vector with only the shifted-right | |||
25374 | // higher nibbles for each byte and (2) a vector with the lower nibbles (and | |||
25375 | // masked-out higher ones) for each byte. PSHUFB is used separately with both | |||
25376 | // to index the in-register table. Next, both are added and the result is an | |||
25377 | // i8 vector where each element contains the pop count for its input byte. | |||
25378 | const int LUT[16] = {/* 0 */ 0, /* 1 */ 1, /* 2 */ 1, /* 3 */ 2, | |||
25379 | /* 4 */ 1, /* 5 */ 2, /* 6 */ 2, /* 7 */ 3, | |||
25380 | /* 8 */ 1, /* 9 */ 2, /* a */ 2, /* b */ 3, | |||
25381 | /* c */ 2, /* d */ 3, /* e */ 3, /* f */ 4}; | |||
25382 | ||||
25383 | SmallVector<SDValue, 64> LUTVec; | |||
25384 | for (int i = 0; i < NumElts; ++i) | |||
25385 | LUTVec.push_back(DAG.getConstant(LUT[i % 16], DL, MVT::i8)); | |||
25386 | SDValue InRegLUT = DAG.getBuildVector(VT, DL, LUTVec); | |||
25387 | SDValue M0F = DAG.getConstant(0x0F, DL, VT); | |||
25388 | ||||
25389 | // High nibbles | |||
25390 | SDValue FourV = DAG.getConstant(4, DL, VT); | |||
25391 | SDValue HiNibbles = DAG.getNode(ISD::SRL, DL, VT, Op, FourV); | |||
25392 | ||||
25393 | // Low nibbles | |||
25394 | SDValue LoNibbles = DAG.getNode(ISD::AND, DL, VT, Op, M0F); | |||
25395 | ||||
25396 | // The input vector is used as the shuffle mask that index elements into the | |||
25397 | // LUT. After counting low and high nibbles, add the vector to obtain the | |||
25398 | // final pop count per i8 element. | |||
25399 | SDValue HiPopCnt = DAG.getNode(X86ISD::PSHUFB, DL, VT, InRegLUT, HiNibbles); | |||
25400 | SDValue LoPopCnt = DAG.getNode(X86ISD::PSHUFB, DL, VT, InRegLUT, LoNibbles); | |||
25401 | return DAG.getNode(ISD::ADD, DL, VT, HiPopCnt, LoPopCnt); | |||
25402 | } | |||
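// Worked example for the LUT lookup above (illustrative), for the input byte
// 0xB5 = 0b10110101:
//   high nibble 0xB -> LUT[0xB] = 3
//   low nibble  0x5 -> LUT[0x5] = 2
//   popcount(0xB5)  = 3 + 2 = 5
// PSHUFB performs sixteen such lookups per 128-bit lane in one instruction.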
25403 | ||||
25404 | // Please ensure that any codegen change from LowerVectorCTPOP is reflected in | |||
25405 | // updated cost models in X86TTIImpl::getIntrinsicInstrCost. | |||
25406 | static SDValue LowerVectorCTPOP(SDValue Op, const X86Subtarget &Subtarget, | |||
25407 | SelectionDAG &DAG) { | |||
25408 | MVT VT = Op.getSimpleValueType(); | |||
25409 | assert((VT.is512BitVector() || VT.is256BitVector() || VT.is128BitVector()) && | |||
25410 |        "Unknown CTPOP type to handle"); | |||
25411 | SDLoc DL(Op.getNode()); | |||
25412 | SDValue Op0 = Op.getOperand(0); | |||
25413 | ||||
25414 | // TRUNC(CTPOP(ZEXT(X))) to make use of vXi32/vXi64 VPOPCNT instructions. | |||
25415 | if (Subtarget.hasVPOPCNTDQ()) { | |||
25416 | unsigned NumElems = VT.getVectorNumElements(); | |||
25417 | assert((VT.getVectorElementType() == MVT::i8 || | |||
25418 |         VT.getVectorElementType() == MVT::i16) && "Unexpected type"); | |||
25419 | if (NumElems < 16 || (NumElems == 16 && Subtarget.canExtendTo512DQ())) { | |||
25420 | MVT NewVT = MVT::getVectorVT(MVT::i32, NumElems); | |||
25421 | Op = DAG.getNode(ISD::ZERO_EXTEND, DL, NewVT, Op0); | |||
25422 | Op = DAG.getNode(ISD::CTPOP, DL, NewVT, Op); | |||
25423 | return DAG.getNode(ISD::TRUNCATE, DL, VT, Op); | |||
25424 | } | |||
25425 | } | |||
25426 | ||||
25427 | // Decompose 256-bit ops into smaller 128-bit ops. | |||
25428 | if (VT.is256BitVector() && !Subtarget.hasInt256()) | |||
25429 | return Lower256IntUnary(Op, DAG); | |||
25430 | ||||
25431 | // Decompose 512-bit ops into smaller 256-bit ops. | |||
25432 | if (VT.is512BitVector() && !Subtarget.hasBWI()) | |||
25433 | return Lower512IntUnary(Op, DAG); | |||
25434 | ||||
25435 | // For element types greater than i8, do vXi8 pop counts and a bytesum. | |||
25436 | if (VT.getScalarType() != MVT::i8) { | |||
25437 | MVT ByteVT = MVT::getVectorVT(MVT::i8, VT.getSizeInBits() / 8); | |||
25438 | SDValue ByteOp = DAG.getBitcast(ByteVT, Op0); | |||
25439 | SDValue PopCnt8 = DAG.getNode(ISD::CTPOP, DL, ByteVT, ByteOp); | |||
25440 | return LowerHorizontalByteSum(PopCnt8, VT, Subtarget, DAG); | |||
25441 | } | |||
25442 | ||||
25443 | // We can't use the fast LUT approach, so fall back on LegalizeDAG. | |||
25444 | if (!Subtarget.hasSSSE3()) | |||
25445 | return SDValue(); | |||
25446 | ||||
25447 | return LowerVectorCTPOPInRegLUT(Op0, DL, Subtarget, DAG); | |||
25448 | } | |||
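// Illustrative VPOPCNTDQ path above (sketch): a v16i8 CTPOP on a target that
// can extend to 512 bits becomes
//   trunc.v16i8( vpopcntd( zext.v16i32(x) ) )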
25449 | ||||
25450 | static SDValue LowerCTPOP(SDValue Op, const X86Subtarget &Subtarget, | |||
25451 | SelectionDAG &DAG) { | |||
25452 | assert(Op.getSimpleValueType().isVector() && | |||
25453 |        "We only do custom lowering for vector population count."); | |||
25454 | return LowerVectorCTPOP(Op, Subtarget, DAG); | |||
25455 | } | |||
25456 | ||||
25457 | static SDValue LowerBITREVERSE_XOP(SDValue Op, SelectionDAG &DAG) { | |||
25458 | MVT VT = Op.getSimpleValueType(); | |||
25459 | SDValue In = Op.getOperand(0); | |||
25460 | SDLoc DL(Op); | |||
25461 | ||||
25462 | // For scalars, it's still beneficial to transfer to/from the SIMD unit to | |||
25463 | // perform the BITREVERSE. | |||
25464 | if (!VT.isVector()) { | |||
25465 | MVT VecVT = MVT::getVectorVT(VT, 128 / VT.getSizeInBits()); | |||
25466 | SDValue Res = DAG.getNode(ISD::SCALAR_TO_VECTOR, DL, VecVT, In); | |||
25467 | Res = DAG.getNode(ISD::BITREVERSE, DL, VecVT, Res); | |||
25468 | return DAG.getNode(ISD::EXTRACT_VECTOR_ELT, DL, VT, Res, | |||
25469 | DAG.getIntPtrConstant(0, DL)); | |||
25470 | } | |||
25471 | ||||
25472 | int NumElts = VT.getVectorNumElements(); | |||
25473 | int ScalarSizeInBytes = VT.getScalarSizeInBits() / 8; | |||
25474 | ||||
25475 | // Decompose 256-bit ops into smaller 128-bit ops. | |||
25476 | if (VT.is256BitVector()) | |||
25477 | return Lower256IntUnary(Op, DAG); | |||
25478 | ||||
25479 | assert(VT.is128BitVector() && | |||
25480 |        "Only 128-bit vector bitreverse lowering supported."); | |||
25481 | ||||
25482 | // VPPERM reverses the bits of a byte with the permute Op (2 << 5), and we | |||
25483 | // perform the BSWAP in the shuffle. | |||
25484 | // It's best to shuffle using the second operand, as this will implicitly allow | |||
25485 | // memory folding for multiple vectors. | |||
25486 | SmallVector<SDValue, 16> MaskElts; | |||
25487 | for (int i = 0; i != NumElts; ++i) { | |||
25488 | for (int j = ScalarSizeInBytes - 1; j >= 0; --j) { | |||
25489 | int SourceByte = 16 + (i * ScalarSizeInBytes) + j; | |||
25490 | int PermuteByte = SourceByte | (2 << 5); | |||
25491 | MaskElts.push_back(DAG.getConstant(PermuteByte, DL, MVT::i8)); | |||
25492 | } | |||
25493 | } | |||
25494 | ||||
25495 | SDValue Mask = DAG.getBuildVector(MVT::v16i8, DL, MaskElts); | |||
25496 | SDValue Res = DAG.getBitcast(MVT::v16i8, In); | |||
25497 | Res = DAG.getNode(X86ISD::VPPERM, DL, MVT::v16i8, DAG.getUNDEF(MVT::v16i8), | |||
25498 | Res, Mask); | |||
25499 | return DAG.getBitcast(VT, Res); | |||
25500 | } | |||
25501 | ||||
25502 | static SDValue LowerBITREVERSE(SDValue Op, const X86Subtarget &Subtarget, | |||
25503 | SelectionDAG &DAG) { | |||
25504 | MVT VT = Op.getSimpleValueType(); | |||
25505 | ||||
25506 | if (Subtarget.hasXOP() && !VT.is512BitVector()) | |||
25507 | return LowerBITREVERSE_XOP(Op, DAG); | |||
25508 | ||||
25509 | assert(Subtarget.hasSSSE3() && "SSSE3 required for BITREVERSE"); | |||
25510 | ||||
25511 | SDValue In = Op.getOperand(0); | |||
25512 | SDLoc DL(Op); | |||
25513 | ||||
25514 | unsigned NumElts = VT.getVectorNumElements(); | |||
25515 | assert(VT.getScalarType() == MVT::i8 && | |||
25516 |        "Only byte vector BITREVERSE supported"); | |||
25517 | ||||
25518 | // Decompose 256-bit ops into smaller 128-bit ops on pre-AVX2. | |||
25519 | if (VT.is256BitVector() && !Subtarget.hasInt256()) | |||
25520 | return Lower256IntUnary(Op, DAG); | |||
25521 | ||||
25522 | // Perform BITREVERSE using PSHUFB lookups. Each byte is split into | |||
25523 | // two nibbles and a PSHUFB lookup to find the bitreverse of each | |||
25524 | // 0-15 value (moved to the other nibble). | |||
25525 | SDValue NibbleMask = DAG.getConstant(0xF, DL, VT); | |||
25526 | SDValue Lo = DAG.getNode(ISD::AND, DL, VT, In, NibbleMask); | |||
25527 | SDValue Hi = DAG.getNode(ISD::SRL, DL, VT, In, DAG.getConstant(4, DL, VT)); | |||
25528 | ||||
25529 | const int LoLUT[16] = { | |||
25530 | /* 0 */ 0x00, /* 1 */ 0x80, /* 2 */ 0x40, /* 3 */ 0xC0, | |||
25531 | /* 4 */ 0x20, /* 5 */ 0xA0, /* 6 */ 0x60, /* 7 */ 0xE0, | |||
25532 | /* 8 */ 0x10, /* 9 */ 0x90, /* a */ 0x50, /* b */ 0xD0, | |||
25533 | /* c */ 0x30, /* d */ 0xB0, /* e */ 0x70, /* f */ 0xF0}; | |||
25534 | const int HiLUT[16] = { | |||
25535 | /* 0 */ 0x00, /* 1 */ 0x08, /* 2 */ 0x04, /* 3 */ 0x0C, | |||
25536 | /* 4 */ 0x02, /* 5 */ 0x0A, /* 6 */ 0x06, /* 7 */ 0x0E, | |||
25537 | /* 8 */ 0x01, /* 9 */ 0x09, /* a */ 0x05, /* b */ 0x0D, | |||
25538 | /* c */ 0x03, /* d */ 0x0B, /* e */ 0x07, /* f */ 0x0F}; | |||
25539 | ||||
25540 | SmallVector<SDValue, 16> LoMaskElts, HiMaskElts; | |||
25541 | for (unsigned i = 0; i < NumElts; ++i) { | |||
25542 | LoMaskElts.push_back(DAG.getConstant(LoLUT[i % 16], DL, MVT::i8)); | |||
25543 | HiMaskElts.push_back(DAG.getConstant(HiLUT[i % 16], DL, MVT::i8)); | |||
25544 | } | |||
25545 | ||||
25546 | SDValue LoMask = DAG.getBuildVector(VT, DL, LoMaskElts); | |||
25547 | SDValue HiMask = DAG.getBuildVector(VT, DL, HiMaskElts); | |||
25548 | Lo = DAG.getNode(X86ISD::PSHUFB, DL, VT, LoMask, Lo); | |||
25549 | Hi = DAG.getNode(X86ISD::PSHUFB, DL, VT, HiMask, Hi); | |||
25550 | return DAG.getNode(ISD::OR, DL, VT, Lo, Hi); | |||
25551 | } | |||
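// Worked example for the nibble LUTs above (illustrative): bit-reversing the
// byte 0x2D = 0b00101101:
//   low nibble  0xD -> LoLUT[0xD] = 0xB0   // 1101 reversed, placed high
//   high nibble 0x2 -> HiLUT[0x2] = 0x04   // 0010 reversed, placed low
//   0xB0 | 0x04 = 0xB4 = 0b10110100, the bit-reverse of 0x2D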
25552 | ||||
25553 | static SDValue lowerAtomicArithWithLOCK(SDValue N, SelectionDAG &DAG, | |||
25554 | const X86Subtarget &Subtarget, | |||
25555 | bool AllowIncDec = true) { | |||
25556 | unsigned NewOpc = 0; | |||
25557 | switch (N->getOpcode()) { | |||
25558 | case ISD::ATOMIC_LOAD_ADD: | |||
25559 | NewOpc = X86ISD::LADD; | |||
25560 | break; | |||
25561 | case ISD::ATOMIC_LOAD_SUB: | |||
25562 | NewOpc = X86ISD::LSUB; | |||
25563 | break; | |||
25564 | case ISD::ATOMIC_LOAD_OR: | |||
25565 | NewOpc = X86ISD::LOR; | |||
25566 | break; | |||
25567 | case ISD::ATOMIC_LOAD_XOR: | |||
25568 | NewOpc = X86ISD::LXOR; | |||
25569 | break; | |||
25570 | case ISD::ATOMIC_LOAD_AND: | |||
25571 | NewOpc = X86ISD::LAND; | |||
25572 | break; | |||
25573 | default: | |||
25574 | llvm_unreachable("Unknown ATOMIC_LOAD_ opcode"); | |||
25575 | } | |||
25576 | ||||
25577 | MachineMemOperand *MMO = cast<MemSDNode>(N)->getMemOperand(); | |||
25578 | ||||
25579 | if (auto *C = dyn_cast<ConstantSDNode>(N->getOperand(2))) { | |||
25580 | // Convert to inc/dec if they aren't slow or we are optimizing for size. | |||
25581 | if (AllowIncDec && (!Subtarget.slowIncDec() || | |||
25582 | DAG.getMachineFunction().getFunction().optForSize())) { | |||
25583 | if ((NewOpc == X86ISD::LADD && C->isOne()) || | |||
25584 | (NewOpc == X86ISD::LSUB && C->isAllOnesValue())) | |||
25585 | return DAG.getMemIntrinsicNode(X86ISD::LINC, SDLoc(N), | |||
25586 | DAG.getVTList(MVT::i32, MVT::Other), | |||
25587 | {N->getOperand(0), N->getOperand(1)}, | |||
25588 | /*MemVT=*/N->getSimpleValueType(0), MMO); | |||
25589 | if ((NewOpc == X86ISD::LSUB && C->isOne()) || | |||
25590 | (NewOpc == X86ISD::LADD && C->isAllOnesValue())) | |||
25591 | return DAG.getMemIntrinsicNode(X86ISD::LDEC, SDLoc(N), | |||
25592 | DAG.getVTList(MVT::i32, MVT::Other), | |||
25593 | {N->getOperand(0), N->getOperand(1)}, | |||
25594 | /*MemVT=*/N->getSimpleValueType(0), MMO); | |||
25595 | } | |||
25596 | } | |||
25597 | ||||
25598 | return DAG.getMemIntrinsicNode( | |||
25599 | NewOpc, SDLoc(N), DAG.getVTList(MVT::i32, MVT::Other), | |||
25600 | {N->getOperand(0), N->getOperand(1), N->getOperand(2)}, | |||
25601 | /*MemVT=*/N->getSimpleValueType(0), MMO); | |||
25602 | } | |||
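// Illustrative mapping produced above (sketch), when inc/dec are not slow or
// we are optimizing for size:
//   atomic_load_add p, 1   -> X86ISD::LINC   // lock inc
//   atomic_load_add p, -1  -> X86ISD::LDEC   // lock dec
//   atomic_load_sub p, 1   -> X86ISD::LDEC
//   atomic_load_and p, v   -> X86ISD::LAND   // lock and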
25603 | ||||
25604 | /// Lower atomic_load_ops into LOCK-prefixed operations. | |||
25605 | static SDValue lowerAtomicArith(SDValue N, SelectionDAG &DAG, | |||
25606 | const X86Subtarget &Subtarget) { | |||
25607 | SDValue Chain = N->getOperand(0); | |||
25608 | SDValue LHS = N->getOperand(1); | |||
25609 | SDValue RHS = N->getOperand(2); | |||
25610 | unsigned Opc = N->getOpcode(); | |||
25611 | MVT VT = N->getSimpleValueType(0); | |||
25612 | SDLoc DL(N); | |||
25613 | ||||
25614 | // We can lower atomic_load_add into LXADD. However, any other atomicrmw op | |||
25615 | // can only be lowered when the result is unused. They should have already | |||
25616 | // been transformed into a cmpxchg loop in AtomicExpand. | |||
25617 | if (N->hasAnyUseOfValue(0)) { | |||
25618 | // Handle (atomic_load_sub p, v) as (atomic_load_add p, -v), to be able to | |||
25619 | // select LXADD if LOCK_SUB can't be selected. | |||
25620 | if (Opc == ISD::ATOMIC_LOAD_SUB) { | |||
25621 | AtomicSDNode *AN = cast<AtomicSDNode>(N.getNode()); | |||
25622 | RHS = DAG.getNode(ISD::SUB, DL, VT, DAG.getConstant(0, DL, VT), RHS); | |||
25623 | return DAG.getAtomic(ISD::ATOMIC_LOAD_ADD, DL, VT, Chain, LHS, | |||
25624 | RHS, AN->getMemOperand()); | |||
25625 | } | |||
25626 | assert(Opc == ISD::ATOMIC_LOAD_ADD && | |||
25627 |        "Used AtomicRMW ops other than Add should have been expanded!"); | |||
25628 | return N; | |||
25629 | } | |||
25630 | ||||
25631 | SDValue LockOp = lowerAtomicArithWithLOCK(N, DAG, Subtarget); | |||
25632 | // RAUW the chain, but don't worry about the result, as it's unused. | |||
25633 | assert(!N->hasAnyUseOfValue(0)); | |||
25634 | DAG.ReplaceAllUsesOfValueWith(N.getValue(1), LockOp.getValue(1)); | |||
25635 | return SDValue(); | |||
25636 | } | |||
25637 | ||||
25638 | static SDValue LowerATOMIC_STORE(SDValue Op, SelectionDAG &DAG) { | |||
25639 | SDNode *Node = Op.getNode(); | |||
25640 | SDLoc dl(Node); | |||
25641 | EVT VT = cast<AtomicSDNode>(Node)->getMemoryVT(); | |||
25642 | ||||
25643 | // Convert seq_cst store -> xchg | |||
25644 | // Convert wide store -> swap (-> cmpxchg8b/cmpxchg16b) | |||
25645 | // FIXME: On 32-bit, store -> fist or movq would be more efficient | |||
25646 | // (The only way to get a 16-byte store is cmpxchg16b) | |||
25647 | // FIXME: 16-byte ATOMIC_SWAP isn't actually hooked up at the moment. | |||
25648 | if (cast<AtomicSDNode>(Node)->getOrdering() == | |||
25649 | AtomicOrdering::SequentiallyConsistent || | |||
25650 | !DAG.getTargetLoweringInfo().isTypeLegal(VT)) { | |||
25651 | SDValue Swap = DAG.getAtomic(ISD::ATOMIC_SWAP, dl, | |||
25652 | cast<AtomicSDNode>(Node)->getMemoryVT(), | |||
25653 | Node->getOperand(0), | |||
25654 | Node->getOperand(1), Node->getOperand(2), | |||
25655 | cast<AtomicSDNode>(Node)->getMemOperand()); | |||
25656 | return Swap.getValue(1); | |||
25657 | } | |||
25658 | // Other atomic stores have a simple pattern. | |||
25659 | return Op; | |||
25660 | } | |||
25661 | ||||
25662 | static SDValue LowerADDSUBCARRY(SDValue Op, SelectionDAG &DAG) { | |||
25663 | SDNode *N = Op.getNode(); | |||
25664 | MVT VT = N->getSimpleValueType(0); | |||
25665 | ||||
25666 | // Let legalize expand this if it isn't a legal type yet. | |||
25667 | if (!DAG.getTargetLoweringInfo().isTypeLegal(VT)) | |||
25668 | return SDValue(); | |||
25669 | ||||
25670 | SDVTList VTs = DAG.getVTList(VT, MVT::i32); | |||
25671 | SDLoc DL(N); | |||
25672 | ||||
25673 | // Set the carry flag. | |||
25674 | SDValue Carry = Op.getOperand(2); | |||
25675 | EVT CarryVT = Carry.getValueType(); | |||
25676 | APInt NegOne = APInt::getAllOnesValue(CarryVT.getScalarSizeInBits()); | |||
25677 | Carry = DAG.getNode(X86ISD::ADD, DL, DAG.getVTList(CarryVT, MVT::i32), | |||
25678 | Carry, DAG.getConstant(NegOne, DL, CarryVT)); | |||
25679 | ||||
25680 | unsigned Opc = Op.getOpcode() == ISD::ADDCARRY ? X86ISD::ADC : X86ISD::SBB; | |||
25681 | SDValue Sum = DAG.getNode(Opc, DL, VTs, Op.getOperand(0), | |||
25682 | Op.getOperand(1), Carry.getValue(1)); | |||
25683 | ||||
25684 | SDValue SetCC = getSETCC(X86::COND_B, Sum.getValue(1), DL, DAG); | |||
25685 | if (N->getValueType(1) == MVT::i1) | |||
25686 | SetCC = DAG.getNode(ISD::TRUNCATE, DL, MVT::i1, SetCC); | |||
25687 | ||||
25688 | return DAG.getNode(ISD::MERGE_VALUES, DL, N->getVTList(), Sum, SetCC); | |||
25689 | } | |||
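// Sketch of the carry re-materialization above: adding all-ones to the
// incoming carry value produces a carry-out iff that value is nonzero, so
//   Carry = X86ISD::ADD(Carry, -1)       // EFLAGS.CF = (Carry != 0)
//   Sum   = X86ISD::ADC(x, y, EFLAGS)    // or X86ISD::SBB for ISD::SUBCARRY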
25690 | ||||
25691 | static SDValue LowerFSINCOS(SDValue Op, const X86Subtarget &Subtarget, | |||
25692 | SelectionDAG &DAG) { | |||
25693 | assert(Subtarget.isTargetDarwin() && Subtarget.is64Bit()); | |||
25694 | ||||
25695 | // For MacOSX, we want to call an alternative entry point: __sincos_stret, | |||
25696 | // which returns the values as { float, float } (in XMM0) or | |||
25697 | // { double, double } (which is returned in XMM0, XMM1). | |||
25698 | SDLoc dl(Op); | |||
25699 | SDValue Arg = Op.getOperand(0); | |||
25700 | EVT ArgVT = Arg.getValueType(); | |||
25701 | Type *ArgTy = ArgVT.getTypeForEVT(*DAG.getContext()); | |||
25702 | ||||
25703 | TargetLowering::ArgListTy Args; | |||
25704 | TargetLowering::ArgListEntry Entry; | |||
25705 | ||||
25706 | Entry.Node = Arg; | |||
25707 | Entry.Ty = ArgTy; | |||
25708 | Entry.IsSExt = false; | |||
25709 | Entry.IsZExt = false; | |||
25710 | Args.push_back(Entry); | |||
25711 | ||||
25712 | bool isF64 = ArgVT == MVT::f64; | |||
25713 | // Only optimize x86_64 for now. i386 is a bit messy. For f32, | |||
25714 | // the small struct {f32, f32} is returned in (eax, edx). For f64, | |||
25715 | // the results are returned via SRet in memory. | |||
25716 | const TargetLowering &TLI = DAG.getTargetLoweringInfo(); | |||
25717 | RTLIB::Libcall LC = isF64 ? RTLIB::SINCOS_STRET_F64 : RTLIB::SINCOS_STRET_F32; | |||
25718 | const char *LibcallName = TLI.getLibcallName(LC); | |||
25719 | SDValue Callee = | |||
25720 | DAG.getExternalSymbol(LibcallName, TLI.getPointerTy(DAG.getDataLayout())); | |||
25721 | ||||
25722 | Type *RetTy = isF64 ? (Type *)StructType::get(ArgTy, ArgTy) | |||
25723 | : (Type *)VectorType::get(ArgTy, 4); | |||
25724 | ||||
25725 | TargetLowering::CallLoweringInfo CLI(DAG); | |||
25726 | CLI.setDebugLoc(dl) | |||
25727 | .setChain(DAG.getEntryNode()) | |||
25728 | .setLibCallee(CallingConv::C, RetTy, Callee, std::move(Args)); | |||
25729 | ||||
25730 | std::pair<SDValue, SDValue> CallResult = TLI.LowerCallTo(CLI); | |||
25731 | ||||
25732 | if (isF64) | |||
25733 | // Returned in xmm0 and xmm1. | |||
25734 | return CallResult.first; | |||
25735 | ||||
25736 | // Returned in bits 0:31 and 32:63 of xmm0. | |||
25737 | SDValue SinVal = DAG.getNode(ISD::EXTRACT_VECTOR_ELT, dl, ArgVT, | |||
25738 | CallResult.first, DAG.getIntPtrConstant(0, dl)); | |||
25739 | SDValue CosVal = DAG.getNode(ISD::EXTRACT_VECTOR_ELT, dl, ArgVT, | |||
25740 | CallResult.first, DAG.getIntPtrConstant(1, dl)); | |||
25741 | SDVTList Tys = DAG.getVTList(ArgVT, ArgVT); | |||
25742 | return DAG.getNode(ISD::MERGE_VALUES, dl, Tys, SinVal, CosVal); | |||
25743 | } | |||
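// Illustrative shape of the f32 call built above (sketch; the actual symbol
// comes from TLI.getLibcallName(RTLIB::SINCOS_STRET_F32), shown here as a
// placeholder):
//   %ret = call <4 x float> @sincos_stret(float %x)
//   %sin = extractelement <4 x float> %ret, i32 0
//   %cos = extractelement <4 x float> %ret, i32 1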
25744 | ||||
25745 | /// Widen a vector input to a vector of NVT. The | |||
25746 | /// input vector must have the same element type as NVT. | |||
25747 | static SDValue ExtendToType(SDValue InOp, MVT NVT, SelectionDAG &DAG, | |||
25748 | bool FillWithZeroes = false) { | |||
25749 | // Check if InOp already has the right width. | |||
25750 | MVT InVT = InOp.getSimpleValueType(); | |||
25751 | if (InVT == NVT) | |||
25752 | return InOp; | |||
25753 | ||||
25754 | if (InOp.isUndef()) | |||
25755 | return DAG.getUNDEF(NVT); | |||
25756 | ||||
25757 | assert(InVT.getVectorElementType() == NVT.getVectorElementType() && | |||
25758 |        "input and widen element type must match"); | |||
25759 | ||||
25760 | unsigned InNumElts = InVT.getVectorNumElements(); | |||
25761 | unsigned WidenNumElts = NVT.getVectorNumElements(); | |||
25762 | assert(WidenNumElts > InNumElts && WidenNumElts % InNumElts == 0 && | |||
25763 |        "Unexpected request for vector widening"); | |||
25764 | ||||
25765 | SDLoc dl(InOp); | |||
25766 | if (InOp.getOpcode() == ISD::CONCAT_VECTORS && | |||
25767 | InOp.getNumOperands() == 2) { | |||
25768 | SDValue N1 = InOp.getOperand(1); | |||
25769 | if ((ISD::isBuildVectorAllZeros(N1.getNode()) && FillWithZeroes) || | |||
25770 | N1.isUndef()) { | |||
25771 | InOp = InOp.getOperand(0); | |||
25772 | InVT = InOp.getSimpleValueType(); | |||
25773 | InNumElts = InVT.getVectorNumElements(); | |||
25774 | } | |||
25775 | } | |||
25776 | if (ISD::isBuildVectorOfConstantSDNodes(InOp.getNode()) || | |||
25777 | ISD::isBuildVectorOfConstantFPSDNodes(InOp.getNode())) { | |||
25778 | SmallVector<SDValue, 16> Ops; | |||
25779 | for (unsigned i = 0; i < InNumElts; ++i) | |||
25780 | Ops.push_back(InOp.getOperand(i)); | |||
25781 | ||||
25782 | EVT EltVT = InOp.getOperand(0).getValueType(); | |||
25783 | ||||
25784 | SDValue FillVal = FillWithZeroes ? DAG.getConstant(0, dl, EltVT) : | |||
25785 | DAG.getUNDEF(EltVT); | |||
25786 | for (unsigned i = 0; i < WidenNumElts - InNumElts; ++i) | |||
25787 | Ops.push_back(FillVal); | |||
25788 | return DAG.getBuildVector(NVT, dl, Ops); | |||
25789 | } | |||
25790 | SDValue FillVal = FillWithZeroes ? DAG.getConstant(0, dl, NVT) : | |||
25791 | DAG.getUNDEF(NVT); | |||
25792 | return DAG.getNode(ISD::INSERT_SUBVECTOR, dl, NVT, FillVal, | |||
25793 | InOp, DAG.getIntPtrConstant(0, dl)); | |||
25794 | } | |||
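// Illustrative behavior of ExtendToType (sketch):
//   ExtendToType(<a, b> : v2i32, v4i32, DAG)        -> <a, b, undef, undef>
//   ExtendToType(<a, b> : v2i32, v4i32, DAG, true)  -> <a, b, 0, 0>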
25795 | ||||
25796 | static SDValue LowerMSCATTER(SDValue Op, const X86Subtarget &Subtarget, | |||
25797 | SelectionDAG &DAG) { | |||
25798 | assert(Subtarget.hasAVX512() && | |||
25799 |        "MGATHER/MSCATTER are supported on AVX-512 arch only"); | |||
25800 | ||||
25801 | MaskedScatterSDNode *N = cast<MaskedScatterSDNode>(Op.getNode()); | |||
25802 | SDValue Src = N->getValue(); | |||
25803 | MVT VT = Src.getSimpleValueType(); | |||
25804 | assert(VT.getScalarSizeInBits() >= 32 && "Unsupported scatter op"); | |||
25805 | SDLoc dl(Op); | |||
25806 | ||||
25807 | SDValue Scale = N->getScale(); | |||
25808 | SDValue Index = N->getIndex(); | |||
25809 | SDValue Mask = N->getMask(); | |||
25810 | SDValue Chain = N->getChain(); | |||
25811 | SDValue BasePtr = N->getBasePtr(); | |||
25812 | ||||
25813 | if (VT == MVT::v2f32) { | |||
25814 | assert(Mask.getValueType() == MVT::v2i1 && "Unexpected mask type"); | |||
25815 | // If the index is v2i64 and we have VLX we can use xmm for data and index. | |||
25816 | if (Index.getValueType() == MVT::v2i64 && Subtarget.hasVLX()) { | |||
25817 | Src = DAG.getNode(ISD::CONCAT_VECTORS, dl, MVT::v4f32, Src, | |||
25818 | DAG.getUNDEF(MVT::v2f32)); | |||
25819 | SDVTList VTs = DAG.getVTList(MVT::v2i1, MVT::Other); | |||
25820 | SDValue Ops[] = {Chain, Src, Mask, BasePtr, Index, Scale}; | |||
25821 | SDValue NewScatter = DAG.getTargetMemSDNode<X86MaskedScatterSDNode>( | |||
25822 | VTs, Ops, dl, N->getMemoryVT(), N->getMemOperand()); | |||
25823 | DAG.ReplaceAllUsesWith(Op, SDValue(NewScatter.getNode(), 1)); | |||
25824 | return SDValue(NewScatter.getNode(), 1); | |||
25825 | } | |||
25826 | return SDValue(); | |||
25827 | } | |||
25828 | ||||
25829 | if (VT == MVT::v2i32) { | |||
25830 | assert(Mask.getValueType() == MVT::v2i1 && "Unexpected mask type"); | |||
25831 | Src = DAG.getNode(ISD::CONCAT_VECTORS, dl, MVT::v4i32, Src, | |||
25832 | DAG.getUNDEF(MVT::v2i32)); | |||
25833 | // If the index is v2i64 and we have VLX we can use xmm for data and index. | |||
25834 | if (Index.getValueType() == MVT::v2i64 && Subtarget.hasVLX()) { | |||
25835 | SDVTList VTs = DAG.getVTList(MVT::v2i1, MVT::Other); | |||
25836 | SDValue Ops[] = {Chain, Src, Mask, BasePtr, Index, Scale}; | |||
25837 | SDValue NewScatter = DAG.getTargetMemSDNode<X86MaskedScatterSDNode>( | |||
25838 | VTs, Ops, dl, N->getMemoryVT(), N->getMemOperand()); | |||
25839 | DAG.ReplaceAllUsesWith(Op, SDValue(NewScatter.getNode(), 1)); | |||
25840 | return SDValue(NewScatter.getNode(), 1); | |||
25841 | } | |||
25842 | // Custom widen all the operands to avoid promotion. | |||
25843 | EVT NewIndexVT = EVT::getVectorVT( | |||
25844 | *DAG.getContext(), Index.getValueType().getVectorElementType(), 4); | |||
25845 | Index = DAG.getNode(ISD::CONCAT_VECTORS, dl, NewIndexVT, Index, | |||
25846 | DAG.getUNDEF(Index.getValueType())); | |||
25847 | Mask = DAG.getNode(ISD::CONCAT_VECTORS, dl, MVT::v4i1, Mask, | |||
25848 | DAG.getConstant(0, dl, MVT::v2i1)); | |||
25849 | SDValue Ops[] = {Chain, Src, Mask, BasePtr, Index, Scale}; | |||
25850 | return DAG.getMaskedScatter(DAG.getVTList(MVT::Other), N->getMemoryVT(), dl, | |||
25851 | Ops, N->getMemOperand()); | |||
25852 | } | |||
25853 | ||||
25854 | MVT IndexVT = Index.getSimpleValueType(); | |||
25855 | MVT MaskVT = Mask.getSimpleValueType(); | |||
25856 | ||||
25857 | // If the index is v2i32, we're being called by type legalization and we | |||
25858 | // should just let the default handling take care of it. | |||
25859 | if (IndexVT == MVT::v2i32) | |||
25860 | return SDValue(); | |||
25861 | ||||
25862 | // If we don't have VLX and neither the passthru or index is 512-bits, we | |||
25863 | // need to widen until one is. | |||
25864 | if (!Subtarget.hasVLX() && !VT.is512BitVector() && | |||
25865 | !Index.getSimpleValueType().is512BitVector()) { | |||
25866 | // Determine how much we need to widen by to get a 512-bit type. | |||
25867 | unsigned Factor = std::min(512/VT.getSizeInBits(), | |||
25868 | 512/IndexVT.getSizeInBits()); | |||
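    // For example (illustrative): a v4i32 source (128 bits) with a v4i64
    // index (256 bits) gives Factor = min(512/128, 512/256) = 2, so the op
    // is widened to v8i32 data, a v8i64 index and a v8i1 mask.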
    unsigned NumElts = VT.getVectorNumElements() * Factor;

    VT = MVT::getVectorVT(VT.getVectorElementType(), NumElts);
    IndexVT = MVT::getVectorVT(IndexVT.getVectorElementType(), NumElts);
    MaskVT = MVT::getVectorVT(MVT::i1, NumElts);

    Src = ExtendToType(Src, VT, DAG);
    Index = ExtendToType(Index, IndexVT, DAG);
    Mask = ExtendToType(Mask, MaskVT, DAG, true);
  }

  SDVTList VTs = DAG.getVTList(MaskVT, MVT::Other);
  SDValue Ops[] = {Chain, Src, Mask, BasePtr, Index, Scale};
  SDValue NewScatter = DAG.getTargetMemSDNode<X86MaskedScatterSDNode>(
      VTs, Ops, dl, N->getMemoryVT(), N->getMemOperand());
  DAG.ReplaceAllUsesWith(Op, SDValue(NewScatter.getNode(), 1));
  return SDValue(NewScatter.getNode(), 1);
}

static SDValue LowerMLOAD(SDValue Op, const X86Subtarget &Subtarget,
                          SelectionDAG &DAG) {

  MaskedLoadSDNode *N = cast<MaskedLoadSDNode>(Op.getNode());
  MVT VT = Op.getSimpleValueType();
  MVT ScalarVT = VT.getScalarType();
  SDValue Mask = N->getMask();
  SDLoc dl(Op);

  assert((!N->isExpandingLoad() || Subtarget.hasAVX512()) &&
         "Expanding masked load is supported on AVX-512 target only!");

  assert((!N->isExpandingLoad() || ScalarVT.getSizeInBits() >= 32) &&
         "Expanding masked load is supported for 32 and 64-bit types only!");

  assert(Subtarget.hasAVX512() && !Subtarget.hasVLX() && !VT.is512BitVector() &&
         "Cannot lower masked load op.");

  assert((ScalarVT.getSizeInBits() >= 32 ||
          (Subtarget.hasBWI() &&
              (ScalarVT == MVT::i8 || ScalarVT == MVT::i16))) &&
         "Unsupported masked load op.");

  // This operation is legal for targets with VLX, but without
  // VLX the vector should be widened to 512 bits.
  unsigned NumEltsInWideVec = 512 / VT.getScalarSizeInBits();
  MVT WideDataVT = MVT::getVectorVT(ScalarVT, NumEltsInWideVec);
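  // For example (illustrative): a v8f32 masked load on an AVX-512 target
  // without VLX gives NumEltsInWideVec = 512/32 = 16, so the load is done as
  // v16f32 with a v16i1 mask and the low v8f32 is extracted below.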
  SDValue PassThru = ExtendToType(N->getPassThru(), WideDataVT, DAG);

  // Mask element has to be i1.
  assert(Mask.getSimpleValueType().getScalarType() == MVT::i1 &&
         "Unexpected mask type");

  MVT WideMaskVT = MVT::getVectorVT(MVT::i1, NumEltsInWideVec);

  Mask = ExtendToType(Mask, WideMaskVT, DAG, true);
  SDValue NewLoad = DAG.getMaskedLoad(WideDataVT, dl, N->getChain(),
                                      N->getBasePtr(), Mask, PassThru,
                                      N->getMemoryVT(), N->getMemOperand(),
                                      N->getExtensionType(),
                                      N->isExpandingLoad());

  SDValue Extract = DAG.getNode(ISD::EXTRACT_SUBVECTOR, dl, VT,
                                NewLoad.getValue(0),
                                DAG.getIntPtrConstant(0, dl));
  SDValue RetOps[] = {Extract, NewLoad.getValue(1)};
  return DAG.getMergeValues(RetOps, dl);
}

static SDValue LowerMSTORE(SDValue Op, const X86Subtarget &Subtarget,
                           SelectionDAG &DAG) {
  MaskedStoreSDNode *N = cast<MaskedStoreSDNode>(Op.getNode());
  SDValue DataToStore = N->getValue();
  MVT VT = DataToStore.getSimpleValueType();
  MVT ScalarVT = VT.getScalarType();
  SDValue Mask = N->getMask();
  SDLoc dl(Op);

  assert((!N->isCompressingStore() || Subtarget.hasAVX512()) &&
         "Compressing masked store is supported on AVX-512 target only!");

  assert((!N->isCompressingStore() || ScalarVT.getSizeInBits() >= 32) &&
         "Compressing masked store is supported for 32 and 64-bit types only!");

  assert(Subtarget.hasAVX512() && !Subtarget.hasVLX() && !VT.is512BitVector() &&
         "Cannot lower masked store op.");

  assert((ScalarVT.getSizeInBits() >= 32 ||
          (Subtarget.hasBWI() &&
              (ScalarVT == MVT::i8 || ScalarVT == MVT::i16))) &&
         "Unsupported masked store op.");

  // This operation is legal for targets with VLX, but without
  // VLX the vector should be widened to 512 bits.
  unsigned NumEltsInWideVec = 512/VT.getScalarSizeInBits();
  MVT WideDataVT = MVT::getVectorVT(ScalarVT, NumEltsInWideVec);
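  // For example (illustrative): a v8i32 masked store without VLX is widened
  // to a v16i32 store with a v16i1 mask; assuming ExtendToType's trailing
  // 'true' fills the new mask lanes with zeroes, the padded elements are
  // never written.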

  // Mask element has to be i1.
  assert(Mask.getSimpleValueType().getScalarType() == MVT::i1 &&
         "Unexpected mask type");

  MVT WideMaskVT = MVT::getVectorVT(MVT::i1, NumEltsInWideVec);

  DataToStore = ExtendToType(DataToStore, WideDataVT, DAG);
  Mask = ExtendToType(Mask, WideMaskVT, DAG, true);
  return DAG.getMaskedStore(N->getChain(), dl, DataToStore, N->getBasePtr(),
                            Mask, N->getMemoryVT(), N->getMemOperand(),
                            N->isTruncatingStore(), N->isCompressingStore());
}

static SDValue LowerMGATHER(SDValue Op, const X86Subtarget &Subtarget,
                            SelectionDAG &DAG) {
  assert(Subtarget.hasAVX2() &&
         "MGATHER/MSCATTER are supported on AVX-512/AVX-2 arch only");

  MaskedGatherSDNode *N = cast<MaskedGatherSDNode>(Op.getNode());
  SDLoc dl(Op);
  MVT VT = Op.getSimpleValueType();
  SDValue Index = N->getIndex();
  SDValue Mask = N->getMask();
  SDValue PassThru = N->getPassThru();
  MVT IndexVT = Index.getSimpleValueType();
  MVT MaskVT = Mask.getSimpleValueType();

  assert(VT.getScalarSizeInBits() >= 32 && "Unsupported gather op");

  // If the index is v2i32, we're being called by type legalization.
  if (IndexVT == MVT::v2i32)
    return SDValue();

  // If we don't have VLX and neither the passthru nor the index is 512 bits,
  // we need to widen until one is.
  MVT OrigVT = VT;
  if (Subtarget.hasAVX512() && !Subtarget.hasVLX() && !VT.is512BitVector() &&
      !IndexVT.is512BitVector()) {
    // Determine how much we need to widen by to get a 512-bit type.
    unsigned Factor = std::min(512/VT.getSizeInBits(),
                               512/IndexVT.getSizeInBits());
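    // For example (illustrative): a v8f32 result with a v8i32 index (both
    // 256 bits) gives Factor = min(512/256, 512/256) = 2, so the op is
    // widened to v16f32 data, a v16i32 index and a v16i1 mask.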

    unsigned NumElts = VT.getVectorNumElements() * Factor;

    VT = MVT::getVectorVT(VT.getVectorElementType(), NumElts);
    IndexVT = MVT::getVectorVT(IndexVT.getVectorElementType(), NumElts);
    MaskVT = MVT::getVectorVT(MVT::i1, NumElts);

    PassThru = ExtendToType(PassThru, VT, DAG);
    Index = ExtendToType(Index, IndexVT, DAG);
    Mask = ExtendToType(Mask, MaskVT, DAG, true);
  }

  SDValue Ops[] = { N->getChain(), PassThru, Mask, N->getBasePtr(), Index,
                    N->getScale() };
  SDValue NewGather = DAG.getTargetMemSDNode<X86MaskedGatherSDNode>(
      DAG.getVTList(VT, MaskVT, MVT::Other), Ops, dl, N->getMemoryVT(),
      N->getMemOperand());
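  // The X86 gather node's results are (data, mask, chain): result 0 holds
  // the possibly-widened data, from which the original-width value is
  // extracted, and result 2 is the chain.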
  SDValue Extract = DAG.getNode(ISD::EXTRACT_SUBVECTOR, dl, OrigVT,
                                NewGather, DAG.getIntPtrConstant(0, dl));
  return DAG.getMergeValues({Extract, NewGather.getValue(2)}, dl);
}

SDValue X86TargetLowering::LowerGC_TRANSITION_START(SDValue Op,
                                                    SelectionDAG &DAG) const {
  // TODO: Eventually, the lowering of these nodes should be informed by or
  // deferred to the GC strategy for the function in which they appear. For
  // now, however, they must be lowered to something. Since they are logically
  // no-ops in the case of a null GC strategy (or a GC strategy which does not
  // require special handling for these nodes), lower them as literal NOOPs for
  // the time being.
  SmallVector<SDValue, 2> Ops;

  Ops.push_back(Op.getOperand(0));
  if (Op->getGluedNode())
    Ops.push_back(Op->getOperand(Op->getNumOperands() - 1));

  SDLoc OpDL(Op);
  SDVTList VTs = DAG.getVTList(MVT::Other, MVT::Glue);
  SDValue NOOP(DAG.getMachineNode(X86::NOOP, SDLoc(Op), VTs, Ops), 0);

  return NOOP;
}

SDValue X86TargetLowering::LowerGC_TRANSITION_END(SDValue Op,
                                                  SelectionDAG &DAG) const {
  // TODO: Eventually, the lowering of these nodes should be informed by or
  // deferred to the GC strategy for the function in which they appear. For
  // now, however, they must be lowered to something. Since they are logically
  // no-ops in the case of a null GC strategy (or a GC strategy which does not
  // require special handling for these nodes), lower them as literal NOOPs for
  // the time being.
  SmallVector<SDValue, 2> Ops;

  Ops.push_back(Op.getOperand(0));
  if (Op->getGluedNode())
    Ops.push_back(Op->getOperand(Op->getNumOperands() - 1));

  SDLoc OpDL(Op);
  SDVTList VTs = DAG.getVTList(MVT::Other, MVT::Glue);
  SDValue NOOP(DAG.getMachineNode(X86::NOOP, SDLoc(Op), VTs, Ops), 0);

  return NOOP;
}

/// Provide custom lowering hooks for some operations.
SDValue X86TargetLowering::LowerOperation(SDValue Op, SelectionDAG &DAG) const {
  switch (Op.getOpcode()) {
  default: llvm_unreachable("Should not custom lower this!");
  case ISD::ATOMIC_FENCE: return LowerATOMIC_FENCE(Op, Subtarget, DAG);
  case ISD::ATOMIC_CMP_SWAP_WITH_SUCCESS:
    return LowerCMP_SWAP(Op, Subtarget, DAG);
  case ISD::CTPOP: return LowerCTPOP(Op, Subtarget, DAG);
  case ISD::ATOMIC_LOAD_ADD:
  case ISD::ATOMIC_LOAD_SUB:
  case ISD::ATOMIC_LOAD_OR:
  case ISD::ATOMIC_LOAD_XOR:
  case ISD::ATOMIC_LOAD_AND: return lowerAtomicArith(Op, DAG, Subtarget);
  case ISD::ATOMIC_STORE: return LowerATOMIC_STORE(Op, DAG);
  case ISD::BITREVERSE: return LowerBITREVERSE(Op, Subtarget, DAG);
  case ISD::BUILD_VECTOR: return LowerBUILD_VECTOR(Op, DAG);
  case ISD::CONCAT_VECTORS: return LowerCONCAT_VECTORS(Op, Subtarget, DAG);
  case ISD::VECTOR_SHUFFLE: return lowerVectorShuffle(Op, Subtarget, DAG);
  case ISD::VSELECT: return LowerVSELECT(Op, DAG);
  case ISD::EXTRACT_VECTOR_ELT: return LowerEXTRACT_VECTOR_ELT(Op, DAG);
  case ISD::INSERT_VECTOR_ELT: return LowerINSERT_VECTOR_ELT(Op, DAG);
  case ISD::INSERT_SUBVECTOR: return LowerINSERT_SUBVECTOR(Op, Subtarget, DAG);
  case ISD::EXTRACT_SUBVECTOR: return LowerEXTRACT_SUBVECTOR(Op, Subtarget, DAG);
  case ISD::SCALAR_TO_VECTOR: return LowerSCALAR_TO_VECTOR(Op, Subtarget, DAG);
  case ISD::ConstantPool: return LowerConstantPool(Op, DAG);
  case ISD::GlobalAddress: return LowerGlobalAddress(Op, DAG);
  case ISD::GlobalTLSAddress: return LowerGlobalTLSAddress(Op, DAG);
  case ISD::ExternalSymbol: return LowerExternalSymbol(Op, DAG);
  case ISD::BlockAddress: return LowerBlockAddress(Op, DAG);
  case ISD::SHL_PARTS:
  case ISD::SRA_PARTS:
  case ISD::SRL_PARTS: return LowerShiftParts(Op, DAG);
  case ISD::FSHL:
  case ISD::FSHR: return LowerFunnelShift(Op, Subtarget, DAG);
  case ISD::SINT_TO_FP: return LowerSINT_TO_FP(Op, DAG);
  case ISD::UINT_TO_FP: return LowerUINT_TO_FP(Op, DAG);
  case ISD::TRUNCATE: return LowerTRUNCATE(Op, DAG);
  case ISD::ZERO_EXTEND: return LowerZERO_EXTEND(Op, Subtarget, DAG);
  case ISD::SIGN_EXTEND: return LowerSIGN_EXTEND(Op, Subtarget, DAG);
  case ISD::ANY_EXTEND: return LowerANY_EXTEND(Op, Subtarget, DAG);
  case ISD::ZERO_EXTEND_VECTOR_INREG:
  case ISD::SIGN_EXTEND_VECTOR_INREG:
    return LowerEXTEND_VECTOR_INREG(Op, Subtarget, DAG);
  case ISD::FP_TO_SINT:
  case ISD::FP_TO_UINT: return LowerFP_TO_INT(Op, DAG);
  case ISD::FP_EXTEND: return LowerFP_EXTEND(Op, DAG);
  case ISD::LOAD: return LowerLoad(Op, Subtarget, DAG);
  case ISD::STORE: return LowerStore(Op, Subtarget, DAG);
  case ISD::FABS:
  case ISD::FNEG: return LowerFABSorFNEG(Op, DAG);
  case ISD::FCOPYSIGN: return LowerFCOPYSIGN(Op, DAG);
  case ISD::FGETSIGN: return LowerFGETSIGN(Op, DAG);
  case ISD::SETCC: return LowerSETCC(Op, DAG);
  case ISD::SETCCCARRY: return LowerSETCCCARRY(Op, DAG);
  case ISD::SELECT: return LowerSELECT(Op, DAG);
  case ISD::BRCOND: return LowerBRCOND(Op, DAG);
  case ISD::JumpTable: return LowerJumpTable(Op, DAG);
  case ISD::VASTART: return LowerVASTART(Op, DAG);
  case ISD::VAARG: return LowerVAARG(Op, DAG);
  case ISD::VACOPY: return LowerVACOPY(Op, Subtarget, DAG);
  case ISD::INTRINSIC_WO_CHAIN: return LowerINTRINSIC_WO_CHAIN(Op, DAG);
  case ISD::INTRINSIC_VOID:
  case ISD::INTRINSIC_W_CHAIN: return LowerINTRINSIC_W_CHAIN(Op, Subtarget, DAG);
  case ISD::RETURNADDR: return LowerRETURNADDR(Op, DAG);
  case ISD::ADDROFRETURNADDR: return LowerADDROFRETURNADDR(Op, DAG);
  case ISD::FRAMEADDR: return LowerFRAMEADDR(Op, DAG);
  case ISD::FRAME_TO_ARGS_OFFSET:
    return LowerFRAME_TO_ARGS_OFFSET(Op, DAG);
  case ISD::DYNAMIC_STACKALLOC: return LowerDYNAMIC_STACKALLOC(Op, DAG);
  case ISD::EH_RETURN: return LowerEH_RETURN(Op, DAG);
  case ISD::EH_SJLJ_SETJMP: return lowerEH_SJLJ_SETJMP(Op, DAG);
  case ISD::EH_SJLJ_LONGJMP: return lowerEH_SJLJ_LONGJMP(Op, DAG);
  case ISD::EH_SJLJ_SETUP_DISPATCH:
    return lowerEH_SJLJ_SETUP_DISPATCH(Op, DAG);
  case ISD::INIT_TRAMPOLINE: return LowerINIT_TRAMPOLINE(Op, DAG);
  case ISD::ADJUST_TRAMPOLINE: return LowerADJUST_TRAMPOLINE(Op, DAG);
  case ISD::FLT_ROUNDS_: return LowerFLT_ROUNDS_(Op, DAG);
  case ISD::CTLZ:
  case ISD::CTLZ_ZERO_UNDEF: return LowerCTLZ(Op, Subtarget, DAG);
  case ISD::CTTZ:
  case ISD::CTTZ_ZERO_UNDEF: return LowerCTTZ(Op, Subtarget, DAG);
  case ISD::MUL: return LowerMUL(Op, Subtarget, DAG);
  case ISD::MULHS:
  case ISD::MULHU: return LowerMULH(Op, Subtarget, DAG);
  case ISD::ROTL:
  case ISD::ROTR: return LowerRotate(Op, Subtarget, DAG);
  case ISD::SRA:
  case ISD::SRL:
  case ISD::SHL: return LowerShift(Op, Subtarget, DAG);
  case ISD::SADDO:
  case ISD::UADDO:
  case ISD::SSUBO:
  case ISD::USUBO:
  case ISD::SMULO:
  case ISD::UMULO: return LowerXALUO(Op, DAG);
  case ISD::READCYCLECOUNTER: return LowerREADCYCLECOUNTER(Op, Subtarget, DAG);
  case ISD::BITCAST: return LowerBITCAST(Op, Subtarget, DAG);
  case ISD::ADDCARRY:
  case ISD::SUBCARRY: return LowerADDSUBCARRY(Op, DAG);
  case ISD::ADD:
  case ISD::SUB: return LowerADD_SUB(Op, DAG);
  case ISD::UADDSAT:
  case ISD::SADDSAT:
  case ISD::USUBSAT:
  case ISD::SSUBSAT: return LowerADDSAT_SUBSAT(Op, DAG);
  case ISD::SMAX:
  case ISD::SMIN:
  case ISD::UMAX:
  case ISD::UMIN: return LowerMINMAX(Op, DAG);
  case ISD::ABS: return LowerABS(Op, DAG);
  case ISD::FSINCOS: return LowerFSINCOS(Op, Subtarget, DAG);
  case ISD::MLOAD: return LowerMLOAD(Op, Subtarget, DAG);
  case ISD::MSTORE: return LowerMSTORE(Op, Subtarget, DAG);
  case ISD::MGATHER: return LowerMGATHER(Op, Subtarget, DAG);
  case ISD::MSCATTER: return LowerMSCATTER(Op, Subtarget, DAG);
  case ISD::GC_TRANSITION_START:
    return LowerGC_TRANSITION_START(Op, DAG);
  case ISD::GC_TRANSITION_END: return LowerGC_TRANSITION_END(Op, DAG);
  }
}

/// Places new result values for the node in Results (their number
/// and types must exactly match those of the original return values of
/// the node), or leaves Results empty, which indicates that the node is not
/// to be custom lowered after all.
void X86TargetLowering::LowerOperationWrapper(SDNode *N,
                                              SmallVectorImpl<SDValue> &Results,
                                              SelectionDAG &DAG) const {
  SDValue Res = LowerOperation(SDValue(N, 0), DAG);

  if (!Res.getNode())
    return;

  assert((N->getNumValues() <= Res->getNumValues()) &&
         "Lowering returned the wrong number of results!");

  // Places new result values based on the result number of N.
  // In some cases (LowerSINT_TO_FP for example) Res has more result values
  // than the original node; the chain (the last value) should be dropped.
  for (unsigned I = 0, E = N->getNumValues(); I != E; ++I)
    Results.push_back(Res.getValue(I));
}

/// Replace a node with an illegal result type with a new node built out of
/// custom code.
void X86TargetLowering::ReplaceNodeResults(SDNode *N,
                                           SmallVectorImpl<SDValue> &Results,
                                           SelectionDAG &DAG) const {
  SDLoc dl(N);
  switch (N->getOpcode()) {
  default:
    llvm_unreachable("Do not know how to custom type legalize this operation!");
  case ISD::MUL: {
    EVT VT = N->getValueType(0);
    assert(VT.isVector() && "Unexpected VT");
    if (getTypeAction(*DAG.getContext(), VT) == TypePromoteInteger &&
        VT.getVectorNumElements() == 2) {
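      // For example (illustrative): a v2i32 multiply is any-extended to
      // v2i64 with both operands masked to their low 32 bits, so the v2i64
      // MUL can be matched as PMULUDQ (which multiplies the low 32 bits of
      // each 64-bit lane); the result is then truncated back to v2i32.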
      // Promote to a pattern that will be turned into PMULUDQ.
      SDValue N0 = DAG.getNode(ISD::ANY_EXTEND, dl, MVT::v2i64,
                               N->getOperand(0));
      N0 = DAG.getNode(ISD::AND, dl, MVT::v2i64, N0,
                       DAG.getConstant(0xffffffff, dl, MVT::v2i64));
      SDValue N1 = DAG.getNode(ISD::ANY_EXTEND, dl, MVT::v2i64,
                               N->getOperand(1));
      N1 = DAG.getNode(ISD::AND, dl, MVT::v2i64, N1,
                       DAG.getConstant(0xffffffff, dl, MVT::v2i64));
      SDValue Mul = DAG.getNode(ISD::MUL, dl, MVT::v2i64, N0, N1);
      Results.push_back(DAG.getNode(ISD::TRUNCATE, dl, VT, Mul));
    } else if (getTypeAction(*DAG.getContext(), VT) == TypeWidenVector &&
               VT.getVectorElementType() == MVT::i8) {
      // Pre-promote these to vXi16 to avoid op legalization thinking all 16
      // elements are needed.
      MVT MulVT = MVT::getVectorVT(MVT::i16, VT.getVectorNumElements());
      SDValue Op0 = DAG.getNode(ISD::ANY_EXTEND, dl, MulVT, N->getOperand(0));
      SDValue Op1 = DAG.getNode(ISD::ANY_EXTEND, dl, MulVT, N->getOperand(1));
      SDValue Res = DAG.getNode(ISD::MUL, dl, MulVT, Op0, Op1);
      Res = DAG.getNode(ISD::TRUNCATE, dl, VT, Res);
      unsigned NumConcats = 16 / VT.getVectorNumElements();
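      // For example (illustrative): for v2i8, NumConcats = 16/2 = 8, so the
      // truncated v2i8 result is padded with seven undef v2i8 pieces to form
      // the widened v16i8 result.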
      SmallVector<SDValue, 8> ConcatOps(NumConcats, DAG.getUNDEF(VT));
      ConcatOps[0] = Res;
      Res = DAG.getNode(ISD::CONCAT_VECTORS, dl, MVT::v16i8, ConcatOps);
      Results.push_back(Res);
    }
    return;
  }
  case ISD::UADDSAT:
  case ISD::SADDSAT:
  case ISD::USUBSAT:
  case ISD::SSUBSAT:
  case X86ISD::VPMADDWD:
  case X86ISD::AVG: {
    // Legalize types for ISD::UADDSAT/SADDSAT/USUBSAT/SSUBSAT and
    // X86ISD::AVG/VPMADDWD by widening.
    assert(Subtarget.hasSSE2() && "Requires at least SSE2!");

    EVT VT = N->getValueType(0);
    EVT InVT = N->getOperand(0).getValueType();
    assert(VT.getSizeInBits() < 128 && 128 % VT.getSizeInBits() == 0 &&
           "Expected a VT that divides into 128 bits.");
    unsigned NumConcat = 128 / InVT.getSizeInBits();
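    // For example (illustrative): a v4i16 input (64 bits) gives
    // NumConcat = 128/64 = 2, so both operands are widened to v8i16 and the
    // operation runs at the legal 128-bit width.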

    EVT InWideVT = EVT::getVectorVT(*DAG.getContext(),
                                    InVT.getVectorElementType(),
                                    NumConcat * InVT.getVectorNumElements());
    EVT WideVT = EVT::getVectorVT(*DAG.getContext(),
                                  VT.getVectorElementType(),
                                  NumConcat * VT.getVectorNumElements());

    SmallVector<SDValue, 16> Ops(NumConcat, DAG.getUNDEF(InVT));
    Ops[0] = N->getOperand(0);
    SDValue InVec0 = DAG.getNode(ISD::CONCAT_VECTORS, dl, InWideVT, Ops);
    Ops[0] = N->getOperand(1);
    SDValue InVec1 = DAG.getNode(ISD::CONCAT_VECTORS, dl, InWideVT, Ops);

    SDValue Res = DAG.getNode(N->getOpcode(), dl, WideVT, InVec0, InVec1);
    if (getTypeAction(*DAG.getContext(), VT) != TypeWidenVector)
      Res = DAG.getNode(ISD::EXTRACT_SUBVECTOR, dl, VT, Res,
                        DAG.getIntPtrConstant(0, dl));
    Results.push_back(Res);
    return;
  }
  case ISD::SETCC: {
    // Widen v2i32 (setcc v2f32). This is really needed for AVX512VL when
    // the setcc result type is v2i1, because type legalization will end up
    // with a v4i1 setcc plus an extend.
    assert(N->getValueType(0) == MVT::v2i32 && "Unexpected type");
    if (N->getOperand(0).getValueType() != MVT::v2f32 ||
        getTypeAction(*DAG.getContext(), MVT::v2i32) == TypeWidenVector)
      return;
    SDValue UNDEF = DAG.getUNDEF(MVT::v2f32);
    SDValue LHS = DAG.getNode(ISD::CONCAT_VECTORS, dl, MVT::v4f32,
                              N->getOperand(0), UNDEF);
    SDValue RHS = DAG.getNode(ISD::CONCAT_VECTORS, dl, MVT::v4f32,
                              N->getOperand(1), UNDEF);
    SDValue Res = DAG.getNode(ISD::SETCC, dl, MVT::v4i32, LHS, RHS,
                              N->getOperand(2));
    Res = DAG.getNode(ISD::EXTRACT_SUBVECTOR, dl, MVT::v2i32, Res,
                      DAG.getIntPtrConstant(0, dl));
    Results.push_back(Res);
    return;
  }
  // We might have generated v2f32 FMIN/FMAX operations. Widen them to v4f32.
  case X86ISD::FMINC:
  case X86ISD::FMIN:
  case X86ISD::FMAXC:
  case X86ISD::FMAX: {
    EVT VT = N->getValueType(0);
    assert(VT == MVT::v2f32 && "Unexpected type (!= v2f32) on FMIN/FMAX.");
    SDValue UNDEF = DAG.getUNDEF(VT);
    SDValue LHS = DAG.getNode(ISD::CONCAT_VECTORS, dl, MVT::v4f32,
                              N->getOperand(0), UNDEF);
    SDValue RHS = DAG.getNode(ISD::CONCAT_VECTORS, dl, MVT::v4f32,
                              N->getOperand(1), UNDEF);
    Results.push_back(DAG.getNode(N->getOpcode(), dl, MVT::v4f32, LHS, RHS));
    return;
  }
  case ISD::SDIV:
  case ISD::UDIV:
  case ISD::SREM:
  case ISD::UREM: {
    EVT VT = N->getValueType(0);
    if (getTypeAction(*DAG.getContext(), VT) == TypeWidenVector) {
      // If this RHS is a constant splat vector we can widen this and let
      // division/remainder by constant optimize it.
      // TODO: Can we do something for non-splat?
      APInt SplatVal;
      if (ISD::isConstantSplatVector(N->getOperand(1).getNode(), SplatVal)) {
        unsigned NumConcats = 128 / VT.getSizeInBits();
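        // For example (illustrative): a v2i16 divide by a splat constant
        // (32 bits total) gives NumConcats = 128/32 = 4; the LHS is padded
        // with undef up to v8i16 and the splat RHS is rebuilt at that width,
        // letting the divide-by-constant combines fire on a legal type.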
        SmallVector<SDValue, 8> Ops0(NumConcats, DAG.getUNDEF(VT));
        Ops0[0] = N->getOperand(0);
        EVT ResVT = getTypeToTransformTo(*DAG.getContext(), VT);
        SDValue N0 = DAG.getNode(ISD::CONCAT_VECTORS, dl, ResVT, Ops0);
        SDValue N1 = DAG.getConstant(SplatVal, dl, ResVT);
        SDValue Res = DAG.getNode(N->getOpcode(), dl, ResVT, N0, N1);
        Results.push_back(Res);
      }
      return;
    }

    if (VT == MVT::v2i32) {
      // Legalize v2i32 div/rem by unrolling. Otherwise we would promote to
      // v2i64 and unroll later. But then we create i64 scalar ops which
      // might be slow in 64-bit mode or require a libcall in 32-bit mode.
      Results.push_back(DAG.UnrollVectorOp(N));
      return;
    }

    if (VT.isVector())
      return;

    LLVM_FALLTHROUGH;
  }
  case ISD::SDIVREM:
  case ISD::UDIVREM: {
    SDValue V = LowerWin64_i128OP(SDValue(N, 0), DAG);
    Results.push_back(V);
    return;
  }
  case ISD::TRUNCATE: {
    MVT VT = N->getSimpleValueType(0);
    if (getTypeAction(*DAG.getContext(), VT) != TypeWidenVector)
      return;

    // The generic legalizer will try to widen the input type to the same
    // number of elements as the widened result type. But this isn't always
    // the best thing so do some custom legalization to avoid some cases.
    MVT WidenVT = getTypeToTransformTo(*DAG.getContext(), VT).getSimpleVT();
    SDValue In = N->getOperand(0);
    EVT InVT = In.getValueType();

    unsigned InBits = InVT.getSizeInBits();
    if (128 % InBits == 0) {
      // 128 bit and smaller inputs should avoid truncate altogether and
      // just use a build_vector that will become a shuffle.
      // TODO: Widen and use a shuffle directly?
      MVT InEltVT = InVT.getSimpleVT().getVectorElementType();
      EVT EltVT = VT.getVectorElementType();
      unsigned WidenNumElts = WidenVT.getVectorNumElements();
      SmallVector<SDValue, 16> Ops(WidenNumElts, DAG.getUNDEF(EltVT));
      // Use the original element count so we don't do more scalar opts than
      // necessary.
      unsigned MinElts = VT.getVectorNumElements();
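      // For example (illustrative): truncating v2i64 to v2i8 with a v16i8
      // widened result extracts the two i64 elements, truncates each to i8,
      // and leaves the remaining fourteen build_vector lanes undef.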
      for (unsigned i = 0; i < MinElts; ++i) {
        SDValue Val = DAG.getNode(ISD::EXTRACT_VECTOR_ELT, dl, InEltVT, In,
                                  DAG.getIntPtrConstant(i, dl));
        Ops[i] = DAG.getNode(ISD::TRUNCATE, dl, EltVT, Val);
      }
      Results.push_back(DAG.getBuildVector(WidenVT, dl, Ops));
      return;
    }
    // With AVX512 there are some cases that can use a target specific
    // truncate node to go from 256/512 to less than 128 with zeros in the
    // upper elements of the 128 bit result.
    if (Subtarget.hasAVX512() && isTypeLegal(InVT)) {
      // We can use VTRUNC directly for a 256-bit input with VLX, or for any
      // 512-bit input.
      if ((InBits == 256 && Subtarget.hasVLX()) || InBits == 512) {
        Results.push_back(DAG.getNode(X86ISD::VTRUNC, dl, WidenVT, In));
        return;
      }
      // There's one case we can widen to 512 bits and use VTRUNC.
      if (InVT == MVT::v4i64 && VT == MVT::v4i8 && isTypeLegal(MVT::v8i64)) {
        In = DAG.getNode(ISD::CONCAT_VECTORS, dl, MVT::v8i64, In,
                         DAG.getUNDEF(MVT::v4i64));
        Results.push_back(DAG.getNode(X86ISD::VTRUNC, dl, WidenVT, In));
        return;
      }
    }
    return;
  }
  case ISD::SIGN_EXTEND:
  case ISD::ZERO_EXTEND: {
    if (!ExperimentalVectorWideningLegalization)
      return;

    EVT VT = N->getValueType(0);
    SDValue In = N->getOperand(0);
    EVT InVT = In.getValueType();
    if (!Subtarget.hasSSE41() && VT == MVT::v4i64 &&
        (InVT == MVT::v4i16 || InVT == MVT::v4i8)) {
      // Custom split this so we can extend i8/i16->i32 invec. This is better
      // since sign_extend_inreg i8/i16->i64 requires two sra operations, so
      // this allows the first stage to be shared.
      In = DAG.getNode(ISD::SIGN_EXTEND, dl, MVT::v4i32, In);

      // Fill a vector with sign bits for each element.
      SDValue Zero = DAG.getConstant(0, dl, MVT::v4i32);
      SDValue SignBits = DAG.getSetCC(dl, MVT::v4i32, Zero, In, ISD::SETGT);

      // Create an unpackl and unpackh to interleave the sign bits then bitcast
      // to v2i64.
      SDValue Lo = DAG.getVectorShuffle(MVT::v4i32, dl, In, SignBits,
                                        {0, 4, 1, 5});
      Lo = DAG.getNode(ISD::BITCAST, dl, MVT::v2i64, Lo);
      SDValue Hi = DAG.getVectorShuffle(MVT::v4i32, dl, In, SignBits,
                                        {2, 6, 3, 7});
      Hi = DAG.getNode(ISD::BITCAST, dl, MVT::v2i64, Hi);

      SDValue Res = DAG.getNode(ISD::CONCAT_VECTORS, dl, VT, Lo, Hi);
      Results.push_back(Res);
      return;
    }

    if ((VT == MVT::v16i32 || VT == MVT::v8i64) && InVT.is128BitVector()) {
      // Perform custom splitting instead of the two stage extend we would get
      // by default.
      EVT LoVT, HiVT;
      std::tie(LoVT, HiVT) = DAG.GetSplitDestVTs(N->getValueType(0));
      assert(isTypeLegal(LoVT) && "Split VT not legal?");

      bool IsSigned = N->getOpcode() == ISD::SIGN_EXTEND;

      SDValue Lo = getExtendInVec(IsSigned, dl, LoVT, In, DAG);

      // We need to shift the input over by half the number of elements.
      unsigned NumElts = InVT.getVectorNumElements();
      unsigned HalfNumElts = NumElts / 2;
      SmallVector<int, 16> ShufMask(NumElts, SM_SentinelUndef);
      for (unsigned i = 0; i != HalfNumElts; ++i)
        ShufMask[i] = i + HalfNumElts;

      SDValue Hi = DAG.getVectorShuffle(InVT, dl, In, In, ShufMask);
      Hi = getExtendInVec(IsSigned, dl, HiVT, Hi, DAG);

      SDValue Res = DAG.getNode(ISD::CONCAT_VECTORS, dl, VT, Lo, Hi);
      Results.push_back(Res);
    }
    return;
  }
  case ISD::FP_TO_SINT:
  case ISD::FP_TO_UINT: {
    bool IsSigned = N->getOpcode() == ISD::FP_TO_SINT;
    EVT VT = N->getValueType(0);
    SDValue Src = N->getOperand(0);
    EVT SrcVT = Src.getValueType();

    // Promote these manually to avoid over promotion to v2i64. Type
    // legalization will revisit the v2i32 operation for more cleanup.
    if ((VT == MVT::v2i8 || VT == MVT::v2i16) &&
        getTypeAction(*DAG.getContext(), VT) == TypePromoteInteger) {
      // AVX512DQ provides instructions that produce a v2i64 result.
      if (Subtarget.hasDQI())
        return;

      SDValue Res = DAG.getNode(ISD::FP_TO_SINT, dl, MVT::v2i32, Src);
      Res = DAG.getNode(N->getOpcode() == ISD::FP_TO_UINT ? ISD::AssertZext
                                                          : ISD::AssertSext,
                        dl, MVT::v2i32, Res,
                        DAG.getValueType(VT.getVectorElementType()));
      Res = DAG.getNode(ISD::TRUNCATE, dl, VT, Res);
      Results.push_back(Res);
      return;
    }

    if (VT.isVector() && VT.getScalarSizeInBits() < 32) {
      if (getTypeAction(*DAG.getContext(), VT) != TypeWidenVector)
        return;

      // Try to create a 128 bit vector, but don't exceed a 32 bit element.
      unsigned NewEltWidth = std::min(128 / VT.getVectorNumElements(), 32U);
      MVT PromoteVT = MVT::getVectorVT(MVT::getIntegerVT(NewEltWidth),
                                       VT.getVectorNumElements());
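      // For example (illustrative): for a v4i8 result,
      // NewEltWidth = min(128/4, 32) = 32, so the conversion is performed as
      // an FP_TO_SINT to v4i32 and then truncated back down below.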
      SDValue Res = DAG.getNode(ISD::FP_TO_SINT, dl, PromoteVT, Src);

      // Preserve what we know about the size of the original result. Except
      // when the result is v2i32 since we can't widen the assert.
      if (PromoteVT != MVT::v2i32)
        Res = DAG.getNode(N->getOpcode() == ISD::FP_TO_UINT ? ISD::AssertZext
                                                            : ISD::AssertSext,
                          dl, PromoteVT, Res,
                          DAG.getValueType(VT.getVectorElementType()));

      // Truncate back to the original width.
      Res = DAG.getNode(ISD::TRUNCATE, dl, VT, Res);

      // Now widen to 128 bits.
      unsigned NumConcats = 128 / VT.getSizeInBits();
      MVT ConcatVT = MVT::getVectorVT(VT.getSimpleVT().getVectorElementType(),
                                      VT.getVectorNumElements() * NumConcats);
      SmallVector<SDValue, 8> ConcatOps(NumConcats, DAG.getUNDEF(VT));
      ConcatOps[0] = Res;
      Res = DAG.getNode(ISD::CONCAT_VECTORS, dl, ConcatVT, ConcatOps);
      Results.push_back(Res);
      return;
    }

    if (VT == MVT::v2i32) {
      assert((IsSigned || Subtarget.hasAVX512()) &&
             "Can only handle signed conversion without AVX512");
      assert(Subtarget.hasSSE2() && "Requires at least SSE2!");
      bool Widenv2i32 =
          getTypeAction(*DAG.getContext(), MVT::v2i32) == TypeWidenVector;
      if (Src.getValueType() == MVT::v2f64) {
        unsigned Opc = IsSigned ? X86ISD::CVTTP2SI : X86ISD::CVTTP2UI;
        if (!IsSigned && !Subtarget.hasVLX()) {
          // If v2i32 is widened, we can defer to the generic legalizer.
          if (Widenv2i32)
            return;
          // Custom widen by doubling to a legal vector width. Isel will
          // further widen to v8f64.
          Opc = ISD::FP_TO_UINT;
          Src = DAG.getNode(ISD::CONCAT_VECTORS, dl, MVT::v4f64,
                            Src, DAG.getUNDEF(MVT::v2f64));
        }
        SDValue Res = DAG.getNode(Opc, dl, MVT::v4i32, Src);
        if (!Widenv2i32)
          Res = DAG.getNode(ISD::EXTRACT_SUBVECTOR, dl, MVT::v2i32, Res,
                            DAG.getIntPtrConstant(0, dl));
        Results.push_back(Res);
        return;
      }
      if (SrcVT == MVT::v2f32 &&
          getTypeAction(*DAG.getContext(), VT) != TypeWidenVector) {
        SDValue Idx = DAG.getIntPtrConstant(0, dl);
        SDValue Res = DAG.getNode(ISD::CONCAT_VECTORS, dl, MVT::v4f32, Src,
                                  DAG.getUNDEF(MVT::v2f32));
        Res = DAG.getNode(IsSigned ? ISD::FP_TO_SINT
                                   : ISD::FP_TO_UINT, dl, MVT::v4i32, Res);
        Res = DAG.getNode(ISD::EXTRACT_SUBVECTOR, dl, MVT::v2i32, Res, Idx);
        Results.push_back(Res);
        return;
      }

      // The FP_TO_INTHelper below only handles f32/f64/f80 scalar inputs,
      // so early out here.
      return;
    }

    if (Subtarget.hasDQI() && VT == MVT::i64 &&
        (SrcVT == MVT::f32 || SrcVT == MVT::f64)) {
      assert(!Subtarget.is64Bit() && "i64 should be legal");
      unsigned NumElts = Subtarget.hasVLX() ? 4 : 8;
      // Using a 256-bit input here to guarantee 128-bit input for f32 case.
      // TODO: Use 128-bit vectors for f64 case?
      // TODO: Use 128-bit vectors for f32 by using CVTTP2SI/CVTTP2UI.
      MVT VecVT = MVT::getVectorVT(MVT::i64, NumElts);
      MVT VecInVT = MVT::getVectorVT(SrcVT.getSimpleVT(), NumElts);

      SDValue ZeroIdx = DAG.getIntPtrConstant(0, dl);
      SDValue Res = DAG.getNode(ISD::INSERT_VECTOR_ELT, dl, VecInVT,
                                DAG.getConstantFP(0.0, dl, VecInVT), Src,
                                ZeroIdx);
      Res = DAG.getNode(N->getOpcode(), SDLoc(N), VecVT, Res);
      Res = DAG.getNode(ISD::EXTRACT_VECTOR_ELT, dl, VT, Res, ZeroIdx);
      Results.push_back(Res);
      return;
    }

    std::pair<SDValue, SDValue> Vals =
        FP_TO_INTHelper(SDValue(N, 0), DAG, IsSigned, /*IsReplace=*/ true);
    SDValue FIST = Vals.first, StackSlot = Vals.second;
    if (FIST.getNode()) {
      // Return a load from the stack slot.
      if (StackSlot.getNode())
        Results.push_back(
            DAG.getLoad(VT, dl, FIST, StackSlot, MachinePointerInfo()));
      else
        Results.push_back(FIST);
    }
    return;
  }
  case ISD::SINT_TO_FP: {
    assert(Subtarget.hasDQI() && Subtarget.hasVLX() && "Requires AVX512DQVL!");
    SDValue Src = N->getOperand(0);
    if (N->getValueType(0) != MVT::v2f32 || Src.getValueType() != MVT::v2i64)
      return;
    Results.push_back(DAG.getNode(X86ISD::CVTSI2P, dl, MVT::v4f32, Src));
    return;
  }
  case ISD::UINT_TO_FP: {
    assert(Subtarget.hasSSE2() && "Requires at least SSE2!");
    EVT VT = N->getValueType(0);
    if (VT != MVT::v2f32)
      return;
    SDValue Src = N->getOperand(0);
    EVT SrcVT = Src.getValueType();
    if (Subtarget.hasDQI() && Subtarget.hasVLX() && SrcVT == MVT::v2i64) {
      Results.push_back(DAG.getNode(X86ISD::CVTUI2P, dl, MVT::v4f32, Src));
      return;
    }
    if (SrcVT != MVT::v2i32)
      return;
    SDValue ZExtIn = DAG.getNode(ISD::ZERO_EXTEND, dl, MVT::v2i64, Src);
    SDValue VBias =
        DAG.getConstantFP(BitsToDouble(0x4330000000000000ULL), dl, MVT::v2f64);
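    // 0x4330000000000000 is the double 2^52. OR-ing a zero-extended 32-bit
    // integer x into its mantissa produces the exact double 2^52 + x, so the
    // FSUB below recovers x as a double without an int-to-fp instruction.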
26635 | SDValue Or = DAG.getNode(ISD::OR, dl, MVT::v2i64, ZExtIn, | |||
26636 | DAG.getBitcast(MVT::v2i64, VBias)); | |||
26637 | Or = DAG.getBitcast(MVT::v2f64, Or); | |||
26638 | // TODO: Are there any fast-math-flags to propagate here? | |||
26639 | SDValue Sub = DAG.getNode(ISD::FSUB, dl, MVT::v2f64, Or, VBias); | |||
26640 | Results.push_back(DAG.getNode(X86ISD::VFPROUND, dl, MVT::v4f32, Sub)); | |||
26641 | return; | |||
26642 | } | |||
26643 | case ISD::FP_ROUND: { | |||
26644 | if (!isTypeLegal(N->getOperand(0).getValueType())) | |||
26645 | return; | |||
26646 | SDValue V = DAG.getNode(X86ISD::VFPROUND, dl, MVT::v4f32, N->getOperand(0)); | |||
26647 | Results.push_back(V); | |||
26648 | return; | |||
26649 | } | |||
26650 | case ISD::FP_EXTEND: { | |||
26651 | // Right now, only MVT::v2f32 has OperationAction for FP_EXTEND. | |||
26652 | // No other ValueType for FP_EXTEND should reach this point. | |||
26653 | assert(N->getValueType(0) == MVT::v2f32 && | |||
26654 | "Do not know how to legalize this Node"); | |||
26655 | return; | |||
26656 | } | |||
26657 | case ISD::INTRINSIC_W_CHAIN: { | |||
26658 | unsigned IntNo = cast<ConstantSDNode>(N->getOperand(1))->getZExtValue(); | |||
26659 | switch (IntNo) { | |||
26660 | default: llvm_unreachable("Do not know how to custom type " | |||
26661 | "legalize this intrinsic operation!"); | |||
26662 | case Intrinsic::x86_rdtsc: | |||
26663 | return getReadTimeStampCounter(N, dl, X86ISD::RDTSC_DAG, DAG, Subtarget, | |||
26664 | Results); | |||
26665 | case Intrinsic::x86_rdtscp: | |||
26666 | return getReadTimeStampCounter(N, dl, X86ISD::RDTSCP_DAG, DAG, Subtarget, | |||
26667 | Results); | |||
26668 | case Intrinsic::x86_rdpmc: | |||
26669 | return getReadPerformanceCounter(N, dl, DAG, Subtarget, Results); | |||
26670 | ||||
26671 | case Intrinsic::x86_xgetbv: | |||
26672 | return getExtendedControlRegister(N, dl, DAG, Subtarget, Results); | |||
26673 | } | |||
26674 | } | |||
26675 | case ISD::INTRINSIC_WO_CHAIN: { | |||
26676 | if (SDValue V = LowerINTRINSIC_WO_CHAIN(SDValue(N, 0), DAG)) | |||
26677 | Results.push_back(V); | |||
26678 | return; | |||
26679 | } | |||
26680 | case ISD::READCYCLECOUNTER: { | |||
26681 | return getReadTimeStampCounter(N, dl, X86ISD::RDTSC_DAG, DAG, Subtarget, | |||
26682 | Results); | |||
26683 | } | |||
26684 | case ISD::ATOMIC_CMP_SWAP_WITH_SUCCESS: { | |||
26685 | EVT T = N->getValueType(0); | |||
26686 | assert((T == MVT::i64 || T == MVT::i128) && "can only expand cmpxchg pair"); | |||
26687 | bool Regs64bit = T == MVT::i128; | |||
26688 | MVT HalfT = Regs64bit ? MVT::i64 : MVT::i32; | |||
26689 | SDValue cpInL, cpInH; | |||
26690 | cpInL = DAG.getNode(ISD::EXTRACT_ELEMENT, dl, HalfT, N->getOperand(2), | |||
26691 | DAG.getConstant(0, dl, HalfT)); | |||
26692 | cpInH = DAG.getNode(ISD::EXTRACT_ELEMENT, dl, HalfT, N->getOperand(2), | |||
26693 | DAG.getConstant(1, dl, HalfT)); | |||
26694 | cpInL = DAG.getCopyToReg(N->getOperand(0), dl, | |||
26695 | Regs64bit ? X86::RAX : X86::EAX, | |||
26696 | cpInL, SDValue()); | |||
26697 | cpInH = DAG.getCopyToReg(cpInL.getValue(0), dl, | |||
26698 | Regs64bit ? X86::RDX : X86::EDX, | |||
26699 | cpInH, cpInL.getValue(1)); | |||
26700 | SDValue swapInL, swapInH; | |||
26701 | swapInL = DAG.getNode(ISD::EXTRACT_ELEMENT, dl, HalfT, N->getOperand(3), | |||
26702 | DAG.getConstant(0, dl, HalfT)); | |||
26703 | swapInH = DAG.getNode(ISD::EXTRACT_ELEMENT, dl, HalfT, N->getOperand(3), | |||
26704 | DAG.getConstant(1, dl, HalfT)); | |||
26705 | swapInH = | |||
26706 | DAG.getCopyToReg(cpInH.getValue(0), dl, Regs64bit ? X86::RCX : X86::ECX, | |||
26707 | swapInH, cpInH.getValue(1)); | |||
26708 | // If the current function needs the base pointer, RBX, | |||
26709 | // we shouldn't use cmpxchg directly. | |||
26710 | // The lowering of that instruction will clobber RBX, and since | |||
26711 | // RBX will be a reserved register, the register allocator will | |||
26712 | // not ensure that its value is properly saved and restored | |||
26713 | // around this live-range. | |||
26714 | const X86RegisterInfo *TRI = Subtarget.getRegisterInfo(); | |||
26715 | SDValue Result; | |||
26716 | SDVTList Tys = DAG.getVTList(MVT::Other, MVT::Glue); | |||
26717 | unsigned BasePtr = TRI->getBaseRegister(); | |||
26718 | MachineMemOperand *MMO = cast<AtomicSDNode>(N)->getMemOperand(); | |||
26719 | if (TRI->hasBasePointer(DAG.getMachineFunction()) && | |||
26720 | (BasePtr == X86::RBX || BasePtr == X86::EBX)) { | |||
26721 | // ISel prefers the LCMPXCHG64 variant. | |||
26722 | // If that assert breaks, that means it is not the case anymore, | |||
26723 | // and we need to teach LCMPXCHG8_SAVE_EBX_DAG how to save RBX, | |||
26724 | // not just EBX. This is a matter of accepting i64 input for that | |||
26725 | // pseudo, and restoring into a register of the right width | |||
26726 | // in the expand pseudo. Everything else should just work. | |||
26727 | assert(((Regs64bit == (BasePtr == X86::RBX)) || BasePtr == X86::EBX) && | |||
26728 | "Saving only half of the RBX"); | |||
26729 | unsigned Opcode = Regs64bit ? X86ISD::LCMPXCHG16_SAVE_RBX_DAG | |||
26730 | : X86ISD::LCMPXCHG8_SAVE_EBX_DAG; | |||
26731 | SDValue RBXSave = DAG.getCopyFromReg(swapInH.getValue(0), dl, | |||
26732 | Regs64bit ? X86::RBX : X86::EBX, | |||
26733 | HalfT, swapInH.getValue(1)); | |||
26734 | SDValue Ops[] = {/*Chain*/ RBXSave.getValue(1), N->getOperand(1), swapInL, | |||
26735 | RBXSave, | |||
26736 | /*Glue*/ RBXSave.getValue(2)}; | |||
26737 | Result = DAG.getMemIntrinsicNode(Opcode, dl, Tys, Ops, T, MMO); | |||
26738 | } else { | |||
26739 | unsigned Opcode = | |||
26740 | Regs64bit ? X86ISD::LCMPXCHG16_DAG : X86ISD::LCMPXCHG8_DAG; | |||
26741 | swapInL = DAG.getCopyToReg(swapInH.getValue(0), dl, | |||
26742 | Regs64bit ? X86::RBX : X86::EBX, swapInL, | |||
26743 | swapInH.getValue(1)); | |||
26744 | SDValue Ops[] = {swapInL.getValue(0), N->getOperand(1), | |||
26745 | swapInL.getValue(1)}; | |||
26746 | Result = DAG.getMemIntrinsicNode(Opcode, dl, Tys, Ops, T, MMO); | |||
26747 | } | |||
26748 | SDValue cpOutL = DAG.getCopyFromReg(Result.getValue(0), dl, | |||
26749 | Regs64bit ? X86::RAX : X86::EAX, | |||
26750 | HalfT, Result.getValue(1)); | |||
26751 | SDValue cpOutH = DAG.getCopyFromReg(cpOutL.getValue(1), dl, | |||
26752 | Regs64bit ? X86::RDX : X86::EDX, | |||
26753 | HalfT, cpOutL.getValue(2)); | |||
26754 | SDValue OpsF[] = { cpOutL.getValue(0), cpOutH.getValue(0)}; | |||
26755 | ||||
26756 | SDValue EFLAGS = DAG.getCopyFromReg(cpOutH.getValue(1), dl, X86::EFLAGS, | |||
26757 | MVT::i32, cpOutH.getValue(2)); | |||
26758 | SDValue Success = getSETCC(X86::COND_E, EFLAGS, dl, DAG); | |||
26759 | Success = DAG.getZExtOrTrunc(Success, dl, N->getValueType(1)); | |||
26760 | ||||
26761 | Results.push_back(DAG.getNode(ISD::BUILD_PAIR, dl, T, OpsF)); | |||
26762 | Results.push_back(Success); | |||
26763 | Results.push_back(EFLAGS.getValue(1)); | |||
26764 | return; | |||
26765 | } | |||
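// For reference, the register protocol modeled above is the hardware
// CMPXCHG8B/CMPXCHG16B contract (sketch, not emitted code):
//   EDX:EAX (RDX:RAX) = expected value, ECX:EBX (RCX:RBX) = new value
//   lock cmpxchg8b/16b [mem]   ; ZF = 1 on success;
//                              ; on failure EDX:EAX get the current value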
26766 | case ISD::ATOMIC_SWAP: | |||
26767 | case ISD::ATOMIC_LOAD_ADD: | |||
26768 | case ISD::ATOMIC_LOAD_SUB: | |||
26769 | case ISD::ATOMIC_LOAD_AND: | |||
26770 | case ISD::ATOMIC_LOAD_OR: | |||
26771 | case ISD::ATOMIC_LOAD_XOR: | |||
26772 | case ISD::ATOMIC_LOAD_NAND: | |||
26773 | case ISD::ATOMIC_LOAD_MIN: | |||
26774 | case ISD::ATOMIC_LOAD_MAX: | |||
26775 | case ISD::ATOMIC_LOAD_UMIN: | |||
26776 | case ISD::ATOMIC_LOAD_UMAX: | |||
26777 | case ISD::ATOMIC_LOAD: { | |||
26778 | // Delegate to generic TypeLegalization. Situations we can really handle | |||
26779 | // should have already been dealt with by AtomicExpandPass.cpp. | |||
26780 | break; | |||
26781 | } | |||
26782 | case ISD::BITCAST: { | |||
26783 | assert(Subtarget.hasSSE2() && "Requires at least SSE2!"); | |||
26784 | EVT DstVT = N->getValueType(0); | |||
26785 | EVT SrcVT = N->getOperand(0).getValueType(); | |||
26786 | ||||
26787 | // If this is a bitcast from a v64i1 k-register to a i64 on a 32-bit target | |||
26788 | // we can split using the k-register rather than memory. | |||
26789 | if (SrcVT == MVT::v64i1 && DstVT == MVT::i64 && Subtarget.hasBWI()) { | |||
26790 | assert(!Subtarget.is64Bit() && "Expected 32-bit mode"); | |||
26791 | SDValue Lo, Hi; | |||
26792 | std::tie(Lo, Hi) = DAG.SplitVectorOperand(N, 0); | |||
26793 | Lo = DAG.getBitcast(MVT::i32, Lo); | |||
26794 | Hi = DAG.getBitcast(MVT::i32, Hi); | |||
26795 | SDValue Res = DAG.getNode(ISD::BUILD_PAIR, dl, MVT::i64, Lo, Hi); | |||
26796 | Results.push_back(Res); | |||
26797 | return; | |||
26798 | } | |||
26799 | ||||
26800 | // Custom splitting for BWI types when AVX512F is available but BWI isn't. | |||
26801 | if ((DstVT == MVT::v32i16 || DstVT == MVT::v64i8) && | |||
26802 | SrcVT.isVector() && isTypeLegal(SrcVT)) { | |||
26803 | SDValue Lo, Hi; | |||
26804 | std::tie(Lo, Hi) = DAG.SplitVectorOperand(N, 0); | |||
26805 | MVT CastVT = (DstVT == MVT::v32i16) ? MVT::v16i16 : MVT::v32i8; | |||
26806 | Lo = DAG.getBitcast(CastVT, Lo); | |||
26807 | Hi = DAG.getBitcast(CastVT, Hi); | |||
26808 | SDValue Res = DAG.getNode(ISD::CONCAT_VECTORS, dl, DstVT, Lo, Hi); | |||
26809 | Results.push_back(Res); | |||
26810 | return; | |||
26811 | } | |||
26812 | ||||
26813 | if (SrcVT != MVT::f64 || | |||
26814 | (DstVT != MVT::v2i32 && DstVT != MVT::v4i16 && DstVT != MVT::v8i8) || | |||
26815 | getTypeAction(*DAG.getContext(), DstVT) == TypeWidenVector) | |||
26816 | return; | |||
26817 | ||||
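// Widen the f64 -> vNiM bitcast through v2f64: place the scalar in lane 0,
// reinterpret the 128 bits as a vector with twice the requested elements,
// then keep the low half; e.g. f64 -> v2i32 becomes
//   extract_subvector (bitcast v4i32 (scalar_to_vector v2f64 X)), 0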
26818 | unsigned NumElts = DstVT.getVectorNumElements(); | |||
26819 | EVT SVT = DstVT.getVectorElementType(); | |||
26820 | EVT WiderVT = EVT::getVectorVT(*DAG.getContext(), SVT, NumElts * 2); | |||
26821 | SDValue Res; | |||
26822 | Res = DAG.getNode(ISD::SCALAR_TO_VECTOR, dl, MVT::v2f64, N->getOperand(0)); | |||
26823 | Res = DAG.getBitcast(WiderVT, Res); | |||
26824 | Res = DAG.getNode(ISD::EXTRACT_SUBVECTOR, dl, DstVT, Res, | |||
26825 | DAG.getIntPtrConstant(0, dl)); | |||
26826 | Results.push_back(Res); | |||
26827 | return; | |||
26828 | } | |||
26829 | case ISD::MGATHER: { | |||
26830 | EVT VT = N->getValueType(0); | |||
26831 | if (VT == MVT::v2f32 && (Subtarget.hasVLX() || !Subtarget.hasAVX512())) { | |||
26832 | auto *Gather = cast<MaskedGatherSDNode>(N); | |||
26833 | SDValue Index = Gather->getIndex(); | |||
26834 | if (Index.getValueType() != MVT::v2i64) | |||
26835 | return; | |||
26836 | SDValue Mask = Gather->getMask(); | |||
26837 | assert(Mask.getValueType() == MVT::v2i1 && "Unexpected mask type"); | |||
26838 | SDValue PassThru = DAG.getNode(ISD::CONCAT_VECTORS, dl, MVT::v4f32, | |||
26839 | Gather->getPassThru(), | |||
26840 | DAG.getUNDEF(MVT::v2f32)); | |||
26841 | if (!Subtarget.hasVLX()) { | |||
26842 | // We need to widen the mask, but the instruction will only use 2 | |||
26843 | // of its elements. So we can use undef. | |||
26844 | Mask = DAG.getNode(ISD::CONCAT_VECTORS, dl, MVT::v4i1, Mask, | |||
26845 | DAG.getUNDEF(MVT::v2i1)); | |||
26846 | Mask = DAG.getNode(ISD::SIGN_EXTEND, dl, MVT::v4i32, Mask); | |||
26847 | } | |||
26848 | SDValue Ops[] = { Gather->getChain(), PassThru, Mask, | |||
26849 | Gather->getBasePtr(), Index, Gather->getScale() }; | |||
26850 | SDValue Res = DAG.getTargetMemSDNode<X86MaskedGatherSDNode>( | |||
26851 | DAG.getVTList(MVT::v4f32, Mask.getValueType(), MVT::Other), Ops, dl, | |||
26852 | Gather->getMemoryVT(), Gather->getMemOperand()); | |||
26853 | Results.push_back(Res); | |||
26854 | Results.push_back(Res.getValue(2)); | |||
26855 | return; | |||
26856 | } | |||
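// v2i32 gathers get the same widening to four elements, with one extra
// wrinkle: depending on the widening mode, v2i32 may be promoted rather
// than widened, so the widened result sometimes has to be narrowed back
// via EXTRACT_SUBVECTOR (the getTypeAction checks below).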
26857 | if (VT == MVT::v2i32) { | |||
26858 | auto *Gather = cast<MaskedGatherSDNode>(N); | |||
26859 | SDValue Index = Gather->getIndex(); | |||
26860 | SDValue Mask = Gather->getMask(); | |||
26861 | assert(Mask.getValueType() == MVT::v2i1 && "Unexpected mask type"); | |||
26862 | SDValue PassThru = DAG.getNode(ISD::CONCAT_VECTORS, dl, MVT::v4i32, | |||
26863 | Gather->getPassThru(), | |||
26864 | DAG.getUNDEF(MVT::v2i32)); | |||
26865 | // If the index is v2i64 we can use it directly. | |||
26866 | if (Index.getValueType() == MVT::v2i64 && | |||
26867 | (Subtarget.hasVLX() || !Subtarget.hasAVX512())) { | |||
26868 | if (!Subtarget.hasVLX()) { | |||
26869 | // We need to widen the mask, but the instruction will only use 2 | |||
26870 | // of its elements. So we can use undef. | |||
26871 | Mask = DAG.getNode(ISD::CONCAT_VECTORS, dl, MVT::v4i1, Mask, | |||
26872 | DAG.getUNDEF(MVT::v2i1)); | |||
26873 | Mask = DAG.getNode(ISD::SIGN_EXTEND, dl, MVT::v4i32, Mask); | |||
26874 | } | |||
26875 | SDValue Ops[] = { Gather->getChain(), PassThru, Mask, | |||
26876 | Gather->getBasePtr(), Index, Gather->getScale() }; | |||
26877 | SDValue Res = DAG.getTargetMemSDNode<X86MaskedGatherSDNode>( | |||
26878 | DAG.getVTList(MVT::v4i32, Mask.getValueType(), MVT::Other), Ops, dl, | |||
26879 | Gather->getMemoryVT(), Gather->getMemOperand()); | |||
26880 | SDValue Chain = Res.getValue(2); | |||
26881 | if (getTypeAction(*DAG.getContext(), VT) != TypeWidenVector) | |||
26882 | Res = DAG.getNode(ISD::EXTRACT_SUBVECTOR, dl, MVT::v2i32, Res, | |||
26883 | DAG.getIntPtrConstant(0, dl)); | |||
26884 | Results.push_back(Res); | |||
26885 | Results.push_back(Chain); | |||
26886 | return; | |||
26887 | } | |||
26888 | if (getTypeAction(*DAG.getContext(), VT) != TypeWidenVector) { | |||
26889 | EVT IndexVT = Index.getValueType(); | |||
26890 | EVT NewIndexVT = EVT::getVectorVT(*DAG.getContext(), | |||
26891 | IndexVT.getScalarType(), 4); | |||
26892 | // Otherwise we need to custom widen everything to avoid promotion. | |||
26893 | Index = DAG.getNode(ISD::CONCAT_VECTORS, dl, NewIndexVT, Index, | |||
26894 | DAG.getUNDEF(IndexVT)); | |||
26895 | Mask = DAG.getNode(ISD::CONCAT_VECTORS, dl, MVT::v4i1, Mask, | |||
26896 | DAG.getConstant(0, dl, MVT::v2i1)); | |||
26897 | SDValue Ops[] = { Gather->getChain(), PassThru, Mask, | |||
26898 | Gather->getBasePtr(), Index, Gather->getScale() }; | |||
26899 | SDValue Res = DAG.getMaskedGather(DAG.getVTList(MVT::v4i32, MVT::Other), | |||
26900 | Gather->getMemoryVT(), dl, Ops, | |||
26901 | Gather->getMemOperand()); | |||
26902 | SDValue Chain = Res.getValue(1); | |||
26903 | if (getTypeAction(*DAG.getContext(), MVT::v2i32) != TypeWidenVector) | |||
26904 | Res = DAG.getNode(ISD::EXTRACT_SUBVECTOR, dl, MVT::v2i32, Res, | |||
26905 | DAG.getIntPtrConstant(0, dl)); | |||
26906 | Results.push_back(Res); | |||
26907 | Results.push_back(Chain); | |||
26908 | return; | |||
26909 | } | |||
26910 | } | |||
26911 | return; | |||
26912 | } | |||
26913 | case ISD::LOAD: { | |||
26914 | // Use an f64/i64 load and a scalar_to_vector for v2f32/v2i32 loads. This | |||
26915 | // avoids scalarizing in 32-bit mode. In 64-bit mode this avoids an int->fp | |||
26916 | // cast since type legalization will try to use an i64 load. | |||
26917 | MVT VT = N->getSimpleValueType(0); | |||
26918 | assert(VT.isVector() && VT.getSizeInBits() == 64 && "Unexpected VT"); | |||
26919 | if (getTypeAction(*DAG.getContext(), VT) != TypeWidenVector) | |||
26920 | return; | |||
26921 | if (!ISD::isNON_EXTLoad(N)) | |||
26922 | return; | |||
26923 | auto *Ld = cast<LoadSDNode>(N); | |||
26924 | MVT LdVT = Subtarget.is64Bit() && VT.isInteger() ? MVT::i64 : MVT::f64; | |||
26925 | SDValue Res = DAG.getLoad(LdVT, dl, Ld->getChain(), Ld->getBasePtr(), | |||
26926 | Ld->getPointerInfo(), | |||
26927 | Ld->getAlignment(), | |||
26928 | Ld->getMemOperand()->getFlags()); | |||
26929 | SDValue Chain = Res.getValue(1); | |||
26930 | MVT WideVT = MVT::getVectorVT(LdVT, 2); | |||
26931 | Res = DAG.getNode(ISD::SCALAR_TO_VECTOR, dl, WideVT, Res); | |||
26932 | MVT CastVT = MVT::getVectorVT(VT.getVectorElementType(), | |||
26933 | VT.getVectorNumElements() * 2); | |||
26934 | Res = DAG.getBitcast(CastVT, Res); | |||
26935 | Results.push_back(Res); | |||
26936 | Results.push_back(Chain); | |||
26937 | return; | |||
26938 | } | |||
26939 | } | |||
26940 | } | |||
26941 | ||||
26942 | const char *X86TargetLowering::getTargetNodeName(unsigned Opcode) const { | |||
26943 | switch ((X86ISD::NodeType)Opcode) { | |||
26944 | case X86ISD::FIRST_NUMBER: break; | |||
26945 | case X86ISD::BSF: return "X86ISD::BSF"; | |||
26946 | case X86ISD::BSR: return "X86ISD::BSR"; | |||
26947 | case X86ISD::SHLD: return "X86ISD::SHLD"; | |||
26948 | case X86ISD::SHRD: return "X86ISD::SHRD"; | |||
26949 | case X86ISD::FAND: return "X86ISD::FAND"; | |||
26950 | case X86ISD::FANDN: return "X86ISD::FANDN"; | |||
26951 | case X86ISD::FOR: return "X86ISD::FOR"; | |||
26952 | case X86ISD::FXOR: return "X86ISD::FXOR"; | |||
26953 | case X86ISD::FILD: return "X86ISD::FILD"; | |||
26954 | case X86ISD::FILD_FLAG: return "X86ISD::FILD_FLAG"; | |||
26955 | case X86ISD::FP_TO_INT16_IN_MEM: return "X86ISD::FP_TO_INT16_IN_MEM"; | |||
26956 | case X86ISD::FP_TO_INT32_IN_MEM: return "X86ISD::FP_TO_INT32_IN_MEM"; | |||
26957 | case X86ISD::FP_TO_INT64_IN_MEM: return "X86ISD::FP_TO_INT64_IN_MEM"; | |||
26958 | case X86ISD::FLD: return "X86ISD::FLD"; | |||
26959 | case X86ISD::FST: return "X86ISD::FST"; | |||
26960 | case X86ISD::CALL: return "X86ISD::CALL"; | |||
26961 | case X86ISD::RDTSC_DAG: return "X86ISD::RDTSC_DAG"; | |||
26962 | case X86ISD::RDTSCP_DAG: return "X86ISD::RDTSCP_DAG"; | |||
26963 | case X86ISD::RDPMC_DAG: return "X86ISD::RDPMC_DAG"; | |||
26964 | case X86ISD::BT: return "X86ISD::BT"; | |||
26965 | case X86ISD::CMP: return "X86ISD::CMP"; | |||
26966 | case X86ISD::COMI: return "X86ISD::COMI"; | |||
26967 | case X86ISD::UCOMI: return "X86ISD::UCOMI"; | |||
26968 | case X86ISD::CMPM: return "X86ISD::CMPM"; | |||
26969 | case X86ISD::CMPM_RND: return "X86ISD::CMPM_RND"; | |||
26970 | case X86ISD::SETCC: return "X86ISD::SETCC"; | |||
26971 | case X86ISD::SETCC_CARRY: return "X86ISD::SETCC_CARRY"; | |||
26972 | case X86ISD::FSETCC: return "X86ISD::FSETCC"; | |||
26973 | case X86ISD::FSETCCM: return "X86ISD::FSETCCM"; | |||
26974 | case X86ISD::FSETCCM_RND: return "X86ISD::FSETCCM_RND"; | |||
26975 | case X86ISD::CMOV: return "X86ISD::CMOV"; | |||
26976 | case X86ISD::BRCOND: return "X86ISD::BRCOND"; | |||
26977 | case X86ISD::RET_FLAG: return "X86ISD::RET_FLAG"; | |||
26978 | case X86ISD::IRET: return "X86ISD::IRET"; | |||
26979 | case X86ISD::REP_STOS: return "X86ISD::REP_STOS"; | |||
26980 | case X86ISD::REP_MOVS: return "X86ISD::REP_MOVS"; | |||
26981 | case X86ISD::GlobalBaseReg: return "X86ISD::GlobalBaseReg"; | |||
26982 | case X86ISD::Wrapper: return "X86ISD::Wrapper"; | |||
26983 | case X86ISD::WrapperRIP: return "X86ISD::WrapperRIP"; | |||
26984 | case X86ISD::MOVDQ2Q: return "X86ISD::MOVDQ2Q"; | |||
26985 | case X86ISD::MMX_MOVD2W: return "X86ISD::MMX_MOVD2W"; | |||
26986 | case X86ISD::MMX_MOVW2D: return "X86ISD::MMX_MOVW2D"; | |||
26987 | case X86ISD::PEXTRB: return "X86ISD::PEXTRB"; | |||
26988 | case X86ISD::PEXTRW: return "X86ISD::PEXTRW"; | |||
26989 | case X86ISD::INSERTPS: return "X86ISD::INSERTPS"; | |||
26990 | case X86ISD::PINSRB: return "X86ISD::PINSRB"; | |||
26991 | case X86ISD::PINSRW: return "X86ISD::PINSRW"; | |||
26992 | case X86ISD::PSHUFB: return "X86ISD::PSHUFB"; | |||
26993 | case X86ISD::ANDNP: return "X86ISD::ANDNP"; | |||
26994 | case X86ISD::BLENDI: return "X86ISD::BLENDI"; | |||
26995 | case X86ISD::SHRUNKBLEND: return "X86ISD::SHRUNKBLEND"; | |||
26996 | case X86ISD::HADD: return "X86ISD::HADD"; | |||
26997 | case X86ISD::HSUB: return "X86ISD::HSUB"; | |||
26998 | case X86ISD::FHADD: return "X86ISD::FHADD"; | |||
26999 | case X86ISD::FHSUB: return "X86ISD::FHSUB"; | |||
27000 | case X86ISD::CONFLICT: return "X86ISD::CONFLICT"; | |||
27001 | case X86ISD::FMAX: return "X86ISD::FMAX"; | |||
27002 | case X86ISD::FMAXS: return "X86ISD::FMAXS"; | |||
27003 | case X86ISD::FMAX_RND: return "X86ISD::FMAX_RND"; | |||
27004 | case X86ISD::FMAXS_RND: return "X86ISD::FMAXS_RND"; | |||
27005 | case X86ISD::FMIN: return "X86ISD::FMIN"; | |||
27006 | case X86ISD::FMINS: return "X86ISD::FMINS"; | |||
27007 | case X86ISD::FMIN_RND: return "X86ISD::FMIN_RND"; | |||
27008 | case X86ISD::FMINS_RND: return "X86ISD::FMINS_RND"; | |||
27009 | case X86ISD::FMAXC: return "X86ISD::FMAXC"; | |||
27010 | case X86ISD::FMINC: return "X86ISD::FMINC"; | |||
27011 | case X86ISD::FRSQRT: return "X86ISD::FRSQRT"; | |||
27012 | case X86ISD::FRCP: return "X86ISD::FRCP"; | |||
27013 | case X86ISD::EXTRQI: return "X86ISD::EXTRQI"; | |||
27014 | case X86ISD::INSERTQI: return "X86ISD::INSERTQI"; | |||
27015 | case X86ISD::TLSADDR: return "X86ISD::TLSADDR"; | |||
27016 | case X86ISD::TLSBASEADDR: return "X86ISD::TLSBASEADDR"; | |||
27017 | case X86ISD::TLSCALL: return "X86ISD::TLSCALL"; | |||
27018 | case X86ISD::EH_SJLJ_SETJMP: return "X86ISD::EH_SJLJ_SETJMP"; | |||
27019 | case X86ISD::EH_SJLJ_LONGJMP: return "X86ISD::EH_SJLJ_LONGJMP"; | |||
27020 | case X86ISD::EH_SJLJ_SETUP_DISPATCH: | |||
27021 | return "X86ISD::EH_SJLJ_SETUP_DISPATCH"; | |||
27022 | case X86ISD::EH_RETURN: return "X86ISD::EH_RETURN"; | |||
27023 | case X86ISD::TC_RETURN: return "X86ISD::TC_RETURN"; | |||
27024 | case X86ISD::FNSTCW16m: return "X86ISD::FNSTCW16m"; | |||
27025 | case X86ISD::FNSTSW16r: return "X86ISD::FNSTSW16r"; | |||
27026 | case X86ISD::LCMPXCHG_DAG: return "X86ISD::LCMPXCHG_DAG"; | |||
27027 | case X86ISD::LCMPXCHG8_DAG: return "X86ISD::LCMPXCHG8_DAG"; | |||
27028 | case X86ISD::LCMPXCHG16_DAG: return "X86ISD::LCMPXCHG16_DAG"; | |||
27029 | case X86ISD::LCMPXCHG8_SAVE_EBX_DAG: | |||
27030 | return "X86ISD::LCMPXCHG8_SAVE_EBX_DAG"; | |||
27031 | case X86ISD::LCMPXCHG16_SAVE_RBX_DAG: | |||
27032 | return "X86ISD::LCMPXCHG16_SAVE_RBX_DAG"; | |||
27033 | case X86ISD::LADD: return "X86ISD::LADD"; | |||
27034 | case X86ISD::LSUB: return "X86ISD::LSUB"; | |||
27035 | case X86ISD::LOR: return "X86ISD::LOR"; | |||
27036 | case X86ISD::LXOR: return "X86ISD::LXOR"; | |||
27037 | case X86ISD::LAND: return "X86ISD::LAND"; | |||
27038 | case X86ISD::LINC: return "X86ISD::LINC"; | |||
27039 | case X86ISD::LDEC: return "X86ISD::LDEC"; | |||
27040 | case X86ISD::VZEXT_MOVL: return "X86ISD::VZEXT_MOVL"; | |||
27041 | case X86ISD::VZEXT_LOAD: return "X86ISD::VZEXT_LOAD"; | |||
27042 | case X86ISD::VTRUNC: return "X86ISD::VTRUNC"; | |||
27043 | case X86ISD::VTRUNCS: return "X86ISD::VTRUNCS"; | |||
27044 | case X86ISD::VTRUNCUS: return "X86ISD::VTRUNCUS"; | |||
27045 | case X86ISD::VTRUNCSTORES: return "X86ISD::VTRUNCSTORES"; | |||
27046 | case X86ISD::VTRUNCSTOREUS: return "X86ISD::VTRUNCSTOREUS"; | |||
27047 | case X86ISD::VMTRUNCSTORES: return "X86ISD::VMTRUNCSTORES"; | |||
27048 | case X86ISD::VMTRUNCSTOREUS: return "X86ISD::VMTRUNCSTOREUS"; | |||
27049 | case X86ISD::VFPEXT: return "X86ISD::VFPEXT"; | |||
27050 | case X86ISD::VFPEXT_RND: return "X86ISD::VFPEXT_RND"; | |||
27051 | case X86ISD::VFPEXTS_RND: return "X86ISD::VFPEXTS_RND"; | |||
27052 | case X86ISD::VFPROUND: return "X86ISD::VFPROUND"; | |||
27053 | case X86ISD::VFPROUND_RND: return "X86ISD::VFPROUND_RND"; | |||
27054 | case X86ISD::VFPROUNDS_RND: return "X86ISD::VFPROUNDS_RND"; | |||
27055 | case X86ISD::VSHLDQ: return "X86ISD::VSHLDQ"; | |||
27056 | case X86ISD::VSRLDQ: return "X86ISD::VSRLDQ"; | |||
27057 | case X86ISD::VSHL: return "X86ISD::VSHL"; | |||
27058 | case X86ISD::VSRL: return "X86ISD::VSRL"; | |||
27059 | case X86ISD::VSRA: return "X86ISD::VSRA"; | |||
27060 | case X86ISD::VSHLI: return "X86ISD::VSHLI"; | |||
27061 | case X86ISD::VSRLI: return "X86ISD::VSRLI"; | |||
27062 | case X86ISD::VSRAI: return "X86ISD::VSRAI"; | |||
27063 | case X86ISD::VSRAV: return "X86ISD::VSRAV"; | |||
27064 | case X86ISD::VROTLI: return "X86ISD::VROTLI"; | |||
27065 | case X86ISD::VROTRI: return "X86ISD::VROTRI"; | |||
27066 | case X86ISD::VPPERM: return "X86ISD::VPPERM"; | |||
27067 | case X86ISD::CMPP: return "X86ISD::CMPP"; | |||
27068 | case X86ISD::PCMPEQ: return "X86ISD::PCMPEQ"; | |||
27069 | case X86ISD::PCMPGT: return "X86ISD::PCMPGT"; | |||
27070 | case X86ISD::PHMINPOS: return "X86ISD::PHMINPOS"; | |||
27071 | case X86ISD::ADD: return "X86ISD::ADD"; | |||
27072 | case X86ISD::SUB: return "X86ISD::SUB"; | |||
27073 | case X86ISD::ADC: return "X86ISD::ADC"; | |||
27074 | case X86ISD::SBB: return "X86ISD::SBB"; | |||
27075 | case X86ISD::SMUL: return "X86ISD::SMUL"; | |||
27076 | case X86ISD::UMUL: return "X86ISD::UMUL"; | |||
27077 | case X86ISD::SMUL8: return "X86ISD::SMUL8"; | |||
27078 | case X86ISD::UMUL8: return "X86ISD::UMUL8"; | |||
27079 | case X86ISD::INC: return "X86ISD::INC"; | |||
27080 | case X86ISD::DEC: return "X86ISD::DEC"; | |||
27081 | case X86ISD::OR: return "X86ISD::OR"; | |||
27082 | case X86ISD::XOR: return "X86ISD::XOR"; | |||
27083 | case X86ISD::AND: return "X86ISD::AND"; | |||
27084 | case X86ISD::BEXTR: return "X86ISD::BEXTR"; | |||
27085 | case X86ISD::BZHI: return "X86ISD::BZHI"; | |||
27086 | case X86ISD::MUL_IMM: return "X86ISD::MUL_IMM"; | |||
27087 | case X86ISD::MOVMSK: return "X86ISD::MOVMSK"; | |||
27088 | case X86ISD::PTEST: return "X86ISD::PTEST"; | |||
27089 | case X86ISD::TESTP: return "X86ISD::TESTP"; | |||
27090 | case X86ISD::KORTEST: return "X86ISD::KORTEST"; | |||
27091 | case X86ISD::KTEST: return "X86ISD::KTEST"; | |||
27092 | case X86ISD::KADD: return "X86ISD::KADD"; | |||
27093 | case X86ISD::KSHIFTL: return "X86ISD::KSHIFTL"; | |||
27094 | case X86ISD::KSHIFTR: return "X86ISD::KSHIFTR"; | |||
27095 | case X86ISD::PACKSS: return "X86ISD::PACKSS"; | |||
27096 | case X86ISD::PACKUS: return "X86ISD::PACKUS"; | |||
27097 | case X86ISD::PALIGNR: return "X86ISD::PALIGNR"; | |||
27098 | case X86ISD::VALIGN: return "X86ISD::VALIGN"; | |||
27099 | case X86ISD::VSHLD: return "X86ISD::VSHLD"; | |||
27100 | case X86ISD::VSHRD: return "X86ISD::VSHRD"; | |||
27101 | case X86ISD::VSHLDV: return "X86ISD::VSHLDV"; | |||
27102 | case X86ISD::VSHRDV: return "X86ISD::VSHRDV"; | |||
27103 | case X86ISD::PSHUFD: return "X86ISD::PSHUFD"; | |||
27104 | case X86ISD::PSHUFHW: return "X86ISD::PSHUFHW"; | |||
27105 | case X86ISD::PSHUFLW: return "X86ISD::PSHUFLW"; | |||
27106 | case X86ISD::SHUFP: return "X86ISD::SHUFP"; | |||
27107 | case X86ISD::SHUF128: return "X86ISD::SHUF128"; | |||
27108 | case X86ISD::MOVLHPS: return "X86ISD::MOVLHPS"; | |||
27109 | case X86ISD::MOVHLPS: return "X86ISD::MOVHLPS"; | |||
27110 | case X86ISD::MOVDDUP: return "X86ISD::MOVDDUP"; | |||
27111 | case X86ISD::MOVSHDUP: return "X86ISD::MOVSHDUP"; | |||
27112 | case X86ISD::MOVSLDUP: return "X86ISD::MOVSLDUP"; | |||
27113 | case X86ISD::MOVSD: return "X86ISD::MOVSD"; | |||
27114 | case X86ISD::MOVSS: return "X86ISD::MOVSS"; | |||
27115 | case X86ISD::UNPCKL: return "X86ISD::UNPCKL"; | |||
27116 | case X86ISD::UNPCKH: return "X86ISD::UNPCKH"; | |||
27117 | case X86ISD::VBROADCAST: return "X86ISD::VBROADCAST"; | |||
27118 | case X86ISD::VBROADCASTM: return "X86ISD::VBROADCASTM"; | |||
27119 | case X86ISD::SUBV_BROADCAST: return "X86ISD::SUBV_BROADCAST"; | |||
27120 | case X86ISD::VPERMILPV: return "X86ISD::VPERMILPV"; | |||
27121 | case X86ISD::VPERMILPI: return "X86ISD::VPERMILPI"; | |||
27122 | case X86ISD::VPERM2X128: return "X86ISD::VPERM2X128"; | |||
27123 | case X86ISD::VPERMV: return "X86ISD::VPERMV"; | |||
27124 | case X86ISD::VPERMV3: return "X86ISD::VPERMV3"; | |||
27125 | case X86ISD::VPERMI: return "X86ISD::VPERMI"; | |||
27126 | case X86ISD::VPTERNLOG: return "X86ISD::VPTERNLOG"; | |||
27127 | case X86ISD::VFIXUPIMM: return "X86ISD::VFIXUPIMM"; | |||
27128 | case X86ISD::VFIXUPIMMS: return "X86ISD::VFIXUPIMMS"; | |||
27129 | case X86ISD::VRANGE: return "X86ISD::VRANGE"; | |||
27130 | case X86ISD::VRANGE_RND: return "X86ISD::VRANGE_RND"; | |||
27131 | case X86ISD::VRANGES: return "X86ISD::VRANGES"; | |||
27132 | case X86ISD::VRANGES_RND: return "X86ISD::VRANGES_RND"; | |||
27133 | case X86ISD::PMULUDQ: return "X86ISD::PMULUDQ"; | |||
27134 | case X86ISD::PMULDQ: return "X86ISD::PMULDQ"; | |||
27135 | case X86ISD::PSADBW: return "X86ISD::PSADBW"; | |||
27136 | case X86ISD::DBPSADBW: return "X86ISD::DBPSADBW"; | |||
27137 | case X86ISD::VASTART_SAVE_XMM_REGS: return "X86ISD::VASTART_SAVE_XMM_REGS"; | |||
27138 | case X86ISD::VAARG_64: return "X86ISD::VAARG_64"; | |||
27139 | case X86ISD::WIN_ALLOCA: return "X86ISD::WIN_ALLOCA"; | |||
27140 | case X86ISD::MEMBARRIER: return "X86ISD::MEMBARRIER"; | |||
27141 | case X86ISD::MFENCE: return "X86ISD::MFENCE"; | |||
27142 | case X86ISD::SEG_ALLOCA: return "X86ISD::SEG_ALLOCA"; | |||
27143 | case X86ISD::SAHF: return "X86ISD::SAHF"; | |||
27144 | case X86ISD::RDRAND: return "X86ISD::RDRAND"; | |||
27145 | case X86ISD::RDSEED: return "X86ISD::RDSEED"; | |||
27146 | case X86ISD::VPMADDUBSW: return "X86ISD::VPMADDUBSW"; | |||
27147 | case X86ISD::VPMADDWD: return "X86ISD::VPMADDWD"; | |||
27148 | case X86ISD::VPSHA: return "X86ISD::VPSHA"; | |||
27149 | case X86ISD::VPSHL: return "X86ISD::VPSHL"; | |||
27150 | case X86ISD::VPCOM: return "X86ISD::VPCOM"; | |||
27151 | case X86ISD::VPCOMU: return "X86ISD::VPCOMU"; | |||
27152 | case X86ISD::VPERMIL2: return "X86ISD::VPERMIL2"; | |||
27153 | case X86ISD::FMSUB: return "X86ISD::FMSUB"; | |||
27154 | case X86ISD::FNMADD: return "X86ISD::FNMADD"; | |||
27155 | case X86ISD::FNMSUB: return "X86ISD::FNMSUB"; | |||
27156 | case X86ISD::FMADDSUB: return "X86ISD::FMADDSUB"; | |||
27157 | case X86ISD::FMSUBADD: return "X86ISD::FMSUBADD"; | |||
27158 | case X86ISD::FMADD_RND: return "X86ISD::FMADD_RND"; | |||
27159 | case X86ISD::FNMADD_RND: return "X86ISD::FNMADD_RND"; | |||
27160 | case X86ISD::FMSUB_RND: return "X86ISD::FMSUB_RND"; | |||
27161 | case X86ISD::FNMSUB_RND: return "X86ISD::FNMSUB_RND"; | |||
27162 | case X86ISD::FMADDSUB_RND: return "X86ISD::FMADDSUB_RND"; | |||
27163 | case X86ISD::FMSUBADD_RND: return "X86ISD::FMSUBADD_RND"; | |||
27164 | case X86ISD::VPMADD52H: return "X86ISD::VPMADD52H"; | |||
27165 | case X86ISD::VPMADD52L: return "X86ISD::VPMADD52L"; | |||
27166 | case X86ISD::VRNDSCALE: return "X86ISD::VRNDSCALE"; | |||
27167 | case X86ISD::VRNDSCALE_RND: return "X86ISD::VRNDSCALE_RND"; | |||
27168 | case X86ISD::VRNDSCALES: return "X86ISD::VRNDSCALES"; | |||
27169 | case X86ISD::VRNDSCALES_RND: return "X86ISD::VRNDSCALES_RND"; | |||
27170 | case X86ISD::VREDUCE: return "X86ISD::VREDUCE"; | |||
27171 | case X86ISD::VREDUCE_RND: return "X86ISD::VREDUCE_RND"; | |||
27172 | case X86ISD::VREDUCES: return "X86ISD::VREDUCES"; | |||
27173 | case X86ISD::VREDUCES_RND: return "X86ISD::VREDUCES_RND"; | |||
27174 | case X86ISD::VGETMANT: return "X86ISD::VGETMANT"; | |||
27175 | case X86ISD::VGETMANT_RND: return "X86ISD::VGETMANT_RND"; | |||
27176 | case X86ISD::VGETMANTS: return "X86ISD::VGETMANTS"; | |||
27177 | case X86ISD::VGETMANTS_RND: return "X86ISD::VGETMANTS_RND"; | |||
27178 | case X86ISD::PCMPESTR: return "X86ISD::PCMPESTR"; | |||
27179 | case X86ISD::PCMPISTR: return "X86ISD::PCMPISTR"; | |||
27180 | case X86ISD::XTEST: return "X86ISD::XTEST"; | |||
27181 | case X86ISD::COMPRESS: return "X86ISD::COMPRESS"; | |||
27182 | case X86ISD::EXPAND: return "X86ISD::EXPAND"; | |||
27183 | case X86ISD::SELECT: return "X86ISD::SELECT"; | |||
27184 | case X86ISD::SELECTS: return "X86ISD::SELECTS"; | |||
27185 | case X86ISD::ADDSUB: return "X86ISD::ADDSUB"; | |||
27186 | case X86ISD::RCP14: return "X86ISD::RCP14"; | |||
27187 | case X86ISD::RCP14S: return "X86ISD::RCP14S"; | |||
27188 | case X86ISD::RCP28: return "X86ISD::RCP28"; | |||
27189 | case X86ISD::RCP28S: return "X86ISD::RCP28S"; | |||
27190 | case X86ISD::EXP2: return "X86ISD::EXP2"; | |||
27191 | case X86ISD::RSQRT14: return "X86ISD::RSQRT14"; | |||
27192 | case X86ISD::RSQRT14S: return "X86ISD::RSQRT14S"; | |||
27193 | case X86ISD::RSQRT28: return "X86ISD::RSQRT28"; | |||
27194 | case X86ISD::RSQRT28S: return "X86ISD::RSQRT28S"; | |||
27195 | case X86ISD::FADD_RND: return "X86ISD::FADD_RND"; | |||
27196 | case X86ISD::FADDS_RND: return "X86ISD::FADDS_RND"; | |||
27197 | case X86ISD::FSUB_RND: return "X86ISD::FSUB_RND"; | |||
27198 | case X86ISD::FSUBS_RND: return "X86ISD::FSUBS_RND"; | |||
27199 | case X86ISD::FMUL_RND: return "X86ISD::FMUL_RND"; | |||
27200 | case X86ISD::FMULS_RND: return "X86ISD::FMULS_RND"; | |||
27201 | case X86ISD::FDIV_RND: return "X86ISD::FDIV_RND"; | |||
27202 | case X86ISD::FDIVS_RND: return "X86ISD::FDIVS_RND"; | |||
27203 | case X86ISD::FSQRT_RND: return "X86ISD::FSQRT_RND"; | |||
27204 | case X86ISD::FSQRTS_RND: return "X86ISD::FSQRTS_RND"; | |||
27205 | case X86ISD::FGETEXP_RND: return "X86ISD::FGETEXP_RND"; | |||
27206 | case X86ISD::FGETEXPS_RND: return "X86ISD::FGETEXPS_RND"; | |||
27207 | case X86ISD::SCALEF: return "X86ISD::SCALEF"; | |||
27208 | case X86ISD::SCALEFS: return "X86ISD::SCALEFS"; | |||
27209 | case X86ISD::AVG: return "X86ISD::AVG"; | |||
27210 | case X86ISD::MULHRS: return "X86ISD::MULHRS"; | |||
27211 | case X86ISD::SINT_TO_FP_RND: return "X86ISD::SINT_TO_FP_RND"; | |||
27212 | case X86ISD::UINT_TO_FP_RND: return "X86ISD::UINT_TO_FP_RND"; | |||
27213 | case X86ISD::CVTTP2SI: return "X86ISD::CVTTP2SI"; | |||
27214 | case X86ISD::CVTTP2UI: return "X86ISD::CVTTP2UI"; | |||
27215 | case X86ISD::CVTTP2SI_RND: return "X86ISD::CVTTP2SI_RND"; | |||
27216 | case X86ISD::CVTTP2UI_RND: return "X86ISD::CVTTP2UI_RND"; | |||
27217 | case X86ISD::CVTTS2SI: return "X86ISD::CVTTS2SI"; | |||
27218 | case X86ISD::CVTTS2UI: return "X86ISD::CVTTS2UI"; | |||
27219 | case X86ISD::CVTTS2SI_RND: return "X86ISD::CVTTS2SI_RND"; | |||
27220 | case X86ISD::CVTTS2UI_RND: return "X86ISD::CVTTS2UI_RND"; | |||
27221 | case X86ISD::CVTSI2P: return "X86ISD::CVTSI2P"; | |||
27222 | case X86ISD::CVTUI2P: return "X86ISD::CVTUI2P"; | |||
27223 | case X86ISD::VFPCLASS: return "X86ISD::VFPCLASS"; | |||
27224 | case X86ISD::VFPCLASSS: return "X86ISD::VFPCLASSS"; | |||
27225 | case X86ISD::MULTISHIFT: return "X86ISD::MULTISHIFT"; | |||
27226 | case X86ISD::SCALAR_SINT_TO_FP_RND: return "X86ISD::SCALAR_SINT_TO_FP_RND"; | |||
27227 | case X86ISD::SCALAR_UINT_TO_FP_RND: return "X86ISD::SCALAR_UINT_TO_FP_RND"; | |||
27228 | case X86ISD::CVTPS2PH: return "X86ISD::CVTPS2PH"; | |||
27229 | case X86ISD::CVTPH2PS: return "X86ISD::CVTPH2PS"; | |||
27230 | case X86ISD::CVTPH2PS_RND: return "X86ISD::CVTPH2PS_RND"; | |||
27231 | case X86ISD::CVTP2SI: return "X86ISD::CVTP2SI"; | |||
27232 | case X86ISD::CVTP2UI: return "X86ISD::CVTP2UI"; | |||
27233 | case X86ISD::CVTP2SI_RND: return "X86ISD::CVTP2SI_RND"; | |||
27234 | case X86ISD::CVTP2UI_RND: return "X86ISD::CVTP2UI_RND"; | |||
27235 | case X86ISD::CVTS2SI: return "X86ISD::CVTS2SI"; | |||
27236 | case X86ISD::CVTS2UI: return "X86ISD::CVTS2UI"; | |||
27237 | case X86ISD::CVTS2SI_RND: return "X86ISD::CVTS2SI_RND"; | |||
27238 | case X86ISD::CVTS2UI_RND: return "X86ISD::CVTS2UI_RND"; | |||
27239 | case X86ISD::LWPINS: return "X86ISD::LWPINS"; | |||
27240 | case X86ISD::MGATHER: return "X86ISD::MGATHER"; | |||
27241 | case X86ISD::MSCATTER: return "X86ISD::MSCATTER"; | |||
27242 | case X86ISD::VPDPBUSD: return "X86ISD::VPDPBUSD"; | |||
27243 | case X86ISD::VPDPBUSDS: return "X86ISD::VPDPBUSDS"; | |||
27244 | case X86ISD::VPDPWSSD: return "X86ISD::VPDPWSSD"; | |||
27245 | case X86ISD::VPDPWSSDS: return "X86ISD::VPDPWSSDS"; | |||
27246 | case X86ISD::VPSHUFBITQMB: return "X86ISD::VPSHUFBITQMB"; | |||
27247 | case X86ISD::GF2P8MULB: return "X86ISD::GF2P8MULB"; | |||
27248 | case X86ISD::GF2P8AFFINEQB: return "X86ISD::GF2P8AFFINEQB"; | |||
27249 | case X86ISD::GF2P8AFFINEINVQB: return "X86ISD::GF2P8AFFINEINVQB"; | |||
27250 | case X86ISD::NT_CALL: return "X86ISD::NT_CALL"; | |||
27251 | case X86ISD::NT_BRIND: return "X86ISD::NT_BRIND"; | |||
27252 | case X86ISD::UMWAIT: return "X86ISD::UMWAIT"; | |||
27253 | case X86ISD::TPAUSE: return "X86ISD::TPAUSE"; | |||
27254 | } | |||
27255 | return nullptr; | |||
27256 | } | |||
27257 | ||||
27258 | /// Return true if the addressing mode represented by AM is legal for this | |||
27259 | /// target, for a load/store of the specified type. | |||
27260 | bool X86TargetLowering::isLegalAddressingMode(const DataLayout &DL, | |||
27261 | const AddrMode &AM, Type *Ty, | |||
27262 | unsigned AS, | |||
27263 | Instruction *I) const { | |||
27264 | // X86 supports extremely general addressing modes. | |||
27265 | CodeModel::Model M = getTargetMachine().getCodeModel(); | |||
27266 | ||||
27267 | // X86 allows a sign-extended 32-bit immediate field as a displacement. | |||
27268 | if (!X86::isOffsetSuitableForCodeModel(AM.BaseOffs, M, AM.BaseGV != nullptr)) | |||
27269 | return false; | |||
27270 | ||||
27271 | if (AM.BaseGV) { | |||
27272 | unsigned GVFlags = Subtarget.classifyGlobalReference(AM.BaseGV); | |||
27273 | ||||
27274 | // If a reference to this global requires an extra load, we can't fold it. | |||
27275 | if (isGlobalStubReference(GVFlags)) | |||
27276 | return false; | |||
27277 | ||||
27278 | // If BaseGV requires a register for the PIC base, we cannot also have a | |||
27279 | // BaseReg specified. | |||
27280 | if (AM.HasBaseReg && isGlobalRelativeToPICBase(GVFlags)) | |||
27281 | return false; | |||
27282 | ||||
27283 | // If lower 4G is not available, then we must use rip-relative addressing. | |||
27284 | if ((M != CodeModel::Small || isPositionIndependent()) && | |||
27285 | Subtarget.is64Bit() && (AM.BaseOffs || AM.Scale > 1)) | |||
27286 | return false; | |||
27287 | } | |||
27288 | ||||
27289 | switch (AM.Scale) { | |||
27290 | case 0: | |||
27291 | case 1: | |||
27292 | case 2: | |||
27293 | case 4: | |||
27294 | case 8: | |||
27295 | // These scales always work. | |||
27296 | break; | |||
27297 | case 3: | |||
27298 | case 5: | |||
27299 | case 9: | |||
27300 | // These scales are formed with basereg+scalereg. Only accept if there is | |||
27301 | // no basereg yet. | |||
27302 | if (AM.HasBaseReg) | |||
27303 | return false; | |||
27304 | break; | |||
27305 | default: // Other stuff never works. | |||
27306 | return false; | |||
27307 | } | |||
27308 | ||||
27309 | return true; | |||
27310 | } | |||
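// For reference, the general addressing form accepted above is
//   BaseReg + Scale * IndexReg + Disp32 [+ GV]
// with Scale in {1, 2, 4, 8}. Scales of 3, 5 and 9 are only encodable as
// IndexReg + {2,4,8} * IndexReg (an LEA idiom), which is why they are
// rejected once a separate base register is already in use.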
27311 | ||||
27312 | bool X86TargetLowering::isVectorShiftByScalarCheap(Type *Ty) const { | |||
27313 | unsigned Bits = Ty->getScalarSizeInBits(); | |||
27314 | ||||
27315 | // 8-bit shifts are always expensive, but versions with a scalar amount aren't | |||
27316 | // particularly cheaper than those without. | |||
27317 | if (Bits == 8) | |||
27318 | return false; | |||
27319 | ||||
27320 | // XOP has v16i8/v8i16/v4i32/v2i64 variable vector shifts. | |||
27321 | if (Subtarget.hasXOP() && Ty->getPrimitiveSizeInBits() == 128 && | |||
27322 | (Bits == 8 || Bits == 16 || Bits == 32 || Bits == 64)) | |||
27323 | return false; | |||
27324 | ||||
27325 | // AVX2 has vpsllv[dq] instructions (and other shifts) that make variable | |||
27326 | // shifts just as cheap as scalar ones. | |||
27327 | if (Subtarget.hasAVX2() && (Bits == 32 || Bits == 64)) | |||
27328 | return false; | |||
27329 | ||||
27330 | // AVX512BW has shifts such as vpsllvw. | |||
27331 | if (Subtarget.hasBWI() && Bits == 16) | |||
27332 | return false; | |||
27333 | ||||
27334 | // Otherwise, it's significantly cheaper to shift by a scalar amount than by a | |||
27335 | // fully general vector. | |||
27336 | return true; | |||
27337 | } | |||
27338 | ||||
27339 | bool X86TargetLowering::isTruncateFree(Type *Ty1, Type *Ty2) const { | |||
27340 | if (!Ty1->isIntegerTy() || !Ty2->isIntegerTy()) | |||
27341 | return false; | |||
27342 | unsigned NumBits1 = Ty1->getPrimitiveSizeInBits(); | |||
27343 | unsigned NumBits2 = Ty2->getPrimitiveSizeInBits(); | |||
27344 | return NumBits1 > NumBits2; | |||
27345 | } | |||
27346 | ||||
27347 | bool X86TargetLowering::allowTruncateForTailCall(Type *Ty1, Type *Ty2) const { | |||
27348 | if (!Ty1->isIntegerTy() || !Ty2->isIntegerTy()) | |||
27349 | return false; | |||
27350 | ||||
27351 | if (!isTypeLegal(EVT::getEVT(Ty1))) | |||
27352 | return false; | |||
27353 | ||||
27354 | assert(Ty1->getPrimitiveSizeInBits() <= 64 && "i128 is probably not a noop"); | |||
27355 | ||||
27356 | // Assuming the caller doesn't have a zeroext or signext return parameter, | |||
27357 | // truncation all the way down to i1 is valid. | |||
27358 | return true; | |||
27359 | } | |||
27360 | ||||
27361 | bool X86TargetLowering::isLegalICmpImmediate(int64_t Imm) const { | |||
27362 | return isInt<32>(Imm); | |||
27363 | } | |||
27364 | ||||
27365 | bool X86TargetLowering::isLegalAddImmediate(int64_t Imm) const { | |||
27366 | // Can also use sub to handle negated immediates. | |||
27367 | return isInt<32>(Imm); | |||
27368 | } | |||
27369 | ||||
27370 | bool X86TargetLowering::isLegalStoreImmediate(int64_t Imm) const { | |||
27371 | return isInt<32>(Imm); | |||
27372 | } | |||
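// The three hooks above encode one shared constraint: x86 instructions
// carry at most a sign-extended 32-bit immediate, so 64-bit constants
// outside [-2^31, 2^31) must be materialized into a register first.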
27373 | ||||
27374 | bool X86TargetLowering::isTruncateFree(EVT VT1, EVT VT2) const { | |||
27375 | if (!VT1.isInteger() || !VT2.isInteger()) | |||
27376 | return false; | |||
27377 | unsigned NumBits1 = VT1.getSizeInBits(); | |||
27378 | unsigned NumBits2 = VT2.getSizeInBits(); | |||
27379 | return NumBits1 > NumBits2; | |||
27380 | } | |||
27381 | ||||
27382 | bool X86TargetLowering::isZExtFree(Type *Ty1, Type *Ty2) const { | |||
27383 | // x86-64 implicitly zero-extends 32-bit results in 64-bit registers. | |||
27384 | return Ty1->isIntegerTy(32) && Ty2->isIntegerTy(64) && Subtarget.is64Bit(); | |||
27385 | } | |||
27386 | ||||
27387 | bool X86TargetLowering::isZExtFree(EVT VT1, EVT VT2) const { | |||
27388 | // x86-64 implicitly zero-extends 32-bit results in 64-bit registers. | |||
27389 | return VT1 == MVT::i32 && VT2 == MVT::i64 && Subtarget.is64Bit(); | |||
27390 | } | |||
27391 | ||||
27392 | bool X86TargetLowering::isZExtFree(SDValue Val, EVT VT2) const { | |||
27393 | EVT VT1 = Val.getValueType(); | |||
27394 | if (isZExtFree(VT1, VT2)) | |||
27395 | return true; | |||
27396 | ||||
27397 | if (Val.getOpcode() != ISD::LOAD) | |||
27398 | return false; | |||
27399 | ||||
27400 | if (!VT1.isSimple() || !VT1.isInteger() || | |||
27401 | !VT2.isSimple() || !VT2.isInteger()) | |||
27402 | return false; | |||
27403 | ||||
27404 | switch (VT1.getSimpleVT().SimpleTy) { | |||
27405 | default: break; | |||
27406 | case MVT::i8: | |||
27407 | case MVT::i16: | |||
27408 | case MVT::i32: | |||
27409 | // X86 has 8, 16, and 32-bit zero-extending loads. | |||
27410 | return true; | |||
27411 | } | |||
27412 | ||||
27413 | return false; | |||
27414 | } | |||
27415 | ||||
27416 | bool X86TargetLowering::isVectorLoadExtDesirable(SDValue ExtVal) const { | |||
27417 | EVT SrcVT = ExtVal.getOperand(0).getValueType(); | |||
27418 | ||||
27419 | // There is no extending load for vXi1. | |||
27420 | if (SrcVT.getScalarType() == MVT::i1) | |||
27421 | return false; | |||
27422 | ||||
27423 | return true; | |||
27424 | } | |||
27425 | ||||
27426 | bool | |||
27427 | X86TargetLowering::isFMAFasterThanFMulAndFAdd(EVT VT) const { | |||
27428 | if (!Subtarget.hasAnyFMA()) | |||
27429 | return false; | |||
27430 | ||||
27431 | VT = VT.getScalarType(); | |||
27432 | ||||
27433 | if (!VT.isSimple()) | |||
27434 | return false; | |||
27435 | ||||
27436 | switch (VT.getSimpleVT().SimpleTy) { | |||
27437 | case MVT::f32: | |||
27438 | case MVT::f64: | |||
27439 | return true; | |||
27440 | default: | |||
27441 | break; | |||
27442 | } | |||
27443 | ||||
27444 | return false; | |||
27445 | } | |||
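// On FMA-capable targets a fused multiply-add typically costs about as
// much as the multiply alone, so fusing fmul+fadd is a win for f32/f64;
// x87 f80 has no FMA instruction, hence the default of false elsewhere.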
27446 | ||||
27447 | bool X86TargetLowering::isNarrowingProfitable(EVT VT1, EVT VT2) const { | |||
27448 | // i16 instructions are longer (0x66 prefix) and potentially slower. | |||
27449 | return !(VT1 == MVT::i32 && VT2 == MVT::i16); | |||
27450 | } | |||
27451 | ||||
27452 | /// Targets can use this to indicate that they only support *some* | |||
27453 | /// VECTOR_SHUFFLE operations, those with specific masks. | |||
27454 | /// By default, if a target supports the VECTOR_SHUFFLE node, all mask values | |||
27455 | /// are assumed to be legal. | |||
27456 | bool X86TargetLowering::isShuffleMaskLegal(ArrayRef<int> M, EVT VT) const { | |||
27457 | if (!VT.isSimple()) | |||
27458 | return false; | |||
27459 | ||||
27460 | // Not for i1 vectors | |||
27461 | if (VT.getSimpleVT().getScalarType() == MVT::i1) | |||
27462 | return false; | |||
27463 | ||||
27464 | // Very little shuffling can be done for 64-bit vectors right now. | |||
27465 | if (VT.getSimpleVT().getSizeInBits() == 64) | |||
27466 | return false; | |||
27467 | ||||
27468 | // We only care that the types being shuffled are legal. The lowering can | |||
27469 | // handle any possible shuffle mask that results. | |||
27470 | return isTypeLegal(VT.getSimpleVT()); | |||
27471 | } | |||
27472 | ||||
27473 | bool X86TargetLowering::isVectorClearMaskLegal(ArrayRef<int> Mask, | |||
27474 | EVT VT) const { | |||
27475 | // Don't convert an 'and' into a shuffle that we don't directly support. | |||
27476 | // vpblendw and vpshufb for 256-bit vectors are not available on AVX1. | |||
27477 | if (!Subtarget.hasAVX2()) | |||
27478 | if (VT == MVT::v32i8 || VT == MVT::v16i16) | |||
27479 | return false; | |||
27480 | ||||
27481 | // Just delegate to the generic legality, clear masks aren't special. | |||
27482 | return isShuffleMaskLegal(Mask, VT); | |||
27483 | } | |||
27484 | ||||
27485 | bool X86TargetLowering::areJTsAllowed(const Function *Fn) const { | |||
27486 | // With retpolines, every indirect branch goes through a thunk, so a jump table's indirect jump would be slower than the branch tree it replaces; don't generate them. | |||
27487 | if (Subtarget.useRetpolineIndirectBranches()) | |||
27488 | return false; | |||
27489 | ||||
27490 | // Otherwise, fall back on the generic logic. | |||
27491 | return TargetLowering::areJTsAllowed(Fn); | |||
27492 | } | |||
27493 | ||||
27494 | //===----------------------------------------------------------------------===// | |||
27495 | // X86 Scheduler Hooks | |||
27496 | //===----------------------------------------------------------------------===// | |||
27497 | ||||
27498 | /// Utility function to emit xbegin specifying the start of an RTM region. | |||
27499 | static MachineBasicBlock *emitXBegin(MachineInstr &MI, MachineBasicBlock *MBB, | |||
27500 | const TargetInstrInfo *TII) { | |||
27501 | DebugLoc DL = MI.getDebugLoc(); | |||
27502 | ||||
27503 | const BasicBlock *BB = MBB->getBasicBlock(); | |||
27504 | MachineFunction::iterator I = ++MBB->getIterator(); | |||
27505 | ||||
27506 | // For the v = xbegin(), we generate | |||
27507 | // | |||
27508 | // thisMBB: | |||
27509 | // xbegin fallMBB | |||
27510 | // | |||
27511 | // mainMBB: | |||
27512 | // s0 = -1 | |||
27513 | // | |||
27514 | // fallBB: | |||
27515 | // eax = # XABORT_DEF | |||
27516 | // s1 = eax | |||
27517 | // | |||
27518 | // sinkMBB: | |||
27519 | // v = phi(s0/mainMBB, s1/fallMBB) | |||
27520 | ||||
27521 | MachineBasicBlock *thisMBB = MBB; | |||
27522 | MachineFunction *MF = MBB->getParent(); | |||
27523 | MachineBasicBlock *mainMBB = MF->CreateMachineBasicBlock(BB); | |||
27524 | MachineBasicBlock *fallMBB = MF->CreateMachineBasicBlock(BB); | |||
27525 | MachineBasicBlock *sinkMBB = MF->CreateMachineBasicBlock(BB); | |||
27526 | MF->insert(I, mainMBB); | |||
27527 | MF->insert(I, fallMBB); | |||
27528 | MF->insert(I, sinkMBB); | |||
27529 | ||||
27530 | // Transfer the remainder of BB and its successor edges to sinkMBB. | |||
27531 | sinkMBB->splice(sinkMBB->begin(), MBB, | |||
27532 | std::next(MachineBasicBlock::iterator(MI)), MBB->end()); | |||
27533 | sinkMBB->transferSuccessorsAndUpdatePHIs(MBB); | |||
27534 | ||||
27535 | MachineRegisterInfo &MRI = MF->getRegInfo(); | |||
27536 | unsigned DstReg = MI.getOperand(0).getReg(); | |||
27537 | const TargetRegisterClass *RC = MRI.getRegClass(DstReg); | |||
27538 | unsigned mainDstReg = MRI.createVirtualRegister(RC); | |||
27539 | unsigned fallDstReg = MRI.createVirtualRegister(RC); | |||
27540 | ||||
27541 | // thisMBB: | |||
27542 | // xbegin fallMBB | |||
27543 | // # fallthrough to mainMBB | |||
27544 | // # abort to fallMBB | |||
27545 | BuildMI(thisMBB, DL, TII->get(X86::XBEGIN_4)).addMBB(fallMBB); | |||
27546 | thisMBB->addSuccessor(mainMBB); | |||
27547 | thisMBB->addSuccessor(fallMBB); | |||
27548 | ||||
27549 | // mainMBB: | |||
27550 | // mainDstReg := -1 | |||
27551 | BuildMI(mainMBB, DL, TII->get(X86::MOV32ri), mainDstReg).addImm(-1); | |||
27552 | BuildMI(mainMBB, DL, TII->get(X86::JMP_1)).addMBB(sinkMBB); | |||
27553 | mainMBB->addSuccessor(sinkMBB); | |||
27554 | ||||
27555 | // fallMBB: | |||
27556 | // ; pseudo instruction to model hardware's definition from XABORT | |||
27557 | // EAX := XABORT_DEF | |||
27558 | // fallDstReg := EAX | |||
27559 | BuildMI(fallMBB, DL, TII->get(X86::XABORT_DEF)); | |||
27560 | BuildMI(fallMBB, DL, TII->get(TargetOpcode::COPY), fallDstReg) | |||
27561 | .addReg(X86::EAX); | |||
27562 | fallMBB->addSuccessor(sinkMBB); | |||
27563 | ||||
27564 | // sinkMBB: | |||
27565 | // DstReg := phi(mainDstReg/mainMBB, fallDstReg/fallMBB) | |||
27566 | BuildMI(*sinkMBB, sinkMBB->begin(), DL, TII->get(X86::PHI), DstReg) | |||
27567 | .addReg(mainDstReg).addMBB(mainMBB) | |||
27568 | .addReg(fallDstReg).addMBB(fallMBB); | |||
27569 | ||||
27570 | MI.eraseFromParent(); | |||
27571 | return sinkMBB; | |||
27572 | } | |||
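// Note on RTM semantics: XBEGIN starts a transaction and falls through on
// success; on abort the CPU resumes at the fallback target with an abort
// status in EAX, which is what the XABORT_DEF pseudo above models.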
27573 | ||||
27574 | static MachineBasicBlock *emitWRPKRU(MachineInstr &MI, MachineBasicBlock *BB, | |||
27575 | const X86Subtarget &Subtarget) { | |||
27576 | DebugLoc dl = MI.getDebugLoc(); | |||
27577 | const TargetInstrInfo *TII = Subtarget.getInstrInfo(); | |||
27578 | ||||
27579 | // insert input VAL into EAX | |||
27580 | BuildMI(*BB, MI, dl, TII->get(TargetOpcode::COPY), X86::EAX) | |||
27581 | .addReg(MI.getOperand(0).getReg()); | |||
27582 | // insert zero to ECX | |||
27583 | BuildMI(*BB, MI, dl, TII->get(X86::MOV32r0), X86::ECX); | |||
27584 | ||||
27585 | // insert zero to EDX | |||
27586 | BuildMI(*BB, MI, dl, TII->get(X86::MOV32r0), X86::EDX); | |||
27587 | ||||
27588 | // insert WRPKRU instruction | |||
27589 | BuildMI(*BB, MI, dl, TII->get(X86::WRPKRUr)); | |||
27590 | ||||
27591 | MI.eraseFromParent(); // The pseudo is gone now. | |||
27592 | return BB; | |||
27593 | } | |||
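// WRPKRU loads PKRU from EAX and requires ECX = EDX = 0 (a non-zero value
// in either raises #GP), which is why both registers are zeroed above.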
27594 | ||||
27595 | static MachineBasicBlock *emitRDPKRU(MachineInstr &MI, MachineBasicBlock *BB, | |||
27596 | const X86Subtarget &Subtarget) { | |||
27597 | DebugLoc dl = MI.getDebugLoc(); | |||
27598 | const TargetInstrInfo *TII = Subtarget.getInstrInfo(); | |||
27599 | ||||
27600 | // insert zero to ECX | |||
27601 | BuildMI(*BB, MI, dl, TII->get(X86::MOV32r0), X86::ECX); | |||
27602 | ||||
27603 | // insert RDPKRU instruction | |||
27604 | BuildMI(*BB, MI, dl, TII->get(X86::RDPKRUr)); | |||
27605 | BuildMI(*BB, MI, dl, TII->get(TargetOpcode::COPY), MI.getOperand(0).getReg()) | |||
27606 | .addReg(X86::EAX); | |||
27607 | ||||
27608 | MI.eraseFromParent(); // The pseudo is gone now. | |||
27609 | return BB; | |||
27610 | } | |||
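// RDPKRU likewise requires ECX = 0; it returns PKRU in EAX and clears EDX
// itself, so only ECX needs to be zeroed before the read.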
27611 | ||||
27612 | static MachineBasicBlock *emitMonitor(MachineInstr &MI, MachineBasicBlock *BB, | |||
27613 | const X86Subtarget &Subtarget, | |||
27614 | unsigned Opc) { | |||
27615 | DebugLoc dl = MI.getDebugLoc(); | |||
27616 | const TargetInstrInfo *TII = Subtarget.getInstrInfo(); | |||
27617 | // Address into RAX/EAX, other two args into ECX, EDX. | |||
27618 | unsigned MemOpc = Subtarget.is64Bit() ? X86::LEA64r : X86::LEA32r; | |||
27619 | unsigned MemReg = Subtarget.is64Bit() ? X86::RAX : X86::EAX; | |||
27620 | MachineInstrBuilder MIB = BuildMI(*BB, MI, dl, TII->get(MemOpc), MemReg); | |||
27621 | for (int i = 0; i < X86::AddrNumOperands; ++i) | |||
27622 | MIB.add(MI.getOperand(i)); | |||
27623 | ||||
27624 | unsigned ValOps = X86::AddrNumOperands; | |||
27625 | BuildMI(*BB, MI, dl, TII->get(TargetOpcode::COPY), X86::ECX) | |||
27626 | .addReg(MI.getOperand(ValOps).getReg()); | |||
27627 | BuildMI(*BB, MI, dl, TII->get(TargetOpcode::COPY), X86::EDX) | |||
27628 | .addReg(MI.getOperand(ValOps + 1).getReg()); | |||
27629 | ||||
27630 | // The instruction doesn't actually take any operands though. | |||
27631 | BuildMI(*BB, MI, dl, TII->get(Opc)); | |||
27632 | ||||
27633 | MI.eraseFromParent(); // The pseudo is gone now. | |||
27634 | return BB; | |||
27635 | } | |||
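// This matches the MONITOR/MONITORX register contract: the linear address
// comes from RAX/EAX (materialized by the LEA above), extensions in ECX
// and hints in EDX, so the pseudo's two value operands are copied there.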
27636 | ||||
27637 | static MachineBasicBlock *emitClzero(MachineInstr *MI, MachineBasicBlock *BB, | |||
27638 | const X86Subtarget &Subtarget) { | |||
27639 | DebugLoc dl = MI->getDebugLoc(); | |||
27640 | const TargetInstrInfo *TII = Subtarget.getInstrInfo(); | |||
27641 | // Address into RAX/EAX | |||
27642 | unsigned MemOpc = Subtarget.is64Bit() ? X86::LEA64r : X86::LEA32r; | |||
27643 | unsigned MemReg = Subtarget.is64Bit() ? X86::RAX : X86::EAX; | |||
27644 | MachineInstrBuilder MIB = BuildMI(*BB, MI, dl, TII->get(MemOpc), MemReg); | |||
27645 | for (int i = 0; i < X86::AddrNumOperands; ++i) | |||
27646 | MIB.add(MI->getOperand(i)); | |||
27647 | ||||
27648 | // The instruction doesn't actually take any operands though. | |||
27649 | BuildMI(*BB, MI, dl, TII->get(X86::CLZEROr)); | |||
27650 | ||||
27651 | MI->eraseFromParent(); // The pseudo is gone now. | |||
27652 | return BB; | |||
27653 | } | |||
27654 | ||||
27655 | ||||
27656 | ||||
27657 | MachineBasicBlock * | |||
27658 | X86TargetLowering::EmitVAARG64WithCustomInserter(MachineInstr &MI, | |||
27659 | MachineBasicBlock *MBB) const { | |||
27660 | // Emit va_arg instruction on X86-64. | |||
27661 | ||||
27662 | // Operands to this pseudo-instruction: | |||
27663 | // 0 ) Output : destination address (reg) | |||
27664 | // 1-5) Input : va_list address (addr, i64mem) | |||
27665 | // 6 ) ArgSize : Size (in bytes) of vararg type | |||
27666 | // 7 ) ArgMode : 0=overflow only, 1=use gp_offset, 2=use fp_offset | |||
27667 | // 8 ) Align : Alignment of type | |||
27668 | // 9 ) EFLAGS (implicit-def) | |||
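  // For example, a `va_arg(ap, double)` on x86-64 reaches this point with
  // ArgSize = 8, ArgMode = 2 (use fp_offset) and Align = 8 (illustrative
  // values, not taken from any particular test).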
27669 | ||||
  assert(MI.getNumOperands() == 10 && "VAARG_64 should have 10 operands!");
27671 | static_assert(X86::AddrNumOperands == 5, | |||
27672 | "VAARG_64 assumes 5 address operands"); | |||
27673 | ||||
27674 | unsigned DestReg = MI.getOperand(0).getReg(); | |||
27675 | MachineOperand &Base = MI.getOperand(1); | |||
27676 | MachineOperand &Scale = MI.getOperand(2); | |||
27677 | MachineOperand &Index = MI.getOperand(3); | |||
27678 | MachineOperand &Disp = MI.getOperand(4); | |||
27679 | MachineOperand &Segment = MI.getOperand(5); | |||
27680 | unsigned ArgSize = MI.getOperand(6).getImm(); | |||
27681 | unsigned ArgMode = MI.getOperand(7).getImm(); | |||
27682 | unsigned Align = MI.getOperand(8).getImm(); | |||
27683 | ||||
27684 | // Memory Reference | |||
  assert(MI.hasOneMemOperand() && "Expected VAARG_64 to have one memoperand");
27686 | SmallVector<MachineMemOperand *, 1> MMOs(MI.memoperands_begin(), | |||
27687 | MI.memoperands_end()); | |||
27688 | ||||
27689 | // Machine Information | |||
27690 | const TargetInstrInfo *TII = Subtarget.getInstrInfo(); | |||
27691 | MachineRegisterInfo &MRI = MBB->getParent()->getRegInfo(); | |||
27692 | const TargetRegisterClass *AddrRegClass = getRegClassFor(MVT::i64); | |||
27693 | const TargetRegisterClass *OffsetRegClass = getRegClassFor(MVT::i32); | |||
27694 | DebugLoc DL = MI.getDebugLoc(); | |||
27695 | ||||
27696 | // struct va_list { | |||
27697 | // i32 gp_offset | |||
27698 | // i32 fp_offset | |||
27699 | // i64 overflow_area (address) | |||
27700 | // i64 reg_save_area (address) | |||
27701 | // } | |||
27702 | // sizeof(va_list) = 24 | |||
27703 | // alignment(va_list) = 8 | |||
27704 | ||||
27705 | unsigned TotalNumIntRegs = 6; | |||
27706 | unsigned TotalNumXMMRegs = 8; | |||
27707 | bool UseGPOffset = (ArgMode == 1); | |||
27708 | bool UseFPOffset = (ArgMode == 2); | |||
27709 | unsigned MaxOffset = TotalNumIntRegs * 8 + | |||
27710 | (UseFPOffset ? TotalNumXMMRegs * 16 : 0); | |||
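  // Worked example: for a GP argument MaxOffset = 6 * 8 = 48; for an FP
  // argument MaxOffset = 6 * 8 + 8 * 16 = 176. This matches the SysV
  // reg_save_area layout: six 8-byte GP slots followed by eight 16-byte
  // XMM slots.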
27711 | ||||
  // Align ArgSize to a multiple of 8.
27713 | unsigned ArgSizeA8 = (ArgSize + 7) & ~7; | |||
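  // For example, ArgSize = 12 rounds up to ArgSizeA8 = 16.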
27714 | bool NeedsAlign = (Align > 8); | |||
27715 | ||||
27716 | MachineBasicBlock *thisMBB = MBB; | |||
27717 | MachineBasicBlock *overflowMBB; | |||
27718 | MachineBasicBlock *offsetMBB; | |||
27719 | MachineBasicBlock *endMBB; | |||
27720 | ||||
27721 | unsigned OffsetDestReg = 0; // Argument address computed by offsetMBB | |||
27722 | unsigned OverflowDestReg = 0; // Argument address computed by overflowMBB | |||
27723 | unsigned OffsetReg = 0; | |||
27724 | ||||
27725 | if (!UseGPOffset && !UseFPOffset) { | |||
    // If we only pull from the overflow region, we don't need to alter
    // control flow, so no branch is created.
27728 | OffsetDestReg = 0; // unused | |||
27729 | OverflowDestReg = DestReg; | |||
27730 | ||||
27731 | offsetMBB = nullptr; | |||
27732 | overflowMBB = thisMBB; | |||
27733 | endMBB = thisMBB; | |||
27734 | } else { | |||
27735 | // First emit code to check if gp_offset (or fp_offset) is below the bound. | |||
27736 | // If so, pull the argument from reg_save_area. (branch to offsetMBB) | |||
27737 | // If not, pull from overflow_area. (branch to overflowMBB) | |||
27738 | // | |||
27739 | // thisMBB | |||
27740 | // | . | |||
27741 | // | . | |||
27742 | // offsetMBB overflowMBB | |||
27743 | // | . | |||
27744 | // | . | |||
27745 | // endMBB | |||
27746 | ||||
27747 | // Registers for the PHI in endMBB | |||
27748 | OffsetDestReg = MRI.createVirtualRegister(AddrRegClass); | |||
27749 | OverflowDestReg = MRI.createVirtualRegister(AddrRegClass); | |||
27750 | ||||
27751 | const BasicBlock *LLVM_BB = MBB->getBasicBlock(); | |||
27752 | MachineFunction *MF = MBB->getParent(); | |||
27753 | overflowMBB = MF->CreateMachineBasicBlock(LLVM_BB); | |||
27754 | offsetMBB = MF->CreateMachineBasicBlock(LLVM_BB); | |||
27755 | endMBB = MF->CreateMachineBasicBlock(LLVM_BB); | |||
27756 | ||||
27757 | MachineFunction::iterator MBBIter = ++MBB->getIterator(); | |||
27758 | ||||
27759 | // Insert the new basic blocks | |||
27760 | MF->insert(MBBIter, offsetMBB); | |||
27761 | MF->insert(MBBIter, overflowMBB); | |||
27762 | MF->insert(MBBIter, endMBB); | |||
27763 | ||||
27764 | // Transfer the remainder of MBB and its successor edges to endMBB. | |||
27765 | endMBB->splice(endMBB->begin(), thisMBB, | |||
27766 | std::next(MachineBasicBlock::iterator(MI)), thisMBB->end()); | |||
27767 | endMBB->transferSuccessorsAndUpdatePHIs(thisMBB); | |||
27768 | ||||
27769 | // Make offsetMBB and overflowMBB successors of thisMBB | |||
27770 | thisMBB->addSuccessor(offsetMBB); | |||
27771 | thisMBB->addSuccessor(overflowMBB); | |||
27772 | ||||
27773 | // endMBB is a successor of both offsetMBB and overflowMBB | |||
27774 | offsetMBB->addSuccessor(endMBB); | |||
27775 | overflowMBB->addSuccessor(endMBB); | |||
27776 | ||||
27777 | // Load the offset value into a register | |||
27778 | OffsetReg = MRI.createVirtualRegister(OffsetRegClass); | |||
27779 | BuildMI(thisMBB, DL, TII->get(X86::MOV32rm), OffsetReg) | |||
27780 | .add(Base) | |||
27781 | .add(Scale) | |||
27782 | .add(Index) | |||
27783 | .addDisp(Disp, UseFPOffset ? 4 : 0) | |||
27784 | .add(Segment) | |||
27785 | .setMemRefs(MMOs); | |||
27786 | ||||
27787 | // Check if there is enough room left to pull this argument. | |||
27788 | BuildMI(thisMBB, DL, TII->get(X86::CMP32ri)) | |||
27789 | .addReg(OffsetReg) | |||
27790 | .addImm(MaxOffset + 8 - ArgSizeA8); | |||
27791 | ||||
27792 | // Branch to "overflowMBB" if offset >= max | |||
27793 | // Fall through to "offsetMBB" otherwise | |||
27794 | BuildMI(thisMBB, DL, TII->get(X86::GetCondBranchFromCond(X86::COND_AE))) | |||
27795 | .addMBB(overflowMBB); | |||
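    // At this point thisMBB ends with, schematically:
    //   movl {gp,fp}_offset(<va_list>), %offset
    //   cmpl $(MaxOffset + 8 - ArgSizeA8), %offset
    //   jae  overflowMBB      # otherwise fall through into offsetMBB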
27796 | } | |||
27797 | ||||
27798 | // In offsetMBB, emit code to use the reg_save_area. | |||
27799 | if (offsetMBB) { | |||
    assert(OffsetReg != 0);
27801 | ||||
27802 | // Read the reg_save_area address. | |||
27803 | unsigned RegSaveReg = MRI.createVirtualRegister(AddrRegClass); | |||
27804 | BuildMI(offsetMBB, DL, TII->get(X86::MOV64rm), RegSaveReg) | |||
27805 | .add(Base) | |||
27806 | .add(Scale) | |||
27807 | .add(Index) | |||
27808 | .addDisp(Disp, 16) | |||
27809 | .add(Segment) | |||
27810 | .setMemRefs(MMOs); | |||
27811 | ||||
27812 | // Zero-extend the offset | |||
27813 | unsigned OffsetReg64 = MRI.createVirtualRegister(AddrRegClass); | |||
27814 | BuildMI(offsetMBB, DL, TII->get(X86::SUBREG_TO_REG), OffsetReg64) | |||
27815 | .addImm(0) | |||
27816 | .addReg(OffsetReg) | |||
27817 | .addImm(X86::sub_32bit); | |||
27818 | ||||
27819 | // Add the offset to the reg_save_area to get the final address. | |||
27820 | BuildMI(offsetMBB, DL, TII->get(X86::ADD64rr), OffsetDestReg) | |||
27821 | .addReg(OffsetReg64) | |||
27822 | .addReg(RegSaveReg); | |||
27823 | ||||
27824 | // Compute the offset for the next argument | |||
27825 | unsigned NextOffsetReg = MRI.createVirtualRegister(OffsetRegClass); | |||
27826 | BuildMI(offsetMBB, DL, TII->get(X86::ADD32ri), NextOffsetReg) | |||
27827 | .addReg(OffsetReg) | |||
27828 | .addImm(UseFPOffset ? 16 : 8); | |||
27829 | ||||
27830 | // Store it back into the va_list. | |||
27831 | BuildMI(offsetMBB, DL, TII->get(X86::MOV32mr)) | |||
27832 | .add(Base) | |||
27833 | .add(Scale) | |||
27834 | .add(Index) | |||
27835 | .addDisp(Disp, UseFPOffset ? 4 : 0) | |||
27836 | .add(Segment) | |||
27837 | .addReg(NextOffsetReg) | |||
27838 | .setMemRefs(MMOs); | |||
27839 | ||||
27840 | // Jump to endMBB | |||
27841 | BuildMI(offsetMBB, DL, TII->get(X86::JMP_1)) | |||
27842 | .addMBB(endMBB); | |||
27843 | } | |||
27844 | ||||
27845 | // | |||
27846 | // Emit code to use overflow area | |||
27847 | // | |||
27848 | ||||
27849 | // Load the overflow_area address into a register. | |||
27850 | unsigned OverflowAddrReg = MRI.createVirtualRegister(AddrRegClass); | |||
27851 | BuildMI(overflowMBB, DL, TII->get(X86::MOV64rm), OverflowAddrReg) | |||
27852 | .add(Base) | |||
27853 | .add(Scale) | |||
27854 | .add(Index) | |||
27855 | .addDisp(Disp, 8) | |||
27856 | .add(Segment) | |||
27857 | .setMemRefs(MMOs); | |||
27858 | ||||
27859 | // If we need to align it, do so. Otherwise, just copy the address | |||
27860 | // to OverflowDestReg. | |||
27861 | if (NeedsAlign) { | |||
27862 | // Align the overflow address | |||
    assert(isPowerOf2_32(Align) && "Alignment must be a power of 2");
27864 | unsigned TmpReg = MRI.createVirtualRegister(AddrRegClass); | |||
27865 | ||||
27866 | // aligned_addr = (addr + (align-1)) & ~(align-1) | |||
27867 | BuildMI(overflowMBB, DL, TII->get(X86::ADD64ri32), TmpReg) | |||
27868 | .addReg(OverflowAddrReg) | |||
27869 | .addImm(Align-1); | |||
27870 | ||||
27871 | BuildMI(overflowMBB, DL, TII->get(X86::AND64ri32), OverflowDestReg) | |||
27872 | .addReg(TmpReg) | |||
27873 | .addImm(~(uint64_t)(Align-1)); | |||
27874 | } else { | |||
27875 | BuildMI(overflowMBB, DL, TII->get(TargetOpcode::COPY), OverflowDestReg) | |||
27876 | .addReg(OverflowAddrReg); | |||
27877 | } | |||
27878 | ||||
27879 | // Compute the next overflow address after this argument. | |||
27880 | // (the overflow address should be kept 8-byte aligned) | |||
27881 | unsigned NextAddrReg = MRI.createVirtualRegister(AddrRegClass); | |||
27882 | BuildMI(overflowMBB, DL, TII->get(X86::ADD64ri32), NextAddrReg) | |||
27883 | .addReg(OverflowDestReg) | |||
27884 | .addImm(ArgSizeA8); | |||
27885 | ||||
27886 | // Store the new overflow address. | |||
27887 | BuildMI(overflowMBB, DL, TII->get(X86::MOV64mr)) | |||
27888 | .add(Base) | |||
27889 | .add(Scale) | |||
27890 | .add(Index) | |||
27891 | .addDisp(Disp, 8) | |||
27892 | .add(Segment) | |||
27893 | .addReg(NextAddrReg) | |||
27894 | .setMemRefs(MMOs); | |||
27895 | ||||
27896 | // If we branched, emit the PHI to the front of endMBB. | |||
27897 | if (offsetMBB) { | |||
27898 | BuildMI(*endMBB, endMBB->begin(), DL, | |||
27899 | TII->get(X86::PHI), DestReg) | |||
27900 | .addReg(OffsetDestReg).addMBB(offsetMBB) | |||
27901 | .addReg(OverflowDestReg).addMBB(overflowMBB); | |||
27902 | } | |||
27903 | ||||
27904 | // Erase the pseudo instruction | |||
27905 | MI.eraseFromParent(); | |||
27906 | ||||
27907 | return endMBB; | |||
27908 | } | |||
27909 | ||||
27910 | MachineBasicBlock *X86TargetLowering::EmitVAStartSaveXMMRegsWithCustomInserter( | |||
27911 | MachineInstr &MI, MachineBasicBlock *MBB) const { | |||
  // Emit code to save XMM registers to the stack. The ABI says that the
  // number of registers to save is given in %al, so it's theoretically
  // possible to do an indirect jump trick to avoid saving all of them;
  // however, this code takes a simpler approach and just executes all of
  // the stores if %al is non-zero. It's less code, it's probably easier
  // on the hardware branch predictor, and stores aren't all that
  // expensive anyway.
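  // The emitted code therefore has this rough shape (a sketch; the stores
  // are really frame-index based and rewritten later):
  //   testb %al, %al
  //   je    <EndMBB>
  //   movaps %xmm0, FPOffset +  0(<reg_save_area>)
  //   movaps %xmm1, FPOffset + 16(<reg_save_area>)
  //   ...
  // <EndMBB>: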
27919 | ||||
27920 | // Create the new basic blocks. One block contains all the XMM stores, | |||
27921 | // and one block is the final destination regardless of whether any | |||
27922 | // stores were performed. | |||
27923 | const BasicBlock *LLVM_BB = MBB->getBasicBlock(); | |||
27924 | MachineFunction *F = MBB->getParent(); | |||
27925 | MachineFunction::iterator MBBIter = ++MBB->getIterator(); | |||
27926 | MachineBasicBlock *XMMSaveMBB = F->CreateMachineBasicBlock(LLVM_BB); | |||
27927 | MachineBasicBlock *EndMBB = F->CreateMachineBasicBlock(LLVM_BB); | |||
27928 | F->insert(MBBIter, XMMSaveMBB); | |||
27929 | F->insert(MBBIter, EndMBB); | |||
27930 | ||||
27931 | // Transfer the remainder of MBB and its successor edges to EndMBB. | |||
27932 | EndMBB->splice(EndMBB->begin(), MBB, | |||
27933 | std::next(MachineBasicBlock::iterator(MI)), MBB->end()); | |||
27934 | EndMBB->transferSuccessorsAndUpdatePHIs(MBB); | |||
27935 | ||||
27936 | // The original block will now fall through to the XMM save block. | |||
27937 | MBB->addSuccessor(XMMSaveMBB); | |||
27938 | // The XMMSaveMBB will fall through to the end block. | |||
27939 | XMMSaveMBB->addSuccessor(EndMBB); | |||
27940 | ||||
27941 | // Now add the instructions. | |||
27942 | const TargetInstrInfo *TII = Subtarget.getInstrInfo(); | |||
27943 | DebugLoc DL = MI.getDebugLoc(); | |||
27944 | ||||
27945 | unsigned CountReg = MI.getOperand(0).getReg(); | |||
27946 | int64_t RegSaveFrameIndex = MI.getOperand(1).getImm(); | |||
27947 | int64_t VarArgsFPOffset = MI.getOperand(2).getImm(); | |||
27948 | ||||
27949 | if (!Subtarget.isCallingConvWin64(F->getFunction().getCallingConv())) { | |||
27950 | // If %al is 0, branch around the XMM save block. | |||
27951 | BuildMI(MBB, DL, TII->get(X86::TEST8rr)).addReg(CountReg).addReg(CountReg); | |||
27952 | BuildMI(MBB, DL, TII->get(X86::JE_1)).addMBB(EndMBB); | |||
27953 | MBB->addSuccessor(EndMBB); | |||
27954 | } | |||
27955 | ||||
27956 | // Make sure the last operand is EFLAGS, which gets clobbered by the branch | |||
27957 | // that was just emitted, but clearly shouldn't be "saved". | |||
  assert((MI.getNumOperands() <= 3 ||
          !MI.getOperand(MI.getNumOperands() - 1).isReg() ||
          MI.getOperand(MI.getNumOperands() - 1).getReg() == X86::EFLAGS) &&
         "Expected last argument to be EFLAGS");
27962 | unsigned MOVOpc = Subtarget.hasAVX() ? X86::VMOVAPSmr : X86::MOVAPSmr; | |||
27963 | // In the XMM save block, save all the XMM argument registers. | |||
27964 | for (int i = 3, e = MI.getNumOperands() - 1; i != e; ++i) { | |||
27965 | int64_t Offset = (i - 3) * 16 + VarArgsFPOffset; | |||
27966 | MachineMemOperand *MMO = F->getMachineMemOperand( | |||
27967 | MachinePointerInfo::getFixedStack(*F, RegSaveFrameIndex, Offset), | |||
27968 | MachineMemOperand::MOStore, | |||
27969 | /*Size=*/16, /*Align=*/16); | |||
27970 | BuildMI(XMMSaveMBB, DL, TII->get(MOVOpc)) | |||
27971 | .addFrameIndex(RegSaveFrameIndex) | |||
27972 | .addImm(/*Scale=*/1) | |||
27973 | .addReg(/*IndexReg=*/0) | |||
27974 | .addImm(/*Disp=*/Offset) | |||
27975 | .addReg(/*Segment=*/0) | |||
27976 | .addReg(MI.getOperand(i).getReg()) | |||
27977 | .addMemOperand(MMO); | |||
27978 | } | |||
27979 | ||||
27980 | MI.eraseFromParent(); // The pseudo instruction is gone now. | |||
27981 | ||||
27982 | return EndMBB; | |||
27983 | } | |||
27984 | ||||
27985 | // The EFLAGS operand of SelectItr might be missing a kill marker | |||
27986 | // because there were multiple uses of EFLAGS, and ISel didn't know | |||
27987 | // which to mark. Figure out whether SelectItr should have had a | |||
27988 | // kill marker, and set it if it should. Returns the correct kill | |||
27989 | // marker value. | |||
27990 | static bool checkAndUpdateEFLAGSKill(MachineBasicBlock::iterator SelectItr, | |||
27991 | MachineBasicBlock* BB, | |||
27992 | const TargetRegisterInfo* TRI) { | |||
27993 | // Scan forward through BB for a use/def of EFLAGS. | |||
27994 | MachineBasicBlock::iterator miI(std::next(SelectItr)); | |||
27995 | for (MachineBasicBlock::iterator miE = BB->end(); miI != miE; ++miI) { | |||
27996 | const MachineInstr& mi = *miI; | |||
27997 | if (mi.readsRegister(X86::EFLAGS)) | |||
27998 | return false; | |||
27999 | if (mi.definesRegister(X86::EFLAGS)) | |||
28000 | break; // Should have kill-flag - update below. | |||
28001 | } | |||
28002 | ||||
28003 | // If we hit the end of the block, check whether EFLAGS is live into a | |||
28004 | // successor. | |||
28005 | if (miI == BB->end()) { | |||
28006 | for (MachineBasicBlock::succ_iterator sItr = BB->succ_begin(), | |||
28007 | sEnd = BB->succ_end(); | |||
28008 | sItr != sEnd; ++sItr) { | |||
28009 | MachineBasicBlock* succ = *sItr; | |||
28010 | if (succ->isLiveIn(X86::EFLAGS)) | |||
28011 | return false; | |||
28012 | } | |||
28013 | } | |||
28014 | ||||
28015 | // We found a def, or hit the end of the basic block and EFLAGS wasn't live | |||
28016 | // out. SelectMI should have a kill flag on EFLAGS. | |||
28017 | SelectItr->addRegisterKilled(X86::EFLAGS, TRI); | |||
28018 | return true; | |||
28019 | } | |||
28020 | ||||
// Return true if it is OK for this CMOV pseudo-opcode to be cascaded
// together with other CMOV pseudo-opcodes into a single basic block with a
// conditional jump around it.
28024 | static bool isCMOVPseudo(MachineInstr &MI) { | |||
28025 | switch (MI.getOpcode()) { | |||
28026 | case X86::CMOV_FR32: | |||
28027 | case X86::CMOV_FR64: | |||
28028 | case X86::CMOV_GR8: | |||
28029 | case X86::CMOV_GR16: | |||
28030 | case X86::CMOV_GR32: | |||
28031 | case X86::CMOV_RFP32: | |||
28032 | case X86::CMOV_RFP64: | |||
28033 | case X86::CMOV_RFP80: | |||
28034 | case X86::CMOV_VR128: | |||
28035 | case X86::CMOV_VR128X: | |||
28036 | case X86::CMOV_VR256: | |||
28037 | case X86::CMOV_VR256X: | |||
28038 | case X86::CMOV_VR512: | |||
28039 | case X86::CMOV_VK2: | |||
28040 | case X86::CMOV_VK4: | |||
28041 | case X86::CMOV_VK8: | |||
28042 | case X86::CMOV_VK16: | |||
28043 | case X86::CMOV_VK32: | |||
28044 | case X86::CMOV_VK64: | |||
28045 | return true; | |||
28046 | ||||
28047 | default: | |||
28048 | return false; | |||
28049 | } | |||
28050 | } | |||
28051 | ||||
// Helper function that inserts PHI functions into SinkMBB:
//   %Result(i) = phi [ %FalseValue(i), FalseMBB ], [ %TrueValue(i), TrueMBB ],
// where %FalseValue(i) and %TrueValue(i) are taken from the consecutive CMOVs
// in the [MIItBegin, MIItEnd) range. Returns the MachineInstrBuilder for the
// last PHI function inserted.
28057 | static MachineInstrBuilder createPHIsForCMOVsInSinkBB( | |||
28058 | MachineBasicBlock::iterator MIItBegin, MachineBasicBlock::iterator MIItEnd, | |||
28059 | MachineBasicBlock *TrueMBB, MachineBasicBlock *FalseMBB, | |||
28060 | MachineBasicBlock *SinkMBB) { | |||
28061 | MachineFunction *MF = TrueMBB->getParent(); | |||
28062 | const TargetInstrInfo *TII = MF->getSubtarget().getInstrInfo(); | |||
28063 | DebugLoc DL = MIItBegin->getDebugLoc(); | |||
28064 | ||||
28065 | X86::CondCode CC = X86::CondCode(MIItBegin->getOperand(3).getImm()); | |||
28066 | X86::CondCode OppCC = X86::GetOppositeBranchCondition(CC); | |||
28067 | ||||
28068 | MachineBasicBlock::iterator SinkInsertionPoint = SinkMBB->begin(); | |||
28069 | ||||
28070 | // As we are creating the PHIs, we have to be careful if there is more than | |||
28071 | // one. Later CMOVs may reference the results of earlier CMOVs, but later | |||
28072 | // PHIs have to reference the individual true/false inputs from earlier PHIs. | |||
// That also means that PHI construction must work forward from earlier to
// later, and that the code must maintain a mapping from each earlier PHI's
// destination register to the registers that went into that PHI.
28076 | DenseMap<unsigned, std::pair<unsigned, unsigned>> RegRewriteTable; | |||
28077 | MachineInstrBuilder MIB; | |||
28078 | ||||
28079 | for (MachineBasicBlock::iterator MIIt = MIItBegin; MIIt != MIItEnd; ++MIIt) { | |||
28080 | unsigned DestReg = MIIt->getOperand(0).getReg(); | |||
28081 | unsigned Op1Reg = MIIt->getOperand(1).getReg(); | |||
28082 | unsigned Op2Reg = MIIt->getOperand(2).getReg(); | |||
28083 | ||||
    // If the CMOV we are generating has the opposite condition from the jump
    // we generated, then we have to swap the operands for the PHI that is
    // going to be generated.
28087 | if (MIIt->getOperand(3).getImm() == OppCC) | |||
28088 | std::swap(Op1Reg, Op2Reg); | |||
28089 | ||||
28090 | if (RegRewriteTable.find(Op1Reg) != RegRewriteTable.end()) | |||
28091 | Op1Reg = RegRewriteTable[Op1Reg].first; | |||
28092 | ||||
28093 | if (RegRewriteTable.find(Op2Reg) != RegRewriteTable.end()) | |||
28094 | Op2Reg = RegRewriteTable[Op2Reg].second; | |||
28095 | ||||
28096 | MIB = BuildMI(*SinkMBB, SinkInsertionPoint, DL, TII->get(X86::PHI), DestReg) | |||
28097 | .addReg(Op1Reg) | |||
28098 | .addMBB(FalseMBB) | |||
28099 | .addReg(Op2Reg) | |||
28100 | .addMBB(TrueMBB); | |||
28101 | ||||
28102 | // Add this PHI to the rewrite table. | |||
28103 | RegRewriteTable[DestReg] = std::make_pair(Op1Reg, Op2Reg); | |||
28104 | } | |||
28105 | ||||
28106 | return MIB; | |||
28107 | } | |||
28108 | ||||
// Lower cascaded selects of the form
//   (SecondCascadedCMOV (FirstCMOV F, T, cc1), T, cc2).
28110 | MachineBasicBlock * | |||
28111 | X86TargetLowering::EmitLoweredCascadedSelect(MachineInstr &FirstCMOV, | |||
28112 | MachineInstr &SecondCascadedCMOV, | |||
28113 | MachineBasicBlock *ThisMBB) const { | |||
28114 | const TargetInstrInfo *TII = Subtarget.getInstrInfo(); | |||
28115 | DebugLoc DL = FirstCMOV.getDebugLoc(); | |||
28116 | ||||
28117 | // We lower cascaded CMOVs such as | |||
28118 | // | |||
28119 | // (SecondCascadedCMOV (FirstCMOV F, T, cc1), T, cc2) | |||
28120 | // | |||
28121 | // to two successive branches. | |||
28122 | // | |||
28123 | // Without this, we would add a PHI between the two jumps, which ends up | |||
28124 | // creating a few copies all around. For instance, for | |||
28125 | // | |||
28126 | // (sitofp (zext (fcmp une))) | |||
28127 | // | |||
28128 | // we would generate: | |||
28129 | // | |||
28130 | // ucomiss %xmm1, %xmm0 | |||
28131 | // movss <1.0f>, %xmm0 | |||
28132 | // movaps %xmm0, %xmm1 | |||
28133 | // jne .LBB5_2 | |||
28134 | // xorps %xmm1, %xmm1 | |||
28135 | // .LBB5_2: | |||
28136 | // jp .LBB5_4 | |||
28137 | // movaps %xmm1, %xmm0 | |||
28138 | // .LBB5_4: | |||
28139 | // retq | |||
28140 | // | |||
28141 | // because this custom-inserter would have generated: | |||
28142 | // | |||
28143 | // A | |||
28144 | // | \ | |||
28145 | // | B | |||
28146 | // | / | |||
28147 | // C | |||
28148 | // | \ | |||
28149 | // | D | |||
28150 | // | / | |||
28151 | // E | |||
28152 | // | |||
28153 | // A: X = ...; Y = ... | |||
28154 | // B: empty | |||
28155 | // C: Z = PHI [X, A], [Y, B] | |||
28156 | // D: empty | |||
28157 | // E: PHI [X, C], [Z, D] | |||
28158 | // | |||
28159 | // If we lower both CMOVs in a single step, we can instead generate: | |||
28160 | // | |||
28161 | // A | |||
28162 | // | \ | |||
28163 | // | C | |||
28164 | // | /| | |||
28165 | // |/ | | |||
28166 | // | | | |||
28167 | // | D | |||
28168 | // | / | |||
28169 | // E | |||
28170 | // | |||
28171 | // A: X = ...; Y = ... | |||
28172 | // D: empty | |||
28173 | // E: PHI [X, A], [X, C], [Y, D] | |||
28174 | // | |||
28175 | // Which, in our sitofp/fcmp example, gives us something like: | |||
28176 | // | |||
28177 | // ucomiss %xmm1, %xmm0 | |||
28178 | // movss <1.0f>, %xmm0 | |||
28179 | // jne .LBB5_4 | |||
28180 | // jp .LBB5_4 | |||
28181 | // xorps %xmm0, %xmm0 | |||
28182 | // .LBB5_4: | |||
28183 | // retq | |||
28184 | // | |||
28185 | ||||
  // We lower a cascaded CMOV into two successive branches to the same block.
  // EFLAGS is used by both branches, so mark it as live into the block that
  // holds the second branch (FirstInsertedMBB below).
28188 | const BasicBlock *LLVM_BB = ThisMBB->getBasicBlock(); | |||
28189 | MachineFunction *F = ThisMBB->getParent(); | |||
28190 | MachineBasicBlock *FirstInsertedMBB = F->CreateMachineBasicBlock(LLVM_BB); | |||
28191 | MachineBasicBlock *SecondInsertedMBB = F->CreateMachineBasicBlock(LLVM_BB); | |||
28192 | MachineBasicBlock *SinkMBB = F->CreateMachineBasicBlock(LLVM_BB); | |||
28193 | ||||
28194 | MachineFunction::iterator It = ++ThisMBB->getIterator(); | |||
28195 | F->insert(It, FirstInsertedMBB); | |||
28196 | F->insert(It, SecondInsertedMBB); | |||
28197 | F->insert(It, SinkMBB); | |||
28198 | ||||
  // As noted above, EFLAGS is used by both branches of a cascaded CMOV, so
  // it must be live into FirstInsertedMBB, which holds the second branch.
28202 | FirstInsertedMBB->addLiveIn(X86::EFLAGS); | |||
28203 | ||||
28204 | // If the EFLAGS register isn't dead in the terminator, then claim that it's | |||
28205 | // live into the sink and copy blocks. | |||
28206 | const TargetRegisterInfo *TRI = Subtarget.getRegisterInfo(); | |||
28207 | if (!SecondCascadedCMOV.killsRegister(X86::EFLAGS) && | |||
28208 | !checkAndUpdateEFLAGSKill(SecondCascadedCMOV, ThisMBB, TRI)) { | |||
28209 | SecondInsertedMBB->addLiveIn(X86::EFLAGS); | |||
28210 | SinkMBB->addLiveIn(X86::EFLAGS); | |||
28211 | } | |||
28212 | ||||
28213 | // Transfer the remainder of ThisMBB and its successor edges to SinkMBB. | |||
28214 | SinkMBB->splice(SinkMBB->begin(), ThisMBB, | |||
28215 | std::next(MachineBasicBlock::iterator(FirstCMOV)), | |||
28216 | ThisMBB->end()); | |||
28217 | SinkMBB->transferSuccessorsAndUpdatePHIs(ThisMBB); | |||
28218 | ||||
28219 | // Fallthrough block for ThisMBB. | |||
28220 | ThisMBB->addSuccessor(FirstInsertedMBB); | |||
28221 | // The true block target of the first branch is always SinkMBB. | |||
28222 | ThisMBB->addSuccessor(SinkMBB); | |||
28223 | // Fallthrough block for FirstInsertedMBB. | |||
28224 | FirstInsertedMBB->addSuccessor(SecondInsertedMBB); | |||
28225 | // The true block for the branch of FirstInsertedMBB. | |||
28226 | FirstInsertedMBB->addSuccessor(SinkMBB); | |||
  // SecondInsertedMBB falls through to SinkMBB.
28228 | SecondInsertedMBB->addSuccessor(SinkMBB); | |||
28229 | ||||
28230 | // Create the conditional branch instructions. | |||
28231 | X86::CondCode FirstCC = X86::CondCode(FirstCMOV.getOperand(3).getImm()); | |||
28232 | unsigned Opc = X86::GetCondBranchFromCond(FirstCC); | |||
28233 | BuildMI(ThisMBB, DL, TII->get(Opc)).addMBB(SinkMBB); | |||
28234 | ||||
28235 | X86::CondCode SecondCC = | |||
28236 | X86::CondCode(SecondCascadedCMOV.getOperand(3).getImm()); | |||
28237 | unsigned Opc2 = X86::GetCondBranchFromCond(SecondCC); | |||
28238 | BuildMI(FirstInsertedMBB, DL, TII->get(Opc2)).addMBB(SinkMBB); | |||
28239 | ||||
28240 | // SinkMBB: | |||
28241 | // %Result = phi [ %FalseValue, SecondInsertedMBB ], [ %TrueValue, ThisMBB ] | |||
28242 | unsigned DestReg = FirstCMOV.getOperand(0).getReg(); | |||
28243 | unsigned Op1Reg = FirstCMOV.getOperand(1).getReg(); | |||
28244 | unsigned Op2Reg = FirstCMOV.getOperand(2).getReg(); | |||
28245 | MachineInstrBuilder MIB = | |||
28246 | BuildMI(*SinkMBB, SinkMBB->begin(), DL, TII->get(X86::PHI), DestReg) | |||
28247 | .addReg(Op1Reg) | |||
28248 | .addMBB(SecondInsertedMBB) | |||
28249 | .addReg(Op2Reg) | |||
28250 | .addMBB(ThisMBB); | |||
28251 | ||||
  // SecondInsertedMBB provides the same incoming value as FirstInsertedMBB
  // (the True operand of the SELECT_CC/CMOV nodes).
28254 | MIB.addReg(FirstCMOV.getOperand(2).getReg()).addMBB(FirstInsertedMBB); | |||
28255 | // Copy the PHI result to the register defined by the second CMOV. | |||
28256 | BuildMI(*SinkMBB, std::next(MachineBasicBlock::iterator(MIB.getInstr())), DL, | |||
28257 | TII->get(TargetOpcode::COPY), | |||
28258 | SecondCascadedCMOV.getOperand(0).getReg()) | |||
28259 | .addReg(FirstCMOV.getOperand(0).getReg()); | |||
28260 | ||||
28261 | // Now remove the CMOVs. | |||
28262 | FirstCMOV.eraseFromParent(); | |||
28263 | SecondCascadedCMOV.eraseFromParent(); | |||
28264 | ||||
28265 | return SinkMBB; | |||
28266 | } | |||
28267 | ||||
28268 | MachineBasicBlock * | |||
28269 | X86TargetLowering::EmitLoweredSelect(MachineInstr &MI, | |||
28270 | MachineBasicBlock *ThisMBB) const { | |||
28271 | const TargetInstrInfo *TII = Subtarget.getInstrInfo(); | |||
28272 | DebugLoc DL = MI.getDebugLoc(); | |||
28273 | ||||
28274 | // To "insert" a SELECT_CC instruction, we actually have to insert the | |||
28275 | // diamond control-flow pattern. The incoming instruction knows the | |||
28276 | // destination vreg to set, the condition code register to branch on, the | |||
28277 | // true/false values to select between and a branch opcode to use. | |||
28278 | ||||
28279 | // ThisMBB: | |||
28280 | // ... | |||
28281 | // TrueVal = ... | |||
28282 | // cmpTY ccX, r1, r2 | |||
28283 | // bCC copy1MBB | |||
28284 | // fallthrough --> FalseMBB | |||
28285 | ||||
28286 | // This code lowers all pseudo-CMOV instructions. Generally it lowers these | |||
28287 | // as described above, by inserting a BB, and then making a PHI at the join | |||
28288 | // point to select the true and false operands of the CMOV in the PHI. | |||
28289 | // | |||
28290 | // The code also handles two different cases of multiple CMOV opcodes | |||
28291 | // in a row. | |||
28292 | // | |||
28293 | // Case 1: | |||
  // In this case, there are multiple CMOVs in a row, all of which are based on
28295 | // the same condition setting (or the exact opposite condition setting). | |||
28296 | // In this case we can lower all the CMOVs using a single inserted BB, and | |||
28297 | // then make a number of PHIs at the join point to model the CMOVs. The only | |||
  // trickiness here is that in a case like:
28299 | // | |||
28300 | // t2 = CMOV cond1 t1, f1 | |||
28301 | // t3 = CMOV cond1 t2, f2 | |||
28302 | // | |||
28303 | // when rewriting this into PHIs, we have to perform some renaming on the | |||
28304 | // temps since you cannot have a PHI operand refer to a PHI result earlier | |||
28305 | // in the same block. The "simple" but wrong lowering would be: | |||
28306 | // | |||
28307 | // t2 = PHI t1(BB1), f1(BB2) | |||
28308 | // t3 = PHI t2(BB1), f2(BB2) | |||
28309 | // | |||
28310 | // but clearly t2 is not defined in BB1, so that is incorrect. The proper | |||
28311 | // renaming is to note that on the path through BB1, t2 is really just a | |||
28312 | // copy of t1, and do that renaming, properly generating: | |||
28313 | // | |||
28314 | // t2 = PHI t1(BB1), f1(BB2) | |||
28315 | // t3 = PHI t1(BB1), f2(BB2) | |||
28316 | // | |||
28317 | // Case 2: | |||
28318 | // CMOV ((CMOV F, T, cc1), T, cc2) is checked here and handled by a separate | |||
28319 | // function - EmitLoweredCascadedSelect. | |||
28320 | ||||
28321 | X86::CondCode CC = X86::CondCode(MI.getOperand(3).getImm()); | |||
28322 | X86::CondCode OppCC = X86::GetOppositeBranchCondition(CC); | |||
28323 | MachineInstr *LastCMOV = &MI; | |||
28324 | MachineBasicBlock::iterator NextMIIt = | |||
28325 | std::next(MachineBasicBlock::iterator(MI)); | |||
28326 | ||||
  // First, check for case 1, where there are multiple CMOVs with the same
  // condition. Of the two cases of multiple CMOV lowerings, case 1 reduces
  // the number of jumps the most.
28330 | ||||
28331 | if (isCMOVPseudo(MI)) { | |||
    // See if we have a string of CMOVs with the same condition.
28333 | while (NextMIIt != ThisMBB->end() && isCMOVPseudo(*NextMIIt) && | |||
28334 | (NextMIIt->getOperand(3).getImm() == CC || | |||
28335 | NextMIIt->getOperand(3).getImm() == OppCC)) { | |||
28336 | LastCMOV = &*NextMIIt; | |||
28337 | ++NextMIIt; | |||
28338 | } | |||
28339 | } | |||
28340 | ||||
  // Check for case 2, but only if we didn't already find case 1, as
  // indicated by LastCMOV == &MI.
28343 | if (LastCMOV == &MI && NextMIIt != ThisMBB->end() && | |||
28344 | NextMIIt->getOpcode() == MI.getOpcode() && | |||
28345 | NextMIIt->getOperand(2).getReg() == MI.getOperand(2).getReg() && | |||
28346 | NextMIIt->getOperand(1).getReg() == MI.getOperand(0).getReg() && | |||
28347 | NextMIIt->getOperand(1).isKill()) { | |||
28348 | return EmitLoweredCascadedSelect(MI, *NextMIIt, ThisMBB); | |||
28349 | } | |||
28350 | ||||
28351 | const BasicBlock *LLVM_BB = ThisMBB->getBasicBlock(); | |||
28352 | MachineFunction *F = ThisMBB->getParent(); | |||
28353 | MachineBasicBlock *FalseMBB = F->CreateMachineBasicBlock(LLVM_BB); | |||
28354 | MachineBasicBlock *SinkMBB = F->CreateMachineBasicBlock(LLVM_BB); | |||
28355 | ||||
28356 | MachineFunction::iterator It = ++ThisMBB->getIterator(); | |||
28357 | F->insert(It, FalseMBB); | |||
28358 | F->insert(It, SinkMBB); | |||
28359 | ||||
28360 | // If the EFLAGS register isn't dead in the terminator, then claim that it's | |||
28361 | // live into the sink and copy blocks. | |||
28362 | const TargetRegisterInfo *TRI = Subtarget.getRegisterInfo(); | |||
28363 | if (!LastCMOV->killsRegister(X86::EFLAGS) && | |||
28364 | !checkAndUpdateEFLAGSKill(LastCMOV, ThisMBB, TRI)) { | |||
28365 | FalseMBB->addLiveIn(X86::EFLAGS); | |||
28366 | SinkMBB->addLiveIn(X86::EFLAGS); | |||
28367 | } | |||
28368 | ||||
28369 | // Transfer the remainder of ThisMBB and its successor edges to SinkMBB. | |||
28370 | SinkMBB->splice(SinkMBB->begin(), ThisMBB, | |||
28371 | std::next(MachineBasicBlock::iterator(LastCMOV)), | |||
28372 | ThisMBB->end()); | |||
28373 | SinkMBB->transferSuccessorsAndUpdatePHIs(ThisMBB); | |||
28374 | ||||
28375 | // Fallthrough block for ThisMBB. | |||
28376 | ThisMBB->addSuccessor(FalseMBB); | |||
  // The true block target of the first (or only) branch is always SinkMBB.
28378 | ThisMBB->addSuccessor(SinkMBB); | |||
28379 | // Fallthrough block for FalseMBB. | |||
28380 | FalseMBB->addSuccessor(SinkMBB); | |||
28381 | ||||
28382 | // Create the conditional branch instruction. | |||
28383 | unsigned Opc = X86::GetCondBranchFromCond(CC); | |||
28384 | BuildMI(ThisMBB, DL, TII->get(Opc)).addMBB(SinkMBB); | |||
28385 | ||||
28386 | // SinkMBB: | |||
28387 | // %Result = phi [ %FalseValue, FalseMBB ], [ %TrueValue, ThisMBB ] | |||
28388 | // ... | |||
28389 | MachineBasicBlock::iterator MIItBegin = MachineBasicBlock::iterator(MI); | |||
28390 | MachineBasicBlock::iterator MIItEnd = | |||
28391 | std::next(MachineBasicBlock::iterator(LastCMOV)); | |||
28392 | createPHIsForCMOVsInSinkBB(MIItBegin, MIItEnd, ThisMBB, FalseMBB, SinkMBB); | |||
28393 | ||||
28394 | // Now remove the CMOV(s). | |||
28395 | ThisMBB->erase(MIItBegin, MIItEnd); | |||
28396 | ||||
28397 | return SinkMBB; | |||
28398 | } | |||
28399 | ||||
28400 | MachineBasicBlock * | |||
28401 | X86TargetLowering::EmitLoweredAtomicFP(MachineInstr &MI, | |||
28402 | MachineBasicBlock *BB) const { | |||
28403 | // Combine the following atomic floating-point modification pattern: | |||
28404 | // a.store(reg OP a.load(acquire), release) | |||
28405 | // Transform them into: | |||
28406 | // OPss (%gpr), %xmm | |||
28407 | // movss %xmm, (%gpr) | |||
  // or the SD equivalent for 64-bit operations.
28409 | unsigned MOp, FOp; | |||
28410 | switch (MI.getOpcode()) { | |||
  default: llvm_unreachable("unexpected instr type for EmitLoweredAtomicFP");
28412 | case X86::RELEASE_FADD32mr: | |||
28413 | FOp = X86::ADDSSrm; | |||
28414 | MOp = X86::MOVSSmr; | |||
28415 | break; | |||
28416 | case X86::RELEASE_FADD64mr: | |||
28417 | FOp = X86::ADDSDrm; | |||
28418 | MOp = X86::MOVSDmr; | |||
28419 | break; | |||
28420 | } | |||
28421 | const X86InstrInfo *TII = Subtarget.getInstrInfo(); | |||
28422 | DebugLoc DL = MI.getDebugLoc(); | |||
28423 | MachineRegisterInfo &MRI = BB->getParent()->getRegInfo(); | |||
28424 | unsigned ValOpIdx = X86::AddrNumOperands; | |||
28425 | unsigned VSrc = MI.getOperand(ValOpIdx).getReg(); | |||
28426 | MachineInstrBuilder MIB = | |||
28427 | BuildMI(*BB, MI, DL, TII->get(FOp), | |||
28428 | MRI.createVirtualRegister(MRI.getRegClass(VSrc))) | |||
28429 | .addReg(VSrc); | |||
28430 | for (int i = 0; i < X86::AddrNumOperands; ++i) { | |||
28431 | MachineOperand &Operand = MI.getOperand(i); | |||
28432 | // Clear any kill flags on register operands as we'll create a second | |||
28433 | // instruction using the same address operands. | |||
28434 | if (Operand.isReg()) | |||
28435 | Operand.setIsKill(false); | |||
28436 | MIB.add(Operand); | |||
28437 | } | |||
28438 | MachineInstr *FOpMI = MIB; | |||
28439 | MIB = BuildMI(*BB, MI, DL, TII->get(MOp)); | |||
28440 | for (int i = 0; i < X86::AddrNumOperands; ++i) | |||
28441 | MIB.add(MI.getOperand(i)); | |||
28442 | MIB.addReg(FOpMI->getOperand(0).getReg(), RegState::Kill); | |||
28443 | MI.eraseFromParent(); // The pseudo instruction is gone now. | |||
28444 | return BB; | |||
28445 | } | |||
28446 | ||||
28447 | MachineBasicBlock * | |||
28448 | X86TargetLowering::EmitLoweredSegAlloca(MachineInstr &MI, | |||
28449 | MachineBasicBlock *BB) const { | |||
28450 | MachineFunction *MF = BB->getParent(); | |||
28451 | const TargetInstrInfo *TII = Subtarget.getInstrInfo(); | |||
28452 | DebugLoc DL = MI.getDebugLoc(); | |||
28453 | const BasicBlock *LLVM_BB = BB->getBasicBlock(); | |||
28454 | ||||
  assert(MF->shouldSplitStack());
28456 | ||||
28457 | const bool Is64Bit = Subtarget.is64Bit(); | |||
28458 | const bool IsLP64 = Subtarget.isTarget64BitLP64(); | |||
28459 | ||||
28460 | const unsigned TlsReg = Is64Bit ? X86::FS : X86::GS; | |||
28461 | const unsigned TlsOffset = IsLP64 ? 0x70 : Is64Bit ? 0x40 : 0x30; | |||
28462 | ||||
28463 | // BB: | |||
28464 | // ... [Till the alloca] | |||
28465 | // If stacklet is not large enough, jump to mallocMBB | |||
28466 | // | |||
28467 | // bumpMBB: | |||
28468 | // Allocate by subtracting from RSP | |||
28469 | // Jump to continueMBB | |||
28470 | // | |||
28471 | // mallocMBB: | |||
28472 | // Allocate by call to runtime | |||
28473 | // | |||
28474 | // continueMBB: | |||
28475 | // ... | |||
28476 | // [rest of original BB] | |||
28477 | // | |||
28478 | ||||
28479 | MachineBasicBlock *mallocMBB = MF->CreateMachineBasicBlock(LLVM_BB); | |||
28480 | MachineBasicBlock *bumpMBB = MF->CreateMachineBasicBlock(LLVM_BB); | |||
28481 | MachineBasicBlock *continueMBB = MF->CreateMachineBasicBlock(LLVM_BB); | |||
28482 | ||||
28483 | MachineRegisterInfo &MRI = MF->getRegInfo(); | |||
28484 | const TargetRegisterClass *AddrRegClass = | |||
28485 | getRegClassFor(getPointerTy(MF->getDataLayout())); | |||
28486 | ||||
28487 | unsigned mallocPtrVReg = MRI.createVirtualRegister(AddrRegClass), | |||
28488 | bumpSPPtrVReg = MRI.createVirtualRegister(AddrRegClass), | |||
28489 | tmpSPVReg = MRI.createVirtualRegister(AddrRegClass), | |||
28490 | SPLimitVReg = MRI.createVirtualRegister(AddrRegClass), | |||
28491 | sizeVReg = MI.getOperand(1).getReg(), | |||
28492 | physSPReg = | |||
28493 | IsLP64 || Subtarget.isTargetNaCl64() ? X86::RSP : X86::ESP; | |||
28494 | ||||
28495 | MachineFunction::iterator MBBIter = ++BB->getIterator(); | |||
28496 | ||||
28497 | MF->insert(MBBIter, bumpMBB); | |||
28498 | MF->insert(MBBIter, mallocMBB); | |||
28499 | MF->insert(MBBIter, continueMBB); | |||
28500 | ||||
28501 | continueMBB->splice(continueMBB->begin(), BB, | |||
28502 | std::next(MachineBasicBlock::iterator(MI)), BB->end()); | |||
28503 | continueMBB->transferSuccessorsAndUpdatePHIs(BB); | |||
28504 | ||||
  // Add code to the main basic block to check if the stack limit has been
  // hit; if so, jump to mallocMBB, otherwise to bumpMBB.
28507 | BuildMI(BB, DL, TII->get(TargetOpcode::COPY), tmpSPVReg).addReg(physSPReg); | |||
28508 | BuildMI(BB, DL, TII->get(IsLP64 ? X86::SUB64rr:X86::SUB32rr), SPLimitVReg) | |||
28509 | .addReg(tmpSPVReg).addReg(sizeVReg); | |||
28510 | BuildMI(BB, DL, TII->get(IsLP64 ? X86::CMP64mr:X86::CMP32mr)) | |||
28511 | .addReg(0).addImm(1).addReg(0).addImm(TlsOffset).addReg(TlsReg) | |||
28512 | .addReg(SPLimitVReg); | |||
28513 | BuildMI(BB, DL, TII->get(X86::JG_1)).addMBB(mallocMBB); | |||
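  // On LP64 the check above comes out as, roughly:
  //   movq %rsp, %tmp
  //   subq %size, %tmp        # SPLimitVReg = old SP - allocation size
  //   cmpq %tmp, %fs:0x70     # compare the stacklet limit with the new SP
  //   jg   mallocMBB          # limit > new SP: not enough room, call out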
28514 | ||||
28515 | // bumpMBB simply decreases the stack pointer, since we know the current | |||
28516 | // stacklet has enough space. | |||
28517 | BuildMI(bumpMBB, DL, TII->get(TargetOpcode::COPY), physSPReg) | |||
28518 | .addReg(SPLimitVReg); | |||
28519 | BuildMI(bumpMBB, DL, TII->get(TargetOpcode::COPY), bumpSPPtrVReg) | |||
28520 | .addReg(SPLimitVReg); | |||
28521 | BuildMI(bumpMBB, DL, TII->get(X86::JMP_1)).addMBB(continueMBB); | |||
28522 | ||||
28523 | // Calls into a routine in libgcc to allocate more space from the heap. | |||
28524 | const uint32_t *RegMask = | |||
28525 | Subtarget.getRegisterInfo()->getCallPreservedMask(*MF, CallingConv::C); | |||
28526 | if (IsLP64) { | |||
28527 | BuildMI(mallocMBB, DL, TII->get(X86::MOV64rr), X86::RDI) | |||
28528 | .addReg(sizeVReg); | |||
28529 | BuildMI(mallocMBB, DL, TII->get(X86::CALL64pcrel32)) | |||
28530 | .addExternalSymbol("__morestack_allocate_stack_space") | |||
28531 | .addRegMask(RegMask) | |||
28532 | .addReg(X86::RDI, RegState::Implicit) | |||
28533 | .addReg(X86::RAX, RegState::ImplicitDefine); | |||
28534 | } else if (Is64Bit) { | |||
28535 | BuildMI(mallocMBB, DL, TII->get(X86::MOV32rr), X86::EDI) | |||
28536 | .addReg(sizeVReg); | |||
28537 | BuildMI(mallocMBB, DL, TII->get(X86::CALL64pcrel32)) | |||
28538 | .addExternalSymbol("__morestack_allocate_stack_space") | |||
28539 | .addRegMask(RegMask) | |||
28540 | .addReg(X86::EDI, RegState::Implicit) | |||
28541 | .addReg(X86::EAX, RegState::ImplicitDefine); | |||
28542 | } else { | |||
28543 | BuildMI(mallocMBB, DL, TII->get(X86::SUB32ri), physSPReg).addReg(physSPReg) | |||
28544 | .addImm(12); | |||
28545 | BuildMI(mallocMBB, DL, TII->get(X86::PUSH32r)).addReg(sizeVReg); | |||
28546 | BuildMI(mallocMBB, DL, TII->get(X86::CALLpcrel32)) | |||
28547 | .addExternalSymbol("__morestack_allocate_stack_space") | |||
28548 | .addRegMask(RegMask) | |||
28549 | .addReg(X86::EAX, RegState::ImplicitDefine); | |||
28550 | } | |||
28551 | ||||
28552 | if (!Is64Bit) | |||
28553 | BuildMI(mallocMBB, DL, TII->get(X86::ADD32ri), physSPReg).addReg(physSPReg) | |||
28554 | .addImm(16); | |||
28555 | ||||
28556 | BuildMI(mallocMBB, DL, TII->get(TargetOpcode::COPY), mallocPtrVReg) | |||
28557 | .addReg(IsLP64 ? X86::RAX : X86::EAX); | |||
28558 | BuildMI(mallocMBB, DL, TII->get(X86::JMP_1)).addMBB(continueMBB); | |||
28559 | ||||
28560 | // Set up the CFG correctly. | |||
28561 | BB->addSuccessor(bumpMBB); | |||
28562 | BB->addSuccessor(mallocMBB); | |||
28563 | mallocMBB->addSuccessor(continueMBB); | |||
28564 | bumpMBB->addSuccessor(continueMBB); | |||
28565 | ||||
28566 | // Take care of the PHI nodes. | |||
28567 | BuildMI(*continueMBB, continueMBB->begin(), DL, TII->get(X86::PHI), | |||
28568 | MI.getOperand(0).getReg()) | |||
28569 | .addReg(mallocPtrVReg) | |||
28570 | .addMBB(mallocMBB) | |||
28571 | .addReg(bumpSPPtrVReg) | |||
28572 | .addMBB(bumpMBB); | |||
28573 | ||||
28574 | // Delete the original pseudo instruction. | |||
28575 | MI.eraseFromParent(); | |||
28576 | ||||
28577 | // And we're done. | |||
28578 | return continueMBB; | |||
28579 | } | |||
28580 | ||||
28581 | MachineBasicBlock * | |||
28582 | X86TargetLowering::EmitLoweredCatchRet(MachineInstr &MI, | |||
28583 | MachineBasicBlock *BB) const { | |||
28584 | MachineFunction *MF = BB->getParent(); | |||
28585 | const TargetInstrInfo &TII = *Subtarget.getInstrInfo(); | |||
28586 | MachineBasicBlock *TargetMBB = MI.getOperand(0).getMBB(); | |||
28587 | DebugLoc DL = MI.getDebugLoc(); | |||
28588 | ||||
  assert(!isAsynchronousEHPersonality(
             classifyEHPersonality(MF->getFunction().getPersonalityFn())) &&
         "SEH does not use catchret!");
28592 | ||||
28593 | // Only 32-bit EH needs to worry about manually restoring stack pointers. | |||
28594 | if (!Subtarget.is32Bit()) | |||
28595 | return BB; | |||
28596 | ||||
28597 | // C++ EH creates a new target block to hold the restore code, and wires up | |||
28598 | // the new block to the return destination with a normal JMP_4. | |||
28599 | MachineBasicBlock *RestoreMBB = | |||
28600 | MF->CreateMachineBasicBlock(BB->getBasicBlock()); | |||
  assert(BB->succ_size() == 1);
28602 | MF->insert(std::next(BB->getIterator()), RestoreMBB); | |||
28603 | RestoreMBB->transferSuccessorsAndUpdatePHIs(BB); | |||
28604 | BB->addSuccessor(RestoreMBB); | |||
28605 | MI.getOperand(0).setMBB(RestoreMBB); | |||
28606 | ||||
28607 | auto RestoreMBBI = RestoreMBB->begin(); | |||
28608 | BuildMI(*RestoreMBB, RestoreMBBI, DL, TII.get(X86::EH_RESTORE)); | |||
28609 | BuildMI(*RestoreMBB, RestoreMBBI, DL, TII.get(X86::JMP_4)).addMBB(TargetMBB); | |||
28610 | return BB; | |||
28611 | } | |||
28612 | ||||
28613 | MachineBasicBlock * | |||
28614 | X86TargetLowering::EmitLoweredCatchPad(MachineInstr &MI, | |||
28615 | MachineBasicBlock *BB) const { | |||
28616 | MachineFunction *MF = BB->getParent(); | |||
28617 | const Constant *PerFn = MF->getFunction().getPersonalityFn(); | |||
28618 | bool IsSEH = isAsynchronousEHPersonality(classifyEHPersonality(PerFn)); | |||
28619 | // Only 32-bit SEH requires special handling for catchpad. | |||
28620 | if (IsSEH && Subtarget.is32Bit()) { | |||
28621 | const TargetInstrInfo &TII = *Subtarget.getInstrInfo(); | |||
28622 | DebugLoc DL = MI.getDebugLoc(); | |||
28623 | BuildMI(*BB, MI, DL, TII.get(X86::EH_RESTORE)); | |||
28624 | } | |||
28625 | MI.eraseFromParent(); | |||
28626 | return BB; | |||
28627 | } | |||
28628 | ||||
28629 | MachineBasicBlock * | |||
28630 | X86TargetLowering::EmitLoweredTLSAddr(MachineInstr &MI, | |||
28631 | MachineBasicBlock *BB) const { | |||
  // Here we replace TLSADDR with the sequence:
  //   adjust_stackdown -> TLSADDR -> adjust_stackup.
  // We need this because TLSADDR is lowered into a call inside MC;
  // without the two markers, shrink-wrapping could move the
  // prologue/epilogue past them.
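  // After this runs, the block contains, schematically (64-bit opcode names
  // shown; 32-bit targets use the 32-bit equivalents):
  //   ADJCALLSTACKDOWN64 0, 0, 0
  //   TLSADDR ...            # becomes a __tls_get_addr call in MC on ELF
  //   ADJCALLSTACKUP64 0, 0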
28637 | const TargetInstrInfo &TII = *Subtarget.getInstrInfo(); | |||
28638 | DebugLoc DL = MI.getDebugLoc(); | |||
28639 | MachineFunction &MF = *BB->getParent(); | |||
28640 | ||||
28641 | // Emit CALLSEQ_START right before the instruction. | |||
28642 | unsigned AdjStackDown = TII.getCallFrameSetupOpcode(); | |||
28643 | MachineInstrBuilder CallseqStart = | |||
28644 | BuildMI(MF, DL, TII.get(AdjStackDown)).addImm(0).addImm(0).addImm(0); | |||
28645 | BB->insert(MachineBasicBlock::iterator(MI), CallseqStart); | |||
28646 | ||||
28647 | // Emit CALLSEQ_END right after the instruction. | |||
28648 | // We don't call erase from parent because we want to keep the | |||
28649 | // original instruction around. | |||
28650 | unsigned AdjStackUp = TII.getCallFrameDestroyOpcode(); | |||
28651 | MachineInstrBuilder CallseqEnd = | |||
28652 | BuildMI(MF, DL, TII.get(AdjStackUp)).addImm(0).addImm(0); | |||
28653 | BB->insertAfter(MachineBasicBlock::iterator(MI), CallseqEnd); | |||
28654 | ||||
28655 | return BB; | |||
28656 | } | |||
28657 | ||||
28658 | MachineBasicBlock * | |||
28659 | X86TargetLowering::EmitLoweredTLSCall(MachineInstr &MI, | |||
28660 | MachineBasicBlock *BB) const { | |||
  // This is pretty easy. We take the value we loaded from the relocation,
  // stick it in either RDI (x86-64) or EAX, and make an indirect call. The
  // return value will then be in the normal return register.
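  // On x86-64, for example, the result is roughly:
  //   movq _var@TLVP(%rip), %rdi
  //   callq *(%rdi)           # Darwin TLV helper; result lands in %rax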
28665 | MachineFunction *F = BB->getParent(); | |||
28666 | const X86InstrInfo *TII = Subtarget.getInstrInfo(); | |||
28667 | DebugLoc DL = MI.getDebugLoc(); | |||
28668 | ||||
  assert(Subtarget.isTargetDarwin() && "Darwin only instr emitted?");
  assert(MI.getOperand(3).isGlobal() && "This should be a global");
28671 | ||||
28672 | // Get a register mask for the lowered call. | |||
28673 | // FIXME: The 32-bit calls have non-standard calling conventions. Use a | |||
28674 | // proper register mask. | |||
28675 | const uint32_t *RegMask = | |||
28676 | Subtarget.is64Bit() ? | |||
28677 | Subtarget.getRegisterInfo()->getDarwinTLSCallPreservedMask() : | |||
28678 | Subtarget.getRegisterInfo()->getCallPreservedMask(*F, CallingConv::C); | |||
28679 | if (Subtarget.is64Bit()) { | |||
28680 | MachineInstrBuilder MIB = | |||
28681 | BuildMI(*BB, MI, DL, TII->get(X86::MOV64rm), X86::RDI) | |||
28682 | .addReg(X86::RIP) | |||
28683 | .addImm(0) | |||
28684 | .addReg(0) | |||
28685 | .addGlobalAddress(MI.getOperand(3).getGlobal(), 0, | |||
28686 | MI.getOperand(3).getTargetFlags()) | |||
28687 | .addReg(0); | |||
28688 | MIB = BuildMI(*BB, MI, DL, TII->get(X86::CALL64m)); | |||
28689 | addDirectMem(MIB, X86::RDI); | |||
28690 | MIB.addReg(X86::RAX, RegState::ImplicitDefine).addRegMask(RegMask); | |||
28691 | } else if (!isPositionIndependent()) { | |||
28692 | MachineInstrBuilder MIB = | |||
28693 | BuildMI(*BB, MI, DL, TII->get(X86::MOV32rm), X86::EAX) | |||
28694 | .addReg(0) | |||
28695 | .addImm(0) | |||
28696 | .addReg(0) | |||
28697 | .addGlobalAddress(MI.getOperand(3).getGlobal(), 0, | |||
28698 | MI.getOperand(3).getTargetFlags()) | |||
28699 | .addReg(0); | |||
28700 | MIB = BuildMI(*BB, MI, DL, TII->get(X86::CALL32m)); | |||
28701 | addDirectMem(MIB, X86::EAX); | |||
28702 | MIB.addReg(X86::EAX, RegState::ImplicitDefine).addRegMask(RegMask); | |||
28703 | } else { | |||
28704 | MachineInstrBuilder MIB = | |||
28705 | BuildMI(*BB, MI, DL, TII->get(X86::MOV32rm), X86::EAX) | |||
28706 | .addReg(TII->getGlobalBaseReg(F)) | |||
28707 | .addImm(0) | |||
28708 | .addReg(0) | |||
28709 | .addGlobalAddress(MI.getOperand(3).getGlobal(), 0, | |||
28710 | MI.getOperand(3).getTargetFlags()) | |||
28711 | .addReg(0); | |||
28712 | MIB = BuildMI(*BB, MI, DL, TII->get(X86::CALL32m)); | |||
28713 | addDirectMem(MIB, X86::EAX); | |||
28714 | MIB.addReg(X86::EAX, RegState::ImplicitDefine).addRegMask(RegMask); | |||
28715 | } | |||
28716 | ||||
28717 | MI.eraseFromParent(); // The pseudo instruction is gone now. | |||
28718 | return BB; | |||
28719 | } | |||
28720 | ||||
28721 | static unsigned getOpcodeForRetpoline(unsigned RPOpc) { | |||
28722 | switch (RPOpc) { | |||
28723 | case X86::RETPOLINE_CALL32: | |||
28724 | return X86::CALLpcrel32; | |||
28725 | case X86::RETPOLINE_CALL64: | |||
28726 | return X86::CALL64pcrel32; | |||
28727 | case X86::RETPOLINE_TCRETURN32: | |||
28728 | return X86::TCRETURNdi; | |||
28729 | case X86::RETPOLINE_TCRETURN64: | |||
28730 | return X86::TCRETURNdi64; | |||
28731 | } | |||
28732 | llvm_unreachable("not retpoline opcode")::llvm::llvm_unreachable_internal("not retpoline opcode", "/build/llvm-toolchain-snapshot-8~svn350071/lib/Target/X86/X86ISelLowering.cpp" , 28732); | |||
28733 | } | |||
28734 | ||||
static const char *getRetpolineSymbol(const X86Subtarget &Subtarget,
                                      unsigned Reg) {
  if (Subtarget.useRetpolineExternalThunk()) {
    // When using an external thunk for retpolines, we pick names that match
    // the names GCC happens to use as well. This helps simplify the
    // implementation of the thunks for kernels where they have no easy
    // ability to create aliases and are doing non-trivial configuration of
    // the thunk's body. For example, the Linux kernel will do boot-time hot
    // patching of the thunk bodies and cannot easily export aliases of these
    // to loaded modules.
    //
    // Note that at any point in the future, we may need to change the
    // semantics of how we implement retpolines and at that time will likely
    // change the name of the called thunk. Essentially, there is no hard
    // guarantee that LLVM will generate calls to specific thunks; we merely
    // make a best-effort attempt to help out kernels and other systems where
    // duplicating the thunks is costly.
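    //
    // For reference, a typical thunk body looks roughly like the sketch
    // below (illustrative only; the exact code is up to whoever provides the
    // thunk). It traps speculative execution of the indirect branch in a
    // pause/lfence loop while the architectural execution "returns" to the
    // address held in the register:
    //
    //   __x86_indirect_thunk_r11:
    //     call .Lentry
    //   .Lloop:
    //     pause
    //     lfence
    //     jmp .Lloop
    //   .Lentry:
    //     mov %r11, (%rsp)   # overwrite the speculated return address
    //     ret                # architecturally jumps to *%r11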
    switch (Reg) {
    case X86::EAX:
      assert(!Subtarget.is64Bit() && "Should not be using a 32-bit thunk!");
      return "__x86_indirect_thunk_eax";
    case X86::ECX:
      assert(!Subtarget.is64Bit() && "Should not be using a 32-bit thunk!");
      return "__x86_indirect_thunk_ecx";
    case X86::EDX:
      assert(!Subtarget.is64Bit() && "Should not be using a 32-bit thunk!");
      return "__x86_indirect_thunk_edx";
    case X86::EDI:
      assert(!Subtarget.is64Bit() && "Should not be using a 32-bit thunk!");
      return "__x86_indirect_thunk_edi";
    case X86::R11:
      assert(Subtarget.is64Bit() && "Should not be using a 64-bit thunk!");
      return "__x86_indirect_thunk_r11";
    }
    llvm_unreachable("unexpected reg for retpoline");
  }

  // When targeting an internal COMDAT thunk use an LLVM-specific name.
  switch (Reg) {
  case X86::EAX:
    assert(!Subtarget.is64Bit() && "Should not be using a 32-bit thunk!");
    return "__llvm_retpoline_eax";
  case X86::ECX:
    assert(!Subtarget.is64Bit() && "Should not be using a 32-bit thunk!");
    return "__llvm_retpoline_ecx";
  case X86::EDX:
    assert(!Subtarget.is64Bit() && "Should not be using a 32-bit thunk!");
    return "__llvm_retpoline_edx";
  case X86::EDI:
    assert(!Subtarget.is64Bit() && "Should not be using a 32-bit thunk!");
    return "__llvm_retpoline_edi";
  case X86::R11:
    assert(Subtarget.is64Bit() && "Should not be using a 64-bit thunk!");
    return "__llvm_retpoline_r11";
  }
  llvm_unreachable("unexpected reg for retpoline");
}

MachineBasicBlock *
X86TargetLowering::EmitLoweredRetpoline(MachineInstr &MI,
                                        MachineBasicBlock *BB) const {
  // Copy the virtual register holding the callee into an available physical
  // scratch register and call the retpoline thunk for that register.
  DebugLoc DL = MI.getDebugLoc();
  const X86InstrInfo *TII = Subtarget.getInstrInfo();
  unsigned CalleeVReg = MI.getOperand(0).getReg();
  unsigned Opc = getOpcodeForRetpoline(MI.getOpcode());

  // Find an available scratch register to hold the callee. On 64-bit, we can
  // just use R11, but we scan for uses anyway to ensure we don't generate
  // incorrect code. On 32-bit, we use one of EAX, ECX, or EDX that isn't
  // already a register use operand to the call to hold the callee. If none
  // are available, use EDI instead. EDI is chosen because EBX is the PIC base
  // register and ESI is the base pointer to realigned stack frames with VLAs.
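  //
  // For example (a sketch, not the only possible output), a 64-bit
  // RETPOLINE_CALL64 with the callee in %vreg typically lowers to:
  //   movq  %vreg, %r11
  //   callq __llvm_retpoline_r11    # or __x86_indirect_thunk_r11
  // i.e. the indirect call is rewritten into a direct call to the thunk.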
  SmallVector<unsigned, 3> AvailableRegs;
  if (Subtarget.is64Bit())
    AvailableRegs.push_back(X86::R11);
  else
    AvailableRegs.append({X86::EAX, X86::ECX, X86::EDX, X86::EDI});

  // Zero out any registers that are already used.
  for (const auto &MO : MI.operands()) {
    if (MO.isReg() && MO.isUse())
      for (unsigned &Reg : AvailableRegs)
        if (Reg == MO.getReg())
          Reg = 0;
  }

  // Choose the first remaining non-zero available register.
  unsigned AvailableReg = 0;
  for (unsigned MaybeReg : AvailableRegs) {
    if (MaybeReg) {
      AvailableReg = MaybeReg;
      break;
    }
  }
  if (!AvailableReg)
    report_fatal_error("calling convention incompatible with retpoline, no "
                       "available registers");

  const char *Symbol = getRetpolineSymbol(Subtarget, AvailableReg);

  BuildMI(*BB, MI, DL, TII->get(TargetOpcode::COPY), AvailableReg)
      .addReg(CalleeVReg);
  MI.getOperand(0).ChangeToES(Symbol);
  MI.setDesc(TII->get(Opc));
  MachineInstrBuilder(*BB->getParent(), &MI)
      .addReg(AvailableReg, RegState::Implicit | RegState::Kill);
  return BB;
}

/// SetJmp implies a future control-flow change upon calling the corresponding
/// LongJmp.
/// Instead of using the 'return' instruction, the long jump fixes the stack
/// and performs an indirect branch. To do so it uses the registers that were
/// stored in the jump buffer (when calling SetJmp).
/// In case the shadow stack is enabled, we need to fix it as well, because
/// some return addresses will be skipped.
/// This function saves the SSP so that emitLongJmpShadowStackFix can fix the
/// shadow stack later.
/// \sa emitLongJmpShadowStackFix
/// \param [in] MI The temporary Machine Instruction for the builtin.
/// \param [in] MBB The Machine Basic Block that will be modified.
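/// The jump buffer layout assumed here, in pointer-sized slots (inferred
/// from the offsets used by the setjmp/longjmp lowerings below; an
/// illustration, not a documented ABI guarantee):
///   buf[0] = frame pointer
///   buf[1] = resume address (the label of restoreMBB)
///   buf[2] = stack pointer
///   buf[3] = shadow stack pointer (only with the CET shadow stack)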
void X86TargetLowering::emitSetJmpShadowStackFix(MachineInstr &MI,
                                                 MachineBasicBlock *MBB) const {
  DebugLoc DL = MI.getDebugLoc();
  MachineFunction *MF = MBB->getParent();
  const TargetInstrInfo *TII = Subtarget.getInstrInfo();
  MachineRegisterInfo &MRI = MF->getRegInfo();
  MachineInstrBuilder MIB;

  // Memory Reference.
  SmallVector<MachineMemOperand *, 2> MMOs(MI.memoperands_begin(),
                                           MI.memoperands_end());

  // Initialize a register with zero.
  MVT PVT = getPointerTy(MF->getDataLayout());
  const TargetRegisterClass *PtrRC = getRegClassFor(PVT);
  unsigned ZReg = MRI.createVirtualRegister(PtrRC);
  unsigned XorRROpc = (PVT == MVT::i64) ? X86::XOR64rr : X86::XOR32rr;
  BuildMI(*MBB, MI, DL, TII->get(XorRROpc))
      .addDef(ZReg)
      .addReg(ZReg, RegState::Undef)
      .addReg(ZReg, RegState::Undef);

  // Read the current SSP Register value to the zeroed register.
  unsigned SSPCopyReg = MRI.createVirtualRegister(PtrRC);
  unsigned RdsspOpc = (PVT == MVT::i64) ? X86::RDSSPQ : X86::RDSSPD;
  BuildMI(*MBB, MI, DL, TII->get(RdsspOpc), SSPCopyReg).addReg(ZReg);

  // Write the SSP register value to offset 3 in the input memory buffer.
  unsigned PtrStoreOpc = (PVT == MVT::i64) ? X86::MOV64mr : X86::MOV32mr;
  MIB = BuildMI(*MBB, MI, DL, TII->get(PtrStoreOpc));
  const int64_t SSPOffset = 3 * PVT.getStoreSize();
  const unsigned MemOpndSlot = 1;
  for (unsigned i = 0; i < X86::AddrNumOperands; ++i) {
    if (i == X86::AddrDisp)
      MIB.addDisp(MI.getOperand(MemOpndSlot + i), SSPOffset);
    else
      MIB.add(MI.getOperand(MemOpndSlot + i));
  }
  MIB.addReg(SSPCopyReg);
  MIB.setMemRefs(MMOs);
}

MachineBasicBlock *
X86TargetLowering::emitEHSjLjSetJmp(MachineInstr &MI,
                                    MachineBasicBlock *MBB) const {
  DebugLoc DL = MI.getDebugLoc();
  MachineFunction *MF = MBB->getParent();
  const TargetInstrInfo *TII = Subtarget.getInstrInfo();
  const TargetRegisterInfo *TRI = Subtarget.getRegisterInfo();
  MachineRegisterInfo &MRI = MF->getRegInfo();

  const BasicBlock *BB = MBB->getBasicBlock();
  MachineFunction::iterator I = ++MBB->getIterator();

  // Memory Reference
  SmallVector<MachineMemOperand *, 2> MMOs(MI.memoperands_begin(),
                                           MI.memoperands_end());

  unsigned DstReg;
  unsigned MemOpndSlot = 0;

  unsigned CurOp = 0;

  DstReg = MI.getOperand(CurOp++).getReg();
  const TargetRegisterClass *RC = MRI.getRegClass(DstReg);
  assert(TRI->isTypeLegalForClass(*RC, MVT::i32) && "Invalid destination!");
  (void)TRI;
  unsigned mainDstReg = MRI.createVirtualRegister(RC);
  unsigned restoreDstReg = MRI.createVirtualRegister(RC);

  MemOpndSlot = CurOp;

  MVT PVT = getPointerTy(MF->getDataLayout());
  assert((PVT == MVT::i64 || PVT == MVT::i32) &&
         "Invalid Pointer Size!");

  // For v = setjmp(buf), we generate
  //
  // thisMBB:
  //  buf[LabelOffset] = restoreMBB <-- takes address of restoreMBB
  //  SjLjSetup restoreMBB
  //
  // mainMBB:
  //  v_main = 0
  //
  // sinkMBB:
  //  v = phi(main, restore)
  //
  // restoreMBB:
  //  if base pointer being used, load it from frame
  //  v_restore = 1

  MachineBasicBlock *thisMBB = MBB;
  MachineBasicBlock *mainMBB = MF->CreateMachineBasicBlock(BB);
  MachineBasicBlock *sinkMBB = MF->CreateMachineBasicBlock(BB);
  MachineBasicBlock *restoreMBB = MF->CreateMachineBasicBlock(BB);
  MF->insert(I, mainMBB);
  MF->insert(I, sinkMBB);
  MF->push_back(restoreMBB);
  restoreMBB->setHasAddressTaken();

  MachineInstrBuilder MIB;

  // Transfer the remainder of BB and its successor edges to sinkMBB.
  sinkMBB->splice(sinkMBB->begin(), MBB,
                  std::next(MachineBasicBlock::iterator(MI)), MBB->end());
  sinkMBB->transferSuccessorsAndUpdatePHIs(MBB);

  // thisMBB:
  unsigned PtrStoreOpc = 0;
  unsigned LabelReg = 0;
  const int64_t LabelOffset = 1 * PVT.getStoreSize();
  bool UseImmLabel = (MF->getTarget().getCodeModel() == CodeModel::Small) &&
                     !isPositionIndependent();

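  // Note (for illustration): with the small code model and no PIC, the
  // address of restoreMBB is known to fit in a 32-bit immediate, so it can
  // be stored into the buffer directly. Otherwise it must first be
  // materialized in a register, via a RIP-relative LEA on 64-bit or an LEA
  // off the PIC base register on 32-bit.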
  // Prepare IP either in reg or imm.
  if (!UseImmLabel) {
    PtrStoreOpc = (PVT == MVT::i64) ? X86::MOV64mr : X86::MOV32mr;
    const TargetRegisterClass *PtrRC = getRegClassFor(PVT);
    LabelReg = MRI.createVirtualRegister(PtrRC);
    if (Subtarget.is64Bit()) {
      MIB = BuildMI(*thisMBB, MI, DL, TII->get(X86::LEA64r), LabelReg)
                .addReg(X86::RIP)
                .addImm(0)
                .addReg(0)
                .addMBB(restoreMBB)
                .addReg(0);
    } else {
      const X86InstrInfo *XII = static_cast<const X86InstrInfo*>(TII);
      MIB = BuildMI(*thisMBB, MI, DL, TII->get(X86::LEA32r), LabelReg)
                .addReg(XII->getGlobalBaseReg(MF))
                .addImm(0)
                .addReg(0)
                .addMBB(restoreMBB, Subtarget.classifyBlockAddressReference())
                .addReg(0);
    }
  } else
    PtrStoreOpc = (PVT == MVT::i64) ? X86::MOV64mi32 : X86::MOV32mi;
  // Store IP
  MIB = BuildMI(*thisMBB, MI, DL, TII->get(PtrStoreOpc));
  for (unsigned i = 0; i < X86::AddrNumOperands; ++i) {
    if (i == X86::AddrDisp)
      MIB.addDisp(MI.getOperand(MemOpndSlot + i), LabelOffset);
    else
      MIB.add(MI.getOperand(MemOpndSlot + i));
  }
  if (!UseImmLabel)
    MIB.addReg(LabelReg);
  else
    MIB.addMBB(restoreMBB);
  MIB.setMemRefs(MMOs);

  if (MF->getMMI().getModule()->getModuleFlag("cf-protection-return")) {
    emitSetJmpShadowStackFix(MI, thisMBB);
  }

  // Setup
  MIB = BuildMI(*thisMBB, MI, DL, TII->get(X86::EH_SjLj_Setup))
            .addMBB(restoreMBB);

  const X86RegisterInfo *RegInfo = Subtarget.getRegisterInfo();
  MIB.addRegMask(RegInfo->getNoPreservedMask());
  thisMBB->addSuccessor(mainMBB);
  thisMBB->addSuccessor(restoreMBB);

  // mainMBB:
  //  EAX = 0
  BuildMI(mainMBB, DL, TII->get(X86::MOV32r0), mainDstReg);
  mainMBB->addSuccessor(sinkMBB);

  // sinkMBB:
  BuildMI(*sinkMBB, sinkMBB->begin(), DL,
          TII->get(X86::PHI), DstReg)
      .addReg(mainDstReg).addMBB(mainMBB)
      .addReg(restoreDstReg).addMBB(restoreMBB);

  // restoreMBB:
  if (RegInfo->hasBasePointer(*MF)) {
    const bool Uses64BitFramePtr =
        Subtarget.isTarget64BitLP64() || Subtarget.isTargetNaCl64();
    X86MachineFunctionInfo *X86FI = MF->getInfo<X86MachineFunctionInfo>();
    X86FI->setRestoreBasePointer(MF);
    unsigned FramePtr = RegInfo->getFrameRegister(*MF);
    unsigned BasePtr = RegInfo->getBaseRegister();
    unsigned Opm = Uses64BitFramePtr ? X86::MOV64rm : X86::MOV32rm;
    addRegOffset(BuildMI(restoreMBB, DL, TII->get(Opm), BasePtr),
                 FramePtr, true, X86FI->getRestoreBasePointerOffset())
        .setMIFlag(MachineInstr::FrameSetup);
  }
  BuildMI(restoreMBB, DL, TII->get(X86::MOV32ri), restoreDstReg).addImm(1);
  BuildMI(restoreMBB, DL, TII->get(X86::JMP_1)).addMBB(sinkMBB);
  restoreMBB->addSuccessor(sinkMBB);

  MI.eraseFromParent();
  return sinkMBB;
}

/// Fix the shadow stack using the previously saved SSP pointer.
/// \sa emitSetJmpShadowStackFix
/// \param [in] MI The temporary Machine Instruction for the builtin.
/// \param [in] MBB The Machine Basic Block that will be modified.
/// \return The sink MBB that will perform the future indirect branch.
MachineBasicBlock *
X86TargetLowering::emitLongJmpShadowStackFix(MachineInstr &MI,
                                             MachineBasicBlock *MBB) const {
  DebugLoc DL = MI.getDebugLoc();
  MachineFunction *MF = MBB->getParent();
  const TargetInstrInfo *TII = Subtarget.getInstrInfo();
  MachineRegisterInfo &MRI = MF->getRegInfo();

  // Memory Reference
  SmallVector<MachineMemOperand *, 2> MMOs(MI.memoperands_begin(),
                                           MI.memoperands_end());

  MVT PVT = getPointerTy(MF->getDataLayout());
  const TargetRegisterClass *PtrRC = getRegClassFor(PVT);

  // checkSspMBB:
  //         xor vreg1, vreg1
  //         rdssp vreg1
  //         test vreg1, vreg1
  //         je sinkMBB   # Jump if Shadow Stack is not supported
  // fallMBB:
  //         mov buf+24/12(%rip), vreg2
  //         sub vreg1, vreg2
  //         jbe sinkMBB  # No need to fix the Shadow Stack
  // fixShadowMBB:
  //         shr 3/2, vreg2
  //         incssp vreg2 # fix the SSP according to the lower 8 bits
  //         shr 8, vreg2
  //         je sinkMBB
  // fixShadowLoopPrepareMBB:
  //         shl vreg2
  //         mov 128, vreg3
  // fixShadowLoopMBB:
  //         incssp vreg3
  //         dec vreg2
  //         jne fixShadowLoopMBB # Iterate until you finish fixing
  //                              # the Shadow Stack
  // sinkMBB:
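  //
  // In effect (a sketch of the arithmetic): with Delta = PrevSSP - CurSSP in
  // bytes, the number of shadow-stack entries to pop is N = Delta >> 3 (or
  // Delta >> 2 on 32-bit). A single incssp consumes only the low 8 bits of
  // its operand, so the low 8 bits of N are handled once, and the remaining
  // (N >> 8) << 1 loop iterations each advance the SSP by 128 entries, since
  // ((N >> 8) << 1) * 128 == N & ~0xFF. For example, N = 0x246 gives one
  // incssp of 0x46, then 4 iterations of incssp 128 for the 0x200 remaining
  // entries.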

  MachineFunction::iterator I = ++MBB->getIterator();
  const BasicBlock *BB = MBB->getBasicBlock();

  MachineBasicBlock *checkSspMBB = MF->CreateMachineBasicBlock(BB);
  MachineBasicBlock *fallMBB = MF->CreateMachineBasicBlock(BB);
  MachineBasicBlock *fixShadowMBB = MF->CreateMachineBasicBlock(BB);
  MachineBasicBlock *fixShadowLoopPrepareMBB = MF->CreateMachineBasicBlock(BB);
  MachineBasicBlock *fixShadowLoopMBB = MF->CreateMachineBasicBlock(BB);
  MachineBasicBlock *sinkMBB = MF->CreateMachineBasicBlock(BB);
  MF->insert(I, checkSspMBB);
  MF->insert(I, fallMBB);
  MF->insert(I, fixShadowMBB);
  MF->insert(I, fixShadowLoopPrepareMBB);
  MF->insert(I, fixShadowLoopMBB);
  MF->insert(I, sinkMBB);

  // Transfer the remainder of BB and its successor edges to sinkMBB.
  sinkMBB->splice(sinkMBB->begin(), MBB, MachineBasicBlock::iterator(MI),
                  MBB->end());
  sinkMBB->transferSuccessorsAndUpdatePHIs(MBB);

  MBB->addSuccessor(checkSspMBB);

  // Initialize a register with zero.
  unsigned ZReg = MRI.createVirtualRegister(PtrRC);
  unsigned XorRROpc = (PVT == MVT::i64) ? X86::XOR64rr : X86::XOR32rr;
  BuildMI(checkSspMBB, DL, TII->get(XorRROpc))
      .addDef(ZReg)
      .addReg(ZReg, RegState::Undef)
      .addReg(ZReg, RegState::Undef);

  // Read the current SSP Register value to the zeroed register.
  unsigned SSPCopyReg = MRI.createVirtualRegister(PtrRC);
  unsigned RdsspOpc = (PVT == MVT::i64) ? X86::RDSSPQ : X86::RDSSPD;
  BuildMI(checkSspMBB, DL, TII->get(RdsspOpc), SSPCopyReg).addReg(ZReg);

  // If the SSP value read back as zero, the shadow stack is not supported;
  // jump directly to the sink.
  unsigned TestRROpc = (PVT == MVT::i64) ? X86::TEST64rr : X86::TEST32rr;
  BuildMI(checkSspMBB, DL, TII->get(TestRROpc))
      .addReg(SSPCopyReg)
      .addReg(SSPCopyReg);
  BuildMI(checkSspMBB, DL, TII->get(X86::JE_1)).addMBB(sinkMBB);
  checkSspMBB->addSuccessor(sinkMBB);
  checkSspMBB->addSuccessor(fallMBB);

  // Reload the previously saved SSP register value.
  unsigned PrevSSPReg = MRI.createVirtualRegister(PtrRC);
  unsigned PtrLoadOpc = (PVT == MVT::i64) ? X86::MOV64rm : X86::MOV32rm;
  const int64_t SSPOffset = 3 * PVT.getStoreSize();
  MachineInstrBuilder MIB =
      BuildMI(fallMBB, DL, TII->get(PtrLoadOpc), PrevSSPReg);
  for (unsigned i = 0; i < X86::AddrNumOperands; ++i) {
    const MachineOperand &MO = MI.getOperand(i);
    if (i == X86::AddrDisp)
      MIB.addDisp(MO, SSPOffset);
    else if (MO.isReg()) // Don't add the whole operand, we don't want to
                         // preserve kill flags.
      MIB.addReg(MO.getReg());
    else
      MIB.add(MO);
  }
  MIB.setMemRefs(MMOs);

  // Subtract the current SSP from the previous SSP.
  unsigned SspSubReg = MRI.createVirtualRegister(PtrRC);
  unsigned SubRROpc = (PVT == MVT::i64) ? X86::SUB64rr : X86::SUB32rr;
  BuildMI(fallMBB, DL, TII->get(SubRROpc), SspSubReg)
      .addReg(PrevSSPReg)
      .addReg(SSPCopyReg);

  // Jump to sink in case PrevSSPReg <= SSPCopyReg.
  BuildMI(fallMBB, DL, TII->get(X86::JBE_1)).addMBB(sinkMBB);
  fallMBB->addSuccessor(sinkMBB);
  fallMBB->addSuccessor(fixShadowMBB);

  // Shift right by 2/3 for 32/64 because incssp multiplies the argument by
  // 4/8.
  unsigned ShrRIOpc = (PVT == MVT::i64) ? X86::SHR64ri : X86::SHR32ri;
  unsigned Offset = (PVT == MVT::i64) ? 3 : 2;
  unsigned SspFirstShrReg = MRI.createVirtualRegister(PtrRC);
  BuildMI(fixShadowMBB, DL, TII->get(ShrRIOpc), SspFirstShrReg)
      .addReg(SspSubReg)
      .addImm(Offset);

  // Increase the SSP using only the lower 8 bits of the delta.
  unsigned IncsspOpc = (PVT == MVT::i64) ? X86::INCSSPQ : X86::INCSSPD;
  BuildMI(fixShadowMBB, DL, TII->get(IncsspOpc)).addReg(SspFirstShrReg);

  // Reset the lower 8 bits.
  unsigned SspSecondShrReg = MRI.createVirtualRegister(PtrRC);
  BuildMI(fixShadowMBB, DL, TII->get(ShrRIOpc), SspSecondShrReg)
      .addReg(SspFirstShrReg)
      .addImm(8);

  // Jump if the result of the shift is zero.
  BuildMI(fixShadowMBB, DL, TII->get(X86::JE_1)).addMBB(sinkMBB);
  fixShadowMBB->addSuccessor(sinkMBB);
  fixShadowMBB->addSuccessor(fixShadowLoopPrepareMBB);

  // Do a single shift left.
  unsigned ShlR1Opc = (PVT == MVT::i64) ? X86::SHL64r1 : X86::SHL32r1;
  unsigned SspAfterShlReg = MRI.createVirtualRegister(PtrRC);
  BuildMI(fixShadowLoopPrepareMBB, DL, TII->get(ShlR1Opc), SspAfterShlReg)
      .addReg(SspSecondShrReg);

  // Save the value 128 to a register (will be used next with incssp).
  unsigned Value128InReg = MRI.createVirtualRegister(PtrRC);
  unsigned MovRIOpc = (PVT == MVT::i64) ? X86::MOV64ri32 : X86::MOV32ri;
  BuildMI(fixShadowLoopPrepareMBB, DL, TII->get(MovRIOpc), Value128InReg)
      .addImm(128);
  fixShadowLoopPrepareMBB->addSuccessor(fixShadowLoopMBB);

  // Since incssp only looks at the lower 8 bits, we might need to do several
  // iterations of incssp until we finish fixing the shadow stack.
  unsigned DecReg = MRI.createVirtualRegister(PtrRC);
  unsigned CounterReg = MRI.createVirtualRegister(PtrRC);
  BuildMI(fixShadowLoopMBB, DL, TII->get(X86::PHI), CounterReg)
      .addReg(SspAfterShlReg)
      .addMBB(fixShadowLoopPrepareMBB)
      .addReg(DecReg)
      .addMBB(fixShadowLoopMBB);

  // Every iteration we increase the SSP by 128.
  BuildMI(fixShadowLoopMBB, DL, TII->get(IncsspOpc)).addReg(Value128InReg);

  // Every iteration we decrement the counter by 1.
  unsigned DecROpc = (PVT == MVT::i64) ? X86::DEC64r : X86::DEC32r;
  BuildMI(fixShadowLoopMBB, DL, TII->get(DecROpc), DecReg).addReg(CounterReg);

  // Jump if the counter is not zero yet.
  BuildMI(fixShadowLoopMBB, DL, TII->get(X86::JNE_1)).addMBB(fixShadowLoopMBB);
  fixShadowLoopMBB->addSuccessor(sinkMBB);
  fixShadowLoopMBB->addSuccessor(fixShadowLoopMBB);

  return sinkMBB;
}

MachineBasicBlock *
X86TargetLowering::emitEHSjLjLongJmp(MachineInstr &MI,
                                     MachineBasicBlock *MBB) const {
  DebugLoc DL = MI.getDebugLoc();
  MachineFunction *MF = MBB->getParent();
  const TargetInstrInfo *TII = Subtarget.getInstrInfo();
  MachineRegisterInfo &MRI = MF->getRegInfo();

  // Memory Reference
  SmallVector<MachineMemOperand *, 2> MMOs(MI.memoperands_begin(),
                                           MI.memoperands_end());

  MVT PVT = getPointerTy(MF->getDataLayout());
  assert((PVT == MVT::i64 || PVT == MVT::i32) &&
         "Invalid Pointer Size!");

  const TargetRegisterClass *RC =
      (PVT == MVT::i64) ? &X86::GR64RegClass : &X86::GR32RegClass;
  unsigned Tmp = MRI.createVirtualRegister(RC);
  // Since FP is only updated here but NOT referenced, it's treated as GPR.
  const X86RegisterInfo *RegInfo = Subtarget.getRegisterInfo();
  unsigned FP = (PVT == MVT::i64) ? X86::RBP : X86::EBP;
  unsigned SP = RegInfo->getStackRegister();

  MachineInstrBuilder MIB;

  const int64_t LabelOffset = 1 * PVT.getStoreSize();
  const int64_t SPOffset = 2 * PVT.getStoreSize();

  unsigned PtrLoadOpc = (PVT == MVT::i64) ? X86::MOV64rm : X86::MOV32rm;
  unsigned IJmpOpc = (PVT == MVT::i64) ? X86::JMP64r : X86::JMP32r;

  MachineBasicBlock *thisMBB = MBB;

  // When the CET shadow stack is enabled, we need to fix the Shadow Stack.
  if (MF->getMMI().getModule()->getModuleFlag("cf-protection-return")) {
    thisMBB = emitLongJmpShadowStackFix(MI, thisMBB);
  }

  // Reload FP
  MIB = BuildMI(*thisMBB, MI, DL, TII->get(PtrLoadOpc), FP);
  for (unsigned i = 0; i < X86::AddrNumOperands; ++i) {
    const MachineOperand &MO = MI.getOperand(i);
    if (MO.isReg()) // Don't add the whole operand, we don't want to
                    // preserve kill flags.
      MIB.addReg(MO.getReg());
    else
      MIB.add(MO);
  }
  MIB.setMemRefs(MMOs);

  // Reload IP
  MIB = BuildMI(*thisMBB, MI, DL, TII->get(PtrLoadOpc), Tmp);
  for (unsigned i = 0; i < X86::AddrNumOperands; ++i) {
    const MachineOperand &MO = MI.getOperand(i);
    if (i == X86::AddrDisp)
      MIB.addDisp(MO, LabelOffset);
    else if (MO.isReg()) // Don't add the whole operand, we don't want to
                         // preserve kill flags.
      MIB.addReg(MO.getReg());
    else
      MIB.add(MO);
  }
  MIB.setMemRefs(MMOs);

  // Reload SP
  MIB = BuildMI(*thisMBB, MI, DL, TII->get(PtrLoadOpc), SP);
  for (unsigned i = 0; i < X86::AddrNumOperands; ++i) {
    if (i == X86::AddrDisp)
      MIB.addDisp(MI.getOperand(i), SPOffset);
    else
      MIB.add(MI.getOperand(i)); // We can preserve the kill flags here, it's
                                 // the last instruction of the expansion.
  }
  MIB.setMemRefs(MMOs);

  // Jump
  BuildMI(*thisMBB, MI, DL, TII->get(IJmpOpc)).addReg(Tmp);

  MI.eraseFromParent();
  return thisMBB;
}

void X86TargetLowering::SetupEntryBlockForSjLj(MachineInstr &MI,
                                               MachineBasicBlock *MBB,
                                               MachineBasicBlock *DispatchBB,
                                               int FI) const {
  DebugLoc DL = MI.getDebugLoc();
  MachineFunction *MF = MBB->getParent();
  MachineRegisterInfo *MRI = &MF->getRegInfo();
  const X86InstrInfo *TII = Subtarget.getInstrInfo();

  MVT PVT = getPointerTy(MF->getDataLayout());
  assert((PVT == MVT::i64 || PVT == MVT::i32) && "Invalid Pointer Size!");

  unsigned Op = 0;
  unsigned VR = 0;

  bool UseImmLabel = (MF->getTarget().getCodeModel() == CodeModel::Small) &&
                     !isPositionIndependent();

  if (UseImmLabel) {
    Op = (PVT == MVT::i64) ? X86::MOV64mi32 : X86::MOV32mi;
  } else {
    const TargetRegisterClass *TRC =
        (PVT == MVT::i64) ? &X86::GR64RegClass : &X86::GR32RegClass;
    VR = MRI->createVirtualRegister(TRC);
    Op = (PVT == MVT::i64) ? X86::MOV64mr : X86::MOV32mr;

    if (Subtarget.is64Bit())
      BuildMI(*MBB, MI, DL, TII->get(X86::LEA64r), VR)
          .addReg(X86::RIP)
          .addImm(1)
          .addReg(0)
          .addMBB(DispatchBB)
          .addReg(0);
    else
      BuildMI(*MBB, MI, DL, TII->get(X86::LEA32r), VR)
          .addReg(0) /* TII->getGlobalBaseReg(MF) */
          .addImm(1)
          .addReg(0)
          .addMBB(DispatchBB, Subtarget.classifyBlockAddressReference())
          .addReg(0);
  }

  MachineInstrBuilder MIB = BuildMI(*MBB, MI, DL, TII->get(Op));
  addFrameReference(MIB, FI, Subtarget.is64Bit() ? 56 : 36);
  if (UseImmLabel)
    MIB.addMBB(DispatchBB);
  else
    MIB.addReg(VR);
}

MachineBasicBlock *
X86TargetLowering::EmitSjLjDispatchBlock(MachineInstr &MI,
                                         MachineBasicBlock *BB) const {
  DebugLoc DL = MI.getDebugLoc();
  MachineFunction *MF = BB->getParent();
  MachineFrameInfo &MFI = MF->getFrameInfo();
  MachineRegisterInfo *MRI = &MF->getRegInfo();
  const X86InstrInfo *TII = Subtarget.getInstrInfo();
  int FI = MFI.getFunctionContextIndex();

  // Get a mapping of the call site numbers to all of the landing pads they're
  // associated with.
  DenseMap<unsigned, SmallVector<MachineBasicBlock *, 2>> CallSiteNumToLPad;
  unsigned MaxCSNum = 0;
  for (auto &MBB : *MF) {
    if (!MBB.isEHPad())
      continue;

    MCSymbol *Sym = nullptr;
    for (const auto &MI : MBB) {
      if (MI.isDebugInstr())
        continue;

      assert(MI.isEHLabel() && "expected EH_LABEL");
      Sym = MI.getOperand(0).getMCSymbol();
      break;
    }

    if (!MF->hasCallSiteLandingPad(Sym))
      continue;

    for (unsigned CSI : MF->getCallSiteLandingPad(Sym)) {
      CallSiteNumToLPad[CSI].push_back(&MBB);
      MaxCSNum = std::max(MaxCSNum, CSI);
    }
  }

  // Get an ordered list of the machine basic blocks for the jump table.
  std::vector<MachineBasicBlock *> LPadList;
  SmallPtrSet<MachineBasicBlock *, 32> InvokeBBs;
  LPadList.reserve(CallSiteNumToLPad.size());

  for (unsigned CSI = 1; CSI <= MaxCSNum; ++CSI) {
    for (auto &LP : CallSiteNumToLPad[CSI]) {
      LPadList.push_back(LP);
      InvokeBBs.insert(LP->pred_begin(), LP->pred_end());
    }
  }

  assert(!LPadList.empty() &&
         "No landing pad destinations for the dispatch jump table!");

  // Create the MBBs for the dispatch code.

  // Shove the dispatch's address into the return slot in the function context.
  MachineBasicBlock *DispatchBB = MF->CreateMachineBasicBlock();
  DispatchBB->setIsEHPad(true);

  MachineBasicBlock *TrapBB = MF->CreateMachineBasicBlock();
  BuildMI(TrapBB, DL, TII->get(X86::TRAP));
  DispatchBB->addSuccessor(TrapBB);

  MachineBasicBlock *DispContBB = MF->CreateMachineBasicBlock();
  DispatchBB->addSuccessor(DispContBB);

  // Insert MBBs.
  MF->push_back(DispatchBB);
  MF->push_back(DispContBB);
  MF->push_back(TrapBB);

  // Insert code into the entry block that creates and registers the function
  // context.
  SetupEntryBlockForSjLj(MI, BB, DispatchBB, FI);

  // Create the jump table and associated information
  unsigned JTE = getJumpTableEncoding();
  MachineJumpTableInfo *JTI = MF->getOrCreateJumpTableInfo(JTE);
  unsigned MJTI = JTI->createJumpTableIndex(LPadList);

  const X86RegisterInfo &RI = TII->getRegisterInfo();
  // Add a register mask with no preserved registers. This results in all
  // registers being marked as clobbered.
  if (RI.hasBasePointer(*MF)) {
    const bool FPIs64Bit =
        Subtarget.isTarget64BitLP64() || Subtarget.isTargetNaCl64();
    X86MachineFunctionInfo *MFI = MF->getInfo<X86MachineFunctionInfo>();
    MFI->setRestoreBasePointer(MF);

    unsigned FP = RI.getFrameRegister(*MF);
    unsigned BP = RI.getBaseRegister();
    unsigned Op = FPIs64Bit ? X86::MOV64rm : X86::MOV32rm;
    addRegOffset(BuildMI(DispatchBB, DL, TII->get(Op), BP), FP, true,
                 MFI->getRestoreBasePointerOffset())
        .addRegMask(RI.getNoPreservedMask());
  } else {
    BuildMI(DispatchBB, DL, TII->get(X86::NOOP))
        .addRegMask(RI.getNoPreservedMask());
  }

  // IReg is used as an index in a memory operand and therefore can't be SP
  unsigned IReg = MRI->createVirtualRegister(&X86::GR32_NOSPRegClass);
  addFrameReference(BuildMI(DispatchBB, DL, TII->get(X86::MOV32rm), IReg), FI,
                    Subtarget.is64Bit() ? 8 : 4);
  BuildMI(DispatchBB, DL, TII->get(X86::CMP32ri))
      .addReg(IReg)
      .addImm(LPadList.size());
  BuildMI(DispatchBB, DL, TII->get(X86::JAE_1)).addMBB(TrapBB);

  if (Subtarget.is64Bit()) {
    unsigned BReg = MRI->createVirtualRegister(&X86::GR64RegClass);
    unsigned IReg64 = MRI->createVirtualRegister(&X86::GR64_NOSPRegClass);

    // leaq .LJTI0_0(%rip), BReg
    BuildMI(DispContBB, DL, TII->get(X86::LEA64r), BReg)
        .addReg(X86::RIP)
        .addImm(1)
        .addReg(0)
        .addJumpTableIndex(MJTI)
        .addReg(0);
    // movzx IReg64, IReg
    BuildMI(DispContBB, DL, TII->get(TargetOpcode::SUBREG_TO_REG), IReg64)
        .addImm(0)
        .addReg(IReg)
        .addImm(X86::sub_32bit);

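    // Note (for illustration): EK_BlockAddress jump-table entries are
    // absolute pointers (8 bytes each on x86-64), so a single indirect jump
    // through the table suffices. EK_LabelDifference32 entries are 4-byte
    // signed offsets relative to the table base, so the entry has to be
    // loaded, sign-extended, and added back to the table base before jumping.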
    switch (JTE) {
    case MachineJumpTableInfo::EK_BlockAddress:
      // jmpq *(BReg,IReg64,8)
      BuildMI(DispContBB, DL, TII->get(X86::JMP64m))
          .addReg(BReg)
          .addImm(8)
          .addReg(IReg64)
          .addImm(0)
          .addReg(0);
      break;
    case MachineJumpTableInfo::EK_LabelDifference32: {
      unsigned OReg = MRI->createVirtualRegister(&X86::GR32RegClass);
      unsigned OReg64 = MRI->createVirtualRegister(&X86::GR64RegClass);
      unsigned TReg = MRI->createVirtualRegister(&X86::GR64RegClass);

      // movl (BReg,IReg64,4), OReg
      BuildMI(DispContBB, DL, TII->get(X86::MOV32rm), OReg)
          .addReg(BReg)
          .addImm(4)
          .addReg(IReg64)
          .addImm(0)
          .addReg(0);
      // movsx OReg64, OReg
      BuildMI(DispContBB, DL, TII->get(X86::MOVSX64rr32), OReg64).addReg(OReg);
      // addq BReg, OReg64, TReg
      BuildMI(DispContBB, DL, TII->get(X86::ADD64rr), TReg)
          .addReg(OReg64)
          .addReg(BReg);
      // jmpq *TReg
      BuildMI(DispContBB, DL, TII->get(X86::JMP64r)).addReg(TReg);
      break;
    }
    default:
      llvm_unreachable("Unexpected jump table encoding");
    }
  } else {
    // jmpl *.LJTI0_0(,IReg,4)
    BuildMI(DispContBB, DL, TII->get(X86::JMP32m))
        .addReg(0)
        .addImm(4)
        .addReg(IReg)
        .addJumpTableIndex(MJTI)
        .addReg(0);
  }

  // Add the jump table entries as successors to the MBB.
  SmallPtrSet<MachineBasicBlock *, 8> SeenMBBs;
  for (auto &LP : LPadList)
    if (SeenMBBs.insert(LP).second)
      DispContBB->addSuccessor(LP);

  // N.B. the order the invoke BBs are processed in doesn't matter here.
  SmallVector<MachineBasicBlock *, 64> MBBLPads;
  const MCPhysReg *SavedRegs = MF->getRegInfo().getCalleeSavedRegs();
  for (MachineBasicBlock *MBB : InvokeBBs) {
    // Remove the landing pad successor from the invoke block and replace it
    // with the new dispatch block.
    // Keep a copy of Successors since it's modified inside the loop.
    SmallVector<MachineBasicBlock *, 8> Successors(MBB->succ_rbegin(),
                                                   MBB->succ_rend());
    // FIXME: Avoid quadratic complexity.
    for (auto MBBS : Successors) {
      if (MBBS->isEHPad()) {
        MBB->removeSuccessor(MBBS);
        MBBLPads.push_back(MBBS);
      }
    }

    MBB->addSuccessor(DispatchBB);

    // Find the invoke call and mark all of the callee-saved registers as
    // 'implicit defined' so that they're spilled. This prevents code from
    // moving instructions to before the EH block, where they will never be
    // executed.
    for (auto &II : reverse(*MBB)) {
      if (!II.isCall())
        continue;

      DenseMap<unsigned, bool> DefRegs;
      for (auto &MOp : II.operands())
        if (MOp.isReg())
          DefRegs[MOp.getReg()] = true;

      MachineInstrBuilder MIB(*MF, &II);
      for (unsigned RI = 0; SavedRegs[RI]; ++RI) {
        unsigned Reg = SavedRegs[RI];
        if (!DefRegs[Reg])
          MIB.addReg(Reg, RegState::ImplicitDefine | RegState::Dead);
      }

      break;
    }
  }

  // Mark all former landing pads as non-landing pads. The dispatch is the only
  // landing pad now.
  for (auto &LP : MBBLPads)
    LP->setIsEHPad(false);

  // The instruction is gone now.
  MI.eraseFromParent();
  return BB;
}

MachineBasicBlock *
X86TargetLowering::EmitInstrWithCustomInserter(MachineInstr &MI,
                                               MachineBasicBlock *BB) const {
  MachineFunction *MF = BB->getParent();
  const TargetInstrInfo *TII = Subtarget.getInstrInfo();
  DebugLoc DL = MI.getDebugLoc();

  switch (MI.getOpcode()) {
  default: llvm_unreachable("Unexpected instr type to insert");
  case X86::TLS_addr32:
  case X86::TLS_addr64:
  case X86::TLS_base_addr32:
  case X86::TLS_base_addr64:
    return EmitLoweredTLSAddr(MI, BB);
  case X86::RETPOLINE_CALL32:
  case X86::RETPOLINE_CALL64:
  case X86::RETPOLINE_TCRETURN32:
  case X86::RETPOLINE_TCRETURN64:
    return EmitLoweredRetpoline(MI, BB);
  case X86::CATCHRET:
    return EmitLoweredCatchRet(MI, BB);
  case X86::CATCHPAD:
    return EmitLoweredCatchPad(MI, BB);
  case X86::SEG_ALLOCA_32:
  case X86::SEG_ALLOCA_64:
    return EmitLoweredSegAlloca(MI, BB);
  case X86::TLSCall_32:
  case X86::TLSCall_64:
    return EmitLoweredTLSCall(MI, BB);
  case X86::CMOV_FR32:
  case X86::CMOV_FR64:
  case X86::CMOV_GR8:
  case X86::CMOV_GR16:
  case X86::CMOV_GR32:
  case X86::CMOV_RFP32:
  case X86::CMOV_RFP64:
  case X86::CMOV_RFP80:
  case X86::CMOV_VR128:
  case X86::CMOV_VR128X:
  case X86::CMOV_VR256:
  case X86::CMOV_VR256X:
  case X86::CMOV_VR512:
  case X86::CMOV_VK2:
  case X86::CMOV_VK4:
  case X86::CMOV_VK8:
  case X86::CMOV_VK16:
  case X86::CMOV_VK32:
  case X86::CMOV_VK64:
    return EmitLoweredSelect(MI, BB);

  case X86::RDFLAGS32:
  case X86::RDFLAGS64: {
    unsigned PushF =
        MI.getOpcode() == X86::RDFLAGS32 ? X86::PUSHF32 : X86::PUSHF64;
    unsigned Pop = MI.getOpcode() == X86::RDFLAGS32 ? X86::POP32r : X86::POP64r;
    MachineInstr *Push = BuildMI(*BB, MI, DL, TII->get(PushF));
    // Permit reads of the EFLAGS and DF registers without them being defined.
    // This intrinsic exists to read external processor state in flags, such as
    // the trap flag, interrupt flag, and direction flag, none of which are
    // modeled by the backend.
    assert(Push->getOperand(2).getReg() == X86::EFLAGS &&
           "Unexpected register in operand!");
    Push->getOperand(2).setIsUndef();
    assert(Push->getOperand(3).getReg() == X86::DF &&
           "Unexpected register in operand!");
    Push->getOperand(3).setIsUndef();
    BuildMI(*BB, MI, DL, TII->get(Pop), MI.getOperand(0).getReg());

    MI.eraseFromParent(); // The pseudo is gone now.
    return BB;
  }

  case X86::WRFLAGS32:
  case X86::WRFLAGS64: {
    unsigned Push =
        MI.getOpcode() == X86::WRFLAGS32 ? X86::PUSH32r : X86::PUSH64r;
    unsigned PopF =
        MI.getOpcode() == X86::WRFLAGS32 ? X86::POPF32 : X86::POPF64;
    BuildMI(*BB, MI, DL, TII->get(Push)).addReg(MI.getOperand(0).getReg());
    BuildMI(*BB, MI, DL, TII->get(PopF));

    MI.eraseFromParent(); // The pseudo is gone now.
    return BB;
  }

  case X86::RELEASE_FADD32mr:
  case X86::RELEASE_FADD64mr:
    return EmitLoweredAtomicFP(MI, BB);

  case X86::FP32_TO_INT16_IN_MEM:
  case X86::FP32_TO_INT32_IN_MEM:
  case X86::FP32_TO_INT64_IN_MEM:
  case X86::FP64_TO_INT16_IN_MEM:
  case X86::FP64_TO_INT32_IN_MEM:
  case X86::FP64_TO_INT64_IN_MEM:
  case X86::FP80_TO_INT16_IN_MEM:
  case X86::FP80_TO_INT32_IN_MEM:
  case X86::FP80_TO_INT64_IN_MEM: {
    // Change the floating point control register to use "round towards zero"
    // mode when truncating to an integer value.
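    // (For reference: bits 10-11 of the x87 control word form the
    // rounding-control field; the 0xC7F stored below sets them to 11b,
    // i.e. round toward zero, and keeps all exception mask bits set.)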
    int CWFrameIdx = MF->getFrameInfo().CreateStackObject(2, 2, false);
    addFrameReference(BuildMI(*BB, MI, DL,
                              TII->get(X86::FNSTCW16m)), CWFrameIdx);

    // Load the old value of the high byte of the control word...
    unsigned OldCW =
        MF->getRegInfo().createVirtualRegister(&X86::GR16RegClass);
    addFrameReference(BuildMI(*BB, MI, DL, TII->get(X86::MOV16rm), OldCW),
                      CWFrameIdx);

    // Set the high part to be round to zero...
    addFrameReference(BuildMI(*BB, MI, DL, TII->get(X86::MOV16mi)), CWFrameIdx)
        .addImm(0xC7F);

    // Reload the modified control word now...
    addFrameReference(BuildMI(*BB, MI, DL,
                              TII->get(X86::FLDCW16m)), CWFrameIdx);

    // Restore the memory image of control word to original value
    addFrameReference(BuildMI(*BB, MI, DL, TII->get(X86::MOV16mr)), CWFrameIdx)
        .addReg(OldCW);

    // Get the X86 opcode to use.
    unsigned Opc;
    switch (MI.getOpcode()) {
    default: llvm_unreachable("illegal opcode!");
    case X86::FP32_TO_INT16_IN_MEM: Opc = X86::IST_Fp16m32; break;
    case X86::FP32_TO_INT32_IN_MEM: Opc = X86::IST_Fp32m32; break;
    case X86::FP32_TO_INT64_IN_MEM: Opc = X86::IST_Fp64m32; break;
    case X86::FP64_TO_INT16_IN_MEM: Opc = X86::IST_Fp16m64; break;
    case X86::FP64_TO_INT32_IN_MEM: Opc = X86::IST_Fp32m64; break;
    case X86::FP64_TO_INT64_IN_MEM: Opc = X86::IST_Fp64m64; break;
    case X86::FP80_TO_INT16_IN_MEM: Opc = X86::IST_Fp16m80; break;
    case X86::FP80_TO_INT32_IN_MEM: Opc = X86::IST_Fp32m80; break;
    case X86::FP80_TO_INT64_IN_MEM: Opc = X86::IST_Fp64m80; break;
    }

    X86AddressMode AM = getAddressFromInstr(&MI, 0);
    addFullAddress(BuildMI(*BB, MI, DL, TII->get(Opc)), AM)
        .addReg(MI.getOperand(X86::AddrNumOperands).getReg());

    // Reload the original control word now.
    addFrameReference(BuildMI(*BB, MI, DL,
                              TII->get(X86::FLDCW16m)), CWFrameIdx);

    MI.eraseFromParent(); // The pseudo instruction is gone now.
    return BB;
  }
  // Thread synchronization.
  case X86::MONITOR:
    return emitMonitor(MI, BB, Subtarget, X86::MONITORrrr);
  case X86::MONITORX:
    return emitMonitor(MI, BB, Subtarget, X86::MONITORXrrr);

  // Cache line zero
  case X86::CLZERO:
    return emitClzero(&MI, BB, Subtarget);

  // PKU feature
  case X86::WRPKRU:
    return emitWRPKRU(MI, BB, Subtarget);
  case X86::RDPKRU:
    return emitRDPKRU(MI, BB, Subtarget);
  // xbegin
  case X86::XBEGIN:
    return emitXBegin(MI, BB, Subtarget.getInstrInfo());

  case X86::VASTART_SAVE_XMM_REGS:
    return EmitVAStartSaveXMMRegsWithCustomInserter(MI, BB);

  case X86::VAARG_64:
    return EmitVAARG64WithCustomInserter(MI, BB);

  case X86::EH_SjLj_SetJmp32:
  case X86::EH_SjLj_SetJmp64:
    return emitEHSjLjSetJmp(MI, BB);

  case X86::EH_SjLj_LongJmp32:
  case X86::EH_SjLj_LongJmp64:
    return emitEHSjLjLongJmp(MI, BB);

  case X86::Int_eh_sjlj_setup_dispatch:
    return EmitSjLjDispatchBlock(MI, BB);

  case TargetOpcode::STATEPOINT:
    // As an implementation detail, STATEPOINT shares the STACKMAP format at
    // this point in the process. We diverge later.
    return emitPatchPoint(MI, BB);

  case TargetOpcode::STACKMAP:
  case TargetOpcode::PATCHPOINT:
    return emitPatchPoint(MI, BB);

  case TargetOpcode::PATCHABLE_EVENT_CALL:
    return emitXRayCustomEvent(MI, BB);

  case TargetOpcode::PATCHABLE_TYPED_EVENT_CALL:
    return emitXRayTypedEvent(MI, BB);

  case X86::LCMPXCHG8B: {
    const X86RegisterInfo *TRI = Subtarget.getRegisterInfo();
    // In addition to the 4 E[ABCD] registers implied by the encoding,
    // CMPXCHG8B requires a memory operand. If the current architecture
    // happens to be i686 and the current function needs a base pointer
    // - which is ESI on i686 - the register allocator would not be able to
    // allocate registers for an address of the form X(%reg, %reg, Y): there
    // would never be enough unreserved registers during regalloc (without
    // the base pointer the only option would be X(%edi, %esi, Y)).
    // We give the register allocator a hand by precomputing the address in
    // a new vreg using LEA.
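    //
    // Roughly (a sketch, register names are illustrative):
    //   cmpxchg8b X(%reg1,%reg2,Y)    # needs two free GPRs for the address
    // becomes
    //   leal X(%reg1,%reg2,Y), %vreg  # emitted before the E[ABCD] copies
    //   cmpxchg8b (%vreg)             # only one free GPR needed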
29807 | ||||
29808 | // If it is not i686 or there is no base pointer - nothing to do here. | |||
29809 | if (!Subtarget.is32Bit() || !TRI->hasBasePointer(*MF)) | |||
29810 | return BB; | |||
29811 | ||||
29812 | // Even though this code does not necessarily need the base pointer to | |||
29813 | // be ESI, we check for that anyway. The reason: if this assert fails, | |||
29814 | // something has changed in the compiler's base pointer handling, and it | |||
29815 | // most probably has to be addressed here as well. | |||
29816 | assert(TRI->getBaseRegister() == X86::ESI && | |||
29817 | "LCMPXCHG8B custom insertion for i686 is written with X86::ESI as a " | |||
29818 | "base pointer in mind"); | |||
29819 | ||||
29820 | MachineRegisterInfo &MRI = MF->getRegInfo(); | |||
29821 | MVT SPTy = getPointerTy(MF->getDataLayout()); | |||
29822 | const TargetRegisterClass *AddrRegClass = getRegClassFor(SPTy); | |||
29823 | unsigned computedAddrVReg = MRI.createVirtualRegister(AddrRegClass); | |||
29824 | ||||
29825 | X86AddressMode AM = getAddressFromInstr(&MI, 0); | |||
29826 | // Regalloc does not need any help when the memory operand of CMPXCHG8B | |||
29827 | // does not use an index register. | |||
29828 | if (AM.IndexReg == X86::NoRegister) | |||
29829 | return BB; | |||
29830 | ||||
29831 | // After X86TargetLowering::ReplaceNodeResults CMPXCHG8B is glued to its | |||
29832 | // four operand definitions that are E[ABCD] registers. We skip them and | |||
29833 | // then insert the LEA. | |||
29834 | MachineBasicBlock::iterator MBBI(MI); | |||
29835 | while (MBBI->definesRegister(X86::EAX) || MBBI->definesRegister(X86::EBX) || | |||
29836 | MBBI->definesRegister(X86::ECX) || MBBI->definesRegister(X86::EDX)) | |||
29837 | --MBBI; | |||
29838 | addFullAddress( | |||
29839 | BuildMI(*BB, *MBBI, DL, TII->get(X86::LEA32r), computedAddrVReg), AM); | |||
29840 | ||||
29841 | setDirectAddressInInstr(&MI, 0, computedAddrVReg); | |||
29842 | ||||
29843 | return BB; | |||
29844 | } | |||
29845 | case X86::LCMPXCHG16B: | |||
29846 | return BB; | |||
29847 | case X86::LCMPXCHG8B_SAVE_EBX: | |||
29848 | case X86::LCMPXCHG16B_SAVE_RBX: { | |||
29849 | unsigned BasePtr = | |||
29850 | MI.getOpcode() == X86::LCMPXCHG8B_SAVE_EBX ? X86::EBX : X86::RBX; | |||
29851 | if (!BB->isLiveIn(BasePtr)) | |||
29852 | BB->addLiveIn(BasePtr); | |||
29853 | return BB; | |||
29854 | } | |||
29855 | } | |||
29856 | } | |||
29857 | ||||
29858 | //===----------------------------------------------------------------------===// | |||
29859 | // X86 Optimization Hooks | |||
29860 | //===----------------------------------------------------------------------===// | |||
29861 | ||||
29862 | bool | |||
29863 | X86TargetLowering::targetShrinkDemandedConstant(SDValue Op, | |||
29864 | const APInt &Demanded, | |||
29865 | TargetLoweringOpt &TLO) const { | |||
29866 | // Only optimize Ands, so that we do not shrink a constant that could | |||
29867 | // otherwise be matched by movzx. | |||
29868 | if (Op.getOpcode() != ISD::AND) | |||
29869 | return false; | |||
29870 | ||||
29871 | EVT VT = Op.getValueType(); | |||
29872 | ||||
29873 | // Ignore vectors. | |||
29874 | if (VT.isVector()) | |||
29875 | return false; | |||
29876 | ||||
29877 | unsigned Size = VT.getSizeInBits(); | |||
29878 | ||||
29879 | // Make sure the RHS really is a constant. | |||
29880 | ConstantSDNode *C = dyn_cast<ConstantSDNode>(Op.getOperand(1)); | |||
29881 | if (!C) | |||
29882 | return false; | |||
29883 | ||||
29884 | const APInt &Mask = C->getAPIntValue(); | |||
29885 | ||||
29886 | // Clear all non-demanded bits initially. | |||
29887 | APInt ShrunkMask = Mask & Demanded; | |||
29888 | ||||
29889 | // Find the width of the shrunk mask. | |||
29890 | unsigned Width = ShrunkMask.getActiveBits(); | |||
29891 | ||||
29892 | // If the mask is all 0s there's nothing to do here. | |||
29893 | if (Width == 0) | |||
29894 | return false; | |||
29895 | ||||
29896 | // Find the next power of 2 width, rounding up to a byte. | |||
29897 | Width = PowerOf2Ceil(std::max(Width, 8U)); | |||
29898 | // Truncate the width to size to handle illegal types. | |||
29899 | Width = std::min(Width, Size); | |||
29900 | ||||
29901 | // Calculate a possible zero extend mask for this constant. | |||
29902 | APInt ZeroExtendMask = APInt::getLowBitsSet(Size, Width); | |||
29903 | ||||
29904 | // If we aren't changing the mask, just return true to keep it and prevent | |||
29905 | // the caller from optimizing. | |||
29906 | if (ZeroExtendMask == Mask) | |||
29907 | return true; | |||
29908 | ||||
29909 | // Make sure the new mask can be represented by a combination of mask bits | |||
29910 | // and non-demanded bits. | |||
29911 | if (!ZeroExtendMask.isSubsetOf(Mask | ~Demanded)) | |||
29912 | return false; | |||
29913 | ||||
29914 | // Replace the constant with the zero extend mask. | |||
29915 | SDLoc DL(Op); | |||
29916 | SDValue NewC = TLO.DAG.getConstant(ZeroExtendMask, DL, VT); | |||
29917 | SDValue NewOp = TLO.DAG.getNode(ISD::AND, DL, VT, Op.getOperand(0), NewC); | |||
29918 | return TLO.CombineTo(Op, NewOp); | |||
29919 | } | |||
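| // Worked example: for (and X, 0xFF0) with Demanded = 0x0F0, ShrunkMask is | |||
| // 0x0F0, Width rounds up to 8, and ZeroExtendMask becomes 0xFF. 0xFF differs | |||
| // from 0xFF0 only in non-demanded bits, so the constant is replaced and the | |||
| // AND can later be selected as a movzx. | |||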
29920 | ||||
29921 | void X86TargetLowering::computeKnownBitsForTargetNode(const SDValue Op, | |||
29922 | KnownBits &Known, | |||
29923 | const APInt &DemandedElts, | |||
29924 | const SelectionDAG &DAG, | |||
29925 | unsigned Depth) const { | |||
29926 | unsigned BitWidth = Known.getBitWidth(); | |||
29927 | unsigned Opc = Op.getOpcode(); | |||
29928 | EVT VT = Op.getValueType(); | |||
29929 | assert((Opc >= ISD::BUILTIN_OP_END || | |||
29930 | Opc == ISD::INTRINSIC_WO_CHAIN || | |||
29931 | Opc == ISD::INTRINSIC_W_CHAIN || | |||
29932 | Opc == ISD::INTRINSIC_VOID) && | |||
29933 | "Should use MaskedValueIsZero if you don't know whether Op" | |||
29934 | " is a target node!"); | |||
29935 | ||||
29936 | Known.resetAll(); | |||
29937 | switch (Opc) { | |||
29938 | default: break; | |||
29939 | case X86ISD::SETCC: | |||
29940 | Known.Zero.setBitsFrom(1); | |||
29941 | break; | |||
29942 | case X86ISD::MOVMSK: { | |||
29943 | unsigned NumLoBits = Op.getOperand(0).getValueType().getVectorNumElements(); | |||
29944 | Known.Zero.setBitsFrom(NumLoBits); | |||
29945 | break; | |||
29946 | } | |||
29947 | case X86ISD::PEXTRB: | |||
29948 | case X86ISD::PEXTRW: { | |||
29949 | SDValue Src = Op.getOperand(0); | |||
29950 | EVT SrcVT = Src.getValueType(); | |||
29951 | APInt DemandedElt = APInt::getOneBitSet(SrcVT.getVectorNumElements(), | |||
29952 | Op.getConstantOperandVal(1)); | |||
29953 | Known = DAG.computeKnownBits(Src, DemandedElt, Depth + 1); | |||
29954 | Known = Known.zextOrTrunc(BitWidth); | |||
29955 | Known.Zero.setBitsFrom(SrcVT.getScalarSizeInBits()); | |||
29956 | break; | |||
29957 | } | |||
29958 | case X86ISD::VSHLI: | |||
29959 | case X86ISD::VSRLI: { | |||
29960 | if (auto *ShiftImm = dyn_cast<ConstantSDNode>(Op.getOperand(1))) { | |||
29961 | if (ShiftImm->getAPIntValue().uge(VT.getScalarSizeInBits())) { | |||
29962 | Known.setAllZero(); | |||
29963 | break; | |||
29964 | } | |||
29965 | ||||
29966 | Known = DAG.computeKnownBits(Op.getOperand(0), DemandedElts, Depth + 1); | |||
29967 | unsigned ShAmt = ShiftImm->getZExtValue(); | |||
29968 | if (Opc == X86ISD::VSHLI) { | |||
29969 | Known.Zero <<= ShAmt; | |||
29970 | Known.One <<= ShAmt; | |||
29971 | // Low bits are known zero. | |||
29972 | Known.Zero.setLowBits(ShAmt); | |||
29973 | } else { | |||
29974 | Known.Zero.lshrInPlace(ShAmt); | |||
29975 | Known.One.lshrInPlace(ShAmt); | |||
29976 | // High bits are known zero. | |||
29977 | Known.Zero.setHighBits(ShAmt); | |||
29978 | } | |||
29979 | } | |||
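| // e.g. for VSHLI by 3, the known-zero/known-one masks are shifted left | |||
| // together with the data and bits 0-2 of each element become known zero. | |||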
29980 | break; | |||
29981 | } | |||
29982 | case X86ISD::PACKUS: { | |||
29983 | // PACKUS is just a truncation if the upper half is zero. | |||
29984 | APInt DemandedLHS, DemandedRHS; | |||
29985 | getPackDemandedElts(VT, DemandedElts, DemandedLHS, DemandedRHS); | |||
29986 | ||||
29987 | Known.One = APInt::getAllOnesValue(BitWidth * 2); | |||
29988 | Known.Zero = APInt::getAllOnesValue(BitWidth * 2); | |||
29989 | ||||
29990 | KnownBits Known2; | |||
29991 | if (!!DemandedLHS) { | |||
29992 | Known2 = DAG.computeKnownBits(Op.getOperand(0), DemandedLHS, Depth + 1); | |||
29993 | Known.One &= Known2.One; | |||
29994 | Known.Zero &= Known2.Zero; | |||
29995 | } | |||
29996 | if (!!DemandedRHS) { | |||
29997 | Known2 = DAG.computeKnownBits(Op.getOperand(1), DemandedRHS, Depth + 1); | |||
29998 | Known.One &= Known2.One; | |||
29999 | Known.Zero &= Known2.Zero; | |||
30000 | } | |||
30001 | ||||
30002 | if (Known.countMinLeadingZeros() < BitWidth) | |||
30003 | Known.resetAll(); | |||
30004 | Known = Known.trunc(BitWidth); | |||
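| // e.g. for a v8i16 -> v16i8 PACKUS the result bits are known only when | |||
| // every demanded i16 source element has its top 8 bits known zero; | |||
| // otherwise the unsigned saturation clamp may fire, so Known is reset above. | |||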
30005 | break; | |||
30006 | } | |||
30007 | case X86ISD::CMOV: { | |||
30008 | Known = DAG.computeKnownBits(Op.getOperand(1), Depth+1); | |||
30009 | // If we don't know any bits, early out. | |||
30010 | if (Known.isUnknown()) | |||
30011 | break; | |||
30012 | KnownBits Known2 = DAG.computeKnownBits(Op.getOperand(0), Depth+1); | |||
30013 | ||||
30014 | // Only known if known in both the LHS and RHS. | |||
30015 | Known.One &= Known2.One; | |||
30016 | Known.Zero &= Known2.Zero; | |||
30017 | break; | |||
30018 | } | |||
30019 | } | |||
30020 | ||||
30021 | // Handle target shuffles. | |||
30022 | // TODO - use resolveTargetShuffleInputs once we can limit recursive depth. | |||
30023 | if (isTargetShuffle(Opc)) { | |||
30024 | bool IsUnary; | |||
30025 | SmallVector<int, 64> Mask; | |||
30026 | SmallVector<SDValue, 2> Ops; | |||
30027 | if (getTargetShuffleMask(Op.getNode(), VT.getSimpleVT(), true, Ops, Mask, | |||
30028 | IsUnary)) { | |||
30029 | unsigned NumOps = Ops.size(); | |||
30030 | unsigned NumElts = VT.getVectorNumElements(); | |||
30031 | if (Mask.size() == NumElts) { | |||
30032 | SmallVector<APInt, 2> DemandedOps(NumOps, APInt(NumElts, 0)); | |||
30033 | Known.Zero.setAllBits(); Known.One.setAllBits(); | |||
30034 | for (unsigned i = 0; i != NumElts; ++i) { | |||
30035 | if (!DemandedElts[i]) | |||
30036 | continue; | |||
30037 | int M = Mask[i]; | |||
30038 | if (M == SM_SentinelUndef) { | |||
30039 | // For UNDEF elements, we don't know anything about the common state | |||
30040 | // of the shuffle result. | |||
30041 | Known.resetAll(); | |||
30042 | break; | |||
30043 | } else if (M == SM_SentinelZero) { | |||
30044 | Known.One.clearAllBits(); | |||
30045 | continue; | |||
30046 | } | |||
30047 | assert(0 <= M && (unsigned)M < (NumOps * NumElts) && | |||
30048 | "Shuffle index out of range"); | |||
30049 | ||||
30050 | unsigned OpIdx = (unsigned)M / NumElts; | |||
30051 | unsigned EltIdx = (unsigned)M % NumElts; | |||
30052 | if (Ops[OpIdx].getValueType() != VT) { | |||
30053 | // TODO - handle target shuffle ops with different value types. | |||
30054 | Known.resetAll(); | |||
30055 | break; | |||
30056 | } | |||
30057 | DemandedOps[OpIdx].setBit(EltIdx); | |||
30058 | } | |||
30059 | // Known bits are the values that are shared by every demanded element. | |||
30060 | for (unsigned i = 0; i != NumOps && !Known.isUnknown(); ++i) { | |||
30061 | if (!DemandedOps[i]) | |||
30062 | continue; | |||
30063 | KnownBits Known2 = | |||
30064 | DAG.computeKnownBits(Ops[i], DemandedOps[i], Depth + 1); | |||
30065 | Known.One &= Known2.One; | |||
30066 | Known.Zero &= Known2.Zero; | |||
30067 | } | |||
30068 | } | |||
30069 | } | |||
30070 | } | |||
30071 | } | |||
30072 | ||||
30073 | unsigned X86TargetLowering::ComputeNumSignBitsForTargetNode( | |||
30074 | SDValue Op, const APInt &DemandedElts, const SelectionDAG &DAG, | |||
30075 | unsigned Depth) const { | |||
30076 | unsigned VTBits = Op.getScalarValueSizeInBits(); | |||
30077 | unsigned Opcode = Op.getOpcode(); | |||
30078 | switch (Opcode) { | |||
30079 | case X86ISD::SETCC_CARRY: | |||
30080 | // SETCC_CARRY sets the dest to ~0 for true or 0 for false. | |||
30081 | return VTBits; | |||
30082 | ||||
30083 | case X86ISD::VTRUNC: { | |||
30084 | // TODO: Add DemandedElts support. | |||
30085 | SDValue Src = Op.getOperand(0); | |||
30086 | unsigned NumSrcBits = Src.getScalarValueSizeInBits(); | |||
30087 | assert(VTBits < NumSrcBits && "Illegal truncation input type"); | |||
30088 | unsigned Tmp = DAG.ComputeNumSignBits(Src, Depth + 1); | |||
30089 | if (Tmp > (NumSrcBits - VTBits)) | |||
30090 | return Tmp - (NumSrcBits - VTBits); | |||
30091 | return 1; | |||
30092 | } | |||
30093 | ||||
30094 | case X86ISD::PACKSS: { | |||
30095 | // PACKSS is just a truncation if the sign bits extend to the packed size. | |||
30096 | APInt DemandedLHS, DemandedRHS; | |||
30097 | getPackDemandedElts(Op.getValueType(), DemandedElts, DemandedLHS, | |||
30098 | DemandedRHS); | |||
30099 | ||||
30100 | unsigned SrcBits = Op.getOperand(0).getScalarValueSizeInBits(); | |||
30101 | unsigned Tmp0 = SrcBits, Tmp1 = SrcBits; | |||
30102 | if (!!DemandedLHS) | |||
30103 | Tmp0 = DAG.ComputeNumSignBits(Op.getOperand(0), DemandedLHS, Depth + 1); | |||
30104 | if (!!DemandedRHS) | |||
30105 | Tmp1 = DAG.ComputeNumSignBits(Op.getOperand(1), DemandedRHS, Depth + 1); | |||
30106 | unsigned Tmp = std::min(Tmp0, Tmp1); | |||
30107 | if (Tmp > (SrcBits - VTBits)) | |||
30108 | return Tmp - (SrcBits - VTBits); | |||
30109 | return 1; | |||
30110 | } | |||
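| // e.g. packing v8i16 -> v16i8: with Tmp = 12 known sign bits in the i16 | |||
| // inputs, the packed i8 elements keep 12 - (16 - 8) = 4 sign bits. | |||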
30111 | ||||
30112 | case X86ISD::VSHLI: { | |||
30113 | SDValue Src = Op.getOperand(0); | |||
30114 | APInt ShiftVal = cast<ConstantSDNode>(Op.getOperand(1))->getAPIntValue(); | |||
30115 | if (ShiftVal.uge(VTBits)) | |||
30116 | return VTBits; // Shifted all bits out --> zero. | |||
30117 | unsigned Tmp = DAG.ComputeNumSignBits(Src, DemandedElts, Depth + 1); | |||
30118 | if (ShiftVal.uge(Tmp)) | |||
30119 | return 1; // Shifted all sign bits out --> unknown. | |||
30120 | return Tmp - ShiftVal.getZExtValue(); | |||
30121 | } | |||
30122 | ||||
30123 | case X86ISD::VSRAI: { | |||
30124 | SDValue Src = Op.getOperand(0); | |||
30125 | APInt ShiftVal = cast<ConstantSDNode>(Op.getOperand(1))->getAPIntValue(); | |||
30126 | if (ShiftVal.uge(VTBits - 1)) | |||
30127 | return VTBits; // Sign splat. | |||
30128 | unsigned Tmp = DAG.ComputeNumSignBits(Src, DemandedElts, Depth + 1); | |||
30129 | ShiftVal += Tmp; | |||
30130 | return ShiftVal.uge(VTBits) ? VTBits : ShiftVal.getZExtValue(); | |||
30131 | } | |||
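| // e.g. VSRAI by 5 on elements with 3 known sign bits yields | |||
| // min(3 + 5, VTBits) sign bits, as arithmetic shifts replicate the sign. | |||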
30132 | ||||
30133 | case X86ISD::PCMPGT: | |||
30134 | case X86ISD::PCMPEQ: | |||
30135 | case X86ISD::CMPP: | |||
30136 | case X86ISD::VPCOM: | |||
30137 | case X86ISD::VPCOMU: | |||
30138 | // Vector compares return zero/all-bits result values. | |||
30139 | return VTBits; | |||
30140 | ||||
30141 | case X86ISD::CMOV: { | |||
30142 | unsigned Tmp0 = DAG.ComputeNumSignBits(Op.getOperand(0), Depth+1); | |||
30143 | if (Tmp0 == 1) return 1; // Early out. | |||
30144 | unsigned Tmp1 = DAG.ComputeNumSignBits(Op.getOperand(1), Depth+1); | |||
30145 | return std::min(Tmp0, Tmp1); | |||
30146 | } | |||
30147 | } | |||
30148 | ||||
30149 | // Fallback case. | |||
30150 | return 1; | |||
30151 | } | |||
30152 | ||||
30153 | SDValue X86TargetLowering::unwrapAddress(SDValue N) const { | |||
30154 | if (N->getOpcode() == X86ISD::Wrapper || N->getOpcode() == X86ISD::WrapperRIP) | |||
30155 | return N->getOperand(0); | |||
30156 | return N; | |||
30157 | } | |||
30158 | ||||
30159 | // Attempt to match a combined shuffle mask against supported unary shuffle | |||
30160 | // instructions. | |||
30161 | // TODO: Investigate sharing more of this with shuffle lowering. | |||
30162 | static bool matchUnaryVectorShuffle(MVT MaskVT, ArrayRef<int> Mask, | |||
30163 | bool AllowFloatDomain, bool AllowIntDomain, | |||
30164 | SDValue &V1, const SDLoc &DL, | |||
30165 | SelectionDAG &DAG, | |||
30166 | const X86Subtarget &Subtarget, | |||
30167 | unsigned &Shuffle, MVT &SrcVT, MVT &DstVT) { | |||
30168 | unsigned NumMaskElts = Mask.size(); | |||
30169 | unsigned MaskEltSize = MaskVT.getScalarSizeInBits(); | |||
30170 | ||||
30171 | // Match against a VZEXT_MOVL vXi32 zero-extending instruction. | |||
30172 | if (MaskEltSize == 32 && isUndefOrEqual(Mask[0], 0) && | |||
30173 | isUndefOrZero(Mask[1]) && isUndefInRange(Mask, 2, NumMaskElts - 2)) { | |||
30174 | Shuffle = X86ISD::VZEXT_MOVL; | |||
30175 | SrcVT = DstVT = !Subtarget.hasSSE2() ? MVT::v4f32 : MaskVT; | |||
30176 | return true; | |||
30177 | } | |||
30178 | ||||
30179 | // Match against a ZERO_EXTEND_VECTOR_INREG/VZEXT instruction. | |||
30180 | // TODO: Add 512-bit vector support (split AVX512F and AVX512BW). | |||
30181 | if (AllowIntDomain && ((MaskVT.is128BitVector() && Subtarget.hasSSE41()) || | |||
30182 | (MaskVT.is256BitVector() && Subtarget.hasInt256()))) { | |||
30183 | unsigned MaxScale = 64 / MaskEltSize; | |||
30184 | for (unsigned Scale = 2; Scale <= MaxScale; Scale *= 2) { | |||
30185 | bool Match = true; | |||
30186 | unsigned NumDstElts = NumMaskElts / Scale; | |||
30187 | for (unsigned i = 0; i != NumDstElts && Match; ++i) { | |||
30188 | Match &= isUndefOrEqual(Mask[i * Scale], (int)i); | |||
30189 | Match &= isUndefOrZeroInRange(Mask, (i * Scale) + 1, Scale - 1); | |||
30190 | } | |||
30191 | if (Match) { | |||
30192 | unsigned SrcSize = std::max(128u, NumDstElts * MaskEltSize); | |||
30193 | MVT ScalarTy = MaskVT.isInteger() ? MaskVT.getScalarType() : | |||
30194 | MVT::getIntegerVT(MaskEltSize); | |||
30195 | SrcVT = MVT::getVectorVT(ScalarTy, SrcSize / MaskEltSize); | |||
30196 | ||||
30197 | if (SrcVT.getSizeInBits() != MaskVT.getSizeInBits()) | |||
30198 | V1 = extractSubVector(V1, 0, DAG, DL, SrcSize); | |||
30199 | ||||
30200 | if (SrcVT.getVectorNumElements() == NumDstElts) | |||
30201 | Shuffle = unsigned(ISD::ZERO_EXTEND); | |||
30202 | else | |||
30203 | Shuffle = unsigned(ISD::ZERO_EXTEND_VECTOR_INREG); | |||
30204 | ||||
30205 | DstVT = MVT::getIntegerVT(Scale * MaskEltSize); | |||
30206 | DstVT = MVT::getVectorVT(DstVT, NumDstElts); | |||
30207 | return true; | |||
30208 | } | |||
30209 | } | |||
30210 | } | |||
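| // e.g. with MaskEltSize == 8 the mask {0,Z,Z,Z,1,Z,Z,Z,...} (Z meaning | |||
| // undef-or-zero) matches Scale == 4, a PMOVZXBD-style i8 -> i32 extension. | |||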
30211 | ||||
30212 | // Match against a VZEXT_MOVL instruction; SSE1 only supports 32-bits (MOVSS). | |||
30213 | if (((MaskEltSize == 32) || (MaskEltSize == 64 && Subtarget.hasSSE2())) && | |||
30214 | isUndefOrEqual(Mask[0], 0) && | |||
30215 | isUndefOrZeroInRange(Mask, 1, NumMaskElts - 1)) { | |||
30216 | Shuffle = X86ISD::VZEXT_MOVL; | |||
30217 | SrcVT = DstVT = !Subtarget.hasSSE2() ? MVT::v4f32 : MaskVT; | |||
30218 | return true; | |||
30219 | } | |||
30220 | ||||
30221 | // Check if we have SSE3, which will let us use MOVDDUP etc. These | |||
30222 | // instructions are no slower than UNPCKLPD but have the option to | |||
30223 | // fold the input operand even when it is an unaligned memory load. | |||
30224 | if (MaskVT.is128BitVector() && Subtarget.hasSSE3() && AllowFloatDomain) { | |||
30225 | if (!Subtarget.hasAVX2() && isTargetShuffleEquivalent(Mask, {0, 0})) { | |||
30226 | Shuffle = X86ISD::MOVDDUP; | |||
30227 | SrcVT = DstVT = MVT::v2f64; | |||
30228 | return true; | |||
30229 | } | |||
30230 | if (isTargetShuffleEquivalent(Mask, {0, 0, 2, 2})) { | |||
30231 | Shuffle = X86ISD::MOVSLDUP; | |||
30232 | SrcVT = DstVT = MVT::v4f32; | |||
30233 | return true; | |||
30234 | } | |||
30235 | if (isTargetShuffleEquivalent(Mask, {1, 1, 3, 3})) { | |||
30236 | Shuffle = X86ISD::MOVSHDUP; | |||
30237 | SrcVT = DstVT = MVT::v4f32; | |||
30238 | return true; | |||
30239 | } | |||
30240 | } | |||
30241 | ||||
30242 | if (MaskVT.is256BitVector() && AllowFloatDomain) { | |||
30243 | assert(Subtarget.hasAVX() && "AVX required for 256-bit vector shuffles"); | |||
30244 | if (isTargetShuffleEquivalent(Mask, {0, 0, 2, 2})) { | |||
30245 | Shuffle = X86ISD::MOVDDUP; | |||
30246 | SrcVT = DstVT = MVT::v4f64; | |||
30247 | return true; | |||
30248 | } | |||
30249 | if (isTargetShuffleEquivalent(Mask, {0, 0, 2, 2, 4, 4, 6, 6})) { | |||
30250 | Shuffle = X86ISD::MOVSLDUP; | |||
30251 | SrcVT = DstVT = MVT::v8f32; | |||
30252 | return true; | |||
30253 | } | |||
30254 | if (isTargetShuffleEquivalent(Mask, {1, 1, 3, 3, 5, 5, 7, 7})) { | |||
30255 | Shuffle = X86ISD::MOVSHDUP; | |||
30256 | SrcVT = DstVT = MVT::v8f32; | |||
30257 | return true; | |||
30258 | } | |||
30259 | } | |||
30260 | ||||
30261 | if (MaskVT.is512BitVector() && AllowFloatDomain) { | |||
30262 | assert(Subtarget.hasAVX512() && | |||
30263 | "AVX512 required for 512-bit vector shuffles"); | |||
30264 | if (isTargetShuffleEquivalent(Mask, {0, 0, 2, 2, 4, 4, 6, 6})) { | |||
30265 | Shuffle = X86ISD::MOVDDUP; | |||
30266 | SrcVT = DstVT = MVT::v8f64; | |||
30267 | return true; | |||
30268 | } | |||
30269 | if (isTargetShuffleEquivalent( | |||
30270 | Mask, {0, 0, 2, 2, 4, 4, 6, 6, 8, 8, 10, 10, 12, 12, 14, 14})) { | |||
30271 | Shuffle = X86ISD::MOVSLDUP; | |||
30272 | SrcVT = DstVT = MVT::v16f32; | |||
30273 | return true; | |||
30274 | } | |||
30275 | if (isTargetShuffleEquivalent( | |||
30276 | Mask, {1, 1, 3, 3, 5, 5, 7, 7, 9, 9, 11, 11, 13, 13, 15, 15})) { | |||
30277 | Shuffle = X86ISD::MOVSHDUP; | |||
30278 | SrcVT = DstVT = MVT::v16f32; | |||
30279 | return true; | |||
30280 | } | |||
30281 | } | |||
30282 | ||||
30283 | // Attempt to match against broadcast-from-vector. | |||
30284 | if (Subtarget.hasAVX2()) { | |||
30285 | SmallVector<int, 64> BroadcastMask(NumMaskElts, 0); | |||
30286 | if (isTargetShuffleEquivalent(Mask, BroadcastMask)) { | |||
30287 | SrcVT = DstVT = MaskVT; | |||
30288 | Shuffle = X86ISD::VBROADCAST; | |||
30289 | return true; | |||
30290 | } | |||
30291 | } | |||
30292 | ||||
30293 | return false; | |||
30294 | } | |||
30295 | ||||
30296 | // Attempt to match a combined shuffle mask against supported unary immediate | |||
30297 | // permute instructions. | |||
30298 | // TODO: Investigate sharing more of this with shuffle lowering. | |||
30299 | static bool matchUnaryPermuteVectorShuffle(MVT MaskVT, ArrayRef<int> Mask, | |||
30300 | const APInt &Zeroable, | |||
30301 | bool AllowFloatDomain, | |||
30302 | bool AllowIntDomain, | |||
30303 | const X86Subtarget &Subtarget, | |||
30304 | unsigned &Shuffle, MVT &ShuffleVT, | |||
30305 | unsigned &PermuteImm) { | |||
30306 | unsigned NumMaskElts = Mask.size(); | |||
30307 | unsigned InputSizeInBits = MaskVT.getSizeInBits(); | |||
30308 | unsigned MaskScalarSizeInBits = InputSizeInBits / NumMaskElts; | |||
30309 | MVT MaskEltVT = MVT::getIntegerVT(MaskScalarSizeInBits); | |||
30310 | ||||
30311 | bool ContainsZeros = | |||
30312 | llvm::any_of(Mask, [](int M) { return M == SM_SentinelZero; }); | |||
30313 | ||||
30314 | // Handle VPERMI/VPERMILPD vXi64/vXf64 patterns. | |||
30315 | if (!ContainsZeros && MaskScalarSizeInBits == 64) { | |||
30316 | // Check for lane crossing permutes. | |||
30317 | if (is128BitLaneCrossingShuffleMask(MaskEltVT, Mask)) { | |||
30318 | // PERMPD/PERMQ permutes within a 256-bit vector (AVX2+). | |||
30319 | if (Subtarget.hasAVX2() && MaskVT.is256BitVector()) { | |||
30320 | Shuffle = X86ISD::VPERMI; | |||
30321 | ShuffleVT = (AllowFloatDomain ? MVT::v4f64 : MVT::v4i64); | |||
30322 | PermuteImm = getV4X86ShuffleImm(Mask); | |||
30323 | return true; | |||
30324 | } | |||
30325 | if (Subtarget.hasAVX512() && MaskVT.is512BitVector()) { | |||
30326 | SmallVector<int, 4> RepeatedMask; | |||
30327 | if (is256BitLaneRepeatedShuffleMask(MVT::v8f64, Mask, RepeatedMask)) { | |||
30328 | Shuffle = X86ISD::VPERMI; | |||
30329 | ShuffleVT = (AllowFloatDomain ? MVT::v8f64 : MVT::v8i64); | |||
30330 | PermuteImm = getV4X86ShuffleImm(RepeatedMask); | |||
30331 | return true; | |||
30332 | } | |||
30333 | } | |||
30334 | } else if (AllowFloatDomain && Subtarget.hasAVX()) { | |||
30335 | // VPERMILPD can permute with a non-repeating shuffle. | |||
30336 | Shuffle = X86ISD::VPERMILPI; | |||
30337 | ShuffleVT = MVT::getVectorVT(MVT::f64, Mask.size()); | |||
30338 | PermuteImm = 0; | |||
30339 | for (int i = 0, e = Mask.size(); i != e; ++i) { | |||
30340 | int M = Mask[i]; | |||
30341 | if (M == SM_SentinelUndef) | |||
30342 | continue; | |||
30343 | assert(((M / 2) == (i / 2)) && "Out of range shuffle mask index"); | |||
30344 | PermuteImm |= (M & 1) << i; | |||
30345 | } | |||
30346 | return true; | |||
30347 | } | |||
30348 | } | |||
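| // e.g. the in-lane mask {1,0,3,2} gives VPERMILPD PermuteImm 0b0101: bit i | |||
| // holds the lane-relative half selected for element i. | |||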
30349 | ||||
30350 | // Handle PSHUFD/VPERMILPI vXi32/vXf32 repeated patterns. | |||
30351 | // AVX introduced the VPERMILPD/VPERMILPS float permutes; before then we | |||
30352 | // had to use 2-input SHUFPD/SHUFPS shuffles (not handled here). | |||
30353 | if ((MaskScalarSizeInBits == 64 || MaskScalarSizeInBits == 32) && | |||
30354 | !ContainsZeros && (AllowIntDomain || Subtarget.hasAVX())) { | |||
30355 | SmallVector<int, 4> RepeatedMask; | |||
30356 | if (is128BitLaneRepeatedShuffleMask(MaskEltVT, Mask, RepeatedMask)) { | |||
30357 | // Narrow the repeated mask to create 32-bit element permutes. | |||
30358 | SmallVector<int, 4> WordMask = RepeatedMask; | |||
30359 | if (MaskScalarSizeInBits == 64) | |||
30360 | scaleShuffleMask<int>(2, RepeatedMask, WordMask); | |||
30361 | ||||
30362 | Shuffle = (AllowIntDomain ? X86ISD::PSHUFD : X86ISD::VPERMILPI); | |||
30363 | ShuffleVT = (AllowIntDomain ? MVT::i32 : MVT::f32); | |||
30364 | ShuffleVT = MVT::getVectorVT(ShuffleVT, InputSizeInBits / 32); | |||
30365 | PermuteImm = getV4X86ShuffleImm(WordMask); | |||
30366 | return true; | |||
30367 | } | |||
30368 | } | |||
30369 | ||||
30370 | // Handle PSHUFLW/PSHUFHW vXi16 repeated patterns. | |||
30371 | if (!ContainsZeros && AllowIntDomain && MaskScalarSizeInBits == 16) { | |||
30372 | SmallVector<int, 4> RepeatedMask; | |||
30373 | if (is128BitLaneRepeatedShuffleMask(MaskEltVT, Mask, RepeatedMask)) { | |||
30374 | ArrayRef<int> LoMask(Mask.data() + 0, 4); | |||
30375 | ArrayRef<int> HiMask(Mask.data() + 4, 4); | |||
30376 | ||||
30377 | // PSHUFLW: permute lower 4 elements only. | |||
30378 | if (isUndefOrInRange(LoMask, 0, 4) && | |||
30379 | isSequentialOrUndefInRange(HiMask, 0, 4, 4)) { | |||
30380 | Shuffle = X86ISD::PSHUFLW; | |||
30381 | ShuffleVT = MVT::getVectorVT(MVT::i16, InputSizeInBits / 16); | |||
30382 | PermuteImm = getV4X86ShuffleImm(LoMask); | |||
30383 | return true; | |||
30384 | } | |||
30385 | ||||
30386 | // PSHUFHW: permute upper 4 elements only. | |||
30387 | if (isUndefOrInRange(HiMask, 4, 8) && | |||
30388 | isSequentialOrUndefInRange(LoMask, 0, 4, 0)) { | |||
30389 | // Offset the HiMask so that we can create the shuffle immediate. | |||
30390 | int OffsetHiMask[4]; | |||
30391 | for (int i = 0; i != 4; ++i) | |||
30392 | OffsetHiMask[i] = (HiMask[i] < 0 ? HiMask[i] : HiMask[i] - 4); | |||
30393 | ||||
30394 | Shuffle = X86ISD::PSHUFHW; | |||
30395 | ShuffleVT = MVT::getVectorVT(MVT::i16, InputSizeInBits / 16); | |||
30396 | PermuteImm = getV4X86ShuffleImm(OffsetHiMask); | |||
30397 | return true; | |||
30398 | } | |||
30399 | } | |||
30400 | } | |||
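| // e.g. mask {0,1,2,3,7,6,5,4} leaves the low quad in place, so it matches | |||
| // PSHUFHW with OffsetHiMask {3,2,1,0} encoded into the immediate. | |||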
30401 | ||||
30402 | // Attempt to match against byte/bit shifts. | |||
30403 | // FIXME: Add 512-bit support. | |||
30404 | if (AllowIntDomain && ((MaskVT.is128BitVector() && Subtarget.hasSSE2()) || | |||
30405 | (MaskVT.is256BitVector() && Subtarget.hasAVX2()))) { | |||
30406 | int ShiftAmt = matchVectorShuffleAsShift(ShuffleVT, Shuffle, | |||
30407 | MaskScalarSizeInBits, Mask, | |||
30408 | 0, Zeroable, Subtarget); | |||
30409 | if (0 < ShiftAmt) { | |||
30410 | PermuteImm = (unsigned)ShiftAmt; | |||
30411 | return true; | |||
30412 | } | |||
30413 | } | |||
30414 | ||||
30415 | return false; | |||
30416 | } | |||
30417 | ||||
30418 | // Attempt to match a combined unary shuffle mask against supported binary | |||
30419 | // shuffle instructions. | |||
30420 | // TODO: Investigate sharing more of this with shuffle lowering. | |||
30421 | static bool matchBinaryVectorShuffle(MVT MaskVT, ArrayRef<int> Mask, | |||
30422 | bool AllowFloatDomain, bool AllowIntDomain, | |||
30423 | SDValue &V1, SDValue &V2, const SDLoc &DL, | |||
30424 | SelectionDAG &DAG, | |||
30425 | const X86Subtarget &Subtarget, | |||
30426 | unsigned &Shuffle, MVT &SrcVT, MVT &DstVT, | |||
30427 | bool IsUnary) { | |||
30428 | unsigned EltSizeInBits = MaskVT.getScalarSizeInBits(); | |||
30429 | ||||
30430 | if (MaskVT.is128BitVector()) { | |||
30431 | if (isTargetShuffleEquivalent(Mask, {0, 0}) && AllowFloatDomain) { | |||
30432 | V2 = V1; | |||
30433 | V1 = (SM_SentinelUndef == Mask[0] ? DAG.getUNDEF(MVT::v4f32) : V1); | |||
30434 | Shuffle = Subtarget.hasSSE2() ? X86ISD::UNPCKL : X86ISD::MOVLHPS; | |||
30435 | SrcVT = DstVT = Subtarget.hasSSE2() ? MVT::v2f64 : MVT::v4f32; | |||
30436 | return true; | |||
30437 | } | |||
30438 | if (isTargetShuffleEquivalent(Mask, {1, 1}) && AllowFloatDomain) { | |||
30439 | V2 = V1; | |||
30440 | Shuffle = Subtarget.hasSSE2() ? X86ISD::UNPCKH : X86ISD::MOVHLPS; | |||
30441 | SrcVT = DstVT = Subtarget.hasSSE2() ? MVT::v2f64 : MVT::v4f32; | |||
30442 | return true; | |||
30443 | } | |||
30444 | if (isTargetShuffleEquivalent(Mask, {0, 3}) && Subtarget.hasSSE2() && | |||
30445 | (AllowFloatDomain || !Subtarget.hasSSE41())) { | |||
30446 | std::swap(V1, V2); | |||
30447 | Shuffle = X86ISD::MOVSD; | |||
30448 | SrcVT = DstVT = MVT::v2f64; | |||
30449 | return true; | |||
30450 | } | |||
30451 | if (isTargetShuffleEquivalent(Mask, {4, 1, 2, 3}) && | |||
30452 | (AllowFloatDomain || !Subtarget.hasSSE41())) { | |||
30453 | Shuffle = X86ISD::MOVSS; | |||
30454 | SrcVT = DstVT = MVT::v4f32; | |||
30455 | return true; | |||
30456 | } | |||
30457 | } | |||
30458 | ||||
30459 | // Attempt to match against either a unary or binary PACKSS/PACKUS shuffle. | |||
30460 | // TODO add support for 256/512-bit types. | |||
30461 | if ((MaskVT == MVT::v8i16 || MaskVT == MVT::v16i8) && Subtarget.hasSSE2()) { | |||
30462 | if (matchVectorShuffleWithPACK(MaskVT, SrcVT, V1, V2, Shuffle, Mask, DAG, | |||
30463 | Subtarget)) { | |||
30464 | DstVT = MaskVT; | |||
30465 | return true; | |||
30466 | } | |||
30467 | } | |||
30468 | ||||
30469 | // Attempt to match against either a unary or binary UNPCKL/UNPCKH shuffle. | |||
30470 | if ((MaskVT == MVT::v4f32 && Subtarget.hasSSE1()) || | |||
30471 | (MaskVT.is128BitVector() && Subtarget.hasSSE2()) || | |||
30472 | (MaskVT.is256BitVector() && 32 <= EltSizeInBits && Subtarget.hasAVX()) || | |||
30473 | (MaskVT.is256BitVector() && Subtarget.hasAVX2()) || | |||
30474 | (MaskVT.is512BitVector() && Subtarget.hasAVX512())) { | |||
30475 | if (matchVectorShuffleWithUNPCK(MaskVT, V1, V2, Shuffle, IsUnary, Mask, DL, | |||
30476 | DAG, Subtarget)) { | |||
30477 | SrcVT = DstVT = MaskVT; | |||
30478 | if (MaskVT.is256BitVector() && !Subtarget.hasAVX2()) | |||
30479 | SrcVT = DstVT = (32 == EltSizeInBits ? MVT::v8f32 : MVT::v4f64); | |||
30480 | return true; | |||
30481 | } | |||
30482 | } | |||
30483 | ||||
30484 | return false; | |||
30485 | } | |||
30486 | ||||
30487 | static bool matchBinaryPermuteVectorShuffle( | |||
30488 | MVT MaskVT, ArrayRef<int> Mask, const APInt &Zeroable, | |||
30489 | bool AllowFloatDomain, bool AllowIntDomain, SDValue &V1, SDValue &V2, | |||
30490 | const SDLoc &DL, SelectionDAG &DAG, const X86Subtarget &Subtarget, | |||
30491 | unsigned &Shuffle, MVT &ShuffleVT, unsigned &PermuteImm) { | |||
30492 | unsigned NumMaskElts = Mask.size(); | |||
30493 | unsigned EltSizeInBits = MaskVT.getScalarSizeInBits(); | |||
30494 | ||||
30495 | // Attempt to match against PALIGNR byte rotate. | |||
30496 | if (AllowIntDomain && ((MaskVT.is128BitVector() && Subtarget.hasSSSE3()) || | |||
30497 | (MaskVT.is256BitVector() && Subtarget.hasAVX2()))) { | |||
30498 | int ByteRotation = matchVectorShuffleAsByteRotate(MaskVT, V1, V2, Mask); | |||
30499 | if (0 < ByteRotation) { | |||
30500 | Shuffle = X86ISD::PALIGNR; | |||
30501 | ShuffleVT = MVT::getVectorVT(MVT::i8, MaskVT.getSizeInBits() / 8); | |||
30502 | PermuteImm = ByteRotation; | |||
30503 | return true; | |||
30504 | } | |||
30505 | } | |||
30506 | ||||
30507 | // Attempt to combine to X86ISD::BLENDI. | |||
30508 | if ((NumMaskElts <= 8 && ((Subtarget.hasSSE41() && MaskVT.is128BitVector()) || | |||
30509 | (Subtarget.hasAVX() && MaskVT.is256BitVector()))) || | |||
30510 | (MaskVT == MVT::v16i16 && Subtarget.hasAVX2())) { | |||
30511 | uint64_t BlendMask = 0; | |||
30512 | bool ForceV1Zero = false, ForceV2Zero = false; | |||
30513 | SmallVector<int, 8> TargetMask(Mask.begin(), Mask.end()); | |||
30514 | if (matchVectorShuffleAsBlend(V1, V2, TargetMask, ForceV1Zero, ForceV2Zero, | |||
30515 | BlendMask)) { | |||
30516 | if (MaskVT == MVT::v16i16) { | |||
30517 | // We can only use v16i16 PBLENDW if the lanes are repeated. | |||
30518 | SmallVector<int, 8> RepeatedMask; | |||
30519 | if (isRepeatedTargetShuffleMask(128, MaskVT, TargetMask, | |||
30520 | RepeatedMask)) { | |||
30521 | assert(RepeatedMask.size() == 8 && | |||
30522 | "Repeated mask size doesn't match!"); | |||
30523 | PermuteImm = 0; | |||
30524 | for (int i = 0; i < 8; ++i) | |||
30525 | if (RepeatedMask[i] >= 8) | |||
30526 | PermuteImm |= 1 << i; | |||
30527 | V1 = ForceV1Zero ? getZeroVector(MaskVT, Subtarget, DAG, DL) : V1; | |||
30528 | V2 = ForceV2Zero ? getZeroVector(MaskVT, Subtarget, DAG, DL) : V2; | |||
30529 | Shuffle = X86ISD::BLENDI; | |||
30530 | ShuffleVT = MaskVT; | |||
30531 | return true; | |||
30532 | } | |||
30533 | } else { | |||
30534 | // Determine a type compatible with X86ISD::BLENDI. | |||
30535 | ShuffleVT = MaskVT; | |||
30536 | if (Subtarget.hasAVX2()) { | |||
30537 | if (ShuffleVT == MVT::v4i64) | |||
30538 | ShuffleVT = MVT::v8i32; | |||
30539 | else if (ShuffleVT == MVT::v2i64) | |||
30540 | ShuffleVT = MVT::v4i32; | |||
30541 | } else { | |||
30542 | if (ShuffleVT == MVT::v2i64 || ShuffleVT == MVT::v4i32) | |||
30543 | ShuffleVT = MVT::v8i16; | |||
30544 | else if (ShuffleVT == MVT::v4i64) | |||
30545 | ShuffleVT = MVT::v4f64; | |||
30546 | else if (ShuffleVT == MVT::v8i32) | |||
30547 | ShuffleVT = MVT::v8f32; | |||
30548 | } | |||
30549 | ||||
30550 | if (!ShuffleVT.isFloatingPoint()) { | |||
30551 | int Scale = EltSizeInBits / ShuffleVT.getScalarSizeInBits(); | |||
30552 | BlendMask = | |||
30553 | scaleVectorShuffleBlendMask(BlendMask, NumMaskElts, Scale); | |||
30554 | ShuffleVT = MVT::getIntegerVT(EltSizeInBits / Scale); | |||
30555 | ShuffleVT = MVT::getVectorVT(ShuffleVT, NumMaskElts * Scale); | |||
30556 | } | |||
30557 | ||||
30558 | V1 = ForceV1Zero ? getZeroVector(MaskVT, Subtarget, DAG, DL) : V1; | |||
30559 | V2 = ForceV2Zero ? getZeroVector(MaskVT, Subtarget, DAG, DL) : V2; | |||
30560 | PermuteImm = (unsigned)BlendMask; | |||
30561 | Shuffle = X86ISD::BLENDI; | |||
30562 | return true; | |||
30563 | } | |||
30564 | } | |||
30565 | } | |||
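| // e.g. on SSE4.1 a v2i64 blend taking the high element from V2 (BlendMask | |||
| // 0b10) is retyped to a v8i16 PBLENDW with the mask scaled by 4 to 0xF0. | |||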
30566 | ||||
30567 | // Attempt to combine to INSERTPS. | |||
30568 | if (AllowFloatDomain && EltSizeInBits == 32 && Subtarget.hasSSE41() && | |||
30569 | MaskVT.is128BitVector()) { | |||
30570 | if (Zeroable.getBoolValue() && | |||
30571 | matchVectorShuffleAsInsertPS(V1, V2, PermuteImm, Zeroable, Mask, DAG)) { | |||
30572 | Shuffle = X86ISD::INSERTPS; | |||
30573 | ShuffleVT = MVT::v4f32; | |||
30574 | return true; | |||
30575 | } | |||
30576 | } | |||
30577 | ||||
30578 | // Attempt to combine to SHUFPD. | |||
30579 | if (AllowFloatDomain && EltSizeInBits == 64 && | |||
30580 | ((MaskVT.is128BitVector() && Subtarget.hasSSE2()) || | |||
30581 | (MaskVT.is256BitVector() && Subtarget.hasAVX()) || | |||
30582 | (MaskVT.is512BitVector() && Subtarget.hasAVX512()))) { | |||
30583 | if (matchVectorShuffleWithSHUFPD(MaskVT, V1, V2, PermuteImm, Mask)) { | |||
30584 | Shuffle = X86ISD::SHUFP; | |||
30585 | ShuffleVT = MVT::getVectorVT(MVT::f64, MaskVT.getSizeInBits() / 64); | |||
30586 | return true; | |||
30587 | } | |||
30588 | } | |||
30589 | ||||
30590 | // Attempt to combine to SHUFPS. | |||
30591 | if (AllowFloatDomain && EltSizeInBits == 32 && | |||
30592 | ((MaskVT.is128BitVector() && Subtarget.hasSSE1()) || | |||
30593 | (MaskVT.is256BitVector() && Subtarget.hasAVX()) || | |||
30594 | (MaskVT.is512BitVector() && Subtarget.hasAVX512()))) { | |||
30595 | SmallVector<int, 4> RepeatedMask; | |||
30596 | if (isRepeatedTargetShuffleMask(128, MaskVT, Mask, RepeatedMask)) { | |||
30597 | // Match each half of the repeated mask, to determine if it's just | |||
30598 | // referencing one of the vectors, is zeroable, or is entirely undef. | |||
30599 | auto MatchHalf = [&](unsigned Offset, int &S0, int &S1) { | |||
30600 | int M0 = RepeatedMask[Offset]; | |||
30601 | int M1 = RepeatedMask[Offset + 1]; | |||
30602 | ||||
30603 | if (isUndefInRange(RepeatedMask, Offset, 2)) { | |||
30604 | return DAG.getUNDEF(MaskVT); | |||
30605 | } else if (isUndefOrZeroInRange(RepeatedMask, Offset, 2)) { | |||
30606 | S0 = (SM_SentinelUndef == M0 ? -1 : 0); | |||
30607 | S1 = (SM_SentinelUndef == M1 ? -1 : 1); | |||
30608 | return getZeroVector(MaskVT, Subtarget, DAG, DL); | |||
30609 | } else if (isUndefOrInRange(M0, 0, 4) && isUndefOrInRange(M1, 0, 4)) { | |||
30610 | S0 = (SM_SentinelUndef == M0 ? -1 : M0 & 3); | |||
30611 | S1 = (SM_SentinelUndef == M1 ? -1 : M1 & 3); | |||
30612 | return V1; | |||
30613 | } else if (isUndefOrInRange(M0, 4, 8) && isUndefOrInRange(M1, 4, 8)) { | |||
30614 | S0 = (SM_SentinelUndef == M0 ? -1 : M0 & 3); | |||
30615 | S1 = (SM_SentinelUndef == M1 ? -1 : M1 & 3); | |||
30616 | return V2; | |||
30617 | } | |||
30618 | ||||
30619 | return SDValue(); | |||
30620 | }; | |||
30621 | ||||
30622 | int ShufMask[4] = {-1, -1, -1, -1}; | |||
30623 | SDValue Lo = MatchHalf(0, ShufMask[0], ShufMask[1]); | |||
30624 | SDValue Hi = MatchHalf(2, ShufMask[2], ShufMask[3]); | |||
30625 | ||||
30626 | if (Lo && Hi) { | |||
30627 | V1 = Lo; | |||
30628 | V2 = Hi; | |||
30629 | Shuffle = X86ISD::SHUFP; | |||
30630 | ShuffleVT = MVT::getVectorVT(MVT::f32, MaskVT.getSizeInBits() / 32); | |||
30631 | PermuteImm = getV4X86ShuffleImm(ShufMask); | |||
30632 | return true; | |||
30633 | } | |||
30634 | } | |||
30635 | } | |||
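| // e.g. repeated mask {0,1,4,5} resolves Lo = V1 and Hi = V2 with ShufMask | |||
| // {0,1,0,1}, i.e. SHUFPS immediate 0x44 taking two elements from each input. | |||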
30636 | ||||
30637 | return false; | |||
30638 | } | |||
30639 | ||||
30640 | /// Combine an arbitrary chain of shuffles into a single instruction if | |||
30641 | /// possible. | |||
30642 | /// | |||
30643 | /// This is the leaf of the recursive combine below. When we have found some | |||
30644 | /// chain of single-use x86 shuffle instructions and accumulated the combined | |||
30645 | /// shuffle mask represented by them, this will try to pattern match that mask | |||
30646 | /// into either a single instruction if there is a special purpose instruction | |||
30647 | /// for this operation, or into a PSHUFB instruction which is a fully general | |||
30648 | /// instruction but should only be used to replace chains over a certain depth. | |||
30649 | static SDValue combineX86ShuffleChain(ArrayRef<SDValue> Inputs, SDValue Root, | |||
30650 | ArrayRef<int> BaseMask, int Depth, | |||
30651 | bool HasVariableMask, | |||
30652 | bool AllowVariableMask, SelectionDAG &DAG, | |||
30653 | const X86Subtarget &Subtarget) { | |||
30654 | assert(!BaseMask.empty() && "Cannot combine an empty shuffle mask!"); | |||
30655 | assert((Inputs.size() == 1 || Inputs.size() == 2) && | |||
30656 | "Unexpected number of shuffle inputs!"); | |||
30657 | ||||
30658 | // Find the inputs that enter the chain. Note that multiple uses are OK | |||
30659 | // here, we're not going to remove the operands we find. | |||
30660 | bool UnaryShuffle = (Inputs.size() == 1); | |||
30661 | SDValue V1 = peekThroughBitcasts(Inputs[0]); | |||
30662 | SDValue V2 = (UnaryShuffle ? DAG.getUNDEF(V1.getValueType()) | |||
30663 | : peekThroughBitcasts(Inputs[1])); | |||
30664 | ||||
30665 | MVT VT1 = V1.getSimpleValueType(); | |||
30666 | MVT VT2 = V2.getSimpleValueType(); | |||
30667 | MVT RootVT = Root.getSimpleValueType(); | |||
30668 | assert(VT1.getSizeInBits() == RootVT.getSizeInBits() && | |||
30669 | VT2.getSizeInBits() == RootVT.getSizeInBits() && | |||
30670 | "Vector size mismatch"); | |||
30671 | ||||
30672 | SDLoc DL(Root); | |||
30673 | SDValue Res; | |||
30674 | ||||
30675 | unsigned NumBaseMaskElts = BaseMask.size(); | |||
30676 | if (NumBaseMaskElts == 1) { | |||
30677 | assert(BaseMask[0] == 0 && "Invalid shuffle index found!"); | |||
30678 | return DAG.getBitcast(RootVT, V1); | |||
30679 | } | |||
30680 | ||||
30681 | unsigned RootSizeInBits = RootVT.getSizeInBits(); | |||
30682 | unsigned NumRootElts = RootVT.getVectorNumElements(); | |||
30683 | unsigned BaseMaskEltSizeInBits = RootSizeInBits / NumBaseMaskElts; | |||
30684 | bool FloatDomain = VT1.isFloatingPoint() || VT2.isFloatingPoint() || | |||
30685 | (RootVT.isFloatingPoint() && Depth >= 2) || | |||
30686 | (RootVT.is256BitVector() && !Subtarget.hasAVX2()); | |||
30687 | ||||
30688 | // Don't combine if we are an AVX512/EVEX target and the mask element size | |||
30689 | // is different from the root element size - this would prevent writemasks | |||
30690 | // from being reused. | |||
30691 | // TODO - this currently prevents all lane shuffles from occurring. | |||
30692 | // TODO - check for writemasks usage instead of always preventing combining. | |||
30693 | // TODO - attempt to narrow Mask back to writemask size. | |||
30694 | bool IsEVEXShuffle = | |||
30695 | RootSizeInBits == 512 || (Subtarget.hasVLX() && RootSizeInBits >= 128); | |||
30696 | ||||
30697 | // TODO - handle 128/256-bit lane shuffles of 512-bit vectors. | |||
30698 | ||||
30699 | // Handle 128-bit lane shuffles of 256-bit vectors. | |||
30700 | // If we have AVX2, prefer to use VPERMQ/VPERMPD for unary shuffles unless | |||
30701 | // we need to use the zeroing feature. | |||
30702 | // TODO - this should support binary shuffles. | |||
30703 | if (UnaryShuffle && RootVT.is256BitVector() && NumBaseMaskElts == 2 && | |||
30704 | !(Subtarget.hasAVX2() && BaseMask[0] >= -1 && BaseMask[1] >= -1) && | |||
30705 | !isSequentialOrUndefOrZeroInRange(BaseMask, 0, 2, 0)) { | |||
30706 | if (Depth == 1 && Root.getOpcode() == X86ISD::VPERM2X128) | |||
30707 | return SDValue(); // Nothing to do! | |||
30708 | MVT ShuffleVT = (FloatDomain ? MVT::v4f64 : MVT::v4i64); | |||
30709 | unsigned PermMask = 0; | |||
30710 | PermMask |= ((BaseMask[0] < 0 ? 0x8 : (BaseMask[0] & 1)) << 0); | |||
30711 | PermMask |= ((BaseMask[1] < 0 ? 0x8 : (BaseMask[1] & 1)) << 4); | |||
30712 | ||||
30713 | Res = DAG.getBitcast(ShuffleVT, V1); | |||
30714 | Res = DAG.getNode(X86ISD::VPERM2X128, DL, ShuffleVT, Res, | |||
30715 | DAG.getUNDEF(ShuffleVT), | |||
30716 | DAG.getConstant(PermMask, DL, MVT::i8)); | |||
30717 | return DAG.getBitcast(RootVT, Res); | |||
30718 | } | |||
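| // e.g. a widened base mask {1,0} (swap the 128-bit halves) yields PermMask | |||
| // 0x01, while {1,Z} (upper half zeroable) sets the 0x8 bit: PermMask 0x81. | |||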
30719 | ||||
30720 | // For masks that have been widened to 128-bit elements or more, | |||
30721 | // narrow back down to 64-bit elements. | |||
30722 | SmallVector<int, 64> Mask; | |||
30723 | if (BaseMaskEltSizeInBits > 64) { | |||
30724 | assert((BaseMaskEltSizeInBits % 64) == 0 && "Illegal mask size"); | |||
30725 | int MaskScale = BaseMaskEltSizeInBits / 64; | |||
30726 | scaleShuffleMask<int>(MaskScale, BaseMask, Mask); | |||
30727 | } else { | |||
30728 | Mask = SmallVector<int, 64>(BaseMask.begin(), BaseMask.end()); | |||
30729 | } | |||
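| // e.g. narrowing the 128-bit element mask {1,0} to 64-bit elements | |||
| // produces {2,3,0,1}. | |||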
30730 | ||||
30731 | unsigned NumMaskElts = Mask.size(); | |||
30732 | unsigned MaskEltSizeInBits = RootSizeInBits / NumMaskElts; | |||
30733 | ||||
30734 | // Determine the effective mask value type. | |||
30735 | FloatDomain &= (32 <= MaskEltSizeInBits); | |||
30736 | MVT MaskVT = FloatDomain ? MVT::getFloatingPointVT(MaskEltSizeInBits) | |||
30737 | : MVT::getIntegerVT(MaskEltSizeInBits); | |||
30738 | MaskVT = MVT::getVectorVT(MaskVT, NumMaskElts); | |||
30739 | ||||
30740 | // Only allow legal mask types. | |||
30741 | if (!DAG.getTargetLoweringInfo().isTypeLegal(MaskVT)) | |||
30742 | return SDValue(); | |||
30743 | ||||
30744 | // Attempt to match the mask against known shuffle patterns. | |||
30745 | MVT ShuffleSrcVT, ShuffleVT; | |||
30746 | unsigned Shuffle, PermuteImm; | |||
30747 | ||||
30748 | // Which shuffle domains are permitted? | |||
30749 | // Permit domain crossing at higher combine depths. | |||
30750 | bool AllowFloatDomain = FloatDomain || (Depth > 3); | |||
30751 | bool AllowIntDomain = (!FloatDomain || (Depth > 3)) && Subtarget.hasSSE2() && | |||
30752 | (!MaskVT.is256BitVector() || Subtarget.hasAVX2()); | |||
30753 | ||||
30754 | // Determine zeroable mask elements. | |||
30755 | APInt Zeroable(NumMaskElts, 0); | |||
30756 | for (unsigned i = 0; i != NumMaskElts; ++i) | |||
30757 | if (isUndefOrZero(Mask[i])) | |||
30758 | Zeroable.setBit(i); | |||
30759 | ||||
30760 | if (UnaryShuffle) { | |||
30761 | // If we are shuffling an X86ISD::VZEXT_LOAD then we can use the load | |||
30762 | // directly if we don't shuffle the lower element and we shuffle the upper | |||
30763 | // (zero) elements within themselves. | |||
30764 | if (V1.getOpcode() == X86ISD::VZEXT_LOAD && | |||
30765 | (V1.getScalarValueSizeInBits() % MaskEltSizeInBits) == 0) { | |||
30766 | unsigned Scale = V1.getScalarValueSizeInBits() / MaskEltSizeInBits; | |||
30767 | ArrayRef<int> HiMask(Mask.data() + Scale, NumMaskElts - Scale); | |||
30768 | if (isSequentialOrUndefInRange(Mask, 0, Scale, 0) && | |||
30769 | isUndefOrZeroOrInRange(HiMask, Scale, NumMaskElts)) { | |||
30770 | return DAG.getBitcast(RootVT, V1); | |||
30771 | } | |||
30772 | } | |||
30773 | ||||
30774 | SDValue NewV1 = V1; // Save operand in case early exit happens. | |||
30775 | if (matchUnaryVectorShuffle(MaskVT, Mask, AllowFloatDomain, AllowIntDomain, | |||
30776 | NewV1, DL, DAG, Subtarget, Shuffle, | |||
30777 | ShuffleSrcVT, ShuffleVT) && | |||
30778 | (!IsEVEXShuffle || (NumRootElts == ShuffleVT.getVectorNumElements()))) { | |||
30779 | if (Depth == 1 && Root.getOpcode() == Shuffle) | |||
30780 | return SDValue(); // Nothing to do! | |||
30781 | Res = DAG.getBitcast(ShuffleSrcVT, NewV1); | |||
30782 | Res = DAG.getNode(Shuffle, DL, ShuffleVT, Res); | |||
30783 | return DAG.getBitcast(RootVT, Res); | |||
30784 | } | |||
30785 | ||||
30786 | if (matchUnaryPermuteVectorShuffle(MaskVT, Mask, Zeroable, AllowFloatDomain, | |||
30787 | AllowIntDomain, Subtarget, Shuffle, | |||
30788 | ShuffleVT, PermuteImm) && | |||
30789 | (!IsEVEXShuffle || (NumRootElts == ShuffleVT.getVectorNumElements()))) { | |||
30790 | if (Depth == 1 && Root.getOpcode() == Shuffle) | |||
30791 | return SDValue(); // Nothing to do! | |||
30792 | Res = DAG.getBitcast(ShuffleVT, V1); | |||
30793 | Res = DAG.getNode(Shuffle, DL, ShuffleVT, Res, | |||
30794 | DAG.getConstant(PermuteImm, DL, MVT::i8)); | |||
30795 | return DAG.getBitcast(RootVT, Res); | |||
30796 | } | |||
30797 | } | |||
30798 | ||||
30799 | SDValue NewV1 = V1; // Save operands in case early exit happens. | |||
30800 | SDValue NewV2 = V2; | |||
30801 | if (matchBinaryVectorShuffle(MaskVT, Mask, AllowFloatDomain, AllowIntDomain, | |||
30802 | NewV1, NewV2, DL, DAG, Subtarget, Shuffle, | |||
30803 | ShuffleSrcVT, ShuffleVT, UnaryShuffle) && | |||
30804 | (!IsEVEXShuffle || (NumRootElts == ShuffleVT.getVectorNumElements()))) { | |||
30805 | if (Depth == 1 && Root.getOpcode() == Shuffle) | |||
30806 | return SDValue(); // Nothing to do! | |||
30807 | NewV1 = DAG.getBitcast(ShuffleSrcVT, NewV1); | |||
30808 | NewV2 = DAG.getBitcast(ShuffleSrcVT, NewV2); | |||
30809 | Res = DAG.getNode(Shuffle, DL, ShuffleVT, NewV1, NewV2); | |||
30810 | return DAG.getBitcast(RootVT, Res); | |||
30811 | } | |||
30812 | ||||
30813 | NewV1 = V1; // Save operands in case early exit happens. | |||
30814 | NewV2 = V2; | |||
30815 | if (matchBinaryPermuteVectorShuffle( | |||
30816 | MaskVT, Mask, Zeroable, AllowFloatDomain, AllowIntDomain, NewV1, | |||
30817 | NewV2, DL, DAG, Subtarget, Shuffle, ShuffleVT, PermuteImm) && | |||
30818 | (!IsEVEXShuffle || (NumRootElts == ShuffleVT.getVectorNumElements()))) { | |||
30819 | if (Depth == 1 && Root.getOpcode() == Shuffle) | |||
30820 | return SDValue(); // Nothing to do! | |||
30821 | NewV1 = DAG.getBitcast(ShuffleVT, NewV1); | |||
30822 | NewV2 = DAG.getBitcast(ShuffleVT, NewV2); | |||
30823 | Res = DAG.getNode(Shuffle, DL, ShuffleVT, NewV1, NewV2, | |||
30824 | DAG.getConstant(PermuteImm, DL, MVT::i8)); | |||
30825 | return DAG.getBitcast(RootVT, Res); | |||
30826 | } | |||
30827 | ||||
30828 | // Typically from here on, we need an integer version of MaskVT. | |||
30829 | MVT IntMaskVT = MVT::getIntegerVT(MaskEltSizeInBits); | |||
30830 | IntMaskVT = MVT::getVectorVT(IntMaskVT, NumMaskElts); | |||
30831 | ||||
30832 | // Annoyingly, SSE4A instructions don't map into the above match helpers. | |||
30833 | if (Subtarget.hasSSE4A() && AllowIntDomain && RootSizeInBits == 128) { | |||
30834 | uint64_t BitLen, BitIdx; | |||
30835 | if (matchVectorShuffleAsEXTRQ(IntMaskVT, V1, V2, Mask, BitLen, BitIdx, | |||
30836 | Zeroable)) { | |||
30837 | if (Depth == 1 && Root.getOpcode() == X86ISD::EXTRQI) | |||
30838 | return SDValue(); // Nothing to do! | |||
30839 | V1 = DAG.getBitcast(IntMaskVT, V1); | |||
30840 | Res = DAG.getNode(X86ISD::EXTRQI, DL, IntMaskVT, V1, | |||
30841 | DAG.getConstant(BitLen, DL, MVT::i8), | |||
30842 | DAG.getConstant(BitIdx, DL, MVT::i8)); | |||
30843 | return DAG.getBitcast(RootVT, Res); | |||
30844 | } | |||
30845 | ||||
30846 | if (matchVectorShuffleAsINSERTQ(IntMaskVT, V1, V2, Mask, BitLen, BitIdx)) { | |||
30847 | if (Depth == 1 && Root.getOpcode() == X86ISD::INSERTQI) | |||
30848 | return SDValue(); // Nothing to do! | |||
30849 | V1 = DAG.getBitcast(IntMaskVT, V1); | |||
30850 | V2 = DAG.getBitcast(IntMaskVT, V2); | |||
30851 | Res = DAG.getNode(X86ISD::INSERTQI, DL, IntMaskVT, V1, V2, | |||
30852 | DAG.getConstant(BitLen, DL, MVT::i8), | |||
30853 | DAG.getConstant(BitIdx, DL, MVT::i8)); | |||
30854 | return DAG.getBitcast(RootVT, Res); | |||
30855 | } | |||
30856 | } | |||
30857 | ||||
30858 | // Don't try to re-form single instruction chains under any circumstances now | |||
30859 | // that we've done encoding canonicalization for them. | |||
30860 | if (Depth < 2) | |||
30861 | return SDValue(); | |||
30862 | ||||
30863 | // Depth threshold above which we can efficiently use variable mask shuffles. | |||
30864 | int VariableShuffleDepth = Subtarget.hasFastVariableShuffle() ? 2 : 3; | |||
30865 | AllowVariableMask &= (Depth >= VariableShuffleDepth) || HasVariableMask; | |||
30866 | ||||
30867 | bool MaskContainsZeros = | |||
30868 | any_of(Mask, [](int M) { return M == SM_SentinelZero; }); | |||
30869 | ||||
30870 | if (is128BitLaneCrossingShuffleMask(MaskVT, Mask)) { | |||
30871 | // If we have a single input lane-crossing shuffle then lower to VPERMV. | |||
30872 | if (UnaryShuffle && AllowVariableMask && !MaskContainsZeros && | |||
30873 | ((Subtarget.hasAVX2() && | |||
30874 | (MaskVT == MVT::v8f32 || MaskVT == MVT::v8i32)) || | |||
30875 | (Subtarget.hasAVX512() && | |||
30876 | (MaskVT == MVT::v8f64 || MaskVT == MVT::v8i64 || | |||
30877 | MaskVT == MVT::v16f32 || MaskVT == MVT::v16i32)) || | |||
30878 | (Subtarget.hasBWI() && MaskVT == MVT::v32i16) || | |||
30879 | (Subtarget.hasBWI() && Subtarget.hasVLX() && MaskVT == MVT::v16i16) || | |||
30880 | (Subtarget.hasVBMI() && MaskVT == MVT::v64i8) || | |||
30881 | (Subtarget.hasVBMI() && Subtarget.hasVLX() && MaskVT == MVT::v32i8))) { | |||
30882 | SDValue VPermMask = getConstVector(Mask, IntMaskVT, DAG, DL, true); | |||
30883 | Res = DAG.getBitcast(MaskVT, V1); | |||
30884 | Res = DAG.getNode(X86ISD::VPERMV, DL, MaskVT, VPermMask, Res); | |||
30885 | return DAG.getBitcast(RootVT, Res); | |||
30886 | } | |||
30887 | ||||
30888 | // Lower a unary+zero lane-crossing shuffle as VPERMV3 with a zero | |||
30889 | // vector as the second source. | |||
30890 | if (UnaryShuffle && AllowVariableMask && | |||
30891 | ((Subtarget.hasAVX512() && | |||
30892 | (MaskVT == MVT::v8f64 || MaskVT == MVT::v8i64 || | |||
30893 | MaskVT == MVT::v16f32 || MaskVT == MVT::v16i32)) || | |||
30894 | (Subtarget.hasVLX() && | |||
30895 | (MaskVT == MVT::v4f64 || MaskVT == MVT::v4i64 || | |||
30896 | MaskVT == MVT::v8f32 || MaskVT == MVT::v8i32)) || | |||
30897 | (Subtarget.hasBWI() && MaskVT == MVT::v32i16) || | |||
30898 | (Subtarget.hasBWI() && Subtarget.hasVLX() && MaskVT == MVT::v16i16) || | |||
30899 | (Subtarget.hasVBMI() && MaskVT == MVT::v64i8) || | |||
30900 | (Subtarget.hasVBMI() && Subtarget.hasVLX() && MaskVT == MVT::v32i8))) { | |||
30901 | // Adjust shuffle mask - replace SM_SentinelZero with second source index. | |||
30902 | for (unsigned i = 0; i != NumMaskElts; ++i) | |||
30903 | if (Mask[i] == SM_SentinelZero) | |||
30904 | Mask[i] = NumMaskElts + i; | |||
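| // For example, a v8i32 mask <0, Z, 2, Z, 4, 5, 6, 7> becomes | |||
| // <0, 9, 2, 11, 4, 5, 6, 7>, where indices 8+ select the zero vector. | |||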
30905 | ||||
30906 | SDValue VPermMask = getConstVector(Mask, IntMaskVT, DAG, DL, true); | |||
30907 | Res = DAG.getBitcast(MaskVT, V1); | |||
30908 | SDValue Zero = getZeroVector(MaskVT, Subtarget, DAG, DL); | |||
30909 | Res = DAG.getNode(X86ISD::VPERMV3, DL, MaskVT, Res, VPermMask, Zero); | |||
30910 | return DAG.getBitcast(RootVT, Res); | |||
30911 | } | |||
30912 | ||||
30913 | // If we have a dual input lane-crossing shuffle then lower to VPERMV3. | |||
30914 | if (AllowVariableMask && !MaskContainsZeros && | |||
30915 | ((Subtarget.hasAVX512() && | |||
30916 | (MaskVT == MVT::v8f64 || MaskVT == MVT::v8i64 || | |||
30917 | MaskVT == MVT::v16f32 || MaskVT == MVT::v16i32)) || | |||
30918 | (Subtarget.hasVLX() && | |||
30919 | (MaskVT == MVT::v4f64 || MaskVT == MVT::v4i64 || | |||
30920 | MaskVT == MVT::v8f32 || MaskVT == MVT::v8i32)) || | |||
30921 | (Subtarget.hasBWI() && MaskVT == MVT::v32i16) || | |||
30922 | (Subtarget.hasBWI() && Subtarget.hasVLX() && MaskVT == MVT::v16i16) || | |||
30923 | (Subtarget.hasVBMI() && MaskVT == MVT::v64i8) || | |||
30924 | (Subtarget.hasVBMI() && Subtarget.hasVLX() && MaskVT == MVT::v32i8))) { | |||
30925 | SDValue VPermMask = getConstVector(Mask, IntMaskVT, DAG, DL, true); | |||
30926 | V1 = DAG.getBitcast(MaskVT, V1); | |||
30927 | V2 = DAG.getBitcast(MaskVT, V2); | |||
30928 | Res = DAG.getNode(X86ISD::VPERMV3, DL, MaskVT, V1, VPermMask, V2); | |||
30929 | return DAG.getBitcast(RootVT, Res); | |||
30930 | } | |||
30931 | return SDValue(); | |||
30932 | } | |||
30933 | ||||
30934 | // See if we can combine a single input shuffle with zeros to a bit-mask, | |||
30935 | // which is much simpler than any shuffle. | |||
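| // For example, a v4i32 mask <0, Z, 2, U> passes the sequential test and | |||
| // folds to an AND with the constant vector <-1, 0, -1, undef>. | |||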
30936 | if (UnaryShuffle && MaskContainsZeros && AllowVariableMask && | |||
30937 | isSequentialOrUndefOrZeroInRange(Mask, 0, NumMaskElts, 0) && | |||
30938 | DAG.getTargetLoweringInfo().isTypeLegal(MaskVT)) { | |||
30939 | APInt Zero = APInt::getNullValue(MaskEltSizeInBits); | |||
30940 | APInt AllOnes = APInt::getAllOnesValue(MaskEltSizeInBits); | |||
30941 | APInt UndefElts(NumMaskElts, 0); | |||
30942 | SmallVector<APInt, 64> EltBits(NumMaskElts, Zero); | |||
30943 | for (unsigned i = 0; i != NumMaskElts; ++i) { | |||
30944 | int M = Mask[i]; | |||
30945 | if (M == SM_SentinelUndef) { | |||
30946 | UndefElts.setBit(i); | |||
30947 | continue; | |||
30948 | } | |||
30949 | if (M == SM_SentinelZero) | |||
30950 | continue; | |||
30951 | EltBits[i] = AllOnes; | |||
30952 | } | |||
30953 | SDValue BitMask = getConstVector(EltBits, UndefElts, MaskVT, DAG, DL); | |||
30954 | Res = DAG.getBitcast(MaskVT, V1); | |||
30955 | unsigned AndOpcode = | |||
30956 | FloatDomain ? unsigned(X86ISD::FAND) : unsigned(ISD::AND); | |||
30957 | Res = DAG.getNode(AndOpcode, DL, MaskVT, Res, BitMask); | |||
30958 | return DAG.getBitcast(RootVT, Res); | |||
30959 | } | |||
30960 | ||||
30961 | // If we have a single input shuffle with different shuffle patterns in the | |||
30962 | // 128-bit lanes then lower to a variable-mask VPERMILPS. | |||
30963 | // TODO: Combine other mask types at higher depths. | |||
30964 | if (UnaryShuffle && AllowVariableMask && !MaskContainsZeros && | |||
30965 | ((MaskVT == MVT::v8f32 && Subtarget.hasAVX()) || | |||
30966 | (MaskVT == MVT::v16f32 && Subtarget.hasAVX512()))) { | |||
30967 | SmallVector<SDValue, 16> VPermIdx; | |||
30968 | for (int M : Mask) { | |||
30969 | SDValue Idx = | |||
30970 | M < 0 ? DAG.getUNDEF(MVT::i32) : DAG.getConstant(M % 4, DL, MVT::i32); | |||
30971 | VPermIdx.push_back(Idx); | |||
30972 | } | |||
30973 | SDValue VPermMask = DAG.getBuildVector(IntMaskVT, DL, VPermIdx); | |||
30974 | Res = DAG.getBitcast(MaskVT, V1); | |||
30975 | Res = DAG.getNode(X86ISD::VPERMILPV, DL, MaskVT, Res, VPermMask); | |||
30976 | return DAG.getBitcast(RootVT, Res); | |||
30977 | } | |||
30978 | ||||
30979 | // With XOP, binary shuffles of 128/256-bit floating point vectors can combine | |||
30980 | // to VPERMIL2PD/VPERMIL2PS. | |||
30981 | if (AllowVariableMask && Subtarget.hasXOP() && | |||
30982 | (MaskVT == MVT::v2f64 || MaskVT == MVT::v4f64 || MaskVT == MVT::v4f32 || | |||
30983 | MaskVT == MVT::v8f32)) { | |||
30984 | // VPERMIL2 Operation. | |||
30985 | // Bits[3] - Match Bit. | |||
30986 | // Bits[2:1] - (Per Lane) PD Shuffle Mask. | |||
30987 | // Bits[2:0] - (Per Lane) PS Shuffle Mask. | |||
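| // For example (illustrative, v4f32): mask element M = 5 (element 1 of the | |||
| // second source) maps to selector index 5 below; a zeroable element pushes | |||
| // selector 8 (the match bit) and sets M2ZImm = 2 so matched positions are | |||
| // forced to zero. | |||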
30988 | unsigned NumLanes = MaskVT.getSizeInBits() / 128; | |||
30989 | unsigned NumEltsPerLane = NumMaskElts / NumLanes; | |||
30990 | SmallVector<int, 8> VPerm2Idx; | |||
30991 | unsigned M2ZImm = 0; | |||
30992 | for (int M : Mask) { | |||
30993 | if (M == SM_SentinelUndef) { | |||
30994 | VPerm2Idx.push_back(-1); | |||
30995 | continue; | |||
30996 | } | |||
30997 | if (M == SM_SentinelZero) { | |||
30998 | M2ZImm = 2; | |||
30999 | VPerm2Idx.push_back(8); | |||
31000 | continue; | |||
31001 | } | |||
31002 | int Index = (M % NumEltsPerLane) + ((M / NumMaskElts) * NumEltsPerLane); | |||
31003 | Index = (MaskVT.getScalarSizeInBits() == 64 ? Index << 1 : Index); | |||
31004 | VPerm2Idx.push_back(Index); | |||
31005 | } | |||
31006 | V1 = DAG.getBitcast(MaskVT, V1); | |||
31007 | V2 = DAG.getBitcast(MaskVT, V2); | |||
31008 | SDValue VPerm2MaskOp = getConstVector(VPerm2Idx, IntMaskVT, DAG, DL, true); | |||
31009 | Res = DAG.getNode(X86ISD::VPERMIL2, DL, MaskVT, V1, V2, VPerm2MaskOp, | |||
31010 | DAG.getConstant(M2ZImm, DL, MVT::i8)); | |||
31011 | return DAG.getBitcast(RootVT, Res); | |||
31012 | } | |||
31013 | ||||
31014 | // If we have 3 or more shuffle instructions or a chain involving a variable | |||
31015 | // mask, we can replace them with a single PSHUFB instruction profitably. | |||
31016 | // Intel's manuals suggest only using PSHUFB if doing so replaces 5 | |||
31017 | // instructions, but in practice PSHUFB tends to be *very* fast so we're | |||
31018 | // more aggressive. | |||
31019 | if (UnaryShuffle && AllowVariableMask && | |||
31020 | ((RootVT.is128BitVector() && Subtarget.hasSSSE3()) || | |||
31021 | (RootVT.is256BitVector() && Subtarget.hasAVX2()) || | |||
31022 | (RootVT.is512BitVector() && Subtarget.hasBWI()))) { | |||
31023 | SmallVector<SDValue, 16> PSHUFBMask; | |||
31024 | int NumBytes = RootVT.getSizeInBits() / 8; | |||
31025 | int Ratio = NumBytes / NumMaskElts; | |||
31026 | for (int i = 0; i < NumBytes; ++i) { | |||
31027 | int M = Mask[i / Ratio]; | |||
31028 | if (M == SM_SentinelUndef) { | |||
31029 | PSHUFBMask.push_back(DAG.getUNDEF(MVT::i8)); | |||
31030 | continue; | |||
31031 | } | |||
31032 | if (M == SM_SentinelZero) { | |||
31033 | PSHUFBMask.push_back(DAG.getConstant(255, DL, MVT::i8)); | |||
31034 | continue; | |||
31035 | } | |||
31036 | M = Ratio * M + i % Ratio; | |||
31037 | assert((M / 16) == (i / 16) && "Lane crossing detected"); | |||
31038 | PSHUFBMask.push_back(DAG.getConstant(M, DL, MVT::i8)); | |||
31039 | } | |||
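| // For example, a v4i32 mask <1, Z, 0, U> (Ratio = 4) expands to the byte | |||
| // mask <4,5,6,7, 255,255,255,255, 0,1,2,3, U,U,U,U>; PSHUFB zeroes every | |||
| // byte whose control byte has the top bit set. | |||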
31040 | MVT ByteVT = MVT::getVectorVT(MVT::i8, NumBytes); | |||
31041 | Res = DAG.getBitcast(ByteVT, V1); | |||
31042 | SDValue PSHUFBMaskOp = DAG.getBuildVector(ByteVT, DL, PSHUFBMask); | |||
31043 | Res = DAG.getNode(X86ISD::PSHUFB, DL, ByteVT, Res, PSHUFBMaskOp); | |||
31044 | return DAG.getBitcast(RootVT, Res); | |||
31045 | } | |||
31046 | ||||
31047 | // With XOP, if we have a 128-bit binary input shuffle we can always combine | |||
31048 | // to VPPERM. We match the depth requirement of PSHUFB - VPPERM is never | |||
31049 | // slower than PSHUFB on targets that support both. | |||
31050 | if (AllowVariableMask && RootVT.is128BitVector() && Subtarget.hasXOP()) { | |||
31051 | // VPPERM Mask Operation | |||
31052 | // Bits[4:0] - Byte Index (0 - 31) | |||
31053 | // Bits[7:5] - Permute Operation (0 - Source byte, 4 - ZERO) | |||
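| // For example, a selector byte of 20 picks byte 4 of the second source | |||
| // (indices 16-31 map to V2), while 128 sets the ZERO operation and always | |||
| // produces a zero byte. | |||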
31054 | SmallVector<SDValue, 16> VPPERMMask; | |||
31055 | int NumBytes = 16; | |||
31056 | int Ratio = NumBytes / NumMaskElts; | |||
31057 | for (int i = 0; i < NumBytes; ++i) { | |||
31058 | int M = Mask[i / Ratio]; | |||
31059 | if (M == SM_SentinelUndef) { | |||
31060 | VPPERMMask.push_back(DAG.getUNDEF(MVT::i8)); | |||
31061 | continue; | |||
31062 | } | |||
31063 | if (M == SM_SentinelZero) { | |||
31064 | VPPERMMask.push_back(DAG.getConstant(128, DL, MVT::i8)); | |||
31065 | continue; | |||
31066 | } | |||
31067 | M = Ratio * M + i % Ratio; | |||
31068 | VPPERMMask.push_back(DAG.getConstant(M, DL, MVT::i8)); | |||
31069 | } | |||
31070 | MVT ByteVT = MVT::v16i8; | |||
31071 | V1 = DAG.getBitcast(ByteVT, V1); | |||
31072 | V2 = DAG.getBitcast(ByteVT, V2); | |||
31073 | SDValue VPPERMMaskOp = DAG.getBuildVector(ByteVT, DL, VPPERMMask); | |||
31074 | Res = DAG.getNode(X86ISD::VPPERM, DL, ByteVT, V1, V2, VPPERMMaskOp); | |||
31075 | return DAG.getBitcast(RootVT, Res); | |||
31076 | } | |||
31077 | ||||
31078 | // Failed to find any combines. | |||
31079 | return SDValue(); | |||
31080 | } | |||
31081 | ||||
31082 | // Attempt to constant fold all of the constant source ops. | |||
31083 | // Returns the folded constant vector if the entire shuffle folds to a constant. | |||
31084 | // TODO: Extend this to merge multiple constant Ops and update the mask. | |||
31085 | static SDValue combineX86ShufflesConstants(ArrayRef<SDValue> Ops, | |||
31086 | ArrayRef<int> Mask, SDValue Root, | |||
31087 | bool HasVariableMask, | |||
31088 | SelectionDAG &DAG, | |||
31089 | const X86Subtarget &Subtarget) { | |||
31090 | MVT VT = Root.getSimpleValueType(); | |||
31091 | ||||
31092 | unsigned SizeInBits = VT.getSizeInBits(); | |||
31093 | unsigned NumMaskElts = Mask.size(); | |||
31094 | unsigned MaskSizeInBits = SizeInBits / NumMaskElts; | |||
31095 | unsigned NumOps = Ops.size(); | |||
31096 | ||||
31097 | // Extract constant bits from each source op. | |||
31098 | bool OneUseConstantOp = false; | |||
31099 | SmallVector<APInt, 16> UndefEltsOps(NumOps); | |||
31100 | SmallVector<SmallVector<APInt, 16>, 16> RawBitsOps(NumOps); | |||
31101 | for (unsigned i = 0; i != NumOps; ++i) { | |||
31102 | SDValue SrcOp = Ops[i]; | |||
31103 | OneUseConstantOp |= SrcOp.hasOneUse(); | |||
31104 | if (!getTargetConstantBitsFromNode(SrcOp, MaskSizeInBits, UndefEltsOps[i], | |||
31105 | RawBitsOps[i])) | |||
31106 | return SDValue(); | |||
31107 | } | |||
31108 | ||||
31109 | // Only fold if at least one of the constants is only used once or | |||
31110 | // the combined shuffle has included a variable mask shuffle; this | |||
31111 | // avoids constant pool bloat. | |||
31112 | if (!OneUseConstantOp && !HasVariableMask) | |||
31113 | return SDValue(); | |||
31114 | ||||
31115 | // Shuffle the constant bits according to the mask. | |||
31116 | APInt UndefElts(NumMaskElts, 0); | |||
31117 | APInt ZeroElts(NumMaskElts, 0); | |||
31118 | APInt ConstantElts(NumMaskElts, 0); | |||
31119 | SmallVector<APInt, 8> ConstantBitData(NumMaskElts, | |||
31120 | APInt::getNullValue(MaskSizeInBits)); | |||
31121 | for (unsigned i = 0; i != NumMaskElts; ++i) { | |||
31122 | int M = Mask[i]; | |||
31123 | if (M == SM_SentinelUndef) { | |||
31124 | UndefElts.setBit(i); | |||
31125 | continue; | |||
31126 | } else if (M == SM_SentinelZero) { | |||
31127 | ZeroElts.setBit(i); | |||
31128 | continue; | |||
31129 | } | |||
31130 | assert(0 <= M && M < (int)(NumMaskElts * NumOps)); | |||
31131 | ||||
31132 | unsigned SrcOpIdx = (unsigned)M / NumMaskElts; | |||
31133 | unsigned SrcMaskIdx = (unsigned)M % NumMaskElts; | |||
31134 | ||||
31135 | auto &SrcUndefElts = UndefEltsOps[SrcOpIdx]; | |||
31136 | if (SrcUndefElts[SrcMaskIdx]) { | |||
31137 | UndefElts.setBit(i); | |||
31138 | continue; | |||
31139 | } | |||
31140 | ||||
31141 | auto &SrcEltBits = RawBitsOps[SrcOpIdx]; | |||
31142 | APInt &Bits = SrcEltBits[SrcMaskIdx]; | |||
31143 | if (!Bits) { | |||
31144 | ZeroElts.setBit(i); | |||
31145 | continue; | |||
31146 | } | |||
31147 | ||||
31148 | ConstantElts.setBit(i); | |||
31149 | ConstantBitData[i] = Bits; | |||
31150 | } | |||
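| // For example, shuffling a single constant op <1,2,3,4> with mask | |||
| // <3, Z, U, 0> produces the constant vector <4, 0, undef, 1>. | |||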
31151 | assert((UndefElts | ZeroElts | ConstantElts).isAllOnesValue()); | |||
31152 | ||||
31153 | // Create the constant data. | |||
31154 | MVT MaskSVT; | |||
31155 | if (VT.isFloatingPoint() && (MaskSizeInBits == 32 || MaskSizeInBits == 64)) | |||
31156 | MaskSVT = MVT::getFloatingPointVT(MaskSizeInBits); | |||
31157 | else | |||
31158 | MaskSVT = MVT::getIntegerVT(MaskSizeInBits); | |||
31159 | ||||
31160 | MVT MaskVT = MVT::getVectorVT(MaskSVT, NumMaskElts); | |||
31161 | ||||
31162 | SDLoc DL(Root); | |||
31163 | SDValue CstOp = getConstVector(ConstantBitData, UndefElts, MaskVT, DAG, DL); | |||
31164 | return DAG.getBitcast(VT, CstOp); | |||
31165 | } | |||
31166 | ||||
31167 | /// Fully generic combining of x86 shuffle instructions. | |||
31168 | /// | |||
31169 | /// This should be the last combine run over the x86 shuffle instructions. Once | |||
31170 | /// they have been fully optimized, this will recursively consider all chains | |||
31171 | /// of single-use shuffle instructions, build a generic model of the cumulative | |||
31172 | /// shuffle operation, and check for simpler instructions which implement this | |||
31173 | /// operation. We use this primarily for two purposes: | |||
31174 | /// | |||
31175 | /// 1) Collapse generic shuffles to specialized single instructions when | |||
31176 | /// equivalent. In most cases, this is just an encoding size win, but | |||
31177 | /// sometimes we will collapse multiple generic shuffles into a single | |||
31178 | /// special-purpose shuffle. | |||
31179 | /// 2) Look for sequences of shuffle instructions with 3 or more total | |||
31180 | /// instructions, and replace them with the slightly more expensive SSSE3 | |||
31181 | /// PSHUFB instruction if available. We do this as the last combining step | |||
31182 | /// to ensure we avoid using PSHUFB if we can implement the shuffle with | |||
31183 | /// a suitable short sequence of other instructions. The PSHUFB will either | |||
31184 | /// use a register or have to read from memory and so is slightly (but only | |||
31185 | /// slightly) more expensive than the other shuffle instructions. | |||
31186 | /// | |||
31187 | /// Because this is inherently a quadratic operation (for each shuffle in | |||
31188 | /// a chain, we recurse up the chain), the depth is limited to 8 instructions. | |||
31189 | /// This should never be an issue in practice as the shuffle lowering doesn't | |||
31190 | /// produce sequences of more than 8 instructions. | |||
31191 | /// | |||
31192 | /// FIXME: We will currently miss some cases where the redundant shuffling | |||
31193 | /// would simplify under the threshold for PSHUFB formation because of | |||
31194 | /// combine-ordering. To fix this, we should do the redundant instruction | |||
31195 | /// combining in this recursive walk. | |||
31196 | static SDValue combineX86ShufflesRecursively( | |||
31197 | ArrayRef<SDValue> SrcOps, int SrcOpIndex, SDValue Root, | |||
31198 | ArrayRef<int> RootMask, ArrayRef<const SDNode *> SrcNodes, unsigned Depth, | |||
31199 | bool HasVariableMask, bool AllowVariableMask, SelectionDAG &DAG, | |||
31200 | const X86Subtarget &Subtarget) { | |||
31201 | // Bound the depth of our recursive combine because this is ultimately | |||
31202 | // quadratic in nature. | |||
31203 | const unsigned MaxRecursionDepth = 8; | |||
31204 | if (Depth > MaxRecursionDepth) | |||
31205 | return SDValue(); | |||
31206 | ||||
31207 | // Directly rip through bitcasts to find the underlying operand. | |||
31208 | SDValue Op = SrcOps[SrcOpIndex]; | |||
31209 | Op = peekThroughOneUseBitcasts(Op); | |||
31210 | ||||
31211 | MVT VT = Op.getSimpleValueType(); | |||
31212 | if (!VT.isVector()) | |||
31213 | return SDValue(); // Bail if we hit a non-vector. | |||
31214 | ||||
31215 | assert(Root.getSimpleValueType().isVector() && | |||
31216 |        "Shuffles operate on vector types!"); | |||
31217 | assert(VT.getSizeInBits() == Root.getSimpleValueType().getSizeInBits() && | |||
31218 |        "Can only combine shuffles of the same vector register size."); | |||
31219 | ||||
31220 | // Extract target shuffle mask and resolve sentinels and inputs. | |||
31221 | SmallVector<int, 64> OpMask; | |||
31222 | SmallVector<SDValue, 2> OpInputs; | |||
31223 | if (!resolveTargetShuffleInputs(Op, OpInputs, OpMask, DAG)) | |||
31224 | return SDValue(); | |||
31225 | ||||
31226 | // TODO - Add support for more than 2 inputs. | |||
31227 | if (2 < OpInputs.size()) | |||
31228 | return SDValue(); | |||
31229 | ||||
31230 | SDValue Input0 = (OpInputs.size() > 0 ? OpInputs[0] : SDValue()); | |||
31231 | SDValue Input1 = (OpInputs.size() > 1 ? OpInputs[1] : SDValue()); | |||
31232 | ||||
31233 | // Add the inputs to the Ops list, avoiding duplicates. | |||
31234 | SmallVector<SDValue, 16> Ops(SrcOps.begin(), SrcOps.end()); | |||
31235 | ||||
31236 | auto AddOp = [&Ops](SDValue Input, int InsertionPoint) -> int { | |||
31237 | if (!Input) | |||
31238 | return -1; | |||
31239 | // Attempt to find an existing match. | |||
31240 | SDValue InputBC = peekThroughBitcasts(Input); | |||
31241 | for (int i = 0, e = Ops.size(); i < e; ++i) | |||
31242 | if (InputBC == peekThroughBitcasts(Ops[i])) | |||
31243 | return i; | |||
31244 | // Match failed - should we replace an existing Op? | |||
31245 | if (InsertionPoint >= 0) { | |||
31246 | Ops[InsertionPoint] = Input; | |||
31247 | return InsertionPoint; | |||
31248 | } | |||
31249 | // Add to the end of the Ops list. | |||
31250 | Ops.push_back(Input); | |||
31251 | return Ops.size() - 1; | |||
31252 | }; | |||
31253 | ||||
31254 | int InputIdx0 = AddOp(Input0, SrcOpIndex); | |||
31255 | int InputIdx1 = AddOp(Input1, -1); | |||
31256 | ||||
31257 | assert(((RootMask.size() > OpMask.size() && | |||
31258 |          RootMask.size() % OpMask.size() == 0) || | |||
31259 |         (OpMask.size() > RootMask.size() && | |||
31260 |          OpMask.size() % RootMask.size() == 0) || | |||
31261 |         OpMask.size() == RootMask.size()) && | |||
31262 |        "The smaller number of elements must divide the larger."); | |||
31263 | ||||
31264 | // This function can be performance-critical, so we rely on the power-of-2 | |||
31265 | // knowledge that we have about the mask sizes to replace div/rem ops with | |||
31266 | // bit-masks and shifts. | |||
31267 | assert(isPowerOf2_32(RootMask.size()) && "Non-power-of-2 shuffle mask sizes"); | |||
31268 | assert(isPowerOf2_32(OpMask.size()) && "Non-power-of-2 shuffle mask sizes"); | |||
31269 | unsigned RootMaskSizeLog2 = countTrailingZeros(RootMask.size()); | |||
31270 | unsigned OpMaskSizeLog2 = countTrailingZeros(OpMask.size()); | |||
31271 | ||||
31272 | unsigned MaskWidth = std::max<unsigned>(OpMask.size(), RootMask.size()); | |||
31273 | unsigned RootRatio = std::max<unsigned>(1, OpMask.size() >> RootMaskSizeLog2); | |||
31274 | unsigned OpRatio = std::max<unsigned>(1, RootMask.size() >> OpMaskSizeLog2); | |||
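| // For example, RootMask.size() == 4 and OpMask.size() == 8 give | |||
| // MaskWidth = 8, RootRatio = 2 and OpRatio = 1. | |||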
31275 | assert((RootRatio == 1 || OpRatio == 1) && | |||
31276 |        "Must not have a ratio for both incoming and op masks!"); | |||
31277 | ||||
31278 | assert(isPowerOf2_32(MaskWidth) && "Non-power-of-2 shuffle mask sizes"); | |||
31279 | assert(isPowerOf2_32(RootRatio) && "Non-power-of-2 shuffle mask sizes"); | |||
31280 | assert(isPowerOf2_32(OpRatio) && "Non-power-of-2 shuffle mask sizes"); | |||
31281 | unsigned RootRatioLog2 = countTrailingZeros(RootRatio); | |||
31282 | unsigned OpRatioLog2 = countTrailingZeros(OpRatio); | |||
31283 | ||||
31284 | SmallVector<int, 64> Mask(MaskWidth, SM_SentinelUndef); | |||
31285 | ||||
31286 | // Merge this shuffle operation's mask into our accumulated mask. Note that | |||
31287 | // this shuffle's mask will be the first applied to the input, followed by the | |||
31288 | // root mask to get us all the way to the root value arrangement. The reason | |||
31289 | // for this order is that we are recursing up the operation chain. | |||
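| // For example (illustrative masks): a v4i32 root mask <1,0,3,2> applied on | |||
| // top of a v8i16 op mask <7,6,5,4,3,2,1,0> widens the root to | |||
| // <2,3,0,1,6,7,4,5> and maps it through the op mask, giving the merged | |||
| // mask <5,4,7,6,1,0,3,2>. | |||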
31290 | for (unsigned i = 0; i < MaskWidth; ++i) { | |||
31291 | unsigned RootIdx = i >> RootRatioLog2; | |||
31292 | if (RootMask[RootIdx] < 0) { | |||
31293 | // This is a zero or undef lane, we're done. | |||
31294 | Mask[i] = RootMask[RootIdx]; | |||
31295 | continue; | |||
31296 | } | |||
31297 | ||||
31298 | unsigned RootMaskedIdx = | |||
31299 | RootRatio == 1 | |||
31300 | ? RootMask[RootIdx] | |||
31301 | : (RootMask[RootIdx] << RootRatioLog2) + (i & (RootRatio - 1)); | |||
31302 | ||||
31303 | // Just insert the scaled root mask value if it references an input other | |||
31304 | // than the SrcOp we're currently inserting. | |||
31305 | if ((RootMaskedIdx < (SrcOpIndex * MaskWidth)) || | |||
31306 | (((SrcOpIndex + 1) * MaskWidth) <= RootMaskedIdx)) { | |||
31307 | Mask[i] = RootMaskedIdx; | |||
31308 | continue; | |||
31309 | } | |||
31310 | ||||
31311 | RootMaskedIdx = RootMaskedIdx & (MaskWidth - 1); | |||
31312 | unsigned OpIdx = RootMaskedIdx >> OpRatioLog2; | |||
31313 | if (OpMask[OpIdx] < 0) { | |||
31314 | // The incoming lanes are zero or undef, it doesn't matter which ones we | |||
31315 | // are using. | |||
31316 | Mask[i] = OpMask[OpIdx]; | |||
31317 | continue; | |||
31318 | } | |||
31319 | ||||
31320 | // Ok, we have non-zero lanes, map them through to one of the Op's inputs. | |||
31321 | unsigned OpMaskedIdx = | |||
31322 | OpRatio == 1 | |||
31323 | ? OpMask[OpIdx] | |||
31324 | : (OpMask[OpIdx] << OpRatioLog2) + (RootMaskedIdx & (OpRatio - 1)); | |||
31325 | ||||
31326 | OpMaskedIdx = OpMaskedIdx & (MaskWidth - 1); | |||
31327 | if (OpMask[OpIdx] < (int)OpMask.size()) { | |||
31328 | assert(0 <= InputIdx0 && "Unknown target shuffle input")((0 <= InputIdx0 && "Unknown target shuffle input" ) ? static_cast<void> (0) : __assert_fail ("0 <= InputIdx0 && \"Unknown target shuffle input\"" , "/build/llvm-toolchain-snapshot-8~svn350071/lib/Target/X86/X86ISelLowering.cpp" , 31328, __PRETTY_FUNCTION__)); | |||
31329 | OpMaskedIdx += InputIdx0 * MaskWidth; | |||
31330 | } else { | |||
31331 | assert(0 <= InputIdx1 && "Unknown target shuffle input")((0 <= InputIdx1 && "Unknown target shuffle input" ) ? static_cast<void> (0) : __assert_fail ("0 <= InputIdx1 && \"Unknown target shuffle input\"" , "/build/llvm-toolchain-snapshot-8~svn350071/lib/Target/X86/X86ISelLowering.cpp" , 31331, __PRETTY_FUNCTION__)); | |||
31332 | OpMaskedIdx += InputIdx1 * MaskWidth; | |||
31333 | } | |||
31334 | ||||
31335 | Mask[i] = OpMaskedIdx; | |||
31336 | } | |||
31337 | ||||
31338 | // Handle the all undef/zero cases early. | |||
31339 | if (all_of(Mask, [](int Idx) { return Idx == SM_SentinelUndef; })) | |||
31340 | return DAG.getUNDEF(Root.getValueType()); | |||
31341 | ||||
31342 | // TODO - should we handle the mixed zero/undef case as well? Just returning | |||
31343 | // a zero mask will lose information on undef elements, possibly reducing | |||
31344 | // future combine possibilities. | |||
31345 | if (all_of(Mask, [](int Idx) { return Idx < 0; })) | |||
31346 | return getZeroVector(Root.getSimpleValueType(), Subtarget, DAG, | |||
31347 | SDLoc(Root)); | |||
31348 | ||||
31349 | // Remove unused shuffle source ops. | |||
31350 | resolveTargetShuffleInputsAndMask(Ops, Mask); | |||
31351 | assert(!Ops.empty() && "Shuffle with no inputs detected"); | |||
31352 | ||||
31353 | HasVariableMask |= isTargetShuffleVariableMask(Op.getOpcode()); | |||
31354 | ||||
31355 | // Update the list of shuffle nodes that have been combined so far. | |||
31356 | SmallVector<const SDNode *, 16> CombinedNodes(SrcNodes.begin(), | |||
31357 | SrcNodes.end()); | |||
31358 | CombinedNodes.push_back(Op.getNode()); | |||
31359 | ||||
31360 | // See if we can recurse into each shuffle source op (if it's a target | |||
31361 | // shuffle). The source op should only be generally combined if it either has | |||
31362 | // a single use (i.e. the current Op) or all its users have already been | |||
31363 | // combined; if not, we can still combine but should prevent generation of | |||
31364 | // variable mask shuffles to avoid constant pool bloat. | |||
31365 | // Don't recurse if we already have more source ops than we can combine in | |||
31366 | // the remaining recursion depth. | |||
31367 | if (Ops.size() < (MaxRecursionDepth - Depth)) { | |||
31368 | for (int i = 0, e = Ops.size(); i < e; ++i) { | |||
31369 | bool AllowVar = false; | |||
31370 | if (Ops[i].getNode()->hasOneUse() || | |||
31371 | SDNode::areOnlyUsersOf(CombinedNodes, Ops[i].getNode())) | |||
31372 | AllowVar = AllowVariableMask; | |||
31373 | if (SDValue Res = combineX86ShufflesRecursively( | |||
31374 | Ops, i, Root, Mask, CombinedNodes, Depth + 1, HasVariableMask, | |||
31375 | AllowVar, DAG, Subtarget)) | |||
31376 | return Res; | |||
31377 | } | |||
31378 | } | |||
31379 | ||||
31380 | // Attempt to constant fold all of the constant source ops. | |||
31381 | if (SDValue Cst = combineX86ShufflesConstants( | |||
31382 | Ops, Mask, Root, HasVariableMask, DAG, Subtarget)) | |||
31383 | return Cst; | |||
31384 | ||||
31385 | // We can only combine unary and binary shuffle mask cases. | |||
31386 | if (Ops.size() > 2) | |||
31387 | return SDValue(); | |||
31388 | ||||
31389 | // Minor canonicalization of the accumulated shuffle mask to make it easier | |||
31390 | // to match below. All this does is detect masks with sequential pairs of | |||
31391 | // elements, and shrink them to the half-width mask. It does this in a loop | |||
31392 | // so it will reduce the size of the mask to the minimal width mask which | |||
31393 | // performs an equivalent shuffle. | |||
31394 | SmallVector<int, 64> WidenedMask; | |||
31395 | while (Mask.size() > 1 && canWidenShuffleElements(Mask, WidenedMask)) { | |||
31396 | Mask = std::move(WidenedMask); | |||
31397 | } | |||
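| // For example, <0,2,1,3> cannot be widened, while <0,1,2,3> widens to | |||
| // <0,1> and then to <0>, the minimal equivalent mask. | |||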
31398 | ||||
31399 | // Canonicalization of binary shuffle masks to improve pattern matching by | |||
31400 | // commuting the inputs. | |||
31401 | if (Ops.size() == 2 && canonicalizeShuffleMaskWithCommute(Mask)) { | |||
31402 | ShuffleVectorSDNode::commuteMask(Mask); | |||
31403 | std::swap(Ops[0], Ops[1]); | |||
31404 | } | |||
31405 | ||||
31406 | // Finally, try to combine into a single shuffle instruction. | |||
31407 | return combineX86ShuffleChain(Ops, Root, Mask, Depth, HasVariableMask, | |||
31408 | AllowVariableMask, DAG, Subtarget); | |||
31409 | } | |||
31410 | ||||
31411 | /// Get the PSHUF-style mask from PSHUF node. | |||
31412 | /// | |||
31413 | /// This is a very minor wrapper around getTargetShuffleMask to ease forming v4 | |||
31414 | /// PSHUF-style masks that can be reused with such instructions. | |||
31415 | static SmallVector<int, 4> getPSHUFShuffleMask(SDValue N) { | |||
31416 | MVT VT = N.getSimpleValueType(); | |||
31417 | SmallVector<int, 4> Mask; | |||
31418 | SmallVector<SDValue, 2> Ops; | |||
31419 | bool IsUnary; | |||
31420 | bool HaveMask = | |||
31421 | getTargetShuffleMask(N.getNode(), VT, false, Ops, Mask, IsUnary); | |||
31422 | (void)HaveMask; | |||
31423 | assert(HaveMask); | |||
31424 | ||||
31425 | // If we have more than 128-bits, only the low 128-bits of shuffle mask | |||
31426 | // matter. Check that the upper masks are repeats and remove them. | |||
31427 | if (VT.getSizeInBits() > 128) { | |||
31428 | int LaneElts = 128 / VT.getScalarSizeInBits(); | |||
31429 | #ifndef NDEBUG | |||
31430 | for (int i = 1, NumLanes = VT.getSizeInBits() / 128; i < NumLanes; ++i) | |||
31431 | for (int j = 0; j < LaneElts; ++j) | |||
31432 | assert(Mask[j] == Mask[i * LaneElts + j] - (LaneElts * i) && | |||
31433 |        "Mask doesn't repeat in high 128-bit lanes!"); | |||
31434 | #endif | |||
31435 | Mask.resize(LaneElts); | |||
31436 | } | |||
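| // Below, the opcode-specific cases normalize to a 4-element mask; e.g. | |||
| // PSHUFHW's full v8i16 mask <0,1,2,3,4,5,7,6> is returned as <0,1,3,2>. | |||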
31437 | ||||
31438 | switch (N.getOpcode()) { | |||
31439 | case X86ISD::PSHUFD: | |||
31440 | return Mask; | |||
31441 | case X86ISD::PSHUFLW: | |||
31442 | Mask.resize(4); | |||
31443 | return Mask; | |||
31444 | case X86ISD::PSHUFHW: | |||
31445 | Mask.erase(Mask.begin(), Mask.begin() + 4); | |||
31446 | for (int &M : Mask) | |||
31447 | M -= 4; | |||
31448 | return Mask; | |||
31449 | default: | |||
31450 | llvm_unreachable("No valid shuffle instruction found!")::llvm::llvm_unreachable_internal("No valid shuffle instruction found!" , "/build/llvm-toolchain-snapshot-8~svn350071/lib/Target/X86/X86ISelLowering.cpp" , 31450); | |||
31451 | } | |||
31452 | } | |||
31453 | ||||
31454 | /// Search for a combinable shuffle across a chain ending in pshufd. | |||
31455 | /// | |||
31456 | /// We walk up the chain and look for a combinable shuffle, skipping over | |||
31457 | /// shuffles that we could hoist this shuffle's transformation past without | |||
31458 | /// altering anything. | |||
31459 | static SDValue | |||
31460 | combineRedundantDWordShuffle(SDValue N, MutableArrayRef<int> Mask, | |||
31461 | SelectionDAG &DAG) { | |||
31462 | assert(N.getOpcode() == X86ISD::PSHUFD && | |||
31463 |        "Called with something other than an x86 128-bit half shuffle!"); | |||
31464 | SDLoc DL(N); | |||
31465 | ||||
31466 | // Walk up a single-use chain looking for a combinable shuffle. Keep a stack | |||
31467 | // of the shuffles in the chain so that we can form a fresh chain to replace | |||
31468 | // this one. | |||
31469 | SmallVector<SDValue, 8> Chain; | |||
31470 | SDValue V = N.getOperand(0); | |||
31471 | for (; V.hasOneUse(); V = V.getOperand(0)) { | |||
31472 | switch (V.getOpcode()) { | |||
31473 | default: | |||
31474 | return SDValue(); // Nothing combined! | |||
31475 | ||||
31476 | case ISD::BITCAST: | |||
31477 | // Skip bitcasts as we always know the type for the target specific | |||
31478 | // instructions. | |||
31479 | continue; | |||
31480 | ||||
31481 | case X86ISD::PSHUFD: | |||
31482 | // Found another dword shuffle. | |||
31483 | break; | |||
31484 | ||||
31485 | case X86ISD::PSHUFLW: | |||
31486 | // Check that the low words (being shuffled) are the identity in the | |||
31487 | // dword shuffle, and the high words are self-contained. | |||
31488 | if (Mask[0] != 0 || Mask[1] != 1 || | |||
31489 | !(Mask[2] >= 2 && Mask[2] < 4 && Mask[3] >= 2 && Mask[3] < 4)) | |||
31490 | return SDValue(); | |||
31491 | ||||
31492 | Chain.push_back(V); | |||
31493 | continue; | |||
31494 | ||||
31495 | case X86ISD::PSHUFHW: | |||
31496 | // Check that the high words (being shuffled) are the identity in the | |||
31497 | // dword shuffle, and the low words are self-contained. | |||
31498 | if (Mask[2] != 2 || Mask[3] != 3 || | |||
31499 | !(Mask[0] >= 0 && Mask[0] < 2 && Mask[1] >= 0 && Mask[1] < 2)) | |||
31500 | return SDValue(); | |||
31501 | ||||
31502 | Chain.push_back(V); | |||
31503 | continue; | |||
31504 | ||||
31505 | case X86ISD::UNPCKL: | |||
31506 | case X86ISD::UNPCKH: | |||
31507 | // For either i8 -> i16 or i16 -> i32 unpacks, we can combine a dword | |||
31508 | // shuffle into a preceding word shuffle. | |||
31509 | if (V.getSimpleValueType().getVectorElementType() != MVT::i8 && | |||
31510 | V.getSimpleValueType().getVectorElementType() != MVT::i16) | |||
31511 | return SDValue(); | |||
31512 | ||||
31513 | // Search for a half-shuffle which we can combine with. | |||
31514 | unsigned CombineOp = | |||
31515 | V.getOpcode() == X86ISD::UNPCKL ? X86ISD::PSHUFLW : X86ISD::PSHUFHW; | |||
31516 | if (V.getOperand(0) != V.getOperand(1) || | |||
31517 | !V->isOnlyUserOf(V.getOperand(0).getNode())) | |||
31518 | return SDValue(); | |||
31519 | Chain.push_back(V); | |||
31520 | V = V.getOperand(0); | |||
31521 | do { | |||
31522 | switch (V.getOpcode()) { | |||
31523 | default: | |||
31524 | return SDValue(); // Nothing to combine. | |||
31525 | ||||
31526 | case X86ISD::PSHUFLW: | |||
31527 | case X86ISD::PSHUFHW: | |||
31528 | if (V.getOpcode() == CombineOp) | |||
31529 | break; | |||
31530 | ||||
31531 | Chain.push_back(V); | |||
31532 | ||||
31533 | LLVM_FALLTHROUGH; | |||
31534 | case ISD::BITCAST: | |||
31535 | V = V.getOperand(0); | |||
31536 | continue; | |||
31537 | } | |||
31538 | break; | |||
31539 | } while (V.hasOneUse()); | |||
31540 | break; | |||
31541 | } | |||
31542 | // Break out of the loop if we break out of the switch. | |||
31543 | break; | |||
31544 | } | |||
31545 | ||||
31546 | if (!V.hasOneUse()) | |||
31547 | // We fell out of the loop without finding a viable combining instruction. | |||
31548 | return SDValue(); | |||
31549 | ||||
31550 | // Merge this node's mask and our incoming mask. | |||
31551 | SmallVector<int, 4> VMask = getPSHUFShuffleMask(V); | |||
31552 | for (int &M : Mask) | |||
31553 | M = VMask[M]; | |||
31554 | V = DAG.getNode(V.getOpcode(), DL, V.getValueType(), V.getOperand(0), | |||
31555 | getV4X86ShuffleImm8ForMask(Mask, DL, DAG)); | |||
31556 | ||||
31557 | // Rebuild the chain around this new shuffle. | |||
31558 | while (!Chain.empty()) { | |||
31559 | SDValue W = Chain.pop_back_val(); | |||
31560 | ||||
31561 | if (V.getValueType() != W.getOperand(0).getValueType()) | |||
31562 | V = DAG.getBitcast(W.getOperand(0).getValueType(), V); | |||
31563 | ||||
31564 | switch (W.getOpcode()) { | |||
31565 | default: | |||
31566 | llvm_unreachable("Only PSHUF and UNPCK instructions get here!")::llvm::llvm_unreachable_internal("Only PSHUF and UNPCK instructions get here!" , "/build/llvm-toolchain-snapshot-8~svn350071/lib/Target/X86/X86ISelLowering.cpp" , 31566); | |||
31567 | ||||
31568 | case X86ISD::UNPCKL: | |||
31569 | case X86ISD::UNPCKH: | |||
31570 | V = DAG.getNode(W.getOpcode(), DL, W.getValueType(), V, V); | |||
31571 | break; | |||
31572 | ||||
31573 | case X86ISD::PSHUFD: | |||
31574 | case X86ISD::PSHUFLW: | |||
31575 | case X86ISD::PSHUFHW: | |||
31576 | V = DAG.getNode(W.getOpcode(), DL, W.getValueType(), V, W.getOperand(1)); | |||
31577 | break; | |||
31578 | } | |||
31579 | } | |||
31580 | if (V.getValueType() != N.getValueType()) | |||
31581 | V = DAG.getBitcast(N.getValueType(), V); | |||
31582 | ||||
31583 | // Return the new chain to replace N. | |||
31584 | return V; | |||
31585 | } | |||
31586 | ||||
31587 | /// Try to combine x86 target specific shuffles. | |||
31588 | static SDValue combineTargetShuffle(SDValue N, SelectionDAG &DAG, | |||
31589 | TargetLowering::DAGCombinerInfo &DCI, | |||
31590 | const X86Subtarget &Subtarget) { | |||
31591 | SDLoc DL(N); | |||
31592 | MVT VT = N.getSimpleValueType(); | |||
31593 | SmallVector<int, 4> Mask; | |||
31594 | unsigned Opcode = N.getOpcode(); | |||
31595 | ||||
31596 | // Combine binary shuffle of 2 similar 'Horizontal' instructions into a | |||
31597 | // single instruction. | |||
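| // For example, UNPCKL(v2f64 FHADD(A,B), FHADD(C,D)) reads only the low | |||
| // half of each FHADD and so simplifies to the single node FHADD(A,C). | |||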
31598 | if (VT.getScalarSizeInBits() == 64 && | |||
31599 | (Opcode == X86ISD::MOVSD || Opcode == X86ISD::UNPCKH || | |||
31600 | Opcode == X86ISD::UNPCKL)) { | |||
31601 | auto BC0 = peekThroughBitcasts(N.getOperand(0)); | |||
31602 | auto BC1 = peekThroughBitcasts(N.getOperand(1)); | |||
31603 | EVT VT0 = BC0.getValueType(); | |||
31604 | EVT VT1 = BC1.getValueType(); | |||
31605 | unsigned Opcode0 = BC0.getOpcode(); | |||
31606 | unsigned Opcode1 = BC1.getOpcode(); | |||
31607 | if (Opcode0 == Opcode1 && VT0 == VT1 && | |||
31608 | (Opcode0 == X86ISD::FHADD || Opcode0 == X86ISD::HADD || | |||
31609 | Opcode0 == X86ISD::FHSUB || Opcode0 == X86ISD::HSUB || | |||
31610 | Opcode0 == X86ISD::PACKSS || Opcode0 == X86ISD::PACKUS)) { | |||
31611 | SDValue Lo, Hi; | |||
31612 | if (Opcode == X86ISD::MOVSD) { | |||
31613 | Lo = BC1.getOperand(0); | |||
31614 | Hi = BC0.getOperand(1); | |||
31615 | } else { | |||
31616 | Lo = BC0.getOperand(Opcode == X86ISD::UNPCKH ? 1 : 0); | |||
31617 | Hi = BC1.getOperand(Opcode == X86ISD::UNPCKH ? 1 : 0); | |||
31618 | } | |||
31619 | SDValue Horiz = DAG.getNode(Opcode0, DL, VT0, Lo, Hi); | |||
31620 | return DAG.getBitcast(VT, Horiz); | |||
31621 | } | |||
31622 | } | |||
31623 | ||||
31624 | switch (Opcode) { | |||
31625 | case X86ISD::VBROADCAST: { | |||
31626 | // If broadcasting from another shuffle, attempt to simplify it. | |||
31627 | // TODO - we really need a general SimplifyDemandedVectorElts mechanism. | |||
31628 | SDValue Src = N.getOperand(0); | |||
31629 | SDValue BC = peekThroughBitcasts(Src); | |||
31630 | EVT SrcVT = Src.getValueType(); | |||
31631 | EVT BCVT = BC.getValueType(); | |||
31632 | if (isTargetShuffle(BC.getOpcode()) && | |||
31633 | VT.getScalarSizeInBits() % BCVT.getScalarSizeInBits() == 0) { | |||
31634 | unsigned Scale = VT.getScalarSizeInBits() / BCVT.getScalarSizeInBits(); | |||
31635 | SmallVector<int, 16> DemandedMask(BCVT.getVectorNumElements(), | |||
31636 | SM_SentinelUndef); | |||
31637 | for (unsigned i = 0; i != Scale; ++i) | |||
31638 | DemandedMask[i] = i; | |||
31639 | if (SDValue Res = combineX86ShufflesRecursively( | |||
31640 | {BC}, 0, BC, DemandedMask, {}, /*Depth*/ 1, | |||
31641 | /*HasVarMask*/ false, /*AllowVarMask*/ true, DAG, Subtarget)) | |||
31642 | return DAG.getNode(X86ISD::VBROADCAST, DL, VT, | |||
31643 | DAG.getBitcast(SrcVT, Res)); | |||
31644 | } | |||
31645 | return SDValue(); | |||
31646 | } | |||
31647 | case X86ISD::PSHUFD: | |||
31648 | case X86ISD::PSHUFLW: | |||
31649 | case X86ISD::PSHUFHW: | |||
31650 | Mask = getPSHUFShuffleMask(N); | |||
31651 | assert(Mask.size() == 4); | |||
31652 | break; | |||
31653 | case X86ISD::MOVSD: | |||
31654 | case X86ISD::MOVSS: { | |||
31655 | SDValue N0 = N.getOperand(0); | |||
31656 | SDValue N1 = N.getOperand(1); | |||
31657 | ||||
31658 | // Canonicalize scalar FPOps: | |||
31659 | // MOVS*(N0, OP(N0, N1)) --> MOVS*(N0, SCALAR_TO_VECTOR(OP(N0[0], N1[0]))) | |||
31660 | // If commutable, allow OP(N1[0], N0[0]). | |||
31661 | unsigned Opcode1 = N1.getOpcode(); | |||
31662 | if (Opcode1 == ISD::FADD || Opcode1 == ISD::FMUL || Opcode1 == ISD::FSUB || | |||
31663 | Opcode1 == ISD::FDIV) { | |||
31664 | SDValue N10 = N1.getOperand(0); | |||
31665 | SDValue N11 = N1.getOperand(1); | |||
31666 | if (N10 == N0 || | |||
31667 | (N11 == N0 && (Opcode1 == ISD::FADD || Opcode1 == ISD::FMUL))) { | |||
31668 | if (N10 != N0) | |||
31669 | std::swap(N10, N11); | |||
31670 | MVT SVT = VT.getVectorElementType(); | |||
31671 | SDValue ZeroIdx = DAG.getIntPtrConstant(0, DL); | |||
31672 | N10 = DAG.getNode(ISD::EXTRACT_VECTOR_ELT, DL, SVT, N10, ZeroIdx); | |||
31673 | N11 = DAG.getNode(ISD::EXTRACT_VECTOR_ELT, DL, SVT, N11, ZeroIdx); | |||
31674 | SDValue Scl = DAG.getNode(Opcode1, DL, SVT, N10, N11); | |||
31675 | SDValue SclVec = DAG.getNode(ISD::SCALAR_TO_VECTOR, DL, VT, Scl); | |||
31676 | return DAG.getNode(Opcode, DL, VT, N0, SclVec); | |||
31677 | } | |||
31678 | } | |||
31679 | ||||
31680 | return SDValue(); | |||
31681 | } | |||
31682 | case X86ISD::INSERTPS: { | |||
31683 | assert(VT == MVT::v4f32 && "INSERTPS ValueType must be MVT::v4f32"); | |||
31684 | SDValue Op0 = N.getOperand(0); | |||
31685 | SDValue Op1 = N.getOperand(1); | |||
31686 | SDValue Op2 = N.getOperand(2); | |||
31687 | unsigned InsertPSMask = cast<ConstantSDNode>(Op2)->getZExtValue(); | |||
31688 | unsigned SrcIdx = (InsertPSMask >> 6) & 0x3; | |||
31689 | unsigned DstIdx = (InsertPSMask >> 4) & 0x3; | |||
31690 | unsigned ZeroMask = InsertPSMask & 0xF; | |||
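| // For example, an immediate of 0x8A decodes to SrcIdx = 2, DstIdx = 0 and | |||
| // ZeroMask = 0xA (elements 1 and 3 zeroed). | |||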
31691 | ||||
31692 | // If we zero out all elements from Op0 then we don't need to reference it. | |||
31693 | if (((ZeroMask | (1u << DstIdx)) == 0xF) && !Op0.isUndef()) | |||
31694 | return DAG.getNode(X86ISD::INSERTPS, DL, VT, DAG.getUNDEF(VT), Op1, | |||
31695 | DAG.getConstant(InsertPSMask, DL, MVT::i8)); | |||
31696 | ||||
31697 | // If we zero out the element from Op1 then we don't need to reference it. | |||
31698 | if ((ZeroMask & (1u << DstIdx)) && !Op1.isUndef()) | |||
31699 | return DAG.getNode(X86ISD::INSERTPS, DL, VT, Op0, DAG.getUNDEF(VT), | |||
31700 | DAG.getConstant(InsertPSMask, DL, MVT::i8)); | |||
31701 | ||||
31702 | // Attempt to merge insertps Op1 with an inner target shuffle node. | |||
31703 | SmallVector<int, 8> TargetMask1; | |||
31704 | SmallVector<SDValue, 2> Ops1; | |||
31705 | if (setTargetShuffleZeroElements(Op1, TargetMask1, Ops1)) { | |||
31706 | int M = TargetMask1[SrcIdx]; | |||
31707 | if (isUndefOrZero(M)) { | |||
31708 | // Zero/UNDEF insertion - zero out element and remove dependency. | |||
31709 | InsertPSMask |= (1u << DstIdx); | |||
31710 | return DAG.getNode(X86ISD::INSERTPS, DL, VT, Op0, DAG.getUNDEF(VT), | |||
31711 | DAG.getConstant(InsertPSMask, DL, MVT::i8)); | |||
31712 | } | |||
31713 | // Update insertps mask srcidx and reference the source input directly. | |||
31714 | assert(0 <= M && M < 8 && "Shuffle index out of range"); | |||
31715 | InsertPSMask = (InsertPSMask & 0x3f) | ((M & 0x3) << 6); | |||
31716 | Op1 = Ops1[M < 4 ? 0 : 1]; | |||
31717 | return DAG.getNode(X86ISD::INSERTPS, DL, VT, Op0, Op1, | |||
31718 | DAG.getConstant(InsertPSMask, DL, MVT::i8)); | |||
31719 | } | |||
31720 | ||||
31721 | // Attempt to merge insertps Op0 with an inner target shuffle node. | |||
31722 | SmallVector<int, 8> TargetMask0; | |||
31723 | SmallVector<SDValue, 2> Ops0; | |||
31724 | if (!setTargetShuffleZeroElements(Op0, TargetMask0, Ops0)) | |||
31725 | return SDValue(); | |||
31726 | ||||
31727 | bool Updated = false; | |||
31728 | bool UseInput00 = false; | |||
31729 | bool UseInput01 = false; | |||
31730 | for (int i = 0; i != 4; ++i) { | |||
31731 | int M = TargetMask0[i]; | |||
31732 | if ((InsertPSMask & (1u << i)) || (i == (int)DstIdx)) { | |||
31733 | // No change if element is already zero or the inserted element. | |||
31734 | continue; | |||
31735 | } else if (isUndefOrZero(M)) { | |||
31736 | // If the target mask is undef/zero then we must zero the element. | |||
31737 | InsertPSMask |= (1u << i); | |||
31738 | Updated = true; | |||
31739 | continue; | |||
31740 | } | |||
31741 | ||||
31742 | // The input vector element must be inline. | |||
31743 | if (M != i && M != (i + 4)) | |||
31744 | return SDValue(); | |||
31745 | ||||
31746 | // Determine which inputs of the target shuffle we're using. | |||
31747 | UseInput00 |= (0 <= M && M < 4); | |||
31748 | UseInput01 |= (4 <= M); | |||
31749 | } | |||
31750 | ||||
31751 | // If we're not using both inputs of the target shuffle then use the | |||
31752 | // referenced input directly. | |||
31753 | if (UseInput00 && !UseInput01) { | |||
31754 | Updated = true; | |||
31755 | Op0 = Ops0[0]; | |||
31756 | } else if (!UseInput00 && UseInput01) { | |||
31757 | Updated = true; | |||
31758 | Op0 = Ops0[1]; | |||
31759 | } | |||
31760 | ||||
31761 | if (Updated) | |||
31762 | return DAG.getNode(X86ISD::INSERTPS, DL, VT, Op0, Op1, | |||
31763 | DAG.getConstant(InsertPSMask, DL, MVT::i8)); | |||
31764 | ||||
31765 | return SDValue(); | |||
31766 | } | |||
31767 | default: | |||
31768 | return SDValue(); | |||
31769 | } | |||
31770 | ||||
31771 | // Nuke no-op shuffles that show up after combining. | |||
31772 | if (isNoopShuffleMask(Mask)) | |||
31773 | return N.getOperand(0); | |||
31774 | ||||
31775 | // Look for simplifications involving one or two shuffle instructions. | |||
31776 | SDValue V = N.getOperand(0); | |||
31777 | switch (N.getOpcode()) { | |||
31778 | default: | |||
31779 | break; | |||
31780 | case X86ISD::PSHUFLW: | |||
31781 | case X86ISD::PSHUFHW: | |||
31782 | assert(VT.getVectorElementType() == MVT::i16 && "Bad word shuffle type!"); | |||
31783 | ||||
31784 | // See if this reduces to a PSHUFD which is no more expensive and can | |||
31785 | // combine with more operations. Note that it has to at least flip the | |||
31786 | // dwords as otherwise it would have been removed as a no-op. | |||
31787 | if (makeArrayRef(Mask).equals({2, 3, 0, 1})) { | |||
31788 | int DMask[] = {0, 1, 2, 3}; | |||
31789 | int DOffset = N.getOpcode() == X86ISD::PSHUFLW ? 0 : 2; | |||
31790 | DMask[DOffset + 0] = DOffset + 1; | |||
31791 | DMask[DOffset + 1] = DOffset + 0; | |||
31792 | MVT DVT = MVT::getVectorVT(MVT::i32, VT.getVectorNumElements() / 2); | |||
31793 | V = DAG.getBitcast(DVT, V); | |||
31794 | V = DAG.getNode(X86ISD::PSHUFD, DL, DVT, V, | |||
31795 | getV4X86ShuffleImm8ForMask(DMask, DL, DAG)); | |||
31796 | return DAG.getBitcast(VT, V); | |||
31797 | } | |||
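// Worked example (editorial illustration, not in the original source): a
// PSHUFLW with word mask {2,3,0,1} only swaps the two dwords inside the
// low 64 bits, so on v8i16 it is exactly a PSHUFD with dword mask
// {1,0,2,3}; the PSHUFHW form likewise becomes PSHUFD with {0,1,3,2}.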
31798 | ||||
31799 | // Look for shuffle patterns which can be implemented as a single unpack. | |||
31800 | // FIXME: This doesn't handle the location of the PSHUFD generically, and | |||
31801 | // only works when we have a PSHUFD followed by two half-shuffles. | |||
31802 | if (Mask[0] == Mask[1] && Mask[2] == Mask[3] && | |||
31803 | (V.getOpcode() == X86ISD::PSHUFLW || | |||
31804 | V.getOpcode() == X86ISD::PSHUFHW) && | |||
31805 | V.getOpcode() != N.getOpcode() && | |||
31806 | V.hasOneUse()) { | |||
31807 | SDValue D = peekThroughOneUseBitcasts(V.getOperand(0)); | |||
31808 | if (D.getOpcode() == X86ISD::PSHUFD && D.hasOneUse()) { | |||
31809 | SmallVector<int, 4> VMask = getPSHUFShuffleMask(V); | |||
31810 | SmallVector<int, 4> DMask = getPSHUFShuffleMask(D); | |||
31811 | int NOffset = N.getOpcode() == X86ISD::PSHUFLW ? 0 : 4; | |||
31812 | int VOffset = V.getOpcode() == X86ISD::PSHUFLW ? 0 : 4; | |||
31813 | int WordMask[8]; | |||
31814 | for (int i = 0; i < 4; ++i) { | |||
31815 | WordMask[i + NOffset] = Mask[i] + NOffset; | |||
31816 | WordMask[i + VOffset] = VMask[i] + VOffset; | |||
31817 | } | |||
31818 | // Map the word mask through the DWord mask. | |||
31819 | int MappedMask[8]; | |||
31820 | for (int i = 0; i < 8; ++i) | |||
31821 | MappedMask[i] = 2 * DMask[WordMask[i] / 2] + WordMask[i] % 2; | |||
31822 | if (makeArrayRef(MappedMask).equals({0, 0, 1, 1, 2, 2, 3, 3}) || | |||
31823 | makeArrayRef(MappedMask).equals({4, 4, 5, 5, 6, 6, 7, 7})) { | |||
31824 | // We can replace all three shuffles with an unpack. | |||
31825 | V = DAG.getBitcast(VT, D.getOperand(0)); | |||
31826 | return DAG.getNode(MappedMask[0] == 0 ? X86ISD::UNPCKL | |||
31827 | : X86ISD::UNPCKH, | |||
31828 | DL, VT, V, V); | |||
31829 | } | |||
31830 | } | |||
31831 | } | |||
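// Worked example (editorial illustration, not in the original source): let
// N = PSHUFLW with Mask {0,0,1,1}, V = PSHUFHW with normalized VMask
// {2,2,3,3}, and D = PSHUFD with DMask {0,0,1,1}. WordMask becomes
// {0,0,1,1,6,6,7,7}, and mapping each word w to 2*DMask[w/2] + w%2 yields
// {0,0,1,1,2,2,3,3}, so the three shuffles collapse to UNPCKL of D's input
// with itself.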
31832 | ||||
31833 | break; | |||
31834 | ||||
31835 | case X86ISD::PSHUFD: | |||
31836 | if (SDValue NewN = combineRedundantDWordShuffle(N, Mask, DAG)) | |||
31837 | return NewN; | |||
31838 | ||||
31839 | break; | |||
31840 | } | |||
31841 | ||||
31842 | return SDValue(); | |||
31843 | } | |||
31844 | ||||
31845 | /// Checks if the shuffle mask takes subsequent elements | |||
31846 | /// alternately from two vectors. | |||
31847 | /// For example <0, 5, 2, 7> or <8, 1, 10, 3, 12, 5, 14, 7> are both correct. | |||
31848 | static bool isAddSubOrSubAddMask(ArrayRef<int> Mask, bool &Op0Even) { | |||
31849 | ||||
31850 | int ParitySrc[2] = {-1, -1}; | |||
31851 | unsigned Size = Mask.size(); | |||
31852 | for (unsigned i = 0; i != Size; ++i) { | |||
31853 | int M = Mask[i]; | |||
31854 | if (M < 0) | |||
31855 | continue; | |||
31856 | ||||
31857 | // Make sure we are using the matching element from the input. | |||
31858 | if ((M % Size) != i) | |||
31859 | return false; | |||
31860 | ||||
31861 | // Make sure we use the same input for all elements of the same parity. | |||
31862 | int Src = M / Size; | |||
31863 | if (ParitySrc[i % 2] >= 0 && ParitySrc[i % 2] != Src) | |||
31864 | return false; | |||
31865 | ParitySrc[i % 2] = Src; | |||
31866 | } | |||
31867 | ||||
31868 | // Make sure each input is used. | |||
31869 | if (ParitySrc[0] < 0 || ParitySrc[1] < 0 || ParitySrc[0] == ParitySrc[1]) | |||
31870 | return false; | |||
31871 | ||||
31872 | Op0Even = ParitySrc[0] == 0; | |||
31873 | return true; | |||
31874 | } | |||
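// Worked example (editorial illustration, not in the original source): for
// the mask <0, 5, 2, 7> with Size == 4, every element satisfies
// (M % 4) == i, even lanes read source 0 (ParitySrc[0] == 0) and odd lanes
// read source 1 (ParitySrc[1] == 1), so the function returns true with
// Op0Even == true.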
31875 | ||||
31876 | /// Returns true iff the shuffle node \p N can be replaced with an ADDSUB | |||
31877 | /// (or SUBADD) operation. If true is returned then the operands of the ADDSUB | |||
31878 | /// (or SUBADD) operation are written to the parameters \p Opnd0 and \p Opnd1. | |||
31879 | /// | |||
31880 | /// We combine the shuffle to ADDSUB (or SUBADD) directly on the abstract | |||
31881 | /// vector shuffle nodes so it is easier to generically match. We also insert | |||
31882 | /// dummy vector shuffle nodes for the operands which explicitly discard the | |||
31883 | /// lanes unused by this operation, to try to flow the fact that they're | |||
31884 | /// unused through the rest of the combiner. | |||
31885 | static bool isAddSubOrSubAdd(SDNode *N, const X86Subtarget &Subtarget, | |||
31886 | SelectionDAG &DAG, SDValue &Opnd0, SDValue &Opnd1, | |||
31887 | bool &IsSubAdd) { | |||
31888 | ||||
31889 | EVT VT = N->getValueType(0); | |||
31890 | const TargetLowering &TLI = DAG.getTargetLoweringInfo(); | |||
31891 | if (!Subtarget.hasSSE3() || !TLI.isTypeLegal(VT) || | |||
31892 | !VT.getSimpleVT().isFloatingPoint()) | |||
31893 | return false; | |||
31894 | ||||
31895 | // We only handle target-independent shuffles. | |||
31896 | // FIXME: It would be easy and harmless to use the target shuffle mask | |||
31897 | // extraction tool to support more. | |||
31898 | if (N->getOpcode() != ISD::VECTOR_SHUFFLE) | |||
31899 | return false; | |||
31900 | ||||
31901 | SDValue V1 = N->getOperand(0); | |||
31902 | SDValue V2 = N->getOperand(1); | |||
31903 | ||||
31904 | // Make sure we have an FADD and an FSUB. | |||
31905 | if ((V1.getOpcode() != ISD::FADD && V1.getOpcode() != ISD::FSUB) || | |||
31906 | (V2.getOpcode() != ISD::FADD && V2.getOpcode() != ISD::FSUB) || | |||
31907 | V1.getOpcode() == V2.getOpcode()) | |||
31908 | return false; | |||
31909 | ||||
31910 | // If there are other uses of these operations we can't fold them. | |||
31911 | if (!V1->hasOneUse() || !V2->hasOneUse()) | |||
31912 | return false; | |||
31913 | ||||
31914 | // Ensure that both operations have the same operands. Note that we can | |||
31915 | // commute the FADD operands. | |||
31916 | SDValue LHS, RHS; | |||
31917 | if (V1.getOpcode() == ISD::FSUB) { | |||
31918 | LHS = V1->getOperand(0); RHS = V1->getOperand(1); | |||
31919 | if ((V2->getOperand(0) != LHS || V2->getOperand(1) != RHS) && | |||
31920 | (V2->getOperand(0) != RHS || V2->getOperand(1) != LHS)) | |||
31921 | return false; | |||
31922 | } else { | |||
31923 | assert(V2.getOpcode() == ISD::FSUB && "Unexpected opcode"); | |||
31924 | LHS = V2->getOperand(0); RHS = V2->getOperand(1); | |||
31925 | if ((V1->getOperand(0) != LHS || V1->getOperand(1) != RHS) && | |||
31926 | (V1->getOperand(0) != RHS || V1->getOperand(1) != LHS)) | |||
31927 | return false; | |||
31928 | } | |||
31929 | ||||
31930 | ArrayRef<int> Mask = cast<ShuffleVectorSDNode>(N)->getMask(); | |||
31931 | bool Op0Even; | |||
31932 | if (!isAddSubOrSubAddMask(Mask, Op0Even)) | |||
31933 | return false; | |||
31934 | ||||
31935 | // It's a subadd if the vector in the even parity is an FADD. | |||
31936 | IsSubAdd = Op0Even ? V1->getOpcode() == ISD::FADD | |||
31937 | : V2->getOpcode() == ISD::FADD; | |||
31938 | ||||
31939 | Opnd0 = LHS; | |||
31940 | Opnd1 = RHS; | |||
31941 | return true; | |||
31942 | } | |||
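// Worked example (editorial illustration, not in the original source): for
//   t = (v4f32 vector_shuffle<0,5,2,7> (fsub A, B), (fadd A, B))
// the even lanes compute A[i] - B[i] and the odd lanes A[i] + B[i], which
// is exactly (X86ISD::ADDSUB A, B). Here the even-parity source is the
// FSUB, so IsSubAdd is reported as false.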
31943 | ||||
31944 | /// Combine shuffle of two fma nodes into FMAddSub or FMSubAdd. | |||
31945 | static SDValue combineShuffleToFMAddSub(SDNode *N, | |||
31946 | const X86Subtarget &Subtarget, | |||
31947 | SelectionDAG &DAG) { | |||
31948 | // We only handle target-independent shuffles. | |||
31949 | // FIXME: It would be easy and harmless to use the target shuffle mask | |||
31950 | // extraction tool to support more. | |||
31951 | if (N->getOpcode() != ISD::VECTOR_SHUFFLE) | |||
31952 | return SDValue(); | |||
31953 | ||||
31954 | MVT VT = N->getSimpleValueType(0); | |||
31955 | const TargetLowering &TLI = DAG.getTargetLoweringInfo(); | |||
31956 | if (!Subtarget.hasAnyFMA() || !TLI.isTypeLegal(VT)) | |||
31957 | return SDValue(); | |||
31958 | ||||
31959 | // We're trying to match (shuffle fma(a, b, c), X86Fmsub(a, b, c)). | |||
31960 | SDValue Op0 = N->getOperand(0); | |||
31961 | SDValue Op1 = N->getOperand(1); | |||
31962 | SDValue FMAdd = Op0, FMSub = Op1; | |||
31963 | if (FMSub.getOpcode() != X86ISD::FMSUB) | |||
31964 | std::swap(FMAdd, FMSub); | |||
31965 | ||||
31966 | if (FMAdd.getOpcode() != ISD::FMA || FMSub.getOpcode() != X86ISD::FMSUB || | |||
31967 | FMAdd.getOperand(0) != FMSub.getOperand(0) || !FMAdd.hasOneUse() || | |||
31968 | FMAdd.getOperand(1) != FMSub.getOperand(1) || !FMSub.hasOneUse() || | |||
31969 | FMAdd.getOperand(2) != FMSub.getOperand(2)) | |||
31970 | return SDValue(); | |||
31971 | ||||
31972 | // Check for correct shuffle mask. | |||
31973 | ArrayRef<int> Mask = cast<ShuffleVectorSDNode>(N)->getMask(); | |||
31974 | bool Op0Even; | |||
31975 | if (!isAddSubOrSubAddMask(Mask, Op0Even)) | |||
31976 | return SDValue(); | |||
31977 | ||||
31978 | // FMAddSub takes zeroth operand from FMSub node. | |||
31979 | SDLoc DL(N); | |||
31980 | bool IsSubAdd = Op0Even ? Op0 == FMAdd : Op1 == FMAdd; | |||
31981 | unsigned Opcode = IsSubAdd ? X86ISD::FMSUBADD : X86ISD::FMADDSUB; | |||
31982 | return DAG.getNode(Opcode, DL, VT, FMAdd.getOperand(0), FMAdd.getOperand(1), | |||
31983 | FMAdd.getOperand(2)); | |||
31984 | } | |||
31985 | ||||
31986 | /// Try to combine a shuffle into a target-specific add-sub or | |||
31987 | /// mul-add-sub node. | |||
31988 | static SDValue combineShuffleToAddSubOrFMAddSub(SDNode *N, | |||
31989 | const X86Subtarget &Subtarget, | |||
31990 | SelectionDAG &DAG) { | |||
31991 | if (SDValue V = combineShuffleToFMAddSub(N, Subtarget, DAG)) | |||
31992 | return V; | |||
31993 | ||||
31994 | SDValue Opnd0, Opnd1; | |||
31995 | bool IsSubAdd; | |||
31996 | if (!isAddSubOrSubAdd(N, Subtarget, DAG, Opnd0, Opnd1, IsSubAdd)) | |||
31997 | return SDValue(); | |||
31998 | ||||
31999 | MVT VT = N->getSimpleValueType(0); | |||
32000 | SDLoc DL(N); | |||
32001 | ||||
32002 | // Try to generate X86ISD::FMADDSUB node here. | |||
32003 | SDValue Opnd2; | |||
32004 | if (isFMAddSubOrFMSubAdd(Subtarget, DAG, Opnd0, Opnd1, Opnd2, 2)) { | |||
32005 | unsigned Opc = IsSubAdd ? X86ISD::FMSUBADD : X86ISD::FMADDSUB; | |||
32006 | return DAG.getNode(Opc, DL, VT, Opnd0, Opnd1, Opnd2); | |||
32007 | } | |||
32008 | ||||
32009 | if (IsSubAdd) | |||
32010 | return SDValue(); | |||
32011 | ||||
32012 | // Do not generate X86ISD::ADDSUB node for 512-bit types even though | |||
32013 | // the ADDSUB idiom has been successfully recognized. There are no known | |||
32014 | // X86 targets with 512-bit ADDSUB instructions! | |||
32015 | if (VT.is512BitVector()) | |||
32016 | return SDValue(); | |||
32017 | ||||
32018 | return DAG.getNode(X86ISD::ADDSUB, DL, VT, Opnd0, Opnd1); | |||
32019 | } | |||
32020 | ||||
32021 | // We are looking for a shuffle where both sources are concatenated with undef | |||
32022 | // and have a width that is half of the output's width. AVX2 has VPERMD/Q, so | |||
32023 | // if we can express this as a single-source shuffle, that's preferable. | |||
32024 | static SDValue combineShuffleOfConcatUndef(SDNode *N, SelectionDAG &DAG, | |||
32025 | const X86Subtarget &Subtarget) { | |||
32026 | if (!Subtarget.hasAVX2() || !isa<ShuffleVectorSDNode>(N)) | |||
32027 | return SDValue(); | |||
32028 | ||||
32029 | EVT VT = N->getValueType(0); | |||
32030 | ||||
32031 | // We only care about shuffles of 128/256-bit vectors of 32/64-bit values. | |||
32032 | if (!VT.is128BitVector() && !VT.is256BitVector()) | |||
32033 | return SDValue(); | |||
32034 | ||||
32035 | if (VT.getVectorElementType() != MVT::i32 && | |||
32036 | VT.getVectorElementType() != MVT::i64 && | |||
32037 | VT.getVectorElementType() != MVT::f32 && | |||
32038 | VT.getVectorElementType() != MVT::f64) | |||
32039 | return SDValue(); | |||
32040 | ||||
32041 | SDValue N0 = N->getOperand(0); | |||
32042 | SDValue N1 = N->getOperand(1); | |||
32043 | ||||
32044 | // Check that both sources are concats with undef. | |||
32045 | if (N0.getOpcode() != ISD::CONCAT_VECTORS || | |||
32046 | N1.getOpcode() != ISD::CONCAT_VECTORS || N0.getNumOperands() != 2 || | |||
32047 | N1.getNumOperands() != 2 || !N0.getOperand(1).isUndef() || | |||
32048 | !N1.getOperand(1).isUndef()) | |||
32049 | return SDValue(); | |||
32050 | ||||
32051 | // Construct the new shuffle mask. Elements from the first source retain their | |||
32052 | // index, but elements from the second source no longer need to skip an undef. | |||
32053 | SmallVector<int, 8> Mask; | |||
32054 | int NumElts = VT.getVectorNumElements(); | |||
32055 | ||||
32056 | ShuffleVectorSDNode *SVOp = cast<ShuffleVectorSDNode>(N); | |||
32057 | for (int Elt : SVOp->getMask()) | |||
32058 | Mask.push_back(Elt < NumElts ? Elt : (Elt - NumElts / 2)); | |||
32059 | ||||
32060 | SDLoc DL(N); | |||
32061 | SDValue Concat = DAG.getNode(ISD::CONCAT_VECTORS, DL, VT, N0.getOperand(0), | |||
32062 | N1.getOperand(0)); | |||
32063 | return DAG.getVectorShuffle(VT, DL, Concat, DAG.getUNDEF(VT), Mask); | |||
32064 | } | |||
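// Worked example (editorial illustration, not in the original source): for
// a v8i32 shuffle with N0 = concat(t1, undef) and N1 = concat(t2, undef),
// a mask element of 9 addresses lane 1 of t2 through the undef-padded N1;
// after the rewrite it becomes 9 - 8/2 == 5, lane 1 of t2 inside the new
// concat(t1, t2).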
32065 | ||||
32066 | /// Eliminate a redundant shuffle of a horizontal math op. | |||
32067 | static SDValue foldShuffleOfHorizOp(SDNode *N) { | |||
32068 | if (N->getOpcode() != ISD::VECTOR_SHUFFLE || !N->getOperand(1).isUndef()) | |||
32069 | return SDValue(); | |||
32070 | ||||
32071 | SDValue HOp = N->getOperand(0); | |||
32072 | if (HOp.getOpcode() != X86ISD::HADD && HOp.getOpcode() != X86ISD::FHADD && | |||
32073 | HOp.getOpcode() != X86ISD::HSUB && HOp.getOpcode() != X86ISD::FHSUB) | |||
32074 | return SDValue(); | |||
32075 | ||||
32076 | // 128-bit horizontal math instructions are defined to operate on adjacent | |||
32077 | // lanes of each operand as: | |||
32078 | // v4X32: A[0] + A[1] , A[2] + A[3] , B[0] + B[1] , B[2] + B[3] | |||
32079 | // ...similarly for v2f64 and v8i16. | |||
32080 | // TODO: Handle UNDEF operands. | |||
32081 | if (HOp.getOperand(0) != HOp.getOperand(1)) | |||
32082 | return SDValue(); | |||
32083 | ||||
32084 | // When the operands of a horizontal math op are identical, the low half of | |||
32085 | // the result is the same as the high half. If the shuffle is also replicating | |||
32086 | // low and high halves, we don't need the shuffle. | |||
32087 | // shuffle (hadd X, X), undef, [low half...high half] --> hadd X, X | |||
32088 | ArrayRef<int> Mask = cast<ShuffleVectorSDNode>(N)->getMask(); | |||
32089 | // TODO: Other mask possibilities like {1,1} and {1,0} could be added here, | |||
32090 | // but this should be tied to whatever horizontal op matching and shuffle | |||
32091 | // canonicalization are producing. | |||
32092 | if (HOp.getValueSizeInBits() == 128 && | |||
32093 | (isTargetShuffleEquivalent(Mask, {0, 0}) || | |||
32094 | isTargetShuffleEquivalent(Mask, {0, 1, 0, 1}) || | |||
32095 | isTargetShuffleEquivalent(Mask, {0, 1, 2, 3, 0, 1, 2, 3}))) | |||
32096 | return HOp; | |||
32097 | ||||
32098 | if (HOp.getValueSizeInBits() == 256 && | |||
32099 | (isTargetShuffleEquivalent(Mask, {0, 0, 2, 2}) || | |||
32100 | isTargetShuffleEquivalent(Mask, {0, 1, 0, 1, 4, 5, 4, 5}) || | |||
32101 | isTargetShuffleEquivalent( | |||
32102 | Mask, {0, 1, 2, 3, 0, 1, 2, 3, 8, 9, 10, 11, 8, 9, 10, 11}))) | |||
32103 | return HOp; | |||
32104 | ||||
32105 | return SDValue(); | |||
32106 | } | |||
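// Worked example (editorial illustration, not in the original source): for
// HOp = (v4f32 hadd X, X) the result is
//   { X[0]+X[1], X[2]+X[3], X[0]+X[1], X[2]+X[3] },
// so a following shuffle with mask {0,1,0,1} reproduces the value exactly
// and can be dropped in favor of HOp itself.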
32107 | ||||
32108 | static SDValue combineShuffle(SDNode *N, SelectionDAG &DAG, | |||
32109 | TargetLowering::DAGCombinerInfo &DCI, | |||
32110 | const X86Subtarget &Subtarget) { | |||
32111 | SDLoc dl(N); | |||
32112 | EVT VT = N->getValueType(0); | |||
32113 | const TargetLowering &TLI = DAG.getTargetLoweringInfo(); | |||
32114 | // If we have legalized the vector types, look for blends of FADD and FSUB | |||
32115 | // nodes that we can fuse into an ADDSUB, FMADDSUB, or FMSUBADD node. | |||
32116 | if (TLI.isTypeLegal(VT)) { | |||
32117 | if (SDValue AddSub = combineShuffleToAddSubOrFMAddSub(N, Subtarget, DAG)) | |||
32118 | return AddSub; | |||
32119 | ||||
32120 | if (SDValue HAddSub = foldShuffleOfHorizOp(N)) | |||
32121 | return HAddSub; | |||
32122 | } | |||
32123 | ||||
32124 | // During Type Legalization, when promoting illegal vector types, | |||
32125 | // the backend might introduce new shuffle dag nodes and bitcasts. | |||
32126 | // | |||
32127 | // This code performs the following transformation: | |||
32128 | // fold: (shuffle (bitcast (BINOP A, B)), Undef, <Mask>) -> | |||
32129 | // (shuffle (BINOP (bitcast A), (bitcast B)), Undef, <Mask>) | |||
32130 | // | |||
32131 | // We do this only if both the bitcast and the BINOP dag nodes have | |||
32132 | // one use. Also, perform this transformation only if the new binary | |||
32133 | // operation is legal. This is to avoid introducing dag nodes that | |||
32134 | // potentially need to be further expanded (or custom lowered) into a | |||
32135 | // less optimal sequence of dag nodes. | |||
32136 | if (!DCI.isBeforeLegalize() && DCI.isBeforeLegalizeOps() && | |||
32137 | N->getOpcode() == ISD::VECTOR_SHUFFLE && | |||
32138 | N->getOperand(0).getOpcode() == ISD::BITCAST && | |||
32139 | N->getOperand(1).isUndef() && N->getOperand(0).hasOneUse()) { | |||
32140 | SDValue N0 = N->getOperand(0); | |||
32141 | SDValue N1 = N->getOperand(1); | |||
32142 | ||||
32143 | SDValue BC0 = N0.getOperand(0); | |||
32144 | EVT SVT = BC0.getValueType(); | |||
32145 | unsigned Opcode = BC0.getOpcode(); | |||
32146 | unsigned NumElts = VT.getVectorNumElements(); | |||
32147 | ||||
32148 | if (BC0.hasOneUse() && SVT.isVector() && | |||
32149 | SVT.getVectorNumElements() * 2 == NumElts && | |||
32150 | TLI.isOperationLegal(Opcode, VT)) { | |||
32151 | bool CanFold = false; | |||
32152 | switch (Opcode) { | |||
32153 | default : break; | |||
32154 | case ISD::ADD: | |||
32155 | case ISD::SUB: | |||
32156 | case ISD::MUL: | |||
32157 | // isOperationLegal lies for integer ops on floating point types. | |||
32158 | CanFold = VT.isInteger(); | |||
32159 | break; | |||
32160 | case ISD::FADD: | |||
32161 | case ISD::FSUB: | |||
32162 | case ISD::FMUL: | |||
32163 | // isOperationLegal lies for floating point ops on integer types. | |||
32164 | CanFold = VT.isFloatingPoint(); | |||
32165 | break; | |||
32166 | } | |||
32167 | ||||
32168 | unsigned SVTNumElts = SVT.getVectorNumElements(); | |||
32169 | ShuffleVectorSDNode *SVOp = cast<ShuffleVectorSDNode>(N); | |||
32170 | for (unsigned i = 0, e = SVTNumElts; i != e && CanFold; ++i) | |||
32171 | CanFold = SVOp->getMaskElt(i) == (int)(i * 2); | |||
32172 | for (unsigned i = SVTNumElts, e = NumElts; i != e && CanFold; ++i) | |||
32173 | CanFold = SVOp->getMaskElt(i) < 0; | |||
32174 | ||||
32175 | if (CanFold) { | |||
32176 | SDValue BC00 = DAG.getBitcast(VT, BC0.getOperand(0)); | |||
32177 | SDValue BC01 = DAG.getBitcast(VT, BC0.getOperand(1)); | |||
32178 | SDValue NewBinOp = DAG.getNode(BC0.getOpcode(), dl, VT, BC00, BC01); | |||
32179 | return DAG.getVectorShuffle(VT, dl, NewBinOp, N1, SVOp->getMask()); | |||
32180 | } | |||
32181 | } | |||
32182 | } | |||
32183 | ||||
32184 | // Combine a vector_shuffle that is equal to build_vector load1, load2, load3, | |||
32185 | // load4, <0, 1, 2, 3> into a 128-bit load if the load addresses are | |||
32186 | // consecutive, non-overlapping, and in the right order. | |||
32187 | SmallVector<SDValue, 16> Elts; | |||
32188 | for (unsigned i = 0, e = VT.getVectorNumElements(); i != e; ++i) { | |||
32189 | if (SDValue Elt = getShuffleScalarElt(N, i, DAG, 0)) { | |||
32190 | Elts.push_back(Elt); | |||
32191 | continue; | |||
32192 | } | |||
32193 | Elts.clear(); | |||
32194 | break; | |||
32195 | } | |||
32196 | ||||
32197 | if (Elts.size() == VT.getVectorNumElements()) | |||
32198 | if (SDValue LD = | |||
32199 | EltsFromConsecutiveLoads(VT, Elts, dl, DAG, Subtarget, true)) | |||
32200 | return LD; | |||
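// Worked example (editorial illustration, not in the original source): if
// the four lanes of a v4f32 shuffle resolve to f32 loads from p, p+4, p+8
// and p+12, in order and non-overlapping, EltsFromConsecutiveLoads folds
// the whole pattern into a single v4f32 load from p.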
32201 | ||||
32202 | // For AVX2, we sometimes want to combine | |||
32203 | // (vector_shuffle <mask> (concat_vectors t1, undef) | |||
32204 | // (concat_vectors t2, undef)) | |||
32205 | // Into: | |||
32206 | // (vector_shuffle <mask> (concat_vectors t1, t2), undef) | |||
32207 | // Since the latter can be efficiently lowered with VPERMD/VPERMQ | |||
32208 | if (SDValue ShufConcat = combineShuffleOfConcatUndef(N, DAG, Subtarget)) | |||
32209 | return ShufConcat; | |||
32210 | ||||
32211 | if (isTargetShuffle(N->getOpcode())) { | |||
32212 | SDValue Op(N, 0); | |||
32213 | if (SDValue Shuffle = combineTargetShuffle(Op, DAG, DCI, Subtarget)) | |||
32214 | return Shuffle; | |||
32215 | ||||
32216 | // Try recursively combining arbitrary sequences of x86 shuffle | |||
32217 | // instructions into higher-order shuffles. We do this after combining | |||
32218 | // specific PSHUF instruction sequences into their minimal form so that we | |||
32219 | // can evaluate how many specialized shuffle instructions are involved in | |||
32220 | // a particular chain. | |||
32221 | if (SDValue Res = combineX86ShufflesRecursively( | |||
32222 | {Op}, 0, Op, {0}, {}, /*Depth*/ 1, | |||
32223 | /*HasVarMask*/ false, /*AllowVarMask*/ true, DAG, Subtarget)) | |||
32224 | return Res; | |||
32225 | ||||
32226 | // Simplify source operands based on shuffle mask. | |||
32227 | // TODO - merge this into combineX86ShufflesRecursively. | |||
32228 | APInt KnownUndef, KnownZero; | |||
32229 | APInt DemandedElts = APInt::getAllOnesValue(VT.getVectorNumElements()); | |||
32230 | if (TLI.SimplifyDemandedVectorElts(Op, DemandedElts, KnownUndef, KnownZero, DCI)) | |||
32231 | return SDValue(N, 0); | |||
32232 | } | |||
32233 | ||||
32234 | return SDValue(); | |||
32235 | } | |||
32236 | ||||
32237 | bool X86TargetLowering::SimplifyDemandedVectorEltsForTargetNode( | |||
32238 | SDValue Op, const APInt &DemandedElts, APInt &KnownUndef, APInt &KnownZero, | |||
32239 | TargetLoweringOpt &TLO, unsigned Depth) const { | |||
32240 | int NumElts = DemandedElts.getBitWidth(); | |||
32241 | unsigned Opc = Op.getOpcode(); | |||
32242 | EVT VT = Op.getValueType(); | |||
32243 | ||||
32244 | // Handle special case opcodes. | |||
32245 | switch (Opc) { | |||
32246 | case X86ISD::VSHL: | |||
32247 | case X86ISD::VSRL: | |||
32248 | case X86ISD::VSRA: { | |||
32249 | // We only need the bottom 64-bits of the (128-bit) shift amount. | |||
32250 | SDValue Amt = Op.getOperand(1); | |||
32251 | MVT AmtVT = Amt.getSimpleValueType(); | |||
32252 | assert(AmtVT.is128BitVector() && "Unexpected value type"); | |||
32253 | APInt AmtUndef, AmtZero; | |||
32254 | unsigned NumAmtElts = AmtVT.getVectorNumElements(); | |||
32255 | APInt AmtElts = APInt::getLowBitsSet(NumAmtElts, NumAmtElts / 2); | |||
32256 | if (SimplifyDemandedVectorElts(Amt, AmtElts, AmtUndef, AmtZero, TLO, | |||
32257 | Depth + 1)) | |||
32258 | return true; | |||
32259 | LLVM_FALLTHROUGH; | |||
32260 | } | |||
32261 | case X86ISD::VSHLI: | |||
32262 | case X86ISD::VSRLI: | |||
32263 | case X86ISD::VSRAI: { | |||
32264 | SDValue Src = Op.getOperand(0); | |||
32265 | APInt SrcUndef; | |||
32266 | if (SimplifyDemandedVectorElts(Src, DemandedElts, SrcUndef, KnownZero, TLO, | |||
32267 | Depth + 1)) | |||
32268 | return true; | |||
32269 | // TODO convert SrcUndef to KnownUndef. | |||
32270 | break; | |||
32271 | } | |||
32272 | case X86ISD::CVTSI2P: | |||
32273 | case X86ISD::CVTUI2P: { | |||
32274 | SDValue Src = Op.getOperand(0); | |||
32275 | MVT SrcVT = Src.getSimpleValueType(); | |||
32276 | APInt SrcUndef, SrcZero; | |||
32277 | APInt SrcElts = DemandedElts.zextOrTrunc(SrcVT.getVectorNumElements()); | |||
32278 | if (SimplifyDemandedVectorElts(Src, SrcElts, SrcUndef, SrcZero, TLO, | |||
32279 | Depth + 1)) | |||
32280 | return true; | |||
32281 | break; | |||
32282 | } | |||
32283 | case X86ISD::PACKSS: | |||
32284 | case X86ISD::PACKUS: { | |||
32285 | APInt DemandedLHS, DemandedRHS; | |||
32286 | getPackDemandedElts(VT, DemandedElts, DemandedLHS, DemandedRHS); | |||
32287 | ||||
32288 | APInt SrcUndef, SrcZero; | |||
32289 | if (SimplifyDemandedVectorElts(Op.getOperand(0), DemandedLHS, SrcUndef, | |||
32290 | SrcZero, TLO, Depth + 1)) | |||
32291 | return true; | |||
32292 | if (SimplifyDemandedVectorElts(Op.getOperand(1), DemandedRHS, SrcUndef, | |||
32293 | SrcZero, TLO, Depth + 1)) | |||
32294 | return true; | |||
32295 | break; | |||
32296 | } | |||
32297 | case X86ISD::VBROADCAST: { | |||
32298 | SDValue Src = Op.getOperand(0); | |||
32299 | MVT SrcVT = Src.getSimpleValueType(); | |||
32300 | if (!SrcVT.isVector()) | |||
32301 | return false; | |||
32302 | // Don't bother broadcasting if we just need the 0th element. | |||
32303 | if (DemandedElts == 1) { | |||
32304 | if (Src.getValueType() != VT) | |||
32305 | Src = widenSubVector(VT.getSimpleVT(), Src, false, Subtarget, TLO.DAG, | |||
32306 | SDLoc(Op)); | |||
32307 | return TLO.CombineTo(Op, Src); | |||
32308 | } | |||
32309 | APInt SrcUndef, SrcZero; | |||
32310 | APInt SrcElts = APInt::getOneBitSet(SrcVT.getVectorNumElements(), 0); | |||
32311 | if (SimplifyDemandedVectorElts(Src, SrcElts, SrcUndef, SrcZero, TLO, | |||
32312 | Depth + 1)) | |||
32313 | return true; | |||
32314 | break; | |||
32315 | } | |||
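// Worked example (editorial illustration, not in the original source): for
// (v8f32 X86ISD::VBROADCAST (v4f32 Src)) where only lane 0 is demanded,
// the broadcast is unnecessary: Src already holds the value in lane 0, so
// it is widened to v8f32 (upper lanes undef) and substituted directly.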
32316 | case X86ISD::PSHUFB: { | |||
32317 | // TODO - simplify other variable shuffle masks. | |||
32318 | SDValue Mask = Op.getOperand(1); | |||
32319 | APInt MaskUndef, MaskZero; | |||
32320 | if (SimplifyDemandedVectorElts(Mask, DemandedElts, MaskUndef, MaskZero, TLO, | |||
32321 | Depth + 1)) | |||
32322 | return true; | |||
32323 | break; | |||
32324 | } | |||
32325 | } | |||
32326 | ||||
32327 | // Simplify target shuffles. | |||
32328 | if (!isTargetShuffle(Opc) || !VT.isSimple()) | |||
32329 | return false; | |||
32330 | ||||
32331 | // Get target shuffle mask. | |||
32332 | bool IsUnary; | |||
32333 | SmallVector<int, 64> OpMask; | |||
32334 | SmallVector<SDValue, 2> OpInputs; | |||
32335 | if (!getTargetShuffleMask(Op.getNode(), VT.getSimpleVT(), true, OpInputs, | |||
32336 | OpMask, IsUnary)) | |||
32337 | return false; | |||
32338 | ||||
32339 | // Shuffle inputs must be the same type as the result. | |||
32340 | if (llvm::any_of(OpInputs, | |||
32341 | [VT](SDValue V) { return VT != V.getValueType(); })) | |||
32342 | return false; | |||
32343 | ||||
32344 | // Clear known elts that might have been set above. | |||
32345 | KnownZero.clearAllBits(); | |||
32346 | KnownUndef.clearAllBits(); | |||
32347 | ||||
32348 | // Check if shuffle mask can be simplified to undef/zero/identity. | |||
32349 | int NumSrcs = OpInputs.size(); | |||
32350 | for (int i = 0; i != NumElts; ++i) { | |||
32351 | int &M = OpMask[i]; | |||
32352 | if (!DemandedElts[i]) | |||
32353 | M = SM_SentinelUndef; | |||
32354 | else if (0 <= M && OpInputs[M / NumElts].isUndef()) | |||
32355 | M = SM_SentinelUndef; | |||
32356 | } | |||
32357 | ||||
32358 | if (isUndefInRange(OpMask, 0, NumElts)) { | |||
32359 | KnownUndef.setAllBits(); | |||
32360 | return TLO.CombineTo(Op, TLO.DAG.getUNDEF(VT)); | |||
32361 | } | |||
32362 | if (isUndefOrZeroInRange(OpMask, 0, NumElts)) { | |||
32363 | KnownZero.setAllBits(); | |||
32364 | return TLO.CombineTo( | |||
32365 | Op, getZeroVector(VT.getSimpleVT(), Subtarget, TLO.DAG, SDLoc(Op))); | |||
32366 | } | |||
32367 | for (int Src = 0; Src != NumSrcs; ++Src) | |||
32368 | if (isSequentialOrUndefInRange(OpMask, 0, NumElts, Src * NumElts)) | |||
32369 | return TLO.CombineTo(Op, OpInputs[Src]); | |||
32370 | ||||
32371 | // Attempt to simplify inputs. | |||
32372 | for (int Src = 0; Src != NumSrcs; ++Src) { | |||
32373 | int Lo = Src * NumElts; | |||
32374 | APInt SrcElts = APInt::getNullValue(NumElts); | |||
32375 | for (int i = 0; i != NumElts; ++i) | |||
32376 | if (DemandedElts[i]) { | |||
32377 | int M = OpMask[i] - Lo; | |||
32378 | if (0 <= M && M < NumElts) | |||
32379 | SrcElts.setBit(M); | |||
32380 | } | |||
32381 | ||||
32382 | APInt SrcUndef, SrcZero; | |||
32383 | if (SimplifyDemandedVectorElts(OpInputs[Src], SrcElts, SrcUndef, SrcZero, | |||
32384 | TLO, Depth + 1)) | |||
32385 | return true; | |||
32386 | } | |||
32387 | ||||
32388 | // Extract known zero/undef elements. | |||
32389 | // TODO - Propagate input undef/zero elts. | |||
32390 | for (int i = 0; i != NumElts; ++i) { | |||
32391 | if (OpMask[i] == SM_SentinelUndef) | |||
32392 | KnownUndef.setBit(i); | |||
32393 | if (OpMask[i] == SM_SentinelZero) | |||
32394 | KnownZero.setBit(i); | |||
32395 | } | |||
32396 | ||||
32397 | return false; | |||
32398 | } | |||
32399 | ||||
32400 | bool X86TargetLowering::SimplifyDemandedBitsForTargetNode( | |||
32401 | SDValue Op, const APInt &OriginalDemandedBits, | |||
32402 | const APInt &OriginalDemandedElts, KnownBits &Known, TargetLoweringOpt &TLO, | |||
32403 | unsigned Depth) const { | |||
32404 | EVT VT = Op.getValueType(); | |||
32405 | unsigned BitWidth = OriginalDemandedBits.getBitWidth(); | |||
32406 | unsigned Opc = Op.getOpcode(); | |||
32407 | switch (Opc) { | |||
32408 | case X86ISD::PMULDQ: | |||
32409 | case X86ISD::PMULUDQ: { | |||
32410 | // PMULDQ/PMULUDQ only use the lower 32 bits from each vector element. | |||
32411 | KnownBits KnownOp; | |||
32412 | SDValue LHS = Op.getOperand(0); | |||
32413 | SDValue RHS = Op.getOperand(1); | |||
32414 | // FIXME: Can we bound this better? | |||
32415 | APInt DemandedMask = APInt::getLowBitsSet(64, 32); | |||
32416 | if (SimplifyDemandedBits(LHS, DemandedMask, KnownOp, TLO, Depth + 1)) | |||
32417 | return true; | |||
32418 | if (SimplifyDemandedBits(RHS, DemandedMask, KnownOp, TLO, Depth + 1)) | |||
32419 | return true; | |||
32420 | break; | |||
32421 | } | |||
32422 | case X86ISD::VSHLI: { | |||
32423 | if (auto *ShiftImm = dyn_cast<ConstantSDNode>(Op.getOperand(1))) { | |||
32424 | if (ShiftImm->getAPIntValue().uge(BitWidth)) | |||
32425 | break; | |||
32426 | ||||
32427 | unsigned ShAmt = ShiftImm->getZExtValue(); | |||
32428 | APInt DemandedMask = OriginalDemandedBits.lshr(ShAmt); | |||
32429 | ||||
32430 | if (SimplifyDemandedBits(Op.getOperand(0), DemandedMask, | |||
32431 | OriginalDemandedElts, Known, TLO, Depth + 1)) | |||
32432 | return true; | |||
32433 | ||||
32434 | assert(!Known.hasConflict() && "Bits known to be one AND zero?"); | |||
32435 | Known.Zero <<= ShAmt; | |||
32436 | Known.One <<= ShAmt; | |||
32437 | ||||
32438 | // Low bits known zero. | |||
32439 | Known.Zero.setLowBits(ShAmt); | |||
32440 | } | |||
32441 | break; | |||
32442 | } | |||
32443 | case X86ISD::VSRLI: { | |||
32444 | if (auto *ShiftImm = dyn_cast<ConstantSDNode>(Op.getOperand(1))) { | |||
32445 | if (ShiftImm->getAPIntValue().uge(BitWidth)) | |||
32446 | break; | |||
32447 | ||||
32448 | unsigned ShAmt = ShiftImm->getZExtValue(); | |||
32449 | APInt DemandedMask = OriginalDemandedBits << ShAmt; | |||
32450 | ||||
32451 | if (SimplifyDemandedBits(Op.getOperand(0), DemandedMask, | |||
32452 | OriginalDemandedElts, Known, TLO, Depth + 1)) | |||
32453 | return true; | |||
32454 | ||||
32455 | assert(!Known.hasConflict() && "Bits known to be one AND zero?"); | |||
32456 | Known.Zero.lshrInPlace(ShAmt); | |||
32457 | Known.One.lshrInPlace(ShAmt); | |||
32458 | ||||
32459 | // High bits known zero. | |||
32460 | Known.Zero.setHighBits(ShAmt); | |||
32461 | } | |||
32462 | break; | |||
32463 | } | |||
32464 | case X86ISD::VSRAI: { | |||
32465 | SDValue Op0 = Op.getOperand(0); | |||
32466 | SDValue Op1 = Op.getOperand(1); | |||
32467 | ||||
32468 | if (auto *ShiftImm = dyn_cast<ConstantSDNode>(Op1)) { | |||
32469 | if (ShiftImm->getAPIntValue().uge(BitWidth)) | |||
32470 | break; | |||
32471 | ||||
32472 | unsigned ShAmt = ShiftImm->getZExtValue(); | |||
32473 | APInt DemandedMask = OriginalDemandedBits << ShAmt; | |||
32474 | ||||
32475 | // If we just want the sign bit then we don't need to shift it. | |||
32476 | if (OriginalDemandedBits.isSignMask()) | |||
32477 | return TLO.CombineTo(Op, Op0); | |||
32478 | ||||
32479 | // fold (VSRAI (VSHLI X, C1), C1) --> X iff NumSignBits(X) > C1 | |||
32480 | if (Op0.getOpcode() == X86ISD::VSHLI && Op1 == Op0.getOperand(1)) { | |||
32481 | SDValue Op00 = Op0.getOperand(0); | |||
32482 | unsigned NumSignBits = | |||
32483 | TLO.DAG.ComputeNumSignBits(Op00, OriginalDemandedElts); | |||
32484 | if (ShAmt < NumSignBits) | |||
32485 | return TLO.CombineTo(Op, Op00); | |||
32486 | } | |||
32487 | ||||
32488 | // If any of the demanded bits are produced by the sign extension, we also | |||
32489 | // demand the input sign bit. | |||
32490 | if (OriginalDemandedBits.countLeadingZeros() < ShAmt) | |||
32491 | DemandedMask.setSignBit(); | |||
32492 | ||||
32493 | if (SimplifyDemandedBits(Op0, DemandedMask, OriginalDemandedElts, Known, | |||
32494 | TLO, Depth + 1)) | |||
32495 | return true; | |||
32496 | ||||
32497 | assert(!Known.hasConflict() && "Bits known to be one AND zero?"); | |||
32498 | Known.Zero.lshrInPlace(ShAmt); | |||
32499 | Known.One.lshrInPlace(ShAmt); | |||
32500 | ||||
32501 | // If the input sign bit is known to be zero, or if none of the top bits | |||
32502 | // are demanded, turn this into an unsigned shift right. | |||
32503 | if (Known.Zero[BitWidth - ShAmt - 1] || | |||
32504 | OriginalDemandedBits.countLeadingZeros() >= ShAmt) | |||
32505 | return TLO.CombineTo( | |||
32506 | Op, TLO.DAG.getNode(X86ISD::VSRLI, SDLoc(Op), VT, Op0, Op1)); | |||
32507 | ||||
32508 | // High bits are known one. | |||
32509 | if (Known.One[BitWidth - ShAmt - 1]) | |||
32510 | Known.One.setHighBits(ShAmt); | |||
32511 | } | |||
32512 | break; | |||
32513 | } | |||
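// Worked example (editorial illustration, not in the original source): the
// (VSRAI (VSHLI X, C), C) fold above undoes a sign_extend_inreg idiom.
// For v4i32 X with 20 known sign bits, (X << 16) >>s 16 sign-extends from
// bit 15, but bits 31..15 of X already agree, so ShAmt (16) < NumSignBits
// (20) lets the pair collapse back to X.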
32514 | case X86ISD::MOVMSK: { | |||
32515 | SDValue Src = Op.getOperand(0); | |||
32516 | MVT SrcVT = Src.getSimpleValueType(); | |||
32517 | unsigned SrcBits = SrcVT.getScalarSizeInBits(); | |||
32518 | unsigned NumElts = SrcVT.getVectorNumElements(); | |||
32519 | ||||
32520 | // If we don't need the sign bits at all just return zero. | |||
32521 | if (OriginalDemandedBits.countTrailingZeros() >= NumElts) | |||
32522 | return TLO.CombineTo(Op, TLO.DAG.getConstant(0, SDLoc(Op), VT)); | |||
32523 | ||||
32524 | // Only demand the vector elements of the sign bits we need. | |||
32525 | APInt KnownUndef, KnownZero; | |||
32526 | APInt DemandedElts = OriginalDemandedBits.zextOrTrunc(NumElts); | |||
32527 | if (SimplifyDemandedVectorElts(Src, DemandedElts, KnownUndef, KnownZero, | |||
32528 | TLO, Depth + 1)) | |||
32529 | return true; | |||
32530 | ||||
32531 | Known.Zero = KnownZero.zextOrSelf(BitWidth); | |||
32532 | Known.Zero.setHighBits(BitWidth - NumElts); | |||
32533 | ||||
32534 | // MOVMSK only uses the MSB from each vector element. | |||
32535 | KnownBits KnownSrc; | |||
32536 | if (SimplifyDemandedBits(Src, APInt::getSignMask(SrcBits), DemandedElts, | |||
32537 | KnownSrc, TLO, Depth + 1)) | |||
32538 | return true; | |||
32539 | ||||
32540 | if (KnownSrc.One[SrcBits - 1]) | |||
32541 | Known.One.setLowBits(NumElts); | |||
32542 | else if (KnownSrc.Zero[SrcBits - 1]) | |||
32543 | Known.Zero.setLowBits(NumElts); | |||
32544 | return false; | |||
32545 | } | |||
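// Worked example (editorial illustration, not in the original source): for
// (i32 MOVMSK (v4f32 V)) only result bits 3:0 can be nonzero. If a user
// demands none of them (countTrailingZeros() >= 4) the node folds to the
// constant 0; otherwise only the sign bit of each demanded lane of V is
// requested from the operand.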
32546 | } | |||
32547 | ||||
32548 | return TargetLowering::SimplifyDemandedBitsForTargetNode( | |||
32549 | Op, OriginalDemandedBits, OriginalDemandedElts, Known, TLO, Depth); | |||
32550 | } | |||
32551 | ||||
32552 | /// Check if a vector extract from a target-specific shuffle of a load can be | |||
32553 | /// folded into a single element load. | |||
32554 | /// Similar handling for VECTOR_SHUFFLE is performed by DAGCombiner, but | |||
32555 | /// shuffles have been custom lowered so we need to handle those here. | |||
32556 | static SDValue XFormVExtractWithShuffleIntoLoad(SDNode *N, SelectionDAG &DAG, | |||
32557 | TargetLowering::DAGCombinerInfo &DCI) { | |||
32558 | if (DCI.isBeforeLegalizeOps()) | |||
32559 | return SDValue(); | |||
32560 | ||||
32561 | SDValue InVec = N->getOperand(0); | |||
32562 | SDValue EltNo = N->getOperand(1); | |||
32563 | EVT EltVT = N->getValueType(0); | |||
32564 | ||||
32565 | if (!isa<ConstantSDNode>(EltNo)) | |||
32566 | return SDValue(); | |||
32567 | ||||
32568 | EVT OriginalVT = InVec.getValueType(); | |||
32569 | ||||
32570 | // Peek through bitcasts, don't duplicate a load with other uses. | |||
32571 | InVec = peekThroughOneUseBitcasts(InVec); | |||
32572 | ||||
32573 | EVT CurrentVT = InVec.getValueType(); | |||
32574 | if (!CurrentVT.isVector() || | |||
32575 | CurrentVT.getVectorNumElements() != OriginalVT.getVectorNumElements()) | |||
32576 | return SDValue(); | |||
32577 | ||||
32578 | if (!isTargetShuffle(InVec.getOpcode())) | |||
32579 | return SDValue(); | |||
32580 | ||||
32581 | // Don't duplicate a load with other uses. | |||
32582 | if (!InVec.hasOneUse()) | |||
32583 | return SDValue(); | |||
32584 | ||||
32585 | SmallVector<int, 16> ShuffleMask; | |||
32586 | SmallVector<SDValue, 2> ShuffleOps; | |||
32587 | bool UnaryShuffle; | |||
32588 | if (!getTargetShuffleMask(InVec.getNode(), CurrentVT.getSimpleVT(), true, | |||
32589 | ShuffleOps, ShuffleMask, UnaryShuffle)) | |||
32590 | return SDValue(); | |||
32591 | ||||
32592 | // Select the input vector, guarding against an out-of-range extract index. | |||
32593 | unsigned NumElems = CurrentVT.getVectorNumElements(); | |||
32594 | int Elt = cast<ConstantSDNode>(EltNo)->getZExtValue(); | |||
32595 | int Idx = (Elt >= (int)NumElems) ? SM_SentinelUndef : ShuffleMask[Elt]; | |||
32596 | ||||
32597 | if (Idx == SM_SentinelZero) | |||
32598 | return EltVT.isInteger() ? DAG.getConstant(0, SDLoc(N), EltVT) | |||
32599 | : DAG.getConstantFP(+0.0, SDLoc(N), EltVT); | |||
32600 | if (Idx == SM_SentinelUndef) | |||
32601 | return DAG.getUNDEF(EltVT); | |||
32602 | ||||
32603 | // Bail if any mask element is SM_SentinelZero - getVectorShuffle below | |||
32604 | // won't handle it. | |||
32605 | if (llvm::any_of(ShuffleMask, [](int M) { return M == SM_SentinelZero; })) | |||
32606 | return SDValue(); | |||
32607 | ||||
32608 | assert(0 <= Idx && Idx < (int)(2 * NumElems) && "Shuffle index out of range"); | |||
32609 | SDValue LdNode = (Idx < (int)NumElems) ? ShuffleOps[0] : ShuffleOps[1]; | |||
32610 | ||||
32611 | // If inputs to shuffle are the same for both ops, then allow 2 uses | |||
32612 | unsigned AllowedUses = | |||
32613 | (ShuffleOps.size() > 1 && ShuffleOps[0] == ShuffleOps[1]) ? 2 : 1; | |||
32614 | ||||
32615 | if (LdNode.getOpcode() == ISD::BITCAST) { | |||
32616 | // Don't duplicate a load with other uses. | |||
32617 | if (!LdNode.getNode()->hasNUsesOfValue(AllowedUses, 0)) | |||
32618 | return SDValue(); | |||
32619 | ||||
32620 | AllowedUses = 1; // only allow 1 load use if we have a bitcast | |||
32621 | LdNode = LdNode.getOperand(0); | |||
32622 | } | |||
32623 | ||||
32624 | if (!ISD::isNormalLoad(LdNode.getNode())) | |||
32625 | return SDValue(); | |||
32626 | ||||
32627 | LoadSDNode *LN0 = cast<LoadSDNode>(LdNode); | |||
32628 | ||||
32629 | if (!LN0 || !LN0->hasNUsesOfValue(AllowedUses, 0) || LN0->isVolatile()) | |||
32630 | return SDValue(); | |||
32631 | ||||
32632 | // If there's a bitcast before the shuffle, check if the load type and | |||
32633 | // alignment are valid. | |||
32634 | unsigned Align = LN0->getAlignment(); | |||
32635 | const TargetLowering &TLI = DAG.getTargetLoweringInfo(); | |||
32636 | unsigned NewAlign = DAG.getDataLayout().getABITypeAlignment( | |||
32637 | EltVT.getTypeForEVT(*DAG.getContext())); | |||
32638 | ||||
32639 | if (NewAlign > Align || !TLI.isOperationLegalOrCustom(ISD::LOAD, EltVT)) | |||
32640 | return SDValue(); | |||
32641 | ||||
32642 | // All checks match so transform back to vector_shuffle so that DAG combiner | |||
32643 | // can finish the job | |||
32644 | SDLoc dl(N); | |||
32645 | ||||
32646 | // Create shuffle node taking into account the case that it's a unary shuffle. | |||
32647 | SDValue Shuffle = (UnaryShuffle) ? DAG.getUNDEF(CurrentVT) : ShuffleOps[1]; | |||
32648 | Shuffle = DAG.getVectorShuffle(CurrentVT, dl, ShuffleOps[0], Shuffle, | |||
32649 | ShuffleMask); | |||
32650 | Shuffle = DAG.getBitcast(OriginalVT, Shuffle); | |||
32651 | return DAG.getNode(ISD::EXTRACT_VECTOR_ELT, dl, N->getValueType(0), Shuffle, | |||
32652 | EltNo); | |||
32653 | } | |||
32654 | ||||
32655 | // Try to match patterns such as | |||
32656 | // (i16 bitcast (v16i1 x)) | |||
32657 | // -> | |||
32658 | // (i16 movmsk (v16i8 sext (v16i1 x))) | |||
32659 | // before the illegal vector is scalarized on subtargets that don't have legal | |||
32660 | // vxi1 types. | |||
32661 | static SDValue combineBitcastvxi1(SelectionDAG &DAG, SDValue BitCast, | |||
32662 | const X86Subtarget &Subtarget) { | |||
32663 | EVT VT = BitCast.getValueType(); | |||
32664 | SDValue N0 = BitCast.getOperand(0); | |||
32665 | EVT VecVT = N0->getValueType(0); | |||
32666 | ||||
32667 | if (!VT.isScalarInteger() || !VecVT.isSimple()) | |||
32668 | return SDValue(); | |||
32669 | ||||
32670 | // With AVX512 vxi1 types are legal and we prefer using k-regs. | |||
32671 | // MOVMSK is supported in SSE2 or later. | |||
32672 | if (Subtarget.hasAVX512() || !Subtarget.hasSSE2()) | |||
32673 | return SDValue(); | |||
32674 | ||||
32675 | // There are MOVMSK flavors for types v16i8, v32i8, v4f32, v8f32, v2f64 and | |||
32676 | // v4f64. So all legal 128-bit and 256-bit vectors are covered except for | |||
32677 | // v8i16 and v16i16. | |||
32678 | // For these two cases, we can shuffle the upper element bytes to a | |||
32679 | // consecutive sequence at the start of the vector and treat the results as | |||
32680 | // v16i8 or v32i8, and for v16i8 this is the preferable solution. However, | |||
32681 | // for v16i16 this is not the case, because the shuffle is expensive, so we | |||
32682 | // avoid sign-extending to this type entirely. | |||
32683 | // For example, t0 := (v8i16 sext(v8i1 x)) needs to be shuffled as: | |||
32684 | // (v16i8 shuffle <0,2,4,6,8,10,12,14,u,u,...,u> (v16i8 bitcast t0), undef) | |||
32685 | MVT SExtVT; | |||
32686 | switch (VecVT.getSimpleVT().SimpleTy) { | |||
32687 | default: | |||
32688 | return SDValue(); | |||
32689 | case MVT::v2i1: | |||
32690 | SExtVT = MVT::v2i64; | |||
32691 | break; | |||
32692 | case MVT::v4i1: | |||
32693 | SExtVT = MVT::v4i32; | |||
32694 | // For cases such as (i4 bitcast (v4i1 setcc v4i64 v1, v2)) | |||
32695 | // sign-extend to a 256-bit operation to avoid truncation. | |||
32696 | if (N0->getOpcode() == ISD::SETCC && Subtarget.hasAVX() && | |||
32697 | N0->getOperand(0).getValueType().is256BitVector()) { | |||
32698 | SExtVT = MVT::v4i64; | |||
32699 | } | |||
32700 | break; | |||
32701 | case MVT::v8i1: | |||
32702 | SExtVT = MVT::v8i16; | |||
32703 | // For cases such as (i8 bitcast (v8i1 setcc v8i32 v1, v2)), | |||
32704 | // sign-extend to a 256-bit operation to match the compare. | |||
32705 | // If the setcc operand is 128-bit, prefer sign-extending to 128-bit over | |||
32706 | // 256-bit because the shuffle is cheaper than sign extending the result of | |||
32707 | // the compare. | |||
32708 | if (N0->getOpcode() == ISD::SETCC && Subtarget.hasAVX() && | |||
32709 | (N0->getOperand(0).getValueType().is256BitVector() || | |||
32710 | N0->getOperand(0).getValueType().is512BitVector())) { | |||
32711 | SExtVT = MVT::v8i32; | |||
32712 | } | |||
32713 | break; | |||
32714 | case MVT::v16i1: | |||
32715 | SExtVT = MVT::v16i8; | |||
32716 | // For the case (i16 bitcast (v16i1 setcc v16i16 v1, v2)), | |||
32717 | // it is not profitable to sign-extend to 256-bit because this will | |||
32718 | // require an extra cross-lane shuffle which is more expensive than | |||
32719 | // truncating the result of the compare to 128-bits. | |||
32720 | break; | |||
32721 | case MVT::v32i1: | |||
32722 | SExtVT = MVT::v32i8; | |||
32723 | break; | |||
32724 | } | |||
32725 | ||||
32726 | SDLoc DL(BitCast); | |||
32727 | SDValue V = DAG.getNode(ISD::SIGN_EXTEND, DL, SExtVT, N0); | |||
32728 | ||||
32729 | if (SExtVT == MVT::v16i8 || SExtVT == MVT::v32i8) { | |||
32730 | V = getPMOVMSKB(DL, V, DAG, Subtarget); | |||
32731 | } else { | |||
32732 | if (SExtVT == MVT::v8i16) | |||
32733 | V = DAG.getNode(X86ISD::PACKSS, DL, MVT::v16i8, V, | |||
32734 | DAG.getUNDEF(MVT::v8i16)); | |||
32735 | V = DAG.getNode(X86ISD::MOVMSK, DL, MVT::i32, V); | |||
32736 | } | |||
32737 | return DAG.getZExtOrTrunc(V, DL, VT); | |||
32738 | } | |||
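// Worked example (editorial illustration, not in the original source): for
// (i8 bitcast (v8i1 X)) on an SSE2-only target, X is sign-extended to
// v8i16, PACKSS narrows that into the low half of a v16i8, MOVMSK gathers
// the sign bits into an i32, and the final trunc yields the i8; the upper
// eight movmsk bits come from the undef half and are discarded.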
32739 | ||||
32740 | // Convert a vXi1 constant build vector to the same width scalar integer. | |||
32741 | static SDValue combinevXi1ConstantToInteger(SDValue Op, SelectionDAG &DAG) { | |||
32742 | EVT SrcVT = Op.getValueType(); | |||
32743 | assert(SrcVT.getVectorElementType() == MVT::i1 && | |||
32744 | "Expected a vXi1 vector"); | |||
32745 | assert(ISD::isBuildVectorOfConstantSDNodes(Op.getNode()) && | |||
32746 | "Expected a constant build vector"); | |||
32747 | ||||
32748 | APInt Imm(SrcVT.getVectorNumElements(), 0); | |||
32749 | for (unsigned Idx = 0, e = Op.getNumOperands(); Idx < e; ++Idx) { | |||
32750 | SDValue In = Op.getOperand(Idx); | |||
32751 | if (!In.isUndef() && (cast<ConstantSDNode>(In)->getZExtValue() & 0x1)) | |||
32752 | Imm.setBit(Idx); | |||
32753 | } | |||
32754 | EVT IntVT = EVT::getIntegerVT(*DAG.getContext(), Imm.getBitWidth()); | |||
32755 | return DAG.getConstant(Imm, SDLoc(Op), IntVT); | |||
32756 | } | |||
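// Worked example (editorial illustration, not in the original source): the
// v4i1 constant build vector <1, 0, 1, 1> sets bits 0, 2 and 3 of Imm,
// producing the i4 constant 0b1101 (13); undef elements are simply left as
// zero bits.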
32757 | ||||
32758 | static SDValue combineCastedMaskArithmetic(SDNode *N, SelectionDAG &DAG, | |||
32759 | TargetLowering::DAGCombinerInfo &DCI, | |||
32760 | const X86Subtarget &Subtarget) { | |||
32761 | assert(N->getOpcode() == ISD::BITCAST && "Expected a bitcast"); | |||
32762 | ||||
32763 | if (!DCI.isBeforeLegalizeOps()) | |||
32764 | return SDValue(); | |||
32765 | ||||
32766 | // Only do this if we have k-registers. | |||
32767 | if (!Subtarget.hasAVX512()) | |||
32768 | return SDValue(); | |||
32769 | ||||
32770 | EVT DstVT = N->getValueType(0); | |||
32771 | SDValue Op = N->getOperand(0); | |||
32772 | EVT SrcVT = Op.getValueType(); | |||
32773 | ||||
32774 | if (!Op.hasOneUse()) | |||
32775 | return SDValue(); | |||
32776 | ||||
32777 | // Look for logic ops. | |||
32778 | if (Op.getOpcode() != ISD::AND && | |||
32779 | Op.getOpcode() != ISD::OR && | |||
32780 | Op.getOpcode() != ISD::XOR) | |||
32781 | return SDValue(); | |||
32782 | ||||
32783 | // Make sure we have a bitcast between mask registers and a scalar type. | |||
32784 | if (!(SrcVT.isVector() && SrcVT.getVectorElementType() == MVT::i1 && | |||
32785 | DstVT.isScalarInteger()) && | |||
32786 | !(DstVT.isVector() && DstVT.getVectorElementType() == MVT::i1 && | |||
32787 | SrcVT.isScalarInteger())) | |||
32788 | return SDValue(); | |||
32789 | ||||
32790 | SDValue LHS = Op.getOperand(0); | |||
32791 | SDValue RHS = Op.getOperand(1); | |||
32792 | ||||
32793 | if (LHS.hasOneUse() && LHS.getOpcode() == ISD::BITCAST && | |||
32794 | LHS.getOperand(0).getValueType() == DstVT) | |||
32795 | return DAG.getNode(Op.getOpcode(), SDLoc(N), DstVT, LHS.getOperand(0), | |||
32796 | DAG.getBitcast(DstVT, RHS)); | |||
32797 | ||||
32798 | if (RHS.hasOneUse() && RHS.getOpcode() == ISD::BITCAST && | |||
32799 | RHS.getOperand(0).getValueType() == DstVT) | |||
32800 | return DAG.getNode(Op.getOpcode(), SDLoc(N), DstVT, | |||
32801 | DAG.getBitcast(DstVT, LHS), RHS.getOperand(0)); | |||
32802 | ||||
32803 | // If the RHS is a vXi1 build vector, this is a good reason to flip too. | |||
32804 | // Most of these have to move a constant from the scalar domain anyway. | |||
32805 | if (ISD::isBuildVectorOfConstantSDNodes(RHS.getNode())) { | |||
32806 | RHS = combinevXi1ConstantToInteger(RHS, DAG); | |||
32807 | return DAG.getNode(Op.getOpcode(), SDLoc(N), DstVT, | |||
32808 | DAG.getBitcast(DstVT, LHS), RHS); | |||
32809 | } | |||
32810 | ||||
32811 | return SDValue(); | |||
32812 | } | |||
32813 | ||||
32814 | static SDValue createMMXBuildVector(SDValue N, SelectionDAG &DAG, | |||
32815 | const X86Subtarget &Subtarget) { | |||
32816 | SDLoc DL(N); | |||
32817 | unsigned NumElts = N.getNumOperands(); | |||
32818 | ||||
32819 | auto *BV = cast<BuildVectorSDNode>(N); | |||
32820 | SDValue Splat = BV->getSplatValue(); | |||
32821 | ||||
32822 | // Build MMX element from integer GPR or SSE float values. | |||
32823 | auto CreateMMXElement = [&](SDValue V) { | |||
32824 | if (V.isUndef()) | |||
32825 | return DAG.getUNDEF(MVT::x86mmx); | |||
32826 | if (V.getValueType().isFloatingPoint()) { | |||
32827 | if (Subtarget.hasSSE1() && !isa<ConstantFPSDNode>(V)) { | |||
32828 | V = DAG.getNode(ISD::SCALAR_TO_VECTOR, DL, MVT::v4f32, V); | |||
32829 | V = DAG.getBitcast(MVT::v2i64, V); | |||
32830 | return DAG.getNode(X86ISD::MOVDQ2Q, DL, MVT::x86mmx, V); | |||
32831 | } | |||
32832 | V = DAG.getBitcast(MVT::i32, V); | |||
32833 | } else { | |||
32834 | V = DAG.getAnyExtOrTrunc(V, DL, MVT::i32); | |||
32835 | } | |||
32836 | return DAG.getNode(X86ISD::MMX_MOVW2D, DL, MVT::x86mmx, V); | |||
32837 | }; | |||
32838 | ||||
32839 | // Convert build vector ops to MMX data in the bottom elements. | |||
32840 | SmallVector<SDValue, 8> Ops; | |||
32841 | ||||
32842 | // Broadcast - use (PUNPCKL+)PSHUFW to broadcast single element. | |||
32843 | if (Splat) { | |||
32844 | if (Splat.isUndef()) | |||
32845 | return DAG.getUNDEF(MVT::x86mmx); | |||
32846 | ||||
32847 | Splat = CreateMMXElement(Splat); | |||
32848 | ||||
32849 | if (Subtarget.hasSSE1()) { | |||
32850 | // Unpack v8i8 to splat i8 elements to lowest 16-bits. | |||
32851 | if (NumElts == 8) | |||
32852 | Splat = DAG.getNode( | |||
32853 | ISD::INTRINSIC_WO_CHAIN, DL, MVT::x86mmx, | |||
32854 | DAG.getConstant(Intrinsic::x86_mmx_punpcklbw, DL, MVT::i32), Splat, | |||
32855 | Splat); | |||
32856 | ||||
32857 | // Use PSHUFW to repeat 16-bit elements. | |||
32858 | unsigned ShufMask = (NumElts > 2 ? 0 : 0x44); | |||
32859 | return DAG.getNode( | |||
32860 | ISD::INTRINSIC_WO_CHAIN, DL, MVT::x86mmx, | |||
32861 | DAG.getConstant(Intrinsic::x86_sse_pshuf_w, DL, MVT::i32), Splat, | |||
32862 | DAG.getConstant(ShufMask, DL, MVT::i8)); | |||
32863 | } | |||
32864 | Ops.append(NumElts, Splat); | |||
32865 | } else { | |||
32866 | for (unsigned i = 0; i != NumElts; ++i) | |||
32867 | Ops.push_back(CreateMMXElement(N.getOperand(i))); | |||
32868 | } | |||
32869 | ||||
32870 | // Use tree of PUNPCKLs to build up general MMX vector. | |||
32871 | while (Ops.size() > 1) { | |||
32872 | unsigned NumOps = Ops.size(); | |||
32873 | unsigned IntrinOp = | |||
32874 | (NumOps == 2 ? Intrinsic::x86_mmx_punpckldq | |||
32875 | : (NumOps == 4 ? Intrinsic::x86_mmx_punpcklwd | |||
32876 | : Intrinsic::x86_mmx_punpcklbw)); | |||
32877 | SDValue Intrin = DAG.getConstant(IntrinOp, DL, MVT::i32); | |||
32878 | for (unsigned i = 0; i != NumOps; i += 2) | |||
32879 | Ops[i / 2] = DAG.getNode(ISD::INTRINSIC_WO_CHAIN, DL, MVT::x86mmx, Intrin, | |||
32880 | Ops[i], Ops[i + 1]); | |||
32881 | Ops.resize(NumOps / 2); | |||
32882 | } | |||
32883 | ||||
32884 | return Ops[0]; | |||
32885 | } | |||
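// Worked example (editorial illustration, not in the original source): for
// eight i8 elements e0..e7, each in the low byte of its own MMX value, the
// loop runs punpcklbw to form low-word pairs (e0,e1) .. (e6,e7), then
// punpcklwd to form dwords (e0..e3) and (e4..e7), and a final punpckldq
// assembles the full 64-bit <e0,...,e7> vector.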
32886 | ||||
32887 | static SDValue combineBitcast(SDNode *N, SelectionDAG &DAG, | |||
32888 | TargetLowering::DAGCombinerInfo &DCI, | |||
32889 | const X86Subtarget &Subtarget) { | |||
32890 | SDValue N0 = N->getOperand(0); | |||
32891 | EVT VT = N->getValueType(0); | |||
32892 | EVT SrcVT = N0.getValueType(); | |||
32893 | ||||
32894 | // Try to match patterns such as | |||
32895 | // (i16 bitcast (v16i1 x)) | |||
32896 | // -> | |||
32897 |   // (i16 movmsk (v16i8 sext (v16i1 x))) | |||
32898 | // before the setcc result is scalarized on subtargets that don't have legal | |||
32899 | // vxi1 types. | |||
32900 | if (DCI.isBeforeLegalize()) { | |||
32901 | if (SDValue V = combineBitcastvxi1(DAG, SDValue(N, 0), Subtarget)) | |||
32902 | return V; | |||
32903 | ||||
32904 | // If this is a bitcast between a MVT::v4i1/v2i1 and an illegal integer | |||
32905 | // type, widen both sides to avoid a trip through memory. | |||
32906 | if ((VT == MVT::v4i1 || VT == MVT::v2i1) && SrcVT.isScalarInteger() && | |||
32907 | Subtarget.hasAVX512()) { | |||
32908 | SDLoc dl(N); | |||
32909 | N0 = DAG.getNode(ISD::ANY_EXTEND, dl, MVT::i8, N0); | |||
32910 | N0 = DAG.getBitcast(MVT::v8i1, N0); | |||
32911 | return DAG.getNode(ISD::EXTRACT_SUBVECTOR, dl, VT, N0, | |||
32912 | DAG.getIntPtrConstant(0, dl)); | |||
32913 | } | |||
32914 | ||||
32915 | // If this is a bitcast between a MVT::v4i1/v2i1 and an illegal integer | |||
32916 | // type, widen both sides to avoid a trip through memory. | |||
32917 | if ((SrcVT == MVT::v4i1 || SrcVT == MVT::v2i1) && VT.isScalarInteger() && | |||
32918 | Subtarget.hasAVX512()) { | |||
32919 | SDLoc dl(N); | |||
32920 | unsigned NumConcats = 8 / SrcVT.getVectorNumElements(); | |||
32921 | SmallVector<SDValue, 4> Ops(NumConcats, DAG.getUNDEF(SrcVT)); | |||
32922 | Ops[0] = N0; | |||
32923 | N0 = DAG.getNode(ISD::CONCAT_VECTORS, dl, MVT::v8i1, Ops); | |||
32924 | N0 = DAG.getBitcast(MVT::i8, N0); | |||
32925 | return DAG.getNode(ISD::TRUNCATE, dl, VT, N0); | |||
32926 | } | |||
32927 | } | |||
32928 | ||||
32929 | // Since MMX types are special and don't usually play with other vector types, | |||
32930 | // it's better to handle them early to be sure we emit efficient code by | |||
32931 | // avoiding store-load conversions. | |||
32932 | if (VT == MVT::x86mmx) { | |||
32933 | // Detect MMX constant vectors. | |||
32934 | APInt UndefElts; | |||
32935 | SmallVector<APInt, 1> EltBits; | |||
32936 | if (getTargetConstantBitsFromNode(N0, 64, UndefElts, EltBits)) { | |||
32937 | SDLoc DL(N0); | |||
32938 | // Handle zero-extension of i32 with MOVD. | |||
32939 | if (EltBits[0].countLeadingZeros() >= 32) | |||
32940 | return DAG.getNode(X86ISD::MMX_MOVW2D, DL, VT, | |||
32941 | DAG.getConstant(EltBits[0].trunc(32), DL, MVT::i32)); | |||
32942 | // Else, bitcast to a double. | |||
32943 | // TODO - investigate supporting sext 32-bit immediates on x86_64. | |||
32944 | APFloat F64(APFloat::IEEEdouble(), EltBits[0]); | |||
32945 | return DAG.getBitcast(VT, DAG.getConstantFP(F64, DL, MVT::f64)); | |||
32946 | } | |||
32947 | ||||
32948 | // Detect bitcasts to x86mmx low word. | |||
32949 | if (N0.getOpcode() == ISD::BUILD_VECTOR && | |||
32950 | (SrcVT == MVT::v2i32 || SrcVT == MVT::v4i16 || SrcVT == MVT::v8i8) && | |||
32951 | N0.getOperand(0).getValueType() == SrcVT.getScalarType()) { | |||
32952 | bool LowUndef = true, AllUndefOrZero = true; | |||
32953 | for (unsigned i = 1, e = SrcVT.getVectorNumElements(); i != e; ++i) { | |||
32954 | SDValue Op = N0.getOperand(i); | |||
32955 | LowUndef &= Op.isUndef() || (i >= e/2); | |||
32956 | AllUndefOrZero &= (Op.isUndef() || isNullConstant(Op)); | |||
32957 | } | |||
32958 | if (AllUndefOrZero) { | |||
32959 | SDValue N00 = N0.getOperand(0); | |||
32960 | SDLoc dl(N00); | |||
32961 | N00 = LowUndef ? DAG.getAnyExtOrTrunc(N00, dl, MVT::i32) | |||
32962 | : DAG.getZExtOrTrunc(N00, dl, MVT::i32); | |||
32963 | return DAG.getNode(X86ISD::MMX_MOVW2D, dl, VT, N00); | |||
32964 | } | |||
32965 | } | |||
32966 | ||||
32967 | // Detect bitcasts of 64-bit build vectors and convert to a | |||
32968 | // MMX UNPCK/PSHUFW which takes MMX type inputs with the value in the | |||
32969 | // lowest element. | |||
32970 | if (N0.getOpcode() == ISD::BUILD_VECTOR && | |||
32971 | (SrcVT == MVT::v2f32 || SrcVT == MVT::v2i32 || SrcVT == MVT::v4i16 || | |||
32972 | SrcVT == MVT::v8i8)) | |||
32973 | return createMMXBuildVector(N0, DAG, Subtarget); | |||
32974 | ||||
32975 | // Detect bitcasts between element or subvector extraction to x86mmx. | |||
32976 | if ((N0.getOpcode() == ISD::EXTRACT_VECTOR_ELT || | |||
32977 | N0.getOpcode() == ISD::EXTRACT_SUBVECTOR) && | |||
32978 | isNullConstant(N0.getOperand(1))) { | |||
32979 | SDValue N00 = N0.getOperand(0); | |||
32980 | if (N00.getValueType().is128BitVector()) | |||
32981 | return DAG.getNode(X86ISD::MOVDQ2Q, SDLoc(N00), VT, | |||
32982 | DAG.getBitcast(MVT::v2i64, N00)); | |||
32983 | } | |||
32984 | ||||
32985 | // Detect bitcasts from FP_TO_SINT to x86mmx. | |||
32986 | if (SrcVT == MVT::v2i32 && N0.getOpcode() == ISD::FP_TO_SINT) { | |||
32987 | SDLoc DL(N0); | |||
32988 | SDValue Res = DAG.getNode(ISD::CONCAT_VECTORS, DL, MVT::v4i32, N0, | |||
32989 | DAG.getUNDEF(MVT::v2i32)); | |||
32990 | return DAG.getNode(X86ISD::MOVDQ2Q, DL, VT, | |||
32991 | DAG.getBitcast(MVT::v2i64, Res)); | |||
32992 | } | |||
32993 | } | |||
32994 | ||||
32995 | // Try to remove a bitcast of constant vXi1 vector. We have to legalize | |||
32996 | // most of these to scalar anyway. | |||
32997 | if (Subtarget.hasAVX512() && VT.isScalarInteger() && | |||
32998 | SrcVT.isVector() && SrcVT.getVectorElementType() == MVT::i1 && | |||
32999 | ISD::isBuildVectorOfConstantSDNodes(N0.getNode())) { | |||
33000 | return combinevXi1ConstantToInteger(N0, DAG); | |||
33001 | } | |||
33002 | ||||
33003 | if (Subtarget.hasAVX512() && SrcVT.isScalarInteger() && | |||
33004 | VT.isVector() && VT.getVectorElementType() == MVT::i1 && | |||
33005 | isa<ConstantSDNode>(N0)) { | |||
33006 | auto *C = cast<ConstantSDNode>(N0); | |||
33007 | if (C->isAllOnesValue()) | |||
33008 | return DAG.getConstant(1, SDLoc(N0), VT); | |||
33009 | if (C->isNullValue()) | |||
33010 | return DAG.getConstant(0, SDLoc(N0), VT); | |||
33011 | } | |||
33012 | ||||
33013 | // Try to remove bitcasts from input and output of mask arithmetic to | |||
33014 | // remove GPR<->K-register crossings. | |||
33015 | if (SDValue V = combineCastedMaskArithmetic(N, DAG, DCI, Subtarget)) | |||
33016 | return V; | |||
33017 | ||||
33018 | // Convert a bitcasted integer logic operation that has one bitcasted | |||
33019 | // floating-point operand into a floating-point logic operation. This may | |||
33020 | // create a load of a constant, but that is cheaper than materializing the | |||
33021 | // constant in an integer register and transferring it to an SSE register or | |||
33022 | // transferring the SSE operand to integer register and back. | |||
33023 | unsigned FPOpcode; | |||
33024 | switch (N0.getOpcode()) { | |||
33025 | case ISD::AND: FPOpcode = X86ISD::FAND; break; | |||
33026 | case ISD::OR: FPOpcode = X86ISD::FOR; break; | |||
33027 | case ISD::XOR: FPOpcode = X86ISD::FXOR; break; | |||
33028 | default: return SDValue(); | |||
33029 | } | |||
33030 | ||||
33031 | if (!((Subtarget.hasSSE1() && VT == MVT::f32) || | |||
33032 | (Subtarget.hasSSE2() && VT == MVT::f64))) | |||
33033 | return SDValue(); | |||
33034 | ||||
33035 | SDValue LogicOp0 = N0.getOperand(0); | |||
33036 | SDValue LogicOp1 = N0.getOperand(1); | |||
33037 | SDLoc DL0(N0); | |||
33038 | ||||
33039 | // bitcast(logic(bitcast(X), Y)) --> logic'(X, bitcast(Y)) | |||
33040 | if (N0.hasOneUse() && LogicOp0.getOpcode() == ISD::BITCAST && | |||
33041 | LogicOp0.hasOneUse() && LogicOp0.getOperand(0).getValueType() == VT && | |||
33042 | !isa<ConstantSDNode>(LogicOp0.getOperand(0))) { | |||
33043 | SDValue CastedOp1 = DAG.getBitcast(VT, LogicOp1); | |||
33044 | return DAG.getNode(FPOpcode, DL0, VT, LogicOp0.getOperand(0), CastedOp1); | |||
33045 | } | |||
33046 | // bitcast(logic(X, bitcast(Y))) --> logic'(bitcast(X), Y) | |||
33047 | if (N0.hasOneUse() && LogicOp1.getOpcode() == ISD::BITCAST && | |||
33048 | LogicOp1.hasOneUse() && LogicOp1.getOperand(0).getValueType() == VT && | |||
33049 | !isa<ConstantSDNode>(LogicOp1.getOperand(0))) { | |||
33050 | SDValue CastedOp0 = DAG.getBitcast(VT, LogicOp0); | |||
33051 | return DAG.getNode(FPOpcode, DL0, VT, LogicOp1.getOperand(0), CastedOp0); | |||
33052 | } | |||
33053 | ||||
33054 | return SDValue(); | |||
33055 | } | |||
33056 | ||||
33057 | // Given a select, detect the following pattern: | |||
33058 | // 1: %2 = zext <N x i8> %0 to <N x i32> | |||
33059 | // 2: %3 = zext <N x i8> %1 to <N x i32> | |||
33060 | // 3: %4 = sub nsw <N x i32> %2, %3 | |||
33061 | // 4: %5 = icmp sgt <N x i32> %4, [0 x N] or [-1 x N] | |||
33062 | // 5: %6 = sub nsw <N x i32> zeroinitializer, %4 | |||
33063 | // 6: %7 = select <N x i1> %5, <N x i32> %4, <N x i32> %6 | |||
33064 | // This is useful as it is the input into a SAD pattern. | |||
33065 | static bool detectZextAbsDiff(const SDValue &Select, SDValue &Op0, | |||
33066 | SDValue &Op1) { | |||
33067 |   // Check that the condition of the select instruction is greater-than. | |||
33068 | SDValue SetCC = Select->getOperand(0); | |||
33069 | if (SetCC.getOpcode() != ISD::SETCC) | |||
33070 | return false; | |||
33071 | ISD::CondCode CC = cast<CondCodeSDNode>(SetCC.getOperand(2))->get(); | |||
33072 | if (CC != ISD::SETGT && CC != ISD::SETLT) | |||
33073 | return false; | |||
33074 | ||||
33075 | SDValue SelectOp1 = Select->getOperand(1); | |||
33076 | SDValue SelectOp2 = Select->getOperand(2); | |||
33077 | ||||
33078 | // The following instructions assume SelectOp1 is the subtraction operand | |||
33079 | // and SelectOp2 is the negation operand. | |||
33080 | // In the case of SETLT this is the other way around. | |||
33081 | if (CC == ISD::SETLT) | |||
33082 | std::swap(SelectOp1, SelectOp2); | |||
33083 | ||||
33084 | // The second operand of the select should be the negation of the first | |||
33085 | // operand, which is implemented as 0 - SelectOp1. | |||
33086 | if (!(SelectOp2.getOpcode() == ISD::SUB && | |||
33087 | ISD::isBuildVectorAllZeros(SelectOp2.getOperand(0).getNode()) && | |||
33088 | SelectOp2.getOperand(1) == SelectOp1)) | |||
33089 | return false; | |||
33090 | ||||
33091 | // The first operand of SetCC is the first operand of the select, which is the | |||
33092 | // difference between the two input vectors. | |||
33093 | if (SetCC.getOperand(0) != SelectOp1) | |||
33094 | return false; | |||
33095 | ||||
33096 |   // In the SETLT case, the second operand of the comparison can be 1 or 0. | |||
33097 | APInt SplatVal; | |||
33098 | if ((CC == ISD::SETLT) && | |||
33099 | !((ISD::isConstantSplatVector(SetCC.getOperand(1).getNode(), SplatVal) && | |||
33100 | SplatVal.isOneValue()) || | |||
33101 | (ISD::isBuildVectorAllZeros(SetCC.getOperand(1).getNode())))) | |||
33102 | return false; | |||
33103 | ||||
33104 |   // In the SETGT case, the second operand of the comparison can be -1 or 0. | |||
33105 | if ((CC == ISD::SETGT) && | |||
33106 | !(ISD::isBuildVectorAllZeros(SetCC.getOperand(1).getNode()) || | |||
33107 | ISD::isBuildVectorAllOnes(SetCC.getOperand(1).getNode()))) | |||
33108 | return false; | |||
33109 | ||||
33110 | // The first operand of the select is the difference between the two input | |||
33111 | // vectors. | |||
33112 | if (SelectOp1.getOpcode() != ISD::SUB) | |||
33113 | return false; | |||
33114 | ||||
33115 | Op0 = SelectOp1.getOperand(0); | |||
33116 | Op1 = SelectOp1.getOperand(1); | |||
33117 | ||||
33118 | // Check if the operands of the sub are zero-extended from vectors of i8. | |||
33119 | if (Op0.getOpcode() != ISD::ZERO_EXTEND || | |||
33120 | Op0.getOperand(0).getValueType().getVectorElementType() != MVT::i8 || | |||
33121 | Op1.getOpcode() != ISD::ZERO_EXTEND || | |||
33122 | Op1.getOperand(0).getValueType().getVectorElementType() != MVT::i8) | |||
33123 | return false; | |||
33124 | ||||
33125 | return true; | |||
33126 | } | |||
33127 | ||||
33128 | // Given two zexts of <k x i8> to <k x i32>, create a PSADBW of the inputs | |||
33129 | // to these zexts. | |||
33130 | static SDValue createPSADBW(SelectionDAG &DAG, const SDValue &Zext0, | |||
33131 | const SDValue &Zext1, const SDLoc &DL, | |||
33132 | const X86Subtarget &Subtarget) { | |||
33133 | // Find the appropriate width for the PSADBW. | |||
33134 | EVT InVT = Zext0.getOperand(0).getValueType(); | |||
33135 | unsigned RegSize = std::max(128u, InVT.getSizeInBits()); | |||
33136 | ||||
33137 |   // "Zero-extend" the i8 vectors. This is not a per-element zext; rather, we | |||
33138 |   // fill in the missing vector elements with 0. | |||
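  // E.g. a v4i8 input lands in element 0 of a v16i8 CONCAT_VECTORS whose
  // remaining elements are zero vectors, so the padding lanes add 0 to the SAD.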
33139 | unsigned NumConcat = RegSize / InVT.getSizeInBits(); | |||
33140 | SmallVector<SDValue, 16> Ops(NumConcat, DAG.getConstant(0, DL, InVT)); | |||
33141 | Ops[0] = Zext0.getOperand(0); | |||
33142 | MVT ExtendedVT = MVT::getVectorVT(MVT::i8, RegSize / 8); | |||
33143 | SDValue SadOp0 = DAG.getNode(ISD::CONCAT_VECTORS, DL, ExtendedVT, Ops); | |||
33144 | Ops[0] = Zext1.getOperand(0); | |||
33145 | SDValue SadOp1 = DAG.getNode(ISD::CONCAT_VECTORS, DL, ExtendedVT, Ops); | |||
33146 | ||||
33147 | // Actually build the SAD, split as 128/256/512 bits for SSE/AVX2/AVX512BW. | |||
33148 | auto PSADBWBuilder = [](SelectionDAG &DAG, const SDLoc &DL, | |||
33149 | ArrayRef<SDValue> Ops) { | |||
33150 | MVT VT = MVT::getVectorVT(MVT::i64, Ops[0].getValueSizeInBits() / 64); | |||
33151 | return DAG.getNode(X86ISD::PSADBW, DL, VT, Ops); | |||
33152 | }; | |||
33153 | MVT SadVT = MVT::getVectorVT(MVT::i64, RegSize / 64); | |||
33154 | return SplitOpsAndApply(DAG, Subtarget, DL, SadVT, { SadOp0, SadOp1 }, | |||
33155 | PSADBWBuilder); | |||
33156 | } | |||
33157 | ||||
33158 | // Attempt to replace a min/max v8i16/v16i8 horizontal reduction with | |||
33159 | // PHMINPOSUW. | |||
33160 | static SDValue combineHorizontalMinMaxResult(SDNode *Extract, SelectionDAG &DAG, | |||
33161 | const X86Subtarget &Subtarget) { | |||
33162 | // Bail without SSE41. | |||
33163 | if (!Subtarget.hasSSE41()) | |||
33164 | return SDValue(); | |||
33165 | ||||
33166 | EVT ExtractVT = Extract->getValueType(0); | |||
33167 | if (ExtractVT != MVT::i16 && ExtractVT != MVT::i8) | |||
33168 | return SDValue(); | |||
33169 | ||||
33170 | // Check for SMAX/SMIN/UMAX/UMIN horizontal reduction patterns. | |||
33171 | ISD::NodeType BinOp; | |||
33172 | SDValue Src = DAG.matchBinOpReduction( | |||
33173 | Extract, BinOp, {ISD::SMAX, ISD::SMIN, ISD::UMAX, ISD::UMIN}); | |||
33174 | if (!Src) | |||
33175 | return SDValue(); | |||
33176 | ||||
33177 | EVT SrcVT = Src.getValueType(); | |||
33178 | EVT SrcSVT = SrcVT.getScalarType(); | |||
33179 | if (SrcSVT != ExtractVT || (SrcVT.getSizeInBits() % 128) != 0) | |||
33180 | return SDValue(); | |||
33181 | ||||
33182 | SDLoc DL(Extract); | |||
33183 | SDValue MinPos = Src; | |||
33184 | ||||
33185 | // First, reduce the source down to 128-bit, applying BinOp to lo/hi. | |||
33186 | while (SrcVT.getSizeInBits() > 128) { | |||
33187 | unsigned NumElts = SrcVT.getVectorNumElements(); | |||
33188 | unsigned NumSubElts = NumElts / 2; | |||
33189 | SrcVT = EVT::getVectorVT(*DAG.getContext(), SrcSVT, NumSubElts); | |||
33190 | unsigned SubSizeInBits = SrcVT.getSizeInBits(); | |||
33191 | SDValue Lo = extractSubVector(MinPos, 0, DAG, DL, SubSizeInBits); | |||
33192 | SDValue Hi = extractSubVector(MinPos, NumSubElts, DAG, DL, SubSizeInBits); | |||
33193 | MinPos = DAG.getNode(BinOp, DL, SrcVT, Lo, Hi); | |||
33194 | } | |||
33195 |   assert(((SrcVT == MVT::v8i16 && ExtractVT == MVT::i16) || | |||
33196 |           (SrcVT == MVT::v16i8 && ExtractVT == MVT::i8)) && | |||
33197 |          "Unexpected value type"); | |||
33198 | ||||
33199 |   // PHMINPOSUW applies to UMIN(v8i16); for SMIN/SMAX/UMAX we must apply a mask | |||
33200 |   // to flip the value accordingly. | |||
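  // The XOR maps each ordering onto the unsigned ordering PHMINPOSUW expects:
  // e.g. for SMAX, x ^ 0x7FFF reverses the signed order as seen unsigned, so
  // the unsigned min of the flipped values locates the signed max; the second
  // XOR after the PHMINPOS below restores the original value.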
33201 | SDValue Mask; | |||
33202 | unsigned MaskEltsBits = ExtractVT.getSizeInBits(); | |||
33203 | if (BinOp == ISD::SMAX) | |||
33204 | Mask = DAG.getConstant(APInt::getSignedMaxValue(MaskEltsBits), DL, SrcVT); | |||
33205 | else if (BinOp == ISD::SMIN) | |||
33206 | Mask = DAG.getConstant(APInt::getSignedMinValue(MaskEltsBits), DL, SrcVT); | |||
33207 | else if (BinOp == ISD::UMAX) | |||
33208 | Mask = DAG.getConstant(APInt::getAllOnesValue(MaskEltsBits), DL, SrcVT); | |||
33209 | ||||
33210 | if (Mask) | |||
33211 | MinPos = DAG.getNode(ISD::XOR, DL, SrcVT, Mask, MinPos); | |||
33212 | ||||
33213 | // For v16i8 cases we need to perform UMIN on pairs of byte elements, | |||
33214 |   // shuffling each upper element down and inserting zeros. This means that the | |||
33215 | // v16i8 UMIN will leave the upper element as zero, performing zero-extension | |||
33216 | // ready for the PHMINPOS. | |||
33217 | if (ExtractVT == MVT::i8) { | |||
33218 | SDValue Upper = DAG.getVectorShuffle( | |||
33219 | SrcVT, DL, MinPos, DAG.getConstant(0, DL, MVT::v16i8), | |||
33220 | {1, 16, 3, 16, 5, 16, 7, 16, 9, 16, 11, 16, 13, 16, 15, 16}); | |||
33221 | MinPos = DAG.getNode(ISD::UMIN, DL, SrcVT, MinPos, Upper); | |||
33222 | } | |||
33223 | ||||
33224 |   // Perform the PHMINPOS on a v8i16 vector. | |||
33225 | MinPos = DAG.getBitcast(MVT::v8i16, MinPos); | |||
33226 | MinPos = DAG.getNode(X86ISD::PHMINPOS, DL, MVT::v8i16, MinPos); | |||
33227 | MinPos = DAG.getBitcast(SrcVT, MinPos); | |||
33228 | ||||
33229 | if (Mask) | |||
33230 | MinPos = DAG.getNode(ISD::XOR, DL, SrcVT, Mask, MinPos); | |||
33231 | ||||
33232 | return DAG.getNode(ISD::EXTRACT_VECTOR_ELT, DL, ExtractVT, MinPos, | |||
33233 | DAG.getIntPtrConstant(0, DL)); | |||
33234 | } | |||
33235 | ||||
33236 | // Attempt to replace an all_of/any_of style horizontal reduction with a MOVMSK. | |||
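// E.g. for a v4i32 all-sign-bits vector X, an AND reduction (all_of) becomes
//   (movmskps X) == 0xF ? -1 : 0
// and an OR reduction (any_of) becomes (movmskps X) != 0 ? -1 : 0.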
33237 | static SDValue combineHorizontalPredicateResult(SDNode *Extract, | |||
33238 | SelectionDAG &DAG, | |||
33239 | const X86Subtarget &Subtarget) { | |||
33240 | // Bail without SSE2 or with AVX512VL (which uses predicate registers). | |||
33241 | if (!Subtarget.hasSSE2() || Subtarget.hasVLX()) | |||
33242 | return SDValue(); | |||
33243 | ||||
33244 | EVT ExtractVT = Extract->getValueType(0); | |||
33245 | unsigned BitWidth = ExtractVT.getSizeInBits(); | |||
33246 | if (ExtractVT != MVT::i64 && ExtractVT != MVT::i32 && ExtractVT != MVT::i16 && | |||
33247 | ExtractVT != MVT::i8) | |||
33248 | return SDValue(); | |||
33249 | ||||
33250 | // Check for OR(any_of) and AND(all_of) horizontal reduction patterns. | |||
33251 | ISD::NodeType BinOp; | |||
33252 | SDValue Match = DAG.matchBinOpReduction(Extract, BinOp, {ISD::OR, ISD::AND}); | |||
33253 | if (!Match) | |||
33254 | return SDValue(); | |||
33255 | ||||
33256 | // EXTRACT_VECTOR_ELT can require implicit extension of the vector element | |||
33257 | // which we can't support here for now. | |||
33258 | if (Match.getScalarValueSizeInBits() != BitWidth) | |||
33259 | return SDValue(); | |||
33260 | ||||
33261 |   // We require AVX2 to use PMOVMSKB on v16i16/v32i8. | |||
33262 | unsigned MatchSizeInBits = Match.getValueSizeInBits(); | |||
33263 | if (!(MatchSizeInBits == 128 || | |||
33264 | (MatchSizeInBits == 256 && | |||
33265 | ((Subtarget.hasAVX() && BitWidth >= 32) || Subtarget.hasAVX2())))) | |||
33266 | return SDValue(); | |||
33267 | ||||
33268 | // Don't bother performing this for 2-element vectors. | |||
33269 | if (Match.getValueType().getVectorNumElements() <= 2) | |||
33270 | return SDValue(); | |||
33271 | ||||
33272 | // Check that we are extracting a reduction of all sign bits. | |||
33273 | if (DAG.ComputeNumSignBits(Match) != BitWidth) | |||
33274 | return SDValue(); | |||
33275 | ||||
33276 | // For 32/64 bit comparisons use MOVMSKPS/MOVMSKPD, else PMOVMSKB. | |||
33277 | MVT MaskVT; | |||
33278 | if (64 == BitWidth || 32 == BitWidth) | |||
33279 | MaskVT = MVT::getVectorVT(MVT::getFloatingPointVT(BitWidth), | |||
33280 | MatchSizeInBits / BitWidth); | |||
33281 | else | |||
33282 | MaskVT = MVT::getVectorVT(MVT::i8, MatchSizeInBits / 8); | |||
33283 | ||||
33284 | APInt CompareBits; | |||
33285 | ISD::CondCode CondCode; | |||
33286 | if (BinOp == ISD::OR) { | |||
33287 | // any_of -> MOVMSK != 0 | |||
33288 | CompareBits = APInt::getNullValue(32); | |||
33289 | CondCode = ISD::CondCode::SETNE; | |||
33290 | } else { | |||
33291 | // all_of -> MOVMSK == ((1 << NumElts) - 1) | |||
33292 | CompareBits = APInt::getLowBitsSet(32, MaskVT.getVectorNumElements()); | |||
33293 | CondCode = ISD::CondCode::SETEQ; | |||
33294 | } | |||
33295 | ||||
33296 | // Perform the select as i32/i64 and then truncate to avoid partial register | |||
33297 | // stalls. | |||
33298 | unsigned ResWidth = std::max(BitWidth, 32u); | |||
33299 | EVT ResVT = EVT::getIntegerVT(*DAG.getContext(), ResWidth); | |||
33300 | SDLoc DL(Extract); | |||
33301 | SDValue Zero = DAG.getConstant(0, DL, ResVT); | |||
33302 | SDValue Ones = DAG.getAllOnesConstant(DL, ResVT); | |||
33303 | SDValue Res = DAG.getBitcast(MaskVT, Match); | |||
33304 | Res = DAG.getNode(X86ISD::MOVMSK, DL, MVT::i32, Res); | |||
33305 | Res = DAG.getSelectCC(DL, Res, DAG.getConstant(CompareBits, DL, MVT::i32), | |||
33306 | Ones, Zero, CondCode); | |||
33307 | return DAG.getSExtOrTrunc(Res, DL, ExtractVT); | |||
33308 | } | |||
33309 | ||||
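// Attempt to match a sum-of-absolute-differences reduction, i.e.
//   sum(|a[i] - b[i]|) with a and b zero-extended from vectors of i8,
// and replace it with PSADBW plus a shuffle/add reduction of the resulting
// 64-bit partial sums.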
33310 | static SDValue combineBasicSADPattern(SDNode *Extract, SelectionDAG &DAG, | |||
33311 | const X86Subtarget &Subtarget) { | |||
33312 | // PSADBW is only supported on SSE2 and up. | |||
33313 | if (!Subtarget.hasSSE2()) | |||
33314 | return SDValue(); | |||
33315 | ||||
33316 |   // Verify that the type we're extracting from has vector elements wider than i16. | |||
33317 | EVT VT = Extract->getOperand(0).getValueType(); | |||
33318 | if (!VT.isSimple() || !(VT.getVectorElementType().getSizeInBits() > 16)) | |||
33319 | return SDValue(); | |||
33320 | ||||
33321 | unsigned RegSize = 128; | |||
33322 | if (Subtarget.useBWIRegs()) | |||
33323 | RegSize = 512; | |||
33324 | else if (Subtarget.hasAVX()) | |||
33325 | RegSize = 256; | |||
33326 | ||||
33327 |   // We handle up to v16i* for SSE2 / v32i* for AVX / v64i* for AVX512. | |||
33328 | // TODO: We should be able to handle larger vectors by splitting them before | |||
33329 | // feeding them into several SADs, and then reducing over those. | |||
33330 | if (RegSize / VT.getVectorNumElements() < 8) | |||
33331 | return SDValue(); | |||
33332 | ||||
33333 | // Match shuffle + add pyramid. | |||
33334 | ISD::NodeType BinOp; | |||
33335 | SDValue Root = DAG.matchBinOpReduction(Extract, BinOp, {ISD::ADD}); | |||
33336 | ||||
33337 | // The operand is expected to be zero extended from i8 | |||
33338 | // (verified in detectZextAbsDiff). | |||
33339 |   // In order to convert to i64 and above, an additional any/zero/sign | |||
33340 |   // extend is expected. | |||
33341 |   // The zero extend from 32 bits has no mathematical effect on the result. | |||
33342 |   // Also, the sign extend is effectively a zero extend | |||
33343 |   // (it extends the sign bit, which is zero). | |||
33344 |   // So it is correct to skip the sign/zero extend instruction. | |||
33345 | if (Root && (Root.getOpcode() == ISD::SIGN_EXTEND || | |||
33346 | Root.getOpcode() == ISD::ZERO_EXTEND || | |||
33347 | Root.getOpcode() == ISD::ANY_EXTEND)) | |||
33348 | Root = Root.getOperand(0); | |||
33349 | ||||
33350 | // If there was a match, we want Root to be a select that is the root of an | |||
33351 | // abs-diff pattern. | |||
33352 | if (!Root || (Root.getOpcode() != ISD::VSELECT)) | |||
33353 | return SDValue(); | |||
33354 | ||||
33355 | // Check whether we have an abs-diff pattern feeding into the select. | |||
33356 | SDValue Zext0, Zext1; | |||
33357 | if (!detectZextAbsDiff(Root, Zext0, Zext1)) | |||
33358 | return SDValue(); | |||
33359 | ||||
33360 | // Create the SAD instruction. | |||
33361 | SDLoc DL(Extract); | |||
33362 | SDValue SAD = createPSADBW(DAG, Zext0, Zext1, DL, Subtarget); | |||
33363 | ||||
33364 | // If the original vector was wider than 8 elements, sum over the results | |||
33365 | // in the SAD vector. | |||
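  // Each iteration shuffles the upper half of the remaining partial sums down
  // to the low elements and adds, halving the number of live sums.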
33366 | unsigned Stages = Log2_32(VT.getVectorNumElements()); | |||
33367 | MVT SadVT = SAD.getSimpleValueType(); | |||
33368 | if (Stages > 3) { | |||
33369 | unsigned SadElems = SadVT.getVectorNumElements(); | |||
33370 | ||||
33371 | for(unsigned i = Stages - 3; i > 0; --i) { | |||
33372 | SmallVector<int, 16> Mask(SadElems, -1); | |||
33373 | for(unsigned j = 0, MaskEnd = 1 << (i - 1); j < MaskEnd; ++j) | |||
33374 | Mask[j] = MaskEnd + j; | |||
33375 | ||||
33376 | SDValue Shuffle = | |||
33377 | DAG.getVectorShuffle(SadVT, DL, SAD, DAG.getUNDEF(SadVT), Mask); | |||
33378 | SAD = DAG.getNode(ISD::ADD, DL, SadVT, SAD, Shuffle); | |||
33379 | } | |||
33380 | } | |||
33381 | ||||
33382 | MVT Type = Extract->getSimpleValueType(0); | |||
33383 | unsigned TypeSizeInBits = Type.getSizeInBits(); | |||
33384 | // Return the lowest TypeSizeInBits bits. | |||
33385 | MVT ResVT = MVT::getVectorVT(Type, SadVT.getSizeInBits() / TypeSizeInBits); | |||
33386 | SAD = DAG.getBitcast(ResVT, SAD); | |||
33387 | return DAG.getNode(ISD::EXTRACT_VECTOR_ELT, DL, Type, SAD, | |||
33388 | Extract->getOperand(1)); | |||
33389 | } | |||
33390 | ||||
33391 | // Attempt to peek through a target shuffle and extract the scalar from the | |||
33392 | // source. | |||
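// E.g. (i32 extract_elt (v4i32 shuffle X, undef, <2,3,0,1>), 0) can be
// rewritten as (i32 extract_elt X, 2), removing the shuffle entirely.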
33393 | static SDValue combineExtractWithShuffle(SDNode *N, SelectionDAG &DAG, | |||
33394 | TargetLowering::DAGCombinerInfo &DCI, | |||
33395 | const X86Subtarget &Subtarget) { | |||
33396 | if (DCI.isBeforeLegalizeOps()) | |||
33397 | return SDValue(); | |||
33398 | ||||
33399 | SDValue Src = N->getOperand(0); | |||
33400 | SDValue Idx = N->getOperand(1); | |||
33401 | ||||
33402 | EVT VT = N->getValueType(0); | |||
33403 | EVT SrcVT = Src.getValueType(); | |||
33404 | EVT SrcSVT = SrcVT.getVectorElementType(); | |||
33405 | unsigned NumSrcElts = SrcVT.getVectorNumElements(); | |||
33406 | ||||
33407 | // Don't attempt this for boolean mask vectors or unknown extraction indices. | |||
33408 | if (SrcSVT == MVT::i1 || !isa<ConstantSDNode>(Idx)) | |||
33409 | return SDValue(); | |||
33410 | ||||
33411 |   // Handle extract(broadcast(scalar_value)); the index doesn't matter. | |||
33412 | if (X86ISD::VBROADCAST == Src.getOpcode() && | |||
33413 | Src.getOperand(0).getValueType() == VT) | |||
33414 | return Src.getOperand(0); | |||
33415 | ||||
33416 | // Resolve the target shuffle inputs and mask. | |||
33417 | SmallVector<int, 16> Mask; | |||
33418 | SmallVector<SDValue, 2> Ops; | |||
33419 | if (!resolveTargetShuffleInputs(peekThroughBitcasts(Src), Ops, Mask, DAG)) | |||
33420 | return SDValue(); | |||
33421 | ||||
33422 | // Attempt to narrow/widen the shuffle mask to the correct size. | |||
33423 | if (Mask.size() != NumSrcElts) { | |||
33424 | if ((NumSrcElts % Mask.size()) == 0) { | |||
33425 | SmallVector<int, 16> ScaledMask; | |||
33426 | int Scale = NumSrcElts / Mask.size(); | |||
33427 | scaleShuffleMask<int>(Scale, Mask, ScaledMask); | |||
33428 | Mask = std::move(ScaledMask); | |||
33429 | } else if ((Mask.size() % NumSrcElts) == 0) { | |||
33430 | SmallVector<int, 16> WidenedMask; | |||
33431 | while (Mask.size() > NumSrcElts && | |||
33432 | canWidenShuffleElements(Mask, WidenedMask)) | |||
33433 | Mask = std::move(WidenedMask); | |||
33434 | // TODO - investigate support for wider shuffle masks with known upper | |||
33435 | // undef/zero elements for implicit zero-extension. | |||
33436 | } | |||
33437 | } | |||
33438 | ||||
33439 | // Check if narrowing/widening failed. | |||
33440 | if (Mask.size() != NumSrcElts) | |||
33441 | return SDValue(); | |||
33442 | ||||
33443 | int SrcIdx = Mask[N->getConstantOperandVal(1)]; | |||
33444 | SDLoc dl(N); | |||
33445 | ||||
33446 | // If the shuffle source element is undef/zero then we can just accept it. | |||
33447 | if (SrcIdx == SM_SentinelUndef) | |||
33448 | return DAG.getUNDEF(VT); | |||
33449 | ||||
33450 | if (SrcIdx == SM_SentinelZero) | |||
33451 | return VT.isFloatingPoint() ? DAG.getConstantFP(0.0, dl, VT) | |||
33452 | : DAG.getConstant(0, dl, VT); | |||
33453 | ||||
33454 | SDValue SrcOp = Ops[SrcIdx / Mask.size()]; | |||
33455 | SrcOp = DAG.getBitcast(SrcVT, SrcOp); | |||
33456 | SrcIdx = SrcIdx % Mask.size(); | |||
33457 | ||||
33458 |   // We can only extract other elements from 128-bit vectors, and only in | |||
33459 |   // certain circumstances depending on the SSE level. | |||
33460 | // TODO: Investigate using extract_subvector for larger vectors. | |||
33461 | // TODO: Investigate float/double extraction if it will be just stored. | |||
33462 | if ((SrcVT == MVT::v4i32 || SrcVT == MVT::v2i64) && | |||
33463 | ((SrcIdx == 0 && Subtarget.hasSSE2()) || Subtarget.hasSSE41())) { | |||
33464 |     assert(SrcSVT == VT && "Unexpected extraction type"); | |||
33465 | return DAG.getNode(ISD::EXTRACT_VECTOR_ELT, dl, SrcSVT, SrcOp, | |||
33466 | DAG.getIntPtrConstant(SrcIdx, dl)); | |||
33467 | } | |||
33468 | ||||
33469 | if ((SrcVT == MVT::v8i16 && Subtarget.hasSSE2()) || | |||
33470 | (SrcVT == MVT::v16i8 && Subtarget.hasSSE41())) { | |||
33471 |     assert(VT.getSizeInBits() >= SrcSVT.getSizeInBits() && | |||
33472 |            "Unexpected extraction type"); | |||
33473 | unsigned OpCode = (SrcVT == MVT::v8i16 ? X86ISD::PEXTRW : X86ISD::PEXTRB); | |||
33474 | SDValue ExtOp = DAG.getNode(OpCode, dl, MVT::i32, SrcOp, | |||
33475 | DAG.getIntPtrConstant(SrcIdx, dl)); | |||
33476 | return DAG.getZExtOrTrunc(ExtOp, dl, VT); | |||
33477 | } | |||
33478 | ||||
33479 | return SDValue(); | |||
33480 | } | |||
33481 | ||||
33482 | /// Detect vector gather/scatter index generation and convert it from being a | |||
33483 | /// bunch of shuffles and extracts into a somewhat faster sequence. | |||
33484 | /// For i686, the best sequence is apparently storing the value and loading | |||
33485 | /// scalars back, while for x64 we should use 64-bit extracts and shifts. | |||
33486 | static SDValue combineExtractVectorElt(SDNode *N, SelectionDAG &DAG, | |||
33487 | TargetLowering::DAGCombinerInfo &DCI, | |||
33488 | const X86Subtarget &Subtarget) { | |||
33489 | if (SDValue NewOp = combineExtractWithShuffle(N, DAG, DCI, Subtarget)) | |||
33490 | return NewOp; | |||
33491 | ||||
33492 | // TODO - Remove this once we can handle the implicit zero-extension of | |||
33493 | // X86ISD::PEXTRW/X86ISD::PEXTRB in: | |||
33494 | // XFormVExtractWithShuffleIntoLoad, combineHorizontalPredicateResult and | |||
33495 | // combineBasicSADPattern. | |||
33496 | if (N->getOpcode() != ISD::EXTRACT_VECTOR_ELT) | |||
33497 | return SDValue(); | |||
33498 | ||||
33499 | if (SDValue NewOp = XFormVExtractWithShuffleIntoLoad(N, DAG, DCI)) | |||
33500 | return NewOp; | |||
33501 | ||||
33502 | SDValue InputVector = N->getOperand(0); | |||
33503 | SDValue EltIdx = N->getOperand(1); | |||
33504 | ||||
33505 | EVT SrcVT = InputVector.getValueType(); | |||
33506 | EVT VT = N->getValueType(0); | |||
33507 | SDLoc dl(InputVector); | |||
33508 | ||||
33509 |   // Detect mmx extraction of all bits as an i64. It works better as a bitcast. | |||
33510 | if (InputVector.getOpcode() == ISD::BITCAST && InputVector.hasOneUse() && | |||
33511 | VT == MVT::i64 && SrcVT == MVT::v1i64 && isNullConstant(EltIdx)) { | |||
33512 | SDValue MMXSrc = InputVector.getOperand(0); | |||
33513 | ||||
33514 | // The bitcast source is a direct mmx result. | |||
33515 | if (MMXSrc.getValueType() == MVT::x86mmx) | |||
33516 | return DAG.getBitcast(VT, InputVector); | |||
33517 | } | |||
33518 | ||||
33519 | // Detect mmx to i32 conversion through a v2i32 elt extract. | |||
33520 | if (InputVector.getOpcode() == ISD::BITCAST && InputVector.hasOneUse() && | |||
33521 | VT == MVT::i32 && SrcVT == MVT::v2i32 && isNullConstant(EltIdx)) { | |||
33522 | SDValue MMXSrc = InputVector.getOperand(0); | |||
33523 | ||||
33524 | // The bitcast source is a direct mmx result. | |||
33525 | if (MMXSrc.getValueType() == MVT::x86mmx) | |||
33526 | return DAG.getNode(X86ISD::MMX_MOVD2W, dl, MVT::i32, MMXSrc); | |||
33527 | } | |||
33528 | ||||
33529 | if (VT == MVT::i1 && InputVector.getOpcode() == ISD::BITCAST && | |||
33530 | isa<ConstantSDNode>(EltIdx) && | |||
33531 | isa<ConstantSDNode>(InputVector.getOperand(0))) { | |||
33532 | uint64_t ExtractedElt = N->getConstantOperandVal(1); | |||
33533 | auto *InputC = cast<ConstantSDNode>(InputVector.getOperand(0)); | |||
33534 | const APInt &InputValue = InputC->getAPIntValue(); | |||
33535 | uint64_t Res = InputValue[ExtractedElt]; | |||
33536 | return DAG.getConstant(Res, dl, MVT::i1); | |||
33537 | } | |||
33538 | ||||
33539 | // Check whether this extract is the root of a sum of absolute differences | |||
33540 |   // pattern. This has to be done here because we really want it to happen | |||
33541 |   // pre-legalization. | |||
33542 | if (SDValue SAD = combineBasicSADPattern(N, DAG, Subtarget)) | |||
33543 | return SAD; | |||
33544 | ||||
33545 | // Attempt to replace an all_of/any_of horizontal reduction with a MOVMSK. | |||
33546 | if (SDValue Cmp = combineHorizontalPredicateResult(N, DAG, Subtarget)) | |||
33547 | return Cmp; | |||
33548 | ||||
33549 | // Attempt to replace min/max v8i16/v16i8 reductions with PHMINPOSUW. | |||
33550 | if (SDValue MinMax = combineHorizontalMinMaxResult(N, DAG, Subtarget)) | |||
33551 | return MinMax; | |||
33552 | ||||
33553 | return SDValue(); | |||
33554 | } | |||
33555 | ||||
33556 | /// If a vector select has an operand that is -1 or 0, try to simplify the | |||
33557 | /// select to a bitwise logic operation. | |||
33558 | /// TODO: Move to DAGCombiner, possibly using TargetLowering::hasAndNot()? | |||
33559 | static SDValue | |||
33560 | combineVSelectWithAllOnesOrZeros(SDNode *N, SelectionDAG &DAG, | |||
33561 | TargetLowering::DAGCombinerInfo &DCI, | |||
33562 | const X86Subtarget &Subtarget) { | |||
33563 | SDValue Cond = N->getOperand(0); | |||
33564 | SDValue LHS = N->getOperand(1); | |||
33565 | SDValue RHS = N->getOperand(2); | |||
33566 | EVT VT = LHS.getValueType(); | |||
33567 | EVT CondVT = Cond.getValueType(); | |||
33568 | SDLoc DL(N); | |||
33569 | const TargetLowering &TLI = DAG.getTargetLoweringInfo(); | |||
33570 | ||||
33571 | if (N->getOpcode() != ISD::VSELECT) | |||
33572 | return SDValue(); | |||
33573 | ||||
33574 |   assert(CondVT.isVector() && "Vector select expects a vector selector!"); | |||
33575 | ||||
33576 | bool TValIsAllZeros = ISD::isBuildVectorAllZeros(LHS.getNode()); | |||
33577 |   // Check if the first operand is all zeros and the Cond type is vXi1. | |||
33578 |   // This situation only applies to AVX512. | |||
33579 | if (TValIsAllZeros && Subtarget.hasAVX512() && Cond.hasOneUse() && | |||
33580 | CondVT.getVectorElementType() == MVT::i1) { | |||
33581 | // Invert the cond to not(cond) : xor(op,allones)=not(op) | |||
33582 | SDValue CondNew = DAG.getNOT(DL, Cond, CondVT); | |||
33583 | // Vselect cond, op1, op2 = Vselect not(cond), op2, op1 | |||
33584 | return DAG.getSelect(DL, VT, CondNew, RHS, LHS); | |||
33585 | } | |||
33586 | ||||
33587 | // To use the condition operand as a bitwise mask, it must have elements that | |||
33588 |   // are the same size as the select elements. I.e., the condition operand must | |||
33589 | // have already been promoted from the IR select condition type <N x i1>. | |||
33590 | // Don't check if the types themselves are equal because that excludes | |||
33591 | // vector floating-point selects. | |||
33592 | if (CondVT.getScalarSizeInBits() != VT.getScalarSizeInBits()) | |||
33593 | return SDValue(); | |||
33594 | ||||
33595 | bool TValIsAllOnes = ISD::isBuildVectorAllOnes(LHS.getNode()); | |||
33596 | bool FValIsAllZeros = ISD::isBuildVectorAllZeros(RHS.getNode()); | |||
33597 | ||||
33598 | // Try to invert the condition if true value is not all 1s and false value is | |||
33599 | // not all 0s. | |||
33600 | if (!TValIsAllOnes && !FValIsAllZeros && | |||
33601 | // Check if the selector will be produced by CMPP*/PCMP*. | |||
33602 | Cond.getOpcode() == ISD::SETCC && | |||
33603 | // Check if SETCC has already been promoted. | |||
33604 | TLI.getSetCCResultType(DAG.getDataLayout(), *DAG.getContext(), VT) == | |||
33605 | CondVT) { | |||
33606 | bool FValIsAllOnes = ISD::isBuildVectorAllOnes(RHS.getNode()); | |||
33607 | ||||
33608 | if (TValIsAllZeros || FValIsAllOnes) { | |||
33609 | SDValue CC = Cond.getOperand(2); | |||
33610 | ISD::CondCode NewCC = | |||
33611 | ISD::getSetCCInverse(cast<CondCodeSDNode>(CC)->get(), | |||
33612 | Cond.getOperand(0).getValueType().isInteger()); | |||
33613 | Cond = DAG.getSetCC(DL, CondVT, Cond.getOperand(0), Cond.getOperand(1), | |||
33614 | NewCC); | |||
33615 | std::swap(LHS, RHS); | |||
33616 | TValIsAllOnes = FValIsAllOnes; | |||
33617 | FValIsAllZeros = TValIsAllZeros; | |||
33618 | } | |||
33619 | } | |||
33620 | ||||
33621 | // Cond value must be 'sign splat' to be converted to a logical op. | |||
33622 | if (DAG.ComputeNumSignBits(Cond) != CondVT.getScalarSizeInBits()) | |||
33623 | return SDValue(); | |||
33624 | ||||
33625 | // vselect Cond, 111..., 000... -> Cond | |||
33626 | if (TValIsAllOnes && FValIsAllZeros) | |||
33627 | return DAG.getBitcast(VT, Cond); | |||
33628 | ||||
33629 | if (!DCI.isBeforeLegalize() && !TLI.isTypeLegal(CondVT)) | |||
33630 | return SDValue(); | |||
33631 | ||||
33632 | // vselect Cond, 111..., X -> or Cond, X | |||
33633 | if (TValIsAllOnes) { | |||
33634 | SDValue CastRHS = DAG.getBitcast(CondVT, RHS); | |||
33635 | SDValue Or = DAG.getNode(ISD::OR, DL, CondVT, Cond, CastRHS); | |||
33636 | return DAG.getBitcast(VT, Or); | |||
33637 | } | |||
33638 | ||||
33639 | // vselect Cond, X, 000... -> and Cond, X | |||
33640 | if (FValIsAllZeros) { | |||
33641 | SDValue CastLHS = DAG.getBitcast(CondVT, LHS); | |||
33642 | SDValue And = DAG.getNode(ISD::AND, DL, CondVT, Cond, CastLHS); | |||
33643 | return DAG.getBitcast(VT, And); | |||
33644 | } | |||
33645 | ||||
33646 | // vselect Cond, 000..., X -> andn Cond, X | |||
33647 | if (TValIsAllZeros) { | |||
33648 | MVT AndNVT = MVT::getVectorVT(MVT::i64, CondVT.getSizeInBits() / 64); | |||
33649 | SDValue CastCond = DAG.getBitcast(AndNVT, Cond); | |||
33650 | SDValue CastRHS = DAG.getBitcast(AndNVT, RHS); | |||
33651 | SDValue AndN = DAG.getNode(X86ISD::ANDNP, DL, AndNVT, CastCond, CastRHS); | |||
33652 | return DAG.getBitcast(VT, AndN); | |||
33653 | } | |||
33654 | ||||
33655 | return SDValue(); | |||
33656 | } | |||
33657 | ||||
33658 | static SDValue combineSelectOfTwoConstants(SDNode *N, SelectionDAG &DAG) { | |||
33659 | SDValue Cond = N->getOperand(0); | |||
33660 | SDValue LHS = N->getOperand(1); | |||
33661 | SDValue RHS = N->getOperand(2); | |||
33662 | SDLoc DL(N); | |||
33663 | ||||
33664 | auto *TrueC = dyn_cast<ConstantSDNode>(LHS); | |||
33665 | auto *FalseC = dyn_cast<ConstantSDNode>(RHS); | |||
33666 | if (!TrueC || !FalseC) | |||
33667 | return SDValue(); | |||
33668 | ||||
33669 | // Don't do this for crazy integer types. | |||
33670 | EVT VT = N->getValueType(0); | |||
33671 | if (!DAG.getTargetLoweringInfo().isTypeLegal(VT)) | |||
33672 | return SDValue(); | |||
33673 | ||||
33674 | // We're going to use the condition bit in math or logic ops. We could allow | |||
33675 | // this with a wider condition value (post-legalization it becomes an i8), | |||
33676 | // but if nothing is creating selects that late, it doesn't matter. | |||
33677 | if (Cond.getValueType() != MVT::i1) | |||
33678 | return SDValue(); | |||
33679 | ||||
33680 | // A power-of-2 multiply is just a shift. LEA also cheaply handles multiply by | |||
33681 | // 3, 5, or 9 with i32/i64, so those get transformed too. | |||
33682 | // TODO: For constants that overflow or do not differ by power-of-2 or small | |||
33683 | // multiplier, convert to 'and' + 'add'. | |||
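  // E.g. (select Cond, 7, 3) has AbsDiff 4, so it becomes the branchless
  // sequence (zext Cond) * 4 + 3; the power-of-2 multiply later lowers to a
  // shift.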
33684 | const APInt &TrueVal = TrueC->getAPIntValue(); | |||
33685 | const APInt &FalseVal = FalseC->getAPIntValue(); | |||
33686 | bool OV; | |||
33687 | APInt Diff = TrueVal.ssub_ov(FalseVal, OV); | |||
33688 | if (OV) | |||
33689 | return SDValue(); | |||
33690 | ||||
33691 | APInt AbsDiff = Diff.abs(); | |||
33692 | if (AbsDiff.isPowerOf2() || | |||
33693 | ((VT == MVT::i32 || VT == MVT::i64) && | |||
33694 | (AbsDiff == 3 || AbsDiff == 5 || AbsDiff == 9))) { | |||
33695 | ||||
33696 | // We need a positive multiplier constant for shift/LEA codegen. The 'not' | |||
33697 | // of the condition can usually be folded into a compare predicate, but even | |||
33698 | // without that, the sequence should be cheaper than a CMOV alternative. | |||
33699 | if (TrueVal.slt(FalseVal)) { | |||
33700 | Cond = DAG.getNOT(DL, Cond, MVT::i1); | |||
33701 | std::swap(TrueC, FalseC); | |||
33702 | } | |||
33703 | ||||
33704 | // select Cond, TC, FC --> (zext(Cond) * (TC - FC)) + FC | |||
33705 | SDValue R = DAG.getNode(ISD::ZERO_EXTEND, DL, VT, Cond); | |||
33706 | ||||
33707 | // Multiply condition by the difference if non-one. | |||
33708 | if (!AbsDiff.isOneValue()) | |||
33709 | R = DAG.getNode(ISD::MUL, DL, VT, R, DAG.getConstant(AbsDiff, DL, VT)); | |||
33710 | ||||
33711 | // Add the base if non-zero. | |||
33712 | if (!FalseC->isNullValue()) | |||
33713 | R = DAG.getNode(ISD::ADD, DL, VT, R, SDValue(FalseC, 0)); | |||
33714 | ||||
33715 | return R; | |||
33716 | } | |||
33717 | ||||
33718 | return SDValue(); | |||
33719 | } | |||
33720 | ||||
33721 | /// If this is a *dynamic* select (non-constant condition) and we can match | |||
33722 | /// this node with one of the variable blend instructions, restructure the | |||
33723 | /// condition so that blends can use the high (sign) bit of each element. | |||
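/// The replacement X86ISD::SHRUNKBLEND node tests only the sign bit of each
/// condition element, which lets SimplifyDemandedBits prune the computation
/// that feeds the condition.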
33724 | static SDValue combineVSelectToShrunkBlend(SDNode *N, SelectionDAG &DAG, | |||
33725 | TargetLowering::DAGCombinerInfo &DCI, | |||
33726 | const X86Subtarget &Subtarget) { | |||
33727 | SDValue Cond = N->getOperand(0); | |||
33728 | if (N->getOpcode() != ISD::VSELECT || | |||
33729 | ISD::isBuildVectorOfConstantSDNodes(Cond.getNode())) | |||
33730 | return SDValue(); | |||
33731 | ||||
33732 | // Don't optimize before the condition has been transformed to a legal type | |||
33733 | // and don't ever optimize vector selects that map to AVX512 mask-registers. | |||
33734 | unsigned BitWidth = Cond.getScalarValueSizeInBits(); | |||
33735 | if (BitWidth < 8 || BitWidth > 64) | |||
33736 | return SDValue(); | |||
33737 | ||||
33738 | // We can only handle the cases where VSELECT is directly legal on the | |||
33739 | // subtarget. We custom lower VSELECT nodes with constant conditions and | |||
33740 | // this makes it hard to see whether a dynamic VSELECT will correctly | |||
33741 | // lower, so we both check the operation's status and explicitly handle the | |||
33742 | // cases where a *dynamic* blend will fail even though a constant-condition | |||
33743 | // blend could be custom lowered. | |||
33744 | // FIXME: We should find a better way to handle this class of problems. | |||
33745 | // Potentially, we should combine constant-condition vselect nodes | |||
33746 | // pre-legalization into shuffles and not mark as many types as custom | |||
33747 | // lowered. | |||
33748 | const TargetLowering &TLI = DAG.getTargetLoweringInfo(); | |||
33749 | EVT VT = N->getValueType(0); | |||
33750 | if (!TLI.isOperationLegalOrCustom(ISD::VSELECT, VT)) | |||
33751 | return SDValue(); | |||
33752 | // FIXME: We don't support i16-element blends currently. We could and | |||
33753 | // should support them by making *all* the bits in the condition be set | |||
33754 | // rather than just the high bit and using an i8-element blend. | |||
33755 | if (VT.getVectorElementType() == MVT::i16) | |||
33756 | return SDValue(); | |||
33757 | // Dynamic blending was only available from SSE4.1 onward. | |||
33758 | if (VT.is128BitVector() && !Subtarget.hasSSE41()) | |||
33759 | return SDValue(); | |||
33760 |   // Byte blends are only available in AVX2. | |||
33761 | if (VT == MVT::v32i8 && !Subtarget.hasAVX2()) | |||
33762 | return SDValue(); | |||
33763 | // There are no 512-bit blend instructions that use sign bits. | |||
33764 | if (VT.is512BitVector()) | |||
33765 | return SDValue(); | |||
33766 | ||||
33767 | // TODO: Add other opcodes eventually lowered into BLEND. | |||
33768 | for (SDNode::use_iterator UI = Cond->use_begin(), UE = Cond->use_end(); | |||
33769 | UI != UE; ++UI) | |||
33770 | if (UI->getOpcode() != ISD::VSELECT || UI.getOperandNo() != 0) | |||
33771 | return SDValue(); | |||
33772 | ||||
33773 | APInt DemandedMask(APInt::getSignMask(BitWidth)); | |||
33774 | KnownBits Known; | |||
33775 | TargetLowering::TargetLoweringOpt TLO(DAG, !DCI.isBeforeLegalize(), | |||
33776 | !DCI.isBeforeLegalizeOps()); | |||
33777 | if (!TLI.SimplifyDemandedBits(Cond, DemandedMask, Known, TLO, 0, true)) | |||
33778 | return SDValue(); | |||
33779 | ||||
33780 | // If we changed the computation somewhere in the DAG, this change will | |||
33781 | // affect all users of Cond. Update all the nodes so that we do not use | |||
33782 | // the generic VSELECT anymore. Otherwise, we may perform wrong | |||
33783 | // optimizations as we messed with the actual expectation for the vector | |||
33784 | // boolean values. | |||
33785 | for (SDNode *U : Cond->uses()) { | |||
33786 | SDValue SB = DAG.getNode(X86ISD::SHRUNKBLEND, SDLoc(U), U->getValueType(0), | |||
33787 | Cond, U->getOperand(1), U->getOperand(2)); | |||
33788 | DAG.ReplaceAllUsesOfValueWith(SDValue(U, 0), SB); | |||
33789 | } | |||
33790 | DCI.CommitTargetLoweringOpt(TLO); | |||
33791 | return SDValue(N, 0); | |||
33792 | } | |||
33793 | ||||
33794 | /// Do target-specific dag combines on SELECT and VSELECT nodes. | |||
33795 | static SDValue combineSelect(SDNode *N, SelectionDAG &DAG, | |||
33796 | TargetLowering::DAGCombinerInfo &DCI, | |||
33797 | const X86Subtarget &Subtarget) { | |||
33798 | SDLoc DL(N); | |||
33799 | SDValue Cond = N->getOperand(0); | |||
33800 | SDValue LHS = N->getOperand(1); | |||
33801 | SDValue RHS = N->getOperand(2); | |||
33802 | ||||
33803 | // Try simplification again because we use this function to optimize | |||
33804 | // SHRUNKBLEND nodes that are not handled by the generic combiner. | |||
33805 | if (SDValue V = DAG.simplifySelect(Cond, LHS, RHS)) | |||
33806 | return V; | |||
33807 | ||||
33808 | EVT VT = LHS.getValueType(); | |||
33809 | EVT CondVT = Cond.getValueType(); | |||
33810 | const TargetLowering &TLI = DAG.getTargetLoweringInfo(); | |||
33811 | ||||
33812 | // Convert vselects with constant condition into shuffles. | |||
33813 | if (ISD::isBuildVectorOfConstantSDNodes(Cond.getNode()) && | |||
33814 | DCI.isBeforeLegalizeOps()) { | |||
33815 | SmallVector<int, 64> Mask; | |||
33816 | if (createShuffleMaskFromVSELECT(Mask, Cond)) | |||
33817 | return DAG.getVectorShuffle(VT, DL, LHS, RHS, Mask); | |||
33818 | } | |||
33819 | ||||
33820 | // If we have SSE[12] support, try to form min/max nodes. SSE min/max | |||
33821 | // instructions match the semantics of the common C idiom x<y?x:y but not | |||
33822 | // x<=y?x:y, because of how they handle negative zero (which can be | |||
33823 | // ignored in unsafe-math mode). | |||
33824 | // We also try to create v2f32 min/max nodes, which we later widen to v4f32. | |||
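  // SSE MIN/MAX return the second source operand when the inputs are unordered
  // (NaN) or equal, which matches x<y?x:y exactly; the CondCode cases below
  // decide when the operands may be swapped and when we must bail out.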
33825 | if (Cond.getOpcode() == ISD::SETCC && VT.isFloatingPoint() && | |||
33826 | VT != MVT::f80 && VT != MVT::f128 && | |||
33827 | (TLI.isTypeLegal(VT) || VT == MVT::v2f32) && | |||
33828 | (Subtarget.hasSSE2() || | |||
33829 | (Subtarget.hasSSE1() && VT.getScalarType() == MVT::f32))) { | |||
33830 | ISD::CondCode CC = cast<CondCodeSDNode>(Cond.getOperand(2))->get(); | |||
33831 | ||||
33832 | unsigned Opcode = 0; | |||
33833 | // Check for x CC y ? x : y. | |||
33834 | if (DAG.isEqualTo(LHS, Cond.getOperand(0)) && | |||
33835 | DAG.isEqualTo(RHS, Cond.getOperand(1))) { | |||
33836 | switch (CC) { | |||
33837 | default: break; | |||
33838 | case ISD::SETULT: | |||
33839 | // Converting this to a min would handle NaNs incorrectly, and swapping | |||
33840 | // the operands would cause it to handle comparisons between positive | |||
33841 | // and negative zero incorrectly. | |||
33842 | if (!DAG.isKnownNeverNaN(LHS) || !DAG.isKnownNeverNaN(RHS)) { | |||
33843 | if (!DAG.getTarget().Options.UnsafeFPMath && | |||
33844 | !(DAG.isKnownNeverZeroFloat(LHS) || | |||
33845 | DAG.isKnownNeverZeroFloat(RHS))) | |||
33846 | break; | |||
33847 | std::swap(LHS, RHS); | |||
33848 | } | |||
33849 | Opcode = X86ISD::FMIN; | |||
33850 | break; | |||
33851 | case ISD::SETOLE: | |||
33852 | // Converting this to a min would handle comparisons between positive | |||
33853 | // and negative zero incorrectly. | |||
33854 | if (!DAG.getTarget().Options.UnsafeFPMath && | |||
33855 | !DAG.isKnownNeverZeroFloat(LHS) && !DAG.isKnownNeverZeroFloat(RHS)) | |||
33856 | break; | |||
33857 | Opcode = X86ISD::FMIN; | |||
33858 | break; | |||
33859 | case ISD::SETULE: | |||
33860 | // Converting this to a min would handle both negative zeros and NaNs | |||
33861 | // incorrectly, but we can swap the operands to fix both. | |||
33862 | std::swap(LHS, RHS); | |||
33863 |       LLVM_FALLTHROUGH; | |||
33864 | case ISD::SETOLT: | |||
33865 | case ISD::SETLT: | |||
33866 | case ISD::SETLE: | |||
33867 | Opcode = X86ISD::FMIN; | |||
33868 | break; | |||
33869 | ||||
33870 | case ISD::SETOGE: | |||
33871 | // Converting this to a max would handle comparisons between positive | |||
33872 | // and negative zero incorrectly. | |||
33873 | if (!DAG.getTarget().Options.UnsafeFPMath && | |||
33874 | !DAG.isKnownNeverZeroFloat(LHS) && !DAG.isKnownNeverZeroFloat(RHS)) | |||
33875 | break; | |||
33876 | Opcode = X86ISD::FMAX; | |||
33877 | break; | |||
33878 | case ISD::SETUGT: | |||
33879 | // Converting this to a max would handle NaNs incorrectly, and swapping | |||
33880 | // the operands would cause it to handle comparisons between positive | |||
33881 | // and negative zero incorrectly. | |||
33882 | if (!DAG.isKnownNeverNaN(LHS) || !DAG.isKnownNeverNaN(RHS)) { | |||
33883 | if (!DAG.getTarget().Options.UnsafeFPMath && | |||
33884 | !(DAG.isKnownNeverZeroFloat(LHS) || | |||
33885 | DAG.isKnownNeverZeroFloat(RHS))) | |||
33886 | break; | |||
33887 | std::swap(LHS, RHS); | |||
33888 | } | |||
33889 | Opcode = X86ISD::FMAX; | |||
33890 | break; | |||
33891 | case ISD::SETUGE: | |||
33892 | // Converting this to a max would handle both negative zeros and NaNs | |||
33893 | // incorrectly, but we can swap the operands to fix both. | |||
33894 | std::swap(LHS, RHS); | |||
33895 |       LLVM_FALLTHROUGH; | |||
33896 | case ISD::SETOGT: | |||
33897 | case ISD::SETGT: | |||
33898 | case ISD::SETGE: | |||
33899 | Opcode = X86ISD::FMAX; | |||
33900 | break; | |||
33901 | } | |||
33902 | // Check for x CC y ? y : x -- a min/max with reversed arms. | |||
33903 | } else if (DAG.isEqualTo(LHS, Cond.getOperand(1)) && | |||
33904 | DAG.isEqualTo(RHS, Cond.getOperand(0))) { | |||
33905 | switch (CC) { | |||
33906 | default: break; | |||
33907 | case ISD::SETOGE: | |||
33908 | // Converting this to a min would handle comparisons between positive | |||
33909 | // and negative zero incorrectly, and swapping the operands would | |||
33910 | // cause it to handle NaNs incorrectly. | |||
33911 | if (!DAG.getTarget().Options.UnsafeFPMath && | |||
33912 | !(DAG.isKnownNeverZeroFloat(LHS) || | |||
33913 | DAG.isKnownNeverZeroFloat(RHS))) { | |||
33914 | if (!DAG.isKnownNeverNaN(LHS) || !DAG.isKnownNeverNaN(RHS)) | |||
33915 | break; | |||
33916 | std::swap(LHS, RHS); | |||
33917 | } | |||
33918 | Opcode = X86ISD::FMIN; | |||
33919 | break; | |||
33920 | case ISD::SETUGT: | |||
33921 | // Converting this to a min would handle NaNs incorrectly. | |||
33922 | if (!DAG.getTarget().Options.UnsafeFPMath && | |||
33923 | (!DAG.isKnownNeverNaN(LHS) || !DAG.isKnownNeverNaN(RHS))) | |||
33924 | break; | |||
33925 | Opcode = X86ISD::FMIN; | |||
33926 | break; | |||
33927 | case ISD::SETUGE: | |||
33928 | // Converting this to a min would handle both negative zeros and NaNs | |||
33929 | // incorrectly, but we can swap the operands to fix both. | |||
33930 | std::swap(LHS, RHS); | |||
33931 |       LLVM_FALLTHROUGH; | |||
33932 | case ISD::SETOGT: | |||
33933 | case ISD::SETGT: | |||
33934 | case ISD::SETGE: | |||
33935 | Opcode = X86ISD::FMIN; | |||
33936 | break; | |||
33937 | ||||
33938 | case ISD::SETULT: | |||
33939 | // Converting this to a max would handle NaNs incorrectly. | |||
33940 | if (!DAG.isKnownNeverNaN(LHS) || !DAG.isKnownNeverNaN(RHS)) | |||
33941 | break; | |||
33942 | Opcode = X86ISD::FMAX; | |||
33943 | break; | |||
33944 | case ISD::SETOLE: | |||
33945 | // Converting this to a max would handle comparisons between positive | |||
33946 | // and negative zero incorrectly, and swapping the operands would | |||
33947 | // cause it to handle NaNs incorrectly. | |||
33948 | if (!DAG.getTarget().Options.UnsafeFPMath && | |||
33949 | !DAG.isKnownNeverZeroFloat(LHS) && | |||
33950 | !DAG.isKnownNeverZeroFloat(RHS)) { | |||
33951 | if (!DAG.isKnownNeverNaN(LHS) || !DAG.isKnownNeverNaN(RHS)) | |||
33952 | break; | |||
33953 | std::swap(LHS, RHS); | |||
33954 | } | |||
33955 | Opcode = X86ISD::FMAX; | |||
33956 | break; | |||
33957 | case ISD::SETULE: | |||
33958 | // Converting this to a max would handle both negative zeros and NaNs | |||
33959 | // incorrectly, but we can swap the operands to fix both. | |||
33960 | std::swap(LHS, RHS); | |||
33961 |       LLVM_FALLTHROUGH; | |||
33962 | case ISD::SETOLT: | |||
33963 | case ISD::SETLT: | |||
33964 | case ISD::SETLE: | |||
33965 | Opcode = X86ISD::FMAX; | |||
33966 | break; | |||
33967 | } | |||
33968 | } | |||
33969 | ||||
33970 | if (Opcode) | |||
33971 | return DAG.getNode(Opcode, DL, N->getValueType(0), LHS, RHS); | |||
33972 | } | |||
33973 | ||||
33974 | // Some mask scalar intrinsics rely on checking if only one bit is set | |||
33975 | // and implement it in C code like this: | |||
33976 | // A[0] = (U & 1) ? A[0] : W[0]; | |||
33977 | // This creates some redundant instructions that break pattern matching. | |||
33978 |   // fold (select (setcc (and X, 1), 0, seteq), Y, Z) -> (select (and X, 1), Z, Y) | |||
33979 | if (Subtarget.hasAVX512() && N->getOpcode() == ISD::SELECT && | |||
33980 | Cond.getOpcode() == ISD::SETCC && (VT == MVT::f32 || VT == MVT::f64)) { | |||
33981 | ISD::CondCode CC = cast<CondCodeSDNode>(Cond.getOperand(2))->get(); | |||
33982 | SDValue AndNode = Cond.getOperand(0); | |||
33983 | if (AndNode.getOpcode() == ISD::AND && CC == ISD::SETEQ && | |||
33984 | isNullConstant(Cond.getOperand(1)) && | |||
33985 | isOneConstant(AndNode.getOperand(1))) { | |||
33986 | // LHS and RHS are swapped because the | |||
33987 | // setcc outputs 1 when the AND result is 0, and vice versa. | |||
33988 | AndNode = DAG.getZExtOrTrunc(AndNode, DL, MVT::i8); | |||
33989 | return DAG.getNode(ISD::SELECT, DL, VT, AndNode, RHS, LHS); | |||
33990 | } | |||
33991 | } | |||
33992 | ||||
33993 | // v16i8 (select v16i1, v16i8, v16i8) does not have a proper | |||
33994 | // lowering on KNL. In this case we convert it to | |||
33995 | // v16i8 (select v16i8, v16i8, v16i8) and use the AVX instruction. | |||
33996 | // The same situation applies to all vectors of i8 and i16 without BWI. | |||
33997 | // Make sure we extend these even before type legalization gets a chance to | |||
33998 | // split wide vectors. | |||
33999 | // Since SKX these selects have a proper lowering. | |||
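// For illustration: on KNL (AVX512F without BWI), a
// (v32i8 (vselect v32i1 %m, %a, %b)) would otherwise be split apart by type
// legalization; sign-extending %m to v32i8 first allows a single
// vpblendvb-style AVX2 blend instead.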
34000 | if (Subtarget.hasAVX512() && !Subtarget.hasBWI() && CondVT.isVector() && | |||
34001 | CondVT.getVectorElementType() == MVT::i1 && | |||
34002 | (ExperimentalVectorWideningLegalization || | |||
34003 | VT.getVectorNumElements() > 4) && | |||
34004 | (VT.getVectorElementType() == MVT::i8 || | |||
34005 | VT.getVectorElementType() == MVT::i16)) { | |||
34006 | Cond = DAG.getNode(ISD::SIGN_EXTEND, DL, VT, Cond); | |||
34007 | return DAG.getNode(N->getOpcode(), DL, VT, Cond, LHS, RHS); | |||
34008 | } | |||
34009 | ||||
34010 | if (SDValue V = combineSelectOfTwoConstants(N, DAG)) | |||
34011 | return V; | |||
34012 | ||||
34013 | // Canonicalize max and min: | |||
34014 | // (x > y) ? x : y -> (x >= y) ? x : y | |||
34015 | // (x < y) ? x : y -> (x <= y) ? x : y | |||
34016 | // This allows use of COND_S / COND_NS (see TranslateX86CC) which eliminates | |||
34017 | // the need for an extra compare | |||
34018 | // against zero. e.g. | |||
34019 | // (x - y) > 0 ? (x - y) : 0 -> (x - y) >= 0 ? (x - y) : 0 | |||
34020 | // subl %esi, %edi | |||
34021 | // testl %edi, %edi | |||
34022 | // movl $0, %eax | |||
34023 | // cmovgl %edi, %eax | |||
34024 | // => | |||
34025 | // xorl %eax, %eax | |||
34026 | // subl %esi, %edi | |||
34027 | // cmovsl %eax, %edi | |||
34028 | if (N->getOpcode() == ISD::SELECT && Cond.getOpcode() == ISD::SETCC && | |||
34029 | DAG.isEqualTo(LHS, Cond.getOperand(0)) && | |||
34030 | DAG.isEqualTo(RHS, Cond.getOperand(1))) { | |||
34031 | ISD::CondCode CC = cast<CondCodeSDNode>(Cond.getOperand(2))->get(); | |||
34032 | switch (CC) { | |||
34033 | default: break; | |||
34034 | case ISD::SETLT: | |||
34035 | case ISD::SETGT: { | |||
34036 | ISD::CondCode NewCC = (CC == ISD::SETLT) ? ISD::SETLE : ISD::SETGE; | |||
34037 | Cond = DAG.getSetCC(SDLoc(Cond), Cond.getValueType(), | |||
34038 | Cond.getOperand(0), Cond.getOperand(1), NewCC); | |||
34039 | return DAG.getSelect(DL, VT, Cond, LHS, RHS); | |||
34040 | } | |||
34041 | } | |||
34042 | } | |||
34043 | ||||
34044 | // Match VSELECTs into subs with unsigned saturation. | |||
34045 | if (N->getOpcode() == ISD::VSELECT && Cond.getOpcode() == ISD::SETCC && | |||
34046 | // psubus is available in SSE2 for i8 and i16 vectors. | |||
34047 | Subtarget.hasSSE2() && VT.getVectorNumElements() >= 2 && | |||
34048 | isPowerOf2_32(VT.getVectorNumElements()) && | |||
34049 | (VT.getVectorElementType() == MVT::i8 || | |||
34050 | VT.getVectorElementType() == MVT::i16)) { | |||
34051 | ISD::CondCode CC = cast<CondCodeSDNode>(Cond.getOperand(2))->get(); | |||
34052 | ||||
34053 | // Check if one of the arms of the VSELECT is a zero vector. If it's on the | |||
34054 | // left side, invert the predicate to simplify the logic below. | |||
34055 | SDValue Other; | |||
34056 | if (ISD::isBuildVectorAllZeros(LHS.getNode())) { | |||
34057 | Other = RHS; | |||
34058 | CC = ISD::getSetCCInverse(CC, true); | |||
34059 | } else if (ISD::isBuildVectorAllZeros(RHS.getNode())) { | |||
34060 | Other = LHS; | |||
34061 | } | |||
34062 | ||||
34063 | if (Other.getNode() && Other->getNumOperands() == 2 && | |||
34064 | Other->getOperand(0) == Cond.getOperand(0)) { | |||
34065 | SDValue OpLHS = Other->getOperand(0), OpRHS = Other->getOperand(1); | |||
34066 | SDValue CondRHS = Cond->getOperand(1); | |||
34067 | ||||
34068 | // Look for a general sub with unsigned saturation first. | |||
34069 | // x >= y ? x-y : 0 --> subus x, y | |||
34070 | // x > y ? x-y : 0 --> subus x, y | |||
34071 | if ((CC == ISD::SETUGE || CC == ISD::SETUGT) && | |||
34072 | Other->getOpcode() == ISD::SUB && OpRHS == CondRHS) | |||
34073 | return DAG.getNode(ISD::USUBSAT, DL, VT, OpLHS, OpRHS); | |||
34074 | ||||
34075 | if (auto *OpRHSBV = dyn_cast<BuildVectorSDNode>(OpRHS)) { | |||
34076 | if (isa<BuildVectorSDNode>(CondRHS)) { | |||
34077 | // If the RHS is a constant, we have to reverse the constant | |||
34078 | // canonicalization. | |||
34079 | // x > C-1 ? x+(-C) : 0 --> subus x, C | |||
34080 | // TODO: Handle build_vectors with undef elements. | |||
34081 | auto MatchUSUBSAT = [](ConstantSDNode *Op, ConstantSDNode *Cond) { | |||
34082 | return Cond->getAPIntValue() == (-Op->getAPIntValue() - 1); | |||
34083 | }; | |||
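// Worked example (for illustration): with i8 elements and C == 64, the
// canonicalized DAG is 'x > 63 ? x + (-64) : 0'. Here Op == -64 and
// Cond == 63 == -(-64) - 1, so the predicate matches and we can emit
// 'usubsat x, 64'.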
34084 | if (CC == ISD::SETUGT && Other->getOpcode() == ISD::ADD && | |||
34085 | ISD::matchBinaryPredicate(OpRHS, CondRHS, MatchUSUBSAT)) { | |||
34086 | OpRHS = DAG.getNode(ISD::SUB, DL, VT, | |||
34087 | DAG.getConstant(0, DL, VT), OpRHS); | |||
34088 | return DAG.getNode(ISD::USUBSAT, DL, VT, OpLHS, OpRHS); | |||
34089 | } | |||
34090 | ||||
34091 | // Another special case: If C was a sign bit, the sub has been | |||
34092 | // canonicalized into a xor. | |||
34093 | // FIXME: Would it be better to use computeKnownBits to determine | |||
34094 | // whether it's safe to decanonicalize the xor? | |||
34095 | // x s< 0 ? x^C : 0 --> subus x, C | |||
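// For illustration, with i8 elements C is the sign mask 0x80: when x is
// signed-negative (unsigned x >= 128), x ^ 0x80 == x - 128, and when
// x < 128 the selected value is 0 -- exactly 'usubsat x, 128'.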
34096 | if (auto *OpRHSConst = OpRHSBV->getConstantSplatNode()) { | |||
34097 | if (CC == ISD::SETLT && Other.getOpcode() == ISD::XOR && | |||
34098 | ISD::isBuildVectorAllZeros(CondRHS.getNode()) && | |||
34099 | OpRHSConst->getAPIntValue().isSignMask()) { | |||
34100 | // Note that we have to rebuild the RHS constant here to ensure we | |||
34101 | // don't rely on particular values of undef lanes. | |||
34102 | OpRHS = DAG.getConstant(OpRHSConst->getAPIntValue(), DL, VT); | |||
34103 | return DAG.getNode(ISD::USUBSAT, DL, VT, OpLHS, OpRHS); | |||
34104 | } | |||
34105 | } | |||
34106 | } | |||
34107 | } | |||
34108 | } | |||
34109 | } | |||
34110 | ||||
34111 | // Match VSELECTs into add with unsigned saturation. | |||
34112 | if (N->getOpcode() == ISD::VSELECT && Cond.getOpcode() == ISD::SETCC && | |||
34113 | // paddus is available in SSE2 for i8 and i16 vectors. | |||
34114 | Subtarget.hasSSE2() && VT.getVectorNumElements() >= 2 && | |||
34115 | isPowerOf2_32(VT.getVectorNumElements()) && | |||
34116 | (VT.getVectorElementType() == MVT::i8 || | |||
34117 | VT.getVectorElementType() == MVT::i16)) { | |||
34118 | ISD::CondCode CC = cast<CondCodeSDNode>(Cond.getOperand(2))->get(); | |||
34119 | ||||
34120 | SDValue CondLHS = Cond->getOperand(0); | |||
34121 | SDValue CondRHS = Cond->getOperand(1); | |||
34122 | ||||
34123 | // Check if one of the arms of the VSELECT is a vector with all bits set. | |||
34124 | // If it's on the left side, invert the predicate to simplify the logic below. | |||
34125 | SDValue Other; | |||
34126 | if (ISD::isBuildVectorAllOnes(LHS.getNode())) { | |||
34127 | Other = RHS; | |||
34128 | CC = ISD::getSetCCInverse(CC, true); | |||
34129 | } else if (ISD::isBuildVectorAllOnes(RHS.getNode())) { | |||
34130 | Other = LHS; | |||
34131 | } | |||
34132 | ||||
34133 | if (Other.getNode() && Other.getOpcode() == ISD::ADD) { | |||
34134 | SDValue OpLHS = Other.getOperand(0), OpRHS = Other.getOperand(1); | |||
34135 | ||||
34136 | // Canonicalize condition operands. | |||
34137 | if (CC == ISD::SETUGE) { | |||
34138 | std::swap(CondLHS, CondRHS); | |||
34139 | CC = ISD::SETULE; | |||
34140 | } | |||
34141 | ||||
34142 | // We can test against either of the addition operands. | |||
34143 | // x <= x+y ? x+y : ~0 --> addus x, y | |||
34144 | // x+y >= x ? x+y : ~0 --> addus x, y | |||
34145 | if (CC == ISD::SETULE && Other == CondRHS && | |||
34146 | (OpLHS == CondLHS || OpRHS == CondLHS)) | |||
34147 | return DAG.getNode(ISD::UADDSAT, DL, VT, OpLHS, OpRHS); | |||
34148 | ||||
34149 | if (isa<BuildVectorSDNode>(OpRHS) && isa<BuildVectorSDNode>(CondRHS) && | |||
34150 | CondLHS == OpLHS) { | |||
34151 | // If the RHS is a constant, we have to reverse the constant | |||
34152 | // canonicalization. | |||
34153 | // x > ~C ? x+C : ~0 --> addus x, C | |||
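// Worked example (for illustration): with i8 elements and C == 100, the
// canonicalized form is 'x <= 155 ? x + 100 : 255'. Since 155 == ~100,
// MatchUADDSAT succeeds and we can emit 'uaddsat x, 100'.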
34154 | auto MatchUADDSAT = [](ConstantSDNode *Op, ConstantSDNode *Cond) { | |||
34155 | return Cond->getAPIntValue() == ~Op->getAPIntValue(); | |||
34156 | }; | |||
34157 | if (CC == ISD::SETULE && | |||
34158 | ISD::matchBinaryPredicate(OpRHS, CondRHS, MatchUADDSAT)) | |||
34159 | return DAG.getNode(ISD::UADDSAT, DL, VT, OpLHS, OpRHS); | |||
34160 | } | |||
34161 | } | |||
34162 | } | |||
34163 | ||||
34164 | // Early exit check | |||
34165 | if (!TLI.isTypeLegal(VT)) | |||
34166 | return SDValue(); | |||
34167 | ||||
34168 | if (SDValue V = combineVSelectWithAllOnesOrZeros(N, DAG, DCI, Subtarget)) | |||
34169 | return V; | |||
34170 | ||||
34171 | if (SDValue V = combineVSelectToShrunkBlend(N, DAG, DCI, Subtarget)) | |||
34172 | return V; | |||
34173 | ||||
34174 | // Custom action for SELECT MMX | |||
34175 | if (VT == MVT::x86mmx) { | |||
34176 | LHS = DAG.getBitcast(MVT::i64, LHS); | |||
34177 | RHS = DAG.getBitcast(MVT::i64, RHS); | |||
34178 | SDValue newSelect = DAG.getNode(ISD::SELECT, DL, MVT::i64, Cond, LHS, RHS); | |||
34179 | return DAG.getBitcast(VT, newSelect); | |||
34180 | } | |||
34181 | ||||
34182 | return SDValue(); | |||
34183 | } | |||
34184 | ||||
34185 | /// Combine: | |||
34186 | /// (brcond/cmov/setcc .., (cmp (atomic_load_add x, 1), 0), COND_S) | |||
34187 | /// to: | |||
34188 | /// (brcond/cmov/setcc .., (LADD x, 1), COND_LE) | |||
34189 | /// i.e., reusing the EFLAGS produced by the LOCKed instruction. | |||
34190 | /// Note that this is only legal for some op/cc combinations. | |||
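///
/// For illustration (hypothetical source), C code such as
///   if (__atomic_fetch_add(&x, 1, __ATOMIC_SEQ_CST) < 0) ...
/// tests 'old < 0', which is equivalent to 'old + 1 <= 0' under the condition
/// codes chosen below (they consult OF, so overflow is handled), letting the
/// branch consume the EFLAGS of the 'lock add' itself (COND_S becomes COND_LE).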
34191 | static SDValue combineSetCCAtomicArith(SDValue Cmp, X86::CondCode &CC, | |||
34192 | SelectionDAG &DAG, | |||
34193 | const X86Subtarget &Subtarget) { | |||
34194 | // This combine only operates on CMP-like nodes. | |||
34195 | if (!(Cmp.getOpcode() == X86ISD::CMP || | |||
34196 | (Cmp.getOpcode() == X86ISD::SUB && !Cmp->hasAnyUseOfValue(0)))) | |||
34197 | return SDValue(); | |||
34198 | ||||
34199 | // Can't replace the cmp if it has more uses than the one we're looking at. | |||
34200 | // FIXME: We would like to be able to handle this, but would need to make sure | |||
34201 | // all uses were updated. | |||
34202 | if (!Cmp.hasOneUse()) | |||
34203 | return SDValue(); | |||
34204 | ||||
34205 | // This only applies to variations of the common case: | |||
34206 | // (icmp slt x, 0) -> (icmp sle (add x, 1), 0) | |||
34207 | // (icmp sge x, 0) -> (icmp sgt (add x, 1), 0) | |||
34208 | // (icmp sle x, 0) -> (icmp slt (sub x, 1), 0) | |||
34209 | // (icmp sgt x, 0) -> (icmp sge (sub x, 1), 0) | |||
34210 | // Using the proper condcodes (see below), overflow is checked for. | |||
34211 | ||||
34212 | // FIXME: We can generalize both constraints: | |||
34213 | // - XOR/OR/AND (if they were made to survive AtomicExpand) | |||
34214 | // - LHS != 1 | |||
34215 | // if the result is compared. | |||
34216 | ||||
34217 | SDValue CmpLHS = Cmp.getOperand(0); | |||
34218 | SDValue CmpRHS = Cmp.getOperand(1); | |||
34219 | ||||
34220 | if (!CmpLHS.hasOneUse()) | |||
34221 | return SDValue(); | |||
34222 | ||||
34223 | unsigned Opc = CmpLHS.getOpcode(); | |||
34224 | if (Opc != ISD::ATOMIC_LOAD_ADD && Opc != ISD::ATOMIC_LOAD_SUB) | |||
34225 | return SDValue(); | |||
34226 | ||||
34227 | SDValue OpRHS = CmpLHS.getOperand(2); | |||
34228 | auto *OpRHSC = dyn_cast<ConstantSDNode>(OpRHS); | |||
34229 | if (!OpRHSC) | |||
34230 | return SDValue(); | |||
34231 | ||||
34232 | APInt Addend = OpRHSC->getAPIntValue(); | |||
34233 | if (Opc == ISD::ATOMIC_LOAD_SUB) | |||
34234 | Addend = -Addend; | |||
34235 | ||||
34236 | auto *CmpRHSC = dyn_cast<ConstantSDNode>(CmpRHS); | |||
34237 | if (!CmpRHSC) | |||
34238 | return SDValue(); | |||
34239 | ||||
34240 | APInt Comparison = CmpRHSC->getAPIntValue(); | |||
34241 | ||||
34242 | // If the addend is the negation of the comparison value, then we can do | |||
34243 | // a full comparison by emitting the atomic arithmetic as a locked sub. | |||
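// For illustration: '__atomic_fetch_add(&x, 4, ...) == -4' tests
// 'old == -4', i.e. 'old + 4 == 0'. A 'lock sub' by -4 computes old + 4 and
// sets ZF on that result, so the original condition code can be kept.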
34244 | if (Comparison == -Addend) { | |||
34245 | // The CC is fine, but we need to rewrite the LHS of the comparison as an | |||
34246 | // atomic sub. | |||
34247 | auto *AN = cast<AtomicSDNode>(CmpLHS.getNode()); | |||
34248 | auto AtomicSub = DAG.getAtomic( | |||
34249 | ISD::ATOMIC_LOAD_SUB, SDLoc(CmpLHS), CmpLHS.getValueType(), | |||
34250 | /*Chain*/ CmpLHS.getOperand(0), /*LHS*/ CmpLHS.getOperand(1), | |||
34251 | /*RHS*/ DAG.getConstant(-Addend, SDLoc(CmpRHS), CmpRHS.getValueType()), | |||
34252 | AN->getMemOperand()); | |||
34253 | // If the comparison uses the CF flag we can't use INC/DEC instructions. | |||
34254 | bool NeedCF = false; | |||
34255 | switch (CC) { | |||
34256 | default: break; | |||
34257 | case X86::COND_A: case X86::COND_AE: | |||
34258 | case X86::COND_B: case X86::COND_BE: | |||
34259 | NeedCF = true; | |||
34260 | break; | |||
34261 | } | |||
34262 | auto LockOp = lowerAtomicArithWithLOCK(AtomicSub, DAG, Subtarget, !NeedCF); | |||
34263 | DAG.ReplaceAllUsesOfValueWith(CmpLHS.getValue(0), | |||
34264 | DAG.getUNDEF(CmpLHS.getValueType())); | |||
34265 | DAG.ReplaceAllUsesOfValueWith(CmpLHS.getValue(1), LockOp.getValue(1)); | |||
34266 | return LockOp; | |||
34267 | } | |||
34268 | ||||
34269 | // We can handle comparisons with zero in a number of cases by manipulating | |||
34270 | // the CC used. | |||
34271 | if (!Comparison.isNullValue()) | |||
34272 | return SDValue(); | |||
34273 | ||||
34274 | if (CC == X86::COND_S && Addend == 1) | |||
34275 | CC = X86::COND_LE; | |||
34276 | else if (CC == X86::COND_NS && Addend == 1) | |||
34277 | CC = X86::COND_G; | |||
34278 | else if (CC == X86::COND_G && Addend == -1) | |||
34279 | CC = X86::COND_GE; | |||
34280 | else if (CC == X86::COND_LE && Addend == -1) | |||
34281 | CC = X86::COND_L; | |||
34282 | else | |||
34283 | return SDValue(); | |||
34284 | ||||
34285 | SDValue LockOp = lowerAtomicArithWithLOCK(CmpLHS, DAG, Subtarget); | |||
34286 | DAG.ReplaceAllUsesOfValueWith(CmpLHS.getValue(0), | |||
34287 | DAG.getUNDEF(CmpLHS.getValueType())); | |||
34288 | DAG.ReplaceAllUsesOfValueWith(CmpLHS.getValue(1), LockOp.getValue(1)); | |||
34289 | return LockOp; | |||
34290 | } | |||
34291 | ||||
34292 | // Check whether a boolean test is testing a boolean value generated by | |||
34293 | // X86ISD::SETCC. If so, return the operand of that SETCC and the proper | |||
34294 | // condition code. | |||
34295 | // | |||
34296 | // Simplify the following patterns: | |||
34297 | // (Op (CMP (SETCC Cond EFLAGS) 1) EQ) or | |||
34298 | // (Op (CMP (SETCC Cond EFLAGS) 0) NEQ) | |||
34299 | // to (Op EFLAGS Cond) | |||
34300 | // | |||
34301 | // (Op (CMP (SETCC Cond EFLAGS) 0) EQ) or | |||
34302 | // (Op (CMP (SETCC Cond EFLAGS) 1) NEQ) | |||
34303 | // to (Op EFLAGS !Cond) | |||
34304 | // | |||
34305 | // where Op could be BRCOND or CMOV. | |||
34306 | // | |||
34307 | static SDValue checkBoolTestSetCCCombine(SDValue Cmp, X86::CondCode &CC) { | |||
34308 | // This combine only operates on CMP-like nodes. | |||
34309 | if (!(Cmp.getOpcode() == X86ISD::CMP || | |||
34310 | (Cmp.getOpcode() == X86ISD::SUB && !Cmp->hasAnyUseOfValue(0)))) | |||
34311 | return SDValue(); | |||
34312 | ||||
34313 | // Quit if not used as a boolean value. | |||
34314 | if (CC != X86::COND_E && CC != X86::COND_NE) | |||
34315 | return SDValue(); | |||
34316 | ||||
34317 | // Check the CMP operands. One of them should be 0 or 1 and the other should | |||
34318 | // be a SetCC or a value extended from one. | |||
34319 | SDValue Op1 = Cmp.getOperand(0); | |||
34320 | SDValue Op2 = Cmp.getOperand(1); | |||
34321 | ||||
34322 | SDValue SetCC; | |||
34323 | const ConstantSDNode* C = nullptr; | |||
34324 | bool needOppositeCond = (CC == X86::COND_E); | |||
34325 | bool checkAgainstTrue = false; // Is it a comparison against 1? | |||
34326 | ||||
34327 | if ((C = dyn_cast<ConstantSDNode>(Op1))) | |||
34328 | SetCC = Op2; | |||
34329 | else if ((C = dyn_cast<ConstantSDNode>(Op2))) | |||
34330 | SetCC = Op1; | |||
34331 | else // Quit if neither operand is a constant. | |||
34332 | return SDValue(); | |||
34333 | ||||
34334 | if (C->getZExtValue() == 1) { | |||
34335 | needOppositeCond = !needOppositeCond; | |||
34336 | checkAgainstTrue = true; | |||
34337 | } else if (C->getZExtValue() != 0) | |||
34338 | // Quit if the constant is neither 0 nor 1. | |||
34339 | return SDValue(); | |||
34340 | ||||
34341 | bool truncatedToBoolWithAnd = false; | |||
34342 | // Skip (zext $x), (trunc $x), or (and $x, 1) nodes. | |||
34343 | while (SetCC.getOpcode() == ISD::ZERO_EXTEND || | |||
34344 | SetCC.getOpcode() == ISD::TRUNCATE || | |||
34345 | SetCC.getOpcode() == ISD::AND) { | |||
34346 | if (SetCC.getOpcode() == ISD::AND) { | |||
34347 | int OpIdx = -1; | |||
34348 | if (isOneConstant(SetCC.getOperand(0))) | |||
34349 | OpIdx = 1; | |||
34350 | if (isOneConstant(SetCC.getOperand(1))) | |||
34351 | OpIdx = 0; | |||
34352 | if (OpIdx < 0) | |||
34353 | break; | |||
34354 | SetCC = SetCC.getOperand(OpIdx); | |||
34355 | truncatedToBoolWithAnd = true; | |||
34356 | } else | |||
34357 | SetCC = SetCC.getOperand(0); | |||
34358 | } | |||
34359 | ||||
34360 | switch (SetCC.getOpcode()) { | |||
34361 | case X86ISD::SETCC_CARRY: | |||
34362 | // Since SETCC_CARRY gives output based on R = CF ? ~0 : 0, it's unsafe to | |||
34363 | // simplify it if the result of SETCC_CARRY is not canonicalized to 0 or 1, | |||
34364 | // i.e. it's a comparison against true but the result of SETCC_CARRY is not | |||
34365 | // truncated to i1 using 'and'. | |||
34366 | if (checkAgainstTrue && !truncatedToBoolWithAnd) | |||
34367 | break; | |||
34368 | assert(X86::CondCode(SetCC.getConstantOperandVal(0)) == X86::COND_B && | |||
34369 | "Invalid use of SETCC_CARRY!"); | |||
34370 | LLVM_FALLTHROUGH; | |||
34371 | case X86ISD::SETCC: | |||
34372 | // Set the condition code or opposite one if necessary. | |||
34373 | CC = X86::CondCode(SetCC.getConstantOperandVal(0)); | |||
34374 | if (needOppositeCond) | |||
34375 | CC = X86::GetOppositeBranchCondition(CC); | |||
34376 | return SetCC.getOperand(1); | |||
34377 | case X86ISD::CMOV: { | |||
34378 | // Check whether the false/true values are canonical, i.e. 0 or 1. | |||
34379 | ConstantSDNode *FVal = dyn_cast<ConstantSDNode>(SetCC.getOperand(0)); | |||
34380 | ConstantSDNode *TVal = dyn_cast<ConstantSDNode>(SetCC.getOperand(1)); | |||
34381 | // Quit if true value is not a constant. | |||
34382 | if (!TVal) | |||
34383 | return SDValue(); | |||
34384 | // Quit if false value is not a constant. | |||
34385 | if (!FVal) { | |||
34386 | SDValue Op = SetCC.getOperand(0); | |||
34387 | // Skip 'zext' or 'trunc' node. | |||
34388 | if (Op.getOpcode() == ISD::ZERO_EXTEND || | |||
34389 | Op.getOpcode() == ISD::TRUNCATE) | |||
34390 | Op = Op.getOperand(0); | |||
34391 | // A special case for rdrand/rdseed, which produce 0 when the false | |||
34392 | // condition occurs. | |||
34393 | if ((Op.getOpcode() != X86ISD::RDRAND && | |||
34394 | Op.getOpcode() != X86ISD::RDSEED) || Op.getResNo() != 0) | |||
34395 | return SDValue(); | |||
34396 | } | |||
34397 | // Quit if false value is not the constant 0 or 1. | |||
34398 | bool FValIsFalse = true; | |||
34399 | if (FVal && FVal->getZExtValue() != 0) { | |||
34400 | if (FVal->getZExtValue() != 1) | |||
34401 | return SDValue(); | |||
34402 | // If FVal is 1, opposite cond is needed. | |||
34403 | needOppositeCond = !needOppositeCond; | |||
34404 | FValIsFalse = false; | |||
34405 | } | |||
34406 | // Quit if TVal is not the constant opposite of FVal. | |||
34407 | if (FValIsFalse && TVal->getZExtValue() != 1) | |||
34408 | return SDValue(); | |||
34409 | if (!FValIsFalse && TVal->getZExtValue() != 0) | |||
34410 | return SDValue(); | |||
34411 | CC = X86::CondCode(SetCC.getConstantOperandVal(2)); | |||
34412 | if (needOppositeCond) | |||
34413 | CC = X86::GetOppositeBranchCondition(CC); | |||
34414 | return SetCC.getOperand(3); | |||
34415 | } | |||
34416 | } | |||
34417 | ||||
34418 | return SDValue(); | |||
34419 | } | |||
34420 | ||||
34421 | /// Check whether Cond is an AND/OR of SETCCs off of the same EFLAGS. | |||
34422 | /// Match: | |||
34423 | /// (X86or (X86setcc) (X86setcc)) | |||
34424 | /// (X86cmp (and (X86setcc) (X86setcc)), 0) | |||
34425 | static bool checkBoolTestAndOrSetCCCombine(SDValue Cond, X86::CondCode &CC0, | |||
34426 | X86::CondCode &CC1, SDValue &Flags, | |||
34427 | bool &isAnd) { | |||
34428 | if (Cond->getOpcode() == X86ISD::CMP) { | |||
34429 | if (!isNullConstant(Cond->getOperand(1))) | |||
34430 | return false; | |||
34431 | ||||
34432 | Cond = Cond->getOperand(0); | |||
34433 | } | |||
34434 | ||||
34435 | isAnd = false; | |||
34436 | ||||
34437 | SDValue SetCC0, SetCC1; | |||
34438 | switch (Cond->getOpcode()) { | |||
34439 | default: return false; | |||
34440 | case ISD::AND: | |||
34441 | case X86ISD::AND: | |||
34442 | isAnd = true; | |||
34443 | LLVM_FALLTHROUGH; | |||
34444 | case ISD::OR: | |||
34445 | case X86ISD::OR: | |||
34446 | SetCC0 = Cond->getOperand(0); | |||
34447 | SetCC1 = Cond->getOperand(1); | |||
34448 | break; | |||
34449 | } | |||
34450 | ||||
34451 | // Make sure we have SETCC nodes, using the same flags value. | |||
34452 | if (SetCC0.getOpcode() != X86ISD::SETCC || | |||
34453 | SetCC1.getOpcode() != X86ISD::SETCC || | |||
34454 | SetCC0->getOperand(1) != SetCC1->getOperand(1)) | |||
34455 | return false; | |||
34456 | ||||
34457 | CC0 = (X86::CondCode)SetCC0->getConstantOperandVal(0); | |||
34458 | CC1 = (X86::CondCode)SetCC1->getConstantOperandVal(0); | |||
34459 | Flags = SetCC0->getOperand(1); | |||
34460 | return true; | |||
34461 | } | |||
34462 | ||||
34463 | // When legalizing carry, we create carries via 'add X, -1'. | |||
34464 | // If that comes from an actual carry, via setcc, we use the | |||
34465 | // carry directly. | |||
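// For illustration: if X is (zext (setcc COND_B, flags)), then
// (X86ISD::ADD X, -1) merely reproduces CF (1 + 0xFF..FF carries out,
// 0 + 0xFF..FF does not), so 'flags' can be consumed directly.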
34466 | static SDValue combineCarryThroughADD(SDValue EFLAGS) { | |||
34467 | if (EFLAGS.getOpcode() == X86ISD::ADD) { | |||
34468 | if (isAllOnesConstant(EFLAGS.getOperand(1))) { | |||
34469 | SDValue Carry = EFLAGS.getOperand(0); | |||
34470 | while (Carry.getOpcode() == ISD::TRUNCATE || | |||
34471 | Carry.getOpcode() == ISD::ZERO_EXTEND || | |||
34472 | Carry.getOpcode() == ISD::SIGN_EXTEND || | |||
34473 | Carry.getOpcode() == ISD::ANY_EXTEND || | |||
34474 | (Carry.getOpcode() == ISD::AND && | |||
34475 | isOneConstant(Carry.getOperand(1)))) | |||
34476 | Carry = Carry.getOperand(0); | |||
34477 | if (Carry.getOpcode() == X86ISD::SETCC || | |||
34478 | Carry.getOpcode() == X86ISD::SETCC_CARRY) { | |||
34479 | if (Carry.getConstantOperandVal(0) == X86::COND_B) | |||
34480 | return Carry.getOperand(1); | |||
34481 | } | |||
34482 | } | |||
34483 | } | |||
34484 | ||||
34485 | return SDValue(); | |||
34486 | } | |||
34487 | ||||
34488 | /// Optimize an EFLAGS definition used according to the condition code \p CC | |||
34489 | /// into a simpler EFLAGS value, potentially returning a new \p CC and replacing | |||
34490 | /// uses of chain values. | |||
34491 | static SDValue combineSetCCEFLAGS(SDValue EFLAGS, X86::CondCode &CC, | |||
34492 | SelectionDAG &DAG, | |||
34493 | const X86Subtarget &Subtarget) { | |||
34494 | if (CC == X86::COND_B) | |||
34495 | if (SDValue Flags = combineCarryThroughADD(EFLAGS)) | |||
34496 | return Flags; | |||
34497 | ||||
34498 | if (SDValue R = checkBoolTestSetCCCombine(EFLAGS, CC)) | |||
34499 | return R; | |||
34500 | return combineSetCCAtomicArith(EFLAGS, CC, DAG, Subtarget); | |||
34501 | } | |||
34502 | ||||
34503 | /// Optimize X86ISD::CMOV [LHS, RHS, CONDCODE (e.g. X86::COND_NE), CONDVAL] | |||
34504 | static SDValue combineCMov(SDNode *N, SelectionDAG &DAG, | |||
34505 | TargetLowering::DAGCombinerInfo &DCI, | |||
34506 | const X86Subtarget &Subtarget) { | |||
34507 | SDLoc DL(N); | |||
34508 | ||||
34509 | SDValue FalseOp = N->getOperand(0); | |||
34510 | SDValue TrueOp = N->getOperand(1); | |||
34511 | X86::CondCode CC = (X86::CondCode)N->getConstantOperandVal(2); | |||
34512 | SDValue Cond = N->getOperand(3); | |||
34513 | ||||
34514 | // Try to simplify the EFLAGS and condition code operands. | |||
34515 | // We can't always do this as FCMOV only supports a subset of X86 cond. | |||
34516 | if (SDValue Flags = combineSetCCEFLAGS(Cond, CC, DAG, Subtarget)) { | |||
34517 | if (FalseOp.getValueType() != MVT::f80 || hasFPCMov(CC)) { | |||
34518 | SDValue Ops[] = {FalseOp, TrueOp, DAG.getConstant(CC, DL, MVT::i8), | |||
34519 | Flags}; | |||
34520 | return DAG.getNode(X86ISD::CMOV, DL, N->getValueType(0), Ops); | |||
34521 | } | |||
34522 | } | |||
34523 | ||||
34524 | // If this is a select between two integer constants, try to do some | |||
34525 | // optimizations. Note that the operands are ordered the opposite of SELECT | |||
34526 | // operands. | |||
34527 | if (ConstantSDNode *TrueC = dyn_cast<ConstantSDNode>(TrueOp)) { | |||
34528 | if (ConstantSDNode *FalseC = dyn_cast<ConstantSDNode>(FalseOp)) { | |||
34529 | // Canonicalize the TrueC/FalseC values so that TrueC (the true value) is | |||
34530 | // larger than FalseC (the false value). | |||
34531 | if (TrueC->getAPIntValue().ult(FalseC->getAPIntValue())) { | |||
34532 | CC = X86::GetOppositeBranchCondition(CC); | |||
34533 | std::swap(TrueC, FalseC); | |||
34534 | std::swap(TrueOp, FalseOp); | |||
34535 | } | |||
34536 | ||||
34537 | // Optimize C ? 8 : 0 -> zext(setcc(C)) << 3. Likewise for any pow2/0. | |||
34538 | // This is efficient for any integer data type (including i8/i16) and | |||
34539 | // shift amount. | |||
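// A plausible sequence (for illustration) for 'flags ? 8 : 0' with COND_E:
//   sete %al ; movzbl %al, %eax ; shll $3, %eax
// which avoids both a cmov and a constant materialization.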
34540 | if (FalseC->getAPIntValue() == 0 && TrueC->getAPIntValue().isPowerOf2()) { | |||
34541 | Cond = getSETCC(CC, Cond, DL, DAG); | |||
34542 | ||||
34543 | // Zero extend the condition if needed. | |||
34544 | Cond = DAG.getNode(ISD::ZERO_EXTEND, DL, TrueC->getValueType(0), Cond); | |||
34545 | ||||
34546 | unsigned ShAmt = TrueC->getAPIntValue().logBase2(); | |||
34547 | Cond = DAG.getNode(ISD::SHL, DL, Cond.getValueType(), Cond, | |||
34548 | DAG.getConstant(ShAmt, DL, MVT::i8)); | |||
34549 | return Cond; | |||
34550 | } | |||
34551 | ||||
34552 | // Optimize Cond ? cst+1 : cst -> zext(setcc(C)) + cst. This is efficient | |||
34553 | // for any integer data type, including i8/i16. | |||
34554 | if (FalseC->getAPIntValue()+1 == TrueC->getAPIntValue()) { | |||
34555 | Cond = getSETCC(CC, Cond, DL, DAG); | |||
34556 | ||||
34557 | // Zero extend the condition if needed. | |||
34558 | Cond = DAG.getNode(ISD::ZERO_EXTEND, DL, | |||
34559 | FalseC->getValueType(0), Cond); | |||
34560 | Cond = DAG.getNode(ISD::ADD, DL, Cond.getValueType(), Cond, | |||
34561 | SDValue(FalseC, 0)); | |||
34562 | return Cond; | |||
34563 | } | |||
34564 | ||||
34565 | // Optimize cases that will turn into an LEA instruction. This requires | |||
34566 | // an i32 or i64 and an efficient multiplier (1, 2, 3, 4, 5, 8, 9). | |||
34567 | if (N->getValueType(0) == MVT::i32 || N->getValueType(0) == MVT::i64) { | |||
34568 | uint64_t Diff = TrueC->getZExtValue()-FalseC->getZExtValue(); | |||
34569 | if (N->getValueType(0) == MVT::i32) Diff = (unsigned)Diff; | |||
34570 | ||||
34571 | bool isFastMultiplier = false; | |||
34572 | if (Diff < 10) { | |||
34573 | switch ((unsigned char)Diff) { | |||
34574 | default: break; | |||
34575 | case 1: // result = add base, cond | |||
34576 | case 2: // result = lea base( , cond*2) | |||
34577 | case 3: // result = lea base(cond, cond*2) | |||
34578 | case 4: // result = lea base( , cond*4) | |||
34579 | case 5: // result = lea base(cond, cond*4) | |||
34580 | case 8: // result = lea base( , cond*8) | |||
34581 | case 9: // result = lea base(cond, cond*8) | |||
34582 | isFastMultiplier = true; | |||
34583 | break; | |||
34584 | } | |||
34585 | } | |||
34586 | ||||
34587 | if (isFastMultiplier) { | |||
34588 | APInt Diff = TrueC->getAPIntValue()-FalseC->getAPIntValue(); | |||
34589 | Cond = getSETCC(CC, Cond, DL, DAG); | |||
34590 | // Zero extend the condition if needed. | |||
34591 | Cond = DAG.getNode(ISD::ZERO_EXTEND, DL, FalseC->getValueType(0), | |||
34592 | Cond); | |||
34593 | // Scale the condition by the difference. | |||
34594 | if (Diff != 1) | |||
34595 | Cond = DAG.getNode(ISD::MUL, DL, Cond.getValueType(), Cond, | |||
34596 | DAG.getConstant(Diff, DL, Cond.getValueType())); | |||
34597 | ||||
34598 | // Add the base if non-zero. | |||
34599 | if (FalseC->getAPIntValue() != 0) | |||
34600 | Cond = DAG.getNode(ISD::ADD, DL, Cond.getValueType(), Cond, | |||
34601 | SDValue(FalseC, 0)); | |||
34602 | return Cond; | |||
34603 | } | |||
34604 | } | |||
34605 | } | |||
34606 | } | |||
34607 | ||||
34608 | // Handle these cases: | |||
34609 | // (select (x != c), e, c) -> (select (x != c), e, x), | |||
34610 | // (select (x == c), c, e) -> (select (x == c), x, e) | |||
34611 | // where c is an integer constant, and the "select" is the combination | |||
34612 | // of CMOV and CMP. | |||
34613 | // | |||
34614 | // The rationale for this change is that the conditional-move from a constant | |||
34615 | // needs two instructions, however, conditional-move from a register needs | |||
34616 | // only one instruction. | |||
34617 | // | |||
34618 | // CAVEAT: By replacing a constant with a symbolic value, it may obscure | |||
34619 | // some instruction-combining opportunities. This opt needs to be | |||
34620 | // postponed as late as possible. | |||
34621 | // | |||
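// Schematically (for illustration), for 'x == 42 ? 42 : e':
//   movl $42, %ecx               cmpl $42, %edi
//   cmpl $42, %edi        -->    cmovel %edi, %eax
//   cmovel %ecx, %eax
// i.e. the constant source of the cmov is replaced by x itself.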
34622 | if (!DCI.isBeforeLegalize() && !DCI.isBeforeLegalizeOps()) { | |||
34623 | // The DCI.xxxx conditions are provided to postpone the optimization as | |||
34624 | // late as possible. | |||
34625 | ||||
34626 | ConstantSDNode *CmpAgainst = nullptr; | |||
34627 | if ((Cond.getOpcode() == X86ISD::CMP || Cond.getOpcode() == X86ISD::SUB) && | |||
34628 | (CmpAgainst = dyn_cast<ConstantSDNode>(Cond.getOperand(1))) && | |||
34629 | !isa<ConstantSDNode>(Cond.getOperand(0))) { | |||
34630 | ||||
34631 | if (CC == X86::COND_NE && | |||
34632 | CmpAgainst == dyn_cast<ConstantSDNode>(FalseOp)) { | |||
34633 | CC = X86::GetOppositeBranchCondition(CC); | |||
34634 | std::swap(TrueOp, FalseOp); | |||
34635 | } | |||
34636 | ||||
34637 | if (CC == X86::COND_E && | |||
34638 | CmpAgainst == dyn_cast<ConstantSDNode>(TrueOp)) { | |||
34639 | SDValue Ops[] = { FalseOp, Cond.getOperand(0), | |||
34640 | DAG.getConstant(CC, DL, MVT::i8), Cond }; | |||
34641 | return DAG.getNode(X86ISD::CMOV, DL, N->getValueType(0), Ops); | |||
34642 | } | |||
34643 | } | |||
34644 | } | |||
34645 | ||||
34646 | // Fold and/or of setcc's to double CMOV: | |||
34647 | // (CMOV F, T, ((cc1 | cc2) != 0)) -> (CMOV (CMOV F, T, cc1), T, cc2) | |||
34648 | // (CMOV F, T, ((cc1 & cc2) != 0)) -> (CMOV (CMOV T, F, !cc1), F, !cc2) | |||
34649 | // | |||
34650 | // This combine lets us generate: | |||
34651 | // cmovcc1 (jcc1 if we don't have CMOV) | |||
34652 | // cmovcc2 (same) | |||
34653 | // instead of: | |||
34654 | // setcc1 | |||
34655 | // setcc2 | |||
34656 | // and/or | |||
34657 | // cmovne (jne if we don't have CMOV) | |||
34658 | // When we can't use the CMOV instruction, it might increase branch | |||
34659 | // mispredicts. | |||
34660 | // When we can use CMOV, or when there is no mispredict, this improves | |||
34661 | // throughput and reduces register pressure. | |||
34662 | // | |||
34663 | if (CC == X86::COND_NE) { | |||
34664 | SDValue Flags; | |||
34665 | X86::CondCode CC0, CC1; | |||
34666 | bool isAndSetCC; | |||
34667 | if (checkBoolTestAndOrSetCCCombine(Cond, CC0, CC1, Flags, isAndSetCC)) { | |||
34668 | if (isAndSetCC) { | |||
34669 | std::swap(FalseOp, TrueOp); | |||
34670 | CC0 = X86::GetOppositeBranchCondition(CC0); | |||
34671 | CC1 = X86::GetOppositeBranchCondition(CC1); | |||
34672 | } | |||
34673 | ||||
34674 | SDValue LOps[] = {FalseOp, TrueOp, DAG.getConstant(CC0, DL, MVT::i8), | |||
34675 | Flags}; | |||
34676 | SDValue LCMOV = DAG.getNode(X86ISD::CMOV, DL, N->getValueType(0), LOps); | |||
34677 | SDValue Ops[] = {LCMOV, TrueOp, DAG.getConstant(CC1, DL, MVT::i8), Flags}; | |||
34678 | SDValue CMOV = DAG.getNode(X86ISD::CMOV, DL, N->getValueType(0), Ops); | |||
34679 | return CMOV; | |||
34680 | } | |||
34681 | } | |||
34682 | ||||
34683 | // Fold (CMOV C1, (ADD (CTTZ X), C2), (X != 0)) -> | |||
34684 | // (ADD (CMOV C1-C2, (CTTZ X), (X != 0)), C2) | |||
34685 | // Or (CMOV (ADD (CTTZ X), C2), C1, (X == 0)) -> | |||
34686 | // (ADD (CMOV (CTTZ X), C1-C2, (X == 0)), C2) | |||
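// For illustration: 'X ? CTTZ(X) + 8 : 40' becomes '(X ? CTTZ(X) : 32) + 8';
// the inner CMOV now matches plain CTTZ semantics, which define the result
// for a zero input as the bit width (32 here).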
34687 | if ((CC == X86::COND_NE || CC == X86::COND_E) && | |||
34688 | Cond.getOpcode() == X86ISD::CMP && isNullConstant(Cond.getOperand(1))) { | |||
34689 | SDValue Add = TrueOp; | |||
34690 | SDValue Const = FalseOp; | |||
34691 | // Canonicalize the condition code for easier matching and output. | |||
34692 | if (CC == X86::COND_E) | |||
34693 | std::swap(Add, Const); | |||
34694 | ||||
34695 | // We might have replaced the constant in the cmov with the LHS of the | |||
34696 | // compare. If so change it to the RHS of the compare. | |||
34697 | if (Const == Cond.getOperand(0)) | |||
34698 | Const = Cond.getOperand(1); | |||
34699 | ||||
34700 | // Ok, now make sure that Add is (add (cttz X), C2) and Const is a constant. | |||
34701 | if (isa<ConstantSDNode>(Const) && Add.getOpcode() == ISD::ADD && | |||
34702 | Add.hasOneUse() && isa<ConstantSDNode>(Add.getOperand(1)) && | |||
34703 | (Add.getOperand(0).getOpcode() == ISD::CTTZ_ZERO_UNDEF || | |||
34704 | Add.getOperand(0).getOpcode() == ISD::CTTZ) && | |||
34705 | Add.getOperand(0).getOperand(0) == Cond.getOperand(0)) { | |||
34706 | EVT VT = N->getValueType(0); | |||
34707 | // This should constant fold. | |||
34708 | SDValue Diff = DAG.getNode(ISD::SUB, DL, VT, Const, Add.getOperand(1)); | |||
34709 | SDValue CMov = DAG.getNode(X86ISD::CMOV, DL, VT, Diff, Add.getOperand(0), | |||
34710 | DAG.getConstant(X86::COND_NE, DL, MVT::i8), | |||
34711 | Cond); | |||
34712 | return DAG.getNode(ISD::ADD, DL, VT, CMov, Add.getOperand(1)); | |||
34713 | } | |||
34714 | } | |||
34715 | ||||
34716 | return SDValue(); | |||
34717 | } | |||
34718 | ||||
34719 | /// Different mul shrinking modes. | |||
34720 | enum ShrinkMode { MULS8, MULU8, MULS16, MULU16 }; | |||
34721 | ||||
34722 | static bool canReduceVMulWidth(SDNode *N, SelectionDAG &DAG, ShrinkMode &Mode) { | |||
34723 | EVT VT = N->getOperand(0).getValueType(); | |||
34724 | if (VT.getScalarSizeInBits() != 32) | |||
34725 | return false; | |||
34726 | ||||
34727 | assert(N->getNumOperands() == 2 && "NumOperands of Mul are 2"); | |||
34728 | unsigned SignBits[2] = {1, 1}; | |||
34729 | bool IsPositive[2] = {false, false}; | |||
34730 | for (unsigned i = 0; i < 2; i++) { | |||
34731 | SDValue Opd = N->getOperand(i); | |||
34732 | ||||
34733 | SignBits[i] = DAG.ComputeNumSignBits(Opd); | |||
34734 | IsPositive[i] = DAG.SignBitIsZero(Opd); | |||
34735 | } | |||
34736 | ||||
34737 | bool AllPositive = IsPositive[0] && IsPositive[1]; | |||
34738 | unsigned MinSignBits = std::min(SignBits[0], SignBits[1]); | |||
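// For illustration: 25 or more sign bits in an i32 leave at most
// 32 - 25 + 1 = 8 significant bits, i.e. the signed i8 range used below.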
34739 | // When ranges are from -128 ~ 127, use MULS8 mode. | |||
34740 | if (MinSignBits >= 25) | |||
34741 | Mode = MULS8; | |||
34742 | // When ranges are from 0 ~ 255, use MULU8 mode. | |||
34743 | else if (AllPositive && MinSignBits >= 24) | |||
34744 | Mode = MULU8; | |||
34745 | // When ranges are from -32768 ~ 32767, use MULS16 mode. | |||
34746 | else if (MinSignBits >= 17) | |||
34747 | Mode = MULS16; | |||
34748 | // When ranges are from 0 ~ 65535, use MULU16 mode. | |||
34749 | else if (AllPositive && MinSignBits >= 16) | |||
34750 | Mode = MULU16; | |||
34751 | else | |||
34752 | return false; | |||
34753 | return true; | |||
34754 | } | |||
34755 | ||||
34756 | /// When the operands of vector mul are extended from smaller size values, | |||
34757 | /// like i8 and i16, the mul type may be shrunk to generate more | |||
34758 | /// efficient code. Two typical patterns are handled: | |||
34759 | /// Pattern1: | |||
34760 | /// %2 = sext/zext <N x i8> %1 to <N x i32> | |||
34761 | /// %4 = sext/zext <N x i8> %3 to <N x i32> | |||
34762 | /// or %4 = build_vector <N x i32> %C1, ..., %CN (%C1..%CN are constants) | |||
34763 | /// %5 = mul <N x i32> %2, %4 | |||
34764 | /// | |||
34765 | /// Pattern2: | |||
34766 | /// %2 = zext/sext <N x i16> %1 to <N x i32> | |||
34767 | /// %4 = zext/sext <N x i16> %3 to <N x i32> | |||
34768 | /// or %4 = build_vector <N x i32> %C1, ..., %CN (%C1..%CN are constants) | |||
34769 | /// %5 = mul <N x i32> %2, %4 | |||
34770 | /// | |||
34771 | /// There are four mul shrinking modes: | |||
34772 | /// If %2 == sext32(trunc8(%2)), i.e., the scalar value range of %2 is | |||
34773 | /// -128 to 127, and the scalar value range of %4 is also -128 to 127, | |||
34774 | /// generate pmullw+sext32 for it (MULS8 mode). | |||
34775 | /// If %2 == zext32(trunc8(%2)), i.e., the scalar value range of %2 is | |||
34776 | /// 0 to 255, and the scalar value range of %4 is also 0 to 255, | |||
34777 | /// generate pmullw+zext32 for it (MULU8 mode). | |||
34778 | /// If %2 == sext32(trunc16(%2)), i.e., the scalar value range of %2 is | |||
34779 | /// -32768 to 32767, and the scalar value range of %4 is also -32768 to 32767, | |||
34780 | /// generate pmullw+pmulhw for it (MULS16 mode). | |||
34781 | /// If %2 == zext32(trunc16(%2)), i.e., the scalar value range of %2 is | |||
34782 | /// 0 to 65535, and the scalar value range of %4 is also 0 to 65535, | |||
34783 | /// generate pmullw+pmulhuw for it (MULU16 mode). | |||
34784 | static SDValue reduceVMULWidth(SDNode *N, SelectionDAG &DAG, | |||
34785 | const X86Subtarget &Subtarget) { | |||
34786 | // Check for legality | |||
34787 | // pmullw/pmulhw require SSE2; they are not available in plain SSE. | |||
34788 | if (!Subtarget.hasSSE2()) | |||
34789 | return SDValue(); | |||
34790 | ||||
34791 | // Check for profitability | |||
34792 | // pmulld is supported since SSE4.1. It is better to use pmulld | |||
34793 | // instead of pmullw+pmulhw, except for subtargets where pmulld is slower than | |||
34794 | // the expansion. | |||
34795 | bool OptForMinSize = DAG.getMachineFunction().getFunction().optForMinSize(); | |||
34796 | if (Subtarget.hasSSE41() && (OptForMinSize || !Subtarget.isPMULLDSlow())) | |||
34797 | return SDValue(); | |||
34798 | ||||
34799 | ShrinkMode Mode; | |||
34800 | if (!canReduceVMulWidth(N, DAG, Mode)) | |||
34801 | return SDValue(); | |||
34802 | ||||
34803 | SDLoc DL(N); | |||
34804 | SDValue N0 = N->getOperand(0); | |||
34805 | SDValue N1 = N->getOperand(1); | |||
34806 | EVT VT = N->getOperand(0).getValueType(); | |||
34807 | unsigned NumElts = VT.getVectorNumElements(); | |||
34808 | if ((NumElts % 2) != 0) | |||
34809 | return SDValue(); | |||
34810 | ||||
34811 | unsigned RegSize = 128; | |||
34812 | MVT OpsVT = MVT::getVectorVT(MVT::i16, RegSize / 16); | |||
34813 | EVT ReducedVT = EVT::getVectorVT(*DAG.getContext(), MVT::i16, NumElts); | |||
34814 | ||||
34815 | // Shrink the operands of mul. | |||
34816 | SDValue NewN0 = DAG.getNode(ISD::TRUNCATE, DL, ReducedVT, N0); | |||
34817 | SDValue NewN1 = DAG.getNode(ISD::TRUNCATE, DL, ReducedVT, N1); | |||
34818 | ||||
34819 | if (ExperimentalVectorWideningLegalization || | |||
34820 | NumElts >= OpsVT.getVectorNumElements()) { | |||
34821 | // Generate the lower part of mul: pmullw. For MULU8/MULS8, only the | |||
34822 | // lower part is needed. | |||
34823 | SDValue MulLo = DAG.getNode(ISD::MUL, DL, ReducedVT, NewN0, NewN1); | |||
34824 | if (Mode == MULU8 || Mode == MULS8) | |||
34825 | return DAG.getNode((Mode == MULU8) ? ISD::ZERO_EXTEND : ISD::SIGN_EXTEND, | |||
34826 | DL, VT, MulLo); | |||
34827 | ||||
34828 | MVT ResVT = MVT::getVectorVT(MVT::i32, NumElts / 2); | |||
34829 | // Generate the higher part of mul: pmulhw/pmulhuw. For MULU16/MULS16, | |||
34830 | // the higher part is also needed. | |||
34831 | SDValue MulHi = DAG.getNode(Mode == MULS16 ? ISD::MULHS : ISD::MULHU, DL, | |||
34832 | ReducedVT, NewN0, NewN1); | |||
34833 | ||||
34834 | // Repack the lower part and higher part result of mul into a wider | |||
34835 | // result. | |||
34836 | // Generate shuffle functioning as punpcklwd. | |||
34837 | SmallVector<int, 16> ShuffleMask(NumElts); | |||
34838 | for (unsigned i = 0, e = NumElts / 2; i < e; i++) { | |||
34839 | ShuffleMask[2 * i] = i; | |||
34840 | ShuffleMask[2 * i + 1] = i + NumElts; | |||
34841 | } | |||
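// e.g. with NumElts == 8 this builds the mask <0,8,1,9,2,10,3,11>,
// interleaving the low halves of MulLo and MulHi like punpcklwd does.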
34842 | SDValue ResLo = | |||
34843 | DAG.getVectorShuffle(ReducedVT, DL, MulLo, MulHi, ShuffleMask); | |||
34844 | ResLo = DAG.getBitcast(ResVT, ResLo); | |||
34845 | // Generate shuffle functioning as punpckhwd. | |||
34846 | for (unsigned i = 0, e = NumElts / 2; i < e; i++) { | |||
34847 | ShuffleMask[2 * i] = i + NumElts / 2; | |||
34848 | ShuffleMask[2 * i + 1] = i + NumElts * 3 / 2; | |||
34849 | } | |||
34850 | SDValue ResHi = | |||
34851 | DAG.getVectorShuffle(ReducedVT, DL, MulLo, MulHi, ShuffleMask); | |||
34852 | ResHi = DAG.getBitcast(ResVT, ResHi); | |||
34853 | return DAG.getNode(ISD::CONCAT_VECTORS, DL, VT, ResLo, ResHi); | |||
34854 | } | |||
34855 | ||||
34856 | // When VT.getVectorNumElements() < OpsVT.getVectorNumElements(), we want | |||
34857 | // to legalize the mul explicitly because implicit legalization for type | |||
34858 | // <4 x i16> to <4 x i32> sometimes involves unnecessary unpack | |||
34859 | // instructions which will not exist when we explicitly legalize it by | |||
34860 | // extending <4 x i16> to <8 x i16> (concatenating the <4 x i16> val with | |||
34861 | // <4 x i16> undef). | |||
34862 | // | |||
34863 | // Legalize the operands of mul. | |||
34864 | // FIXME: We may be able to handle non-concatenated vectors by insertion. | |||
34865 | unsigned ReducedSizeInBits = ReducedVT.getSizeInBits(); | |||
34866 | if ((RegSize % ReducedSizeInBits) != 0) | |||
34867 | return SDValue(); | |||
34868 | ||||
34869 | SmallVector<SDValue, 16> Ops(RegSize / ReducedSizeInBits, | |||
34870 | DAG.getUNDEF(ReducedVT)); | |||
34871 | Ops[0] = NewN0; | |||
34872 | NewN0 = DAG.getNode(ISD::CONCAT_VECTORS, DL, OpsVT, Ops); | |||
34873 | Ops[0] = NewN1; | |||
34874 | NewN1 = DAG.getNode(ISD::CONCAT_VECTORS, DL, OpsVT, Ops); | |||
34875 | ||||
34876 | if (Mode == MULU8 || Mode == MULS8) { | |||
34877 | // Generate lower part of mul: pmullw. For MULU8/MULS8, only the lower | |||
34878 | // part is needed. | |||
34879 | SDValue Mul = DAG.getNode(ISD::MUL, DL, OpsVT, NewN0, NewN1); | |||
34880 | ||||
34881 | // Convert the type of the mul result to VT. | |||
34882 | MVT ResVT = MVT::getVectorVT(MVT::i32, RegSize / 32); | |||
34883 | SDValue Res = DAG.getNode(Mode == MULU8 ? ISD::ZERO_EXTEND_VECTOR_INREG | |||
34884 | : ISD::SIGN_EXTEND_VECTOR_INREG, | |||
34885 | DL, ResVT, Mul); | |||
34886 | return DAG.getNode(ISD::EXTRACT_SUBVECTOR, DL, VT, Res, | |||
34887 | DAG.getIntPtrConstant(0, DL)); | |||
34888 | } | |||
34889 | ||||
34890 | // Generate the lower and higher part of mul: pmulhw/pmulhuw. For | |||
34891 | // MULU16/MULS16, both parts are needed. | |||
34892 | SDValue MulLo = DAG.getNode(ISD::MUL, DL, OpsVT, NewN0, NewN1); | |||
34893 | SDValue MulHi = DAG.getNode(Mode == MULS16 ? ISD::MULHS : ISD::MULHU, DL, | |||
34894 | OpsVT, NewN0, NewN1); | |||
34895 | ||||
34896 | // Repack the lower part and higher part result of mul into a wider | |||
34897 | // result. Make sure the type of mul result is VT. | |||
34898 | MVT ResVT = MVT::getVectorVT(MVT::i32, RegSize / 32); | |||
34899 | SDValue Res = getUnpackl(DAG, DL, OpsVT, MulLo, MulHi); | |||
34900 | Res = DAG.getBitcast(ResVT, Res); | |||
34901 | return DAG.getNode(ISD::EXTRACT_SUBVECTOR, DL, VT, Res, | |||
34902 | DAG.getIntPtrConstant(0, DL)); | |||
34903 | } | |||
34904 | ||||
34905 | static SDValue combineMulSpecial(uint64_t MulAmt, SDNode *N, SelectionDAG &DAG, | |||
34906 | EVT VT, const SDLoc &DL) { | |||
34907 | ||||
34908 | auto combineMulShlAddOrSub = [&](int Mult, int Shift, bool isAdd) { | |||
34909 | SDValue Result = DAG.getNode(X86ISD::MUL_IMM, DL, VT, N->getOperand(0), | |||
34910 | DAG.getConstant(Mult, DL, VT)); | |||
34911 | Result = DAG.getNode(ISD::SHL, DL, VT, Result, | |||
34912 | DAG.getConstant(Shift, DL, MVT::i8)); | |||
34913 | Result = DAG.getNode(isAdd ? ISD::ADD : ISD::SUB, DL, VT, Result, | |||
34914 | N->getOperand(0)); | |||
34915 | return Result; | |||
34916 | }; | |||
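// For illustration: combineMulShlAddOrSub(5, 1, /*isAdd*/ true) computes
// ((x * 5) << 1) + x == x * 11, matching 'case 11' below; every case is a
// factorization of the form MulAmt == (Mult << Shift) +/- 1 or
// MulAmt == Mul1 * Mul2 +/- 1.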
34917 | ||||
34918 | auto combineMulMulAddOrSub = [&](int Mul1, int Mul2, bool isAdd) { | |||
34919 | SDValue Result = DAG.getNode(X86ISD::MUL_IMM, DL, VT, N->getOperand(0), | |||
34920 | DAG.getConstant(Mul1, DL, VT)); | |||
34921 | Result = DAG.getNode(X86ISD::MUL_IMM, DL, VT, Result, | |||
34922 | DAG.getConstant(Mul2, DL, VT)); | |||
34923 | Result = DAG.getNode(isAdd ? ISD::ADD : ISD::SUB, DL, VT, Result, | |||
34924 | N->getOperand(0)); | |||
34925 | return Result; | |||
34926 | }; | |||
34927 | ||||
34928 | switch (MulAmt) { | |||
34929 | default: | |||
34930 | break; | |||
34931 | case 11: | |||
34932 | // mul x, 11 => add ((shl (mul x, 5), 1), x) | |||
34933 | return combineMulShlAddOrSub(5, 1, /*isAdd*/ true); | |||
34934 | case 21: | |||
34935 | // mul x, 21 => add ((shl (mul x, 5), 2), x) | |||
34936 | return combineMulShlAddOrSub(5, 2, /*isAdd*/ true); | |||
34937 | case 41: | |||
34938 | // mul x, 41 => add ((shl (mul x, 5), 3), x) | |||
34939 | return combineMulShlAddOrSub(5, 3, /*isAdd*/ true); | |||
34940 | case 22: | |||
34941 | // mul x, 22 => add (add ((shl (mul x, 5), 2), x), x) | |||
34942 | return DAG.getNode(ISD::ADD, DL, VT, N->getOperand(0), | |||
34943 | combineMulShlAddOrSub(5, 2, /*isAdd*/ true)); | |||
34944 | case 19: | |||
34945 | // mul x, 19 => add ((shl (mul x, 9), 1), x) | |||
34946 | return combineMulShlAddOrSub(9, 1, /*isAdd*/ true); | |||
34947 | case 37: | |||
34948 | // mul x, 37 => add ((shl (mul x, 9), 2), x) | |||
34949 | return combineMulShlAddOrSub(9, 2, /*isAdd*/ true); | |||
34950 | case 73: | |||
34951 | // mul x, 73 => add ((shl (mul x, 9), 3), x) | |||
34952 | return combineMulShlAddOrSub(9, 3, /*isAdd*/ true); | |||
34953 | case 13: | |||
34954 | // mul x, 13 => add ((shl (mul x, 3), 2), x) | |||
34955 | return combineMulShlAddOrSub(3, 2, /*isAdd*/ true); | |||
34956 | case 23: | |||
34957 | // mul x, 23 => sub ((shl (mul x, 3), 3), x) | |||
34958 | return combineMulShlAddOrSub(3, 3, /*isAdd*/ false); | |||
34959 | case 26: | |||
34960 | // mul x, 26 => add ((mul (mul x, 5), 5), x) | |||
34961 | return combineMulMulAddOrSub(5, 5, /*isAdd*/ true); | |||
34962 | case 28: | |||
34963 | // mul x, 28 => add ((mul (mul x, 9), 3), x) | |||
34964 | return combineMulMulAddOrSub(9, 3, /*isAdd*/ true); | |||
34965 | case 29: | |||
34966 | // mul x, 29 => add (add ((mul (mul x, 9), 3), x), x) | |||
34967 | return DAG.getNode(ISD::ADD, DL, VT, N->getOperand(0), | |||
34968 | combineMulMulAddOrSub(9, 3, /*isAdd*/ true)); | |||
34969 | } | |||
34970 | ||||
34971 | // Another trick. If the multiplier is a power of 2 plus 2, 4, or 8, we can | |||
34972 | // use a shift followed by a single LEA. | |||
34973 | // First check that it is a sum of two powers of 2, because that's easy. | |||
34974 | // Then count the trailing zeros to find the smaller power of 2. | |||
34975 | // TODO: We can do this even without LEA at a cost of two shifts and an add. | |||
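// Worked example (for illustration): MulAmt == 20 == 16 + 4 gives
// ScaleShift == 2 and ShiftAmt == 4, producing (x << 4) + (x << 2), where
// the second shift can fold into the LEA scale.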
34976 | if (isPowerOf2_64(MulAmt & (MulAmt - 1))) { | |||
34977 | unsigned ScaleShift = countTrailingZeros(MulAmt); | |||
34978 | if (ScaleShift >= 1 && ScaleShift < 4) { | |||
34979 | unsigned ShiftAmt = Log2_64((MulAmt & (MulAmt - 1))); | |||
34980 | SDValue Shift1 = DAG.getNode(ISD::SHL, DL, VT, N->getOperand(0), | |||
34981 | DAG.getConstant(ShiftAmt, DL, MVT::i8)); | |||
34982 | SDValue Shift2 = DAG.getNode(ISD::SHL, DL, VT, N->getOperand(0), | |||
34983 | DAG.getConstant(ScaleShift, DL, MVT::i8)); | |||
34984 | return DAG.getNode(ISD::ADD, DL, VT, Shift1, Shift2); | |||
34985 | } | |||
34986 | } | |||
34987 | ||||
34988 | return SDValue(); | |||
34989 | } | |||
34990 | ||||
34991 | // If the upper 17 bits of each element are zero then we can use PMADDWD, | |||
34992 | // which is always at least as quick as PMULLD, except on KNL. | |||
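// For illustration: if the high 17 bits of every i32 element are zero, each
// element fits in 15 bits, so viewed as vXi16 the odd words are 0 and the
// even words are non-negative as i16; pmaddwd then computes
// even*even + 0*0, which is exactly the i32 product and cannot overflow
// (it stays below 2^30).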
34993 | static SDValue combineMulToPMADDWD(SDNode *N, SelectionDAG &DAG, | |||
34994 | const X86Subtarget &Subtarget) { | |||
34995 | if (!Subtarget.hasSSE2()) | |||
34996 | return SDValue(); | |||
34997 | ||||
34998 | if (Subtarget.isPMADDWDSlow()) | |||
34999 | return SDValue(); | |||
35000 | ||||
35001 | EVT VT = N->getValueType(0); | |||
35002 | ||||
35003 | // Only support vXi32 vectors. | |||
35004 | if (!VT.isVector() || VT.getVectorElementType() != MVT::i32) | |||
35005 | return SDValue(); | |||
35006 | ||||
35007 | // Make sure the vXi16 type is legal. This covers the AVX512 without BWI case. | |||
35008 | // Also allow v2i32 if it will be widened. | |||
35009 | MVT WVT = MVT::getVectorVT(MVT::i16, 2 * VT.getVectorNumElements()); | |||
35010 | if (!((ExperimentalVectorWideningLegalization && VT == MVT::v2i32) || | |||
35011 | DAG.getTargetLoweringInfo().isTypeLegal(WVT))) | |||
35012 | return SDValue(); | |||
35013 | ||||
35014 | SDValue N0 = N->getOperand(0); | |||
35015 | SDValue N1 = N->getOperand(1); | |||
35016 | ||||
35017 | // If we are zero extending in two steps without SSE4.1, it's better to | |||
35018 | // reduce the vmul width instead. | |||
35019 | if (!Subtarget.hasSSE41() && | |||
35020 | (N0.getOpcode() == ISD::ZERO_EXTEND && | |||
35021 | N0.getOperand(0).getScalarValueSizeInBits() <= 8) && | |||
35022 | (N1.getOpcode() == ISD::ZERO_EXTEND && | |||
35023 | N1.getOperand(0).getScalarValueSizeInBits() <= 8)) | |||
35024 | return SDValue(); | |||
35025 | ||||
35026 | APInt Mask17 = APInt::getHighBitsSet(32, 17); | |||
35027 | if (!DAG.MaskedValueIsZero(N1, Mask17) || | |||
35028 | !DAG.MaskedValueIsZero(N0, Mask17)) | |||
35029 | return SDValue(); | |||
35030 | ||||
35031 | // Use SplitOpsAndApply to handle AVX splitting. | |||
35032 | auto PMADDWDBuilder = [](SelectionDAG &DAG, const SDLoc &DL, | |||
35033 | ArrayRef<SDValue> Ops) { | |||
35034 | MVT VT = MVT::getVectorVT(MVT::i32, Ops[0].getValueSizeInBits() / 32); | |||
35035 | return DAG.getNode(X86ISD::VPMADDWD, DL, VT, Ops); | |||
35036 | }; | |||
35037 | return SplitOpsAndApply(DAG, Subtarget, SDLoc(N), VT, | |||
35038 | { DAG.getBitcast(WVT, N0), DAG.getBitcast(WVT, N1) }, | |||
35039 | PMADDWDBuilder); | |||
35040 | } | |||
35041 | ||||
35042 | static SDValue combineMulToPMULDQ(SDNode *N, SelectionDAG &DAG, | |||
35043 | const X86Subtarget &Subtarget) { | |||
35044 | if (!Subtarget.hasSSE2()) | |||
35045 | return SDValue(); | |||
35046 | ||||
35047 | EVT VT = N->getValueType(0); | |||
35048 | ||||
35049 | // Only support vXi64 vectors. | |||
35050 | if (!VT.isVector() || VT.getVectorElementType() != MVT::i64 || | |||
35051 | !DAG.getTargetLoweringInfo().isTypeLegal(VT)) | |||
35052 | return SDValue(); | |||
35053 | ||||
35054 | SDValue N0 = N->getOperand(0); | |||
35055 | SDValue N1 = N->getOperand(1); | |||
35056 | ||||
35057 | // PMULDQ returns the 64-bit result of the signed multiplication of the | |||
35058 | // lower 32 bits. We can lower with this if the sign bits stretch that far. | |||
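// For illustration: if both operands are sign-extended from vXi32,
// ComputeNumSignBits reports at least 33 sign bits, so the 64-bit product
// equals the pmuldq of the low 32-bit halves.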
35059 | if (Subtarget.hasSSE41() && DAG.ComputeNumSignBits(N0) > 32 && | |||
35060 | DAG.ComputeNumSignBits(N1) > 32) { | |||
35061 | auto PMULDQBuilder = [](SelectionDAG &DAG, const SDLoc &DL, | |||
35062 | ArrayRef<SDValue> Ops) { | |||
35063 | return DAG.getNode(X86ISD::PMULDQ, DL, Ops[0].getValueType(), Ops); | |||
35064 | }; | |||
35065 | return SplitOpsAndApply(DAG, Subtarget, SDLoc(N), VT, { N0, N1 }, | |||
35066 | PMULDQBuilder, /*CheckBWI*/false); | |||
35067 | } | |||
35068 | ||||
35069 | // If the upper bits are zero we can use a single pmuludq. | |||
35070 | APInt Mask = APInt::getHighBitsSet(64, 32); | |||
35071 | if (DAG.MaskedValueIsZero(N0, Mask) && DAG.MaskedValueIsZero(N1, Mask)) { | |||
35072 | auto PMULUDQBuilder = [](SelectionDAG &DAG, const SDLoc &DL, | |||
35073 | ArrayRef<SDValue> Ops) { | |||
35074 | return DAG.getNode(X86ISD::PMULUDQ, DL, Ops[0].getValueType(), Ops); | |||
35075 | }; | |||
35076 | return SplitOpsAndApply(DAG, Subtarget, SDLoc(N), VT, { N0, N1 }, | |||
35077 | PMULUDQBuilder, /*CheckBWI*/false); | |||
35078 | } | |||
35079 | ||||
35080 | return SDValue(); | |||
35081 | } | |||
35082 | ||||
35083 | /// Optimize a single multiply with constant into two operations in order to | |||
35084 | /// implement it with two cheaper instructions, e.g. LEA + SHL, LEA + LEA. | |||
35085 | static SDValue combineMul(SDNode *N, SelectionDAG &DAG, | |||
35086 | TargetLowering::DAGCombinerInfo &DCI, | |||
35087 | const X86Subtarget &Subtarget) { | |||
35088 | EVT VT = N->getValueType(0); | |||
35089 | ||||
35090 | // Look for multiply of 2 identical shuffles with a zero vector. Shuffle the | |||
35091 | // result and insert the zero there instead. This can occur due to | |||
35092 | // type legalization of v2i32 multiply to a PMULUDQ pattern. | |||
35093 | SDValue LHS = N->getOperand(0); | |||
35094 | SDValue RHS = N->getOperand(1); | |||
35095 | if (!DCI.isBeforeLegalize() && isa<ShuffleVectorSDNode>(LHS) && | |||
35096 | isa<ShuffleVectorSDNode>(RHS) && LHS.hasOneUse() && RHS.hasOneUse() && | |||
35097 | LHS.getOperand(1) == RHS.getOperand(1) && | |||
35098 | ISD::isBuildVectorAllZeros(LHS.getOperand(1).getNode())) { | |||
35099 | ShuffleVectorSDNode *SVN0 = cast<ShuffleVectorSDNode>(LHS); | |||
35100 | ShuffleVectorSDNode *SVN1 = cast<ShuffleVectorSDNode>(RHS); | |||
35101 | if (SVN0->getMask().equals(SVN1->getMask())) { | |||
35102 | SDLoc dl(N); | |||
35103 | SDValue Mul = DAG.getNode(ISD::MUL, dl, VT, LHS.getOperand(0), | |||
35104 | RHS.getOperand(0)); | |||
35105 | return DAG.getVectorShuffle(VT, dl, Mul, DAG.getConstant(0, dl, VT), | |||
35106 | SVN0->getMask()); | |||
35107 | } | |||
35108 | } | |||
35109 | ||||
35110 | if (SDValue V = combineMulToPMADDWD(N, DAG, Subtarget)) | |||
35111 | return V; | |||
35112 | ||||
35113 | if (SDValue V = combineMulToPMULDQ(N, DAG, Subtarget)) | |||
35114 | return V; | |||
35115 | ||||
35116 | if (DCI.isBeforeLegalize() && VT.isVector()) | |||
35117 | return reduceVMULWidth(N, DAG, Subtarget); | |||
35118 | ||||
35119 | if (!MulConstantOptimization) | |||
35120 | return SDValue(); | |||
35121 | // An imul is usually smaller than the alternative sequence. | |||
35122 | if (DAG.getMachineFunction().getFunction().optForMinSize()) | |||
35123 | return SDValue(); | |||
35124 | ||||
35125 | if (DCI.isBeforeLegalize() || DCI.isCalledByLegalizer()) | |||
35126 | return SDValue(); | |||
35127 | ||||
35128 | if (VT != MVT::i64 && VT != MVT::i32) | |||
35129 | return SDValue(); | |||
35130 | ||||
35131 | ConstantSDNode *C = dyn_cast<ConstantSDNode>(N->getOperand(1)); | |||
35132 | if (!C) | |||
35133 | return SDValue(); | |||
35134 | if (isPowerOf2_64(C->getZExtValue())) | |||
35135 | return SDValue(); | |||
35136 | ||||
35137 | int64_t SignMulAmt = C->getSExtValue(); | |||
35138 | assert(SignMulAmt != INT64_MIN && "Int min should have been handled!"); | |||
35139 | uint64_t AbsMulAmt = SignMulAmt < 0 ? -SignMulAmt : SignMulAmt; | |||
35140 | ||||
35141 | SDLoc DL(N); | |||
35142 | if (AbsMulAmt == 3 || AbsMulAmt == 5 || AbsMulAmt == 9) { | |||
35143 | SDValue NewMul = DAG.getNode(X86ISD::MUL_IMM, DL, VT, N->getOperand(0), | |||
35144 | DAG.getConstant(AbsMulAmt, DL, VT)); | |||
35145 | if (SignMulAmt < 0) | |||
35146 | NewMul = DAG.getNode(ISD::SUB, DL, VT, DAG.getConstant(0, DL, VT), | |||
35147 | NewMul); | |||
35148 | ||||
35149 | return NewMul; | |||
35150 | } | |||
35151 | ||||
35152 | uint64_t MulAmt1 = 0; | |||
35153 | uint64_t MulAmt2 = 0; | |||
35154 | if ((AbsMulAmt % 9) == 0) { | |||
35155 | MulAmt1 = 9; | |||
35156 | MulAmt2 = AbsMulAmt / 9; | |||
35157 | } else if ((AbsMulAmt % 5) == 0) { | |||
35158 | MulAmt1 = 5; | |||
35159 | MulAmt2 = AbsMulAmt / 5; | |||
35160 | } else if ((AbsMulAmt % 3) == 0) { | |||
35161 | MulAmt1 = 3; | |||
35162 | MulAmt2 = AbsMulAmt / 3; | |||
35163 | } | |||
35164 | ||||
35165 | SDValue NewMul; | |||
35166 | // For negative multiply amounts, only allow MulAmt2 to be a power of 2. | |||
35167 | if (MulAmt2 && | |||
35168 | (isPowerOf2_64(MulAmt2) || | |||
35169 | (SignMulAmt >= 0 && (MulAmt2 == 3 || MulAmt2 == 5 || MulAmt2 == 9)))) { | |||
35170 | ||||
35171 | if (isPowerOf2_64(MulAmt2) && | |||
35172 | !(SignMulAmt >= 0 && N->hasOneUse() && | |||
35173 | N->use_begin()->getOpcode() == ISD::ADD)) | |||
35174 | // If the second multiplier is pow2, issue it first. We want the multiply by | |||
35175 | // 3, 5, or 9 to be folded into the addressing mode unless the lone use | |||
35176 | // is an add. Only do this for positive multiply amounts since the | |||
35177 | // negate would prevent it from being used as an address mode anyway. | |||
35178 | std::swap(MulAmt1, MulAmt2); | |||
35179 | ||||
35180 | if (isPowerOf2_64(MulAmt1)) | |||
35181 | NewMul = DAG.getNode(ISD::SHL, DL, VT, N->getOperand(0), | |||
35182 | DAG.getConstant(Log2_64(MulAmt1), DL, MVT::i8)); | |||
35183 | else | |||
35184 | NewMul = DAG.getNode(X86ISD::MUL_IMM, DL, VT, N->getOperand(0), | |||
35185 | DAG.getConstant(MulAmt1, DL, VT)); | |||
35186 | ||||
35187 | if (isPowerOf2_64(MulAmt2)) | |||
35188 | NewMul = DAG.getNode(ISD::SHL, DL, VT, NewMul, | |||
35189 | DAG.getConstant(Log2_64(MulAmt2), DL, MVT::i8)); | |||
35190 | else | |||
35191 | NewMul = DAG.getNode(X86ISD::MUL_IMM, DL, VT, NewMul, | |||
35192 | DAG.getConstant(MulAmt2, DL, VT)); | |||
35193 | ||||
35194 | // Negate the result. | |||
35195 | if (SignMulAmt < 0) | |||
35196 | NewMul = DAG.getNode(ISD::SUB, DL, VT, DAG.getConstant(0, DL, VT), | |||
35197 | NewMul); | |||
35198 | } else if (!Subtarget.slowLEA()) | |||
35199 | NewMul = combineMulSpecial(C->getZExtValue(), N, DAG, VT, DL); | |||
35200 | ||||
35201 | if (!NewMul) { | |||
35202 | assert(C->getZExtValue() != 0 && | |||
35203 | C->getZExtValue() != (VT == MVT::i64 ? UINT64_MAX : UINT32_MAX) && | |||
35204 | "Both cases that could cause potential overflows should have " | |||
35205 | "already been handled."); | |||
35206 | if (isPowerOf2_64(AbsMulAmt - 1)) { | |||
35207 | // (mul x, 2^N + 1) => (add (shl x, N), x) | |||
35208 | NewMul = DAG.getNode( | |||
35209 | ISD::ADD, DL, VT, N->getOperand(0), | |||
35210 | DAG.getNode(ISD::SHL, DL, VT, N->getOperand(0), | |||
35211 | DAG.getConstant(Log2_64(AbsMulAmt - 1), DL, | |||
35212 | MVT::i8))); | |||
35213 | // To negate, subtract the number from zero | |||
35214 | if (SignMulAmt < 0) | |||
35215 | NewMul = DAG.getNode(ISD::SUB, DL, VT, | |||
35216 | DAG.getConstant(0, DL, VT), NewMul); | |||
35217 | } else if (isPowerOf2_64(AbsMulAmt + 1)) { | |||
35218 | // (mul x, 2^N - 1) => (sub (shl x, N), x) | |||
35219 | NewMul = DAG.getNode(ISD::SHL, DL, VT, N->getOperand(0), | |||
35220 | DAG.getConstant(Log2_64(AbsMulAmt + 1), | |||
35221 | DL, MVT::i8)); | |||
35222 | // To negate, reverse the operands of the subtract. | |||
35223 | if (SignMulAmt < 0) | |||
35224 | NewMul = DAG.getNode(ISD::SUB, DL, VT, N->getOperand(0), NewMul); | |||
35225 | else | |||
35226 | NewMul = DAG.getNode(ISD::SUB, DL, VT, NewMul, N->getOperand(0)); | |||
35227 | } else if (SignMulAmt >= 0 && isPowerOf2_64(AbsMulAmt - 2)) { | |||
35228 | // (mul x, 2^N + 2) => (add (add (shl x, N), x), x) | |||
35229 | NewMul = DAG.getNode(ISD::SHL, DL, VT, N->getOperand(0), | |||
35230 | DAG.getConstant(Log2_64(AbsMulAmt - 2), | |||
35231 | DL, MVT::i8)); | |||
35232 | NewMul = DAG.getNode(ISD::ADD, DL, VT, NewMul, N->getOperand(0)); | |||
35233 | NewMul = DAG.getNode(ISD::ADD, DL, VT, NewMul, N->getOperand(0)); | |||
35234 | } else if (SignMulAmt >= 0 && isPowerOf2_64(AbsMulAmt + 2)) { | |||
35235 | // (mul x, 2^N - 2) => (sub (sub (shl x, N), x), x) | |||
35236 | NewMul = DAG.getNode(ISD::SHL, DL, VT, N->getOperand(0), | |||
35237 | DAG.getConstant(Log2_64(AbsMulAmt + 2), | |||
35238 | DL, MVT::i8)); | |||
35239 | NewMul = DAG.getNode(ISD::SUB, DL, VT, NewMul, N->getOperand(0)); | |||
35240 | NewMul = DAG.getNode(ISD::SUB, DL, VT, NewMul, N->getOperand(0)); | |||
35241 | } | |||
35242 | } | |||
35243 | ||||
35244 | return NewMul; | |||
35245 | } | |||
35246 | ||||
35247 | static SDValue combineShiftLeft(SDNode *N, SelectionDAG &DAG) { | |||
35248 | SDValue N0 = N->getOperand(0); | |||
35249 | SDValue N1 = N->getOperand(1); | |||
35250 | ConstantSDNode *N1C = dyn_cast<ConstantSDNode>(N1); | |||
35251 | EVT VT = N0.getValueType(); | |||
35252 | ||||
35253 | // fold (shl (and (setcc_c), c1), c2) -> (and setcc_c, (c1 << c2)) | |||
35254 | // since the result of setcc_c is all zeros or all ones. | |||
35255 | if (VT.isInteger() && !VT.isVector() && | |||
35256 | N1C && N0.getOpcode() == ISD::AND && | |||
35257 | N0.getOperand(1).getOpcode() == ISD::Constant) { | |||
35258 | SDValue N00 = N0.getOperand(0); | |||
35259 | APInt Mask = cast<ConstantSDNode>(N0.getOperand(1))->getAPIntValue(); | |||
35260 | Mask <<= N1C->getAPIntValue(); | |||
35261 | bool MaskOK = false; | |||
35262 | // We can handle cases concerning bit-widening nodes containing setcc_c if | |||
35263 | // we carefully interrogate the mask to make sure the transform is | |||
35264 | // semantics-preserving. | |||
35265 | // The transform is not safe if the result of C1 << C2 exceeds the bitwidth | |||
35266 | // of the underlying setcc_c operation if the setcc_c was zero extended. | |||
35267 | // Consider the following example: | |||
35268 | // zext(setcc_c) -> i32 0x0000FFFF | |||
35269 | // c1 -> i32 0x0000FFFF | |||
35270 | // c2 -> i32 0x00000001 | |||
35271 | // (shl (and (setcc_c), c1), c2) -> i32 0x0001FFFE | |||
35272 | // (and setcc_c, (c1 << c2)) -> i32 0x0000FFFE | |||
35273 | if (N00.getOpcode() == X86ISD::SETCC_CARRY) { | |||
35274 | MaskOK = true; | |||
35275 | } else if (N00.getOpcode() == ISD::SIGN_EXTEND && | |||
35276 | N00.getOperand(0).getOpcode() == X86ISD::SETCC_CARRY) { | |||
35277 | MaskOK = true; | |||
35278 | } else if ((N00.getOpcode() == ISD::ZERO_EXTEND || | |||
35279 | N00.getOpcode() == ISD::ANY_EXTEND) && | |||
35280 | N00.getOperand(0).getOpcode() == X86ISD::SETCC_CARRY) { | |||
35281 | MaskOK = Mask.isIntN(N00.getOperand(0).getValueSizeInBits()); | |||
35282 | } | |||
35283 | if (MaskOK && Mask != 0) { | |||
35284 | SDLoc DL(N); | |||
35285 | return DAG.getNode(ISD::AND, DL, VT, N00, DAG.getConstant(Mask, DL, VT)); | |||
35286 | } | |||
35287 | } | |||
35288 | ||||
35289 | // Hardware support for vector shifts is sparse, which makes us scalarize the | |||
35290 | // vector operations in many cases. Also, on Sandy Bridge ADD is faster than | |||
35291 | // shl. | |||
35292 | // (shl V, 1) -> add V,V | |||
35293 | if (auto *N1BV = dyn_cast<BuildVectorSDNode>(N1)) | |||
35294 | if (auto *N1SplatC = N1BV->getConstantSplatNode()) { | |||
35295 | assert(N0.getValueType().isVector() && "Invalid vector shift type"); | |||
35296 | // We shift all of the values by one. In many cases we do not have | |||
35297 | // hardware support for this operation. This is better expressed as an ADD | |||
35298 | // of two values. | |||
35299 | if (N1SplatC->getAPIntValue() == 1) | |||
35300 | return DAG.getNode(ISD::ADD, SDLoc(N), VT, N0, N0); | |||
35301 | } | |||
35302 | ||||
35303 | return SDValue(); | |||
35304 | } | |||
35305 | ||||
35306 | static SDValue combineShiftRightArithmetic(SDNode *N, SelectionDAG &DAG) { | |||
35307 | SDValue N0 = N->getOperand(0); | |||
35308 | SDValue N1 = N->getOperand(1); | |||
35309 | EVT VT = N0.getValueType(); | |||
35310 | unsigned Size = VT.getSizeInBits(); | |||
35311 | ||||
35312 | // fold (ashr (shl, a, [56,48,32,24,16]), SarConst) | |||
35313 | // into (shl, (sext (a), [56,48,32,24,16] - SarConst)) or | |||
35314 | // into (lshr, (sext (a), SarConst - [56,48,32,24,16])) | |||
35315 | // depending on sign of (SarConst - [56,48,32,24,16]) | |||
35316 | ||||
35317 | // sexts in X86 are MOVs. The MOVs have the same code size | |||
35318 | // as the above SHIFTs (only a SHIFT by 1 has smaller code size). | |||
35319 | // However the MOVs have two advantages over a SHIFT: | |||
35320 | // 1. MOVs can write to a register that differs from the source. | |||
35321 | // 2. MOVs accept memory operands. | |||
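// Concretely, for i32 (hypothetical instances of the fold):
//   (ashr (shl x, 24), 24) -> (sext_inreg x, i8)
//   (ashr (shl x, 24), 25) -> (ashr (sext_inreg x, i8), 1)
//   (ashr (shl x, 24), 23) -> (shl (sext_inreg x, i8), 1)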
35322 | ||||
35323 | if (VT.isVector() || N1.getOpcode() != ISD::Constant || | |||
35324 | N0.getOpcode() != ISD::SHL || !N0.hasOneUse() || | |||
35325 | N0.getOperand(1).getOpcode() != ISD::Constant) | |||
35326 | return SDValue(); | |||
35327 | ||||
35328 | SDValue N00 = N0.getOperand(0); | |||
35329 | SDValue N01 = N0.getOperand(1); | |||
35330 | APInt ShlConst = (cast<ConstantSDNode>(N01))->getAPIntValue(); | |||
35331 | APInt SarConst = (cast<ConstantSDNode>(N1))->getAPIntValue(); | |||
35332 | EVT CVT = N1.getValueType(); | |||
35333 | ||||
35334 | if (SarConst.isNegative()) | |||
35335 | return SDValue(); | |||
35336 | ||||
35337 | for (MVT SVT : { MVT::i8, MVT::i16, MVT::i32 }) { | |||
35338 | unsigned ShiftSize = SVT.getSizeInBits(); | |||
35339 | // Skip types without a corresponding sext/zext and | |||
35340 | // ShlConst values that are not one of [56,48,32,24,16]. | |||
35341 | if (ShiftSize >= Size || ShlConst != Size - ShiftSize) | |||
35342 | continue; | |||
35343 | SDLoc DL(N); | |||
35344 | SDValue NN = | |||
35345 | DAG.getNode(ISD::SIGN_EXTEND_INREG, DL, VT, N00, DAG.getValueType(SVT)); | |||
35346 | SarConst = SarConst - (Size - ShiftSize); | |||
35347 | if (SarConst == 0) | |||
35348 | return NN; | |||
35349 | else if (SarConst.isNegative()) | |||
35350 | return DAG.getNode(ISD::SHL, DL, VT, NN, | |||
35351 | DAG.getConstant(-SarConst, DL, CVT)); | |||
35352 | else | |||
35353 | return DAG.getNode(ISD::SRA, DL, VT, NN, | |||
35354 | DAG.getConstant(SarConst, DL, CVT)); | |||
35355 | } | |||
35356 | return SDValue(); | |||
35357 | } | |||
35358 | ||||
35359 | static SDValue combineShiftRightLogical(SDNode *N, SelectionDAG &DAG, | |||
35360 | TargetLowering::DAGCombinerInfo &DCI) { | |||
35361 | SDValue N0 = N->getOperand(0); | |||
35362 | SDValue N1 = N->getOperand(1); | |||
35363 | EVT VT = N0.getValueType(); | |||
35364 | ||||
35365 | // Only do this on the last DAG combine as it can interfere with other | |||
35366 | // combines. | |||
35367 | if (!DCI.isAfterLegalizeDAG()) | |||
35368 | return SDValue(); | |||
35369 | ||||
35370 | // Try to improve a sequence of srl (and X, C1), C2 by inverting the order. | |||
35371 | // TODO: This is a generic DAG combine that became an x86-only combine to | |||
35372 | // avoid shortcomings in other folds such as bswap, bit-test ('bt'), and | |||
35373 | // and-not ('andn'). | |||
35374 | if (N0.getOpcode() != ISD::AND || !N0.hasOneUse()) | |||
35375 | return SDValue(); | |||
35376 | ||||
35377 | auto *ShiftC = dyn_cast<ConstantSDNode>(N1); | |||
35378 | auto *AndC = dyn_cast<ConstantSDNode>(N0.getOperand(1)); | |||
35379 | if (!ShiftC || !AndC) | |||
35380 | return SDValue(); | |||
35381 | ||||
35382 | // If we can shrink the constant mask below 8 bits or 32 bits, then this | |||
35383 | // transform should reduce code size. It may also enable secondary transforms | |||
35384 | // from improved known-bits analysis or instruction selection. | |||
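// For example: (srl (and X, 0x7F0), 4) --> (and (srl X, 4), 0x7F), where the
// shifted mask now fits in a sign-extended 8-bit immediate.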
35385 | APInt MaskVal = AndC->getAPIntValue(); | |||
35386 | ||||
35387 | // If this can be matched by a zero extend, don't optimize. | |||
35388 | if (MaskVal.isMask()) { | |||
35389 | unsigned TO = MaskVal.countTrailingOnes(); | |||
35390 | if (TO >= 8 && isPowerOf2_32(TO)) | |||
35391 | return SDValue(); | |||
35392 | } | |||
35393 | ||||
35394 | APInt NewMaskVal = MaskVal.lshr(ShiftC->getAPIntValue()); | |||
35395 | unsigned OldMaskSize = MaskVal.getMinSignedBits(); | |||
35396 | unsigned NewMaskSize = NewMaskVal.getMinSignedBits(); | |||
35397 | if ((OldMaskSize > 8 && NewMaskSize <= 8) || | |||
35398 | (OldMaskSize > 32 && NewMaskSize <= 32)) { | |||
35399 | // srl (and X, AndC), ShiftC --> and (srl X, ShiftC), (AndC >> ShiftC) | |||
35400 | SDLoc DL(N); | |||
35401 | SDValue NewMask = DAG.getConstant(NewMaskVal, DL, VT); | |||
35402 | SDValue NewShift = DAG.getNode(ISD::SRL, DL, VT, N0.getOperand(0), N1); | |||
35403 | return DAG.getNode(ISD::AND, DL, VT, NewShift, NewMask); | |||
35404 | } | |||
35405 | return SDValue(); | |||
35406 | } | |||
35407 | ||||
35408 | static SDValue combineShift(SDNode* N, SelectionDAG &DAG, | |||
35409 | TargetLowering::DAGCombinerInfo &DCI, | |||
35410 | const X86Subtarget &Subtarget) { | |||
35411 | if (N->getOpcode() == ISD::SHL) | |||
35412 | if (SDValue V = combineShiftLeft(N, DAG)) | |||
35413 | return V; | |||
35414 | ||||
35415 | if (N->getOpcode() == ISD::SRA) | |||
35416 | if (SDValue V = combineShiftRightArithmetic(N, DAG)) | |||
35417 | return V; | |||
35418 | ||||
35419 | if (N->getOpcode() == ISD::SRL) | |||
35420 | if (SDValue V = combineShiftRightLogical(N, DAG, DCI)) | |||
35421 | return V; | |||
35422 | ||||
35423 | return SDValue(); | |||
35424 | } | |||
35425 | ||||
35426 | static SDValue combineVectorPack(SDNode *N, SelectionDAG &DAG, | |||
35427 | TargetLowering::DAGCombinerInfo &DCI, | |||
35428 | const X86Subtarget &Subtarget) { | |||
35429 | unsigned Opcode = N->getOpcode(); | |||
35430 | assert((X86ISD::PACKSS == Opcode || X86ISD::PACKUS == Opcode) && | |||
35431 | "Unexpected pack opcode"); | |||
35432 | ||||
35433 | EVT VT = N->getValueType(0); | |||
35434 | SDValue N0 = N->getOperand(0); | |||
35435 | SDValue N1 = N->getOperand(1); | |||
35436 | unsigned DstBitsPerElt = VT.getScalarSizeInBits(); | |||
35437 | unsigned SrcBitsPerElt = 2 * DstBitsPerElt; | |||
35438 | assert(N0.getScalarValueSizeInBits() == SrcBitsPerElt && | |||
35439 | N1.getScalarValueSizeInBits() == SrcBitsPerElt && | |||
35440 | "Unexpected PACKSS/PACKUS input type"); | |||
35441 | ||||
35442 | bool IsSigned = (X86ISD::PACKSS == Opcode); | |||
35443 | ||||
35444 | // Constant Folding. | |||
35445 | APInt UndefElts0, UndefElts1; | |||
35446 | SmallVector<APInt, 32> EltBits0, EltBits1; | |||
35447 | if ((N0->isUndef() || N->isOnlyUserOf(N0.getNode())) && | |||
35448 | (N1->isUndef() || N->isOnlyUserOf(N1.getNode())) && | |||
35449 | getTargetConstantBitsFromNode(N0, SrcBitsPerElt, UndefElts0, EltBits0) && | |||
35450 | getTargetConstantBitsFromNode(N1, SrcBitsPerElt, UndefElts1, EltBits1)) { | |||
35451 | unsigned NumLanes = VT.getSizeInBits() / 128; | |||
35452 | unsigned NumDstElts = VT.getVectorNumElements(); | |||
35453 | unsigned NumSrcElts = NumDstElts / 2; | |||
35454 | unsigned NumDstEltsPerLane = NumDstElts / NumLanes; | |||
35455 | unsigned NumSrcEltsPerLane = NumSrcElts / NumLanes; | |||
35456 | ||||
35457 | APInt Undefs(NumDstElts, 0); | |||
35458 | SmallVector<APInt, 32> Bits(NumDstElts, APInt::getNullValue(DstBitsPerElt)); | |||
35459 | for (unsigned Lane = 0; Lane != NumLanes; ++Lane) { | |||
35460 | for (unsigned Elt = 0; Elt != NumDstEltsPerLane; ++Elt) { | |||
35461 | unsigned SrcIdx = Lane * NumSrcEltsPerLane + Elt % NumSrcEltsPerLane; | |||
35462 | auto &UndefElts = (Elt >= NumSrcEltsPerLane ? UndefElts1 : UndefElts0); | |||
35463 | auto &EltBits = (Elt >= NumSrcEltsPerLane ? EltBits1 : EltBits0); | |||
35464 | ||||
35465 | if (UndefElts[SrcIdx]) { | |||
35466 | Undefs.setBit(Lane * NumDstEltsPerLane + Elt); | |||
35467 | continue; | |||
35468 | } | |||
35469 | ||||
35470 | APInt &Val = EltBits[SrcIdx]; | |||
35471 | if (IsSigned) { | |||
35472 | // PACKSS: Truncate signed value with signed saturation. | |||
35473 | // Source values less than dst minint are saturated to minint. | |||
35474 | // Source values greater than dst maxint are saturated to maxint. | |||
35475 | if (Val.isSignedIntN(DstBitsPerElt)) | |||
35476 | Val = Val.trunc(DstBitsPerElt); | |||
35477 | else if (Val.isNegative()) | |||
35478 | Val = APInt::getSignedMinValue(DstBitsPerElt); | |||
35479 | else | |||
35480 | Val = APInt::getSignedMaxValue(DstBitsPerElt); | |||
35481 | } else { | |||
35482 | // PACKUS: Truncate signed value with unsigned saturation. | |||
35483 | // Source values less than zero are saturated to zero. | |||
35484 | // Source values greater than dst maxuint are saturated to maxuint. | |||
35485 | if (Val.isIntN(DstBitsPerElt)) | |||
35486 | Val = Val.trunc(DstBitsPerElt); | |||
35487 | else if (Val.isNegative()) | |||
35488 | Val = APInt::getNullValue(DstBitsPerElt); | |||
35489 | else | |||
35490 | Val = APInt::getAllOnesValue(DstBitsPerElt); | |||
35491 | } | |||
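// Both modes on an i16 -> i8 pack, for illustration:
//   0x0123 saturates to 0x7F (PACKSS) and 0xFF (PACKUS);
//   0xFF80 (-128) truncates to 0x80 (PACKSS) and saturates to 0x00 (PACKUS).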
35492 | Bits[Lane * NumDstEltsPerLane + Elt] = Val; | |||
35493 | } | |||
35494 | } | |||
35495 | ||||
35496 | return getConstVector(Bits, Undefs, VT.getSimpleVT(), DAG, SDLoc(N)); | |||
35497 | } | |||
35498 | ||||
35499 | // Try to combine a PACKUSWB/PACKSSWB-implemented truncate with a regular | |||
35500 | // truncate to create a larger truncate. | |||
35501 | if (Subtarget.hasAVX512() && | |||
35502 | N0.getOpcode() == ISD::TRUNCATE && N1.isUndef() && VT == MVT::v16i8 && | |||
35503 | N0.getOperand(0).getValueType() == MVT::v8i32) { | |||
35504 | if ((IsSigned && DAG.ComputeNumSignBits(N0) > 8) || | |||
35505 | (!IsSigned && | |||
35506 | DAG.MaskedValueIsZero(N0, APInt::getHighBitsSet(16, 8)))) { | |||
35507 | if (Subtarget.hasVLX()) | |||
35508 | return DAG.getNode(X86ISD::VTRUNC, SDLoc(N), VT, N0.getOperand(0)); | |||
35509 | ||||
35510 | // Widen input to v16i32 so we can truncate that. | |||
35511 | SDLoc dl(N); | |||
35512 | SDValue Concat = DAG.getNode(ISD::CONCAT_VECTORS, dl, MVT::v16i32, | |||
35513 | N0.getOperand(0), DAG.getUNDEF(MVT::v8i32)); | |||
35514 | return DAG.getNode(ISD::TRUNCATE, SDLoc(N), VT, Concat); | |||
35515 | } | |||
35516 | } | |||
35517 | ||||
35518 | // Attempt to combine as shuffle. | |||
35519 | SDValue Op(N, 0); | |||
35520 | if (SDValue Res = | |||
35521 | combineX86ShufflesRecursively({Op}, 0, Op, {0}, {}, /*Depth*/ 1, | |||
35522 | /*HasVarMask*/ false, | |||
35523 | /*AllowVarMask*/ true, DAG, Subtarget)) | |||
35524 | return Res; | |||
35525 | ||||
35526 | return SDValue(); | |||
35527 | } | |||
35528 | ||||
35529 | static SDValue combineVectorShiftVar(SDNode *N, SelectionDAG &DAG, | |||
35530 | TargetLowering::DAGCombinerInfo &DCI, | |||
35531 | const X86Subtarget &Subtarget) { | |||
35532 | assert((X86ISD::VSHL == N->getOpcode() || X86ISD::VSRA == N->getOpcode() || | |||
35533 | X86ISD::VSRL == N->getOpcode()) && | |||
35534 | "Unexpected shift opcode"); | |||
35535 | EVT VT = N->getValueType(0); | |||
35536 | ||||
35537 | // Shift zero -> zero. | |||
35538 | if (ISD::isBuildVectorAllZeros(N->getOperand(0).getNode())) | |||
35539 | return DAG.getConstant(0, SDLoc(N), VT); | |||
35540 | ||||
35541 | APInt KnownUndef, KnownZero; | |||
35542 | const TargetLowering &TLI = DAG.getTargetLoweringInfo(); | |||
35543 | APInt DemandedElts = APInt::getAllOnesValue(VT.getVectorNumElements()); | |||
35544 | if (TLI.SimplifyDemandedVectorElts(SDValue(N, 0), DemandedElts, KnownUndef, | |||
35545 | KnownZero, DCI)) | |||
35546 | return SDValue(N, 0); | |||
35547 | ||||
35548 | return SDValue(); | |||
35549 | } | |||
35550 | ||||
35551 | static SDValue combineVectorShiftImm(SDNode *N, SelectionDAG &DAG, | |||
35552 | TargetLowering::DAGCombinerInfo &DCI, | |||
35553 | const X86Subtarget &Subtarget) { | |||
35554 | unsigned Opcode = N->getOpcode(); | |||
35555 | assert((X86ISD::VSHLI == Opcode || X86ISD::VSRAI == Opcode || | |||
35556 | X86ISD::VSRLI == Opcode) && | |||
35557 | "Unexpected shift opcode"); | |||
35558 | bool LogicalShift = X86ISD::VSHLI == Opcode || X86ISD::VSRLI == Opcode; | |||
35559 | EVT VT = N->getValueType(0); | |||
35560 | SDValue N0 = N->getOperand(0); | |||
35561 | SDValue N1 = N->getOperand(1); | |||
35562 | unsigned NumBitsPerElt = VT.getScalarSizeInBits(); | |||
35563 | assert(VT == N0.getValueType() && (NumBitsPerElt % 8) == 0 && | |||
35564 | "Unexpected value type"); | |||
35565 | assert(N1.getValueType() == MVT::i8 && "Unexpected shift amount type"); | |||
35566 | ||||
35567 | // Out of range logical bit shifts are guaranteed to be zero. | |||
35568 | // Out of range arithmetic bit shifts splat the sign bit. | |||
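// For v4i32, e.g.: (VSRLI X, 32) folds to zero, while (VSRAI X, 35) is
// clamped to (VSRAI X, 31).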
35569 | unsigned ShiftVal = cast<ConstantSDNode>(N1)->getZExtValue(); | |||
35570 | if (ShiftVal >= NumBitsPerElt) { | |||
35571 | if (LogicalShift) | |||
35572 | return DAG.getConstant(0, SDLoc(N), VT); | |||
35573 | else | |||
35574 | ShiftVal = NumBitsPerElt - 1; | |||
35575 | } | |||
35576 | ||||
35577 | // Shift N0 by zero -> N0. | |||
35578 | if (!ShiftVal) | |||
35579 | return N0; | |||
35580 | ||||
35581 | // Shift zero -> zero. | |||
35582 | if (ISD::isBuildVectorAllZeros(N0.getNode())) | |||
35583 | return DAG.getConstant(0, SDLoc(N), VT); | |||
35584 | ||||
35585 | // Fold (VSRAI (VSRAI X, C1), C2) --> (VSRAI X, (C1 + C2)) with (C1 + C2) | |||
35586 | // clamped to (NumBitsPerElt - 1). | |||
35587 | if (Opcode == X86ISD::VSRAI && N0.getOpcode() == X86ISD::VSRAI) { | |||
35588 | unsigned ShiftVal2 = cast<ConstantSDNode>(N0.getOperand(1))->getZExtValue(); | |||
35589 | unsigned NewShiftVal = ShiftVal + ShiftVal2; | |||
35590 | if (NewShiftVal >= NumBitsPerElt) | |||
35591 | NewShiftVal = NumBitsPerElt - 1; | |||
35592 | return DAG.getNode(X86ISD::VSRAI, SDLoc(N), VT, N0.getOperand(0), | |||
35593 | DAG.getConstant(NewShiftVal, SDLoc(N), MVT::i8)); | |||
35594 | } | |||
35595 | ||||
35596 | // We can decode 'whole byte' logical bit shifts as shuffles. | |||
35597 | if (LogicalShift && (ShiftVal % 8) == 0) { | |||
35598 | SDValue Op(N, 0); | |||
35599 | if (SDValue Res = combineX86ShufflesRecursively( | |||
35600 | {Op}, 0, Op, {0}, {}, /*Depth*/ 1, | |||
35601 | /*HasVarMask*/ false, /*AllowVarMask*/ true, DAG, Subtarget)) | |||
35602 | return Res; | |||
35603 | } | |||
35604 | ||||
35605 | // Constant Folding. | |||
35606 | APInt UndefElts; | |||
35607 | SmallVector<APInt, 32> EltBits; | |||
35608 | if (N->isOnlyUserOf(N0.getNode()) && | |||
35609 | getTargetConstantBitsFromNode(N0, NumBitsPerElt, UndefElts, EltBits)) { | |||
35610 | assert(EltBits.size() == VT.getVectorNumElements() && | |||
35611 | "Unexpected shift value type"); | |||
35612 | for (APInt &Elt : EltBits) { | |||
35613 | if (X86ISD::VSHLI == Opcode) | |||
35614 | Elt <<= ShiftVal; | |||
35615 | else if (X86ISD::VSRAI == Opcode) | |||
35616 | Elt.ashrInPlace(ShiftVal); | |||
35617 | else | |||
35618 | Elt.lshrInPlace(ShiftVal); | |||
35619 | } | |||
35620 | return getConstVector(EltBits, UndefElts, VT.getSimpleVT(), DAG, SDLoc(N)); | |||
35621 | } | |||
35622 | ||||
35623 | const TargetLowering &TLI = DAG.getTargetLoweringInfo(); | |||
35624 | if (TLI.SimplifyDemandedBits(SDValue(N, 0), | |||
35625 | APInt::getAllOnesValue(NumBitsPerElt), DCI)) | |||
35626 | return SDValue(N, 0); | |||
35627 | ||||
35628 | return SDValue(); | |||
35629 | } | |||
35630 | ||||
35631 | static SDValue combineVectorInsert(SDNode *N, SelectionDAG &DAG, | |||
35632 | TargetLowering::DAGCombinerInfo &DCI, | |||
35633 | const X86Subtarget &Subtarget) { | |||
35634 | assert(((N->getOpcode() == X86ISD::PINSRB && | |||
35635 | N->getValueType(0) == MVT::v16i8) || | |||
35636 | (N->getOpcode() == X86ISD::PINSRW && | |||
35637 | N->getValueType(0) == MVT::v8i16)) && | |||
35638 | "Unexpected vector insertion"); | |||
35639 | ||||
35640 | // Attempt to combine PINSRB/PINSRW patterns to a shuffle. | |||
35641 | SDValue Op(N, 0); | |||
35642 | if (SDValue Res = | |||
35643 | combineX86ShufflesRecursively({Op}, 0, Op, {0}, {}, /*Depth*/ 1, | |||
35644 | /*HasVarMask*/ false, | |||
35645 | /*AllowVarMask*/ true, DAG, Subtarget)) | |||
35646 | return Res; | |||
35647 | ||||
35648 | return SDValue(); | |||
35649 | } | |||
35650 | ||||
35651 | /// Recognize the distinctive (AND (setcc ...) (setcc ..)) where both setccs | |||
35652 | /// reference the same FP CMP, and rewrite for CMPEQSS and friends. Likewise for | |||
35653 | /// OR -> CMPNEQSS. | |||
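/// For example, an 'oeq' f64 compare sets ZF and clears PF, so it is lowered
/// as (and (setcc COND_E, (cmp x, y)), (setcc COND_NP, (cmp x, y))); this
/// combine collapses that into one FSETCC (cmpeqsd) plus a low-bit extract.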
35654 | static SDValue combineCompareEqual(SDNode *N, SelectionDAG &DAG, | |||
35655 | TargetLowering::DAGCombinerInfo &DCI, | |||
35656 | const X86Subtarget &Subtarget) { | |||
35657 | unsigned opcode; | |||
35658 | ||||
35659 | // SSE1 supports CMP{eq|ne}SS, and SSE2 added CMP{eq|ne}SD, but | |||
35660 | // we're requiring SSE2 for both. | |||
35661 | if (Subtarget.hasSSE2() && isAndOrOfSetCCs(SDValue(N, 0U), opcode)) { | |||
35662 | SDValue N0 = N->getOperand(0); | |||
35663 | SDValue N1 = N->getOperand(1); | |||
35664 | SDValue CMP0 = N0->getOperand(1); | |||
35665 | SDValue CMP1 = N1->getOperand(1); | |||
35666 | SDLoc DL(N); | |||
35667 | ||||
35668 | // The SETCCs should both refer to the same CMP. | |||
35669 | if (CMP0.getOpcode() != X86ISD::CMP || CMP0 != CMP1) | |||
35670 | return SDValue(); | |||
35671 | ||||
35672 | SDValue CMP00 = CMP0->getOperand(0); | |||
35673 | SDValue CMP01 = CMP0->getOperand(1); | |||
35674 | EVT VT = CMP00.getValueType(); | |||
35675 | ||||
35676 | if (VT == MVT::f32 || VT == MVT::f64) { | |||
35677 | bool ExpectingFlags = false; | |||
35678 | // Check for any users that want flags: | |||
35679 | for (SDNode::use_iterator UI = N->use_begin(), UE = N->use_end(); | |||
35680 | !ExpectingFlags && UI != UE; ++UI) | |||
35681 | switch (UI->getOpcode()) { | |||
35682 | default: | |||
35683 | case ISD::BR_CC: | |||
35684 | case ISD::BRCOND: | |||
35685 | case ISD::SELECT: | |||
35686 | ExpectingFlags = true; | |||
35687 | break; | |||
35688 | case ISD::CopyToReg: | |||
35689 | case ISD::SIGN_EXTEND: | |||
35690 | case ISD::ZERO_EXTEND: | |||
35691 | case ISD::ANY_EXTEND: | |||
35692 | break; | |||
35693 | } | |||
35694 | ||||
35695 | if (!ExpectingFlags) { | |||
35696 | enum X86::CondCode cc0 = (enum X86::CondCode)N0.getConstantOperandVal(0); | |||
35697 | enum X86::CondCode cc1 = (enum X86::CondCode)N1.getConstantOperandVal(0); | |||
35698 | ||||
35699 | if (cc1 == X86::COND_E || cc1 == X86::COND_NE) { | |||
35700 | X86::CondCode tmp = cc0; | |||
35701 | cc0 = cc1; | |||
35702 | cc1 = tmp; | |||
35703 | } | |||
35704 | ||||
35705 | if ((cc0 == X86::COND_E && cc1 == X86::COND_NP) || | |||
35706 | (cc0 == X86::COND_NE && cc1 == X86::COND_P)) { | |||
35707 | // FIXME: need symbolic constants for these magic numbers. | |||
35708 | // See X86ATTInstPrinter.cpp:printSSECC(). | |||
35709 | unsigned x86cc = (cc0 == X86::COND_E) ? 0 : 4; | |||
35710 | if (Subtarget.hasAVX512()) { | |||
35711 | SDValue FSetCC = | |||
35712 | DAG.getNode(X86ISD::FSETCCM, DL, MVT::v1i1, CMP00, CMP01, | |||
35713 | DAG.getConstant(x86cc, DL, MVT::i8)); | |||
35714 | // Need to fill with zeros to ensure the bitcast will produce zeroes | |||
35715 | // for the upper bits. An EXTRACT_ELEMENT here wouldn't guarantee that. | |||
35716 | SDValue Ins = DAG.getNode(ISD::INSERT_SUBVECTOR, DL, MVT::v16i1, | |||
35717 | DAG.getConstant(0, DL, MVT::v16i1), | |||
35718 | FSetCC, DAG.getIntPtrConstant(0, DL)); | |||
35719 | return DAG.getZExtOrTrunc(DAG.getBitcast(MVT::i16, Ins), DL, | |||
35720 | N->getSimpleValueType(0)); | |||
35721 | } | |||
35722 | SDValue OnesOrZeroesF = DAG.getNode(X86ISD::FSETCC, DL, | |||
35723 | CMP00.getValueType(), CMP00, CMP01, | |||
35724 | DAG.getConstant(x86cc, DL, | |||
35725 | MVT::i8)); | |||
35726 | ||||
35727 | bool is64BitFP = (CMP00.getValueType() == MVT::f64); | |||
35728 | MVT IntVT = is64BitFP ? MVT::i64 : MVT::i32; | |||
35729 | ||||
35730 | if (is64BitFP && !Subtarget.is64Bit()) { | |||
35731 | // On a 32-bit target, we cannot bitcast the 64-bit float to a | |||
35732 | // 64-bit integer, since that's not a legal type. Since | |||
35733 | // OnesOrZeroesF is all ones or all zeroes, we don't need all the | |||
35734 | // bits, but can do this little dance to extract the lowest 32 bits | |||
35735 | // and work with those going forward. | |||
35736 | SDValue Vector64 = DAG.getNode(ISD::SCALAR_TO_VECTOR, DL, MVT::v2f64, | |||
35737 | OnesOrZeroesF); | |||
35738 | SDValue Vector32 = DAG.getBitcast(MVT::v4f32, Vector64); | |||
35739 | OnesOrZeroesF = DAG.getNode(ISD::EXTRACT_VECTOR_ELT, DL, MVT::f32, | |||
35740 | Vector32, DAG.getIntPtrConstant(0, DL)); | |||
35741 | IntVT = MVT::i32; | |||
35742 | } | |||
35743 | ||||
35744 | SDValue OnesOrZeroesI = DAG.getBitcast(IntVT, OnesOrZeroesF); | |||
35745 | SDValue ANDed = DAG.getNode(ISD::AND, DL, IntVT, OnesOrZeroesI, | |||
35746 | DAG.getConstant(1, DL, IntVT)); | |||
35747 | SDValue OneBitOfTruth = DAG.getNode(ISD::TRUNCATE, DL, MVT::i8, | |||
35748 | ANDed); | |||
35749 | return OneBitOfTruth; | |||
35750 | } | |||
35751 | } | |||
35752 | } | |||
35753 | } | |||
35754 | return SDValue(); | |||
35755 | } | |||
35756 | ||||
35757 | /// Try to fold: (and (xor X, -1), Y) -> (andnp X, Y). | |||
35758 | static SDValue combineANDXORWithAllOnesIntoANDNP(SDNode *N, SelectionDAG &DAG) { | |||
35759 | assert(N->getOpcode() == ISD::AND); | |||
35760 | ||||
35761 | MVT VT = N->getSimpleValueType(0); | |||
35762 | if (!VT.is128BitVector() && !VT.is256BitVector() && !VT.is512BitVector()) | |||
35763 | return SDValue(); | |||
35764 | ||||
35765 | SDValue X, Y; | |||
35766 | SDValue N0 = peekThroughBitcasts(N->getOperand(0)); | |||
35767 | SDValue N1 = peekThroughBitcasts(N->getOperand(1)); | |||
35768 | if (N0.getOpcode() == ISD::XOR && | |||
35769 | ISD::isBuildVectorAllOnes(N0.getOperand(1).getNode())) { | |||
35770 | X = N0.getOperand(0); | |||
35771 | Y = N1; | |||
35772 | } else if (N1.getOpcode() == ISD::XOR && | |||
35773 | ISD::isBuildVectorAllOnes(N1.getOperand(1).getNode())) { | |||
35774 | X = N1.getOperand(0); | |||
35775 | Y = N0; | |||
35776 | } else | |||
35777 | return SDValue(); | |||
35778 | ||||
35779 | X = DAG.getBitcast(VT, X); | |||
35780 | Y = DAG.getBitcast(VT, Y); | |||
35781 | return DAG.getNode(X86ISD::ANDNP, SDLoc(N), VT, X, Y); | |||
35782 | } | |||
35783 | ||||
35784 | // On AVX/AVX2 the type v8i1 is legalized to v8i16, which is an XMM sized | |||
35785 | // register. In most cases we actually compare or select YMM-sized registers | |||
35786 | // and mixing the two types creates horrible code. This method optimizes | |||
35787 | // some of the transition sequences. | |||
35788 | // Even with AVX-512 this is still useful for removing casts around logical | |||
35789 | // operations on vXi1 mask types. | |||
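// For example: (sext (and (trunc A), (trunc B))) is rebuilt below as
// (sext_inreg (and A, B)), eliding the narrow op and the cross-width casts.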
35790 | static SDValue PromoteMaskArithmetic(SDNode *N, SelectionDAG &DAG, | |||
35791 | const X86Subtarget &Subtarget) { | |||
35792 | EVT VT = N->getValueType(0); | |||
35793 | assert(VT.isVector() && "Expected vector type"); | |||
35794 | ||||
35795 | assert((N->getOpcode() == ISD::ANY_EXTEND || | |||
35796 | N->getOpcode() == ISD::ZERO_EXTEND || | |||
35797 | N->getOpcode() == ISD::SIGN_EXTEND) && "Invalid Node"); | |||
35798 | ||||
35799 | SDValue Narrow = N->getOperand(0); | |||
35800 | EVT NarrowVT = Narrow.getValueType(); | |||
35801 | ||||
35802 | if (Narrow->getOpcode() != ISD::XOR && | |||
35803 | Narrow->getOpcode() != ISD::AND && | |||
35804 | Narrow->getOpcode() != ISD::OR) | |||
35805 | return SDValue(); | |||
35806 | ||||
35807 | SDValue N0 = Narrow->getOperand(0); | |||
35808 | SDValue N1 = Narrow->getOperand(1); | |||
35809 | SDLoc DL(Narrow); | |||
35810 | ||||
35811 | // The left side has to be a trunc. | |||
35812 | if (N0.getOpcode() != ISD::TRUNCATE) | |||
35813 | return SDValue(); | |||
35814 | ||||
35815 | // The input to the trunc has to match the wide result type. | |||
35816 | if (N0->getOperand(0).getValueType() != VT) | |||
35817 | return SDValue(); | |||
35818 | ||||
35819 | // The right side has to be a 'trunc' or a constant vector. | |||
35820 | bool RHSTrunc = N1.getOpcode() == ISD::TRUNCATE && | |||
35821 | N1.getOperand(0).getValueType() == VT; | |||
35822 | if (!RHSTrunc && | |||
35823 | !ISD::isBuildVectorOfConstantSDNodes(N1.getNode())) | |||
35824 | return SDValue(); | |||
35825 | ||||
35826 | const TargetLowering &TLI = DAG.getTargetLoweringInfo(); | |||
35827 | ||||
35828 | if (!TLI.isOperationLegalOrPromote(Narrow->getOpcode(), VT)) | |||
35829 | return SDValue(); | |||
35830 | ||||
35831 | // Set N0 and N1 to hold the inputs to the new wide operation. | |||
35832 | N0 = N0->getOperand(0); | |||
35833 | if (RHSTrunc) | |||
35834 | N1 = N1->getOperand(0); | |||
35835 | else | |||
35836 | N1 = DAG.getNode(ISD::ZERO_EXTEND, DL, VT, N1); | |||
35837 | ||||
35838 | // Generate the wide operation. | |||
35839 | SDValue Op = DAG.getNode(Narrow->getOpcode(), DL, VT, N0, N1); | |||
35840 | unsigned Opcode = N->getOpcode(); | |||
35841 | switch (Opcode) { | |||
35842 | default: llvm_unreachable("Unexpected opcode"); | |||
35843 | case ISD::ANY_EXTEND: | |||
35844 | return Op; | |||
35845 | case ISD::ZERO_EXTEND: | |||
35846 | return DAG.getZeroExtendInReg(Op, DL, NarrowVT.getScalarType()); | |||
35847 | case ISD::SIGN_EXTEND: | |||
35848 | return DAG.getNode(ISD::SIGN_EXTEND_INREG, DL, VT, | |||
35849 | Op, DAG.getValueType(NarrowVT)); | |||
35850 | } | |||
35851 | } | |||
35852 | ||||
35853 | /// If both input operands of a logic op are being cast from floating point | |||
35854 | /// types, try to convert this into a floating point logic node to avoid | |||
35855 | /// unnecessary moves from SSE to integer registers. | |||
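/// For example, with SSE1 and i32:
///   (and (i32 bitcast (f32 X)), (i32 bitcast (f32 Y)))
///     -> (i32 bitcast (FAND X, Y))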
35856 | static SDValue convertIntLogicToFPLogic(SDNode *N, SelectionDAG &DAG, | |||
35857 | const X86Subtarget &Subtarget) { | |||
35858 | unsigned FPOpcode = ISD::DELETED_NODE; | |||
35859 | if (N->getOpcode() == ISD::AND) | |||
35860 | FPOpcode = X86ISD::FAND; | |||
35861 | else if (N->getOpcode() == ISD::OR) | |||
35862 | FPOpcode = X86ISD::FOR; | |||
35863 | else if (N->getOpcode() == ISD::XOR) | |||
35864 | FPOpcode = X86ISD::FXOR; | |||
35865 | ||||
35866 | assert(FPOpcode != ISD::DELETED_NODE && | |||
35867 | "Unexpected input node for FP logic conversion"); | |||
35868 | ||||
35869 | EVT VT = N->getValueType(0); | |||
35870 | SDValue N0 = N->getOperand(0); | |||
35871 | SDValue N1 = N->getOperand(1); | |||
35872 | SDLoc DL(N); | |||
35873 | if (N0.getOpcode() == ISD::BITCAST && N1.getOpcode() == ISD::BITCAST && | |||
35874 | ((Subtarget.hasSSE1() && VT == MVT::i32) || | |||
35875 | (Subtarget.hasSSE2() && VT == MVT::i64))) { | |||
35876 | SDValue N00 = N0.getOperand(0); | |||
35877 | SDValue N10 = N1.getOperand(0); | |||
35878 | EVT N00Type = N00.getValueType(); | |||
35879 | EVT N10Type = N10.getValueType(); | |||
35880 | if (N00Type.isFloatingPoint() && N10Type.isFloatingPoint()) { | |||
35881 | SDValue FPLogic = DAG.getNode(FPOpcode, DL, N00Type, N00, N10); | |||
35882 | return DAG.getBitcast(VT, FPLogic); | |||
35883 | } | |||
35884 | } | |||
35885 | return SDValue(); | |||
35886 | } | |||
35887 | ||||
35888 | /// If this is a zero/all-bits result that is bitwise-anded with a low-bits | |||
35889 | /// mask (Mask == 1 for the x86 lowering of a SETCC + ZEXT), replace the 'and' | |||
35890 | /// with a shift-right to eliminate loading the vector constant mask value. | |||
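/// For example, if every lane of X is known to be all-sign-bits (v4i32):
///   (and X, (splat 1)) -> (vsrli X, 31)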
35891 | static SDValue combineAndMaskToShift(SDNode *N, SelectionDAG &DAG, | |||
35892 | const X86Subtarget &Subtarget) { | |||
35893 | SDValue Op0 = peekThroughBitcasts(N->getOperand(0)); | |||
35894 | SDValue Op1 = peekThroughBitcasts(N->getOperand(1)); | |||
35895 | EVT VT0 = Op0.getValueType(); | |||
35896 | EVT VT1 = Op1.getValueType(); | |||
35897 | ||||
35898 | if (VT0 != VT1 || !VT0.isSimple() || !VT0.isInteger()) | |||
35899 | return SDValue(); | |||
35900 | ||||
35901 | APInt SplatVal; | |||
35902 | if (!ISD::isConstantSplatVector(Op1.getNode(), SplatVal) || | |||
35903 | !SplatVal.isMask()) | |||
35904 | return SDValue(); | |||
35905 | ||||
35906 | // Don't prevent creation of ANDN. | |||
35907 | if (isBitwiseNot(Op0)) | |||
35908 | return SDValue(); | |||
35909 | ||||
35910 | if (!SupportedVectorShiftWithImm(VT0.getSimpleVT(), Subtarget, ISD::SRL)) | |||
35911 | return SDValue(); | |||
35912 | ||||
35913 | unsigned EltBitWidth = VT0.getScalarSizeInBits(); | |||
35914 | if (EltBitWidth != DAG.ComputeNumSignBits(Op0)) | |||
35915 | return SDValue(); | |||
35916 | ||||
35917 | SDLoc DL(N); | |||
35918 | unsigned ShiftVal = SplatVal.countTrailingOnes(); | |||
35919 | SDValue ShAmt = DAG.getConstant(EltBitWidth - ShiftVal, DL, MVT::i8); | |||
35920 | SDValue Shift = DAG.getNode(X86ISD::VSRLI, DL, VT0, Op0, ShAmt); | |||
35921 | return DAG.getBitcast(N->getValueType(0), Shift); | |||
35922 | } | |||
35923 | ||||
35924 | // Get the index node from the lowered DAG of a GEP IR instruction with one | |||
35925 | // indexing dimension. | |||
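// A note on the matched shape: the load address must look like
//   (add (shl Index, Scale), <array base>)
// and only the first ADD operand is inspected, so this relies on GEP
// lowering having placed the scaled index there.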
35926 | static SDValue getIndexFromUnindexedLoad(LoadSDNode *Ld) { | |||
35927 | if (Ld->isIndexed()) | |||
35928 | return SDValue(); | |||
35929 | ||||
35930 | SDValue Base = Ld->getBasePtr(); | |||
35931 | ||||
35932 | if (Base.getOpcode() != ISD::ADD) | |||
35933 | return SDValue(); | |||
35934 | ||||
35935 | SDValue ShiftedIndex = Base.getOperand(0); | |||
35936 | ||||
35937 | if (ShiftedIndex.getOpcode() != ISD::SHL) | |||
35938 | return SDValue(); | |||
35939 | ||||
35940 | return ShiftedIndex.getOperand(0); | |||
35941 | ||||
35942 | } | |||
35943 | ||||
35944 | static bool hasBZHI(const X86Subtarget &Subtarget, MVT VT) { | |||
35945 | if (Subtarget.hasBMI2() && VT.isScalarInteger()) { | |||
35946 | switch (VT.getSizeInBits()) { | |||
35947 | default: return false; | |||
35948 | case 64: return Subtarget.is64Bit(); | |||
35949 | case 32: return true; | |||
35950 | } | |||
35951 | } | |||
35952 | return false; | |||
35953 | } | |||
35954 | ||||
35955 | // This function recognizes cases where the X86 bzhi instruction can replace | |||
35956 | // an 'and-load' sequence. | |||
35957 | // When an integer value is loaded from an array of constants defined as | |||
35958 | // follows: | |||
35959 | // | |||
35960 | // int array[SIZE] = {0x0, 0x1, 0x3, 0x7, 0xF ..., 2^(SIZE-1) - 1} | |||
35961 | // | |||
35962 | // and a bitwise 'and' is applied between the loaded value and another input, | |||
35963 | // this is equivalent to performing bzhi (zero high bits) on the input, with | |||
35964 | // the same index as the load. | |||
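// Roughly, in C terms (a hypothetical source pattern):
//   static const int mask[32] = {0x0, 0x1, 0x3, 0x7, ...};
//   return input & mask[idx];   // --> bzhi input, idx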
35965 | static SDValue combineAndLoadToBZHI(SDNode *Node, SelectionDAG &DAG, | |||
35966 | const X86Subtarget &Subtarget) { | |||
35967 | MVT VT = Node->getSimpleValueType(0); | |||
35968 | SDLoc dl(Node); | |||
35969 | ||||
35970 | // Check if subtarget has BZHI instruction for the node's type | |||
35971 | if (!hasBZHI(Subtarget, VT)) | |||
35972 | return SDValue(); | |||
35973 | ||||
35974 | // Try matching the pattern for both operands. | |||
35975 | for (unsigned i = 0; i < 2; i++) { | |||
35976 | SDValue N = Node->getOperand(i); | |||
35977 | LoadSDNode *Ld = dyn_cast<LoadSDNode>(N.getNode()); | |||
35978 | ||||
35979 | // Bail out if the operand is not a load. | |||
35980 | if (!Ld) | |||
35981 | return SDValue(); | |||
35982 | ||||
35983 | const Value *MemOp = Ld->getMemOperand()->getValue(); | |||
35984 | ||||
35985 | if (!MemOp) | |||
35986 | return SDValue(); | |||
35987 | ||||
35988 | if (const GetElementPtrInst *GEP = dyn_cast<GetElementPtrInst>(MemOp)) { | |||
35989 | if (GlobalVariable *GV = dyn_cast<GlobalVariable>(GEP->getOperand(0))) { | |||
35990 | if (GV->isConstant() && GV->hasDefinitiveInitializer()) { | |||
35991 | ||||
35992 | Constant *Init = GV->getInitializer(); | |||
35993 | Type *Ty = Init->getType(); | |||
35994 | if (!isa<ConstantDataArray>(Init) || | |||
35995 | !Ty->getArrayElementType()->isIntegerTy() || | |||
35996 | Ty->getArrayElementType()->getScalarSizeInBits() != | |||
35997 | VT.getSizeInBits() || | |||
35998 | Ty->getArrayNumElements() > | |||
35999 | Ty->getArrayElementType()->getScalarSizeInBits()) | |||
36000 | continue; | |||
36001 | ||||
36002 | // Check if the array's constant elements are suitable to our case. | |||
36003 | uint64_t ArrayElementCount = Init->getType()->getArrayNumElements(); | |||
36004 | bool ConstantsMatch = true; | |||
36005 | for (uint64_t j = 0; j < ArrayElementCount; j++) { | |||
36006 | ConstantInt *Elem = | |||
36007 | dyn_cast<ConstantInt>(Init->getAggregateElement(j)); | |||
36008 | if (!Elem || Elem->getZExtValue() != (((uint64_t)1 << j) - 1)) { | |||
36009 | ConstantsMatch = false; | |||
36010 | break; | |||
36011 | } | |||
36012 | } | |||
36013 | if (!ConstantsMatch) | |||
36014 | continue; | |||
36015 | ||||
36016 | // Do the transformation (for a 32-bit type): | |||
36017 | //   from: (and (load arr[idx]), inp) | |||
36018 | //   to:   (and inp, (srl 0xFFFFFFFF, (sub 32, idx))) | |||
36019 | // which is then selected as one bzhi instruction. | |||
36020 | SDValue Inp = (i == 0) ? Node->getOperand(1) : Node->getOperand(0); | |||
36021 | SDValue SizeC = DAG.getConstant(VT.getSizeInBits(), dl, MVT::i32); | |||
36022 | ||||
36023 | // Get the Node which indexes into the array. | |||
36024 | SDValue Index = getIndexFromUnindexedLoad(Ld); | |||
36025 | if (!Index) | |||
36026 | return SDValue(); | |||
36027 | Index = DAG.getZExtOrTrunc(Index, dl, MVT::i32); | |||
36028 | ||||
36029 | SDValue Sub = DAG.getNode(ISD::SUB, dl, MVT::i32, SizeC, Index); | |||
36030 | Sub = DAG.getNode(ISD::TRUNCATE, dl, MVT::i8, Sub); | |||
36031 | ||||
36032 | SDValue AllOnes = DAG.getAllOnesConstant(dl, VT); | |||
36033 | SDValue LShr = DAG.getNode(ISD::SRL, dl, VT, AllOnes, Sub); | |||
36034 | ||||
36035 | return DAG.getNode(ISD::AND, dl, VT, Inp, LShr); | |||
36036 | } | |||
36037 | } | |||
36038 | } | |||
36039 | } | |||
36040 | return SDValue(); | |||
36041 | } | |||
36042 | ||||
36043 | // Look for (and (ctpop X), 1) which is the IR form of __builtin_parity. | |||
36044 | // Turn it into a series of XORs and a setnp. | |||
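// The folding below uses the fact that the parity of x equals the parity of
// the low half of (x ^ (x >> half-width)); the input is halved repeatedly
// until an 8-bit XOR can set PF (the parity of the low result byte) directly.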
36045 | static SDValue combineParity(SDNode *N, SelectionDAG &DAG, | |||
36046 | const X86Subtarget &Subtarget) { | |||
36047 | EVT VT = N->getValueType(0); | |||
36048 | ||||
36049 | // We only support 64-bit and 32-bit. 64-bit requires special handling | |||
36050 | // unless the 64-bit popcnt instruction is legal. | |||
36051 | if (VT != MVT::i32 && VT != MVT::i64) | |||
36052 | return SDValue(); | |||
36053 | ||||
36054 | const TargetLowering &TLI = DAG.getTargetLoweringInfo(); | |||
36055 | if (TLI.isTypeLegal(VT) && TLI.isOperationLegal(ISD::CTPOP, VT)) | |||
36056 | return SDValue(); | |||
36057 | ||||
36058 | SDValue N0 = N->getOperand(0); | |||
36059 | SDValue N1 = N->getOperand(1); | |||
36060 | ||||
36061 | // LHS needs to be a single use CTPOP. | |||
36062 | if (N0.getOpcode() != ISD::CTPOP || !N0.hasOneUse()) | |||
36063 | return SDValue(); | |||
36064 | ||||
36065 | // RHS needs to be 1. | |||
36066 | if (!isOneConstant(N1)) | |||
36067 | return SDValue(); | |||
36068 | ||||
36069 | SDLoc DL(N); | |||
36070 | SDValue X = N0.getOperand(0); | |||
36071 | ||||
36072 | // If this is 64-bit, it's always best to xor the two 32-bit pieces together | |||
36073 | // even if we have popcnt. | |||
36074 | if (VT == MVT::i64) { | |||
36075 | SDValue Hi = DAG.getNode(ISD::TRUNCATE, DL, MVT::i32, | |||
36076 | DAG.getNode(ISD::SRL, DL, VT, X, | |||
36077 | DAG.getConstant(32, DL, MVT::i8))); | |||
36078 | SDValue Lo = DAG.getNode(ISD::TRUNCATE, DL, MVT::i32, X); | |||
36079 | X = DAG.getNode(ISD::XOR, DL, MVT::i32, Lo, Hi); | |||
36080 | // Generate a 32-bit parity idiom. This will bring us back here if we need | |||
36081 | // to expand it too. | |||
36082 | SDValue Parity = DAG.getNode(ISD::AND, DL, MVT::i32, | |||
36083 | DAG.getNode(ISD::CTPOP, DL, MVT::i32, X), | |||
36084 | DAG.getConstant(1, DL, MVT::i32)); | |||
36085 | return DAG.getNode(ISD::ZERO_EXTEND, DL, VT, Parity); | |||
36086 | } | |||
36087 | assert(VT == MVT::i32 && "Unexpected VT!"); | |||
36088 | ||||
36089 | // Xor the high and low 16-bits together using a 32-bit operation. | |||
36090 | SDValue Hi16 = DAG.getNode(ISD::SRL, DL, VT, X, | |||
36091 | DAG.getConstant(16, DL, MVT::i8)); | |||
36092 | X = DAG.getNode(ISD::XOR, DL, VT, X, Hi16); | |||
36093 | ||||
36094 | // Finally, xor the low 2 bytes together and use an 8-bit flag-setting xor. | |||
36095 | // This should allow an h-reg to be used to save a shift. | |||
36096 | // FIXME: We only get an h-reg in 32-bit mode. | |||
36097 | SDValue Hi = DAG.getNode(ISD::TRUNCATE, DL, MVT::i8, | |||
36098 | DAG.getNode(ISD::SRL, DL, VT, X, | |||
36099 | DAG.getConstant(8, DL, MVT::i8))); | |||
36100 | SDValue Lo = DAG.getNode(ISD::TRUNCATE, DL, MVT::i8, X); | |||
36101 | SDVTList VTs = DAG.getVTList(MVT::i8, MVT::i32); | |||
36102 | SDValue Flags = DAG.getNode(X86ISD::XOR, DL, VTs, Lo, Hi).getValue(1); | |||
36103 | ||||
36104 | // Copy the inverse of the parity flag into a register with setcc. | |||
36105 | SDValue Setnp = getSETCC(X86::COND_NP, Flags, DL, DAG); | |||
36106 | // Zero extend to original type. | |||
36107 | return DAG.getNode(ISD::ZERO_EXTEND, DL, N->getValueType(0), Setnp); | |||
36108 | } | |||
36109 | ||||
36110 | static SDValue combineAnd(SDNode *N, SelectionDAG &DAG, | |||
36111 | TargetLowering::DAGCombinerInfo &DCI, | |||
36112 | const X86Subtarget &Subtarget) { | |||
36113 | EVT VT = N->getValueType(0); | |||
36114 | ||||
36115 | // If this is SSE1 only, convert to FAND to avoid scalarization. | |||
36116 | if (Subtarget.hasSSE1() && !Subtarget.hasSSE2() && VT == MVT::v4i32) { | |||
36117 | return DAG.getBitcast( | |||
36118 | MVT::v4i32, DAG.getNode(X86ISD::FAND, SDLoc(N), MVT::v4f32, | |||
36119 | DAG.getBitcast(MVT::v4f32, N->getOperand(0)), | |||
36120 | DAG.getBitcast(MVT::v4f32, N->getOperand(1)))); | |||
36121 | } | |||
36122 | ||||
36123 | // Use a 32-bit and+zext if upper bits known zero. | |||
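      | // E.g. (and i64 X, Y) becomes (zext (and i32 (trunc X), (trunc Y))), which | |||
      | // typically saves a REX.W prefix since the 32-bit AND implicitly zeroes the | |||
      | // upper half of the destination register. | |||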
36124 | if (VT == MVT::i64 && Subtarget.is64Bit() && | |||
36125 | !isa<ConstantSDNode>(N->getOperand(1))) { | |||
36126 | APInt HiMask = APInt::getHighBitsSet(64, 32); | |||
36127 | if (DAG.MaskedValueIsZero(N->getOperand(1), HiMask) || | |||
36128 | DAG.MaskedValueIsZero(N->getOperand(0), HiMask)) { | |||
36129 | SDLoc dl(N); | |||
36130 | SDValue LHS = DAG.getNode(ISD::TRUNCATE, dl, MVT::i32, N->getOperand(0)); | |||
36131 | SDValue RHS = DAG.getNode(ISD::TRUNCATE, dl, MVT::i32, N->getOperand(1)); | |||
36132 | return DAG.getNode(ISD::ZERO_EXTEND, dl, MVT::i64, | |||
36133 | DAG.getNode(ISD::AND, dl, MVT::i32, LHS, RHS)); | |||
36134 | } | |||
36135 | } | |||
36136 | ||||
36137 | // This must be done before legalization has expanded the ctpop. | |||
36138 | if (SDValue V = combineParity(N, DAG, Subtarget)) | |||
36139 | return V; | |||
36140 | ||||
36141 | if (DCI.isBeforeLegalizeOps()) | |||
36142 | return SDValue(); | |||
36143 | ||||
36144 | if (SDValue R = combineCompareEqual(N, DAG, DCI, Subtarget)) | |||
36145 | return R; | |||
36146 | ||||
36147 | if (SDValue FPLogic = convertIntLogicToFPLogic(N, DAG, Subtarget)) | |||
36148 | return FPLogic; | |||
36149 | ||||
36150 | if (SDValue R = combineANDXORWithAllOnesIntoANDNP(N, DAG)) | |||
36151 | return R; | |||
36152 | ||||
36153 | if (SDValue ShiftRight = combineAndMaskToShift(N, DAG, Subtarget)) | |||
36154 | return ShiftRight; | |||
36155 | ||||
36156 | if (SDValue R = combineAndLoadToBZHI(N, DAG, Subtarget)) | |||
36157 | return R; | |||
36158 | ||||
36159 | // Attempt to recursively combine a bitmask AND with shuffles. | |||
36160 | if (VT.isVector() && (VT.getScalarSizeInBits() % 8) == 0) { | |||
36161 | SDValue Op(N, 0); | |||
36162 | if (SDValue Res = combineX86ShufflesRecursively( | |||
36163 | {Op}, 0, Op, {0}, {}, /*Depth*/ 1, | |||
36164 | /*HasVarMask*/ false, /*AllowVarMask*/ true, DAG, Subtarget)) | |||
36165 | return Res; | |||
36166 | } | |||
36167 | ||||
36168 | // Attempt to combine a scalar bitmask AND with an extracted shuffle. | |||
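      | // For example, (and (extract_elt (v4i32 V), 1), 0x00FF00FF) keeps bytes 4 | |||
      | // and 6 of V and zeroes bytes 5 and 7, which a byte shuffle of V can do | |||
      | // directly before the extract. | |||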
36169 | if ((VT.getScalarSizeInBits() % 8) == 0 && | |||
36170 | N->getOperand(0).getOpcode() == ISD::EXTRACT_VECTOR_ELT && | |||
36171 | isa<ConstantSDNode>(N->getOperand(0).getOperand(1))) { | |||
36172 | SDValue BitMask = N->getOperand(1); | |||
36173 | SDValue SrcVec = N->getOperand(0).getOperand(0); | |||
36174 | EVT SrcVecVT = SrcVec.getValueType(); | |||
36175 | ||||
36176 | // Check that the constant bitmask masks whole bytes. | |||
36177 | APInt UndefElts; | |||
36178 | SmallVector<APInt, 64> EltBits; | |||
36179 | if (VT == SrcVecVT.getScalarType() && | |||
36180 | N->getOperand(0)->isOnlyUserOf(SrcVec.getNode()) && | |||
36181 | getTargetConstantBitsFromNode(BitMask, 8, UndefElts, EltBits) && | |||
36182 | llvm::all_of(EltBits, [](APInt M) { | |||
36183 | return M.isNullValue() || M.isAllOnesValue(); | |||
36184 | })) { | |||
36185 | unsigned NumElts = SrcVecVT.getVectorNumElements(); | |||
36186 | unsigned Scale = SrcVecVT.getScalarSizeInBits() / 8; | |||
36187 | unsigned Idx = N->getOperand(0).getConstantOperandVal(1); | |||
36188 | ||||
36189 | // Create a root shuffle mask from the byte mask and the extracted index. | |||
36190 | SmallVector<int, 16> ShuffleMask(NumElts * Scale, SM_SentinelUndef); | |||
36191 | for (unsigned i = 0; i != Scale; ++i) { | |||
36192 | if (UndefElts[i]) | |||
36193 | continue; | |||
36194 | int VecIdx = Scale * Idx + i; | |||
36195 | ShuffleMask[VecIdx] = | |||
36196 | EltBits[i].isNullValue() ? SM_SentinelZero : VecIdx; | |||
36197 | } | |||
36198 | ||||
36199 | if (SDValue Shuffle = combineX86ShufflesRecursively( | |||
36200 | {SrcVec}, 0, SrcVec, ShuffleMask, {}, /*Depth*/ 2, | |||
36201 | /*HasVarMask*/ false, /*AllowVarMask*/ true, DAG, Subtarget)) | |||
36202 | return DAG.getNode(ISD::EXTRACT_VECTOR_ELT, SDLoc(N), VT, Shuffle, | |||
36203 | N->getOperand(0).getOperand(1)); | |||
36204 | } | |||
36205 | } | |||
36206 | ||||
36207 | return SDValue(); | |||
36208 | } | |||
36209 | ||||
36210 | // Try to match OR(AND(~MASK,X),AND(MASK,Y)) logic pattern. | |||
36211 | static bool matchLogicBlend(SDNode *N, SDValue &X, SDValue &Y, SDValue &Mask) { | |||
36212 | if (N->getOpcode() != ISD::OR) | |||
36213 | return false; | |||
36214 | ||||
36215 | SDValue N0 = N->getOperand(0); | |||
36216 | SDValue N1 = N->getOperand(1); | |||
36217 | ||||
36218 | // Canonicalize AND to LHS. | |||
36219 | if (N1.getOpcode() == ISD::AND) | |||
36220 | std::swap(N0, N1); | |||
36221 | ||||
36222 | // Attempt to match OR(AND(M,Y),ANDNP(M,X)). | |||
36223 | if (N0.getOpcode() != ISD::AND || N1.getOpcode() != X86ISD::ANDNP) | |||
36224 | return false; | |||
36225 | ||||
36226 | Mask = N1.getOperand(0); | |||
36227 | X = N1.getOperand(1); | |||
36228 | ||||
36229 | // Check to see if the mask appeared in both the AND and ANDNP. | |||
36230 | if (N0.getOperand(0) == Mask) | |||
36231 | Y = N0.getOperand(1); | |||
36232 | else if (N0.getOperand(1) == Mask) | |||
36233 | Y = N0.getOperand(0); | |||
36234 | else | |||
36235 | return false; | |||
36236 | ||||
36237 | // TODO: Attempt to match against AND(XOR(-1,M),Y) as well; waiting for the | |||
36238 | // ANDNP combine allows other combines to happen that prevent matching. | |||
36239 | return true; | |||
36240 | } | |||
36241 | ||||
36242 | // Try to fold: | |||
36243 | //   (or (and (m, y), (pandn m, x))) | |||
36244 | // into: | |||
36245 | //   (vselect m, y, x) | |||
36246 | // As a special case, try to fold: | |||
36247 | // (or (and (m, (sub 0, x)), (pandn m, x))) | |||
36248 | // into: | |||
36249 | // (sub (xor X, M), M) | |||
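      | // For example, with lane mask m = <-1, 0, -1, 0>, the OR takes y in lanes | |||
      | // 0 and 2 (from the AND) and x in lanes 1 and 3 (from the PANDN), which is | |||
      | // exactly vselect(m, y, x). | |||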
36250 | static SDValue combineLogicBlendIntoPBLENDV(SDNode *N, SelectionDAG &DAG, | |||
36251 | const X86Subtarget &Subtarget) { | |||
36252 | assert(N->getOpcode() == ISD::OR && "Unexpected Opcode"); | |||
36253 | ||||
36254 | EVT VT = N->getValueType(0); | |||
36255 | if (!((VT.is128BitVector() && Subtarget.hasSSE2()) || | |||
36256 | (VT.is256BitVector() && Subtarget.hasInt256()))) | |||
36257 | return SDValue(); | |||
36258 | ||||
36259 | SDValue X, Y, Mask; | |||
36260 | if (!matchLogicBlend(N, X, Y, Mask)) | |||
36261 | return SDValue(); | |||
36262 | ||||
36263 | // Validate that X, Y, and Mask are bitcasts, and see through them. | |||
36264 | Mask = peekThroughBitcasts(Mask); | |||
36265 | X = peekThroughBitcasts(X); | |||
36266 | Y = peekThroughBitcasts(Y); | |||
36267 | ||||
36268 | EVT MaskVT = Mask.getValueType(); | |||
36269 | unsigned EltBits = MaskVT.getScalarSizeInBits(); | |||
36270 | ||||
36271 | // TODO: Attempt to handle floating point cases as well? | |||
36272 | if (!MaskVT.isInteger() || DAG.ComputeNumSignBits(Mask) != EltBits) | |||
36273 | return SDValue(); | |||
36274 | ||||
36275 | SDLoc DL(N); | |||
36276 | ||||
36277 | // Try to match: | |||
36278 | // (or (and (M, (sub 0, X)), (pandn M, X))) | |||
36279 | // which is a special case of vselect: | |||
36280 | // (vselect M, (sub 0, X), X) | |||
36281 | // Per: | |||
36282 | // http://graphics.stanford.edu/~seander/bithacks.html#ConditionalNegate | |||
36283 | // We know that, if fNegate is 0 or 1: | |||
36284 | // (fNegate ? -v : v) == ((v ^ -fNegate) + fNegate) | |||
36285 | // | |||
36286 | // Here, we have a mask, M (all 1s or 0), and, similarly, we know that: | |||
36287 | // ((M & 1) ? -X : X) == ((X ^ -(M & 1)) + (M & 1)) | |||
36288 | // ( M ? -X : X) == ((X ^ M ) + (M & 1)) | |||
36289 | // This lets us transform our vselect to: | |||
36290 | // (add (xor X, M), (and M, 1)) | |||
36291 | // And further to: | |||
36292 | // (sub (xor X, M), M) | |||
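      | // Concrete check with 8-bit lanes: X = 5 with M = 0xFF gives | |||
      | // (5 ^ 0xFF) - 0xFF = 0xFA - 0xFF = -5 (mod 256), the negation, while | |||
      | // M = 0 gives (5 ^ 0) - 0 = 5, leaving X unchanged. | |||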
36293 | if (X.getValueType() == MaskVT && Y.getValueType() == MaskVT && | |||
36294 | DAG.getTargetLoweringInfo().isOperationLegal(ISD::SUB, MaskVT)) { | |||
36295 | auto IsNegV = [](SDNode *N, SDValue V) { | |||
36296 | return N->getOpcode() == ISD::SUB && N->getOperand(1) == V && | |||
36297 | ISD::isBuildVectorAllZeros(N->getOperand(0).getNode()); | |||
36298 | }; | |||
36299 | SDValue V; | |||
36300 | if (IsNegV(Y.getNode(), X)) | |||
36301 | V = X; | |||
36302 | else if (IsNegV(X.getNode(), Y)) | |||
36303 | V = Y; | |||
36304 | ||||
36305 | if (V) { | |||
36306 | SDValue SubOp1 = DAG.getNode(ISD::XOR, DL, MaskVT, V, Mask); | |||
36307 | SDValue SubOp2 = Mask; | |||
36308 | ||||
36309 | // If the negate was on the false side of the select, then | |||
36310 | // the operands of the SUB need to be swapped. PR 27251. | |||
36311 | // This is because the pattern being matched above is | |||
36312 | // (vselect M, (sub 0, X), X) -> (sub (xor X, M), M) | |||
36313 | // but if the pattern matched was | |||
36314 | // (vselect M, X, (sub 0, X)), that is really a negation of the pattern | |||
36315 | // above, -(vselect M, (sub 0, X), X), and therefore the replacement | |||
36316 | // pattern also needs to be a negation of the replacement pattern above. | |||
36317 | // And -(sub X, Y) is just sub (Y, X), so swapping the operands of the | |||
36318 | // sub accomplishes the negation of the replacement pattern. | |||
36319 | if (V == Y) | |||
36320 | std::swap(SubOp1, SubOp2); | |||
36321 | ||||
36322 | SDValue Res = DAG.getNode(ISD::SUB, DL, MaskVT, SubOp1, SubOp2); | |||
36323 | return DAG.getBitcast(VT, Res); | |||
36324 | } | |||
36325 | } | |||
36326 | ||||
36327 | // PBLENDVB is only available on SSE 4.1. | |||
36328 | if (!Subtarget.hasSSE41()) | |||
36329 | return SDValue(); | |||
36330 | ||||
36331 | MVT BlendVT = VT.is256BitVector() ? MVT::v32i8 : MVT::v16i8; | |||
36332 | ||||
36333 | X = DAG.getBitcast(BlendVT, X); | |||
36334 | Y = DAG.getBitcast(BlendVT, Y); | |||
36335 | Mask = DAG.getBitcast(BlendVT, Mask); | |||
36336 | Mask = DAG.getSelect(DL, BlendVT, Mask, Y, X); | |||
36337 | return DAG.getBitcast(VT, Mask); | |||
36338 | } | |||
36339 | ||||
36340 | // Helper function for combineOrCmpEqZeroToCtlzSrl | |||
36341 | // Transforms: | |||
36342 | // seteq(cmp x, 0) | |||
36343 | // into: | |||
36344 | // srl(ctlz x), log2(bitsize(x)) | |||
36345 | // Input pattern is checked by caller. | |||
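      | // With LZCNT semantics on 32 bits: x == 0 gives ctlz(x) == 32 and | |||
      | // 32 >> 5 == 1, while any x != 0 gives ctlz(x) <= 31 and a shifted | |||
      | // result of 0, reproducing seteq(x, 0). | |||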
36346 | static SDValue lowerX86CmpEqZeroToCtlzSrl(SDValue Op, EVT ExtTy, | |||
36347 | SelectionDAG &DAG) { | |||
36348 | SDValue Cmp = Op.getOperand(1); | |||
36349 | EVT VT = Cmp.getOperand(0).getValueType(); | |||
36350 | unsigned Log2b = Log2_32(VT.getSizeInBits()); | |||
36351 | SDLoc dl(Op); | |||
36352 | SDValue Clz = DAG.getNode(ISD::CTLZ, dl, VT, Cmp->getOperand(0)); | |||
36353 | // The result of the shift is true or false, and on X86, the 32-bit | |||
36354 | // encoding of shr and lzcnt is more desirable. | |||
36355 | SDValue Trunc = DAG.getZExtOrTrunc(Clz, dl, MVT::i32); | |||
36356 | SDValue Scc = DAG.getNode(ISD::SRL, dl, MVT::i32, Trunc, | |||
36357 | DAG.getConstant(Log2b, dl, MVT::i8)); | |||
36358 | return DAG.getZExtOrTrunc(Scc, dl, ExtTy); | |||
36359 | } | |||
36360 | ||||
36361 | // Try to transform: | |||
36362 | // zext(or(setcc(eq, (cmp x, 0)), setcc(eq, (cmp y, 0)))) | |||
36363 | // into: | |||
36364 | // srl(or(ctlz(x), ctlz(y)), log2(bitsize(x))) | |||
36365 | // Will also attempt to match more generic cases, e.g.: | |||
36366 | // zext(or(or(setcc(eq, cmp 0), setcc(eq, cmp 0)), setcc(eq, cmp 0))) | |||
36367 | // Only applies if the target supports the FastLZCNT feature. | |||
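      | // For 32-bit x and y, bit 5 of or(ctlz(x), ctlz(y)) is set iff one of the | |||
      | // ctlz results is 32 (all other values are <= 31), so the final srl by 5 | |||
      | // yields 1 exactly when x == 0 or y == 0. | |||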
36368 | static SDValue combineOrCmpEqZeroToCtlzSrl(SDNode *N, SelectionDAG &DAG, | |||
36369 | TargetLowering::DAGCombinerInfo &DCI, | |||
36370 | const X86Subtarget &Subtarget) { | |||
36371 | if (DCI.isBeforeLegalize() || !Subtarget.getTargetLowering()->isCtlzFast()) | |||
36372 | return SDValue(); | |||
36373 | ||||
36374 | auto isORCandidate = [](SDValue N) { | |||
36375 | return (N->getOpcode() == ISD::OR && N->hasOneUse()); | |||
36376 | }; | |||
36377 | ||||
36378 | // Check that the zero extend is extending to 32 bits or more. The code generated by | |||
36379 | // srl(ctlz) for 16-bit or less variants of the pattern would require extra | |||
36380 | // instructions to clear the upper bits. | |||
36381 | if (!N->hasOneUse() || !N->getSimpleValueType(0).bitsGE(MVT::i32) || | |||
36382 | !isORCandidate(N->getOperand(0))) | |||
36383 | return SDValue(); | |||
36384 | ||||
36385 | // Check the node matches: setcc(eq, cmp 0) | |||
36386 | auto isSetCCCandidate = [](SDValue N) { | |||
36387 | return N->getOpcode() == X86ISD::SETCC && N->hasOneUse() && | |||
36388 | X86::CondCode(N->getConstantOperandVal(0)) == X86::COND_E && | |||
36389 | N->getOperand(1).getOpcode() == X86ISD::CMP && | |||
36390 | isNullConstant(N->getOperand(1).getOperand(1)) && | |||
36391 | N->getOperand(1).getValueType().bitsGE(MVT::i32); | |||
36392 | }; | |||
36393 | ||||
36394 | SDNode *OR = N->getOperand(0).getNode(); | |||
36395 | SDValue LHS = OR->getOperand(0); | |||
36396 | SDValue RHS = OR->getOperand(1); | |||
36397 | ||||
36398 | // Save nodes matching or(or, setcc(eq, cmp 0)). | |||
36399 | SmallVector<SDNode *, 2> ORNodes; | |||
36400 | while (((isORCandidate(LHS) && isSetCCCandidate(RHS)) || | |||
36401 | (isORCandidate(RHS) && isSetCCCandidate(LHS)))) { | |||
36402 | ORNodes.push_back(OR); | |||
36403 | OR = (LHS->getOpcode() == ISD::OR) ? LHS.getNode() : RHS.getNode(); | |||
36404 | LHS = OR->getOperand(0); | |||
36405 | RHS = OR->getOperand(1); | |||
36406 | } | |||
36407 | ||||
36408 | // The last OR node should match or(setcc(eq, cmp 0), setcc(eq, cmp 0)). | |||
36409 | if (!(isSetCCCandidate(LHS) && isSetCCCandidate(RHS)) || | |||
36410 | !isORCandidate(SDValue(OR, 0))) | |||
36411 | return SDValue(); | |||
36412 | ||||
36413 | // We have an or(setcc(eq, cmp 0), setcc(eq, cmp 0)) pattern, try to lower it | |||
36414 | // to | |||
36415 | // or(srl(ctlz),srl(ctlz)). | |||
36416 | // The dag combiner can then fold it into: | |||
36417 | // srl(or(ctlz, ctlz)). | |||
36418 | EVT VT = OR->getValueType(0); | |||
36419 | SDValue NewLHS = lowerX86CmpEqZeroToCtlzSrl(LHS, VT, DAG); | |||
36420 | SDValue Ret, NewRHS; | |||
36421 | if (NewLHS && (NewRHS = lowerX86CmpEqZeroToCtlzSrl(RHS, VT, DAG))) | |||
36422 | Ret = DAG.getNode(ISD::OR, SDLoc(OR), VT, NewLHS, NewRHS); | |||
36423 | ||||
36424 | if (!Ret) | |||
36425 | return SDValue(); | |||
36426 | ||||
36427 | // Try to lower nodes matching the or(or, setcc(eq, cmp 0)) pattern. | |||
36428 | while (ORNodes.size() > 0) { | |||
36429 | OR = ORNodes.pop_back_val(); | |||
36430 | LHS = OR->getOperand(0); | |||
36431 | RHS = OR->getOperand(1); | |||
36432 | // Swap RHS with LHS to match or(setcc(eq, cmp 0), or). | |||
36433 | if (RHS->getOpcode() == ISD::OR) | |||
36434 | std::swap(LHS, RHS); | |||
36435 | EVT VT = OR->getValueType(0); | |||
36436 | SDValue NewRHS = lowerX86CmpEqZeroToCtlzSrl(RHS, VT, DAG); | |||
36437 | if (!NewRHS) | |||
36438 | return SDValue(); | |||
36439 | Ret = DAG.getNode(ISD::OR, SDLoc(OR), VT, Ret, NewRHS); | |||
36440 | } | |||
36441 | ||||
36442 | if (Ret) | |||
36443 | Ret = DAG.getNode(ISD::ZERO_EXTEND, SDLoc(N), N->getValueType(0), Ret); | |||
36444 | ||||
36445 | return Ret; | |||
36446 | } | |||
36447 | ||||
36448 | static SDValue combineOr(SDNode *N, SelectionDAG &DAG, | |||
36449 | TargetLowering::DAGCombinerInfo &DCI, | |||
36450 | const X86Subtarget &Subtarget) { | |||
36451 | SDValue N0 = N->getOperand(0); | |||
36452 | SDValue N1 = N->getOperand(1); | |||
36453 | EVT VT = N->getValueType(0); | |||
36454 | ||||
36455 | // If this is SSE1 only, convert to FOR to avoid scalarization. | |||
36456 | if (Subtarget.hasSSE1() && !Subtarget.hasSSE2() && VT == MVT::v4i32) { | |||
36457 | return DAG.getBitcast(MVT::v4i32, | |||
36458 | DAG.getNode(X86ISD::FOR, SDLoc(N), MVT::v4f32, | |||
36459 | DAG.getBitcast(MVT::v4f32, N0), | |||
36460 | DAG.getBitcast(MVT::v4f32, N1))); | |||
36461 | } | |||
36462 | ||||
36463 | if (DCI.isBeforeLegalizeOps()) | |||
36464 | return SDValue(); | |||
36465 | ||||
36466 | if (SDValue R = combineCompareEqual(N, DAG, DCI, Subtarget)) | |||
36467 | return R; | |||
36468 | ||||
36469 | if (SDValue FPLogic = convertIntLogicToFPLogic(N, DAG, Subtarget)) | |||
36470 | return FPLogic; | |||
36471 | ||||
36472 | if (SDValue R = combineLogicBlendIntoPBLENDV(N, DAG, Subtarget)) | |||
36473 | return R; | |||
36474 | ||||
36475 | // Attempt to recursively combine an OR of shuffles. | |||
36476 | if (VT.isVector() && (VT.getScalarSizeInBits() % 8) == 0) { | |||
36477 | SDValue Op(N, 0); | |||
36478 | if (SDValue Res = combineX86ShufflesRecursively( | |||
36479 | {Op}, 0, Op, {0}, {}, /*Depth*/ 1, | |||
36480 | /*HasVarMask*/ false, /*AllowVarMask*/ true, DAG, Subtarget)) | |||
36481 | return Res; | |||
36482 | } | |||
36483 | ||||
36484 | if (VT != MVT::i16 && VT != MVT::i32 && VT != MVT::i64) | |||
36485 | return SDValue(); | |||
36486 | ||||
36487 | // fold (or (x << c) | (y >> (64 - c))) ==> (shld64 x, y, c) | |||
36488 | bool OptForSize = DAG.getMachineFunction().getFunction().optForSize(); | |||
36489 | ||||
36490 | // SHLD/SHRD instructions have lower register pressure, but on some | |||
36491 | // platforms they have higher latency than the equivalent | |||
36492 | // series of shifts/or that would otherwise be generated. | |||
36493 | // Don't fold (or (x << c) | (y >> (64 - c))) if SHLD/SHRD instructions | |||
36494 | // have higher latencies and we are not optimizing for size. | |||
36495 | if (!OptForSize && Subtarget.isSHLDSlow()) | |||
36496 | return SDValue(); | |||
36497 | ||||
36498 | if (N0.getOpcode() == ISD::SRL && N1.getOpcode() == ISD::SHL) | |||
36499 | std::swap(N0, N1); | |||
36500 | if (N0.getOpcode() != ISD::SHL || N1.getOpcode() != ISD::SRL) | |||
36501 | return SDValue(); | |||
36502 | if (!N0.hasOneUse() || !N1.hasOneUse()) | |||
36503 | return SDValue(); | |||
36504 | ||||
36505 | SDValue ShAmt0 = N0.getOperand(1); | |||
36506 | if (ShAmt0.getValueType() != MVT::i8) | |||
36507 | return SDValue(); | |||
36508 | SDValue ShAmt1 = N1.getOperand(1); | |||
36509 | if (ShAmt1.getValueType() != MVT::i8) | |||
36510 | return SDValue(); | |||
36511 | if (ShAmt0.getOpcode() == ISD::TRUNCATE) | |||
36512 | ShAmt0 = ShAmt0.getOperand(0); | |||
36513 | if (ShAmt1.getOpcode() == ISD::TRUNCATE) | |||
36514 | ShAmt1 = ShAmt1.getOperand(0); | |||
36515 | ||||
36516 | SDLoc DL(N); | |||
36517 | unsigned Opc = X86ISD::SHLD; | |||
36518 | SDValue Op0 = N0.getOperand(0); | |||
36519 | SDValue Op1 = N1.getOperand(0); | |||
36520 | if (ShAmt0.getOpcode() == ISD::SUB || | |||
36521 | ShAmt0.getOpcode() == ISD::XOR) { | |||
36522 | Opc = X86ISD::SHRD; | |||
36523 | std::swap(Op0, Op1); | |||
36524 | std::swap(ShAmt0, ShAmt1); | |||
36525 | } | |||
36526 | ||||
36527 | // OR( SHL( X, C ), SRL( Y, 32 - C ) ) -> SHLD( X, Y, C ) | |||
36528 | // OR( SRL( X, C ), SHL( Y, 32 - C ) ) -> SHRD( X, Y, C ) | |||
36529 | // OR( SHL( X, C ), SRL( SRL( Y, 1 ), XOR( C, 31 ) ) ) -> SHLD( X, Y, C ) | |||
36530 | // OR( SRL( X, C ), SHL( SHL( Y, 1 ), XOR( C, 31 ) ) ) -> SHRD( X, Y, C ) | |||
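      | // For example, with 32-bit operands and C == 8, SHLD(X, Y, 8) computes | |||
      | // (X << 8) | (Y >> 24); the XOR(C, 31) forms work because | |||
      | // (Y >> 1) >> (C ^ 31) == Y >> (32 - C) for any C in (0, 32). | |||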
36531 | unsigned Bits = VT.getScalarSizeInBits(); | |||
36532 | if (ShAmt1.getOpcode() == ISD::SUB) { | |||
36533 | SDValue Sum = ShAmt1.getOperand(0); | |||
36534 | if (auto *SumC = dyn_cast<ConstantSDNode>(Sum)) { | |||
36535 | SDValue ShAmt1Op1 = ShAmt1.getOperand(1); | |||
36536 | if (ShAmt1Op1.getOpcode() == ISD::TRUNCATE) | |||
36537 | ShAmt1Op1 = ShAmt1Op1.getOperand(0); | |||
36538 | if (SumC->getSExtValue() == Bits && ShAmt1Op1 == ShAmt0) | |||
36539 | return DAG.getNode(Opc, DL, VT, | |||
36540 | Op0, Op1, | |||
36541 | DAG.getNode(ISD::TRUNCATE, DL, | |||
36542 | MVT::i8, ShAmt0)); | |||
36543 | } | |||
36544 | } else if (auto *ShAmt1C = dyn_cast<ConstantSDNode>(ShAmt1)) { | |||
36545 | auto *ShAmt0C = dyn_cast<ConstantSDNode>(ShAmt0); | |||
36546 | if (ShAmt0C && (ShAmt0C->getSExtValue() + ShAmt1C->getSExtValue()) == Bits) | |||
36547 | return DAG.getNode(Opc, DL, VT, | |||
36548 | N0.getOperand(0), N1.getOperand(0), | |||
36549 | DAG.getNode(ISD::TRUNCATE, DL, | |||
36550 | MVT::i8, ShAmt0)); | |||
36551 | } else if (ShAmt1.getOpcode() == ISD::XOR) { | |||
36552 | SDValue Mask = ShAmt1.getOperand(1); | |||
36553 | if (auto *MaskC = dyn_cast<ConstantSDNode>(Mask)) { | |||
36554 | unsigned InnerShift = (X86ISD::SHLD == Opc ? ISD::SRL : ISD::SHL); | |||
36555 | SDValue ShAmt1Op0 = ShAmt1.getOperand(0); | |||
36556 | if (ShAmt1Op0.getOpcode() == ISD::TRUNCATE) | |||
36557 | ShAmt1Op0 = ShAmt1Op0.getOperand(0); | |||
36558 | if (MaskC->getSExtValue() == (Bits - 1) && ShAmt1Op0 == ShAmt0) { | |||
36559 | if (Op1.getOpcode() == InnerShift && | |||
36560 | isa<ConstantSDNode>(Op1.getOperand(1)) && | |||
36561 | Op1.getConstantOperandVal(1) == 1) { | |||
36562 | return DAG.getNode(Opc, DL, VT, Op0, Op1.getOperand(0), | |||
36563 | DAG.getNode(ISD::TRUNCATE, DL, MVT::i8, ShAmt0)); | |||
36564 | } | |||
36565 | // Test for ADD( Y, Y ) as an equivalent to SHL( Y, 1 ). | |||
36566 | if (InnerShift == ISD::SHL && Op1.getOpcode() == ISD::ADD && | |||
36567 | Op1.getOperand(0) == Op1.getOperand(1)) { | |||
36568 | return DAG.getNode(Opc, DL, VT, Op0, Op1.getOperand(0), | |||
36569 | DAG.getNode(ISD::TRUNCATE, DL, MVT::i8, ShAmt0)); | |||
36570 | } | |||
36571 | } | |||
36572 | } | |||
36573 | } | |||
36574 | ||||
36575 | return SDValue(); | |||
36576 | } | |||
36577 | ||||
36578 | /// Try to turn tests against the signbit in the form of: | |||
36579 | /// XOR(TRUNCATE(SRL(X, size(X)-1)), 1) | |||
36580 | /// into: | |||
36581 | /// SETGT(X, -1) | |||
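      | /// For example, with a 32-bit X, the input computes "X >= 0": X = -5 gives | |||
      | /// SRL 31 -> 1, XOR 1 -> 0, and SETGT(-5, -1) -> 0; X = 7 gives SRL 31 -> 0, | |||
      | /// XOR 1 -> 1, and SETGT(7, -1) -> 1. | |||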
36582 | static SDValue foldXorTruncShiftIntoCmp(SDNode *N, SelectionDAG &DAG) { | |||
36583 | // This is only worth doing if the output type is i8 or i1. | |||
36584 | EVT ResultType = N->getValueType(0); | |||
36585 | if (ResultType != MVT::i8 && ResultType != MVT::i1) | |||
36586 | return SDValue(); | |||
36587 | ||||
36588 | SDValue N0 = N->getOperand(0); | |||
36589 | SDValue N1 = N->getOperand(1); | |||
36590 | ||||
36591 | // We should be performing an xor against a truncated shift. | |||
36592 | if (N0.getOpcode() != ISD::TRUNCATE || !N0.hasOneUse()) | |||
36593 | return SDValue(); | |||
36594 | ||||
36595 | // Make sure we are performing an xor against one. | |||
36596 | if (!isOneConstant(N1)) | |||
36597 | return SDValue(); | |||
36598 | ||||
36599 | // SetCC on x86 zero extends so only act on this if it's a logical shift. | |||
36600 | SDValue Shift = N0.getOperand(0); | |||
36601 | if (Shift.getOpcode() != ISD::SRL || !Shift.hasOneUse()) | |||
36602 | return SDValue(); | |||
36603 | ||||
36604 | // Make sure we are truncating from one of i16, i32 or i64. | |||
36605 | EVT ShiftTy = Shift.getValueType(); | |||
36606 | if (ShiftTy != MVT::i16 && ShiftTy != MVT::i32 && ShiftTy != MVT::i64) | |||
36607 | return SDValue(); | |||
36608 | ||||
36609 | // Make sure the shift amount extracts the sign bit. | |||
36610 | if (!isa<ConstantSDNode>(Shift.getOperand(1)) || | |||
36611 | Shift.getConstantOperandVal(1) != ShiftTy.getSizeInBits() - 1) | |||
36612 | return SDValue(); | |||
36613 | ||||
36614 | // Create a greater-than comparison against -1. | |||
36615 | // N.B. Using SETGE against 0 works but we want a canonical-looking | |||
36616 | // comparison, and using SETGT matches up with what TranslateX86CC expects. | |||
36617 | SDLoc DL(N); | |||
36618 | SDValue ShiftOp = Shift.getOperand(0); | |||
36619 | EVT ShiftOpTy = ShiftOp.getValueType(); | |||
36620 | const TargetLowering &TLI = DAG.getTargetLoweringInfo(); | |||
36621 | EVT SetCCResultType = TLI.getSetCCResultType(DAG.getDataLayout(), | |||
36622 | *DAG.getContext(), ResultType); | |||
36623 | SDValue Cond = DAG.getSetCC(DL, SetCCResultType, ShiftOp, | |||
36624 | DAG.getConstant(-1, DL, ShiftOpTy), ISD::SETGT); | |||
36625 | if (SetCCResultType != ResultType) | |||
36626 | Cond = DAG.getNode(ISD::ZERO_EXTEND, DL, ResultType, Cond); | |||
36627 | return Cond; | |||
36628 | } | |||
36629 | ||||
36630 | /// Turn vector tests of the signbit in the form of: | |||
36631 | /// xor (sra X, elt_size(X)-1), -1 | |||
36632 | /// into: | |||
36633 | /// pcmpgt X, -1 | |||
36634 | /// | |||
36635 | /// This should be called before type legalization because the pattern may not | |||
36636 | /// persist after that. | |||
36637 | static SDValue foldVectorXorShiftIntoCmp(SDNode *N, SelectionDAG &DAG, | |||
36638 | const X86Subtarget &Subtarget) { | |||
36639 | EVT VT = N->getValueType(0); | |||
36640 | if (!VT.isSimple()) | |||
36641 | return SDValue(); | |||
36642 | ||||
36643 | switch (VT.getSimpleVT().SimpleTy) { | |||
36644 | default: return SDValue(); | |||
36645 | case MVT::v16i8: | |||
36646 | case MVT::v8i16: | |||
36647 | case MVT::v4i32: if (!Subtarget.hasSSE2()) return SDValue(); break; | |||
36648 | case MVT::v2i64: if (!Subtarget.hasSSE42()) return SDValue(); break; | |||
36649 | case MVT::v32i8: | |||
36650 | case MVT::v16i16: | |||
36651 | case MVT::v8i32: | |||
36652 | case MVT::v4i64: if (!Subtarget.hasAVX2()) return SDValue(); break; | |||
36653 | } | |||
36654 | ||||
36655 | // There must be a shift right algebraic before the xor, and the xor must be a | |||
36656 | // 'not' operation. | |||
36657 | SDValue Shift = N->getOperand(0); | |||
36658 | SDValue Ones = N->getOperand(1); | |||
36659 | if (Shift.getOpcode() != ISD::SRA || !Shift.hasOneUse() || | |||
36660 | !ISD::isBuildVectorAllOnes(Ones.getNode())) | |||
36661 | return SDValue(); | |||
36662 | ||||
36663 | // The shift should be smearing the sign bit across each vector element. | |||
36664 | auto *ShiftBV = dyn_cast<BuildVectorSDNode>(Shift.getOperand(1)); | |||
36665 | if (!ShiftBV) | |||
36666 | return SDValue(); | |||
36667 | ||||
36668 | EVT ShiftEltTy = Shift.getValueType().getVectorElementType(); | |||
36669 | auto *ShiftAmt = ShiftBV->getConstantSplatNode(); | |||
36670 | if (!ShiftAmt || ShiftAmt->getZExtValue() != ShiftEltTy.getSizeInBits() - 1) | |||
36671 | return SDValue(); | |||
36672 | ||||
36673 | // Create a greater-than comparison against -1. We don't use the more obvious | |||
36674 | // greater-than-or-equal-to-zero because SSE/AVX don't have that instruction. | |||
36675 | return DAG.getNode(X86ISD::PCMPGT, SDLoc(N), VT, Shift.getOperand(0), Ones); | |||
36676 | } | |||
36677 | ||||
36678 | /// Check if truncation with saturation from type \p SrcVT to \p DstVT | |||
36679 | /// is valid for the given \p Subtarget. | |||
36680 | static bool isSATValidOnAVX512Subtarget(EVT SrcVT, EVT DstVT, | |||
36681 | const X86Subtarget &Subtarget) { | |||
36682 | if (!Subtarget.hasAVX512()) | |||
36683 | return false; | |||
36684 | ||||
36685 | // FIXME: Scalar type may be supported if we move it to vector register. | |||
36686 | if (!SrcVT.isVector()) | |||
36687 | return false; | |||
36688 | ||||
36689 | EVT SrcElVT = SrcVT.getScalarType(); | |||
36690 | EVT DstElVT = DstVT.getScalarType(); | |||
36691 | if (DstElVT != MVT::i8 && DstElVT != MVT::i16 && DstElVT != MVT::i32) | |||
36692 | return false; | |||
36693 | if (SrcVT.is512BitVector() || Subtarget.hasVLX()) | |||
36694 | return SrcElVT.getSizeInBits() >= 32 || Subtarget.hasBWI(); | |||
36695 | return false; | |||
36696 | } | |||
36697 | ||||
36698 | /// Detect patterns of truncation with unsigned saturation: | |||
36699 | /// | |||
36700 | /// 1. (truncate (umin (x, unsigned_max_of_dest_type)) to dest_type). | |||
36701 | /// Return the source value x to be truncated or SDValue() if the pattern was | |||
36702 | /// not matched. | |||
36703 | /// | |||
36704 | /// 2. (truncate (smin (smax (x, C1), C2)) to dest_type), | |||
36705 | /// where C1 >= 0 and C2 is unsigned max of destination type. | |||
36706 | /// | |||
36707 | /// (truncate (smax (smin (x, C2), C1)) to dest_type) | |||
36708 | /// where C1 >= 0, C2 is unsigned max of destination type and C1 <= C2. | |||
36709 | /// | |||
36710 | /// These two patterns are equivalent to: | |||
36711 | /// (truncate (umin (smax(x, C1), unsigned_max_of_dest_type)) to dest_type) | |||
36712 | /// So return the smax(x, C1) value to be truncated or SDValue() if the | |||
36713 | /// pattern was not matched. | |||
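      | /// For an i32 -> i16 truncation, pattern 1 is (trunc (umin x, 65535)) and | |||
      | /// pattern 2 is (trunc (smin (smax x, C1), 65535)) with C1 >= 0; both clamp | |||
      | /// x into an unsigned range that fits in the destination type. | |||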
36714 | static SDValue detectUSatPattern(SDValue In, EVT VT, SelectionDAG &DAG, | |||
36715 | const SDLoc &DL) { | |||
36716 | EVT InVT = In.getValueType(); | |||
36717 | ||||
36718 | // Saturation with truncation. We truncate from InVT to VT. | |||
36719 | assert(InVT.getScalarSizeInBits() > VT.getScalarSizeInBits() && | |||
36720 |        "Unexpected types for truncate operation"); | |||
36721 | ||||
36722 | // Match min/max and return limit value as a parameter. | |||
36723 | auto MatchMinMax = [](SDValue V, unsigned Opcode, APInt &Limit) -> SDValue { | |||
36724 | if (V.getOpcode() == Opcode && | |||
36725 | ISD::isConstantSplatVector(V.getOperand(1).getNode(), Limit)) | |||
36726 | return V.getOperand(0); | |||
36727 | return SDValue(); | |||
36728 | }; | |||
36729 | ||||
36730 | APInt C1, C2; | |||
36731 | if (SDValue UMin = MatchMinMax(In, ISD::UMIN, C2)) | |||
36732 | // C2 should be equal to UINT32_MAX / UINT16_MAX / UINT8_MAX according to | |||
36733 | // the element size of the destination type. | |||
36734 | if (C2.isMask(VT.getScalarSizeInBits())) | |||
36735 | return UMin; | |||
36736 | ||||
36737 | if (SDValue SMin = MatchMinMax(In, ISD::SMIN, C2)) | |||
36738 | if (MatchMinMax(SMin, ISD::SMAX, C1)) | |||
36739 | if (C1.isNonNegative() && C2.isMask(VT.getScalarSizeInBits())) | |||
36740 | return SMin; | |||
36741 | ||||
36742 | if (SDValue SMax = MatchMinMax(In, ISD::SMAX, C1)) | |||
36743 | if (SDValue SMin = MatchMinMax(SMax, ISD::SMIN, C2)) | |||
36744 | if (C1.isNonNegative() && C2.isMask(VT.getScalarSizeInBits()) && | |||
36745 | C2.uge(C1)) { | |||
36746 | return DAG.getNode(ISD::SMAX, DL, InVT, SMin, In.getOperand(1)); | |||
36747 | } | |||
36748 | ||||
36749 | return SDValue(); | |||
36750 | } | |||
36751 | ||||
36752 | /// Detect patterns of truncation with signed saturation: | |||
36753 | /// (truncate (smin ((smax (x, signed_min_of_dest_type)), | |||
36754 | /// signed_max_of_dest_type)) to dest_type) | |||
36755 | /// or: | |||
36756 | /// (truncate (smax ((smin (x, signed_max_of_dest_type)), | |||
36757 | /// signed_min_of_dest_type)) to dest_type). | |||
36758 | /// With MatchPackUS, the smax/smin range is [0, unsigned_max_of_dest_type]. | |||
36759 | /// Return the source value to be truncated or SDValue() if the pattern was not | |||
36760 | /// matched. | |||
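      | /// For an i32 -> i16 truncation this matches | |||
      | /// (trunc (smin (smax x, -32768), 32767)); with MatchPackUS the clamp range | |||
      | /// is [0, 65535] instead, matching what PACKUS produces. | |||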
36761 | static SDValue detectSSatPattern(SDValue In, EVT VT, bool MatchPackUS = false) { | |||
36762 | unsigned NumDstBits = VT.getScalarSizeInBits(); | |||
36763 | unsigned NumSrcBits = In.getScalarValueSizeInBits(); | |||
36764 | assert(NumSrcBits > NumDstBits && "Unexpected types for truncate operation"); | |||
36765 | ||||
36766 | auto MatchMinMax = [](SDValue V, unsigned Opcode, | |||
36767 | const APInt &Limit) -> SDValue { | |||
36768 | APInt C; | |||
36769 | if (V.getOpcode() == Opcode && | |||
36770 | ISD::isConstantSplatVector(V.getOperand(1).getNode(), C) && C == Limit) | |||
36771 | return V.getOperand(0); | |||
36772 | return SDValue(); | |||
36773 | }; | |||
36774 | ||||
36775 | APInt SignedMax, SignedMin; | |||
36776 | if (MatchPackUS) { | |||
36777 | SignedMax = APInt::getAllOnesValue(NumDstBits).zext(NumSrcBits); | |||
36778 | SignedMin = APInt(NumSrcBits, 0); | |||
36779 | } else { | |||
36780 | SignedMax = APInt::getSignedMaxValue(NumDstBits).sext(NumSrcBits); | |||
36781 | SignedMin = APInt::getSignedMinValue(NumDstBits).sext(NumSrcBits); | |||
36782 | } | |||
36783 | ||||
36784 | if (SDValue SMin = MatchMinMax(In, ISD::SMIN, SignedMax)) | |||
36785 | if (SDValue SMax = MatchMinMax(SMin, ISD::SMAX, SignedMin)) | |||
36786 | return SMax; | |||
36787 | ||||
36788 | if (SDValue SMax = MatchMinMax(In, ISD::SMAX, SignedMin)) | |||
36789 | if (SDValue SMin = MatchMinMax(SMax, ISD::SMIN, SignedMax)) | |||
36790 | return SMin; | |||
36791 | ||||
36792 | return SDValue(); | |||
36793 | } | |||
36794 | ||||
36795 | /// Detect a pattern of truncation with signed saturation. | |||
36796 | /// The types should allow use of the VPMOVSS* instruction on AVX512. | |||
36797 | /// Return the source value to be truncated or SDValue() if the pattern was not | |||
36798 | /// matched. | |||
36799 | static SDValue detectAVX512SSatPattern(SDValue In, EVT VT, | |||
36800 | const X86Subtarget &Subtarget, | |||
36801 | const TargetLowering &TLI) { | |||
36802 | if (!TLI.isTypeLegal(In.getValueType())) | |||
36803 | return SDValue(); | |||
36804 | if (!isSATValidOnAVX512Subtarget(In.getValueType(), VT, Subtarget)) | |||
36805 | return SDValue(); | |||
36806 | return detectSSatPattern(In, VT); | |||
36807 | } | |||
36808 | ||||
36809 | /// Detect a pattern of truncation with saturation: | |||
36810 | /// (truncate (umin (x, unsigned_max_of_dest_type)) to dest_type). | |||
36811 | /// The types should allow use of the VPMOVUS* instruction on AVX512. | |||
36812 | /// Return the source value to be truncated or SDValue() if the pattern was not | |||
36813 | /// matched. | |||
36814 | static SDValue detectAVX512USatPattern(SDValue In, EVT VT, SelectionDAG &DAG, | |||
36815 | const SDLoc &DL, | |||
36816 | const X86Subtarget &Subtarget, | |||
36817 | const TargetLowering &TLI) { | |||
36818 | if (!TLI.isTypeLegal(In.getValueType())) | |||
36819 | return SDValue(); | |||
36820 | if (!isSATValidOnAVX512Subtarget(In.getValueType(), VT, Subtarget)) | |||
36821 | return SDValue(); | |||
36822 | return detectUSatPattern(In, VT, DAG, DL); | |||
36823 | } | |||
36824 | ||||
36825 | static SDValue combineTruncateWithSat(SDValue In, EVT VT, const SDLoc &DL, | |||
36826 | SelectionDAG &DAG, | |||
36827 | const X86Subtarget &Subtarget) { | |||
36828 | EVT SVT = VT.getScalarType(); | |||
36829 | EVT InVT = In.getValueType(); | |||
36830 | EVT InSVT = InVT.getScalarType(); | |||
36831 | const TargetLowering &TLI = DAG.getTargetLoweringInfo(); | |||
36832 | if (TLI.isTypeLegal(InVT) && TLI.isTypeLegal(VT) && | |||
36833 | isSATValidOnAVX512Subtarget(InVT, VT, Subtarget)) { | |||
36834 | if (auto SSatVal = detectSSatPattern(In, VT)) | |||
36835 | return DAG.getNode(X86ISD::VTRUNCS, DL, VT, SSatVal); | |||
36836 | if (auto USatVal = detectUSatPattern(In, VT, DAG, DL)) | |||
36837 | return DAG.getNode(X86ISD::VTRUNCUS, DL, VT, USatVal); | |||
36838 | } | |||
36839 | if (VT.isVector() && isPowerOf2_32(VT.getVectorNumElements()) && | |||
36840 | (SVT == MVT::i8 || SVT == MVT::i16) && | |||
36841 | (InSVT == MVT::i16 || InSVT == MVT::i32)) { | |||
36842 | if (auto USatVal = detectSSatPattern(In, VT, true)) { | |||
36843 | // vXi32 -> vXi8 must be performed as PACKUSWB(PACKSSDW,PACKSSDW). | |||
36844 | if (SVT == MVT::i8 && InSVT == MVT::i32) { | |||
36845 | EVT MidVT = EVT::getVectorVT(*DAG.getContext(), MVT::i16, | |||
36846 | VT.getVectorNumElements()); | |||
36847 | SDValue Mid = truncateVectorWithPACK(X86ISD::PACKSS, MidVT, USatVal, DL, | |||
36848 | DAG, Subtarget); | |||
36849 | if (Mid) | |||
36850 | return truncateVectorWithPACK(X86ISD::PACKUS, VT, Mid, DL, DAG, | |||
36851 | Subtarget); | |||
36852 | } else if (SVT == MVT::i8 || Subtarget.hasSSE41()) | |||
36853 | return truncateVectorWithPACK(X86ISD::PACKUS, VT, USatVal, DL, DAG, | |||
36854 | Subtarget); | |||
36855 | } | |||
36856 | if (auto SSatVal = detectSSatPattern(In, VT)) | |||
36857 | return truncateVectorWithPACK(X86ISD::PACKSS, VT, SSatVal, DL, DAG, | |||
36858 | Subtarget); | |||
36859 | } | |||
36860 | return SDValue(); | |||
36861 | } | |||
36862 | ||||
36863 | /// This function detects the AVG pattern between vectors of unsigned i8/i16, | |||
36864 | /// which is c = (a + b + 1) / 2, and replaces this operation with the efficient | |||
36865 | /// X86ISD::AVG instruction. | |||
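      | /// For example, with unsigned i8 elements a == 200 and b == 101, | |||
      | /// (200 + 101 + 1) / 2 == 151 is computed without overflow because the | |||
      | /// arithmetic happens in the wider intermediate type, exactly the rounding | |||
      | /// average that PAVGB/PAVGW implement. | |||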
36866 | static SDValue detectAVGPattern(SDValue In, EVT VT, SelectionDAG &DAG, | |||
36867 | const X86Subtarget &Subtarget, | |||
36868 | const SDLoc &DL) { | |||
36869 | if (!VT.isVector()) | |||
36870 | return SDValue(); | |||
36871 | EVT InVT = In.getValueType(); | |||
36872 | unsigned NumElems = VT.getVectorNumElements(); | |||
36873 | ||||
36874 | EVT ScalarVT = VT.getVectorElementType(); | |||
36875 | if (!((ScalarVT == MVT::i8 || ScalarVT == MVT::i16) && | |||
36876 | NumElems >= 2 && isPowerOf2_32(NumElems))) | |||
36877 | return SDValue(); | |||
36878 | ||||
36879 | // InScalarVT is the intermediate type in the AVG pattern and it should be | |||
36880 | // wider than the original input type (i8/i16). | |||
36881 | EVT InScalarVT = InVT.getVectorElementType(); | |||
36882 | if (InScalarVT.getSizeInBits() <= ScalarVT.getSizeInBits()) | |||
36883 | return SDValue(); | |||
36884 | ||||
36885 | if (!Subtarget.hasSSE2()) | |||
36886 | return SDValue(); | |||
36887 | ||||
36888 | // Detect the following pattern: | |||
36889 | // | |||
36890 | // %1 = zext <N x i8> %a to <N x i32> | |||
36891 | // %2 = zext <N x i8> %b to <N x i32> | |||
36892 | // %3 = add nuw nsw <N x i32> %1, <i32 1 x N> | |||
36893 | // %4 = add nuw nsw <N x i32> %3, %2 | |||
36894 | // %5 = lshr <N x i32> %4, <i32 1 x N> | |||
36895 | // %6 = trunc <N x i32> %5 to <N x i8> | |||
36896 | // | |||
36897 | // In AVX512, the last instruction can also be a trunc store. | |||
36898 | if (In.getOpcode() != ISD::SRL) | |||
36899 | return SDValue(); | |||
36900 | ||||
36901 | // A lambda checking that the given SDValue is a constant vector and that each | |||
36902 | // element is in the range [Min, Max]. | |||
36903 | auto IsConstVectorInRange = [](SDValue V, unsigned Min, unsigned Max) { | |||
36904 | BuildVectorSDNode *BV = dyn_cast<BuildVectorSDNode>(V); | |||
36905 | if (!BV || !BV->isConstant()) | |||
36906 | return false; | |||
36907 | for (SDValue Op : V->ops()) { | |||
36908 | ConstantSDNode *C = dyn_cast<ConstantSDNode>(Op); | |||
36909 | if (!C) | |||
36910 | return false; | |||
36911 | const APInt &Val = C->getAPIntValue(); | |||
36912 | if (Val.ult(Min) || Val.ugt(Max)) | |||
36913 | return false; | |||
36914 | } | |||
36915 | return true; | |||
36916 | }; | |||
36917 | ||||
36918 | // Check if the value is logically right-shifted by one in each element. | |||
36919 | auto LHS = In.getOperand(0); | |||
36920 | auto RHS = In.getOperand(1); | |||
36921 | if (!IsConstVectorInRange(RHS, 1, 1)) | |||
36922 | return SDValue(); | |||
36923 | if (LHS.getOpcode() != ISD::ADD) | |||
36924 | return SDValue(); | |||
36925 | ||||
36926 | // Detect a pattern of a + b + 1 where the order doesn't matter. | |||
36927 | SDValue Operands[3]; | |||
36928 | Operands[0] = LHS.getOperand(0); | |||
36929 | Operands[1] = LHS.getOperand(1); | |||
36930 | ||||
36931 | auto AVGBuilder = [](SelectionDAG &DAG, const SDLoc &DL, | |||
36932 | ArrayRef<SDValue> Ops) { | |||
36933 | return DAG.getNode(X86ISD::AVG, DL, Ops[0].getValueType(), Ops); | |||
36934 | }; | |||
36935 | ||||
36936 | // Take care of the case when one of the operands is a constant vector whose | |||
36937 | // elements are in the range [1, 256] for i8, or [1, 65536] for i16. | |||
36938 | if (IsConstVectorInRange(Operands[1], 1, ScalarVT == MVT::i8 ? 256 : 65536) && | |||
36939 | Operands[0].getOpcode() == ISD::ZERO_EXTEND && | |||
36940 | Operands[0].getOperand(0).getValueType() == VT) { | |||
36941 | // The pattern is detected. Subtract one from the constant vector, then | |||
36942 | // demote it and emit the X86ISD::AVG instruction. | |||
36943 | SDValue VecOnes = DAG.getConstant(1, DL, InVT); | |||
36944 | Operands[1] = DAG.getNode(ISD::SUB, DL, InVT, Operands[1], VecOnes); | |||
36945 | Operands[1] = DAG.getNode(ISD::TRUNCATE, DL, VT, Operands[1]); | |||
36946 | return SplitOpsAndApply(DAG, Subtarget, DL, VT, | |||
36947 | { Operands[0].getOperand(0), Operands[1] }, | |||
36948 | AVGBuilder); | |||
36949 | } | |||
36950 | ||||
36951 | if (Operands[0].getOpcode() == ISD::ADD) | |||
36952 | std::swap(Operands[0], Operands[1]); | |||
36953 | else if (Operands[1].getOpcode() != ISD::ADD) | |||
36954 | return SDValue(); | |||
36955 | Operands[2] = Operands[1].getOperand(0); | |||
36956 | Operands[1] = Operands[1].getOperand(1); | |||
36957 | ||||
36958 | // Now we have three operands of two additions. Check that one of them is a | |||
36959 | // constant vector with ones, and the other two are promoted from i8/i16. | |||
36960 | for (int i = 0; i < 3; ++i) { | |||
36961 | if (!IsConstVectorInRange(Operands[i], 1, 1)) | |||
36962 | continue; | |||
36963 | std::swap(Operands[i], Operands[2]); | |||
36964 | ||||
36965 | // Check if Operands[0] and Operands[1] are results of type promotion. | |||
36966 | for (int j = 0; j < 2; ++j) | |||
36967 | if (Operands[j].getOpcode() != ISD::ZERO_EXTEND || | |||
36968 | Operands[j].getOperand(0).getValueType() != VT) | |||
36969 | return SDValue(); | |||
36970 | ||||
36971 | // The pattern is detected, emit X86ISD::AVG instruction(s). | |||
36972 | return SplitOpsAndApply(DAG, Subtarget, DL, VT, | |||
36973 | { Operands[0].getOperand(0), | |||
36974 | Operands[1].getOperand(0) }, AVGBuilder); | |||
36975 | } | |||
36976 | ||||
36977 | return SDValue(); | |||
36978 | } | |||
36979 | ||||
36980 | static SDValue combineLoad(SDNode *N, SelectionDAG &DAG, | |||
36981 | TargetLowering::DAGCombinerInfo &DCI, | |||
36982 | const X86Subtarget &Subtarget) { | |||
36983 | LoadSDNode *Ld = cast<LoadSDNode>(N); | |||
36984 | EVT RegVT = Ld->getValueType(0); | |||
36985 | EVT MemVT = Ld->getMemoryVT(); | |||
36986 | SDLoc dl(Ld); | |||
36987 | const TargetLowering &TLI = DAG.getTargetLoweringInfo(); | |||
36988 | ||||
36989 | // For chips with slow 32-byte unaligned loads, break the 32-byte operation | |||
36990 | // into two 16-byte operations. Also split non-temporal aligned loads on | |||
36991 | // pre-AVX2 targets as 32-byte loads will lower to regular temporal loads. | |||
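      | // For example, a 32-byte v8f32 load becomes two 16-byte v4f32 loads whose | |||
      | // chains are joined with a TokenFactor and whose results are re-joined with | |||
      | // CONCAT_VECTORS below. | |||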
36992 | ISD::LoadExtType Ext = Ld->getExtensionType(); | |||
36993 | bool Fast; | |||
36994 | unsigned AddressSpace = Ld->getAddressSpace(); | |||
36995 | unsigned Alignment = Ld->getAlignment(); | |||
36996 | if (RegVT.is256BitVector() && !DCI.isBeforeLegalizeOps() && | |||
36997 | Ext == ISD::NON_EXTLOAD && | |||
36998 | ((Ld->isNonTemporal() && !Subtarget.hasInt256() && Alignment >= 16) || | |||
36999 | (TLI.allowsMemoryAccess(*DAG.getContext(), DAG.getDataLayout(), RegVT, | |||
37000 | AddressSpace, Alignment, &Fast) && !Fast))) { | |||
37001 | unsigned NumElems = RegVT.getVectorNumElements(); | |||
37002 | if (NumElems < 2) | |||
37003 | return SDValue(); | |||
37004 | ||||
37005 | SDValue Ptr = Ld->getBasePtr(); | |||
37006 | ||||
37007 | EVT HalfVT = EVT::getVectorVT(*DAG.getContext(), MemVT.getScalarType(), | |||
37008 | NumElems/2); | |||
37009 | SDValue Load1 = | |||
37010 | DAG.getLoad(HalfVT, dl, Ld->getChain(), Ptr, Ld->getPointerInfo(), | |||
37011 | Alignment, Ld->getMemOperand()->getFlags()); | |||
37012 | ||||
37013 | Ptr = DAG.getMemBasePlusOffset(Ptr, 16, dl); | |||
37014 | SDValue Load2 = | |||
37015 | DAG.getLoad(HalfVT, dl, Ld->getChain(), Ptr, | |||
37016 | Ld->getPointerInfo().getWithOffset(16), | |||
37017 | MinAlign(Alignment, 16U), Ld->getMemOperand()->getFlags()); | |||
37018 | SDValue TF = DAG.getNode(ISD::TokenFactor, dl, MVT::Other, | |||
37019 | Load1.getValue(1), | |||
37020 | Load2.getValue(1)); | |||
37021 | ||||
37022 | SDValue NewVec = DAG.getNode(ISD::CONCAT_VECTORS, dl, RegVT, Load1, Load2); | |||
37023 | return DCI.CombineTo(N, NewVec, TF, true); | |||
37024 | } | |||
37025 | ||||
37026 | return SDValue(); | |||
37027 | } | |||
37028 | ||||
37029 | /// If V is a build vector of boolean constants and exactly one of those | |||
37030 | /// constants is true, return the operand index of that true element. | |||
37031 | /// Otherwise, return -1. | |||
37032 | static int getOneTrueElt(SDValue V) { | |||
37033 | // This needs to be a build vector of booleans. | |||
37034 | // TODO: Checking for the i1 type matches the IR definition for the mask, | |||
37035 | // but the mask check could be loosened to i8 or other types. That might | |||
37036 | // also require checking more than 'allOnesValue'; e.g., the x86 HW | |||
37037 | // instructions only require that the MSB is set for each mask element. | |||
37038 | // The ISD::MSTORE comments/definition do not specify how the mask operand | |||
37039 | // is formatted. | |||
37040 | auto *BV = dyn_cast<BuildVectorSDNode>(V); | |||
37041 | if (!BV || BV->getValueType(0).getVectorElementType() != MVT::i1) | |||
37042 | return -1; | |||
37043 | ||||
37044 | int TrueIndex = -1; | |||
37045 | unsigned NumElts = BV->getValueType(0).getVectorNumElements(); | |||
37046 | for (unsigned i = 0; i < NumElts; ++i) { | |||
37047 | const SDValue &Op = BV->getOperand(i); | |||
37048 | if (Op.isUndef()) | |||
37049 | continue; | |||
37050 | auto *ConstNode = dyn_cast<ConstantSDNode>(Op); | |||
37051 | if (!ConstNode) | |||
37052 | return -1; | |||
37053 | if (ConstNode->getAPIntValue().isAllOnesValue()) { | |||
37054 | // If we already found a one, this is too many. | |||
37055 | if (TrueIndex >= 0) | |||
37056 | return -1; | |||
37057 | TrueIndex = i; | |||
37058 | } | |||
37059 | } | |||
37060 | return TrueIndex; | |||
37061 | } | |||
37062 | ||||
37063 | /// Given a masked memory load/store operation, return true if it has one mask | |||
37064 | /// bit set. If it has one mask bit set, then also return the memory address of | |||
37065 | /// the scalar element to load/store, the vector index to insert/extract that | |||
37066 | /// scalar element, and the alignment for the scalar memory access. | |||
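      | /// For example, a v4f32 masked load with mask <0,0,1,0> has its single true | |||
      | /// element at index 2, so Addr is BasePtr + 2 * 4 bytes, Index is 2, and | |||
      | /// Alignment is MinAlign(original alignment, 4). | |||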
37067 | static bool getParamsForOneTrueMaskedElt(MaskedLoadStoreSDNode *MaskedOp, | |||
37068 | SelectionDAG &DAG, SDValue &Addr, | |||
37069 | SDValue &Index, unsigned &Alignment) { | |||
37070 | int TrueMaskElt = getOneTrueElt(MaskedOp->getMask()); | |||
37071 | if (TrueMaskElt < 0) | |||
37072 | return false; | |||
37073 | ||||
37074 | // Get the address of the one scalar element that is specified by the mask | |||
37075 | // using the appropriate offset from the base pointer. | |||
37076 | EVT EltVT = MaskedOp->getMemoryVT().getVectorElementType(); | |||
37077 | Addr = MaskedOp->getBasePtr(); | |||
37078 | if (TrueMaskElt != 0) { | |||
37079 | unsigned Offset = TrueMaskElt * EltVT.getStoreSize(); | |||
37080 | Addr = DAG.getMemBasePlusOffset(Addr, Offset, SDLoc(MaskedOp)); | |||
37081 | } | |||
37082 | ||||
37083 | Index = DAG.getIntPtrConstant(TrueMaskElt, SDLoc(MaskedOp)); | |||
37084 | Alignment = MinAlign(MaskedOp->getAlignment(), EltVT.getStoreSize()); | |||
37085 | return true; | |||
37086 | } | |||
37087 | ||||
37088 | /// If exactly one element of the mask is set for a non-extending masked load, | |||
37089 | /// it is a scalar load and vector insert. | |||
37090 | /// Note: It is expected that the degenerate cases of an all-zeros or all-ones | |||
37091 | /// mask have already been optimized in IR, so we don't bother with those here. | |||
37092 | static SDValue | |||
37093 | reduceMaskedLoadToScalarLoad(MaskedLoadSDNode *ML, SelectionDAG &DAG, | |||
37094 | TargetLowering::DAGCombinerInfo &DCI) { | |||
37095 | // TODO: This is not x86-specific, so it could be lifted to DAGCombiner. | |||
37096 | // However, some target hooks may need to be added to know when the transform | |||
37097 | // is profitable. Endianness would also have to be considered. | |||
37098 | ||||
37099 | SDValue Addr, VecIndex; | |||
37100 | unsigned Alignment; | |||
37101 | if (!getParamsForOneTrueMaskedElt(ML, DAG, Addr, VecIndex, Alignment)) | |||
37102 | return SDValue(); | |||
37103 | ||||
37104 | // Load the one scalar element that is specified by the mask using the | |||
37105 | // appropriate offset from the base pointer. | |||
37106 | SDLoc DL(ML); | |||
37107 | EVT VT = ML->getValueType(0); | |||
37108 | EVT EltVT = VT.getVectorElementType(); | |||
37109 | SDValue Load = | |||
37110 | DAG.getLoad(EltVT, DL, ML->getChain(), Addr, ML->getPointerInfo(), | |||
37111 | Alignment, ML->getMemOperand()->getFlags()); | |||
37112 | ||||
37113 | // Insert the loaded element into the appropriate place in the vector. | |||
37114 | SDValue Insert = DAG.getNode(ISD::INSERT_VECTOR_ELT, DL, VT, | |||
37115 | ML->getPassThru(), Load, VecIndex); | |||
37116 | return DCI.CombineTo(ML, Insert, Load.getValue(1), true); | |||
37117 | } | |||
37118 | ||||
37119 | static SDValue | |||
37120 | combineMaskedLoadConstantMask(MaskedLoadSDNode *ML, SelectionDAG &DAG, | |||
37121 | TargetLowering::DAGCombinerInfo &DCI) { | |||
37122 | if (!ISD::isBuildVectorOfConstantSDNodes(ML->getMask().getNode())) | |||
37123 | return SDValue(); | |||
37124 | ||||
37125 | SDLoc DL(ML); | |||
37126 | EVT VT = ML->getValueType(0); | |||
37127 | ||||
37128 | // If we are loading the first and last elements of a vector, it is safe and | |||
37129 | // always faster to load the whole vector. Replace the masked load with a | |||
37130 | // vector load and select. | |||
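      | // (Accessing the first and last elements guarantees that the whole vector | |||
      | // lies in dereferenceable memory, so the full-width load cannot fault where | |||
      | // the masked load would not.) | |||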
37131 | unsigned NumElts = VT.getVectorNumElements(); | |||
37132 | BuildVectorSDNode *MaskBV = cast<BuildVectorSDNode>(ML->getMask()); | |||
37133 | bool LoadFirstElt = !isNullConstant(MaskBV->getOperand(0)); | |||
37134 | bool LoadLastElt = !isNullConstant(MaskBV->getOperand(NumElts - 1)); | |||
37135 | if (LoadFirstElt && LoadLastElt) { | |||
37136 | SDValue VecLd = DAG.getLoad(VT, DL, ML->getChain(), ML->getBasePtr(), | |||
37137 | ML->getMemOperand()); | |||
37138 | SDValue Blend = DAG.getSelect(DL, VT, ML->getMask(), VecLd, | |||
37139 | ML->getPassThru()); | |||
37140 | return DCI.CombineTo(ML, Blend, VecLd.getValue(1), true); | |||
37141 | } | |||
37142 | ||||
37143 | // Convert a masked load with a constant mask into a masked load and a select. | |||
37144 | // This allows the select operation to use a faster kind of select instruction | |||
37145 | // (for example, vblendvps -> vblendps). | |||
37146 | ||||
37147 | // Don't try this if the pass-through operand is already undefined. That would | |||
37148 | // cause an infinite loop because that's what we're about to create. | |||
37149 | if (ML->getPassThru().isUndef()) | |||
37150 | return SDValue(); | |||
37151 | ||||
37152 | // The new masked load has an undef pass-through operand. The select uses the | |||
37153 | // original pass-through operand. | |||
37154 | SDValue NewML = DAG.getMaskedLoad(VT, DL, ML->getChain(), ML->getBasePtr(), | |||
37155 | ML->getMask(), DAG.getUNDEF(VT), | |||
37156 | ML->getMemoryVT(), ML->getMemOperand(), | |||
37157 | ML->getExtensionType()); | |||
37158 | SDValue Blend = DAG.getSelect(DL, VT, ML->getMask(), NewML, | |||
37159 | ML->getPassThru()); | |||
37160 | ||||
37161 | return DCI.CombineTo(ML, Blend, NewML.getValue(1), true); | |||
37162 | } | |||
37163 | ||||
37164 | static SDValue combineMaskedLoad(SDNode *N, SelectionDAG &DAG, | |||
37165 | TargetLowering::DAGCombinerInfo &DCI, | |||
37166 | const X86Subtarget &Subtarget) { | |||
37167 | MaskedLoadSDNode *Mld = cast<MaskedLoadSDNode>(N); | |||
37168 | ||||
37169 | // TODO: Expanding load with constant mask may be optimized as well. | |||
37170 | if (Mld->isExpandingLoad()) | |||
37171 | return SDValue(); | |||
37172 | ||||
37173 | if (Mld->getExtensionType() == ISD::NON_EXTLOAD) { | |||
37174 | if (SDValue ScalarLoad = reduceMaskedLoadToScalarLoad(Mld, DAG, DCI)) | |||
37175 | return ScalarLoad; | |||
37176 | // TODO: Do some AVX512 subsets benefit from this transform? | |||
37177 | if (!Subtarget.hasAVX512()) | |||
37178 | if (SDValue Blend = combineMaskedLoadConstantMask(Mld, DAG, DCI)) | |||
37179 | return Blend; | |||
37180 | } | |||
37181 | ||||
37182 | if (Mld->getExtensionType() != ISD::SEXTLOAD) | |||
37183 | return SDValue(); | |||
37184 | ||||
37185 | // Resolve extending loads. | |||
37186 | EVT VT = Mld->getValueType(0); | |||
37187 | unsigned NumElems = VT.getVectorNumElements(); | |||
37188 | EVT LdVT = Mld->getMemoryVT(); | |||
37189 | SDLoc dl(Mld); | |||
37190 | ||||
37191 | assert(LdVT != VT && "Cannot extend to the same type"); | |||
37192 | unsigned ToSz = VT.getScalarSizeInBits(); | |||
37193 | unsigned FromSz = LdVT.getScalarSizeInBits(); | |||
37194 | // From/To sizes and ElemCount must be powers of two. | |||
37195 | assert(isPowerOf2_32(NumElems * FromSz * ToSz) && | |||
37196 |        "Unexpected size for extending masked load"); | |||
37197 | ||||
37198 | unsigned SizeRatio = ToSz / FromSz; | |||
37199 | assert(SizeRatio * NumElems * FromSz == VT.getSizeInBits())((SizeRatio * NumElems * FromSz == VT.getSizeInBits()) ? static_cast <void> (0) : __assert_fail ("SizeRatio * NumElems * FromSz == VT.getSizeInBits()" , "/build/llvm-toolchain-snapshot-8~svn350071/lib/Target/X86/X86ISelLowering.cpp" , 37199, __PRETTY_FUNCTION__)); | |||
37200 | ||||
37201 | // Create a type on which we perform the shuffle. | |||
37202 | EVT WideVecVT = EVT::getVectorVT(*DAG.getContext(), | |||
37203 | LdVT.getScalarType(), NumElems*SizeRatio); | |||
37204 | assert(WideVecVT.getSizeInBits() == VT.getSizeInBits())((WideVecVT.getSizeInBits() == VT.getSizeInBits()) ? static_cast <void> (0) : __assert_fail ("WideVecVT.getSizeInBits() == VT.getSizeInBits()" , "/build/llvm-toolchain-snapshot-8~svn350071/lib/Target/X86/X86ISelLowering.cpp" , 37204, __PRETTY_FUNCTION__)); | |||
37205 | ||||
37206 | // Convert PassThru value. | |||
37207 | SDValue WidePassThru = DAG.getBitcast(WideVecVT, Mld->getPassThru()); | |||
37208 | if (!Mld->getPassThru().isUndef()) { | |||
37209 | SmallVector<int, 16> ShuffleVec(NumElems * SizeRatio, -1); | |||
37210 | for (unsigned i = 0; i != NumElems; ++i) | |||
37211 | ShuffleVec[i] = i * SizeRatio; | |||
37212 | ||||
37213 | // Can't shuffle using an illegal type. | |||
37214 | assert(DAG.getTargetLoweringInfo().isTypeLegal(WideVecVT) &&((DAG.getTargetLoweringInfo().isTypeLegal(WideVecVT) && "WideVecVT should be legal") ? static_cast<void> (0) : __assert_fail ("DAG.getTargetLoweringInfo().isTypeLegal(WideVecVT) && \"WideVecVT should be legal\"" , "/build/llvm-toolchain-snapshot-8~svn350071/lib/Target/X86/X86ISelLowering.cpp" , 37215, __PRETTY_FUNCTION__)) | |||
37215 | "WideVecVT should be legal")((DAG.getTargetLoweringInfo().isTypeLegal(WideVecVT) && "WideVecVT should be legal") ? static_cast<void> (0) : __assert_fail ("DAG.getTargetLoweringInfo().isTypeLegal(WideVecVT) && \"WideVecVT should be legal\"" , "/build/llvm-toolchain-snapshot-8~svn350071/lib/Target/X86/X86ISelLowering.cpp" , 37215, __PRETTY_FUNCTION__)); | |||
37216 | WidePassThru = DAG.getVectorShuffle(WideVecVT, dl, WidePassThru, | |||
37217 | DAG.getUNDEF(WideVecVT), ShuffleVec); | |||
37218 | } | |||
37219 | ||||
37220 | // Prepare the new mask. | |||
37221 | SDValue NewMask; | |||
37222 | SDValue Mask = Mld->getMask(); | |||
37223 | if (Mask.getValueType() == VT) { | |||
37224 | // Mask and original value have the same type. | |||
37225 | NewMask = DAG.getBitcast(WideVecVT, Mask); | |||
37226 | SmallVector<int, 16> ShuffleVec(NumElems * SizeRatio, -1); | |||
37227 | for (unsigned i = 0; i != NumElems; ++i) | |||
37228 | ShuffleVec[i] = i * SizeRatio; | |||
37229 | for (unsigned i = NumElems; i != NumElems * SizeRatio; ++i) | |||
37230 | ShuffleVec[i] = NumElems * SizeRatio; | |||
37231 | NewMask = DAG.getVectorShuffle(WideVecVT, dl, NewMask, | |||
37232 | DAG.getConstant(0, dl, WideVecVT), | |||
37233 | ShuffleVec); | |||
37234 | } else { | |||
37235 | assert(Mask.getValueType().getVectorElementType() == MVT::i1)((Mask.getValueType().getVectorElementType() == MVT::i1) ? static_cast <void> (0) : __assert_fail ("Mask.getValueType().getVectorElementType() == MVT::i1" , "/build/llvm-toolchain-snapshot-8~svn350071/lib/Target/X86/X86ISelLowering.cpp" , 37235, __PRETTY_FUNCTION__)); | |||
37236 | unsigned WidenNumElts = NumElems*SizeRatio; | |||
37237 | unsigned MaskNumElts = VT.getVectorNumElements(); | |||
37238 | EVT NewMaskVT = EVT::getVectorVT(*DAG.getContext(), MVT::i1, | |||
37239 | WidenNumElts); | |||
37240 | ||||
37241 | unsigned NumConcat = WidenNumElts / MaskNumElts; | |||
37242 | SDValue ZeroVal = DAG.getConstant(0, dl, Mask.getValueType()); | |||
37243 | SmallVector<SDValue, 16> Ops(NumConcat, ZeroVal); | |||
37244 | Ops[0] = Mask; | |||
37245 | NewMask = DAG.getNode(ISD::CONCAT_VECTORS, dl, NewMaskVT, Ops); | |||
37246 | } | |||
37247 | ||||
37248 | SDValue WideLd = DAG.getMaskedLoad(WideVecVT, dl, Mld->getChain(), | |||
37249 | Mld->getBasePtr(), NewMask, WidePassThru, | |||
37250 | Mld->getMemoryVT(), Mld->getMemOperand(), | |||
37251 | ISD::NON_EXTLOAD); | |||
37252 | SDValue NewVec = getExtendInVec(/*Signed*/true, dl, VT, WideLd, DAG); | |||
37253 | return DCI.CombineTo(N, NewVec, WideLd.getValue(1), true); | |||
37254 | } | |||

/// If exactly one element of the mask is set for a non-truncating masked
/// store, it can be replaced by a vector extract and a scalar store.
/// Note: It is expected that the degenerate cases of an all-zeros or all-ones
/// mask have already been optimized in IR, so we don't bother with those here.
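/// For example (a sketch): a v4f32 masked store whose mask has only lane 2
/// set writes a single float, so it can become
///   t = extract_vector_elt v, 2
///   store t, base + 2 * sizeof(float)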
static SDValue reduceMaskedStoreToScalarStore(MaskedStoreSDNode *MS,
                                              SelectionDAG &DAG) {
  // TODO: This is not x86-specific, so it could be lifted to DAGCombiner.
  // However, some target hooks may need to be added to know when the transform
  // is profitable. Endianness would also have to be considered.

  SDValue Addr, VecIndex;
  unsigned Alignment;
  if (!getParamsForOneTrueMaskedElt(MS, DAG, Addr, VecIndex, Alignment))
    return SDValue();

  // Extract the one scalar element that is actually being stored.
  SDLoc DL(MS);
  EVT VT = MS->getValue().getValueType();
  EVT EltVT = VT.getVectorElementType();
  SDValue Extract = DAG.getNode(ISD::EXTRACT_VECTOR_ELT, DL, EltVT,
                                MS->getValue(), VecIndex);

  // Store that element at the appropriate offset from the base pointer.
  return DAG.getStore(MS->getChain(), DL, Extract, Addr, MS->getPointerInfo(),
                      Alignment, MS->getMemOperand()->getFlags());
}

static SDValue combineMaskedStore(SDNode *N, SelectionDAG &DAG,
                                  TargetLowering::DAGCombinerInfo &DCI,
                                  const X86Subtarget &Subtarget) {
  MaskedStoreSDNode *Mst = cast<MaskedStoreSDNode>(N);
  if (Mst->isCompressingStore())
    return SDValue();

  EVT VT = Mst->getValue().getValueType();
  if (!Mst->isTruncatingStore()) {
    if (SDValue ScalarStore = reduceMaskedStoreToScalarStore(Mst, DAG))
      return ScalarStore;

    // If the mask value has been legalized to a non-boolean vector, try to
    // simplify ops leading up to it. We only demand the MSB of each lane.
    SDValue Mask = Mst->getMask();
    if (Mask.getScalarValueSizeInBits() != 1) {
      const TargetLowering &TLI = DAG.getTargetLoweringInfo();
      APInt DemandedMask(APInt::getSignMask(VT.getScalarSizeInBits()));
      if (TLI.SimplifyDemandedBits(Mask, DemandedMask, DCI))
        return SDValue(N, 0);
    }

    // TODO: AVX512 targets should also be able to simplify something like the
    // pattern above, but that pattern will be different. It will either need to
    // match setcc more generally or match PCMPGTM later (in tablegen?).

    return SDValue();
  }

  // Resolve truncating stores.
  unsigned NumElems = VT.getVectorNumElements();
  EVT StVT = Mst->getMemoryVT();
  SDLoc dl(Mst);

  assert(StVT != VT && "Cannot truncate to the same type");
  unsigned FromSz = VT.getScalarSizeInBits();
  unsigned ToSz = StVT.getScalarSizeInBits();

  const TargetLowering &TLI = DAG.getTargetLoweringInfo();

  // The truncating store is legal in some cases. For example
  // vpmovqb, vpmovqw, vpmovqd, vpmovdb, vpmovdw
  // are designated for truncate store.
  // In this case we don't need any further transformations.
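  // For example (a sketch): with AVX512, a v8i64-to-v8i8 truncating masked
  // store can be selected directly to vpmovqb, so we return early and leave
  // the node as-is.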
  if (TLI.isTruncStoreLegal(VT, StVT))
    return SDValue();

  // From/To sizes and ElemCount must be pow of two.
  assert(isPowerOf2_32(NumElems * FromSz * ToSz) &&
         "Unexpected size for truncating masked store");
  // We are going to use the original vector elt for storing.
  // Accumulated smaller vector elements must be a multiple of the store size.
  assert(((NumElems * FromSz) % ToSz) == 0 &&
         "Unexpected ratio for truncating masked store");

  unsigned SizeRatio = FromSz / ToSz;
  assert(SizeRatio * NumElems * ToSz == VT.getSizeInBits());

  // Create a type on which we perform the shuffle.
  EVT WideVecVT = EVT::getVectorVT(*DAG.getContext(),
                                   StVT.getScalarType(), NumElems * SizeRatio);

  assert(WideVecVT.getSizeInBits() == VT.getSizeInBits());

  SDValue WideVec = DAG.getBitcast(WideVecVT, Mst->getValue());
  SmallVector<int, 16> ShuffleVec(NumElems * SizeRatio, -1);
  for (unsigned i = 0; i != NumElems; ++i)
    ShuffleVec[i] = i * SizeRatio;

  // Can't shuffle using an illegal type.
  assert(DAG.getTargetLoweringInfo().isTypeLegal(WideVecVT) &&
         "WideVecVT should be legal");

  SDValue TruncatedVal = DAG.getVectorShuffle(WideVecVT, dl, WideVec,
                                              DAG.getUNDEF(WideVecVT),
                                              ShuffleVec);

  SDValue NewMask;
  SDValue Mask = Mst->getMask();
  if (Mask.getValueType() == VT) {
    // Mask and original value have the same type.
    NewMask = DAG.getBitcast(WideVecVT, Mask);
    for (unsigned i = 0; i != NumElems; ++i)
      ShuffleVec[i] = i * SizeRatio;
    for (unsigned i = NumElems; i != NumElems * SizeRatio; ++i)
      ShuffleVec[i] = NumElems * SizeRatio;
    NewMask = DAG.getVectorShuffle(WideVecVT, dl, NewMask,
                                   DAG.getConstant(0, dl, WideVecVT),
                                   ShuffleVec);
  } else {
    assert(Mask.getValueType().getVectorElementType() == MVT::i1);
    unsigned WidenNumElts = NumElems * SizeRatio;
    unsigned MaskNumElts = VT.getVectorNumElements();
    EVT NewMaskVT = EVT::getVectorVT(*DAG.getContext(), MVT::i1,
                                     WidenNumElts);

    unsigned NumConcat = WidenNumElts / MaskNumElts;
    SDValue ZeroVal = DAG.getConstant(0, dl, Mask.getValueType());
    SmallVector<SDValue, 16> Ops(NumConcat, ZeroVal);
    Ops[0] = Mask;
    NewMask = DAG.getNode(ISD::CONCAT_VECTORS, dl, NewMaskVT, Ops);
  }

  return DAG.getMaskedStore(Mst->getChain(), dl, TruncatedVal,
                            Mst->getBasePtr(), NewMask, StVT,
                            Mst->getMemOperand(), false);
}

static SDValue combineStore(SDNode *N, SelectionDAG &DAG,
                            const X86Subtarget &Subtarget) {
  StoreSDNode *St = cast<StoreSDNode>(N);
  EVT VT = St->getValue().getValueType();
  EVT StVT = St->getMemoryVT();
  SDLoc dl(St);
  SDValue StoredVal = St->getOperand(1);
  const TargetLowering &TLI = DAG.getTargetLoweringInfo();

  // Convert a store of vXi1 into a store of iX and a bitcast.
  if (!Subtarget.hasAVX512() && VT == StVT && VT.isVector() &&
      VT.getVectorElementType() == MVT::i1) {

    EVT NewVT = EVT::getIntegerVT(*DAG.getContext(), VT.getVectorNumElements());
    StoredVal = DAG.getBitcast(NewVT, StoredVal);

    return DAG.getStore(St->getChain(), dl, StoredVal, St->getBasePtr(),
                        St->getPointerInfo(), St->getAlignment(),
                        St->getMemOperand()->getFlags());
  }

  // If this is a store of a scalar_to_vector to v1i1, just use a scalar store.
  // This will avoid a copy to k-register.
  if (VT == MVT::v1i1 && VT == StVT && Subtarget.hasAVX512() &&
      StoredVal.getOpcode() == ISD::SCALAR_TO_VECTOR &&
      StoredVal.getOperand(0).getValueType() == MVT::i8) {
    return DAG.getStore(St->getChain(), dl, StoredVal.getOperand(0),
                        St->getBasePtr(), St->getPointerInfo(),
                        St->getAlignment(), St->getMemOperand()->getFlags());
  }

  // Widen v2i1/v4i1 stores to v8i1.
  if ((VT == MVT::v2i1 || VT == MVT::v4i1) && VT == StVT &&
      Subtarget.hasAVX512()) {
    unsigned NumConcats = 8 / VT.getVectorNumElements();
    SmallVector<SDValue, 4> Ops(NumConcats, DAG.getUNDEF(VT));
    Ops[0] = StoredVal;
    StoredVal = DAG.getNode(ISD::CONCAT_VECTORS, dl, MVT::v8i1, Ops);
    return DAG.getStore(St->getChain(), dl, StoredVal, St->getBasePtr(),
                        St->getPointerInfo(), St->getAlignment(),
                        St->getMemOperand()->getFlags());
  }

  // Turn vXi1 stores of constants into a scalar store.
  if ((VT == MVT::v8i1 || VT == MVT::v16i1 || VT == MVT::v32i1 ||
       VT == MVT::v64i1) && VT == StVT && TLI.isTypeLegal(VT) &&
      ISD::isBuildVectorOfConstantSDNodes(StoredVal.getNode())) {
    // If it's a v64i1 store without 64-bit support, we need two stores.
    if (VT == MVT::v64i1 && !Subtarget.is64Bit()) {
      SDValue Lo = DAG.getBuildVector(MVT::v32i1, dl,
                                      StoredVal->ops().slice(0, 32));
      Lo = combinevXi1ConstantToInteger(Lo, DAG);
      SDValue Hi = DAG.getBuildVector(MVT::v32i1, dl,
                                      StoredVal->ops().slice(32, 32));
      Hi = combinevXi1ConstantToInteger(Hi, DAG);

      unsigned Alignment = St->getAlignment();

      SDValue Ptr0 = St->getBasePtr();
      SDValue Ptr1 = DAG.getMemBasePlusOffset(Ptr0, 4, dl);

      SDValue Ch0 =
          DAG.getStore(St->getChain(), dl, Lo, Ptr0, St->getPointerInfo(),
                       Alignment, St->getMemOperand()->getFlags());
      SDValue Ch1 =
          DAG.getStore(St->getChain(), dl, Hi, Ptr1,
                       St->getPointerInfo().getWithOffset(4),
                       MinAlign(Alignment, 4U),
                       St->getMemOperand()->getFlags());
      return DAG.getNode(ISD::TokenFactor, dl, MVT::Other, Ch0, Ch1);
    }

    StoredVal = combinevXi1ConstantToInteger(StoredVal, DAG);
    return DAG.getStore(St->getChain(), dl, StoredVal, St->getBasePtr(),
                        St->getPointerInfo(), St->getAlignment(),
                        St->getMemOperand()->getFlags());
  }

  // If we are saving a concatenation of two XMM registers and 32-byte stores
  // are slow, such as on Sandy Bridge, perform two 16-byte stores.
  bool Fast;
  unsigned AddressSpace = St->getAddressSpace();
  unsigned Alignment = St->getAlignment();
  if (VT.is256BitVector() && StVT == VT &&
      TLI.allowsMemoryAccess(*DAG.getContext(), DAG.getDataLayout(), VT,
                             AddressSpace, Alignment, &Fast) &&
      !Fast) {
    unsigned NumElems = VT.getVectorNumElements();
    if (NumElems < 2)
      return SDValue();

    SDValue Value0 = extract128BitVector(StoredVal, 0, DAG, dl);
    SDValue Value1 = extract128BitVector(StoredVal, NumElems / 2, DAG, dl);

    SDValue Ptr0 = St->getBasePtr();
    SDValue Ptr1 = DAG.getMemBasePlusOffset(Ptr0, 16, dl);

    SDValue Ch0 =
        DAG.getStore(St->getChain(), dl, Value0, Ptr0, St->getPointerInfo(),
                     Alignment, St->getMemOperand()->getFlags());
    SDValue Ch1 =
        DAG.getStore(St->getChain(), dl, Value1, Ptr1,
                     St->getPointerInfo().getWithOffset(16),
                     MinAlign(Alignment, 16U), St->getMemOperand()->getFlags());
    return DAG.getNode(ISD::TokenFactor, dl, MVT::Other, Ch0, Ch1);
  }

  // Optimize trunc store (of multiple scalars) to shuffle and store.
  // First, pack all of the elements in one place. Next, store to memory
  // in fewer chunks.
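  // For example (a sketch): a v8i32-to-v8i16 truncating store is shuffled so
  // that the eight i16 values sit in the low 128 bits of the register, then
  // written with one or two wide integer stores rather than eight scalar
  // stores.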
  if (St->isTruncatingStore() && VT.isVector()) {
    // Check if we can detect an AVG pattern from the truncation. If yes,
    // replace the trunc store by a normal store with the result of X86ISD::AVG
    // instruction.
    if (SDValue Avg = detectAVGPattern(St->getValue(), St->getMemoryVT(), DAG,
                                       Subtarget, dl))
      return DAG.getStore(St->getChain(), dl, Avg, St->getBasePtr(),
                          St->getPointerInfo(), St->getAlignment(),
                          St->getMemOperand()->getFlags());

    const TargetLowering &TLI = DAG.getTargetLoweringInfo();
    if (SDValue Val =
            detectAVX512SSatPattern(St->getValue(), St->getMemoryVT(),
                                    Subtarget, TLI))
      return EmitTruncSStore(true /* Signed saturation */, St->getChain(),
                             dl, Val, St->getBasePtr(),
                             St->getMemoryVT(), St->getMemOperand(), DAG);
    if (SDValue Val = detectAVX512USatPattern(St->getValue(), St->getMemoryVT(),
                                              DAG, dl, Subtarget, TLI))
      return EmitTruncSStore(false /* Unsigned saturation */, St->getChain(),
                             dl, Val, St->getBasePtr(),
                             St->getMemoryVT(), St->getMemOperand(), DAG);

    unsigned NumElems = VT.getVectorNumElements();
    assert(StVT != VT && "Cannot truncate to the same type");
    unsigned FromSz = VT.getScalarSizeInBits();
    unsigned ToSz = StVT.getScalarSizeInBits();

    // The truncating store is legal in some cases. For example
    // vpmovqb, vpmovqw, vpmovqd, vpmovdb, vpmovdw
    // are designated for truncate store.
    // In this case we don't need any further transformations.
    if (TLI.isTruncStoreLegalOrCustom(VT, StVT))
      return SDValue();

    // From/To sizes and ElemCount must be powers of two.
    if (!isPowerOf2_32(NumElems * FromSz * ToSz))
      return SDValue();
    // We are going to use the original vector elt for storing.
    // Accumulated smaller vector elements must be a multiple of the store size.
    if (0 != (NumElems * FromSz) % ToSz)
      return SDValue();

    unsigned SizeRatio = FromSz / ToSz;

    assert(SizeRatio * NumElems * ToSz == VT.getSizeInBits());

    // Create a type on which we perform the shuffle.
    EVT WideVecVT = EVT::getVectorVT(*DAG.getContext(),
                                     StVT.getScalarType(), NumElems * SizeRatio);

    assert(WideVecVT.getSizeInBits() == VT.getSizeInBits());

    SDValue WideVec = DAG.getBitcast(WideVecVT, St->getValue());
    SmallVector<int, 8> ShuffleVec(NumElems * SizeRatio, -1);
    for (unsigned i = 0; i != NumElems; ++i)
      ShuffleVec[i] = i * SizeRatio;

    // Can't shuffle using an illegal type.
    if (!TLI.isTypeLegal(WideVecVT))
      return SDValue();

    SDValue Shuff = DAG.getVectorShuffle(WideVecVT, dl, WideVec,
                                         DAG.getUNDEF(WideVecVT),
                                         ShuffleVec);
    // At this point all of the data is stored at the bottom of the
    // register. We now need to save it to memory.

    // Find the largest store unit.
    MVT StoreType = MVT::i8;
    for (MVT Tp : MVT::integer_valuetypes()) {
      if (TLI.isTypeLegal(Tp) && Tp.getSizeInBits() <= NumElems * ToSz)
        StoreType = Tp;
    }

    // On 32-bit systems, we can't save 64-bit integers. Try bitcasting to f64.
    if (TLI.isTypeLegal(MVT::f64) && StoreType.getSizeInBits() < 64 &&
        (64 <= NumElems * ToSz))
      StoreType = MVT::f64;

    // Bitcast the original vector into a vector of store-size units.
    EVT StoreVecVT =
        EVT::getVectorVT(*DAG.getContext(), StoreType,
                         VT.getSizeInBits() / StoreType.getSizeInBits());
    assert(StoreVecVT.getSizeInBits() == VT.getSizeInBits());
    SDValue ShuffWide = DAG.getBitcast(StoreVecVT, Shuff);
    SmallVector<SDValue, 8> Chains;
    SDValue Ptr = St->getBasePtr();

    // Perform one or more big stores into memory.
    for (unsigned i = 0, e = (ToSz * NumElems) / StoreType.getSizeInBits();
         i != e; ++i) {
      SDValue SubVec = DAG.getNode(ISD::EXTRACT_VECTOR_ELT, dl,
                                   StoreType, ShuffWide,
                                   DAG.getIntPtrConstant(i, dl));
      SDValue Ch =
          DAG.getStore(St->getChain(), dl, SubVec, Ptr, St->getPointerInfo(),
                       St->getAlignment(), St->getMemOperand()->getFlags());
      Ptr = DAG.getMemBasePlusOffset(Ptr, StoreType.getStoreSize(), dl);
      Chains.push_back(Ch);
    }

    return DAG.getNode(ISD::TokenFactor, dl, MVT::Other, Chains);
  }

  // Turn load->store of MMX types into GPR load/stores. This avoids clobbering
  // the FP state in cases where an emms may be missing.
  // A preferable solution to the general problem is to figure out the right
  // places to insert EMMS. This qualifies as a quick hack.

  // Similarly, turn load->store of i64 into double load/stores in 32-bit mode.
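  // For example (a sketch): on a 32-bit target with SSE2, an i64 value that is
  // loaded and immediately stored can travel through an f64 register (one
  // movsd load/store pair) instead of two i32 load/store pairs.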
  if (VT.getSizeInBits() != 64)
    return SDValue();

  const Function &F = DAG.getMachineFunction().getFunction();
  bool NoImplicitFloatOps = F.hasFnAttribute(Attribute::NoImplicitFloat);
  bool F64IsLegal =
      !Subtarget.useSoftFloat() && !NoImplicitFloatOps && Subtarget.hasSSE2();
  if ((VT.isVector() ||
       (VT == MVT::i64 && F64IsLegal && !Subtarget.is64Bit())) &&
      isa<LoadSDNode>(St->getValue()) &&
      !cast<LoadSDNode>(St->getValue())->isVolatile() &&
      St->getChain().hasOneUse() && !St->isVolatile()) {
    LoadSDNode *Ld = cast<LoadSDNode>(St->getValue().getNode());
    SmallVector<SDValue, 8> Ops;

    if (!ISD::isNormalLoad(Ld))
      return SDValue();

    // If this is not the MMX case, i.e. we are just turning i64 load/store
    // into f64 load/store, avoid the transformation if there are multiple
    // uses of the loaded value.
    if (!VT.isVector() && !Ld->hasNUsesOfValue(1, 0))
      return SDValue();

    SDLoc LdDL(Ld);
    SDLoc StDL(N);
    // If we are a 64-bit capable x86, lower to a single movq load/store pair.
    // Otherwise, if it's legal to use f64 SSE instructions, use f64 load/store
    // pair instead.
    if (Subtarget.is64Bit() || F64IsLegal) {
      MVT LdVT = (Subtarget.is64Bit() &&
                  (!VT.isFloatingPoint() || !F64IsLegal)) ? MVT::i64 : MVT::f64;
      SDValue NewLd = DAG.getLoad(LdVT, LdDL, Ld->getChain(), Ld->getBasePtr(),
                                  Ld->getMemOperand());

      // Make sure new load is placed in same chain order.
      DAG.makeEquivalentMemoryOrdering(Ld, NewLd);
      return DAG.getStore(St->getChain(), StDL, NewLd, St->getBasePtr(),
                          St->getMemOperand());
    }

    // Otherwise, lower to two pairs of 32-bit loads / stores.
    SDValue LoAddr = Ld->getBasePtr();
    SDValue HiAddr = DAG.getMemBasePlusOffset(LoAddr, 4, LdDL);

    SDValue LoLd = DAG.getLoad(MVT::i32, LdDL, Ld->getChain(), LoAddr,
                               Ld->getPointerInfo(), Ld->getAlignment(),
                               Ld->getMemOperand()->getFlags());
    SDValue HiLd = DAG.getLoad(MVT::i32, LdDL, Ld->getChain(), HiAddr,
                               Ld->getPointerInfo().getWithOffset(4),
                               MinAlign(Ld->getAlignment(), 4),
                               Ld->getMemOperand()->getFlags());
    // Make sure new loads are placed in same chain order.
    DAG.makeEquivalentMemoryOrdering(Ld, LoLd);
    DAG.makeEquivalentMemoryOrdering(Ld, HiLd);

    LoAddr = St->getBasePtr();
    HiAddr = DAG.getMemBasePlusOffset(LoAddr, 4, StDL);

    SDValue LoSt =
        DAG.getStore(St->getChain(), StDL, LoLd, LoAddr, St->getPointerInfo(),
                     St->getAlignment(), St->getMemOperand()->getFlags());
    SDValue HiSt = DAG.getStore(St->getChain(), StDL, HiLd, HiAddr,
                                St->getPointerInfo().getWithOffset(4),
                                MinAlign(St->getAlignment(), 4),
                                St->getMemOperand()->getFlags());
    return DAG.getNode(ISD::TokenFactor, StDL, MVT::Other, LoSt, HiSt);
  }

  // This is similar to the above case, but here we handle a scalar 64-bit
  // integer store that is extracted from a vector on a 32-bit target.
  // If we have SSE2, then we can treat it like a floating-point double
  // to get past legalization. The execution dependencies fixup pass will
  // choose the optimal machine instruction for the store if this really is
  // an integer or v2f32 rather than an f64.
  if (VT == MVT::i64 && F64IsLegal && !Subtarget.is64Bit() &&
      St->getOperand(1).getOpcode() == ISD::EXTRACT_VECTOR_ELT) {
    SDValue OldExtract = St->getOperand(1);
    SDValue ExtOp0 = OldExtract.getOperand(0);
    unsigned VecSize = ExtOp0.getValueSizeInBits();
    EVT VecVT = EVT::getVectorVT(*DAG.getContext(), MVT::f64, VecSize / 64);
    SDValue BitCast = DAG.getBitcast(VecVT, ExtOp0);
    SDValue NewExtract = DAG.getNode(ISD::EXTRACT_VECTOR_ELT, dl, MVT::f64,
                                     BitCast, OldExtract.getOperand(1));
    return DAG.getStore(St->getChain(), dl, NewExtract, St->getBasePtr(),
                        St->getPointerInfo(), St->getAlignment(),
                        St->getMemOperand()->getFlags());
  }

  return SDValue();
}

/// Return 'true' if this vector operation is "horizontal"
/// and return the operands for the horizontal operation in LHS and RHS. A
/// horizontal operation performs the binary operation on successive elements
/// of its first operand, then on successive elements of its second operand,
/// returning the resulting values in a vector. For example, if
///   A = < float a0, float a1, float a2, float a3 >
/// and
///   B = < float b0, float b1, float b2, float b3 >
/// then the result of doing a horizontal operation on A and B is
///   A horizontal-op B = < a0 op a1, a2 op a3, b0 op b1, b2 op b3 >.
/// In short, LHS and RHS are inspected to see if LHS op RHS is of the form
/// A horizontal-op B, for some already available A and B, and if so then LHS is
/// set to A, RHS to B, and the routine returns 'true'.
static bool isHorizontalBinOp(SDValue &LHS, SDValue &RHS, bool IsCommutative) {
  // If either operand is undef, bail out. The binop should be simplified.
  if (LHS.isUndef() || RHS.isUndef())
    return false;

  // Look for the following pattern:
  //   A = < float a0, float a1, float a2, float a3 >
  //   B = < float b0, float b1, float b2, float b3 >
  // and
  //   LHS = VECTOR_SHUFFLE A, B, <0, 2, 4, 6>
  //   RHS = VECTOR_SHUFFLE A, B, <1, 3, 5, 7>
  // then LHS op RHS = < a0 op a1, a2 op a3, b0 op b1, b2 op b3 >
  // which is A horizontal-op B.

  // At least one of the operands should be a vector shuffle.
  if (LHS.getOpcode() != ISD::VECTOR_SHUFFLE &&
      RHS.getOpcode() != ISD::VECTOR_SHUFFLE)
    return false;

  MVT VT = LHS.getSimpleValueType();
  assert((VT.is128BitVector() || VT.is256BitVector()) &&
         "Unsupported vector type for horizontal add/sub");

  // View LHS in the form
  //   LHS = VECTOR_SHUFFLE A, B, LMask
  // If LHS is not a shuffle, then pretend it is the identity shuffle:
  //   LHS = VECTOR_SHUFFLE LHS, undef, <0, 1, ..., N-1>
  // NOTE: A default initialized SDValue represents an UNDEF of type VT.
  unsigned NumElts = VT.getVectorNumElements();
  SDValue A, B;
  SmallVector<int, 16> LMask(NumElts);
  if (LHS.getOpcode() == ISD::VECTOR_SHUFFLE) {
    if (!LHS.getOperand(0).isUndef())
      A = LHS.getOperand(0);
    if (!LHS.getOperand(1).isUndef())
      B = LHS.getOperand(1);
    ArrayRef<int> Mask = cast<ShuffleVectorSDNode>(LHS.getNode())->getMask();
    llvm::copy(Mask, LMask.begin());
  } else {
    A = LHS;
    for (unsigned i = 0; i != NumElts; ++i)
      LMask[i] = i;
  }

  // Likewise, view RHS in the form
  //   RHS = VECTOR_SHUFFLE C, D, RMask
  SDValue C, D;
  SmallVector<int, 16> RMask(NumElts);
  if (RHS.getOpcode() == ISD::VECTOR_SHUFFLE) {
    if (!RHS.getOperand(0).isUndef())
      C = RHS.getOperand(0);
    if (!RHS.getOperand(1).isUndef())
      D = RHS.getOperand(1);
    ArrayRef<int> Mask = cast<ShuffleVectorSDNode>(RHS.getNode())->getMask();
    llvm::copy(Mask, RMask.begin());
  } else {
    C = RHS;
    for (unsigned i = 0; i != NumElts; ++i)
      RMask[i] = i;
  }

  // If A and B occur in reverse order in RHS, then canonicalize by commuting
  // RHS operands and shuffle mask.
  if (A != C) {
    std::swap(C, D);
    ShuffleVectorSDNode::commuteMask(RMask);
  }
  // Check that the shuffles are both shuffling the same vectors.
  if (!(A == C && B == D))
    return false;

  // LHS and RHS are now:
  //   LHS = shuffle A, B, LMask
  //   RHS = shuffle A, B, RMask
  // Check that the masks correspond to performing a horizontal operation.
  // AVX defines horizontal add/sub to operate independently on 128-bit lanes,
  // so we just repeat the inner loop if this is a 256-bit op.
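  // For example (a sketch): for a v8f32 horizontal add, each 128-bit lane is
  // validated independently, so the expected result layout is
  //   < a0+a1, a2+a3, b0+b1, b2+b3, a4+a5, a6+a7, b4+b5, b6+b7 >.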
  unsigned Num128BitChunks = VT.getSizeInBits() / 128;
  unsigned NumEltsPer128BitChunk = NumElts / Num128BitChunks;
  assert((NumEltsPer128BitChunk % 2 == 0) &&
         "Vector type should have an even number of elements in each lane");
  for (unsigned j = 0; j != NumElts; j += NumEltsPer128BitChunk) {
    for (unsigned i = 0; i != NumEltsPer128BitChunk; ++i) {
      // Ignore undefined components.
      int LIdx = LMask[i + j], RIdx = RMask[i + j];
      if (LIdx < 0 || RIdx < 0 ||
          (!A.getNode() && (LIdx < (int)NumElts || RIdx < (int)NumElts)) ||
          (!B.getNode() && (LIdx >= (int)NumElts || RIdx >= (int)NumElts)))
        continue;

      // The low half of the 128-bit result must choose from A.
      // The high half of the 128-bit result must choose from B,
      // unless B is undef. In that case, we are always choosing from A.
      unsigned NumEltsPer64BitChunk = NumEltsPer128BitChunk / 2;
      unsigned Src = B.getNode() ? i >= NumEltsPer64BitChunk : 0;

      // Check that successive elements are being operated on. If not, this is
      // not a horizontal operation.
      int Index = 2 * (i % NumEltsPer64BitChunk) + NumElts * Src + j;
      if (!(LIdx == Index && RIdx == Index + 1) &&
          !(IsCommutative && LIdx == Index + 1 && RIdx == Index))
        return false;
    }
  }

  LHS = A.getNode() ? A : B; // If A is 'UNDEF', use B for it.
  RHS = B.getNode() ? B : A; // If B is 'UNDEF', use A for it.
  return true;
}

/// Horizontal vector math instructions may be slower than normal math with
/// shuffles. Limit horizontal op codegen based on size/speed trade-offs, uarch
/// implementation, and likely shuffle complexity of the alternate sequence.
static bool shouldCombineToHorizontalOp(bool IsSingleSource, SelectionDAG &DAG,
                                        const X86Subtarget &Subtarget) {
  bool IsOptimizingSize = DAG.getMachineFunction().getFunction().optForSize();
  bool HasFastHOps = Subtarget.hasFastHorizontalOps();
  return !IsSingleSource || IsOptimizingSize || HasFastHOps;
}

/// Do target-specific dag combines on floating-point adds/subs.
static SDValue combineFaddFsub(SDNode *N, SelectionDAG &DAG,
                               const X86Subtarget &Subtarget) {
  EVT VT = N->getValueType(0);
  SDValue LHS = N->getOperand(0);
  SDValue RHS = N->getOperand(1);
  bool IsFadd = N->getOpcode() == ISD::FADD;
  assert((IsFadd || N->getOpcode() == ISD::FSUB) && "Wrong opcode");

  // Try to synthesize horizontal add/sub from adds/subs of shuffles.
  if (((Subtarget.hasSSE3() && (VT == MVT::v4f32 || VT == MVT::v2f64)) ||
       (Subtarget.hasAVX() && (VT == MVT::v8f32 || VT == MVT::v4f64))) &&
      isHorizontalBinOp(LHS, RHS, IsFadd) &&
      shouldCombineToHorizontalOp(LHS == RHS, DAG, Subtarget)) {
    auto NewOpcode = IsFadd ? X86ISD::FHADD : X86ISD::FHSUB;
    return DAG.getNode(NewOpcode, SDLoc(N), VT, LHS, RHS);
  }
  return SDValue();
}

/// Attempt to pre-truncate inputs to arithmetic ops if it will simplify
/// the codegen.
/// e.g. TRUNC( BINOP( X, Y ) ) --> BINOP( TRUNC( X ), TRUNC( Y ) )
/// TODO: This overlaps with the generic combiner's visitTRUNCATE. Remove
/// anything that is guaranteed to be transformed by DAGCombiner.
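/// For example (a sketch): (v4i32 (trunc (v4i64 (mul X, Y)))) can become
/// (v4i32 (mul (trunc X), (trunc Y))) when the v4i32 multiply is legal but
/// the v4i64 multiply would have to be expanded (no AVX512DQ).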
static SDValue combineTruncatedArithmetic(SDNode *N, SelectionDAG &DAG,
                                          const X86Subtarget &Subtarget,
                                          const SDLoc &DL) {
  assert(N->getOpcode() == ISD::TRUNCATE && "Wrong opcode");
  SDValue Src = N->getOperand(0);
  unsigned Opcode = Src.getOpcode();
  const TargetLowering &TLI = DAG.getTargetLoweringInfo();

  EVT VT = N->getValueType(0);
  EVT SrcVT = Src.getValueType();

  auto IsFreeTruncation = [VT](SDValue Op) {
    unsigned TruncSizeInBits = VT.getScalarSizeInBits();

    // See if this has been extended from a smaller/equal size to
    // the truncation size, allowing a truncation to combine with the extend.
    unsigned Opcode = Op.getOpcode();
    if ((Opcode == ISD::ANY_EXTEND || Opcode == ISD::SIGN_EXTEND ||
         Opcode == ISD::ZERO_EXTEND) &&
        Op.getOperand(0).getScalarValueSizeInBits() <= TruncSizeInBits)
      return true;

    // See if this is a single use constant which can be constant folded.
    SDValue BC = peekThroughOneUseBitcasts(Op);
    return ISD::isBuildVectorOfConstantSDNodes(BC.getNode());
  };

  auto TruncateArithmetic = [&](SDValue N0, SDValue N1) {
    SDValue Trunc0 = DAG.getNode(ISD::TRUNCATE, DL, VT, N0);
    SDValue Trunc1 = DAG.getNode(ISD::TRUNCATE, DL, VT, N1);
    return DAG.getNode(Opcode, DL, VT, Trunc0, Trunc1);
  };

  // Don't combine if the operation has other uses.
  if (!Src.hasOneUse())
    return SDValue();

  // Only support vector truncation for now.
  // TODO: i64 scalar math would benefit as well.
  if (!VT.isVector())
    return SDValue();

  // In most cases it's only worth pre-truncating if we're only facing the cost
  // of one truncation.
  // i.e. if one of the inputs will constant fold or the input is repeated.
  switch (Opcode) {
  case ISD::AND:
  case ISD::XOR:
  case ISD::OR: {
    SDValue Op0 = Src.getOperand(0);
    SDValue Op1 = Src.getOperand(1);
    if (TLI.isOperationLegalOrPromote(Opcode, VT) &&
        (Op0 == Op1 || IsFreeTruncation(Op0) || IsFreeTruncation(Op1)))
      return TruncateArithmetic(Op0, Op1);
    break;
  }

  case ISD::MUL:
    // X86 is rubbish at scalar and vector i64 multiplies (until AVX512DQ) -
    // it's better to truncate if we have the chance.
    if (SrcVT.getScalarType() == MVT::i64 && TLI.isOperationLegal(Opcode, VT) &&
        !TLI.isOperationLegal(Opcode, SrcVT))
      return TruncateArithmetic(Src.getOperand(0), Src.getOperand(1));
    LLVM_FALLTHROUGH;
  case ISD::ADD: {
    SDValue Op0 = Src.getOperand(0);
    SDValue Op1 = Src.getOperand(1);
    if (TLI.isOperationLegal(Opcode, VT) &&
        (Op0 == Op1 || IsFreeTruncation(Op0) || IsFreeTruncation(Op1)))
      return TruncateArithmetic(Op0, Op1);
    break;
  }
  case ISD::SUB: {
    // TODO: ISD::SUB We are conservative and require both sides to be freely
    // truncatable to avoid interfering with combineSubToSubus.
    SDValue Op0 = Src.getOperand(0);
    SDValue Op1 = Src.getOperand(1);
    if (TLI.isOperationLegal(Opcode, VT) &&
        (Op0 == Op1 || (IsFreeTruncation(Op0) && IsFreeTruncation(Op1))))
      return TruncateArithmetic(Op0, Op1);
    break;
  }
  }

  return SDValue();
}

/// Truncate using ISD::AND mask and X86ISD::PACKUS.
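/// For example (a sketch): to truncate v8i32 to v8i16, each 128-bit subvector
/// is first ANDed with 0xFFFF per lane so that PACKUS's unsigned saturation
/// cannot trigger, then the lanes are packed down to i16.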
static SDValue combineVectorTruncationWithPACKUS(SDNode *N, const SDLoc &DL,
                                                 const X86Subtarget &Subtarget,
                                                 SelectionDAG &DAG) {
  SDValue In = N->getOperand(0);
  EVT InVT = In.getValueType();
  EVT InSVT = InVT.getVectorElementType();
  EVT OutVT = N->getValueType(0);
  EVT OutSVT = OutVT.getVectorElementType();

  // Split a long vector into vectors of legal type and mask to unset all bits
  // that won't appear in the result to prevent saturation.
  // TODO: We should be doing this at the maximum legal size, but this causes
  // regressions where we're concatenating back to max width just to perform
  // the AND and then extracting back again.
  unsigned NumSubRegs = InVT.getSizeInBits() / 128;
  unsigned NumSubRegElts = 128 / InSVT.getSizeInBits();
  EVT SubRegVT = EVT::getVectorVT(*DAG.getContext(), InSVT, NumSubRegElts);
  SmallVector<SDValue, 8> SubVecs(NumSubRegs);

  APInt Mask =
      APInt::getLowBitsSet(InSVT.getSizeInBits(), OutSVT.getSizeInBits());
  SDValue MaskVal = DAG.getConstant(Mask, DL, SubRegVT);

  for (unsigned i = 0; i < NumSubRegs; i++) {
    SDValue Sub = DAG.getNode(ISD::EXTRACT_SUBVECTOR, DL, SubRegVT, In,
                              DAG.getIntPtrConstant(i * NumSubRegElts, DL));
    SubVecs[i] = DAG.getNode(ISD::AND, DL, SubRegVT, Sub, MaskVal);
  }
  In = DAG.getNode(ISD::CONCAT_VECTORS, DL, InVT, SubVecs);

  return truncateVectorWithPACK(X86ISD::PACKUS, OutVT, In, DL, DAG, Subtarget);
}

/// Truncate a group of v4i32 into v8i16 using X86ISD::PACKSS.
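/// For example (a sketch): after the sign_extend_inreg from i16 below, every
/// i32 lane holds a value in [-32768, 32767], so PACKSS's signed saturation
/// passes the low 16 bits through unchanged.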
static SDValue combineVectorTruncationWithPACKSS(SDNode *N, const SDLoc &DL,
                                                 const X86Subtarget &Subtarget,
                                                 SelectionDAG &DAG) {
  SDValue In = N->getOperand(0);
  EVT InVT = In.getValueType();
  EVT OutVT = N->getValueType(0);
  In = DAG.getNode(ISD::SIGN_EXTEND_INREG, DL, InVT, In,
                   DAG.getValueType(OutVT));
  return truncateVectorWithPACK(X86ISD::PACKSS, OutVT, In, DL, DAG, Subtarget);
}

/// This function transforms truncation from vXi32/vXi64 to vXi8/vXi16 into
/// X86ISD::PACKUS/X86ISD::PACKSS operations. We do it here because after type
/// legalization the truncation will be translated into a BUILD_VECTOR with each
/// element that is extracted from a vector and then truncated, and it is
/// difficult to do this optimization based on them.
static SDValue combineVectorTruncation(SDNode *N, SelectionDAG &DAG,
                                       const X86Subtarget &Subtarget) {
  EVT OutVT = N->getValueType(0);
  if (!OutVT.isVector())
    return SDValue();

  SDValue In = N->getOperand(0);
  if (!In.getValueType().isSimple())
    return SDValue();

  EVT InVT = In.getValueType();
  unsigned NumElems = OutVT.getVectorNumElements();

  // TODO: On AVX2, the behavior of X86ISD::PACKUS is different from that on
  // SSE2, and we need to take care of it specially.
  // AVX512 provides vpmovdb.
  if (!Subtarget.hasSSE2() || Subtarget.hasAVX2())
    return SDValue();

  EVT OutSVT = OutVT.getVectorElementType();
  EVT InSVT = InVT.getVectorElementType();
  if (!((InSVT == MVT::i32 || InSVT == MVT::i64) &&
        (OutSVT == MVT::i8 || OutSVT == MVT::i16) && isPowerOf2_32(NumElems) &&
        NumElems >= 8))
    return SDValue();

  // SSSE3's pshufb results in fewer instructions in the cases below.
  if (Subtarget.hasSSSE3() && NumElems == 8 &&
      ((OutSVT == MVT::i8 && InSVT != MVT::i64) ||
       (InSVT == MVT::i32 && OutSVT == MVT::i16)))
    return SDValue();

  SDLoc DL(N);
  // SSE2 provides PACKUS for only 2 x v8i16 -> v16i8 and SSE4.1 provides PACKUS
  // for 2 x v4i32 -> v8i16. For SSSE3 and below, we need to use PACKSS to
  // truncate 2 x v4i32 to v8i16.
  if (Subtarget.hasSSE41() || OutSVT == MVT::i8)
    return combineVectorTruncationWithPACKUS(N, DL, Subtarget, DAG);
  if (InSVT == MVT::i32)
    return combineVectorTruncationWithPACKSS(N, DL, Subtarget, DAG);

  return SDValue();
}

/// This function transforms vector truncation of 'extended sign-bits' or
/// 'extended zero-bits' values: vXi16/vXi32/vXi64 to vXi8/vXi16/vXi32 becomes
/// X86ISD::PACKSS/PACKUS operations.
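/// For example (a sketch): a v8i32 comparison result is all-ones or all-zeros
/// in each lane, so it has 32 sign bits and can be packed down to v8i16 with
/// PACKSS without losing anything.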
static SDValue combineVectorSignBitsTruncation(SDNode *N, const SDLoc &DL,
                                               SelectionDAG &DAG,
                                               const X86Subtarget &Subtarget) {
  // Requires SSE2 but AVX512 has fast truncate.
  if (!Subtarget.hasSSE2() || Subtarget.hasAVX512())
    return SDValue();

  if (!N->getValueType(0).isVector() || !N->getValueType(0).isSimple())
    return SDValue();

  SDValue In = N->getOperand(0);
  if (!In.getValueType().isSimple())
    return SDValue();

  MVT VT = N->getValueType(0).getSimpleVT();
  MVT SVT = VT.getScalarType();

  MVT InVT = In.getValueType().getSimpleVT();
  MVT InSVT = InVT.getScalarType();

  // Check we have a truncation suited for PACKSS/PACKUS.
  if (!VT.is128BitVector() && !VT.is256BitVector())
    return SDValue();
  if (SVT != MVT::i8 && SVT != MVT::i16 && SVT != MVT::i32)
    return SDValue();
  if (InSVT != MVT::i16 && InSVT != MVT::i32 && InSVT != MVT::i64)
    return SDValue();

  unsigned NumPackedSignBits = std::min<unsigned>(SVT.getSizeInBits(), 16);
  unsigned NumPackedZeroBits = Subtarget.hasSSE41() ? NumPackedSignBits : 8;

  // Use PACKUS if the input has zero-bits that extend all the way to the
  // packed/truncated value. e.g. masks, zext_in_reg, etc.
  KnownBits Known = DAG.computeKnownBits(In);
  unsigned NumLeadingZeroBits = Known.countMinLeadingZeros();
  if (NumLeadingZeroBits >= (InSVT.getSizeInBits() - NumPackedZeroBits))
    return truncateVectorWithPACK(X86ISD::PACKUS, VT, In, DL, DAG, Subtarget);

  // Use PACKSS if the input has sign-bits that extend all the way to the
  // packed/truncated value. e.g. Comparison result, sext_in_reg, etc.
  unsigned NumSignBits = DAG.ComputeNumSignBits(In);
  if (NumSignBits > (InSVT.getSizeInBits() - NumPackedSignBits))
    return truncateVectorWithPACK(X86ISD::PACKSS, VT, In, DL, DAG, Subtarget);

  return SDValue();
}

// Try to form a MULHU or MULHS node by looking for
//   (trunc (srl (mul ext, ext), 16))
// TODO: This is X86 specific because we want to be able to handle wide types
// before type legalization. But we can only do it if the vector will be
// legalized via widening/splitting. Type legalization can't handle promotion
// of a MULHU/MULHS. There isn't a way to convey this to the generic DAG
// combiner.
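// For example (a sketch): if X and Y are v8i16,
//   (v8i16 (trunc (srl (mul (sext X), (sext Y)), 16)))
// computes exactly the high halves of the 16x16->32 multiplies, i.e.
// (mulhs X, Y), which selects to pmulhw.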
static SDValue combinePMULH(SDValue Src, EVT VT, const SDLoc &DL,
                            SelectionDAG &DAG, const X86Subtarget &Subtarget) {
  // First instruction should be a right shift of a multiply.
  if (Src.getOpcode() != ISD::SRL ||
      Src.getOperand(0).getOpcode() != ISD::MUL)
    return SDValue();

  if (!Subtarget.hasSSE2())
    return SDValue();

  // Only handle vXi16 types that are at least 128 bits unless they will be
  // widened.
  if (!VT.isVector() || VT.getVectorElementType() != MVT::i16 ||
      (!ExperimentalVectorWideningLegalization &&
       VT.getVectorNumElements() < 8))
    return SDValue();

  // Input type should be vXi32.
  EVT InVT = Src.getValueType();
  if (InVT.getVectorElementType() != MVT::i32)
    return SDValue();

  // Need a shift by 16.
  APInt ShiftAmt;
  if (!ISD::isConstantSplatVector(Src.getOperand(1).getNode(), ShiftAmt) ||
      ShiftAmt != 16)
    return SDValue();

  SDValue LHS = Src.getOperand(0).getOperand(0);
  SDValue RHS = Src.getOperand(0).getOperand(1);

  unsigned ExtOpc = LHS.getOpcode();
  if ((ExtOpc != ISD::SIGN_EXTEND && ExtOpc != ISD::ZERO_EXTEND) ||
      RHS.getOpcode() != ExtOpc)
    return SDValue();

  // Peek through the extends.
  LHS = LHS.getOperand(0);
  RHS = RHS.getOperand(0);

  // Ensure the input types match.
  if (LHS.getValueType() != VT || RHS.getValueType() != VT)
    return SDValue();

  unsigned Opc = ExtOpc == ISD::SIGN_EXTEND ? ISD::MULHS : ISD::MULHU;
  return DAG.getNode(Opc, DL, VT, LHS, RHS);
}

// Attempt to match PMADDUBSW, which multiplies corresponding unsigned bytes
// from one vector with signed bytes from another vector, adds together
// adjacent pairs of 16-bit products, and saturates the result before
// truncating to 16 bits.
//
// Which looks something like this:
// (i16 (ssat (add (mul (zext (even elts (i8 A))), (sext (even elts (i8 B)))),
//                 (mul (zext (odd elts (i8 A)), (sext (odd elts (i8 B))))))))
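//
// A worked example (illustrative values only): with unsigned bytes
// A[0] = 200, A[1] = 100 and signed bytes B[0] = -3, B[1] = 2, lane 0 is
// ssat(200 * -3 + 100 * 2) = ssat(-400) = -400; a pair like 255 * 127 +
// 255 * 127 = 64770 would saturate to 32767.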
38153 | static SDValue detectPMADDUBSW(SDValue In, EVT VT, SelectionDAG &DAG, | |||
38154 | const X86Subtarget &Subtarget, | |||
38155 | const SDLoc &DL) { | |||
38156 | if (!VT.isVector() || !Subtarget.hasSSSE3()) | |||
38157 | return SDValue(); | |||
38158 | ||||
38159 | unsigned NumElems = VT.getVectorNumElements(); | |||
38160 | EVT ScalarVT = VT.getVectorElementType(); | |||
38161 | if (ScalarVT != MVT::i16 || NumElems < 8 || !isPowerOf2_32(NumElems)) | |||
38162 | return SDValue(); | |||
38163 | ||||
38164 | SDValue SSatVal = detectSSatPattern(In, VT); | |||
38165 | if (!SSatVal || SSatVal.getOpcode() != ISD::ADD) | |||
38166 | return SDValue(); | |||
38167 | ||||
38168 | // Ok this is a signed saturation of an ADD. See if this ADD is adding pairs | |||
38169 | // of multiplies from even/odd elements. | |||
38170 | SDValue N0 = SSatVal.getOperand(0); | |||
38171 | SDValue N1 = SSatVal.getOperand(1); | |||
38172 | ||||
38173 | if (N0.getOpcode() != ISD::MUL || N1.getOpcode() != ISD::MUL) | |||
38174 | return SDValue(); | |||
38175 | ||||
38176 | SDValue N00 = N0.getOperand(0); | |||
38177 | SDValue N01 = N0.getOperand(1); | |||
38178 | SDValue N10 = N1.getOperand(0); | |||
38179 | SDValue N11 = N1.getOperand(1); | |||
38180 | ||||
38181 | // TODO: Handle constant vectors and use knownbits/computenumsignbits? | |||
38182 | // Canonicalize zero_extend to LHS. | |||
38183 | if (N01.getOpcode() == ISD::ZERO_EXTEND) | |||
38184 | std::swap(N00, N01); | |||
38185 | if (N11.getOpcode() == ISD::ZERO_EXTEND) | |||
38186 | std::swap(N10, N11); | |||
38187 | ||||
38188 | // Ensure we have a zero_extend and a sign_extend. | |||
38189 | if (N00.getOpcode() != ISD::ZERO_EXTEND || | |||
38190 | N01.getOpcode() != ISD::SIGN_EXTEND || | |||
38191 | N10.getOpcode() != ISD::ZERO_EXTEND || | |||
38192 | N11.getOpcode() != ISD::SIGN_EXTEND) | |||
38193 | return SDValue(); | |||
38194 | ||||
38195 | // Peek through the extends. | |||
38196 | N00 = N00.getOperand(0); | |||
38197 | N01 = N01.getOperand(0); | |||
38198 | N10 = N10.getOperand(0); | |||
38199 | N11 = N11.getOperand(0); | |||
38200 | ||||
38201 | // Ensure the extend is from vXi8. | |||
38202 | if (N00.getValueType().getVectorElementType() != MVT::i8 || | |||
38203 | N01.getValueType().getVectorElementType() != MVT::i8 || | |||
38204 | N10.getValueType().getVectorElementType() != MVT::i8 || | |||
38205 | N11.getValueType().getVectorElementType() != MVT::i8) | |||
38206 | return SDValue(); | |||
38207 | ||||
38208 | // All inputs should be build_vectors. | |||
38209 | if (N00.getOpcode() != ISD::BUILD_VECTOR || | |||
38210 | N01.getOpcode() != ISD::BUILD_VECTOR || | |||
38211 | N10.getOpcode() != ISD::BUILD_VECTOR || | |||
38212 | N11.getOpcode() != ISD::BUILD_VECTOR) | |||
38213 | return SDValue(); | |||
38214 | ||||
38215 | // N00/N10 are zero extended. N01/N11 are sign extended. | |||
38216 | ||||
38217 | // For each element, we need to ensure that an odd element from one vector is | |||
38218 | // multiplied by the odd element of the other vector, and that the even | |||
38219 | // element from one of the same vectors is multiplied by the even element | |||
38220 | // from the other vector. So for each element i, we need to make sure this | |||
38221 | // operation is being performed: | |||
38222 | // A[2 * i] * B[2 * i] + A[2 * i + 1] * B[2 * i + 1] | |||
38223 | SDValue ZExtIn, SExtIn; | |||
38224 | for (unsigned i = 0; i != NumElems; ++i) { | |||
38225 | SDValue N00Elt = N00.getOperand(i); | |||
38226 | SDValue N01Elt = N01.getOperand(i); | |||
38227 | SDValue N10Elt = N10.getOperand(i); | |||
38228 | SDValue N11Elt = N11.getOperand(i); | |||
38229 | // TODO: Be more tolerant to undefs. | |||
38230 | if (N00Elt.getOpcode() != ISD::EXTRACT_VECTOR_ELT || | |||
38231 | N01Elt.getOpcode() != ISD::EXTRACT_VECTOR_ELT || | |||
38232 | N10Elt.getOpcode() != ISD::EXTRACT_VECTOR_ELT || | |||
38233 | N11Elt.getOpcode() != ISD::EXTRACT_VECTOR_ELT) | |||
38234 | return SDValue(); | |||
38235 | auto *ConstN00Elt = dyn_cast<ConstantSDNode>(N00Elt.getOperand(1)); | |||
38236 | auto *ConstN01Elt = dyn_cast<ConstantSDNode>(N01Elt.getOperand(1)); | |||
38237 | auto *ConstN10Elt = dyn_cast<ConstantSDNode>(N10Elt.getOperand(1)); | |||
38238 | auto *ConstN11Elt = dyn_cast<ConstantSDNode>(N11Elt.getOperand(1)); | |||
38239 | if (!ConstN00Elt || !ConstN01Elt || !ConstN10Elt || !ConstN11Elt) | |||
38240 | return SDValue(); | |||
38241 | unsigned IdxN00 = ConstN00Elt->getZExtValue(); | |||
38242 | unsigned IdxN01 = ConstN01Elt->getZExtValue(); | |||
38243 | unsigned IdxN10 = ConstN10Elt->getZExtValue(); | |||
38244 | unsigned IdxN11 = ConstN11Elt->getZExtValue(); | |||
38245 | // Add is commutative so indices can be reordered. | |||
38246 | if (IdxN00 > IdxN10) { | |||
38247 | std::swap(IdxN00, IdxN10); | |||
38248 | std::swap(IdxN01, IdxN11); | |||
38249 | } | |||
38250 | // N0 indices must be the even elements. N1 indices must be the next odd elements. | |||
38251 | if (IdxN00 != 2 * i || IdxN10 != 2 * i + 1 || | |||
38252 | IdxN01 != 2 * i || IdxN11 != 2 * i + 1) | |||
38253 | return SDValue(); | |||
38254 | SDValue N00In = N00Elt.getOperand(0); | |||
38255 | SDValue N01In = N01Elt.getOperand(0); | |||
38256 | SDValue N10In = N10Elt.getOperand(0); | |||
38257 | SDValue N11In = N11Elt.getOperand(0); | |||
38258 | // The first time we find an input, capture it. | |||
38259 | if (!ZExtIn) { | |||
38260 | ZExtIn = N00In; | |||
38261 | SExtIn = N01In; | |||
38262 | } | |||
38263 | if (ZExtIn != N00In || SExtIn != N01In || | |||
38264 | ZExtIn != N10In || SExtIn != N11In) | |||
38265 | return SDValue(); | |||
38266 | } | |||
38267 | ||||
38268 | auto PMADDBuilder = [](SelectionDAG &DAG, const SDLoc &DL, | |||
38269 | ArrayRef<SDValue> Ops) { | |||
38270 | // Shrink by adding truncate nodes and let DAGCombine fold with the | |||
38271 | // sources. | |||
38272 | EVT InVT = Ops[0].getValueType(); | |||
38273 | assert(InVT.getScalarType() == MVT::i8 && | |||
38274 |        "Unexpected scalar element type"); | |||
38275 | assert(InVT == Ops[1].getValueType() && "Operands' types mismatch"); | |||
38276 | EVT ResVT = EVT::getVectorVT(*DAG.getContext(), MVT::i16, | |||
38277 | InVT.getVectorNumElements() / 2); | |||
38278 | return DAG.getNode(X86ISD::VPMADDUBSW, DL, ResVT, Ops[0], Ops[1]); | |||
38279 | }; | |||
38280 | return SplitOpsAndApply(DAG, Subtarget, DL, VT, { ZExtIn, SExtIn }, | |||
38281 | PMADDBuilder); | |||
38282 | } | |||
38283 | ||||
38284 | static SDValue combineTruncate(SDNode *N, SelectionDAG &DAG, | |||
38285 | const X86Subtarget &Subtarget) { | |||
38286 | EVT VT = N->getValueType(0); | |||
38287 | SDValue Src = N->getOperand(0); | |||
38288 | SDLoc DL(N); | |||
38289 | ||||
38290 | // Attempt to pre-truncate inputs to arithmetic ops instead. | |||
38291 | if (SDValue V = combineTruncatedArithmetic(N, DAG, Subtarget, DL)) | |||
38292 | return V; | |||
38293 | ||||
38294 | // Try to detect AVG pattern first. | |||
38295 | if (SDValue Avg = detectAVGPattern(Src, VT, DAG, Subtarget, DL)) | |||
38296 | return Avg; | |||
38297 | ||||
38298 | // Try to detect PMADD | |||
38299 | if (SDValue PMAdd = detectPMADDUBSW(Src, VT, DAG, Subtarget, DL)) | |||
38300 | return PMAdd; | |||
38301 | ||||
38302 | // Try to combine truncation with signed/unsigned saturation. | |||
38303 | if (SDValue Val = combineTruncateWithSat(Src, VT, DL, DAG, Subtarget)) | |||
38304 | return Val; | |||
38305 | ||||
38306 | // Try to combine PMULHUW/PMULHW for vXi16. | |||
38307 | if (SDValue V = combinePMULH(Src, VT, DL, DAG, Subtarget)) | |||
38308 | return V; | |||
38309 | ||||
38310 | // Detect a truncation to i32 of a bitcast where the bitcast source is a | |||
38311 | // direct MMX result, and lower it to MMX_MOVD2W. | |||
38312 | if (Src.getOpcode() == ISD::BITCAST && VT == MVT::i32) { | |||
38313 | SDValue BCSrc = Src.getOperand(0); | |||
38314 | if (BCSrc.getValueType() == MVT::x86mmx) | |||
38315 | return DAG.getNode(X86ISD::MMX_MOVD2W, DL, MVT::i32, BCSrc); | |||
38316 | } | |||
38317 | ||||
38318 | // Try to truncate extended sign/zero bits with PACKSS/PACKUS. | |||
38319 | if (SDValue V = combineVectorSignBitsTruncation(N, DL, DAG, Subtarget)) | |||
38320 | return V; | |||
38321 | ||||
38322 | return combineVectorTruncation(N, DAG, Subtarget); | |||
38323 | } | |||
38324 | ||||
38325 | /// Returns the negated value if the node \p N flips sign of FP value. | |||
38326 | /// | |||
38327 | /// An FP-negation node may have different forms: FNEG(x), FXOR(x, 0x80000000) | |||
38328 | /// or FSUB(0, x). | |||
38329 | /// AVX512F does not have FXOR, so FNEG is lowered as | |||
38330 | /// (bitcast (xor (bitcast x), (bitcast ConstantFP(0x80000000)))). | |||
38331 | /// In this case we go through all bitcasts. | |||
38332 | /// This also recognizes splat of a negated value and returns the splat of that | |||
38333 | /// value. | |||
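/// For example (illustrative): given N = (xor (bitcast x), (bitcast
/// <0x80000000 x 4>)) with x of type v4f32, only the sign bits are flipped,
/// so this returns x as the value being negated.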
38334 | static SDValue isFNEG(SelectionDAG &DAG, SDNode *N) { | |||
38335 | if (N->getOpcode() == ISD::FNEG) | |||
38336 | return N->getOperand(0); | |||
38337 | ||||
38338 | SDValue Op = peekThroughBitcasts(SDValue(N, 0)); | |||
38339 | auto VT = Op->getValueType(0); | |||
38340 | if (auto SVOp = dyn_cast<ShuffleVectorSDNode>(Op.getNode())) { | |||
38341 | // For a VECTOR_SHUFFLE(VEC1, VEC2), if the VEC2 is undef, then the negate | |||
38342 | // of this is VECTOR_SHUFFLE(-VEC1, UNDEF). The mask can be anything here. | |||
38343 | if (!SVOp->getOperand(1).isUndef()) | |||
38344 | return SDValue(); | |||
38345 | if (SDValue NegOp0 = isFNEG(DAG, SVOp->getOperand(0).getNode())) | |||
38346 | return DAG.getVectorShuffle(VT, SDLoc(SVOp), NegOp0, DAG.getUNDEF(VT), | |||
38347 | SVOp->getMask()); | |||
38348 | return SDValue(); | |||
38349 | } | |||
38350 | unsigned Opc = Op.getOpcode(); | |||
38351 | if (Opc == ISD::INSERT_VECTOR_ELT) { | |||
38352 | // Negate of INSERT_VECTOR_ELT(UNDEF, V, INDEX) is INSERT_VECTOR_ELT(UNDEF, | |||
38353 | // -V, INDEX). | |||
38354 | SDValue InsVector = Op.getOperand(0); | |||
38355 | SDValue InsVal = Op.getOperand(1); | |||
38356 | if (!InsVector.isUndef()) | |||
38357 | return SDValue(); | |||
38358 | if (SDValue NegInsVal = isFNEG(DAG, InsVal.getNode())) | |||
38359 | return DAG.getNode(ISD::INSERT_VECTOR_ELT, SDLoc(Op), VT, InsVector, | |||
38360 | NegInsVal, Op.getOperand(2)); | |||
38361 | return SDValue(); | |||
38362 | } | |||
38363 | ||||
38364 | if (Opc != X86ISD::FXOR && Opc != ISD::XOR && Opc != ISD::FSUB) | |||
38365 | return SDValue(); | |||
38366 | ||||
38367 | SDValue Op1 = peekThroughBitcasts(Op.getOperand(1)); | |||
38368 | if (!Op1.getValueType().isFloatingPoint()) | |||
38369 | return SDValue(); | |||
38370 | ||||
38371 | SDValue Op0 = peekThroughBitcasts(Op.getOperand(0)); | |||
38372 | ||||
38373 | // For XOR and FXOR, we want to check if constant bits of Op1 are sign bit | |||
38374 | // masks. For FSUB, we have to check if constant bits of Op0 are sign bit | |||
38375 | // masks and hence we swap the operands. | |||
38376 | if (Opc == ISD::FSUB) | |||
38377 | std::swap(Op0, Op1); | |||
38378 | ||||
38379 | APInt UndefElts; | |||
38380 | SmallVector<APInt, 16> EltBits; | |||
38381 | // Extract constant bits and see if they are all sign bit masks. Ignore the | |||
38382 | // undef elements. | |||
38383 | if (getTargetConstantBitsFromNode(Op1, Op1.getScalarValueSizeInBits(), | |||
38384 | UndefElts, EltBits, | |||
38385 | /* AllowWholeUndefs */ true, | |||
38386 | /* AllowPartialUndefs */ false)) { | |||
38387 | for (unsigned I = 0, E = EltBits.size(); I < E; I++) | |||
38388 | if (!UndefElts[I] && !EltBits[I].isSignMask()) | |||
38389 | return SDValue(); | |||
38390 | ||||
38391 | return peekThroughBitcasts(Op0); | |||
38392 | } | |||
38393 | ||||
38394 | return SDValue(); | |||
38395 | } | |||
38396 | ||||
38397 | /// Do target-specific dag combines on floating point negations. | |||
38398 | static SDValue combineFneg(SDNode *N, SelectionDAG &DAG, | |||
38399 | const X86Subtarget &Subtarget) { | |||
38400 | EVT OrigVT = N->getValueType(0); | |||
38401 | SDValue Arg = isFNEG(DAG, N); | |||
38402 | if (!Arg) | |||
38403 | return SDValue(); | |||
38404 | ||||
38405 | EVT VT = Arg.getValueType(); | |||
38406 | EVT SVT = VT.getScalarType(); | |||
38407 | SDLoc DL(N); | |||
38408 | ||||
38409 | // Let legalize expand this if it isn't a legal type yet. | |||
38410 | if (!DAG.getTargetLoweringInfo().isTypeLegal(VT)) | |||
38411 | return SDValue(); | |||
38412 | ||||
38413 | // If we're negating a FMUL node on a target with FMA, then we can avoid the | |||
38414 | // use of a constant by performing (-0 - A*B) instead. | |||
38415 | // FIXME: Check rounding control flags as well once it becomes available. | |||
38416 | if (Arg.getOpcode() == ISD::FMUL && (SVT == MVT::f32 || SVT == MVT::f64) && | |||
38417 | Arg->getFlags().hasNoSignedZeros() && Subtarget.hasAnyFMA()) { | |||
38418 | SDValue Zero = DAG.getConstantFP(0.0, DL, VT); | |||
38419 | SDValue NewNode = DAG.getNode(X86ISD::FNMSUB, DL, VT, Arg.getOperand(0), | |||
38420 | Arg.getOperand(1), Zero); | |||
38421 | return DAG.getBitcast(OrigVT, NewNode); | |||
38422 | } | |||
38423 | ||||
38424 | // If we're negating an FMA node, then we can adjust the | |||
38425 | // instruction to include the extra negation. | |||
38426 | unsigned NewOpcode = 0; | |||
38427 | if (Arg.hasOneUse() && Subtarget.hasAnyFMA()) { | |||
38428 | switch (Arg.getOpcode()) { | |||
38429 | case ISD::FMA: NewOpcode = X86ISD::FNMSUB; break; | |||
38430 | case X86ISD::FMSUB: NewOpcode = X86ISD::FNMADD; break; | |||
38431 | case X86ISD::FNMADD: NewOpcode = X86ISD::FMSUB; break; | |||
38432 | case X86ISD::FNMSUB: NewOpcode = ISD::FMA; break; | |||
38433 | case X86ISD::FMADD_RND: NewOpcode = X86ISD::FNMSUB_RND; break; | |||
38434 | case X86ISD::FMSUB_RND: NewOpcode = X86ISD::FNMADD_RND; break; | |||
38435 | case X86ISD::FNMADD_RND: NewOpcode = X86ISD::FMSUB_RND; break; | |||
38436 | case X86ISD::FNMSUB_RND: NewOpcode = X86ISD::FMADD_RND; break; | |||
38437 | // We can't handle a scalar intrinsic node here because it would only | |||
38438 | // invert one element and not the whole vector. But we could try to handle | |||
38439 | // a negation of the lower element only. | |||
38440 | } | |||
38441 | } | |||
38442 | if (NewOpcode) | |||
38443 | return DAG.getBitcast(OrigVT, DAG.getNode(NewOpcode, DL, VT, | |||
38444 | Arg.getNode()->ops())); | |||
38445 | ||||
38446 | return SDValue(); | |||
38447 | } | |||
38448 | ||||
38449 | static SDValue lowerX86FPLogicOp(SDNode *N, SelectionDAG &DAG, | |||
38450 | const X86Subtarget &Subtarget) { | |||
38451 | MVT VT = N->getSimpleValueType(0); | |||
38452 | // If we have integer vector types available, use the integer opcodes. | |||
38453 | if (!VT.isVector() || !Subtarget.hasSSE2()) | |||
38454 | return SDValue(); | |||
38455 | ||||
38456 | SDLoc dl(N); | |||
38457 | ||||
38458 | unsigned IntBits = VT.getScalarSizeInBits(); | |||
38459 | MVT IntSVT = MVT::getIntegerVT(IntBits); | |||
38460 | MVT IntVT = MVT::getVectorVT(IntSVT, VT.getSizeInBits() / IntBits); | |||
38461 | ||||
38462 | SDValue Op0 = DAG.getBitcast(IntVT, N->getOperand(0)); | |||
38463 | SDValue Op1 = DAG.getBitcast(IntVT, N->getOperand(1)); | |||
38464 | unsigned IntOpcode; | |||
38465 | switch (N->getOpcode()) { | |||
38466 | default: llvm_unreachable("Unexpected FP logic op"); | |||
38467 | case X86ISD::FOR: IntOpcode = ISD::OR; break; | |||
38468 | case X86ISD::FXOR: IntOpcode = ISD::XOR; break; | |||
38469 | case X86ISD::FAND: IntOpcode = ISD::AND; break; | |||
38470 | case X86ISD::FANDN: IntOpcode = X86ISD::ANDNP; break; | |||
38471 | } | |||
38472 | SDValue IntOp = DAG.getNode(IntOpcode, dl, IntVT, Op0, Op1); | |||
38473 | return DAG.getBitcast(VT, IntOp); | |||
38474 | } | |||
38475 | ||||
38476 | ||||
38477 | /// Fold a xor(setcc cond, val), 1 --> setcc (inverted(cond), val) | |||
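/// For example (illustrative): (xor (setcc COND_E, EFLAGS), 1) becomes
/// (setcc COND_NE, EFLAGS), since xor-ing the i8 setcc result with 1 is
/// exactly a condition-code inversion.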
38478 | static SDValue foldXor1SetCC(SDNode *N, SelectionDAG &DAG) { | |||
38479 | if (N->getOpcode() != ISD::XOR) | |||
38480 | return SDValue(); | |||
38481 | ||||
38482 | SDValue LHS = N->getOperand(0); | |||
38483 | auto *RHSC = dyn_cast<ConstantSDNode>(N->getOperand(1)); | |||
38484 | if (!RHSC || RHSC->getZExtValue() != 1 || LHS->getOpcode() != X86ISD::SETCC) | |||
38485 | return SDValue(); | |||
38486 | ||||
38487 | X86::CondCode NewCC = X86::GetOppositeBranchCondition( | |||
38488 | X86::CondCode(LHS->getConstantOperandVal(0))); | |||
38489 | SDLoc DL(N); | |||
38490 | return getSETCC(NewCC, LHS->getOperand(1), DL, DAG); | |||
38491 | } | |||
38492 | ||||
38493 | static SDValue combineXor(SDNode *N, SelectionDAG &DAG, | |||
38494 | TargetLowering::DAGCombinerInfo &DCI, | |||
38495 | const X86Subtarget &Subtarget) { | |||
38496 | // If this is SSE1 only convert to FXOR to avoid scalarization. | |||
38497 | if (Subtarget.hasSSE1() && !Subtarget.hasSSE2() && | |||
38498 | N->getValueType(0) == MVT::v4i32) { | |||
38499 | return DAG.getBitcast( | |||
38500 | MVT::v4i32, DAG.getNode(X86ISD::FXOR, SDLoc(N), MVT::v4f32, | |||
38501 | DAG.getBitcast(MVT::v4f32, N->getOperand(0)), | |||
38502 | DAG.getBitcast(MVT::v4f32, N->getOperand(1)))); | |||
38503 | } | |||
38504 | ||||
38505 | if (SDValue Cmp = foldVectorXorShiftIntoCmp(N, DAG, Subtarget)) | |||
38506 | return Cmp; | |||
38507 | ||||
38508 | if (DCI.isBeforeLegalizeOps()) | |||
38509 | return SDValue(); | |||
38510 | ||||
38511 | if (SDValue SetCC = foldXor1SetCC(N, DAG)) | |||
38512 | return SetCC; | |||
38513 | ||||
38514 | if (SDValue RV = foldXorTruncShiftIntoCmp(N, DAG)) | |||
38515 | return RV; | |||
38516 | ||||
38517 | if (SDValue FPLogic = convertIntLogicToFPLogic(N, DAG, Subtarget)) | |||
38518 | return FPLogic; | |||
38519 | ||||
38520 | return combineFneg(N, DAG, Subtarget); | |||
38521 | } | |||
38522 | ||||
38523 | static SDValue combineBEXTR(SDNode *N, SelectionDAG &DAG, | |||
38524 | TargetLowering::DAGCombinerInfo &DCI, | |||
38525 | const X86Subtarget &Subtarget) { | |||
38526 | SDValue Op0 = N->getOperand(0); | |||
38527 | SDValue Op1 = N->getOperand(1); | |||
38528 | EVT VT = N->getValueType(0); | |||
38529 | unsigned NumBits = VT.getSizeInBits(); | |||
38530 | ||||
38531 | const TargetLowering &TLI = DAG.getTargetLoweringInfo(); | |||
38532 | ||||
38533 | // TODO - Constant Folding. | |||
38534 | if (auto *Cst1 = dyn_cast<ConstantSDNode>(Op1)) { | |||
38535 | // Reduce Cst1 to the bottom 16-bits. | |||
38536 | // NOTE: SimplifyDemandedBits won't do this for constants. | |||
38537 | const APInt &Val1 = Cst1->getAPIntValue(); | |||
38538 | APInt MaskedVal1 = Val1 & 0xFFFF; | |||
38539 | if (MaskedVal1 != Val1) | |||
38540 | return DAG.getNode(X86ISD::BEXTR, SDLoc(N), VT, Op0, | |||
38541 | DAG.getConstant(MaskedVal1, SDLoc(N), VT)); | |||
38542 | } | |||
38543 | ||||
38544 | // Only the bottom 16 bits of the control bits are required. | |||
38545 | APInt DemandedMask(APInt::getLowBitsSet(NumBits, 16)); | |||
38546 | if (TLI.SimplifyDemandedBits(Op1, DemandedMask, DCI)) | |||
38547 | return SDValue(N, 0); | |||
38548 | ||||
38549 | return SDValue(); | |||
38550 | } | |||
38551 | ||||
38552 | static bool isNullFPScalarOrVectorConst(SDValue V) { | |||
38553 | return isNullFPConstant(V) || ISD::isBuildVectorAllZeros(V.getNode()); | |||
38554 | } | |||
38555 | ||||
38556 | /// If a value is a scalar FP zero or a vector FP zero (potentially including | |||
38557 | /// undefined elements), return a zero constant that may be used to fold away | |||
38558 | /// that value. In the case of a vector, the returned constant will not contain | |||
38559 | /// undefined elements even if the input parameter does. This makes it suitable | |||
38560 | /// to be used as a replacement operand with operations (eg, bitwise-and) where | |||
38561 | /// an undef should not propagate. | |||
38562 | static SDValue getNullFPConstForNullVal(SDValue V, SelectionDAG &DAG, | |||
38563 | const X86Subtarget &Subtarget) { | |||
38564 | if (!isNullFPScalarOrVectorConst(V)) | |||
38565 | return SDValue(); | |||
38566 | ||||
38567 | if (V.getValueType().isVector()) | |||
38568 | return getZeroVector(V.getSimpleValueType(), Subtarget, DAG, SDLoc(V)); | |||
38569 | ||||
38570 | return V; | |||
38571 | } | |||
38572 | ||||
38573 | static SDValue combineFAndFNotToFAndn(SDNode *N, SelectionDAG &DAG, | |||
38574 | const X86Subtarget &Subtarget) { | |||
38575 | SDValue N0 = N->getOperand(0); | |||
38576 | SDValue N1 = N->getOperand(1); | |||
38577 | EVT VT = N->getValueType(0); | |||
38578 | SDLoc DL(N); | |||
38579 | ||||
38580 | // Vector types are handled in combineANDXORWithAllOnesIntoANDNP(). | |||
38581 | if (!((VT == MVT::f32 && Subtarget.hasSSE1()) || | |||
38582 | (VT == MVT::f64 && Subtarget.hasSSE2()) || | |||
38583 | (VT == MVT::v4f32 && Subtarget.hasSSE1() && !Subtarget.hasSSE2()))) | |||
38584 | return SDValue(); | |||
38585 | ||||
38586 | auto isAllOnesConstantFP = [](SDValue V) { | |||
38587 | if (V.getSimpleValueType().isVector()) | |||
38588 | return ISD::isBuildVectorAllOnes(V.getNode()); | |||
38589 | auto *C = dyn_cast<ConstantFPSDNode>(V); | |||
38590 | return C && C->getConstantFPValue()->isAllOnesValue(); | |||
38591 | }; | |||
38592 | ||||
38593 | // fand (fxor X, -1), Y --> fandn X, Y | |||
38594 | if (N0.getOpcode() == X86ISD::FXOR && isAllOnesConstantFP(N0.getOperand(1))) | |||
38595 | return DAG.getNode(X86ISD::FANDN, DL, VT, N0.getOperand(0), N1); | |||
38596 | ||||
38597 | // fand X, (fxor Y, -1) --> fandn Y, X | |||
38598 | if (N1.getOpcode() == X86ISD::FXOR && isAllOnesConstantFP(N1.getOperand(1))) | |||
38599 | return DAG.getNode(X86ISD::FANDN, DL, VT, N1.getOperand(0), N0); | |||
38600 | ||||
38601 | return SDValue(); | |||
38602 | } | |||
38603 | ||||
38604 | /// Do target-specific dag combines on X86ISD::FAND nodes. | |||
38605 | static SDValue combineFAnd(SDNode *N, SelectionDAG &DAG, | |||
38606 | const X86Subtarget &Subtarget) { | |||
38607 | // FAND(0.0, x) -> 0.0 | |||
38608 | if (SDValue V = getNullFPConstForNullVal(N->getOperand(0), DAG, Subtarget)) | |||
38609 | return V; | |||
38610 | ||||
38611 | // FAND(x, 0.0) -> 0.0 | |||
38612 | if (SDValue V = getNullFPConstForNullVal(N->getOperand(1), DAG, Subtarget)) | |||
38613 | return V; | |||
38614 | ||||
38615 | if (SDValue V = combineFAndFNotToFAndn(N, DAG, Subtarget)) | |||
38616 | return V; | |||
38617 | ||||
38618 | return lowerX86FPLogicOp(N, DAG, Subtarget); | |||
38619 | } | |||
38620 | ||||
38621 | /// Do target-specific dag combines on X86ISD::FANDN nodes. | |||
38622 | static SDValue combineFAndn(SDNode *N, SelectionDAG &DAG, | |||
38623 | const X86Subtarget &Subtarget) { | |||
38624 | // FANDN(0.0, x) -> x | |||
38625 | if (isNullFPScalarOrVectorConst(N->getOperand(0))) | |||
38626 | return N->getOperand(1); | |||
38627 | ||||
38628 | // FANDN(x, 0.0) -> 0.0 | |||
38629 | if (SDValue V = getNullFPConstForNullVal(N->getOperand(1), DAG, Subtarget)) | |||
38630 | return V; | |||
38631 | ||||
38632 | return lowerX86FPLogicOp(N, DAG, Subtarget); | |||
38633 | } | |||
38634 | ||||
38635 | /// Do target-specific dag combines on X86ISD::FOR and X86ISD::FXOR nodes. | |||
38636 | static SDValue combineFOr(SDNode *N, SelectionDAG &DAG, | |||
38637 | const X86Subtarget &Subtarget) { | |||
38638 | assert(N->getOpcode() == X86ISD::FOR || N->getOpcode() == X86ISD::FXOR); | |||
38639 | ||||
38640 | // F[X]OR(0.0, x) -> x | |||
38641 | if (isNullFPScalarOrVectorConst(N->getOperand(0))) | |||
38642 | return N->getOperand(1); | |||
38643 | ||||
38644 | // F[X]OR(x, 0.0) -> x | |||
38645 | if (isNullFPScalarOrVectorConst(N->getOperand(1))) | |||
38646 | return N->getOperand(0); | |||
38647 | ||||
38648 | if (SDValue NewVal = combineFneg(N, DAG, Subtarget)) | |||
38649 | return NewVal; | |||
38650 | ||||
38651 | return lowerX86FPLogicOp(N, DAG, Subtarget); | |||
38652 | } | |||
38653 | ||||
38654 | /// Do target-specific dag combines on X86ISD::FMIN and X86ISD::FMAX nodes. | |||
38655 | static SDValue combineFMinFMax(SDNode *N, SelectionDAG &DAG) { | |||
38656 | assert(N->getOpcode() == X86ISD::FMIN || N->getOpcode() == X86ISD::FMAX); | |||
38657 | ||||
38658 | // Only perform optimizations if UnsafeMath is used. | |||
38659 | if (!DAG.getTarget().Options.UnsafeFPMath) | |||
38660 | return SDValue(); | |||
38661 | ||||
38662 | // If we run in unsafe-math mode, then convert the FMAX and FMIN nodes | |||
38663 | // into FMAXC and FMINC, which are commutative operations. | |||
38664 | unsigned NewOp = 0; | |||
38665 | switch (N->getOpcode()) { | |||
38666 | default: llvm_unreachable("unknown opcode"); | |||
38667 | case X86ISD::FMIN: NewOp = X86ISD::FMINC; break; | |||
38668 | case X86ISD::FMAX: NewOp = X86ISD::FMAXC; break; | |||
38669 | } | |||
38670 | ||||
38671 | return DAG.getNode(NewOp, SDLoc(N), N->getValueType(0), | |||
38672 | N->getOperand(0), N->getOperand(1)); | |||
38673 | } | |||
38674 | ||||
38675 | static SDValue combineFMinNumFMaxNum(SDNode *N, SelectionDAG &DAG, | |||
38676 | const X86Subtarget &Subtarget) { | |||
38677 | if (Subtarget.useSoftFloat()) | |||
38678 | return SDValue(); | |||
38679 | ||||
38680 | // TODO: If an operand is already known to be a NaN or not a NaN, this | |||
38681 | // should be an optional swap and FMAX/FMIN. | |||
38682 | ||||
38683 | EVT VT = N->getValueType(0); | |||
38684 | if (!((Subtarget.hasSSE1() && (VT == MVT::f32 || VT == MVT::v4f32)) || | |||
38685 | (Subtarget.hasSSE2() && (VT == MVT::f64 || VT == MVT::v2f64)) || | |||
38686 | (Subtarget.hasAVX() && (VT == MVT::v8f32 || VT == MVT::v4f64)))) | |||
38687 | return SDValue(); | |||
38688 | ||||
38689 | SDValue Op0 = N->getOperand(0); | |||
38690 | SDValue Op1 = N->getOperand(1); | |||
38691 | SDLoc DL(N); | |||
38692 | auto MinMaxOp = N->getOpcode() == ISD::FMAXNUM ? X86ISD::FMAX : X86ISD::FMIN; | |||
38693 | ||||
38694 | // If we don't have to respect NaN inputs, this is a direct translation to x86 | |||
38695 | // min/max instructions. | |||
38696 | if (DAG.getTarget().Options.NoNaNsFPMath || N->getFlags().hasNoNaNs()) | |||
38697 | return DAG.getNode(MinMaxOp, DL, VT, Op0, Op1, N->getFlags()); | |||
38698 | ||||
38699 | // If we have to respect NaN inputs, this takes at least 3 instructions. | |||
38700 | // Favor a library call when operating on a scalar and minimizing code size. | |||
38701 | if (!VT.isVector() && DAG.getMachineFunction().getFunction().optForMinSize()) | |||
38702 | return SDValue(); | |||
38703 | ||||
38704 | EVT SetCCType = DAG.getTargetLoweringInfo().getSetCCResultType( | |||
38705 | DAG.getDataLayout(), *DAG.getContext(), VT); | |||
38706 | ||||
38707 | // There are 4 possibilities involving NaN inputs, and these are the required | |||
38708 | // outputs: | |||
38709 | // Op1 | |||
38710 | // Num NaN | |||
38711 | // ---------------- | |||
38712 | // Num | Max | Op0 | | |||
38713 | // Op0 ---------------- | |||
38714 | // NaN | Op1 | NaN | | |||
38715 | // ---------------- | |||
38716 | // | |||
38717 | // The SSE FP max/min instructions were not designed for this case, but rather | |||
38718 | // to implement: | |||
38719 | // Min = Op1 < Op0 ? Op1 : Op0 | |||
38720 | // Max = Op1 > Op0 ? Op1 : Op0 | |||
38721 | // | |||
38722 | // So they always return Op0 if either input is a NaN. However, we can still | |||
38723 | // use those instructions for fmaxnum by selecting away a NaN input. | |||
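// In DAG terms, the sequence built below for fmaxnum(Op0, Op1) is
// (a sketch of the code that follows):
//   Max    = (X86ISD::FMAX Op1, Op0)   // yields Op0 if either input is NaN
//   IsNaN  = (setcc Op0, Op0, SETUO)   // true iff Op0 is NaN
//   Result = (select IsNaN, Op1, Max)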
38724 | ||||
38725 | // If either operand is NaN, the 2nd source operand (Op0) is passed through. | |||
38726 | SDValue MinOrMax = DAG.getNode(MinMaxOp, DL, VT, Op1, Op0); | |||
38727 | SDValue IsOp0Nan = DAG.getSetCC(DL, SetCCType, Op0, Op0, ISD::SETUO); | |||
38728 | ||||
38729 | // If Op0 is a NaN, select Op1. Otherwise, select the max. If both operands | |||
38730 | // are NaN, the NaN value of Op1 is the result. | |||
38731 | return DAG.getSelect(DL, VT, IsOp0Nan, Op1, MinOrMax); | |||
38732 | } | |||
38733 | ||||
38734 | static SDValue combineX86INT_TO_FP(SDNode *N, SelectionDAG &DAG, | |||
38735 | TargetLowering::DAGCombinerInfo &DCI) { | |||
38736 | EVT VT = N->getValueType(0); | |||
38737 | const TargetLowering &TLI = DAG.getTargetLoweringInfo(); | |||
38738 | ||||
38739 | APInt KnownUndef, KnownZero; | |||
38740 | APInt DemandedElts = APInt::getAllOnesValue(VT.getVectorNumElements()); | |||
38741 | if (TLI.SimplifyDemandedVectorElts(SDValue(N, 0), DemandedElts, KnownUndef, | |||
38742 | KnownZero, DCI)) | |||
38743 | return SDValue(N, 0); | |||
38744 | ||||
38745 | return SDValue(); | |||
38746 | } | |||
38747 | ||||
38748 | /// Do target-specific dag combines on X86ISD::ANDNP nodes. | |||
38749 | static SDValue combineAndnp(SDNode *N, SelectionDAG &DAG, | |||
38750 | TargetLowering::DAGCombinerInfo &DCI, | |||
38751 | const X86Subtarget &Subtarget) { | |||
38752 | MVT VT = N->getSimpleValueType(0); | |||
38753 | ||||
38754 | // ANDNP(0, x) -> x | |||
38755 | if (ISD::isBuildVectorAllZeros(N->getOperand(0).getNode())) | |||
38756 | return N->getOperand(1); | |||
38757 | ||||
38758 | // ANDNP(x, 0) -> 0 | |||
38759 | if (ISD::isBuildVectorAllZeros(N->getOperand(1).getNode())) | |||
38760 | return DAG.getConstant(0, SDLoc(N), VT); | |||
38761 | ||||
38762 | // Turn ANDNP back to AND if input is inverted. | |||
38763 | if (VT.isVector() && N->getOperand(0).getOpcode() == ISD::XOR && | |||
38764 | ISD::isBuildVectorAllOnes(N->getOperand(0).getOperand(1).getNode())) { | |||
38765 | return DAG.getNode(ISD::AND, SDLoc(N), VT, | |||
38766 | N->getOperand(0).getOperand(0), N->getOperand(1)); | |||
38767 | } | |||
38768 | ||||
38769 | // Attempt to recursively combine a bitmask ANDNP with shuffles. | |||
38770 | if (VT.isVector() && (VT.getScalarSizeInBits() % 8) == 0) { | |||
38771 | SDValue Op(N, 0); | |||
38772 | if (SDValue Res = combineX86ShufflesRecursively( | |||
38773 | {Op}, 0, Op, {0}, {}, /*Depth*/ 1, | |||
38774 | /*HasVarMask*/ false, /*AllowVarMask*/ true, DAG, Subtarget)) | |||
38775 | return Res; | |||
38776 | } | |||
38777 | ||||
38778 | return SDValue(); | |||
38779 | } | |||
38780 | ||||
38781 | static SDValue combineBT(SDNode *N, SelectionDAG &DAG, | |||
38782 | TargetLowering::DAGCombinerInfo &DCI) { | |||
38783 | SDValue N0 = N->getOperand(0); | |||
38784 | SDValue N1 = N->getOperand(1); | |||
38785 | ||||
38786 | // BT ignores high bits in the bit index operand. | |||
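// For example, an i32 bit index only has Log2_32(32) == 5 demanded bits, so
// a redundant mask such as (and %idx, 31) feeding the index can be dropped
// by GetDemandedBits below (%idx is a hypothetical value for illustration).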
38787 | unsigned BitWidth = N1.getValueSizeInBits(); | |||
38788 | APInt DemandedMask = APInt::getLowBitsSet(BitWidth, Log2_32(BitWidth)); | |||
38789 | if (SDValue DemandedN1 = DAG.GetDemandedBits(N1, DemandedMask)) | |||
38790 | return DAG.getNode(X86ISD::BT, SDLoc(N), MVT::i32, N0, DemandedN1); | |||
38791 | ||||
38792 | return SDValue(); | |||
38793 | } | |||
38794 | ||||
38795 | // Try to combine sext_in_reg of a cmov of constants by extending the constants. | |||
38796 | static SDValue combineSextInRegCmov(SDNode *N, SelectionDAG &DAG) { | |||
38797 | EVT VT = N->getValueType(0); | |||
38798 | ||||
38799 | SDValue N0 = N->getOperand(0); | |||
38800 | SDValue N1 = N->getOperand(1); | |||
38801 | EVT ExtraVT = cast<VTSDNode>(N1)->getVT(); | |||
38802 | ||||
38803 | if (ExtraVT != MVT::i16) | |||
38804 | return SDValue(); | |||
38805 | ||||
38806 | // Look through single use any_extends. | |||
38807 | if (N0.getOpcode() == ISD::ANY_EXTEND && N0.hasOneUse()) | |||
38808 | N0 = N0.getOperand(0); | |||
38809 | ||||
38810 | // See if we have a single use cmov. | |||
38811 | if (N0.getOpcode() != X86ISD::CMOV || !N0.hasOneUse()) | |||
38812 | return SDValue(); | |||
38813 | ||||
38814 | SDValue CMovOp0 = N0.getOperand(0); | |||
38815 | SDValue CMovOp1 = N0.getOperand(1); | |||
38816 | ||||
38817 | // Make sure both operands are constants. | |||
38818 | if (!isa<ConstantSDNode>(CMovOp0.getNode()) || | |||
38819 | !isa<ConstantSDNode>(CMovOp1.getNode())) | |||
38820 | return SDValue(); | |||
38821 | ||||
38822 | SDLoc DL(N); | |||
38823 | ||||
38824 | // If we looked through an any_extend above, re-apply it to the constants. | |||
38825 | if (N0.getValueType() != VT) { | |||
38826 | CMovOp0 = DAG.getNode(ISD::ANY_EXTEND, DL, VT, CMovOp0); | |||
38827 | CMovOp1 = DAG.getNode(ISD::ANY_EXTEND, DL, VT, CMovOp1); | |||
38828 | } | |||
38829 | ||||
38830 | CMovOp0 = DAG.getNode(ISD::SIGN_EXTEND_INREG, DL, VT, CMovOp0, N1); | |||
38831 | CMovOp1 = DAG.getNode(ISD::SIGN_EXTEND_INREG, DL, VT, CMovOp1, N1); | |||
38832 | ||||
38833 | return DAG.getNode(X86ISD::CMOV, DL, VT, CMovOp0, CMovOp1, | |||
38834 | N0.getOperand(2), N0.getOperand(3)); | |||
38835 | } | |||
38836 | ||||
38837 | static SDValue combineSignExtendInReg(SDNode *N, SelectionDAG &DAG, | |||
38838 | const X86Subtarget &Subtarget) { | |||
38839 | if (SDValue V = combineSextInRegCmov(N, DAG)) | |||
38840 | return V; | |||
38841 | ||||
38842 | EVT VT = N->getValueType(0); | |||
38843 | SDValue N0 = N->getOperand(0); | |||
38844 | SDValue N1 = N->getOperand(1); | |||
38845 | EVT ExtraVT = cast<VTSDNode>(N1)->getVT(); | |||
38846 | SDLoc dl(N); | |||
38847 | ||||
38848 | // SIGN_EXTEND_INREG to v4i64 is an expensive operation on both SSE and | |||
38849 | // AVX2 since there is no sign-extended shift-right operation on vectors | |||
38850 | // with 64-bit elements. | |||
38851 | // (sext_in_reg (v4i64 anyext (v4i32 x)), ExtraVT) -> | |||
38852 | //   (v4i64 sext (v4i32 sext_in_reg (v4i32 x, ExtraVT))) | |||
38853 | if (VT == MVT::v4i64 && (N0.getOpcode() == ISD::ANY_EXTEND || | |||
38854 | N0.getOpcode() == ISD::SIGN_EXTEND)) { | |||
38855 | SDValue N00 = N0.getOperand(0); | |||
38856 | ||||
38857 | // EXTLOAD has a better solution on AVX2: | |||
38858 | // it may be replaced with an X86ISD::VSEXT node. | |||
38859 | if (N00.getOpcode() == ISD::LOAD && Subtarget.hasInt256()) | |||
38860 | if (!ISD::isNormalLoad(N00.getNode())) | |||
38861 | return SDValue(); | |||
38862 | ||||
38863 | if (N00.getValueType() == MVT::v4i32 && ExtraVT.getSizeInBits() < 128) { | |||
38864 | SDValue Tmp = DAG.getNode(ISD::SIGN_EXTEND_INREG, dl, MVT::v4i32, | |||
38865 | N00, N1); | |||
38866 | return DAG.getNode(ISD::SIGN_EXTEND, dl, MVT::v4i64, Tmp); | |||
38867 | } | |||
38868 | } | |||
38869 | return SDValue(); | |||
38870 | } | |||
38871 | ||||
38872 | /// sext(add_nsw(x, C)) --> add(sext(x), C_sext) | |||
38873 | /// zext(add_nuw(x, C)) --> add(zext(x), C_zext) | |||
38874 | /// Promoting a sign/zero extension ahead of a no overflow 'add' exposes | |||
38875 | /// opportunities to combine math ops, use an LEA, or use a complex addressing | |||
38876 | /// mode. This can eliminate extend, add, and shift instructions. | |||
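/// For example (an illustrative sketch): (i64 (sext (add nsw i32 %x, 5)))
/// becomes (add nsw (i64 (sext %x)), 5), and the outer 64-bit add can then
/// fold into an LEA or the addressing mode of a memory operand.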
38877 | static SDValue promoteExtBeforeAdd(SDNode *Ext, SelectionDAG &DAG, | |||
38878 | const X86Subtarget &Subtarget) { | |||
38879 | if (Ext->getOpcode() != ISD::SIGN_EXTEND && | |||
38880 | Ext->getOpcode() != ISD::ZERO_EXTEND) | |||
38881 | return SDValue(); | |||
38882 | ||||
38883 | // TODO: This should be valid for other integer types. | |||
38884 | EVT VT = Ext->getValueType(0); | |||
38885 | if (VT != MVT::i64) | |||
38886 | return SDValue(); | |||
38887 | ||||
38888 | SDValue Add = Ext->getOperand(0); | |||
38889 | if (Add.getOpcode() != ISD::ADD) | |||
38890 | return SDValue(); | |||
38891 | ||||
38892 | bool Sext = Ext->getOpcode() == ISD::SIGN_EXTEND; | |||
38893 | bool NSW = Add->getFlags().hasNoSignedWrap(); | |||
38894 | bool NUW = Add->getFlags().hasNoUnsignedWrap(); | |||
38895 | ||||
38896 | // We need an 'add nsw' feeding into the 'sext', or an 'add nuw' feeding | |||
38897 | // into the 'zext'. | |||
38898 | if ((Sext && !NSW) || (!Sext && !NUW)) | |||
38899 | return SDValue(); | |||
38900 | ||||
38901 | // Having a constant operand to the 'add' ensures that we are not increasing | |||
38902 | // the instruction count because the constant is extended for free below. | |||
38903 | // A constant operand can also become the displacement field of an LEA. | |||
38904 | auto *AddOp1 = dyn_cast<ConstantSDNode>(Add.getOperand(1)); | |||
38905 | if (!AddOp1) | |||
38906 | return SDValue(); | |||
38907 | ||||
38908 | // Don't make the 'add' bigger if there's no hope of combining it with some | |||
38909 | // other 'add' or 'shl' instruction. | |||
38910 | // TODO: It may be profitable to generate simpler LEA instructions in place | |||
38911 | // of single 'add' instructions, but the cost model for selecting an LEA | |||
38912 | // currently has a high threshold. | |||
38913 | bool HasLEAPotential = false; | |||
38914 | for (auto *User : Ext->uses()) { | |||
38915 | if (User->getOpcode() == ISD::ADD || User->getOpcode() == ISD::SHL) { | |||
38916 | HasLEAPotential = true; | |||
38917 | break; | |||
38918 | } | |||
38919 | } | |||
38920 | if (!HasLEAPotential) | |||
38921 | return SDValue(); | |||
38922 | ||||
38923 | // Everything looks good, so pull the '{s|z}ext' ahead of the 'add'. | |||
38924 | int64_t AddConstant = Sext ? AddOp1->getSExtValue() : AddOp1->getZExtValue(); | |||
38925 | SDValue AddOp0 = Add.getOperand(0); | |||
38926 | SDValue NewExt = DAG.getNode(Ext->getOpcode(), SDLoc(Ext), VT, AddOp0); | |||
38927 | SDValue NewConstant = DAG.getConstant(AddConstant, SDLoc(Add), VT); | |||
38928 | ||||
38929 | // The wider add is guaranteed not to wrap because both operands are | |||
38930 | // sign-extended (for sext) or zero-extended (for zext). | |||
38931 | SDNodeFlags Flags; | |||
38932 | Flags.setNoSignedWrap(NSW); | |||
38933 | Flags.setNoUnsignedWrap(NUW); | |||
38934 | return DAG.getNode(ISD::ADD, SDLoc(Add), VT, NewExt, NewConstant, Flags); | |||
38935 | } | |||
38936 | ||||
38937 | // If we face {ANY,SIGN,ZERO}_EXTEND that is applied to a CMOV with constant | |||
38938 | // operands and the result of CMOV is not used anywhere else - promote CMOV | |||
38939 | // itself instead of promoting its result. This could be beneficial, because: | |||
38940 | // 1) X86TargetLowering::EmitLoweredSelect later can do merging of two | |||
38941 | // (or more) pseudo-CMOVs only when they go one-after-another and | |||
38942 | // getting rid of result extension code after CMOV will help that. | |||
38943 | // 2) Promotion of constant CMOV arguments is free, hence the | |||
38944 | // {ANY,SIGN,ZERO}_EXTEND will just be deleted. | |||
38945 | // 3) 16-bit CMOV encoding is 4 bytes, 32-bit CMOV is 3 bytes, so this | |||
38946 | // promotion is also good in terms of code-size. | |||
38947 | // (64-bit CMOV is 4 bytes, which is why we don't do 32-bit => 64-bit | |||
38948 | // promotion). | |||
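// Sketch of the effect (illustrative): (i32 (zext (i16 (cmov 1, 2, cond))))
// becomes (i32 (cmov 1, 2, cond)); extending the constant operands is free,
// and the 32-bit CMOV also encodes more compactly than the 16-bit one.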
38949 | static SDValue combineToExtendCMOV(SDNode *Extend, SelectionDAG &DAG) { | |||
38950 | SDValue CMovN = Extend->getOperand(0); | |||
38951 | if (CMovN.getOpcode() != X86ISD::CMOV || !CMovN.hasOneUse()) | |||
38952 | return SDValue(); | |||
38953 | ||||
38954 | EVT TargetVT = Extend->getValueType(0); | |||
38955 | unsigned ExtendOpcode = Extend->getOpcode(); | |||
38956 | SDLoc DL(Extend); | |||
38957 | ||||
38958 | EVT VT = CMovN.getValueType(); | |||
38959 | SDValue CMovOp0 = CMovN.getOperand(0); | |||
38960 | SDValue CMovOp1 = CMovN.getOperand(1); | |||
38961 | ||||
38962 | if (!isa<ConstantSDNode>(CMovOp0.getNode()) || | |||
38963 | !isa<ConstantSDNode>(CMovOp1.getNode())) | |||
38964 | return SDValue(); | |||
38965 | ||||
38966 | // Only extend to i32 or i64. | |||
38967 | if (TargetVT != MVT::i32 && TargetVT != MVT::i64) | |||
38968 | return SDValue(); | |||
38969 | ||||
38970 | // Only extend from i16 unless it's a sign_extend from i32. Zext/aext from i32 | |||
38971 | // are free. | |||
38972 | if (VT != MVT::i16 && !(ExtendOpcode == ISD::SIGN_EXTEND && VT == MVT::i32)) | |||
38973 | return SDValue(); | |||
38974 | ||||
38975 | // If this is a zero extend to i64, we should only extend to i32 and use a | |||
38976 | // free zero extend to finish. | |||
38977 | EVT ExtendVT = TargetVT; | |||
38978 | if (TargetVT == MVT::i64 && ExtendOpcode != ISD::SIGN_EXTEND) | |||
38979 | ExtendVT = MVT::i32; | |||
38980 | ||||
38981 | CMovOp0 = DAG.getNode(ExtendOpcode, DL, ExtendVT, CMovOp0); | |||
38982 | CMovOp1 = DAG.getNode(ExtendOpcode, DL, ExtendVT, CMovOp1); | |||
38983 | ||||
38984 | SDValue Res = DAG.getNode(X86ISD::CMOV, DL, ExtendVT, CMovOp0, CMovOp1, | |||
38985 | CMovN.getOperand(2), CMovN.getOperand(3)); | |||
38986 | ||||
38987 | // Finish extending if needed. | |||
38988 | if (ExtendVT != TargetVT) | |||
38989 | Res = DAG.getNode(ExtendOpcode, DL, TargetVT, Res); | |||
38990 | ||||
38991 | return Res; | |||
38992 | } | |||
38993 | ||||
38994 | // Convert (vXiY *ext(vXi1 bitcast(iX))) to extend_in_reg(broadcast(iX)). | |||
38995 | // This is more or less the reverse of combineBitcastvxi1. | |||
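// For example (an illustrative sketch): (v8i16 (zext (v8i1 (bitcast i8 %m))))
// is rebuilt as: broadcast %m to every lane, AND each lane i with (1 << i),
// compare the result for equality against that same bit to get 0/-1 per lane,
// then shift right to produce 0/1 for the zero-extend case.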
38996 | static SDValue | |||
38997 | combineToExtendBoolVectorInReg(SDNode *N, SelectionDAG &DAG, | |||
38998 | TargetLowering::DAGCombinerInfo &DCI, | |||
38999 | const X86Subtarget &Subtarget) { | |||
39000 | unsigned Opcode = N->getOpcode(); | |||
39001 | if (Opcode != ISD::SIGN_EXTEND && Opcode != ISD::ZERO_EXTEND && | |||
39002 | Opcode != ISD::ANY_EXTEND) | |||
39003 | return SDValue(); | |||
39004 | if (!DCI.isBeforeLegalizeOps()) | |||
39005 | return SDValue(); | |||
39006 | if (!Subtarget.hasSSE2() || Subtarget.hasAVX512()) | |||
39007 | return SDValue(); | |||
39008 | ||||
39009 | SDValue N0 = N->getOperand(0); | |||
39010 | EVT VT = N->getValueType(0); | |||
39011 | EVT SVT = VT.getScalarType(); | |||
39012 | EVT InSVT = N0.getValueType().getScalarType(); | |||
39013 | unsigned EltSizeInBits = SVT.getSizeInBits(); | |||
39014 | ||||
39015 | // Input type must be extending a bool vector (bit-casted from a scalar | |||
39016 | // integer) to legal integer types. | |||
39017 | if (!VT.isVector()) | |||
39018 | return SDValue(); | |||
39019 | if (SVT != MVT::i64 && SVT != MVT::i32 && SVT != MVT::i16 && SVT != MVT::i8) | |||
39020 | return SDValue(); | |||
39021 | if (InSVT != MVT::i1 || N0.getOpcode() != ISD::BITCAST) | |||
39022 | return SDValue(); | |||
39023 | ||||
39024 | SDValue N00 = N0.getOperand(0); | |||
39025 | EVT SclVT = N0.getOperand(0).getValueType(); | |||
39026 | if (!SclVT.isScalarInteger()) | |||
39027 | return SDValue(); | |||
39028 | ||||
39029 | SDLoc DL(N); | |||
39030 | SDValue Vec; | |||
39031 | SmallVector<int, 32> ShuffleMask; | |||
39032 | unsigned NumElts = VT.getVectorNumElements(); | |||
39033 | assert(NumElts == SclVT.getSizeInBits() && "Unexpected bool vector size"); | |||
39034 | ||||
39035 | // Broadcast the scalar integer to the vector elements. | |||
39036 | if (NumElts > EltSizeInBits) { | |||
39037 | // If the scalar integer is greater than the vector element size, then we | |||
39038 | // must split it down into sub-sections for broadcasting. For example: | |||
39039 | // i16 -> v16i8 (i16 -> v8i16 -> v16i8) with 2 sub-sections. | |||
39040 | // i32 -> v32i8 (i32 -> v8i32 -> v32i8) with 4 sub-sections. | |||
39041 | assert((NumElts % EltSizeInBits) == 0 && "Unexpected integer scale"); | |||
39042 | unsigned Scale = NumElts / EltSizeInBits; | |||
39043 | EVT BroadcastVT = | |||
39044 | EVT::getVectorVT(*DAG.getContext(), SclVT, EltSizeInBits); | |||
39045 | Vec = DAG.getNode(ISD::SCALAR_TO_VECTOR, DL, BroadcastVT, N00); | |||
39046 | Vec = DAG.getBitcast(VT, Vec); | |||
39047 | ||||
39048 | for (unsigned i = 0; i != Scale; ++i) | |||
39049 | ShuffleMask.append(EltSizeInBits, i); | |||
39050 | } else { | |||
39051 | // For a smaller scalar integer, we can simply any-extend it to the vector | |||
39052 | // element size (we don't care about the upper bits) and broadcast it to all | |||
39053 | // elements. | |||
39054 | SDValue Scl = DAG.getAnyExtOrTrunc(N00, DL, SVT); | |||
39055 | Vec = DAG.getNode(ISD::SCALAR_TO_VECTOR, DL, VT, Scl); | |||
39056 | ShuffleMask.append(NumElts, 0); | |||
39057 | } | |||
39058 | Vec = DAG.getVectorShuffle(VT, DL, Vec, Vec, ShuffleMask); | |||
39059 | ||||
39060 | // Now, mask the relevant bit in each element. | |||
39061 | SmallVector<SDValue, 32> Bits; | |||
39062 | for (unsigned i = 0; i != NumElts; ++i) { | |||
39063 | int BitIdx = (i % EltSizeInBits); | |||
39064 | APInt Bit = APInt::getBitsSet(EltSizeInBits, BitIdx, BitIdx + 1); | |||
39065 | Bits.push_back(DAG.getConstant(Bit, DL, SVT)); | |||
39066 | } | |||
39067 | SDValue BitMask = DAG.getBuildVector(VT, DL, Bits); | |||
39068 | Vec = DAG.getNode(ISD::AND, DL, VT, Vec, BitMask); | |||
39069 | ||||
39070 | // Compare against the bitmask and extend the result. | |||
39071 | EVT CCVT = EVT::getVectorVT(*DAG.getContext(), MVT::i1, NumElts); | |||
39072 | Vec = DAG.getSetCC(DL, CCVT, Vec, BitMask, ISD::SETEQ); | |||
39073 | Vec = DAG.getSExtOrTrunc(Vec, DL, VT); | |||
39074 | ||||
39075 | // For SEXT, this is now done, otherwise shift the result down for | |||
39076 | // zero-extension. | |||
39077 | if (Opcode == ISD::SIGN_EXTEND) | |||
39078 | return Vec; | |||
39079 | return DAG.getNode(ISD::SRL, DL, VT, Vec, | |||
39080 | DAG.getConstant(EltSizeInBits - 1, DL, VT)); | |||
39081 | } | |||
39082 | ||||
39083 | /// Convert a SEXT or ZEXT of a vector to a SIGN_EXTEND_VECTOR_INREG or | |||
39084 | /// ZERO_EXTEND_VECTOR_INREG, this requires the splitting (or concatenating | |||
39085 | /// with UNDEFs) of the input to vectors of the same size as the target type | |||
39086 | /// which then extends the lowest elements. | |||
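// For example (illustrative): (v4i32 (sext (v4i16 x))) becomes
// (v4i32 (sign_extend_vector_inreg (v8i16 (concat x, undef)))), which lowers
// to a single PMOVSXWD on SSE41 instead of a shuffle sequence.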
39087 | static SDValue combineToExtendVectorInReg(SDNode *N, SelectionDAG &DAG, | |||
39088 | TargetLowering::DAGCombinerInfo &DCI, | |||
39089 | const X86Subtarget &Subtarget) { | |||
39090 | if (ExperimentalVectorWideningLegalization) | |||
39091 | return SDValue(); | |||
39092 | ||||
39093 | unsigned Opcode = N->getOpcode(); | |||
39094 | if (Opcode != ISD::SIGN_EXTEND && Opcode != ISD::ZERO_EXTEND) | |||
39095 | return SDValue(); | |||
39096 | if (!DCI.isBeforeLegalizeOps()) | |||
39097 | return SDValue(); | |||
39098 | if (!Subtarget.hasSSE2()) | |||
39099 | return SDValue(); | |||
39100 | ||||
39101 | SDValue N0 = N->getOperand(0); | |||
39102 | EVT VT = N->getValueType(0); | |||
39103 | EVT SVT = VT.getScalarType(); | |||
39104 | EVT InVT = N0.getValueType(); | |||
39105 | EVT InSVT = InVT.getScalarType(); | |||
39106 | ||||
39107 | // FIXME: Generic DAGCombiner previously had a bug that would cause a | |||
39108 | // sign_extend of setcc to sometimes return the original node and tricked it | |||
39109 | // into thinking CombineTo was used, which prevented the target combines from | |||
39110 | // running. | |||
39111 | // We return early here to avoid regressions like this: | |||
39112 | // (v4i32 (sext (v4i1 (setcc (v4i16))))) | |||
39113 | // Becomes | |||
39114 | // (v4i32 (sext_invec (v8i16 (concat (v4i16 (setcc (v4i16))), undef)))) | |||
39115 | // Type legalized to | |||
39116 | // (v4i32 (sext_invec (v8i16 (trunc_invec (v4i32 (setcc (v4i32))))))) | |||
39117 | // Leading to a packssdw+pmovsxwd | |||
39118 | // We could write a DAG combine to fix this, but really we shouldn't be | |||
39119 | // creating sext_invec that's forcing v8i16 into the DAG. | |||
39120 | if (N0.getOpcode() == ISD::SETCC) | |||
39121 | return SDValue(); | |||
39122 | ||||
39123 | // Input type must be a vector and we must be extending legal integer types. | |||
39124 | if (!VT.isVector() || VT.getVectorNumElements() < 2) | |||
39125 | return SDValue(); | |||
39126 | if (SVT != MVT::i64 && SVT != MVT::i32 && SVT != MVT::i16) | |||
39127 | return SDValue(); | |||
39128 | if (InSVT != MVT::i32 && InSVT != MVT::i16 && InSVT != MVT::i8) | |||
39129 | return SDValue(); | |||
39130 | ||||
39131 | // If the input/output types are both legal then we have at least AVX1 and | |||
39132 | // we will be able to use SIGN_EXTEND/ZERO_EXTEND directly. | |||
39133 | if (DAG.getTargetLoweringInfo().isTypeLegal(VT) && | |||
39134 | DAG.getTargetLoweringInfo().isTypeLegal(InVT)) | |||
39135 | return SDValue(); | |||
39136 | ||||
39137 | SDLoc DL(N); | |||
39138 | ||||
39139 | auto ExtendVecSize = [&DAG](const SDLoc &DL, SDValue N, unsigned Size) { | |||
39140 | EVT InVT = N.getValueType(); | |||
39141 | EVT OutVT = EVT::getVectorVT(*DAG.getContext(), InVT.getScalarType(), | |||
39142 | Size / InVT.getScalarSizeInBits()); | |||
39143 | SmallVector<SDValue, 8> Opnds(Size / InVT.getSizeInBits(), | |||
39144 | DAG.getUNDEF(InVT)); | |||
39145 | Opnds[0] = N; | |||
39146 | return DAG.getNode(ISD::CONCAT_VECTORS, DL, OutVT, Opnds); | |||
39147 | }; | |||
39148 | ||||
39149 | // If the target size is less than 128 bits, widen to a type that would | |||
39150 | // extend to 128 bits, extend that, and extract the original target vector. | |||
39151 | if (VT.getSizeInBits() < 128 && !(128 % VT.getSizeInBits())) { | |||
39152 | unsigned Scale = 128 / VT.getSizeInBits(); | |||
39153 | EVT ExVT = | |||
39154 | EVT::getVectorVT(*DAG.getContext(), SVT, 128 / SVT.getSizeInBits()); | |||
39155 | SDValue Ex = ExtendVecSize(DL, N0, Scale * InVT.getSizeInBits()); | |||
39156 | SDValue SExt = DAG.getNode(Opcode, DL, ExVT, Ex); | |||
39157 | return DAG.getNode(ISD::EXTRACT_SUBVECTOR, DL, VT, SExt, | |||
39158 | DAG.getIntPtrConstant(0, DL)); | |||
39159 | } | |||
39160 | ||||
39161 | // If the target size is 128 bits (or 256 bits on an AVX target), then convert | |||
39162 | // to ISD::*_EXTEND_VECTOR_INREG, which ensures lowering to X86ISD::V*EXT. | |||
39163 | // Also use this if we don't have SSE41, to allow the legalizer to do its job. | |||
39164 | if (!Subtarget.hasSSE41() || VT.is128BitVector() || | |||
39165 | (VT.is256BitVector() && Subtarget.hasAVX()) || | |||
39166 | (VT.is512BitVector() && Subtarget.useAVX512Regs())) { | |||
39167 | SDValue ExOp = ExtendVecSize(DL, N0, VT.getSizeInBits()); | |||
39168 | Opcode = Opcode == ISD::SIGN_EXTEND ? ISD::SIGN_EXTEND_VECTOR_INREG | |||
39169 | : ISD::ZERO_EXTEND_VECTOR_INREG; | |||
39170 | return DAG.getNode(Opcode, DL, VT, ExOp); | |||
39171 | } | |||
39172 | ||||
39173 | auto SplitAndExtendInReg = [&](unsigned SplitSize) { | |||
39174 | unsigned NumVecs = VT.getSizeInBits() / SplitSize; | |||
39175 | unsigned NumSubElts = SplitSize / SVT.getSizeInBits(); | |||
39176 | EVT SubVT = EVT::getVectorVT(*DAG.getContext(), SVT, NumSubElts); | |||
39177 | EVT InSubVT = EVT::getVectorVT(*DAG.getContext(), InSVT, NumSubElts); | |||
39178 | ||||
39179 | unsigned IROpc = Opcode == ISD::SIGN_EXTEND ? ISD::SIGN_EXTEND_VECTOR_INREG | |||
39180 | : ISD::ZERO_EXTEND_VECTOR_INREG; | |||
39181 | ||||
39182 | SmallVector<SDValue, 8> Opnds; | |||
39183 | for (unsigned i = 0, Offset = 0; i != NumVecs; ++i, Offset += NumSubElts) { | |||
39184 | SDValue SrcVec = DAG.getNode(ISD::EXTRACT_SUBVECTOR, DL, InSubVT, N0, | |||
39185 | DAG.getIntPtrConstant(Offset, DL)); | |||
39186 | SrcVec = ExtendVecSize(DL, SrcVec, SplitSize); | |||
39187 | SrcVec = DAG.getNode(IROpc, DL, SubVT, SrcVec); | |||
39188 | Opnds.push_back(SrcVec); | |||
39189 | } | |||
39190 | return DAG.getNode(ISD::CONCAT_VECTORS, DL, VT, Opnds); | |||
39191 | }; | |||
39192 | ||||
39193 | // On pre-AVX targets, split into 128-bit nodes of | |||
39194 | // ISD::*_EXTEND_VECTOR_INREG. | |||
39195 | if (!Subtarget.hasAVX() && !(VT.getSizeInBits() % 128)) | |||
39196 | return SplitAndExtendInReg(128); | |||
39197 | ||||
39198 | // On pre-AVX512 targets, split into 256-bit nodes of | |||
39199 | // ISD::*_EXTEND_VECTOR_INREG. | |||
39200 | if (!Subtarget.useAVX512Regs() && !(VT.getSizeInBits() % 256)) | |||
39201 | return SplitAndExtendInReg(256); | |||
39202 | ||||
39203 | return SDValue(); | |||
39204 | } | |||
39205 | ||||
39206 | // Attempt to combine a (sext/zext (setcc)) to a setcc with a xmm/ymm/zmm | |||
39207 | // result type. | |||
39208 | static SDValue combineExtSetcc(SDNode *N, SelectionDAG &DAG, | |||
39209 | const X86Subtarget &Subtarget) { | |||
39210 | SDValue N0 = N->getOperand(0); | |||
39211 | EVT VT = N->getValueType(0); | |||
39212 | SDLoc dl(N); | |||
39213 | ||||
39214 | // Only do this combine with AVX512 for vector extends. | |||
39215 | if (!Subtarget.hasAVX512() || !VT.isVector() || N0->getOpcode() != ISD::SETCC) | |||
39216 | return SDValue(); | |||
39217 | ||||
39218 | // Only combine legal element types. | |||
39219 | EVT SVT = VT.getVectorElementType(); | |||
39220 | if (SVT != MVT::i8 && SVT != MVT::i16 && SVT != MVT::i32 && | |||
39221 | SVT != MVT::i64 && SVT != MVT::f32 && SVT != MVT::f64) | |||
39222 | return SDValue(); | |||
39223 | ||||
39224 | // We can only do this if the vector size is 256 bits or less. | |||
39225 | unsigned Size = VT.getSizeInBits(); | |||
39226 | if (Size > 256) | |||
39227 | return SDValue(); | |||
39228 | ||||
39229 | // Don't fold if the condition code can't be handled by PCMPEQ/PCMPGT since | |||
39230 | // those are the only integer compares we have. | |||
39231 | ISD::CondCode CC = cast<CondCodeSDNode>(N0->getOperand(2))->get(); | |||
39232 | if (ISD::isUnsignedIntSetCC(CC)) | |||
39233 | return SDValue(); | |||
39234 | ||||
39235 | // Only do this combine if the extension will be fully consumed by the setcc. | |||
39236 | EVT N00VT = N0.getOperand(0).getValueType(); | |||
39237 | EVT MatchingVecType = N00VT.changeVectorElementTypeToInteger(); | |||
39238 | if (Size != MatchingVecType.getSizeInBits()) | |||
39239 | return SDValue(); | |||
39240 | ||||
39241 | SDValue Res = DAG.getSetCC(dl, VT, N0.getOperand(0), N0.getOperand(1), CC); | |||
39242 | ||||
39243 | if (N->getOpcode() == ISD::ZERO_EXTEND) | |||
39244 | Res = DAG.getZeroExtendInReg(Res, dl, N0.getValueType().getScalarType()); | |||
39245 | ||||
39246 | return Res; | |||
39247 | } | |||
39248 | ||||
39249 | static SDValue combineSext(SDNode *N, SelectionDAG &DAG, | |||
39250 | TargetLowering::DAGCombinerInfo &DCI, | |||
39251 | const X86Subtarget &Subtarget) { | |||
39252 | SDValue N0 = N->getOperand(0); | |||
39253 | EVT VT = N->getValueType(0); | |||
39254 | EVT InVT = N0.getValueType(); | |||
39255 | SDLoc DL(N); | |||
39256 | ||||
39257 | if (SDValue NewCMov = combineToExtendCMOV(N, DAG)) | |||
39258 | return NewCMov; | |||
39259 | ||||
39260 | if (!DCI.isBeforeLegalizeOps()) | |||
39261 | return SDValue(); | |||
39262 | ||||
39263 | if (SDValue V = combineExtSetcc(N, DAG, Subtarget)) | |||
39264 | return V; | |||
39265 | ||||
39266 | if (InVT == MVT::i1 && N0.getOpcode() == ISD::XOR && | |||
39267 | isAllOnesConstant(N0.getOperand(1)) && N0.hasOneUse()) { | |||
39268 | // Inverting and sign-extending a boolean is the same as zero-extending and | |||
39269 | // subtracting 1, because 0 becomes -1 and 1 becomes 0. The subtract is | |||
39270 | // efficiently lowered with an LEA or a DEC. This is the same as: select Bool, 0, -1. | |||
39271 | // sext (xor Bool, -1) --> sub (zext Bool), 1 | |||
39272 | SDValue Zext = DAG.getNode(ISD::ZERO_EXTEND, DL, VT, N0.getOperand(0)); | |||
39273 | return DAG.getNode(ISD::SUB, DL, VT, Zext, DAG.getConstant(1, DL, VT)); | |||
39274 | } | |||
39275 | ||||
39276 | if (SDValue V = combineToExtendVectorInReg(N, DAG, DCI, Subtarget)) | |||
39277 | return V; | |||
39278 | ||||
39279 | if (SDValue V = combineToExtendBoolVectorInReg(N, DAG, DCI, Subtarget)) | |||
39280 | return V; | |||
39281 | ||||
39282 | if (VT.isVector()) | |||
39283 | if (SDValue R = PromoteMaskArithmetic(N, DAG, Subtarget)) | |||
39284 | return R; | |||
39285 | ||||
39286 | if (SDValue NewAdd = promoteExtBeforeAdd(N, DAG, Subtarget)) | |||
39287 | return NewAdd; | |||
39288 | ||||
39289 | return SDValue(); | |||
39290 | } | |||
39291 | ||||
39292 | static unsigned negateFMAOpcode(unsigned Opcode, bool NegMul, bool NegAcc) { | |||
39293 | if (NegMul) { | |||
39294 | switch (Opcode) { | |||
39295 | default: llvm_unreachable("Unexpected opcode"); | |||
39296 | case ISD::FMA: Opcode = X86ISD::FNMADD; break; | |||
39297 | case X86ISD::FMADD_RND: Opcode = X86ISD::FNMADD_RND; break; | |||
39298 | case X86ISD::FMSUB: Opcode = X86ISD::FNMSUB; break; | |||
39299 | case X86ISD::FMSUB_RND: Opcode = X86ISD::FNMSUB_RND; break; | |||
39300 | case X86ISD::FNMADD: Opcode = ISD::FMA; break; | |||
39301 | case X86ISD::FNMADD_RND: Opcode = X86ISD::FMADD_RND; break; | |||
39302 | case X86ISD::FNMSUB: Opcode = X86ISD::FMSUB; break; | |||
39303 | case X86ISD::FNMSUB_RND: Opcode = X86ISD::FMSUB_RND; break; | |||
39304 | } | |||
39305 | } | |||
39306 | ||||
39307 | if (NegAcc) { | |||
39308 | switch (Opcode) { | |||
39309 | default: llvm_unreachable("Unexpected opcode"); | |||
39310 | case ISD::FMA: Opcode = X86ISD::FMSUB; break; | |||
39311 | case X86ISD::FMADD_RND: Opcode = X86ISD::FMSUB_RND; break; | |||
39312 | case X86ISD::FMSUB: Opcode = ISD::FMA; break; | |||
39313 | case X86ISD::FMSUB_RND: Opcode = X86ISD::FMADD_RND; break; | |||
39314 | case X86ISD::FNMADD: Opcode = X86ISD::FNMSUB; break; | |||
39315 | case X86ISD::FNMADD_RND: Opcode = X86ISD::FNMSUB_RND; break; | |||
39316 | case X86ISD::FNMSUB: Opcode = X86ISD::FNMADD; break; | |||
39317 | case X86ISD::FNMSUB_RND: Opcode = X86ISD::FNMADD_RND; break; | |||
39318 | } | |||
39319 | } | |||
39320 | ||||
39321 | return Opcode; | |||
39322 | } | |||
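// Usage sketch (illustration only): the two switches above compose, so the
// multiply and accumulate negations can be toggled independently:
//   negateFMAOpcode(ISD::FMA, /*NegMul=*/true,  /*NegAcc=*/false) == X86ISD::FNMADD
//   negateFMAOpcode(ISD::FMA, /*NegMul=*/false, /*NegAcc=*/true)  == X86ISD::FMSUB
//   negateFMAOpcode(ISD::FMA, /*NegMul=*/true,  /*NegAcc=*/true)  == X86ISD::FNMSUB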
39323 | ||||
39324 | static SDValue combineFMA(SDNode *N, SelectionDAG &DAG, | |||
39325 | const X86Subtarget &Subtarget) { | |||
39326 | SDLoc dl(N); | |||
39327 | EVT VT = N->getValueType(0); | |||
39328 | ||||
39329 | // Let legalize expand this if it isn't a legal type yet. | |||
39330 | if (!DAG.getTargetLoweringInfo().isTypeLegal(VT)) | |||
39331 | return SDValue(); | |||
39332 | ||||
39333 | EVT ScalarVT = VT.getScalarType(); | |||
39334 | if ((ScalarVT != MVT::f32 && ScalarVT != MVT::f64) || !Subtarget.hasAnyFMA()) | |||
39335 | return SDValue(); | |||
39336 | ||||
39337 | SDValue A = N->getOperand(0); | |||
39338 | SDValue B = N->getOperand(1); | |||
39339 | SDValue C = N->getOperand(2); | |||
39340 | ||||
39341 | auto invertIfNegative = [&DAG](SDValue &V) { | |||
39342 | if (SDValue NegVal = isFNEG(DAG, V.getNode())) { | |||
39343 | V = DAG.getBitcast(V.getValueType(), NegVal); | |||
39344 | return true; | |||
39345 | } | |||
39346 | // Look through extract_vector_elts. If it comes from an FNEG, create a | |||
39347 | // new extract from the FNEG input. | |||
39348 | if (V.getOpcode() == ISD::EXTRACT_VECTOR_ELT && | |||
39349 | isNullConstant(V.getOperand(1))) { | |||
39350 | if (SDValue NegVal = isFNEG(DAG, V.getOperand(0).getNode())) { | |||
39351 | NegVal = DAG.getBitcast(V.getOperand(0).getValueType(), NegVal); | |||
39352 | V = DAG.getNode(ISD::EXTRACT_VECTOR_ELT, SDLoc(V), V.getValueType(), | |||
39353 | NegVal, V.getOperand(1)); | |||
39354 | return true; | |||
39355 | } | |||
39356 | } | |||
39357 | ||||
39358 | return false; | |||
39359 | }; | |||
39360 | ||||
39361 | // Do not convert the passthru input of scalar intrinsics. | |||
39362 | // FIXME: We could allow negations of the lower element only. | |||
39363 | bool NegA = invertIfNegative(A); | |||
39364 | bool NegB = invertIfNegative(B); | |||
39365 | bool NegC = invertIfNegative(C); | |||
39366 | ||||
39367 | if (!NegA && !NegB && !NegC) | |||
39368 | return SDValue(); | |||
39369 | ||||
39370 | unsigned NewOpcode = negateFMAOpcode(N->getOpcode(), NegA != NegB, NegC); | |||
39371 | ||||
39372 | if (N->getNumOperands() == 4) | |||
39373 | return DAG.getNode(NewOpcode, dl, VT, A, B, C, N->getOperand(3)); | |||
39374 | return DAG.getNode(NewOpcode, dl, VT, A, B, C); | |||
39375 | } | |||
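// Examples of the rewrite above (sketch, not exhaustive):
//   (fma (fneg A), B, C) --> (X86ISD::FNMADD A, B, C)
//   (fma A, B, (fneg C)) --> (X86ISD::FMSUB A, B, C)
//   (fma (fneg A), (fneg B), C) keeps ISD::FMA, since NegA != NegB is false
//   and the two negations cancel; only the operands are replaced.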
39376 | ||||
39377 | // Combine FMADDSUB(A, B, FNEG(C)) -> FMSUBADD(A, B, C) | |||
39378 | static SDValue combineFMADDSUB(SDNode *N, SelectionDAG &DAG, | |||
39379 | const X86Subtarget &Subtarget) { | |||
39380 | SDLoc dl(N); | |||
39381 | EVT VT = N->getValueType(0); | |||
39382 | ||||
39383 | SDValue NegVal = isFNEG(DAG, N->getOperand(2).getNode()); | |||
39384 | if (!NegVal) | |||
39385 | return SDValue(); | |||
39386 | ||||
39387 | unsigned NewOpcode; | |||
39388 | switch (N->getOpcode()) { | |||
39389 | default: llvm_unreachable("Unexpected opcode!"); | |||
39390 | case X86ISD::FMADDSUB: NewOpcode = X86ISD::FMSUBADD; break; | |||
39391 | case X86ISD::FMADDSUB_RND: NewOpcode = X86ISD::FMSUBADD_RND; break; | |||
39392 | case X86ISD::FMSUBADD: NewOpcode = X86ISD::FMADDSUB; break; | |||
39393 | case X86ISD::FMSUBADD_RND: NewOpcode = X86ISD::FMADDSUB_RND; break; | |||
39394 | } | |||
39395 | ||||
39396 | if (N->getNumOperands() == 4) | |||
39397 | return DAG.getNode(NewOpcode, dl, VT, N->getOperand(0), N->getOperand(1), | |||
39398 | NegVal, N->getOperand(3)); | |||
39399 | return DAG.getNode(NewOpcode, dl, VT, N->getOperand(0), N->getOperand(1), | |||
39400 | NegVal); | |||
39401 | } | |||
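// e.g. (X86ISD::FMADDSUB A, B, (fneg C)) --> (X86ISD::FMSUBADD A, B, C);
// the _RND variants carry their rounding-mode operand through unchanged.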
39402 | ||||
39403 | static SDValue combineZext(SDNode *N, SelectionDAG &DAG, | |||
39404 | TargetLowering::DAGCombinerInfo &DCI, | |||
39405 | const X86Subtarget &Subtarget) { | |||
39406 | // (i32 zext (and (i8 x86isd::setcc_carry), 1)) -> | |||
39407 | // (and (i32 x86isd::setcc_carry), 1) | |||
39408 | // This eliminates the zext. This transformation is necessary because | |||
39409 | // ISD::SETCC is always legalized to i8. | |||
39410 | SDLoc dl(N); | |||
39411 | SDValue N0 = N->getOperand(0); | |||
39412 | EVT VT = N->getValueType(0); | |||
39413 | ||||
39414 | if (N0.getOpcode() == ISD::AND && | |||
39415 | N0.hasOneUse() && | |||
39416 | N0.getOperand(0).hasOneUse()) { | |||
39417 | SDValue N00 = N0.getOperand(0); | |||
39418 | if (N00.getOpcode() == X86ISD::SETCC_CARRY) { | |||
39419 | if (!isOneConstant(N0.getOperand(1))) | |||
39420 | return SDValue(); | |||
39421 | return DAG.getNode(ISD::AND, dl, VT, | |||
39422 | DAG.getNode(X86ISD::SETCC_CARRY, dl, VT, | |||
39423 | N00.getOperand(0), N00.getOperand(1)), | |||
39424 | DAG.getConstant(1, dl, VT)); | |||
39425 | } | |||
39426 | } | |||
39427 | ||||
39428 | if (N0.getOpcode() == ISD::TRUNCATE && | |||
39429 | N0.hasOneUse() && | |||
39430 | N0.getOperand(0).hasOneUse()) { | |||
39431 | SDValue N00 = N0.getOperand(0); | |||
39432 | if (N00.getOpcode() == X86ISD::SETCC_CARRY) { | |||
39433 | return DAG.getNode(ISD::AND, dl, VT, | |||
39434 | DAG.getNode(X86ISD::SETCC_CARRY, dl, VT, | |||
39435 | N00.getOperand(0), N00.getOperand(1)), | |||
39436 | DAG.getConstant(1, dl, VT)); | |||
39437 | } | |||
39438 | } | |||
39439 | ||||
39440 | if (SDValue NewCMov = combineToExtendCMOV(N, DAG)) | |||
39441 | return NewCMov; | |||
39442 | ||||
39443 | if (DCI.isBeforeLegalizeOps()) | |||
39444 | if (SDValue V = combineExtSetcc(N, DAG, Subtarget)) | |||
39445 | return V; | |||
39446 | ||||
39447 | if (SDValue V = combineToExtendVectorInReg(N, DAG, DCI, Subtarget)) | |||
39448 | return V; | |||
39449 | ||||
39450 | if (SDValue V = combineToExtendBoolVectorInReg(N, DAG, DCI, Subtarget)) | |||
39451 | return V; | |||
39452 | ||||
39453 | if (VT.isVector()) | |||
39454 | if (SDValue R = PromoteMaskArithmetic(N, DAG, Subtarget)) | |||
39455 | return R; | |||
39456 | ||||
39457 | if (SDValue NewAdd = promoteExtBeforeAdd(N, DAG, Subtarget)) | |||
39458 | return NewAdd; | |||
39459 | ||||
39460 | if (SDValue R = combineOrCmpEqZeroToCtlzSrl(N, DAG, DCI, Subtarget)) | |||
39461 | return R; | |||
39462 | ||||
39463 | return SDValue(); | |||
39464 | } | |||
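// Sketch of the SETCC_CARRY folds above (illustration only):
//   (i32 zext (and (i8 (X86ISD::SETCC_CARRY ...)), 1))
//     --> (i32 and (X86ISD::SETCC_CARRY ...), 1)
// The carry materialization is rebuilt directly at the wider type, so the
// zext (or the trunc in the second pattern) disappears.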
39465 | ||||
39466 | /// Try to map a 128-bit or larger integer comparison to vector instructions | |||
39467 | /// before type legalization splits it up into chunks. | |||
39468 | static SDValue combineVectorSizedSetCCEquality(SDNode *SetCC, SelectionDAG &DAG, | |||
39469 | const X86Subtarget &Subtarget) { | |||
39470 | ISD::CondCode CC = cast<CondCodeSDNode>(SetCC->getOperand(2))->get(); | |||
39471 | assert((CC == ISD::SETNE || CC == ISD::SETEQ) && "Bad comparison predicate"); | |||
39472 | ||||
39473 | // We're looking for an oversized integer equality comparison. | |||
39474 | SDValue X = SetCC->getOperand(0); | |||
39475 | SDValue Y = SetCC->getOperand(1); | |||
39476 | EVT OpVT = X.getValueType(); | |||
39477 | unsigned OpSize = OpVT.getSizeInBits(); | |||
39478 | if (!OpVT.isScalarInteger() || OpSize < 128) | |||
39479 | return SDValue(); | |||
39480 | ||||
39481 | // Ignore a comparison with zero because that gets special treatment in | |||
39482 | // EmitTest(). But make an exception for the special case of a pair of | |||
39483 | // logically-combined vector-sized operands compared to zero. This pattern may | |||
39484 | // be generated by the memcmp expansion pass with oversized integer compares | |||
39485 | // (see PR33325). | |||
39486 | bool IsOrXorXorCCZero = isNullConstant(Y) && X.getOpcode() == ISD::OR && | |||
39487 | X.getOperand(0).getOpcode() == ISD::XOR && | |||
39488 | X.getOperand(1).getOpcode() == ISD::XOR; | |||
39489 | if (isNullConstant(Y) && !IsOrXorXorCCZero) | |||
39490 | return SDValue(); | |||
39491 | ||||
39492 | // Bail out if we know that this is not really just an oversized integer. | |||
39493 | if (peekThroughBitcasts(X).getValueType() == MVT::f128 || | |||
39494 | peekThroughBitcasts(Y).getValueType() == MVT::f128) | |||
39495 | return SDValue(); | |||
39496 | ||||
39497 | // TODO: Use PXOR + PTEST for SSE4.1 or later? | |||
39498 | EVT VT = SetCC->getValueType(0); | |||
39499 | SDLoc DL(SetCC); | |||
39500 | if ((OpSize == 128 && Subtarget.hasSSE2()) || | |||
39501 | (OpSize == 256 && Subtarget.hasAVX2()) || | |||
39502 | (OpSize == 512 && Subtarget.useAVX512Regs())) { | |||
39503 | EVT VecVT = OpSize == 512 ? MVT::v16i32 : | |||
39504 | OpSize == 256 ? MVT::v32i8 : | |||
39505 | MVT::v16i8; | |||
39506 | EVT CmpVT = OpSize == 512 ? MVT::v16i1 : VecVT; | |||
39507 | SDValue Cmp; | |||
39508 | if (IsOrXorXorCCZero) { | |||
39509 | // This is a bitwise-combined equality comparison of 2 pairs of vectors: | |||
39510 | // setcc i128 (or (xor A, B), (xor C, D)), 0, eq|ne | |||
39511 | // Use 2 vector equality compares and 'and' the results before doing a | |||
39512 | // MOVMSK. | |||
39513 | SDValue A = DAG.getBitcast(VecVT, X.getOperand(0).getOperand(0)); | |||
39514 | SDValue B = DAG.getBitcast(VecVT, X.getOperand(0).getOperand(1)); | |||
39515 | SDValue C = DAG.getBitcast(VecVT, X.getOperand(1).getOperand(0)); | |||
39516 | SDValue D = DAG.getBitcast(VecVT, X.getOperand(1).getOperand(1)); | |||
39517 | SDValue Cmp1 = DAG.getSetCC(DL, CmpVT, A, B, ISD::SETEQ); | |||
39518 | SDValue Cmp2 = DAG.getSetCC(DL, CmpVT, C, D, ISD::SETEQ); | |||
39519 | Cmp = DAG.getNode(ISD::AND, DL, CmpVT, Cmp1, Cmp2); | |||
39520 | } else { | |||
39521 | SDValue VecX = DAG.getBitcast(VecVT, X); | |||
39522 | SDValue VecY = DAG.getBitcast(VecVT, Y); | |||
39523 | Cmp = DAG.getSetCC(DL, CmpVT, VecX, VecY, ISD::SETEQ); | |||
39524 | } | |||
39525 | // For 512-bits we want to emit a setcc that will lower to kortest. | |||
39526 | if (OpSize == 512) | |||
39527 | return DAG.getSetCC(DL, VT, DAG.getBitcast(MVT::i16, Cmp), | |||
39528 | DAG.getConstant(0xFFFF, DL, MVT::i16), CC); | |||
39529 | // If all bytes match (bitmask is 0x(FFFF)FFFF), that's equality. | |||
39530 | // setcc i128 X, Y, eq --> setcc (pmovmskb (pcmpeqb X, Y)), 0xFFFF, eq | |||
39531 | // setcc i128 X, Y, ne --> setcc (pmovmskb (pcmpeqb X, Y)), 0xFFFF, ne | |||
39532 | // setcc i256 X, Y, eq --> setcc (vpmovmskb (vpcmpeqb X, Y)), 0xFFFFFFFF, eq | |||
39533 | // setcc i256 X, Y, ne --> setcc (vpmovmskb (vpcmpeqb X, Y)), 0xFFFFFFFF, ne | |||
39534 | SDValue MovMsk = DAG.getNode(X86ISD::MOVMSK, DL, MVT::i32, Cmp); | |||
39535 | SDValue FFFFs = DAG.getConstant(OpSize == 128 ? 0xFFFF : 0xFFFFFFFF, DL, | |||
39536 | MVT::i32); | |||
39537 | return DAG.getSetCC(DL, VT, MovMsk, FFFFs, CC); | |||
39538 | } | |||
39539 | ||||
39540 | return SDValue(); | |||
39541 | } | |||
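// End-to-end sketch assuming SSE2 (illustration only): an i128 equality,
// e.g. from memcmp expansion, becomes one vector compare plus one scalar
// test:
//   setcc i128 X, Y, eq
//     --> setcc (pmovmskb (pcmpeqb (bitcast v16i8 X), (bitcast v16i8 Y))),
//               0xFFFF, eq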
39542 | ||||
39543 | static SDValue combineSetCC(SDNode *N, SelectionDAG &DAG, | |||
39544 | const X86Subtarget &Subtarget) { | |||
39545 | ISD::CondCode CC = cast<CondCodeSDNode>(N->getOperand(2))->get(); | |||
39546 | SDValue LHS = N->getOperand(0); | |||
39547 | SDValue RHS = N->getOperand(1); | |||
39548 | EVT VT = N->getValueType(0); | |||
39549 | EVT OpVT = LHS.getValueType(); | |||
39550 | SDLoc DL(N); | |||
39551 | ||||
39552 | if (CC == ISD::SETNE || CC == ISD::SETEQ) { | |||
39553 | // 0-x == y --> x+y == 0 | |||
39554 | // 0-x != y --> x+y != 0 | |||
39555 | if (LHS.getOpcode() == ISD::SUB && isNullConstant(LHS.getOperand(0)) && | |||
39556 | LHS.hasOneUse()) { | |||
39557 | SDValue Add = DAG.getNode(ISD::ADD, DL, OpVT, RHS, LHS.getOperand(1)); | |||
39558 | return DAG.getSetCC(DL, VT, Add, DAG.getConstant(0, DL, OpVT), CC); | |||
39559 | } | |||
39560 | // x == 0-y --> x+y == 0 | |||
39561 | // x != 0-y --> x+y != 0 | |||
39562 | if (RHS.getOpcode() == ISD::SUB && isNullConstant(RHS.getOperand(0)) && | |||
39563 | RHS.hasOneUse()) { | |||
39564 | SDValue Add = DAG.getNode(ISD::ADD, DL, OpVT, LHS, RHS.getOperand(1)); | |||
39565 | return DAG.getSetCC(DL, VT, Add, DAG.getConstant(0, DL, OpVT), CC); | |||
39566 | } | |||
39567 | ||||
39568 | if (SDValue V = combineVectorSizedSetCCEquality(N, DAG, Subtarget)) | |||
39569 | return V; | |||
39570 | } | |||
39571 | ||||
39572 | if (VT.isVector() && VT.getVectorElementType() == MVT::i1 && | |||
39573 | (CC == ISD::SETNE || CC == ISD::SETEQ || ISD::isSignedIntSetCC(CC))) { | |||
39574 | // Put build_vectors on the right. | |||
39575 | if (LHS.getOpcode() == ISD::BUILD_VECTOR) { | |||
39576 | std::swap(LHS, RHS); | |||
39577 | CC = ISD::getSetCCSwappedOperands(CC); | |||
39578 | } | |||
39579 | ||||
39580 | bool IsSEXT0 = | |||
39581 | (LHS.getOpcode() == ISD::SIGN_EXTEND) && | |||
39582 | (LHS.getOperand(0).getValueType().getVectorElementType() == MVT::i1); | |||
39583 | bool IsVZero1 = ISD::isBuildVectorAllZeros(RHS.getNode()); | |||
39584 | ||||
39585 | if (IsSEXT0 && IsVZero1) { | |||
39586 | assert(VT == LHS.getOperand(0).getValueType() && | |||
39587 | "Unexpected operand type"); | |||
39588 | if (CC == ISD::SETGT) | |||
39589 | return DAG.getConstant(0, DL, VT); | |||
39590 | if (CC == ISD::SETLE) | |||
39591 | return DAG.getConstant(1, DL, VT); | |||
39592 | if (CC == ISD::SETEQ || CC == ISD::SETGE) | |||
39593 | return DAG.getNOT(DL, LHS.getOperand(0), VT); | |||
39594 | ||||
39595 | assert((CC == ISD::SETNE || CC == ISD::SETLT) && | |||
39596 | "Unexpected condition code!"); | |||
39597 | return LHS.getOperand(0); | |||
39598 | } | |||
39599 | } | |||
39600 | ||||
39601 | // If we have AVX512, but not BWI and this is a vXi16/vXi8 setcc, just | |||
39602 | // pre-promote its result type since vXi1 vectors don't get promoted | |||
39603 | // during type legalization. | |||
39604 | // NOTE: The element count check is to ignore operand types that need to | |||
39605 | // go through type promotion to a 128-bit vector. | |||
39606 | if (Subtarget.hasAVX512() && !Subtarget.hasBWI() && VT.isVector() && | |||
39607 | VT.getVectorElementType() == MVT::i1 && | |||
39608 | (ExperimentalVectorWideningLegalization || | |||
39609 | VT.getVectorNumElements() > 4) && | |||
39610 | (OpVT.getVectorElementType() == MVT::i8 || | |||
39611 | OpVT.getVectorElementType() == MVT::i16)) { | |||
39612 | SDValue Setcc = DAG.getNode(ISD::SETCC, DL, OpVT, LHS, RHS, | |||
39613 | N->getOperand(2)); | |||
39614 | return DAG.getNode(ISD::TRUNCATE, DL, VT, Setcc); | |||
39615 | } | |||
39616 | ||||
39617 | // For an SSE1-only target, lower a comparison of v4f32 to X86ISD::CMPP early | |||
39618 | // to avoid scalarization via legalization because v4i32 is not a legal type. | |||
39619 | if (Subtarget.hasSSE1() && !Subtarget.hasSSE2() && VT == MVT::v4i32 && | |||
39620 | LHS.getValueType() == MVT::v4f32) | |||
39621 | return LowerVSETCC(SDValue(N, 0), Subtarget, DAG); | |||
39622 | ||||
39623 | return SDValue(); | |||
39624 | } | |||
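// Sketch of the AVX512-without-BWI pre-promotion above (illustration only):
//   (v16i1 (setcc v16i8 A, B, cc))
//     --> (v16i1 (trunc (v16i8 (setcc v16i8 A, B, cc))))
// The byte-element compare is legal without BWI, and the truncate to vXi1
// survives type legalization, whereas the vXi1 setcc result would not.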
39625 | ||||
39626 | static SDValue combineMOVMSK(SDNode *N, SelectionDAG &DAG, | |||
39627 | TargetLowering::DAGCombinerInfo &DCI) { | |||
39628 | SDValue Src = N->getOperand(0); | |||
39629 | MVT SrcVT = Src.getSimpleValueType(); | |||
39630 | MVT VT = N->getSimpleValueType(0); | |||
39631 | ||||
39632 | // Perform constant folding. | |||
39633 | if (ISD::isBuildVectorOfConstantSDNodes(Src.getNode())) { | |||
39634 | assert(VT == MVT::i32 && "Unexpected result type"); | |||
39635 | APInt Imm(32, 0); | |||
39636 | for (unsigned Idx = 0, e = Src.getNumOperands(); Idx < e; ++Idx) { | |||
39637 | SDValue In = Src.getOperand(Idx); | |||
39638 | if (!In.isUndef() && | |||
39639 | cast<ConstantSDNode>(In)->getAPIntValue().isNegative()) | |||
39640 | Imm.setBit(Idx); | |||
39641 | } | |||
39642 | return DAG.getConstant(Imm, SDLoc(N), VT); | |||
39643 | } | |||
39644 | ||||
39645 | // Look through int->fp bitcasts that don't change the element width. | |||
39646 | if (Src.getOpcode() == ISD::BITCAST && Src.hasOneUse() && | |||
39647 | SrcVT.isFloatingPoint() && | |||
39648 | Src.getOperand(0).getValueType() == | |||
39649 | EVT(SrcVT).changeVectorElementTypeToInteger()) | |||
39650 | Src = Src.getOperand(0); | |||
39651 | ||||
39652 | // Simplify the inputs. | |||
39653 | const TargetLowering &TLI = DAG.getTargetLoweringInfo(); | |||
39654 | APInt DemandedMask(APInt::getAllOnesValue(VT.getScalarSizeInBits())); | |||
39655 | if (TLI.SimplifyDemandedBits(SDValue(N, 0), DemandedMask, DCI)) | |||
39656 | return SDValue(N, 0); | |||
39657 | ||||
39658 | // Combine (movmsk (setne (and X, (1 << C)), 0)) -> (movmsk (shl X, BitWidth - C - 1)). | |||
39659 | // Only do this when the setcc input and output types are the same and the | |||
39660 | // setcc and the 'and' node have a single use. | |||
39661 | // FIXME: Support 256-bits with AVX1. The movmsk is split, but the and isn't. | |||
39662 | APInt SplatVal; | |||
39663 | if (Src.getOpcode() == ISD::SETCC && Src.hasOneUse() && | |||
39664 | Src.getOperand(0).getValueType() == Src.getValueType() && | |||
39665 | cast<CondCodeSDNode>(Src.getOperand(2))->get() == ISD::SETNE && | |||
39666 | ISD::isBuildVectorAllZeros(Src.getOperand(1).getNode()) && | |||
39667 | Src.getOperand(0).getOpcode() == ISD::AND) { | |||
39668 | SDValue And = Src.getOperand(0); | |||
39669 | if (And.hasOneUse() && | |||
39670 | ISD::isConstantSplatVector(And.getOperand(1).getNode(), SplatVal) && | |||
39671 | SplatVal.isPowerOf2()) { | |||
39672 | MVT VT = Src.getSimpleValueType(); | |||
39673 | unsigned BitWidth = VT.getScalarSizeInBits(); | |||
39674 | unsigned ShAmt = BitWidth - SplatVal.logBase2() - 1; | |||
39675 | SDLoc DL(And); | |||
39676 | SDValue X = And.getOperand(0); | |||
39677 | // If the element type is i8, we need to bitcast to i16 to use a legal | |||
39678 | // shift. If we wait until lowering, we end up with an extra AND to keep | |||
39679 | // bits from crossing the 8-bit element boundaries, but we don't care about that here. | |||
39680 | if (VT.getVectorElementType() == MVT::i8) { | |||
39681 | VT = MVT::getVectorVT(MVT::i16, VT.getVectorNumElements() / 2); | |||
39682 | X = DAG.getBitcast(VT, X); | |||
39683 | } | |||
39684 | SDValue Shl = DAG.getNode(ISD::SHL, DL, VT, X, | |||
39685 | DAG.getConstant(ShAmt, DL, VT)); | |||
39686 | SDValue Cast = DAG.getBitcast(SrcVT, Shl); | |||
39687 | return DAG.getNode(X86ISD::MOVMSK, SDLoc(N), N->getValueType(0), Cast); | |||
39688 | } | |||
39689 | } | |||
39690 | ||||
39691 | return SDValue(); | |||
39692 | } | |||
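// Sketch of the single-bit-test fold above (illustration only), for v4i32
// lanes testing bit 2 (SplatVal == 4, so ShAmt == 32 - 2 - 1 == 29):
//   (movmsk (setne (and X, splat(4)), 0)) --> (movmsk (shl X, 29))
// Shifting the tested bit into the sign position lets MOVMSK read it
// directly, with no compare needed.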
39693 | ||||
39694 | static SDValue combineGatherScatter(SDNode *N, SelectionDAG &DAG, | |||
39695 | TargetLowering::DAGCombinerInfo &DCI, | |||
39696 | const X86Subtarget &Subtarget) { | |||
39697 | SDLoc DL(N); | |||
39698 | ||||
39699 | if (DCI.isBeforeLegalizeOps()) { | |||
39700 | SDValue Index = N->getOperand(4); | |||
39701 | // Remove any sign extends from 32 bits or smaller to larger than 32 bits. | |||
39702 | // Only do this before LegalizeOps in case we need the sign extend for | |||
39703 | // legalization. | |||
39704 | if (Index.getOpcode() == ISD::SIGN_EXTEND) { | |||
39705 | if (Index.getScalarValueSizeInBits() > 32 && | |||
39706 | Index.getOperand(0).getScalarValueSizeInBits() <= 32) { | |||
39707 | SmallVector<SDValue, 5> NewOps(N->op_begin(), N->op_end()); | |||
39708 | NewOps[4] = Index.getOperand(0); | |||
39709 | SDNode *Res = DAG.UpdateNodeOperands(N, NewOps); | |||
39710 | if (Res == N) { | |||
39711 | // The original sign extend has fewer users; add it back to the worklist | |||
39712 | // in case it needs to be removed. | |||
39713 | DCI.AddToWorklist(Index.getNode()); | |||
39714 | DCI.AddToWorklist(N); | |||
39715 | } | |||
39716 | return SDValue(Res, 0); | |||
39717 | } | |||
39718 | } | |||
39719 | ||||
39720 | // Make sure the index is either i32 or i64 | |||
39721 | unsigned ScalarSize = Index.getScalarValueSizeInBits(); | |||
39722 | if (ScalarSize != 32 && ScalarSize != 64) { | |||
39723 | MVT EltVT = ScalarSize > 32 ? MVT::i64 : MVT::i32; | |||
39724 | EVT IndexVT = EVT::getVectorVT(*DAG.getContext(), EltVT, | |||
39725 | Index.getValueType().getVectorNumElements()); | |||
39726 | Index = DAG.getSExtOrTrunc(Index, DL, IndexVT); | |||
39727 | SmallVector<SDValue, 5> NewOps(N->op_begin(), N->op_end()); | |||
39728 | NewOps[4] = Index; | |||
39729 | SDNode *Res = DAG.UpdateNodeOperands(N, NewOps); | |||
39730 | if (Res == N) | |||
39731 | DCI.AddToWorklist(N); | |||
39732 | return SDValue(Res, 0); | |||
39733 | } | |||
39734 | ||||
39735 | // Try to remove zero extends from 32->64 if we know the sign bit of | |||
39736 | // the input is zero. | |||
39737 | if (Index.getOpcode() == ISD::ZERO_EXTEND && | |||
39738 | Index.getScalarValueSizeInBits() == 64 && | |||
39739 | Index.getOperand(0).getScalarValueSizeInBits() == 32) { | |||
39740 | if (DAG.SignBitIsZero(Index.getOperand(0))) { | |||
39741 | SmallVector<SDValue, 5> NewOps(N->op_begin(), N->op_end()); | |||
39742 | NewOps[4] = Index.getOperand(0); | |||
39743 | SDNode *Res = DAG.UpdateNodeOperands(N, NewOps); | |||
39744 | if (Res == N) { | |||
39745 | // The original zero extend has fewer users; add it back to the worklist | |||
39746 | // in case it needs to be removed. | |||
39747 | DCI.AddToWorklist(Index.getNode()); | |||
39748 | DCI.AddToWorklist(N); | |||
39749 | } | |||
39750 | return SDValue(Res, 0); | |||
39751 | } | |||
39752 | } | |||
39753 | } | |||
39754 | ||||
39755 | // With AVX2 gathers we only demand the sign bit of each mask element. | |||
39756 | if (!Subtarget.hasAVX512()) { | |||
39757 | const TargetLowering &TLI = DAG.getTargetLoweringInfo(); | |||
39758 | SDValue Mask = N->getOperand(2); | |||
39759 | APInt DemandedMask(APInt::getSignMask(Mask.getScalarValueSizeInBits())); | |||
39760 | if (TLI.SimplifyDemandedBits(Mask, DemandedMask, DCI)) | |||
39761 | return SDValue(N, 0); | |||
39762 | } | |||
39763 | ||||
39764 | return SDValue(); | |||
39765 | } | |||
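// e.g. a gather whose index was computed as (v8i64 sign_extend (v8i32 Idx))
// is rewritten to index with the v8i32 value directly (illustration only);
// the gather instructions accept 32-bit indices natively, so the extend
// only wastes register width.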
39766 | ||||
39767 | // Optimize RES = X86ISD::SETCC CONDCODE, EFLAG_INPUT | |||
39768 | static SDValue combineX86SetCC(SDNode *N, SelectionDAG &DAG, | |||
39769 | const X86Subtarget &Subtarget) { | |||
39770 | SDLoc DL(N); | |||
39771 | X86::CondCode CC = X86::CondCode(N->getConstantOperandVal(0)); | |||
39772 | SDValue EFLAGS = N->getOperand(1); | |||
39773 | ||||
39774 | // Try to simplify the EFLAGS and condition code operands. | |||
39775 | if (SDValue Flags = combineSetCCEFLAGS(EFLAGS, CC, DAG, Subtarget)) | |||
39776 | return getSETCC(CC, Flags, DL, DAG); | |||
39777 | ||||
39778 | return SDValue(); | |||
39779 | } | |||
39780 | ||||
39781 | /// Optimize branch condition evaluation. | |||
39782 | static SDValue combineBrCond(SDNode *N, SelectionDAG &DAG, | |||
39783 | const X86Subtarget &Subtarget) { | |||
39784 | SDLoc DL(N); | |||
39785 | SDValue EFLAGS = N->getOperand(3); | |||
39786 | X86::CondCode CC = X86::CondCode(N->getConstantOperandVal(2)); | |||
39787 | ||||
39788 | // Try to simplify the EFLAGS and condition code operands. | |||
39789 | // Make sure to not keep references to operands, as combineSetCCEFLAGS can | |||
39790 | // RAUW them under us. | |||
39791 | if (SDValue Flags = combineSetCCEFLAGS(EFLAGS, CC, DAG, Subtarget)) { | |||
39792 | SDValue Cond = DAG.getConstant(CC, DL, MVT::i8); | |||
39793 | return DAG.getNode(X86ISD::BRCOND, DL, N->getVTList(), N->getOperand(0), | |||
39794 | N->getOperand(1), Cond, Flags); | |||
39795 | } | |||
39796 | ||||
39797 | return SDValue(); | |||
39798 | } | |||
39799 | ||||
39800 | static SDValue combineVectorCompareAndMaskUnaryOp(SDNode *N, | |||
39801 | SelectionDAG &DAG) { | |||
39802 | // Take advantage of vector comparisons producing 0 or -1 in each lane to | |||
39803 | // optimize away operation when it's from a constant. | |||
39804 | // | |||
39805 | // The general transformation is: | |||
39806 | // UNARYOP(AND(VECTOR_CMP(x,y), constant)) --> | |||
39807 | // AND(VECTOR_CMP(x,y), constant2) | |||
39808 | // constant2 = UNARYOP(constant) | |||
39809 | ||||
39810 | // Early exit if this isn't a vector operation, the operand of the | |||
39811 | // unary operation isn't a bitwise AND, or if the sizes of the operations | |||
39812 | // aren't the same. | |||
39813 | EVT VT = N->getValueType(0); | |||
39814 | if (!VT.isVector() || N->getOperand(0)->getOpcode() != ISD::AND || | |||
39815 | N->getOperand(0)->getOperand(0)->getOpcode() != ISD::SETCC || | |||
39816 | VT.getSizeInBits() != N->getOperand(0).getValueSizeInBits()) | |||
39817 | return SDValue(); | |||
39818 | ||||
39819 | // Now check that the other operand of the AND is a constant. We could | |||
39820 | // make the transformation for non-constant splats as well, but it's unclear | |||
39821 | // that would be a benefit as it would not eliminate any operations, just | |||
39822 | // perform one more step in scalar code before moving to the vector unit. | |||
39823 | if (BuildVectorSDNode *BV = | |||
39824 | dyn_cast<BuildVectorSDNode>(N->getOperand(0)->getOperand(1))) { | |||
39825 | // Bail out if the vector isn't a constant. | |||
39826 | if (!BV->isConstant()) | |||
39827 | return SDValue(); | |||
39828 | ||||
39829 | // Everything checks out. Build up the new and improved node. | |||
39830 | SDLoc DL(N); | |||
39831 | EVT IntVT = BV->getValueType(0); | |||
39832 | // Create a new constant of the appropriate type for the transformed | |||
39833 | // DAG. | |||
39834 | SDValue SourceConst = DAG.getNode(N->getOpcode(), DL, VT, SDValue(BV, 0)); | |||
39835 | // The AND node needs bitcasts to/from an integer vector type around it. | |||
39836 | SDValue MaskConst = DAG.getBitcast(IntVT, SourceConst); | |||
39837 | SDValue NewAnd = DAG.getNode(ISD::AND, DL, IntVT, | |||
39838 | N->getOperand(0)->getOperand(0), MaskConst); | |||
39839 | SDValue Res = DAG.getBitcast(VT, NewAnd); | |||
39840 | return Res; | |||
39841 | } | |||
39842 | ||||
39843 | return SDValue(); | |||
39844 | } | |||
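// Worked example (sketch): with Cmp = (vector_cmp x, y) producing 0/-1
// lanes,
//   (uint_to_fp (and Cmp, <4,4,4,4>))
//     --> (and Cmp, (bitcast (uint_to_fp <4,4,4,4>)))
// Each lane of the AND is either the constant or zero, and both 0 and 0.0
// have an all-zero bit pattern, so folding the conversion into the constant
// is exact.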
39845 | ||||
39846 | static SDValue combineUIntToFP(SDNode *N, SelectionDAG &DAG, | |||
39847 | const X86Subtarget &Subtarget) { | |||
39848 | SDValue Op0 = N->getOperand(0); | |||
39849 | EVT VT = N->getValueType(0); | |||
39850 | EVT InVT = Op0.getValueType(); | |||
39851 | ||||
39852 | // UINT_TO_FP(vXi1) -> SINT_TO_FP(ZEXT(vXi1 to vXi32)) | |||
39853 | // UINT_TO_FP(vXi8) -> SINT_TO_FP(ZEXT(vXi8 to vXi32)) | |||
39854 | // UINT_TO_FP(vXi16) -> SINT_TO_FP(ZEXT(vXi16 to vXi32)) | |||
39855 | if (InVT.isVector() && InVT.getScalarSizeInBits() < 32) { | |||
39856 | SDLoc dl(N); | |||
39857 | EVT DstVT = EVT::getVectorVT(*DAG.getContext(), MVT::i32, | |||
39858 | InVT.getVectorNumElements()); | |||
39859 | SDValue P = DAG.getNode(ISD::ZERO_EXTEND, dl, DstVT, Op0); | |||
39860 | ||||
39861 | // UINT_TO_FP isn't legal without AVX512 so use SINT_TO_FP. | |||
39862 | return DAG.getNode(ISD::SINT_TO_FP, dl, VT, P); | |||
39863 | } | |||
39864 | ||||
39865 | // Since UINT_TO_FP is legal (it's marked custom), the DAG combiner won't | |||
39866 | // optimize it to a SINT_TO_FP when the sign bit is known zero. Perform | |||
39867 | // the optimization here. | |||
39868 | if (DAG.SignBitIsZero(Op0)) | |||
39869 | return DAG.getNode(ISD::SINT_TO_FP, SDLoc(N), VT, Op0); | |||
39870 | ||||
39871 | return SDValue(); | |||
39872 | } | |||
39873 | ||||
39874 | static SDValue combineSIntToFP(SDNode *N, SelectionDAG &DAG, | |||
39875 | const X86Subtarget &Subtarget) { | |||
39876 | // First try to optimize away the conversion entirely when it's | |||
39877 | // conditionally from a constant. Vectors only. | |||
39878 | if (SDValue Res = combineVectorCompareAndMaskUnaryOp(N, DAG)) | |||
39879 | return Res; | |||
39880 | ||||
39881 | // Now move on to more general possibilities. | |||
39882 | SDValue Op0 = N->getOperand(0); | |||
39883 | EVT VT = N->getValueType(0); | |||
39884 | EVT InVT = Op0.getValueType(); | |||
39885 | ||||
39886 | // SINT_TO_FP(vXi1) -> SINT_TO_FP(SEXT(vXi1 to vXi32)) | |||
39887 | // SINT_TO_FP(vXi8) -> SINT_TO_FP(SEXT(vXi8 to vXi32)) | |||
39888 | // SINT_TO_FP(vXi16) -> SINT_TO_FP(SEXT(vXi16 to vXi32)) | |||
39889 | if (InVT.isVector() && InVT.getScalarSizeInBits() < 32) { | |||
39890 | SDLoc dl(N); | |||
39891 | EVT DstVT = EVT::getVectorVT(*DAG.getContext(), MVT::i32, | |||
39892 | InVT.getVectorNumElements()); | |||
39893 | SDValue P = DAG.getNode(ISD::SIGN_EXTEND, dl, DstVT, Op0); | |||
39894 | return DAG.getNode(ISD::SINT_TO_FP, dl, VT, P); | |||
39895 | } | |||
39896 | ||||
39897 | // Without AVX512DQ we only support i64 to float scalar conversion. For both | |||
39898 | // vectors and scalars, see if we know that the upper bits are all the sign | |||
39899 | // bit, in which case we can truncate the input to i32 and convert from that. | |||
39900 | if (InVT.getScalarSizeInBits() > 32 && !Subtarget.hasDQI()) { | |||
39901 | unsigned BitWidth = InVT.getScalarSizeInBits(); | |||
39902 | unsigned NumSignBits = DAG.ComputeNumSignBits(Op0); | |||
39903 | if (NumSignBits >= (BitWidth - 31)) { | |||
39904 | EVT TruncVT = EVT::getIntegerVT(*DAG.getContext(), 32); | |||
39905 | if (InVT.isVector()) | |||
39906 | TruncVT = EVT::getVectorVT(*DAG.getContext(), TruncVT, | |||
39907 | InVT.getVectorNumElements()); | |||
39908 | SDLoc dl(N); | |||
39909 | SDValue Trunc = DAG.getNode(ISD::TRUNCATE, dl, TruncVT, Op0); | |||
39910 | return DAG.getNode(ISD::SINT_TO_FP, dl, VT, Trunc); | |||
39911 | } | |||
39912 | } | |||
39913 | ||||
39914 | // Transform (SINT_TO_FP (i64 ...)) into an x87 operation if we have | |||
39915 | // a 32-bit target where SSE doesn't support i64->FP operations. | |||
39916 | if (!Subtarget.useSoftFloat() && Subtarget.hasX87() && | |||
39917 | Op0.getOpcode() == ISD::LOAD) { | |||
39918 | LoadSDNode *Ld = cast<LoadSDNode>(Op0.getNode()); | |||
39919 | EVT LdVT = Ld->getValueType(0); | |||
39920 | ||||
39921 | // This transformation is not supported if the result type is f16 or f128. | |||
39922 | if (VT == MVT::f16 || VT == MVT::f128) | |||
39923 | return SDValue(); | |||
39924 | ||||
39925 | // If we have AVX512DQ we can use packed conversion instructions unless | |||
39926 | // the VT is f80. | |||
39927 | if (Subtarget.hasDQI() && VT != MVT::f80) | |||
39928 | return SDValue(); | |||
39929 | ||||
39930 | if (!Ld->isVolatile() && !VT.isVector() && | |||
39931 | ISD::isNON_EXTLoad(Op0.getNode()) && Op0.hasOneUse() && | |||
39932 | !Subtarget.is64Bit() && LdVT == MVT::i64) { | |||
39933 | SDValue FILDChain = Subtarget.getTargetLowering()->BuildFILD( | |||
39934 | SDValue(N, 0), LdVT, Ld->getChain(), Op0, DAG); | |||
39935 | DAG.ReplaceAllUsesOfValueWith(Op0.getValue(1), FILDChain.getValue(1)); | |||
39936 | return FILDChain; | |||
39937 | } | |||
39938 | } | |||
39939 | return SDValue(); | |||
39940 | } | |||
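// Example of the narrowing above (sketch): for (sint_to_fp (i64 X)) where
// ComputeNumSignBits(X) >= 33, the top 33 bits are all copies of the sign
// bit, so the value fits in i32 and we emit
//   (sint_to_fp (trunc X to i32))
// avoiding the slower 64-bit conversion path.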
39941 | ||||
39942 | static bool needCarryOrOverflowFlag(SDValue Flags) { | |||
39943 | assert(Flags.getValueType() == MVT::i32 && "Unexpected VT!"); | |||
39944 | ||||
39945 | for (SDNode::use_iterator UI = Flags->use_begin(), UE = Flags->use_end(); | |||
39946 | UI != UE; ++UI) { | |||
39947 | SDNode *User = *UI; | |||
39948 | ||||
39949 | X86::CondCode CC; | |||
39950 | switch (User->getOpcode()) { | |||
39951 | default: | |||
39952 | // Be conservative. | |||
39953 | return true; | |||
39954 | case X86ISD::SETCC: | |||
39955 | case X86ISD::SETCC_CARRY: | |||
39956 | CC = (X86::CondCode)User->getConstantOperandVal(0); | |||
39957 | break; | |||
39958 | case X86ISD::BRCOND: | |||
39959 | CC = (X86::CondCode)User->getConstantOperandVal(2); | |||
39960 | break; | |||
39961 | case X86ISD::CMOV: | |||
39962 | CC = (X86::CondCode)User->getConstantOperandVal(2); | |||
39963 | break; | |||
39964 | } | |||
39965 | ||||
39966 | switch (CC) { | |||
39967 | default: break; | |||
39968 | case X86::COND_A: case X86::COND_AE: | |||
39969 | case X86::COND_B: case X86::COND_BE: | |||
39970 | case X86::COND_O: case X86::COND_NO: | |||
39971 | case X86::COND_G: case X86::COND_GE: | |||
39972 | case X86::COND_L: case X86::COND_LE: | |||
39973 | return true; | |||
39974 | } | |||
39975 | } | |||
39976 | ||||
39977 | return false; | |||
39978 | } | |||
39979 | ||||
39980 | static bool onlyZeroFlagUsed(SDValue Flags) { | |||
39981 | assert(Flags.getValueType() == MVT::i32 && "Unexpected VT!"); | |||
39982 | ||||
39983 | for (SDNode::use_iterator UI = Flags->use_begin(), UE = Flags->use_end(); | |||
39984 | UI != UE; ++UI) { | |||
39985 | SDNode *User = *UI; | |||
39986 | ||||
39987 | unsigned CCOpNo; | |||
39988 | switch (User->getOpcode()) { | |||
39989 | default: | |||
39990 | // Be conservative. | |||
39991 | return false; | |||
39992 | case X86ISD::SETCC: CCOpNo = 0; break; | |||
39993 | case X86ISD::SETCC_CARRY: CCOpNo = 0; break; | |||
39994 | case X86ISD::BRCOND: CCOpNo = 2; break; | |||
39995 | case X86ISD::CMOV: CCOpNo = 2; break; | |||
39996 | } | |||
39997 | ||||
39998 | X86::CondCode CC = (X86::CondCode)User->getConstantOperandVal(CCOpNo); | |||
39999 | if (CC != X86::COND_E && CC != X86::COND_NE) | |||
40000 | return false; | |||
40001 | } | |||
40002 | ||||
40003 | return true; | |||
40004 | } | |||
40005 | ||||
40006 | static SDValue combineCMP(SDNode *N, SelectionDAG &DAG) { | |||
40007 | // Only handle test patterns. | |||
40008 | if (!isNullConstant(N->getOperand(1))) | |||
40009 | return SDValue(); | |||
40010 | ||||
40011 | // If we have a CMP of a truncated binop, see if we can make a smaller binop | |||
40012 | // and use its flags directly. | |||
40013 | // TODO: Maybe we should try promoting compares that only use the zero flag | |||
40014 | // first, if we can prove the upper bits are zero with computeKnownBits? | |||
40015 | SDLoc dl(N); | |||
40016 | SDValue Op = N->getOperand(0); | |||
40017 | EVT VT = Op.getValueType(); | |||
40018 | ||||
40019 | // If we have a constant logical shift that's only used in a comparison | |||
40020 | // against zero turn it into an equivalent AND. This allows turning it into | |||
40021 | // a TEST instruction later. | |||
40022 | if ((Op.getOpcode() == ISD::SRL || Op.getOpcode() == ISD::SHL) && | |||
40023 | Op.hasOneUse() && isa<ConstantSDNode>(Op.getOperand(1)) && | |||
40024 | onlyZeroFlagUsed(SDValue(N, 0))) { | |||
40025 | EVT VT = Op.getValueType(); | |||
40026 | unsigned BitWidth = VT.getSizeInBits(); | |||
40027 | unsigned ShAmt = Op.getConstantOperandVal(1); | |||
40028 | if (ShAmt < BitWidth) { // Avoid undefined shifts. | |||
40029 | APInt Mask = Op.getOpcode() == ISD::SRL | |||
40030 | ? APInt::getHighBitsSet(BitWidth, BitWidth - ShAmt) | |||
40031 | : APInt::getLowBitsSet(BitWidth, BitWidth - ShAmt); | |||
40032 | if (Mask.isSignedIntN(32)) { | |||
40033 | Op = DAG.getNode(ISD::AND, dl, VT, Op.getOperand(0), | |||
40034 | DAG.getConstant(Mask, dl, VT)); | |||
40035 | return DAG.getNode(X86ISD::CMP, dl, MVT::i32, Op, | |||
40036 | DAG.getConstant(0, dl, VT)); | |||
40037 | } | |||
40038 | } | |||
40039 | } | |||
40040 | ||||
40041 | ||||
40042 | // Look for a truncate with a single use. | |||
40043 | if (Op.getOpcode() != ISD::TRUNCATE || !Op.hasOneUse()) | |||
40044 | return SDValue(); | |||
40045 | ||||
40046 | Op = Op.getOperand(0); | |||
40047 | ||||
40048 | // Arithmetic op can only have one use. | |||
40049 | if (!Op.hasOneUse()) | |||
40050 | return SDValue(); | |||
40051 | ||||
40052 | unsigned NewOpc; | |||
40053 | switch (Op.getOpcode()) { | |||
40054 | default: return SDValue(); | |||
40055 | case ISD::AND: | |||
40056 | // Skip AND with a constant. We have special handling for AND with an | |||
40057 | // immediate during isel to generate TEST instructions. | |||
40058 | if (isa<ConstantSDNode>(Op.getOperand(1))) | |||
40059 | return SDValue(); | |||
40060 | NewOpc = X86ISD::AND; | |||
40061 | break; | |||
40062 | case ISD::OR: NewOpc = X86ISD::OR; break; | |||
40063 | case ISD::XOR: NewOpc = X86ISD::XOR; break; | |||
40064 | case ISD::ADD: | |||
40065 | // If the carry or overflow flag is used, we can't truncate. | |||
40066 | if (needCarryOrOverflowFlag(SDValue(N, 0))) | |||
40067 | return SDValue(); | |||
40068 | NewOpc = X86ISD::ADD; | |||
40069 | break; | |||
40070 | case ISD::SUB: | |||
40071 | // If the carry or overflow flag is used, we can't truncate. | |||
40072 | if (needCarryOrOverflowFlag(SDValue(N, 0))) | |||
40073 | return SDValue(); | |||
40074 | NewOpc = X86ISD::SUB; | |||
40075 | break; | |||
40076 | } | |||
40077 | ||||
40078 | // We found an op we can narrow. Truncate its inputs. | |||
40079 | SDValue Op0 = DAG.getNode(ISD::TRUNCATE, dl, VT, Op.getOperand(0)); | |||
40080 | SDValue Op1 = DAG.getNode(ISD::TRUNCATE, dl, VT, Op.getOperand(1)); | |||
40081 | ||||
40082 | // Use an X86-specific opcode to avoid DAG combine messing with it. | |||
40083 | SDVTList VTs = DAG.getVTList(VT, MVT::i32); | |||
40084 | Op = DAG.getNode(NewOpc, dl, VTs, Op0, Op1); | |||
40085 | ||||
40086 | // For AND, keep a CMP so that we can match the test pattern. | |||
40087 | if (NewOpc == X86ISD::AND) | |||
40088 | return DAG.getNode(X86ISD::CMP, dl, MVT::i32, Op, | |||
40089 | DAG.getConstant(0, dl, VT)); | |||
40090 | ||||
40091 | // Return the flags. | |||
40092 | return Op.getValue(1); | |||
40093 | } | |||
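// Sketch of the narrowing above (illustration only):
//   (X86ISD::CMP (trunc (add i64 A, B) to i32), 0)
//     --> flags of (X86ISD::ADD (trunc A), (trunc B))
// This is only safe when no consumer reads the carry or overflow flags,
// since those differ between the narrow and wide operations.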
40094 | ||||
40095 | static SDValue combineSBB(SDNode *N, SelectionDAG &DAG) { | |||
40096 | if (SDValue Flags = combineCarryThroughADD(N->getOperand(2))) { | |||
40097 | MVT VT = N->getSimpleValueType(0); | |||
40098 | SDVTList VTs = DAG.getVTList(VT, MVT::i32); | |||
40099 | return DAG.getNode(X86ISD::SBB, SDLoc(N), VTs, | |||
40100 | N->getOperand(0), N->getOperand(1), | |||
40101 | Flags); | |||
40102 | } | |||
40103 | ||||
40104 | return SDValue(); | |||
40105 | } | |||
40106 | ||||
40107 | // Optimize RES, EFLAGS = X86ISD::ADC LHS, RHS, EFLAGS | |||
40108 | static SDValue combineADC(SDNode *N, SelectionDAG &DAG, | |||
40109 | TargetLowering::DAGCombinerInfo &DCI) { | |||
40110 | // If the LHS and RHS of the ADC node are zero, then it can't overflow and | |||
40111 | // the result is either zero or one (depending on the input carry bit). | |||
40112 | // Strength reduce this down to a "set on carry" aka SETCC_CARRY&1. | |||
40113 | if (X86::isZeroNode(N->getOperand(0)) && | |||
40114 | X86::isZeroNode(N->getOperand(1)) && | |||
40115 | // We don't have a good way to replace an EFLAGS use, so only do this when | |||
40116 | // the EFLAGS result is currently dead. | |||
40117 | SDValue(N, 1).use_empty()) { | |||
40118 | SDLoc DL(N); | |||
40119 | EVT VT = N->getValueType(0); | |||
40120 | SDValue CarryOut = DAG.getConstant(0, DL, N->getValueType(1)); | |||
40121 | SDValue Res1 = DAG.getNode(ISD::AND, DL, VT, | |||
40122 | DAG.getNode(X86ISD::SETCC_CARRY, DL, VT, | |||
40123 | DAG.getConstant(X86::COND_B, DL, | |||
40124 | MVT::i8), | |||
40125 | N->getOperand(2)), | |||
40126 | DAG.getConstant(1, DL, VT)); | |||
40127 | return DCI.CombineTo(N, Res1, CarryOut); | |||
40128 | } | |||
40129 | ||||
40130 | if (SDValue Flags = combineCarryThroughADD(N->getOperand(2))) { | |||
40131 | MVT VT = N->getSimpleValueType(0); | |||
40132 | SDVTList VTs = DAG.getVTList(VT, MVT::i32); | |||
40133 | return DAG.getNode(X86ISD::ADC, SDLoc(N), VTs, | |||
40134 | N->getOperand(0), N->getOperand(1), | |||
40135 | Flags); | |||
40136 | } | |||
40137 | ||||
40138 | return SDValue(); | |||
40139 | } | |||
40140 | ||||
40141 | /// If this is an add or subtract where one operand is produced by a cmp+setcc, | |||
40142 | /// then try to convert it to an ADC or SBB. This replaces TEST+SET+{ADD/SUB} | |||
40143 | /// with CMP+{ADC, SBB}. | |||
40144 | static SDValue combineAddOrSubToADCOrSBB(SDNode *N, SelectionDAG &DAG) { | |||
40145 | bool IsSub = N->getOpcode() == ISD::SUB; | |||
40146 | SDValue X = N->getOperand(0); | |||
40147 | SDValue Y = N->getOperand(1); | |||
40148 | ||||
40149 | // If this is an add, canonicalize a zext operand to the RHS. | |||
40150 | // TODO: Incomplete? What if both sides are zexts? | |||
40151 | if (!IsSub && X.getOpcode() == ISD::ZERO_EXTEND && | |||
40152 | Y.getOpcode() != ISD::ZERO_EXTEND) | |||
40153 | std::swap(X, Y); | |||
40154 | ||||
40155 | // Look through a one-use zext. | |||
40156 | bool PeekedThroughZext = false; | |||
40157 | if (Y.getOpcode() == ISD::ZERO_EXTEND && Y.hasOneUse()) { | |||
40158 | Y = Y.getOperand(0); | |||
40159 | PeekedThroughZext = true; | |||
40160 | } | |||
40161 | ||||
40162 | // If this is an add, canonicalize a setcc operand to the RHS. | |||
40163 | // TODO: Incomplete? What if both sides are setcc? | |||
40164 | // TODO: Should we allow peeking through a zext of the other operand? | |||
40165 | if (!IsSub && !PeekedThroughZext && X.getOpcode() == X86ISD::SETCC && | |||
40166 | Y.getOpcode() != X86ISD::SETCC) | |||
40167 | std::swap(X, Y); | |||
40168 | ||||
40169 | if (Y.getOpcode() != X86ISD::SETCC || !Y.hasOneUse()) | |||
40170 | return SDValue(); | |||
40171 | ||||
40172 | SDLoc DL(N); | |||
40173 | EVT VT = N->getValueType(0); | |||
40174 | X86::CondCode CC = (X86::CondCode)Y.getConstantOperandVal(0); | |||
40175 | ||||
40176 | // If X is -1 or 0, then we have an opportunity to avoid constants required in | |||
40177 | // the general case below. | |||
40178 | auto *ConstantX = dyn_cast<ConstantSDNode>(X); | |||
40179 | if (ConstantX) { | |||
40180 | if ((!IsSub && CC == X86::COND_AE && ConstantX->isAllOnesValue()) || | |||
40181 | (IsSub && CC == X86::COND_B && ConstantX->isNullValue())) { | |||
40182 | // This is a complicated way to get -1 or 0 from the carry flag: | |||
40183 | // -1 + SETAE --> -1 + (!CF) --> CF ? -1 : 0 --> SBB %eax, %eax | |||
40184 | // 0 - SETB --> 0 - (CF) --> CF ? -1 : 0 --> SBB %eax, %eax | |||
40185 | return DAG.getNode(X86ISD::SETCC_CARRY, DL, VT, | |||
40186 | DAG.getConstant(X86::COND_B, DL, MVT::i8), | |||
40187 | Y.getOperand(1)); | |||
40188 | } | |||
40189 | ||||
40190 | if ((!IsSub && CC == X86::COND_BE && ConstantX->isAllOnesValue()) || | |||
40191 | (IsSub && CC == X86::COND_A && ConstantX->isNullValue())) { | |||
40192 | SDValue EFLAGS = Y->getOperand(1); | |||
40193 | if (EFLAGS.getOpcode() == X86ISD::SUB && EFLAGS.hasOneUse() && | |||
40194 | EFLAGS.getValueType().isInteger() && | |||
40195 | !isa<ConstantSDNode>(EFLAGS.getOperand(1))) { | |||
40196 | // Swap the operands of a SUB, and we have the same pattern as above. | |||
40197 | // -1 + SETBE (SUB A, B) --> -1 + SETAE (SUB B, A) --> SUB + SBB | |||
40198 | // 0 - SETA (SUB A, B) --> 0 - SETB (SUB B, A) --> SUB + SBB | |||
40199 | SDValue NewSub = DAG.getNode( | |||
40200 | X86ISD::SUB, SDLoc(EFLAGS), EFLAGS.getNode()->getVTList(), | |||
40201 | EFLAGS.getOperand(1), EFLAGS.getOperand(0)); | |||
40202 | SDValue NewEFLAGS = SDValue(NewSub.getNode(), EFLAGS.getResNo()); | |||
40203 | return DAG.getNode(X86ISD::SETCC_CARRY, DL, VT, | |||
40204 | DAG.getConstant(X86::COND_B, DL, MVT::i8), | |||
40205 | NewEFLAGS); | |||
40206 | } | |||
40207 | } | |||
40208 | } | |||
40209 | ||||
40210 | if (CC == X86::COND_B) { | |||
40211 | // X + SETB Z --> adc X, 0 | |||
40212 | // X - SETB Z --> sbb X, 0 | |||
40213 | return DAG.getNode(IsSub ? X86ISD::SBB : X86ISD::ADC, DL, | |||
40214 | DAG.getVTList(VT, MVT::i32), X, | |||
40215 | DAG.getConstant(0, DL, VT), Y.getOperand(1)); | |||
40216 | } | |||
40217 | ||||
40218 | if (CC == X86::COND_A) { | |||
40219 | SDValue EFLAGS = Y->getOperand(1); | |||
40220 | // Try to convert COND_A into COND_B in an attempt to facilitate | |||
40221 | // materializing "setb reg". | |||
40222 | // | |||
40223 | // Do not flip "e > c", where "c" is a constant, because Cmp instruction | |||
40224 | // cannot take an immediate as its first operand. | |||
40225 | // | |||
40226 | if (EFLAGS.getOpcode() == X86ISD::SUB && EFLAGS.hasOneUse() && | |||
40227 | EFLAGS.getValueType().isInteger() && | |||
40228 | !isa<ConstantSDNode>(EFLAGS.getOperand(1))) { | |||
40229 | SDValue NewSub = DAG.getNode(X86ISD::SUB, SDLoc(EFLAGS), | |||
40230 | EFLAGS.getNode()->getVTList(), | |||
40231 | EFLAGS.getOperand(1), EFLAGS.getOperand(0)); | |||
40232 | SDValue NewEFLAGS = SDValue(NewSub.getNode(), EFLAGS.getResNo()); | |||
40233 | return DAG.getNode(IsSub ? X86ISD::SBB : X86ISD::ADC, DL, | |||
40234 | DAG.getVTList(VT, MVT::i32), X, | |||
40235 | DAG.getConstant(0, DL, VT), NewEFLAGS); | |||
40236 | } | |||
40237 | } | |||
40238 | ||||
40239 | if (CC != X86::COND_E && CC != X86::COND_NE) | |||
40240 | return SDValue(); | |||
40241 | ||||
40242 | SDValue Cmp = Y.getOperand(1); | |||
40243 | if (Cmp.getOpcode() != X86ISD::CMP || !Cmp.hasOneUse() || | |||
40244 | !X86::isZeroNode(Cmp.getOperand(1)) || | |||
40245 | !Cmp.getOperand(0).getValueType().isInteger()) | |||
40246 | return SDValue(); | |||
40247 | ||||
40248 | SDValue Z = Cmp.getOperand(0); | |||
40249 | EVT ZVT = Z.getValueType(); | |||
40250 | ||||
40251 | // If X is -1 or 0, then we have an opportunity to avoid constants required in | |||
40252 | // the general case below. | |||
40253 | if (ConstantX) { | |||
40254 | // 'neg' sets the carry flag when Z != 0, so create 0 or -1 using 'sbb' with | |||
40255 | // fake operands: | |||
40256 | // 0 - (Z != 0) --> sbb %eax, %eax, (neg Z) | |||
40257 | // -1 + (Z == 0) --> sbb %eax, %eax, (neg Z) | |||
40258 | if ((IsSub && CC == X86::COND_NE && ConstantX->isNullValue()) || | |||
40259 | (!IsSub && CC == X86::COND_E && ConstantX->isAllOnesValue())) { | |||
40260 | SDValue Zero = DAG.getConstant(0, DL, ZVT); | |||
40261 | SDVTList X86SubVTs = DAG.getVTList(ZVT, MVT::i32); | |||
40262 | SDValue Neg = DAG.getNode(X86ISD::SUB, DL, X86SubVTs, Zero, Z); | |||
40263 | return DAG.getNode(X86ISD::SETCC_CARRY, DL, VT, | |||
40264 | DAG.getConstant(X86::COND_B, DL, MVT::i8), | |||
40265 | SDValue(Neg.getNode(), 1)); | |||
40266 | } | |||
40267 | ||||
40268 | // cmp with 1 sets the carry flag when Z == 0, so create 0 or -1 using 'sbb' | |||
40269 | // with fake operands: | |||
40270 | // 0 - (Z == 0) --> sbb %eax, %eax, (cmp Z, 1) | |||
40271 | // -1 + (Z != 0) --> sbb %eax, %eax, (cmp Z, 1) | |||
40272 | if ((IsSub && CC == X86::COND_E && ConstantX->isNullValue()) || | |||
40273 | (!IsSub && CC == X86::COND_NE && ConstantX->isAllOnesValue())) { | |||
40274 | SDValue One = DAG.getConstant(1, DL, ZVT); | |||
40275 | SDValue Cmp1 = DAG.getNode(X86ISD::CMP, DL, MVT::i32, Z, One); | |||
40276 | return DAG.getNode(X86ISD::SETCC_CARRY, DL, VT, | |||
40277 | DAG.getConstant(X86::COND_B, DL, MVT::i8), Cmp1); | |||
40278 | } | |||
40279 | } | |||
40280 | ||||
40281 | // (cmp Z, 1) sets the carry flag if Z is 0. | |||
40282 | SDValue One = DAG.getConstant(1, DL, ZVT); | |||
40283 | SDValue Cmp1 = DAG.getNode(X86ISD::CMP, DL, MVT::i32, Z, One); | |||
40284 | ||||
40285 | // Add the flags type for ADC/SBB nodes. | |||
40286 | SDVTList VTs = DAG.getVTList(VT, MVT::i32); | |||
40287 | ||||
40288 | // X - (Z != 0) --> sub X, (zext(setne Z, 0)) --> adc X, -1, (cmp Z, 1) | |||
40289 | // X + (Z != 0) --> add X, (zext(setne Z, 0)) --> sbb X, -1, (cmp Z, 1) | |||
40290 | if (CC == X86::COND_NE) | |||
40291 | return DAG.getNode(IsSub ? X86ISD::ADC : X86ISD::SBB, DL, VTs, X, | |||
40292 | DAG.getConstant(-1ULL, DL, VT), Cmp1); | |||
40293 | ||||
40294 | // X - (Z == 0) --> sub X, (zext(sete Z, 0)) --> sbb X, 0, (cmp Z, 1) | |||
40295 | // X + (Z == 0) --> add X, (zext(sete Z, 0)) --> adc X, 0, (cmp Z, 1) | |||
40296 | return DAG.getNode(IsSub ? X86ISD::SBB : X86ISD::ADC, DL, VTs, X, | |||
40297 | DAG.getConstant(0, DL, VT), Cmp1); | |||
40298 | } | |||
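// Worked example (sketch): for (add X, (zext (setcc ne (cmp Z, 0)))),
// CC == COND_NE, so we emit (X86ISD::SBB X, -1, (cmp Z, 1)). The compare
// sets the carry exactly when Z == 0, and X - (-1) - CF is X + 1 when
// Z != 0 and X when Z == 0, which matches X + (Z != 0).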
40299 | ||||
40300 | static SDValue combineLoopMAddPattern(SDNode *N, SelectionDAG &DAG, | |||
40301 | const X86Subtarget &Subtarget) { | |||
40302 | if (!Subtarget.hasSSE2()) | |||
40303 | return SDValue(); | |||
40304 | ||||
40305 | SDValue Op0 = N->getOperand(0); | |||
40306 | SDValue Op1 = N->getOperand(1); | |||
40307 | ||||
40308 | EVT VT = N->getValueType(0); | |||
40309 | ||||
40310 | // If the vector size is less than 128 bits, or greater than the supported | |||
40311 | // RegSize, do not use PMADD. | |||
40312 | if (!VT.isVector() || VT.getVectorNumElements() < 8) | |||
40313 | return SDValue(); | |||
40314 | ||||
40315 | if (Op0.getOpcode() != ISD::MUL) | |||
40316 | std::swap(Op0, Op1); | |||
40317 | if (Op0.getOpcode() != ISD::MUL) | |||
40318 | return SDValue(); | |||
40319 | ||||
40320 | ShrinkMode Mode; | |||
40321 | if (!canReduceVMulWidth(Op0.getNode(), DAG, Mode) || Mode == MULU16) | |||
40322 | return SDValue(); | |||
40323 | ||||
40324 | SDLoc DL(N); | |||
40325 | EVT ReducedVT = EVT::getVectorVT(*DAG.getContext(), MVT::i16, | |||
40326 | VT.getVectorNumElements()); | |||
40327 | EVT MAddVT = EVT::getVectorVT(*DAG.getContext(), MVT::i32, | |||
40328 | VT.getVectorNumElements() / 2); | |||
40329 | ||||
40330 | // Madd vector size is half of the original vector size | |||
40331 | auto PMADDWDBuilder = [](SelectionDAG &DAG, const SDLoc &DL, | |||
40332 | ArrayRef<SDValue> Ops) { | |||
40333 | MVT VT = MVT::getVectorVT(MVT::i32, Ops[0].getValueSizeInBits() / 32); | |||
40334 | return DAG.getNode(X86ISD::VPMADDWD, DL, VT, Ops); | |||
40335 | }; | |||
40336 | ||||
40337 | auto BuildPMADDWD = [&](SDValue Mul) { | |||
40338 | // Shrink the operands of mul. | |||
40339 | SDValue N0 = DAG.getNode(ISD::TRUNCATE, DL, ReducedVT, Mul.getOperand(0)); | |||
40340 | SDValue N1 = DAG.getNode(ISD::TRUNCATE, DL, ReducedVT, Mul.getOperand(1)); | |||
40341 | ||||
40342 | SDValue Madd = SplitOpsAndApply(DAG, Subtarget, DL, MAddVT, { N0, N1 }, | |||
40343 | PMADDWDBuilder); | |||
40344 | // Fill the rest of the output with 0 | |||
40345 | return DAG.getNode(ISD::CONCAT_VECTORS, DL, VT, Madd, | |||
40346 | DAG.getConstant(0, DL, MAddVT)); | |||
40347 | }; | |||
40348 | ||||
40349 | Op0 = BuildPMADDWD(Op0); | |||
40350 | ||||
40351 | // It's possible that Op1 is also a mul we can reduce. | |||
40352 | if (Op1.getOpcode() == ISD::MUL && | |||
40353 | canReduceVMulWidth(Op1.getNode(), DAG, Mode) && Mode != MULU16) { | |||
40354 | Op1 = BuildPMADDWD(Op1); | |||
40355 | } | |||
40356 | ||||
40357 | return DAG.getNode(ISD::ADD, DL, VT, Op0, Op1); | |||
40358 | } | |||
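// Shape of the rewrite above (illustration only), for a v8i32 reduction add:
//   (add (mul (sext v8i16 A), (sext v8i16 B)), Acc)
//     --> (add (concat (v4i32 (X86ISD::VPMADDWD A, B)), zeroes), Acc)
// VPMADDWD adds adjacent i16 products into i32 lanes, so the result has
// half as many elements and the upper half is zero-padded; this is valid
// because a reduction add is insensitive to lane placement.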
40359 | ||||
40360 | static SDValue combineLoopSADPattern(SDNode *N, SelectionDAG &DAG, | |||
40361 | const X86Subtarget &Subtarget) { | |||
40362 | if (!Subtarget.hasSSE2()) | |||
40363 | return SDValue(); | |||
40364 | ||||
40365 | SDLoc DL(N); | |||
40366 | EVT VT = N->getValueType(0); | |||
40367 | SDValue Op0 = N->getOperand(0); | |||
40368 | SDValue Op1 = N->getOperand(1); | |||
40369 | ||||
40370 | // TODO: There's nothing special about i32, any integer type above i16 should | |||
40371 | // work just as well. | |||
40372 | if (!VT.isVector() || !VT.isSimple() || | |||
40373 | !(VT.getVectorElementType() == MVT::i32)) | |||
40374 | return SDValue(); | |||
40375 | ||||
40376 | unsigned RegSize = 128; | |||
40377 | if (Subtarget.useBWIRegs()) | |||
40378 | RegSize = 512; | |||
40379 | else if (Subtarget.hasAVX()) | |||
40380 | RegSize = 256; | |||
40381 | ||||
40382 | // We only handle v16i32 for SSE2 / v32i32 for AVX / v64i32 for AVX512. | |||
40383 | // TODO: We should be able to handle larger vectors by splitting them before | |||
40384 | // feeding them into several SADs, and then reducing over those. | |||
40385 | if (VT.getSizeInBits() / 4 > RegSize) | |||
40386 | return SDValue(); | |||
40387 | ||||
40388 | // We know N is a reduction add, which means one of its operands is a phi. | |||
40389 | // To match SAD, we need the other operand to be a vector select. | |||
40390 | if (Op0.getOpcode() != ISD::VSELECT) | |||
40391 | std::swap(Op0, Op1); | |||
40392 | if (Op0.getOpcode() != ISD::VSELECT) | |||
40393 | return SDValue(); | |||
40394 | ||||
40395 | auto BuildPSADBW = [&](SDValue Op0, SDValue Op1) { | |||
40396 | // SAD pattern detected. Now build a SAD instruction and an addition for | |||
40397 | // reduction. Note that the number of elements in the result of SAD is less | |||
40398 | // than the number of elements in its input. Therefore, we can only update | |||
40399 | // part of the elements in the reduction vector. | |||
40400 | SDValue Sad = createPSADBW(DAG, Op0, Op1, DL, Subtarget); | |||
40401 | ||||
40402 | // The output of PSADBW is a vector of i64. | |||
40403 | // We need to turn the vector of i64 into a vector of i32. | |||
40404 | // If the reduction vector is at least as wide as the psadbw result, just | |||
40405 | // bitcast. If it's narrower, truncate - the high i32 of each i64 is zero | |||
40406 | // anyway. | |||
40407 | MVT ResVT = MVT::getVectorVT(MVT::i32, Sad.getValueSizeInBits() / 32); | |||
40408 | if (VT.getSizeInBits() >= ResVT.getSizeInBits()) | |||
40409 | Sad = DAG.getNode(ISD::BITCAST, DL, ResVT, Sad); | |||
40410 | else | |||
40411 | Sad = DAG.getNode(ISD::TRUNCATE, DL, VT, Sad); | |||
40412 | ||||
40413 | if (VT.getSizeInBits() > ResVT.getSizeInBits()) { | |||
40414 | // Fill the upper elements with zero to match the add width. | |||
40415 | SDValue Zero = DAG.getConstant(0, DL, VT); | |||
40416 | Sad = DAG.getNode(ISD::INSERT_SUBVECTOR, DL, VT, Zero, Sad, | |||
40417 | DAG.getIntPtrConstant(0, DL)); | |||
40418 | } | |||
40419 | ||||
40420 | return Sad; | |||
40421 | }; | |||
40422 | ||||
40423 | // Check whether we have an abs-diff pattern feeding into the select. | |||
40424 | SDValue SadOp0, SadOp1; | |||
40425 | if (!detectZextAbsDiff(Op0, SadOp0, SadOp1)) | |||
40426 | return SDValue(); | |||
40427 | ||||
40428 | Op0 = BuildPSADBW(SadOp0, SadOp1); | |||
40429 | ||||
40430 | // It's possible we have a sad on the other side too. | |||
40431 | if (Op1.getOpcode() == ISD::VSELECT && | |||
40432 | detectZextAbsDiff(Op1, SadOp0, SadOp1)) { | |||
40433 | Op1 = BuildPSADBW(SadOp0, SadOp1); | |||
40434 | } | |||
40435 | ||||
40436 | return DAG.getNode(ISD::ADD, DL, VT, Op0, Op1); | |||
40437 | } | |||
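      | // Illustrative shape of the pattern matched above (types assumed; the | |||
      | // vselect computes |a - b| over zero-extended i8 data): | |||
      | //   d = (vselect (setcc (zext a), (zext b), ugt), | |||
      | //                (sub (zext a), (zext b)), | |||
      | //                (sub (zext b), (zext a))) | |||
      | // (add d, phi) then becomes (add (psadbw a, b), phi), widened with zeros | |||
      | // by BuildPSADBW as needed. | |||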
40438 | ||||
40439 | /// Convert vector increment or decrement to sub/add with an all-ones constant: | |||
40440 | /// add X, <1, 1...> --> sub X, <-1, -1...> | |||
40441 | /// sub X, <1, 1...> --> add X, <-1, -1...> | |||
40442 | /// The all-ones vector constant can be materialized using a pcmpeq instruction | |||
40443 | /// that is commonly recognized as an idiom (has no register dependency), so | |||
40444 | /// that's better/smaller than loading a splat 1 constant. | |||
40445 | static SDValue combineIncDecVector(SDNode *N, SelectionDAG &DAG) { | |||
40446 | assert((N->getOpcode() == ISD::ADD || N->getOpcode() == ISD::SUB) && | |||
40447 | "Unexpected opcode for increment/decrement transform"); | |||
40448 | ||||
40449 | // Pseudo-legality check: getOnesVector() expects one of these types, so bail | |||
40450 | // out and wait for legalization if we have an unsupported vector length. | |||
40451 | EVT VT = N->getValueType(0); | |||
40452 | if (!VT.is128BitVector() && !VT.is256BitVector() && !VT.is512BitVector()) | |||
40453 | return SDValue(); | |||
40454 | ||||
40455 | APInt SplatVal; | |||
40456 | if (!isConstantSplat(N->getOperand(1), SplatVal) || !SplatVal.isOneValue()) | |||
40457 | return SDValue(); | |||
40458 | ||||
40459 | SDValue AllOnesVec = getOnesVector(VT, DAG, SDLoc(N)); | |||
40460 | unsigned NewOpcode = N->getOpcode() == ISD::ADD ? ISD::SUB : ISD::ADD; | |||
40461 | return DAG.getNode(NewOpcode, SDLoc(N), VT, N->getOperand(0), AllOnesVec); | |||
40462 | } | |||
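      | // e.g. (v4i32 add X, <1,1,1,1>) -> (v4i32 sub X, <-1,-1,-1,-1>), where the | |||
      | // all-ones operand is then materialized by pcmpeqd with no constant-pool | |||
      | // load (illustrative type; any legal 128/256/512-bit vector works). | |||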
40463 | ||||
40464 | static SDValue matchPMADDWD(SelectionDAG &DAG, SDValue Op0, SDValue Op1, | |||
40465 | const SDLoc &DL, EVT VT, | |||
40466 | const X86Subtarget &Subtarget) { | |||
40467 | // Example of pattern we try to detect: | |||
40468 | // t := (v8i32 mul (sext (v8i16 x0)), (sext (v8i16 x1))) | |||
40469 | // (add (build_vector (extract_elt t, 0), | |||
40470 | // (extract_elt t, 2), | |||
40471 | // (extract_elt t, 4), | |||
40472 | // (extract_elt t, 6)), | |||
40473 | // (build_vector (extract_elt t, 1), | |||
40474 | // (extract_elt t, 3), | |||
40475 | // (extract_elt t, 5), | |||
40476 | // (extract_elt t, 7))) | |||
40477 | ||||
40478 | if (!Subtarget.hasSSE2()) | |||
40479 | return SDValue(); | |||
40480 | ||||
40481 | if (Op0.getOpcode() != ISD::BUILD_VECTOR || | |||
40482 | Op1.getOpcode() != ISD::BUILD_VECTOR) | |||
40483 | return SDValue(); | |||
40484 | ||||
40485 | if (!VT.isVector() || VT.getVectorElementType() != MVT::i32 || | |||
40486 | VT.getVectorNumElements() < 4 || | |||
40487 | !isPowerOf2_32(VT.getVectorNumElements())) | |||
40488 | return SDValue(); | |||
40489 | ||||
40490 | // Check if one of Op0,Op1 is of the form: | |||
40491 | // (build_vector (extract_elt Mul, 0), | |||
40492 | // (extract_elt Mul, 2), | |||
40493 | // (extract_elt Mul, 4), | |||
40494 | // ... | |||
40495 | // the other is of the form: | |||
40496 | // (build_vector (extract_elt Mul, 1), | |||
40497 | // (extract_elt Mul, 3), | |||
40498 | // (extract_elt Mul, 5), | |||
40499 | // ... | |||
40500 | // and identify Mul. | |||
40501 | SDValue Mul; | |||
40502 | for (unsigned i = 0, e = VT.getVectorNumElements(); i != e; i += 2) { | |||
40503 | SDValue Op0L = Op0->getOperand(i), Op1L = Op1->getOperand(i), | |||
40504 | Op0H = Op0->getOperand(i + 1), Op1H = Op1->getOperand(i + 1); | |||
40505 | // TODO: Be more tolerant to undefs. | |||
40506 | if (Op0L.getOpcode() != ISD::EXTRACT_VECTOR_ELT || | |||
40507 | Op1L.getOpcode() != ISD::EXTRACT_VECTOR_ELT || | |||
40508 | Op0H.getOpcode() != ISD::EXTRACT_VECTOR_ELT || | |||
40509 | Op1H.getOpcode() != ISD::EXTRACT_VECTOR_ELT) | |||
40510 | return SDValue(); | |||
40511 | auto *Const0L = dyn_cast<ConstantSDNode>(Op0L->getOperand(1)); | |||
40512 | auto *Const1L = dyn_cast<ConstantSDNode>(Op1L->getOperand(1)); | |||
40513 | auto *Const0H = dyn_cast<ConstantSDNode>(Op0H->getOperand(1)); | |||
40514 | auto *Const1H = dyn_cast<ConstantSDNode>(Op1H->getOperand(1)); | |||
40515 | if (!Const0L || !Const1L || !Const0H || !Const1H) | |||
40516 | return SDValue(); | |||
40517 | unsigned Idx0L = Const0L->getZExtValue(), Idx1L = Const1L->getZExtValue(), | |||
40518 | Idx0H = Const0H->getZExtValue(), Idx1H = Const1H->getZExtValue(); | |||
40519 | // Commutativity of mul allows factors of a product to reorder. | |||
40520 | if (Idx0L > Idx1L) | |||
40521 | std::swap(Idx0L, Idx1L); | |||
40522 | if (Idx0H > Idx1H) | |||
40523 | std::swap(Idx0H, Idx1H); | |||
40524 | // Commutativity of add allows pairs of factors to reorder. | |||
40525 | if (Idx0L > Idx0H) { | |||
40526 | std::swap(Idx0L, Idx0H); | |||
40527 | std::swap(Idx1L, Idx1H); | |||
40528 | } | |||
40529 | if (Idx0L != 2 * i || Idx1L != 2 * i + 1 || Idx0H != 2 * i + 2 || | |||
40530 | Idx1H != 2 * i + 3) | |||
40531 | return SDValue(); | |||
40532 | if (!Mul) { | |||
40533 | // First time an extract_elt's source vector is visited. Must be a MUL | |||
40534 | // with twice as many vector elements as the BUILD_VECTOR. | |||
40535 | // Both extracts must be from same MUL. | |||
40536 | Mul = Op0L->getOperand(0); | |||
40537 | if (Mul->getOpcode() != ISD::MUL || | |||
40538 | Mul.getValueType().getVectorNumElements() != 2 * e) | |||
40539 | return SDValue(); | |||
40540 | } | |||
40541 | // Check that the extract is from the same MUL previously seen. | |||
40542 | if (Mul != Op0L->getOperand(0) || Mul != Op1L->getOperand(0) || | |||
40543 | Mul != Op0H->getOperand(0) || Mul != Op1H->getOperand(0)) | |||
40544 | return SDValue(); | |||
40545 | } | |||
40546 | ||||
40547 | // Check if the Mul source can be safely shrunk. | |||
40548 | ShrinkMode Mode; | |||
40549 | if (!canReduceVMulWidth(Mul.getNode(), DAG, Mode) || Mode == MULU16) | |||
40550 | return SDValue(); | |||
40551 | ||||
40552 | auto PMADDBuilder = [](SelectionDAG &DAG, const SDLoc &DL, | |||
40553 | ArrayRef<SDValue> Ops) { | |||
40554 | // Shrink by adding truncate nodes and let DAGCombine fold with the | |||
40555 | // sources. | |||
40556 | EVT InVT = Ops[0].getValueType(); | |||
40557 | assert(InVT.getScalarType() == MVT::i32 && | |||
40558 | "Unexpected scalar element type"); | |||
40559 | assert(InVT == Ops[1].getValueType() && "Operands' types mismatch"); | |||
40560 | EVT ResVT = EVT::getVectorVT(*DAG.getContext(), MVT::i32, | |||
40561 | InVT.getVectorNumElements() / 2); | |||
40562 | EVT TruncVT = EVT::getVectorVT(*DAG.getContext(), MVT::i16, | |||
40563 | InVT.getVectorNumElements()); | |||
40564 | return DAG.getNode(X86ISD::VPMADDWD, DL, ResVT, | |||
40565 | DAG.getNode(ISD::TRUNCATE, DL, TruncVT, Ops[0]), | |||
40566 | DAG.getNode(ISD::TRUNCATE, DL, TruncVT, Ops[1])); | |||
40567 | }; | |||
40568 | return SplitOpsAndApply(DAG, Subtarget, DL, VT, | |||
40569 | { Mul.getOperand(0), Mul.getOperand(1) }, | |||
40570 | PMADDBuilder); | |||
40571 | } | |||
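      | // Net effect of the match above, illustratively for a v4i32 result, with | |||
      | // Mul = (v8i32 mul (sext v8i16 A), (sext v8i16 B)) and ti = element i of | |||
      | // Mul: | |||
      | //   (add (build_vector t0, t2, t4, t6), (build_vector t1, t3, t5, t7)) | |||
      | //     -> (v4i32 vpmaddwd (trunc Mul0 to v8i16), (trunc Mul1 to v8i16)) | |||
      | // The truncates are expected to fold with the sign_extends feeding the mul. | |||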
40572 | ||||
40573 | // Try to turn (add (umax X, C), -C) into (psubus X, C) | |||
40574 | static SDValue combineAddToSUBUS(SDNode *N, SelectionDAG &DAG, | |||
40575 | const X86Subtarget &Subtarget) { | |||
40576 | if (!Subtarget.hasSSE2()) | |||
40577 | return SDValue(); | |||
40578 | ||||
40579 | EVT VT = N->getValueType(0); | |||
40580 | ||||
40581 | // psubus is available in SSE2 for i8 and i16 vectors. | |||
40582 | if (!VT.isVector() || VT.getVectorNumElements() < 2 || | |||
40583 | !isPowerOf2_32(VT.getVectorNumElements()) || | |||
40584 | !(VT.getVectorElementType() == MVT::i8 || | |||
40585 | VT.getVectorElementType() == MVT::i16)) | |||
40586 | return SDValue(); | |||
40587 | ||||
40588 | SDValue Op0 = N->getOperand(0); | |||
40589 | SDValue Op1 = N->getOperand(1); | |||
40590 | if (Op0.getOpcode() != ISD::UMAX) | |||
40591 | return SDValue(); | |||
40592 | ||||
40593 | // The add should have a constant that is the negative of the max. | |||
40594 | // TODO: Handle build_vectors with undef elements. | |||
40595 | auto MatchUSUBSAT = [](ConstantSDNode *Max, ConstantSDNode *Op) { | |||
40596 | return Max->getAPIntValue() == (-Op->getAPIntValue()); | |||
40597 | }; | |||
40598 | if (!ISD::matchBinaryPredicate(Op0.getOperand(1), Op1, MatchUSUBSAT)) | |||
40599 | return SDValue(); | |||
40600 | ||||
40601 | SDLoc DL(N); | |||
40602 | return DAG.getNode(ISD::USUBSAT, DL, VT, Op0.getOperand(0), | |||
40603 | Op0.getOperand(1)); | |||
40604 | } | |||
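      | // e.g. with an assumed splat constant of 42: | |||
      | //   (v16i8 add (umax X, <42,...>), <-42,...>) -> (v16i8 usubsat X, <42,...>) | |||
      | // The umax clamps X to at least 42 before the subtract, which matches the | |||
      | // saturate-to-zero semantics of usubsat exactly. | |||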
40605 | ||||
40606 | // Attempt to turn this pattern into PMADDWD. | |||
40607 | // (mul (add (zext (build_vector)), (zext (build_vector))), | |||
40608 | // (add (zext (build_vector)), (zext (build_vector)))) | |||
40609 | static SDValue matchPMADDWD_2(SelectionDAG &DAG, SDValue N0, SDValue N1, | |||
40610 | const SDLoc &DL, EVT VT, | |||
40611 | const X86Subtarget &Subtarget) { | |||
40612 | if (!Subtarget.hasSSE2()) | |||
40613 | return SDValue(); | |||
40614 | ||||
40615 | if (N0.getOpcode() != ISD::MUL || N1.getOpcode() != ISD::MUL) | |||
40616 | return SDValue(); | |||
40617 | ||||
40618 | if (!VT.isVector() || VT.getVectorElementType() != MVT::i32 || | |||
40619 | VT.getVectorNumElements() < 4 || | |||
40620 | !isPowerOf2_32(VT.getVectorNumElements())) | |||
40621 | return SDValue(); | |||
40622 | ||||
40623 | SDValue N00 = N0.getOperand(0); | |||
40624 | SDValue N01 = N0.getOperand(1); | |||
40625 | SDValue N10 = N1.getOperand(0); | |||
40626 | SDValue N11 = N1.getOperand(1); | |||
40627 | ||||
40628 | // All inputs need to be sign extends. | |||
40629 | // TODO: Support ZERO_EXTEND from known positive? | |||
40630 | if (N00.getOpcode() != ISD::SIGN_EXTEND || | |||
40631 | N01.getOpcode() != ISD::SIGN_EXTEND || | |||
40632 | N10.getOpcode() != ISD::SIGN_EXTEND || | |||
40633 | N11.getOpcode() != ISD::SIGN_EXTEND) | |||
40634 | return SDValue(); | |||
40635 | ||||
40636 | // Peek through the extends. | |||
40637 | N00 = N00.getOperand(0); | |||
40638 | N01 = N01.getOperand(0); | |||
40639 | N10 = N10.getOperand(0); | |||
40640 | N11 = N11.getOperand(0); | |||
40641 | ||||
40642 | // Must be extending from vXi16. | |||
40643 | EVT InVT = N00.getValueType(); | |||
40644 | if (InVT.getVectorElementType() != MVT::i16 || N01.getValueType() != InVT || | |||
40645 | N10.getValueType() != InVT || N11.getValueType() != InVT) | |||
40646 | return SDValue(); | |||
40647 | ||||
40648 | // All inputs should be build_vectors. | |||
40649 | if (N00.getOpcode() != ISD::BUILD_VECTOR || | |||
40650 | N01.getOpcode() != ISD::BUILD_VECTOR || | |||
40651 | N10.getOpcode() != ISD::BUILD_VECTOR || | |||
40652 | N11.getOpcode() != ISD::BUILD_VECTOR) | |||
40653 | return SDValue(); | |||
40654 | ||||
40655 | // For each result element, we need the even element of one input vector | |||
40656 | // multiplied by the matching even element of the other input, plus the | |||
40657 | // following odd element of the first input multiplied by the matching odd | |||
40658 | // element of the other input. So for each result element i, the operation | |||
40659 | // being performed must be: | |||
40660 | // A[2 * i] * B[2 * i] + A[2 * i + 1] * B[2 * i + 1] | |||
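      | // e.g. for i == 0 this requires A[0]*B[0] + A[1]*B[1], which is exactly | |||
      | // what a single vpmaddwd output lane computes. | |||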
40661 | SDValue In0, In1; | |||
40662 | for (unsigned i = 0; i != N00.getNumOperands(); ++i) { | |||
40663 | SDValue N00Elt = N00.getOperand(i); | |||
40664 | SDValue N01Elt = N01.getOperand(i); | |||
40665 | SDValue N10Elt = N10.getOperand(i); | |||
40666 | SDValue N11Elt = N11.getOperand(i); | |||
40667 | // TODO: Be more tolerant to undefs. | |||
40668 | if (N00Elt.getOpcode() != ISD::EXTRACT_VECTOR_ELT || | |||
40669 | N01Elt.getOpcode() != ISD::EXTRACT_VECTOR_ELT || | |||
40670 | N10Elt.getOpcode() != ISD::EXTRACT_VECTOR_ELT || | |||
40671 | N11Elt.getOpcode() != ISD::EXTRACT_VECTOR_ELT) | |||
40672 | return SDValue(); | |||
40673 | auto *ConstN00Elt = dyn_cast<ConstantSDNode>(N00Elt.getOperand(1)); | |||
40674 | auto *ConstN01Elt = dyn_cast<ConstantSDNode>(N01Elt.getOperand(1)); | |||
40675 | auto *ConstN10Elt = dyn_cast<ConstantSDNode>(N10Elt.getOperand(1)); | |||
40676 | auto *ConstN11Elt = dyn_cast<ConstantSDNode>(N11Elt.getOperand(1)); | |||
40677 | if (!ConstN00Elt || !ConstN01Elt || !ConstN10Elt || !ConstN11Elt) | |||
40678 | return SDValue(); | |||
40679 | unsigned IdxN00 = ConstN00Elt->getZExtValue(); | |||
40680 | unsigned IdxN01 = ConstN01Elt->getZExtValue(); | |||
40681 | unsigned IdxN10 = ConstN10Elt->getZExtValue(); | |||
40682 | unsigned IdxN11 = ConstN11Elt->getZExtValue(); | |||
40683 | // Add is commutative so indices can be reordered. | |||
40684 | if (IdxN00 > IdxN10) { | |||
40685 | std::swap(IdxN00, IdxN10); | |||
40686 | std::swap(IdxN01, IdxN11); | |||
40687 | } | |||
40688 | // N0 indices must be the even elements; N1 indices must be the next odd elements. | |||
40689 | if (IdxN00 != 2 * i || IdxN10 != 2 * i + 1 || | |||
40690 | IdxN01 != 2 * i || IdxN11 != 2 * i + 1) | |||
40691 | return SDValue(); | |||
40692 | SDValue N00In = N00Elt.getOperand(0); | |||
40693 | SDValue N01In = N01Elt.getOperand(0); | |||
40694 | SDValue N10In = N10Elt.getOperand(0); | |||
40695 | SDValue N11In = N11Elt.getOperand(0); | |||
40696 | // First time we find an input capture it. | |||
40697 | if (!In0) { | |||
40698 | In0 = N00In; | |||
40699 | In1 = N01In; | |||
40700 | } | |||
40701 | // Mul is commutative so the input vectors can be in any order. | |||
40702 | // Canonicalize to make the compares easier. | |||
40703 | if (In0 != N00In) | |||
40704 | std::swap(N00In, N01In); | |||
40705 | if (In0 != N10In) | |||
40706 | std::swap(N10In, N11In); | |||
40707 | if (In0 != N00In || In1 != N01In || In0 != N10In || In1 != N11In) | |||
40708 | return SDValue(); | |||
40709 | } | |||
40710 | ||||
40711 | auto PMADDBuilder = [](SelectionDAG &DAG, const SDLoc &DL, | |||
40712 | ArrayRef<SDValue> Ops) { | |||
40713 | // Shrink by adding truncate nodes and let DAGCombine fold with the | |||
40714 | // sources. | |||
40715 | EVT InVT = Ops[0].getValueType(); | |||
40716 | assert(InVT.getScalarType() == MVT::i16 && | |||
40717 | "Unexpected scalar element type"); | |||
40718 | assert(InVT == Ops[1].getValueType() && "Operands' types mismatch"); | |||
40719 | EVT ResVT = EVT::getVectorVT(*DAG.getContext(), MVT::i32, | |||
40720 | InVT.getVectorNumElements() / 2); | |||
40721 | return DAG.getNode(X86ISD::VPMADDWD, DL, ResVT, Ops[0], Ops[1]); | |||
40722 | }; | |||
40723 | return SplitOpsAndApply(DAG, Subtarget, DL, VT, { In0, In1 }, | |||
40724 | PMADDBuilder); | |||
40725 | } | |||
40726 | ||||
40727 | static SDValue combineAdd(SDNode *N, SelectionDAG &DAG, | |||
40728 | const X86Subtarget &Subtarget) { | |||
40729 | const SDNodeFlags Flags = N->getFlags(); | |||
40730 | if (Flags.hasVectorReduction()) { | |||
40731 | if (SDValue Sad = combineLoopSADPattern(N, DAG, Subtarget)) | |||
40732 | return Sad; | |||
40733 | if (SDValue MAdd = combineLoopMAddPattern(N, DAG, Subtarget)) | |||
40734 | return MAdd; | |||
40735 | } | |||
40736 | EVT VT = N->getValueType(0); | |||
40737 | SDValue Op0 = N->getOperand(0); | |||
40738 | SDValue Op1 = N->getOperand(1); | |||
40739 | ||||
40740 | if (SDValue MAdd = matchPMADDWD(DAG, Op0, Op1, SDLoc(N), VT, Subtarget)) | |||
40741 | return MAdd; | |||
40742 | if (SDValue MAdd = matchPMADDWD_2(DAG, Op0, Op1, SDLoc(N), VT, Subtarget)) | |||
40743 | return MAdd; | |||
40744 | ||||
40745 | // Try to synthesize horizontal adds from adds of shuffles. | |||
40746 | if ((VT == MVT::v8i16 || VT == MVT::v4i32 || VT == MVT::v16i16 || | |||
40747 | VT == MVT::v8i32) && | |||
40748 | Subtarget.hasSSSE3() && isHorizontalBinOp(Op0, Op1, true) && | |||
40749 | shouldCombineToHorizontalOp(Op0 == Op1, DAG, Subtarget)) { | |||
40750 | auto HADDBuilder = [](SelectionDAG &DAG, const SDLoc &DL, | |||
40751 | ArrayRef<SDValue> Ops) { | |||
40752 | return DAG.getNode(X86ISD::HADD, DL, Ops[0].getValueType(), Ops); | |||
40753 | }; | |||
40754 | return SplitOpsAndApply(DAG, Subtarget, SDLoc(N), VT, {Op0, Op1}, | |||
40755 | HADDBuilder); | |||
40756 | } | |||
40757 | ||||
40758 | if (SDValue V = combineIncDecVector(N, DAG)) | |||
40759 | return V; | |||
40760 | ||||
40761 | if (SDValue V = combineAddToSUBUS(N, DAG, Subtarget)) | |||
40762 | return V; | |||
40763 | ||||
40764 | return combineAddOrSubToADCOrSBB(N, DAG); | |||
40765 | } | |||
40766 | ||||
40767 | static SDValue combineSubToSubus(SDNode *N, SelectionDAG &DAG, | |||
40768 | const X86Subtarget &Subtarget) { | |||
40769 | SDValue Op0 = N->getOperand(0); | |||
40770 | SDValue Op1 = N->getOperand(1); | |||
40771 | EVT VT = N->getValueType(0); | |||
40772 | ||||
40773 | // PSUBUS is supported, starting from SSE2, but truncation for v8i32 | |||
40774 | // is only worth it with SSSE3 (PSHUFB). | |||
40775 | if (!(Subtarget.hasSSE2() && (VT == MVT::v16i8 || VT == MVT::v8i16)) && | |||
40776 | !(Subtarget.hasSSSE3() && (VT == MVT::v8i32 || VT == MVT::v8i64)) && | |||
40777 | !(Subtarget.hasAVX() && (VT == MVT::v32i8 || VT == MVT::v16i16)) && | |||
40778 | !(Subtarget.useBWIRegs() && (VT == MVT::v64i8 || VT == MVT::v32i16 || | |||
40779 | VT == MVT::v16i32 || VT == MVT::v8i64))) | |||
40780 | return SDValue(); | |||
40781 | ||||
40782 | SDValue SubusLHS, SubusRHS; | |||
40783 | // Try to find umax(a,b) - b or a - umin(a,b) patterns; | |||
40784 | // they may be converted to subus(a,b). | |||
40785 | // TODO: Need to add IR canonicalization for this code. | |||
40786 | if (Op0.getOpcode() == ISD::UMAX) { | |||
40787 | SubusRHS = Op1; | |||
40788 | SDValue MaxLHS = Op0.getOperand(0); | |||
40789 | SDValue MaxRHS = Op0.getOperand(1); | |||
40790 | if (MaxLHS == Op1) | |||
40791 | SubusLHS = MaxRHS; | |||
40792 | else if (MaxRHS == Op1) | |||
40793 | SubusLHS = MaxLHS; | |||
40794 | else | |||
40795 | return SDValue(); | |||
40796 | } else if (Op1.getOpcode() == ISD::UMIN) { | |||
40797 | SubusLHS = Op0; | |||
40798 | SDValue MinLHS = Op1.getOperand(0); | |||
40799 | SDValue MinRHS = Op1.getOperand(1); | |||
40800 | if (MinLHS == Op0) | |||
40801 | SubusRHS = MinRHS; | |||
40802 | else if (MinRHS == Op0) | |||
40803 | SubusRHS = MinLHS; | |||
40804 | else | |||
40805 | return SDValue(); | |||
40806 | } else | |||
40807 | return SDValue(); | |||
40808 | ||||
40809 | auto USUBSATBuilder = [](SelectionDAG &DAG, const SDLoc &DL, | |||
40810 | ArrayRef<SDValue> Ops) { | |||
40811 | return DAG.getNode(ISD::USUBSAT, DL, Ops[0].getValueType(), Ops); | |||
40812 | }; | |||
40813 | ||||
40814 | // PSUBUS doesn't support v8i32/v8i64/v16i32, but it can be enabled with | |||
40815 | // special preprocessing in some cases. | |||
40816 | if (VT != MVT::v8i32 && VT != MVT::v16i32 && VT != MVT::v8i64) | |||
40817 | return SplitOpsAndApply(DAG, Subtarget, SDLoc(N), VT, | |||
40818 | { SubusLHS, SubusRHS }, USUBSATBuilder); | |||
40819 | ||||
40820 | // The special preprocessing can only be applied | |||
40821 | // if the value was zero extended from 16 bits, | |||
40822 | // so we require the leading 16 bits to be zero for 32-bit | |||
40823 | // values, or the leading 48 bits for 64-bit values. | |||
40824 | KnownBits Known = DAG.computeKnownBits(SubusLHS); | |||
40825 | unsigned NumZeros = Known.countMinLeadingZeros(); | |||
40826 | if ((VT == MVT::v8i64 && NumZeros < 48) || NumZeros < 16) | |||
40827 | return SDValue(); | |||
40828 | ||||
40829 | EVT ExtType = SubusLHS.getValueType(); | |||
40830 | EVT ShrinkedType; | |||
40831 | if (VT == MVT::v8i32 || VT == MVT::v8i64) | |||
40832 | ShrinkedType = MVT::v8i16; | |||
40833 | else | |||
40834 | ShrinkedType = NumZeros >= 24 ? MVT::v16i8 : MVT::v16i16; | |||
40835 | ||||
40836 | // Since SubusLHS is zero-extended, saturate SubusRHS to the same narrow | |||
40837 | // width first: SubusRHS = umin(0xFFF.., SubusRHS). | |||
40838 | SDValue SaturationConst = | |||
40839 | DAG.getConstant(APInt::getLowBitsSet(ExtType.getScalarSizeInBits(), | |||
40840 | ShrinkedType.getScalarSizeInBits()), | |||
40841 | SDLoc(SubusLHS), ExtType); | |||
40842 | SDValue UMin = DAG.getNode(ISD::UMIN, SDLoc(SubusLHS), ExtType, SubusRHS, | |||
40843 | SaturationConst); | |||
40844 | SDValue NewSubusLHS = | |||
40845 | DAG.getZExtOrTrunc(SubusLHS, SDLoc(SubusLHS), ShrinkedType); | |||
40846 | SDValue NewSubusRHS = DAG.getZExtOrTrunc(UMin, SDLoc(SubusRHS), ShrinkedType); | |||
40847 | SDValue Psubus = | |||
40848 | SplitOpsAndApply(DAG, Subtarget, SDLoc(N), ShrinkedType, | |||
40849 | { NewSubusLHS, NewSubusRHS }, USUBSATBuilder); | |||
40850 | // Zero extend the result; it may be used somewhere as a 32-bit value. | |||
40851 | // If not, the zext and the following trunc will simply fold away. | |||
40852 | return DAG.getZExtOrTrunc(Psubus, SDLoc(N), ExtType); | |||
40853 | } | |||
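      | // Illustrative v8i32 case (assuming computeKnownBits proved the high 16 | |||
      | // bits of SubusLHS zero): | |||
      | //   (v8i32 sub A, B) | |||
      | //     -> zext (v8i16 usubsat (trunc A), (trunc (umin B, <0xFFFF,...>))) | |||
      | // The umin makes truncating B lossless except when the subtraction would | |||
      | // saturate to zero anyway. | |||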
40854 | ||||
40855 | static SDValue combineSub(SDNode *N, SelectionDAG &DAG, | |||
40856 | const X86Subtarget &Subtarget) { | |||
40857 | SDValue Op0 = N->getOperand(0); | |||
40858 | SDValue Op1 = N->getOperand(1); | |||
40859 | ||||
40860 | // X86 can't encode an immediate LHS of a sub. See if we can push the | |||
40861 | // negation into a preceding instruction. | |||
40862 | if (ConstantSDNode *C = dyn_cast<ConstantSDNode>(Op0)) { | |||
40863 | // If the RHS of the sub is a XOR with one use and a constant, invert the | |||
40864 | // immediate. Then add one to the LHS of the sub so we can turn | |||
40865 | // X-Y -> X+~Y+1, saving one register. | |||
40866 | if (Op1->hasOneUse() && Op1.getOpcode() == ISD::XOR && | |||
40867 | isa<ConstantSDNode>(Op1.getOperand(1))) { | |||
40868 | APInt XorC = cast<ConstantSDNode>(Op1.getOperand(1))->getAPIntValue(); | |||
40869 | EVT VT = Op0.getValueType(); | |||
40870 | SDValue NewXor = DAG.getNode(ISD::XOR, SDLoc(Op1), VT, | |||
40871 | Op1.getOperand(0), | |||
40872 | DAG.getConstant(~XorC, SDLoc(Op1), VT)); | |||
40873 | return DAG.getNode(ISD::ADD, SDLoc(N), VT, NewXor, | |||
40874 | DAG.getConstant(C->getAPIntValue() + 1, SDLoc(N), VT)); | |||
40875 | } | |||
40876 | } | |||
40877 | ||||
40878 | // Try to synthesize horizontal subs from subs of shuffles. | |||
40879 | EVT VT = N->getValueType(0); | |||
40880 | if ((VT == MVT::v8i16 || VT == MVT::v4i32 || VT == MVT::v16i16 || | |||
40881 | VT == MVT::v8i32) && | |||
40882 | Subtarget.hasSSSE3() && isHorizontalBinOp(Op0, Op1, false) && | |||
40883 | shouldCombineToHorizontalOp(Op0 == Op1, DAG, Subtarget)) { | |||
40884 | auto HSUBBuilder = [](SelectionDAG &DAG, const SDLoc &DL, | |||
40885 | ArrayRef<SDValue> Ops) { | |||
40886 | return DAG.getNode(X86ISD::HSUB, DL, Ops[0].getValueType(), Ops); | |||
40887 | }; | |||
40888 | return SplitOpsAndApply(DAG, Subtarget, SDLoc(N), VT, {Op0, Op1}, | |||
40889 | HSUBBuilder); | |||
40890 | } | |||
40891 | ||||
40892 | if (SDValue V = combineIncDecVector(N, DAG)) | |||
40893 | return V; | |||
40894 | ||||
40895 | // Try to create PSUBUS if SUB's argument is max/min | |||
40896 | if (SDValue V = combineSubToSubus(N, DAG, Subtarget)) | |||
40897 | return V; | |||
40898 | ||||
40899 | return combineAddOrSubToADCOrSBB(N, DAG); | |||
40900 | } | |||
40901 | ||||
40902 | static SDValue combineVectorCompare(SDNode *N, SelectionDAG &DAG, | |||
40903 | const X86Subtarget &Subtarget) { | |||
40904 | MVT VT = N->getSimpleValueType(0); | |||
40905 | SDLoc DL(N); | |||
40906 | ||||
40907 | if (N->getOperand(0) == N->getOperand(1)) { | |||
40908 | if (N->getOpcode() == X86ISD::PCMPEQ) | |||
40909 | return DAG.getConstant(-1, DL, VT); | |||
40910 | if (N->getOpcode() == X86ISD::PCMPGT) | |||
40911 | return DAG.getConstant(0, DL, VT); | |||
40912 | } | |||
40913 | ||||
40914 | return SDValue(); | |||
40915 | } | |||
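      | // e.g. (pcmpeqd X, X) -> <-1,-1,-1,-1> and (pcmpgtd X, X) -> <0,0,0,0>, | |||
      | // since every lane compares equal to itself (illustrative v4i32 type). | |||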
40916 | ||||
40917 | static SDValue combineInsertSubvector(SDNode *N, SelectionDAG &DAG, | |||
40918 | TargetLowering::DAGCombinerInfo &DCI, | |||
40919 | const X86Subtarget &Subtarget) { | |||
40920 | if (DCI.isBeforeLegalizeOps()) | |||
40921 | return SDValue(); | |||
40922 | ||||
40923 | MVT OpVT = N->getSimpleValueType(0); | |||
40924 | ||||
40925 | bool IsI1Vector = OpVT.getVectorElementType() == MVT::i1; | |||
40926 | ||||
40927 | SDLoc dl(N); | |||
40928 | SDValue Vec = N->getOperand(0); | |||
40929 | SDValue SubVec = N->getOperand(1); | |||
40930 | ||||
40931 | unsigned IdxVal = N->getConstantOperandVal(2); | |||
40932 | MVT SubVecVT = SubVec.getSimpleValueType(); | |||
40933 | ||||
40934 | if (ISD::isBuildVectorAllZeros(Vec.getNode())) { | |||
40935 | // Inserting zeros into zeros is a nop. | |||
40936 | if (ISD::isBuildVectorAllZeros(SubVec.getNode())) | |||
40937 | return getZeroVector(OpVT, Subtarget, DAG, dl); | |||
40938 | ||||
40939 | // If we're inserting into a zero vector and then into a larger zero vector, | |||
40940 | // just insert into the larger zero vector directly. | |||
40941 | if (SubVec.getOpcode() == ISD::INSERT_SUBVECTOR && | |||
40942 | ISD::isBuildVectorAllZeros(SubVec.getOperand(0).getNode())) { | |||
40943 | unsigned Idx2Val = SubVec.getConstantOperandVal(2); | |||
40944 | return DAG.getNode(ISD::INSERT_SUBVECTOR, dl, OpVT, | |||
40945 | getZeroVector(OpVT, Subtarget, DAG, dl), | |||
40946 | SubVec.getOperand(1), | |||
40947 | DAG.getIntPtrConstant(IdxVal + Idx2Val, dl)); | |||
40948 | } | |||
40949 | ||||
40950 | // If we're inserting into a zero vector, our input was extracted from an | |||
40951 | // insert into a zero vector of the same type, and the extraction was at | |||
40952 | // least as large as the original insertion, just insert the original | |||
40953 | // subvector into a zero vector. | |||
40954 | if (SubVec.getOpcode() == ISD::EXTRACT_SUBVECTOR && IdxVal == 0 && | |||
40955 | SubVec.getConstantOperandVal(1) == 0 && | |||
40956 | SubVec.getOperand(0).getOpcode() == ISD::INSERT_SUBVECTOR) { | |||
40957 | SDValue Ins = SubVec.getOperand(0); | |||
40958 | if (Ins.getConstantOperandVal(2) == 0 && | |||
40959 | ISD::isBuildVectorAllZeros(Ins.getOperand(0).getNode()) && | |||
40960 | Ins.getOperand(1).getValueSizeInBits() <= SubVecVT.getSizeInBits()) | |||
40961 | return DAG.getNode(ISD::INSERT_SUBVECTOR, dl, OpVT, | |||
40962 | getZeroVector(OpVT, Subtarget, DAG, dl), | |||
40963 | Ins.getOperand(1), N->getOperand(2)); | |||
40964 | } | |||
40965 | ||||
40966 | // If we're inserting a bitcast into zeros, rewrite the insert and move the | |||
40967 | // bitcast to the other side. This helps with detecting zero extending | |||
40968 | // during isel. | |||
40969 | // TODO: Is this useful for other indices than 0? | |||
40970 | if (!IsI1Vector && SubVec.getOpcode() == ISD::BITCAST && IdxVal == 0) { | |||
40971 | MVT CastVT = SubVec.getOperand(0).getSimpleValueType(); | |||
40972 | unsigned NumElems = OpVT.getSizeInBits() / CastVT.getScalarSizeInBits(); | |||
40973 | MVT NewVT = MVT::getVectorVT(CastVT.getVectorElementType(), NumElems); | |||
40974 | SDValue Insert = DAG.getNode(ISD::INSERT_SUBVECTOR, dl, NewVT, | |||
40975 | DAG.getBitcast(NewVT, Vec), | |||
40976 | SubVec.getOperand(0), N->getOperand(2)); | |||
40977 | return DAG.getBitcast(OpVT, Insert); | |||
40978 | } | |||
40979 | } | |||
40980 | ||||
40981 | // Stop here if this is an i1 vector. | |||
40982 | if (IsI1Vector) | |||
40983 | return SDValue(); | |||
40984 | ||||
40985 | // If this is an insert of an extract, combine to a shuffle. Don't do this | |||
40986 | // if the insert or extract can be represented with a subregister operation. | |||
40987 | if (SubVec.getOpcode() == ISD::EXTRACT_SUBVECTOR && | |||
40988 | SubVec.getOperand(0).getSimpleValueType() == OpVT && | |||
40989 | (IdxVal != 0 || !Vec.isUndef())) { | |||
40990 | int ExtIdxVal = SubVec.getConstantOperandVal(1); | |||
40991 | if (ExtIdxVal != 0) { | |||
40992 | int VecNumElts = OpVT.getVectorNumElements(); | |||
40993 | int SubVecNumElts = SubVecVT.getVectorNumElements(); | |||
40994 | SmallVector<int, 64> Mask(VecNumElts); | |||
40995 | // First create an identity shuffle mask. | |||
40996 | for (int i = 0; i != VecNumElts; ++i) | |||
40997 | Mask[i] = i; | |||
40998 | // Now insert the extracted portion. | |||
40999 | for (int i = 0; i != SubVecNumElts; ++i) | |||
41000 | Mask[i + IdxVal] = i + ExtIdxVal + VecNumElts; | |||
41001 | ||||
41002 | return DAG.getVectorShuffle(OpVT, dl, Vec, SubVec.getOperand(0), Mask); | |||
41003 | } | |||
41004 | } | |||
41005 | ||||
41006 | // Fold two 16-byte or 32-byte subvector loads into one 32-byte or 64-byte | |||
41007 | // load: | |||
41008 | // (insert_subvector (insert_subvector undef, (load16 addr), 0), | |||
41009 | // (load16 addr + 16), Elts/2) | |||
41010 | // --> load32 addr | |||
41011 | // or: | |||
41012 | // (insert_subvector (insert_subvector undef, (load32 addr), 0), | |||
41013 | // (load32 addr + 32), Elts/2) | |||
41014 | // --> load64 addr | |||
41015 | // or a 16-byte or 32-byte broadcast: | |||
41016 | // (insert_subvector (insert_subvector undef, (load16 addr), 0), | |||
41017 | // (load16 addr), Elts/2) | |||
41018 | // --> X86SubVBroadcast(load16 addr) | |||
41019 | // or: | |||
41020 | // (insert_subvector (insert_subvector undef, (load32 addr), 0), | |||
41021 | // (load32 addr), Elts/2) | |||
41022 | // --> X86SubVBroadcast(load32 addr) | |||
41023 | if ((IdxVal == OpVT.getVectorNumElements() / 2) && | |||
41024 | Vec.getOpcode() == ISD::INSERT_SUBVECTOR && | |||
41025 | OpVT.getSizeInBits() == SubVecVT.getSizeInBits() * 2) { | |||
41026 | if (isNullConstant(Vec.getOperand(2))) { | |||
41027 | SDValue SubVec2 = Vec.getOperand(1); | |||
41028 | // If needed, look through bitcasts to get to the load. | |||
41029 | if (auto *FirstLd = dyn_cast<LoadSDNode>(peekThroughBitcasts(SubVec2))) { | |||
41030 | bool Fast; | |||
41031 | unsigned Alignment = FirstLd->getAlignment(); | |||
41032 | unsigned AS = FirstLd->getAddressSpace(); | |||
41033 | const X86TargetLowering *TLI = Subtarget.getTargetLowering(); | |||
41034 | if (TLI->allowsMemoryAccess(*DAG.getContext(), DAG.getDataLayout(), | |||
41035 | OpVT, AS, Alignment, &Fast) && Fast) { | |||
41036 | SDValue Ops[] = {SubVec2, SubVec}; | |||
41037 | if (SDValue Ld = EltsFromConsecutiveLoads(OpVT, Ops, dl, DAG, | |||
41038 | Subtarget, false)) | |||
41039 | return Ld; | |||
41040 | } | |||
41041 | } | |||
41042 | // If lower/upper loads are the same and there's no other use of the lower | |||
41043 | // load, then splat the loaded value with a broadcast. | |||
41044 | if (auto *Ld = dyn_cast<LoadSDNode>(peekThroughOneUseBitcasts(SubVec2))) | |||
41045 | if (SubVec2 == SubVec && ISD::isNormalLoad(Ld) && Vec.hasOneUse()) | |||
41046 | return DAG.getNode(X86ISD::SUBV_BROADCAST, dl, OpVT, SubVec); | |||
41047 | ||||
41048 | // If this is subv_broadcast insert into both halves, use a larger | |||
41049 | // subv_broadcast. | |||
41050 | if (SubVec.getOpcode() == X86ISD::SUBV_BROADCAST && SubVec == SubVec2) | |||
41051 | return DAG.getNode(X86ISD::SUBV_BROADCAST, dl, OpVT, | |||
41052 | SubVec.getOperand(0)); | |||
41053 | ||||
41054 | // If we're inserting all zeros into the upper half, change this to | |||
41055 | // an insert into an all zeros vector. We will match this to a move | |||
41056 | // with implicit upper bit zeroing during isel. | |||
41057 | if (ISD::isBuildVectorAllZeros(SubVec.getNode())) | |||
41058 | return DAG.getNode(ISD::INSERT_SUBVECTOR, dl, OpVT, | |||
41059 | getZeroVector(OpVT, Subtarget, DAG, dl), SubVec2, | |||
41060 | Vec.getOperand(2)); | |||
41061 | ||||
41062 | // If we are inserting into both halves of the vector, the starting | |||
41063 | // vector should be undef. If it isn't, make it so. Only do this if the | |||
41064 | // early insert has no other uses. | |||
41065 | // TODO: Should this be a generic DAG combine? | |||
41066 | if (!Vec.getOperand(0).isUndef() && Vec.hasOneUse()) { | |||
41067 | Vec = DAG.getNode(ISD::INSERT_SUBVECTOR, dl, OpVT, DAG.getUNDEF(OpVT), | |||
41068 | SubVec2, Vec.getOperand(2)); | |||
41069 | return DAG.getNode(ISD::INSERT_SUBVECTOR, dl, OpVT, Vec, SubVec, | |||
41070 | N->getOperand(2)); | |||
41071 | ||||
41072 | } | |||
41073 | } | |||
41074 | } | |||
41075 | ||||
41076 | return SDValue(); | |||
41077 | } | |||
41078 | ||||
41079 | static SDValue combineExtractSubvector(SDNode *N, SelectionDAG &DAG, | |||
41080 | TargetLowering::DAGCombinerInfo &DCI, | |||
41081 | const X86Subtarget &Subtarget) { | |||
41082 | // For AVX1 only, if we are extracting from a 256-bit and+not (which will | |||
41083 | // eventually get combined/lowered into ANDNP) with a concatenated operand, | |||
41084 | // split the 'and' into 128-bit ops to avoid the concatenate and extract. | |||
41085 | // We let generic combining take over from there to simplify the | |||
41086 | // insert/extract and 'not'. | |||
41087 | // This pattern emerges during AVX1 legalization. We handle it before lowering | |||
41088 | // to avoid complications like splitting constant vector loads. | |||
41089 | ||||
41090 | // Capture the original wide type in the likely case that we need to bitcast | |||
41091 | // back to this type. | |||
41092 | EVT VT = N->getValueType(0); | |||
41093 | EVT WideVecVT = N->getOperand(0).getValueType(); | |||
41094 | SDValue WideVec = peekThroughBitcasts(N->getOperand(0)); | |||
41095 | const TargetLowering &TLI = DAG.getTargetLoweringInfo(); | |||
41096 | if (Subtarget.hasAVX() && !Subtarget.hasAVX2() && | |||
41097 | TLI.isTypeLegal(WideVecVT) && | |||
41098 | WideVecVT.getSizeInBits() == 256 && WideVec.getOpcode() == ISD::AND) { | |||
41099 | auto isConcatenatedNot = [] (SDValue V) { | |||
41100 | V = peekThroughBitcasts(V); | |||
41101 | if (!isBitwiseNot(V)) | |||
41102 | return false; | |||
41103 | SDValue NotOp = V->getOperand(0); | |||
41104 | return peekThroughBitcasts(NotOp).getOpcode() == ISD::CONCAT_VECTORS; | |||
41105 | }; | |||
41106 | if (isConcatenatedNot(WideVec.getOperand(0)) || | |||
41107 | isConcatenatedNot(WideVec.getOperand(1))) { | |||
41108 | // extract (and v4i64 X, (not (concat Y1, Y2))), n -> andnp v2i64 X(n), Y1 | |||
41109 | SDValue Concat = split256IntArith(WideVec, DAG); | |||
41110 | return DAG.getNode(ISD::EXTRACT_SUBVECTOR, SDLoc(N), VT, | |||
41111 | DAG.getBitcast(WideVecVT, Concat), N->getOperand(1)); | |||
41112 | } | |||
41113 | } | |||
41114 | ||||
41115 | if (DCI.isBeforeLegalizeOps()) | |||
41116 | return SDValue(); | |||
41117 | ||||
41118 | MVT OpVT = N->getSimpleValueType(0); | |||
41119 | SDValue InVec = N->getOperand(0); | |||
41120 | unsigned IdxVal = cast<ConstantSDNode>(N->getOperand(1))->getZExtValue(); | |||
41121 | ||||
41122 | if (ISD::isBuildVectorAllZeros(InVec.getNode())) | |||
41123 | return getZeroVector(OpVT, Subtarget, DAG, SDLoc(N)); | |||
41124 | ||||
41125 | if (ISD::isBuildVectorAllOnes(InVec.getNode())) { | |||
41126 | if (OpVT.getScalarType() == MVT::i1) | |||
41127 | return DAG.getConstant(1, SDLoc(N), OpVT); | |||
41128 | return getOnesVector(OpVT, DAG, SDLoc(N)); | |||
41129 | } | |||
41130 | ||||
41131 | if (InVec.getOpcode() == ISD::BUILD_VECTOR) | |||
41132 | return DAG.getBuildVector( | |||
41133 | OpVT, SDLoc(N), | |||
41134 | InVec.getNode()->ops().slice(IdxVal, OpVT.getVectorNumElements())); | |||
41135 | ||||
41136 | // If we're extracting the lowest subvector and we're the only user, | |||
41137 | // we may be able to perform this with a smaller vector width. | |||
41138 | if (IdxVal == 0 && InVec.hasOneUse()) { | |||
41139 | unsigned InOpcode = InVec.getOpcode(); | |||
41140 | if (OpVT == MVT::v2f64 && InVec.getValueType() == MVT::v4f64) { | |||
41141 | // v2f64 CVTDQ2PD(v4i32). | |||
41142 | if (InOpcode == ISD::SINT_TO_FP && | |||
41143 | InVec.getOperand(0).getValueType() == MVT::v4i32) { | |||
41144 | return DAG.getNode(X86ISD::CVTSI2P, SDLoc(N), OpVT, InVec.getOperand(0)); | |||
41145 | } | |||
41146 | // v2f64 CVTPS2PD(v4f32). | |||
41147 | if (InOpcode == ISD::FP_EXTEND && | |||
41148 | InVec.getOperand(0).getValueType() == MVT::v4f32) { | |||
41149 | return DAG.getNode(X86ISD::VFPEXT, SDLoc(N), OpVT, InVec.getOperand(0)); | |||
41150 | } | |||
41151 | } | |||
41152 | if ((InOpcode == ISD::ZERO_EXTEND || InOpcode == ISD::SIGN_EXTEND) && | |||
41153 | OpVT.is128BitVector() && | |||
41154 | InVec.getOperand(0).getSimpleValueType().is128BitVector()) { | |||
41155 | unsigned ExtOp = | |||
41156 | InOpcode == ISD::ZERO_EXTEND ? ISD::ZERO_EXTEND_VECTOR_INREG | |||
41157 | : ISD::SIGN_EXTEND_VECTOR_INREG; | |||
41158 | return DAG.getNode(ExtOp, SDLoc(N), OpVT, InVec.getOperand(0)); | |||
41159 | } | |||
41160 | if ((InOpcode == ISD::ZERO_EXTEND_VECTOR_INREG || | |||
41161 | InOpcode == ISD::SIGN_EXTEND_VECTOR_INREG) && | |||
41162 | OpVT.is128BitVector() && | |||
41163 | InVec.getOperand(0).getSimpleValueType().is128BitVector()) { | |||
41164 | return DAG.getNode(InOpcode, SDLoc(N), OpVT, InVec.getOperand(0)); | |||
41165 | } | |||
41166 | if (InOpcode == ISD::BITCAST) { | |||
41167 | // TODO - do this for target shuffles in general. | |||
41168 | SDValue InVecBC = peekThroughOneUseBitcasts(InVec); | |||
41169 | if (InVecBC.getOpcode() == X86ISD::PSHUFB && OpVT.is128BitVector()) { | |||
41170 | SDLoc DL(N); | |||
41171 | SDValue SubPSHUFB = | |||
41172 | DAG.getNode(X86ISD::PSHUFB, DL, MVT::v16i8, | |||
41173 | extract128BitVector(InVecBC.getOperand(0), 0, DAG, DL), | |||
41174 | extract128BitVector(InVecBC.getOperand(1), 0, DAG, DL)); | |||
41175 | return DAG.getBitcast(OpVT, SubPSHUFB); | |||
41176 | } | |||
41177 | } | |||
41178 | } | |||
41179 | ||||
41180 | return SDValue(); | |||
41181 | } | |||
41182 | ||||
41183 | static SDValue combineScalarToVector(SDNode *N, SelectionDAG &DAG) { | |||
41184 | EVT VT = N->getValueType(0); | |||
41185 | SDValue Src = N->getOperand(0); | |||
41186 | ||||
41187 | // If this is a scalar to vector to v1i1 from an AND with 1, bypass the and. | |||
41188 | // This occurs frequently in our masked scalar intrinsic code and our | |||
41189 | // floating point select lowering with AVX512. | |||
41190 | // TODO: SimplifyDemandedBits instead? | |||
41191 | if (VT == MVT::v1i1 && Src.getOpcode() == ISD::AND && Src.hasOneUse()) | |||
41192 | if (auto *C = dyn_cast<ConstantSDNode>(Src.getOperand(1))) | |||
41193 | if (C->getAPIntValue().isOneValue()) | |||
41194 | return DAG.getNode(ISD::SCALAR_TO_VECTOR, SDLoc(N), MVT::v1i1, | |||
41195 | Src.getOperand(0)); | |||
41196 | ||||
41197 | return SDValue(); | |||
41198 | } | |||
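      | // e.g. (v1i1 scalar_to_vector (and X, 1)) -> (v1i1 scalar_to_vector X); | |||
      | // only bit 0 of X survives the implicit truncation to i1, so the masking | |||
      | // AND is redundant. | |||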
41199 | ||||
41200 | // Simplify PMULDQ and PMULUDQ operations. | |||
41201 | static SDValue combinePMULDQ(SDNode *N, SelectionDAG &DAG, | |||
41202 | TargetLowering::DAGCombinerInfo &DCI) { | |||
41203 | SDValue LHS = N->getOperand(0); | |||
41204 | SDValue RHS = N->getOperand(1); | |||
41205 | ||||
41206 | // Canonicalize constant to RHS. | |||
41207 | if (DAG.isConstantIntBuildVectorOrConstantInt(LHS) && | |||
41208 | !DAG.isConstantIntBuildVectorOrConstantInt(RHS)) | |||
41209 | return DAG.getNode(N->getOpcode(), SDLoc(N), N->getValueType(0), RHS, LHS); | |||
41210 | ||||
41211 | // Multiply by zero. | |||
41212 | if (ISD::isBuildVectorAllZeros(RHS.getNode())) | |||
41213 | return RHS; | |||
41214 | ||||
41215 | // Aggressively peek through ops to get at the demanded low bits. | |||
41216 | APInt DemandedMask = APInt::getLowBitsSet(64, 32); | |||
41217 | SDValue DemandedLHS = DAG.GetDemandedBits(LHS, DemandedMask); | |||
41218 | SDValue DemandedRHS = DAG.GetDemandedBits(RHS, DemandedMask); | |||
41219 | if (DemandedLHS || DemandedRHS) | |||
41220 | return DAG.getNode(N->getOpcode(), SDLoc(N), N->getValueType(0), | |||
41221 | DemandedLHS ? DemandedLHS : LHS, | |||
41222 | DemandedRHS ? DemandedRHS : RHS); | |||
41223 | ||||
41224 | // PMULDQ/PMULUDQ only uses lower 32 bits from each vector element. | |||
41225 | const TargetLowering &TLI = DAG.getTargetLoweringInfo(); | |||
41226 | if (TLI.SimplifyDemandedBits(SDValue(N, 0), APInt::getAllOnesValue(64), DCI)) | |||
41227 | return SDValue(N, 0); | |||
41228 | ||||
41229 | return SDValue(); | |||
41230 | } | |||
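      | // e.g. since pmuludq reads only the low 32 bits of each i64 lane, | |||
      | // (pmuludq (and X, <0xFFFFFFFF,...>), Y) simplifies to (pmuludq X, Y); | |||
      | // the demanded-bits calls above perform folds of this kind. | |||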
41231 | ||||
41232 | SDValue X86TargetLowering::PerformDAGCombine(SDNode *N, | |||
41233 | DAGCombinerInfo &DCI) const { | |||
41234 | SelectionDAG &DAG = DCI.DAG; | |||
41235 | switch (N->getOpcode()) { | |||
41236 | default: break; | |||
41237 | case ISD::SCALAR_TO_VECTOR: | |||
41238 | return combineScalarToVector(N, DAG); | |||
41239 | case ISD::EXTRACT_VECTOR_ELT: | |||
41240 | case X86ISD::PEXTRW: | |||
41241 | case X86ISD::PEXTRB: | |||
41242 | return combineExtractVectorElt(N, DAG, DCI, Subtarget); | |||
41243 | case ISD::INSERT_SUBVECTOR: | |||
41244 | return combineInsertSubvector(N, DAG, DCI, Subtarget); | |||
41245 | case ISD::EXTRACT_SUBVECTOR: | |||
41246 | return combineExtractSubvector(N, DAG, DCI, Subtarget); | |||
41247 | case ISD::VSELECT: | |||
41248 | case ISD::SELECT: | |||
41249 | case X86ISD::SHRUNKBLEND: return combineSelect(N, DAG, DCI, Subtarget); | |||
41250 | case ISD::BITCAST: return combineBitcast(N, DAG, DCI, Subtarget); | |||
41251 | case X86ISD::CMOV: return combineCMov(N, DAG, DCI, Subtarget); | |||
41252 | case X86ISD::CMP: return combineCMP(N, DAG); | |||
41253 | case ISD::ADD: return combineAdd(N, DAG, Subtarget); | |||
41254 | case ISD::SUB: return combineSub(N, DAG, Subtarget); | |||
41255 | case X86ISD::SBB: return combineSBB(N, DAG); | |||
41256 | case X86ISD::ADC: return combineADC(N, DAG, DCI); | |||
41257 | case ISD::MUL: return combineMul(N, DAG, DCI, Subtarget); | |||
41258 | case ISD::SHL: | |||
41259 | case ISD::SRA: | |||
41260 | case ISD::SRL: return combineShift(N, DAG, DCI, Subtarget); | |||
41261 | case ISD::AND: return combineAnd(N, DAG, DCI, Subtarget); | |||
41262 | case ISD::OR: return combineOr(N, DAG, DCI, Subtarget); | |||
41263 | case ISD::XOR: return combineXor(N, DAG, DCI, Subtarget); | |||
41264 | case X86ISD::BEXTR: return combineBEXTR(N, DAG, DCI, Subtarget); | |||
41265 | case ISD::LOAD: return combineLoad(N, DAG, DCI, Subtarget); | |||
41266 | case ISD::MLOAD: return combineMaskedLoad(N, DAG, DCI, Subtarget); | |||
41267 | case ISD::STORE: return combineStore(N, DAG, Subtarget); | |||
41268 | case ISD::MSTORE: return combineMaskedStore(N, DAG, DCI, Subtarget); | |||
41269 | case ISD::SINT_TO_FP: return combineSIntToFP(N, DAG, Subtarget); | |||
41270 | case ISD::UINT_TO_FP: return combineUIntToFP(N, DAG, Subtarget); | |||
41271 | case ISD::FADD: | |||
41272 | case ISD::FSUB: return combineFaddFsub(N, DAG, Subtarget); | |||
41273 | case ISD::FNEG: return combineFneg(N, DAG, Subtarget); | |||
41274 | case ISD::TRUNCATE: return combineTruncate(N, DAG, Subtarget); | |||
41275 | case X86ISD::ANDNP: return combineAndnp(N, DAG, DCI, Subtarget); | |||
41276 | case X86ISD::FAND: return combineFAnd(N, DAG, Subtarget); | |||
41277 | case X86ISD::FANDN: return combineFAndn(N, DAG, Subtarget); | |||
41278 | case X86ISD::FXOR: | |||
41279 | case X86ISD::FOR: return combineFOr(N, DAG, Subtarget); | |||
41280 | case X86ISD::FMIN: | |||
41281 | case X86ISD::FMAX: return combineFMinFMax(N, DAG); | |||
41282 | case ISD::FMINNUM: | |||
41283 | case ISD::FMAXNUM: return combineFMinNumFMaxNum(N, DAG, Subtarget); | |||
41284 | case X86ISD::CVTSI2P: | |||
41285 | case X86ISD::CVTUI2P: return combineX86INT_TO_FP(N, DAG, DCI); | |||
41286 | case X86ISD::BT: return combineBT(N, DAG, DCI); | |||
41287 | case ISD::ANY_EXTEND: | |||
41288 | case ISD::ZERO_EXTEND: return combineZext(N, DAG, DCI, Subtarget); | |||
41289 | case ISD::SIGN_EXTEND: return combineSext(N, DAG, DCI, Subtarget); | |||
41290 | case ISD::SIGN_EXTEND_INREG: return combineSignExtendInReg(N, DAG, Subtarget); | |||
41291 | case ISD::SETCC: return combineSetCC(N, DAG, Subtarget); | |||
41292 | case X86ISD::SETCC: return combineX86SetCC(N, DAG, Subtarget); | |||
41293 | case X86ISD::BRCOND: return combineBrCond(N, DAG, Subtarget); | |||
41294 | case X86ISD::PACKSS: | |||
41295 | case X86ISD::PACKUS: return combineVectorPack(N, DAG, DCI, Subtarget); | |||
41296 | case X86ISD::VSHL: | |||
41297 | case X86ISD::VSRA: | |||
41298 | case X86ISD::VSRL: | |||
41299 | return combineVectorShiftVar(N, DAG, DCI, Subtarget); | |||
41300 | case X86ISD::VSHLI: | |||
41301 | case X86ISD::VSRAI: | |||
41302 | case X86ISD::VSRLI: | |||
41303 | return combineVectorShiftImm(N, DAG, DCI, Subtarget); | |||
41304 | case X86ISD::PINSRB: | |||
41305 | case X86ISD::PINSRW: return combineVectorInsert(N, DAG, DCI, Subtarget); | |||
41306 | case X86ISD::SHUFP: // Handle all target specific shuffles | |||
41307 | case X86ISD::INSERTPS: | |||
41308 | case X86ISD::EXTRQI: | |||
41309 | case X86ISD::INSERTQI: | |||
41310 | case X86ISD::PALIGNR: | |||
41311 | case X86ISD::VSHLDQ: | |||
41312 | case X86ISD::VSRLDQ: | |||
41313 | case X86ISD::BLENDI: | |||
41314 | case X86ISD::UNPCKH: | |||
41315 | case X86ISD::UNPCKL: | |||
41316 | case X86ISD::MOVHLPS: | |||
41317 | case X86ISD::MOVLHPS: | |||
41318 | case X86ISD::PSHUFB: | |||
41319 | case X86ISD::PSHUFD: | |||
41320 | case X86ISD::PSHUFHW: | |||
41321 | case X86ISD::PSHUFLW: | |||
41322 | case X86ISD::MOVSHDUP: | |||
41323 | case X86ISD::MOVSLDUP: | |||
41324 | case X86ISD::MOVDDUP: | |||
41325 | case X86ISD::MOVSS: | |||
41326 | case X86ISD::MOVSD: | |||
41327 | case X86ISD::VBROADCAST: | |||
41328 | case X86ISD::VPPERM: | |||
41329 | case X86ISD::VPERMI: | |||
41330 | case X86ISD::VPERMV: | |||
41331 | case X86ISD::VPERMV3: | |||
41332 | case X86ISD::VPERMIL2: | |||
41333 | case X86ISD::VPERMILPI: | |||
41334 | case X86ISD::VPERMILPV: | |||
41335 | case X86ISD::VPERM2X128: | |||
41336 | case X86ISD::SHUF128: | |||
41337 | case X86ISD::VZEXT_MOVL: | |||
41338 | case ISD::VECTOR_SHUFFLE: return combineShuffle(N, DAG, DCI, Subtarget); | |||
41339 | case X86ISD::FMADD_RND: | |||
41340 | case X86ISD::FMSUB: | |||
41341 | case X86ISD::FMSUB_RND: | |||
41342 | case X86ISD::FNMADD: | |||
41343 | case X86ISD::FNMADD_RND: | |||
41344 | case X86ISD::FNMSUB: | |||
41345 | case X86ISD::FNMSUB_RND: | |||
41346 | case ISD::FMA: return combineFMA(N, DAG, Subtarget); | |||
41347 | case X86ISD::FMADDSUB_RND: | |||
41348 | case X86ISD::FMSUBADD_RND: | |||
41349 | case X86ISD::FMADDSUB: | |||
41350 | case X86ISD::FMSUBADD: return combineFMADDSUB(N, DAG, Subtarget); | |||
41351 | case X86ISD::MOVMSK: return combineMOVMSK(N, DAG, DCI); | |||
41352 | case X86ISD::MGATHER: | |||
41353 | case X86ISD::MSCATTER: | |||
41354 | case ISD::MGATHER: | |||
41355 | case ISD::MSCATTER: return combineGatherScatter(N, DAG, DCI, Subtarget); | |||
41356 | case X86ISD::PCMPEQ: | |||
41357 | case X86ISD::PCMPGT: return combineVectorCompare(N, DAG, Subtarget); | |||
41358 | case X86ISD::PMULDQ: | |||
41359 | case X86ISD::PMULUDQ: return combinePMULDQ(N, DAG, DCI); | |||
41360 | } | |||
41361 | ||||
41362 | return SDValue(); | |||
41363 | } | |||
41364 | ||||
41365 | bool X86TargetLowering::isTypeDesirableForOp(unsigned Opc, EVT VT) const { | |||
41366 | if (!isTypeLegal(VT)) | |||
41367 | return false; | |||
41368 | ||||
41369 | // There are no vXi8 shifts. | |||
41370 | if (Opc == ISD::SHL && VT.isVector() && VT.getVectorElementType() == MVT::i8) | |||
41371 | return false; | |||
41372 | ||||
41373 | // 8-bit multiply is probably not much cheaper than 32-bit multiply, and | |||
41374 | // we have specializations to turn 32-bit multiply into LEA or other ops. | |||
41375 | // Also, see the comment in "IsDesirableToPromoteOp" - where we additionally | |||
41376 | // check for a constant operand to the multiply. | |||
41377 | if (Opc == ISD::MUL && VT == MVT::i8) | |||
41378 | return false; | |||
41379 | ||||
41380 | // i16 instruction encodings are longer and some i16 instructions are slow, | |||
41381 | // so those are not desirable. | |||
41382 | if (VT == MVT::i16) { | |||
41383 | switch (Opc) { | |||
41384 | default: | |||
41385 | break; | |||
41386 | case ISD::LOAD: | |||
41387 | case ISD::SIGN_EXTEND: | |||
41388 | case ISD::ZERO_EXTEND: | |||
41389 | case ISD::ANY_EXTEND: | |||
41390 | case ISD::SHL: | |||
41391 | case ISD::SRL: | |||
41392 | case ISD::SUB: | |||
41393 | case ISD::ADD: | |||
41394 | case ISD::MUL: | |||
41395 | case ISD::AND: | |||
41396 | case ISD::OR: | |||
41397 | case ISD::XOR: | |||
41398 | return false; | |||
41399 | } | |||
41400 | } | |||
41401 | ||||
41402 | // Any legal type not explicitly accounted for above here is desirable. | |||
41403 | return true; | |||
41404 | } | |||
41405 | ||||
41406 | SDValue X86TargetLowering::expandIndirectJTBranch(const SDLoc& dl, | |||
41407 | SDValue Value, SDValue Addr, | |||
41408 | SelectionDAG &DAG) const { | |||
41409 | const Module *M = DAG.getMachineFunction().getMMI().getModule(); | |||
41410 | Metadata *IsCFProtectionSupported = M->getModuleFlag("cf-protection-branch"); | |||
41411 | if (IsCFProtectionSupported) { | |||
41412 | // If control-flow branch protection is enabled, we need to add the | |||
41413 | // notrack prefix to the indirect branch. | |||
41414 | // In order to do that we create the NT_BRIND SDNode. | |||
41415 | // During isel, the pattern will convert it to a jmp with the NoTrack prefix. | |||
41416 | return DAG.getNode(X86ISD::NT_BRIND, dl, MVT::Other, Value, Addr); | |||
41417 | } | |||
41418 | ||||
41419 | return TargetLowering::expandIndirectJTBranch(dl, Value, Addr, DAG); | |||
41420 | } | |||
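      | // e.g. a jump-table dispatch that would normally emit "jmpq *%rax" is | |||
      | // emitted as "notrack jmpq *%rax" under CET indirect-branch tracking | |||
      | // (illustrative register choice). | |||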
41421 | ||||
41422 | bool X86TargetLowering::IsDesirableToPromoteOp(SDValue Op, EVT &PVT) const { | |||
41423 | EVT VT = Op.getValueType(); | |||
41424 | bool Is8BitMulByConstant = VT == MVT::i8 && Op.getOpcode() == ISD::MUL && | |||
41425 | isa<ConstantSDNode>(Op.getOperand(1)); | |||
41426 | ||||
41427 | // i16 is legal, but undesirable since i16 instruction encodings are longer | |||
41428 | // and some i16 instructions are slow. | |||
41429 | // 8-bit multiply-by-constant can usually be expanded to something cheaper | |||
41430 | // using LEA and/or other ALU ops. | |||
41431 | if (VT != MVT::i16 && !Is8BitMulByConstant) | |||
41432 | return false; | |||
41433 | ||||
41434 | auto IsFoldableRMW = [](SDValue Load, SDValue Op) { | |||
41435 | if (!Op.hasOneUse()) | |||
41436 | return false; | |||
41437 | SDNode *User = *Op->use_begin(); | |||
41438 | if (!ISD::isNormalStore(User)) | |||
41439 | return false; | |||
41440 | auto *Ld = cast<LoadSDNode>(Load); | |||
41441 | auto *St = cast<StoreSDNode>(User); | |||
41442 | return Ld->getBasePtr() == St->getBasePtr(); | |||
41443 | }; | |||
41444 | ||||
41445 | auto IsFoldableAtomicRMW = [](SDValue Load, SDValue Op) { | |||
41446 | if (!Load.hasOneUse() || Load.getOpcode() != ISD::ATOMIC_LOAD) | |||
41447 | return false; | |||
41448 | if (!Op.hasOneUse()) | |||
41449 | return false; | |||
41450 | SDNode *User = *Op->use_begin(); | |||
41451 | if (User->getOpcode() != ISD::ATOMIC_STORE) | |||
41452 | return false; | |||
41453 | auto *Ld = cast<AtomicSDNode>(Load); | |||
41454 | auto *St = cast<AtomicSDNode>(User); | |||
41455 | return Ld->getBasePtr() == St->getBasePtr(); | |||
41456 | }; | |||
41457 | ||||
41458 | bool Commute = false; | |||
41459 | switch (Op.getOpcode()) { | |||
41460 | default: return false; | |||
41461 | case ISD::SIGN_EXTEND: | |||
41462 | case ISD::ZERO_EXTEND: | |||
41463 | case ISD::ANY_EXTEND: | |||
41464 | break; | |||
41465 | case ISD::SHL: | |||
41466 | case ISD::SRL: { | |||
41467 | SDValue N0 = Op.getOperand(0); | |||
41468 | // Look out for (store (shl (load), x)). | |||
41469 | if (MayFoldLoad(N0) && IsFoldableRMW(N0, Op)) | |||
41470 | return false; | |||
41471 | break; | |||
41472 | } | |||
41473 | case ISD::ADD: | |||
41474 | case ISD::MUL: | |||
41475 | case ISD::AND: | |||
41476 | case ISD::OR: | |||
41477 | case ISD::XOR: | |||
41478 | Commute = true; | |||
41479 | LLVM_FALLTHROUGH; | |||
41480 | case ISD::SUB: { | |||
41481 | SDValue N0 = Op.getOperand(0); | |||
41482 | SDValue N1 = Op.getOperand(1); | |||
41483 | // Avoid disabling potential load folding opportunities. | |||
41484 | if (MayFoldLoad(N1) && | |||
41485 | (!Commute || !isa<ConstantSDNode>(N0) || | |||
41486 | (Op.getOpcode() != ISD::MUL && IsFoldableRMW(N1, Op)))) | |||
41487 | return false; | |||
41488 | if (MayFoldLoad(N0) && | |||
41489 | ((Commute && !isa<ConstantSDNode>(N1)) || | |||
41490 | (Op.getOpcode() != ISD::MUL && IsFoldableRMW(N0, Op)))) | |||
41491 | return false; | |||
41492 | if (IsFoldableAtomicRMW(N0, Op) || | |||
41493 | (Commute && IsFoldableAtomicRMW(N1, Op))) | |||
41494 | return false; | |||
41495 | } | |||
41496 | } | |||
41497 | ||||
41498 | PVT = MVT::i32; | |||
41499 | return true; | |||
41500 | } | |||
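      | // e.g. (illustrative) promoting an i16 "t = add (load p), x; store t, p" | |||
      | // sequence to i32 would block folding it into a single read-modify-write | |||
      | // "addw" instruction, which is why IsFoldableRMW rejects promotion above. | |||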
41501 | ||||
41502 | bool X86TargetLowering:: | |||
41503 | isDesirableToCombineBuildVectorToShuffleTruncate( | |||
41504 | ArrayRef<int> ShuffleMask, EVT SrcVT, EVT TruncVT) const { | |||
41505 | ||||
41506 | assert(SrcVT.getVectorNumElements() == ShuffleMask.size() && | |||
41507 | "Element count mismatch"); | |||
41508 | assert( | |||
41509 | Subtarget.getTargetLowering()->isShuffleMaskLegal(ShuffleMask, SrcVT) && | |||
41510 | "Shuffle Mask expected to be legal"); | |||
41511 | ||||
41512 | // For 32-bit elements VPERMD is better than shuffle+truncate. | |||
41513 | // TODO: After we improve lowerBuildVector, add an exception for VPERMW. | |||
41514 | if (SrcVT.getScalarSizeInBits() == 32 || !Subtarget.hasAVX2()) | |||
41515 | return false; | |||
41516 | ||||
41517 | if (is128BitLaneCrossingShuffleMask(SrcVT.getSimpleVT(), ShuffleMask)) | |||
41518 | return false; | |||
41519 | ||||
41520 | return true; | |||
41521 | } | |||
41522 | ||||
41523 | //===----------------------------------------------------------------------===// | |||
41524 | // X86 Inline Assembly Support | |||
41525 | //===----------------------------------------------------------------------===// | |||
41526 | ||||
41527 | // Helper to match an asm string against a list of whitespace-separated pieces. | |||
41528 | static bool matchAsm(StringRef S, ArrayRef<const char *> Pieces) { | |||
41529 | S = S.substr(S.find_first_not_of(" \t")); // Skip leading whitespace. | |||
41530 | ||||
41531 | for (StringRef Piece : Pieces) { | |||
41532 | if (!S.startswith(Piece)) // Check if the piece matches. | |||
41533 | return false; | |||
41534 | ||||
41535 | S = S.substr(Piece.size()); | |||
41536 | StringRef::size_type Pos = S.find_first_not_of(" \t"); | |||
41537 | if (Pos == 0) // We matched a prefix. | |||
41538 | return false; | |||
41539 | ||||
41540 | S = S.substr(Pos); | |||
41541 | } | |||
41542 | ||||
41543 | return S.empty(); | |||
41544 | } | |||
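// Editor's sketch (hedged): the same matching rule restated with only the
// standard library, so it can be compiled and tested outside LLVM. The name
// matchAsmSketch is hypothetical.
//
//   #include <string>
//   #include <vector>
//
//   static bool matchAsmSketch(std::string S,
//                              const std::vector<std::string> &Pieces) {
//     S.erase(0, S.find_first_not_of(" \t"));       // Skip leading blanks.
//     for (const std::string &Piece : Pieces) {
//       if (S.compare(0, Piece.size(), Piece) != 0) // Piece must match here.
//         return false;
//       S.erase(0, Piece.size());
//       if (S.find_first_not_of(" \t") == 0)        // Only matched a prefix.
//         return false;
//       S.erase(0, S.find_first_not_of(" \t"));     // npos erases to the end.
//     }
//     return S.empty();                             // Nothing may remain.
//   }
//
// matchAsmSketch("  bswap $0", {"bswap", "$0"}) succeeds, while
// matchAsmSketch("bswapx $0", {"bswap", "$0"}) stops at the prefix check.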
41545 | ||||
41546 | static bool clobbersFlagRegisters(const SmallVector<StringRef, 4> &AsmPieces) { | |||
41547 | ||||
41548 | if (AsmPieces.size() == 3 || AsmPieces.size() == 4) { | |||
41549 | if (std::count(AsmPieces.begin(), AsmPieces.end(), "~{cc}") && | |||
41550 | std::count(AsmPieces.begin(), AsmPieces.end(), "~{flags}") && | |||
41551 | std::count(AsmPieces.begin(), AsmPieces.end(), "~{fpsr}")) { | |||
41552 | ||||
41553 | if (AsmPieces.size() == 3) | |||
41554 | return true; | |||
41555 | else if (std::count(AsmPieces.begin(), AsmPieces.end(), "~{dirflag}")) | |||
41556 | return true; | |||
41557 | } | |||
41558 | } | |||
41559 | return false; | |||
41560 | } | |||
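// Editor's illustration (hedged, hypothetical constraint string): splitting
// the tail of "=r,0,~{cc},~{dirflag},~{fpsr},~{flags}" on ',' and sorting it
// yields the four pieces {"~{cc}", "~{dirflag}", "~{flags}", "~{fpsr}"}; all
// three required clobbers plus ~{dirflag} are present, so the check succeeds.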
41561 | ||||
41562 | bool X86TargetLowering::ExpandInlineAsm(CallInst *CI) const { | |||
41563 | InlineAsm *IA = cast<InlineAsm>(CI->getCalledValue()); | |||
41564 | ||||
41565 | const std::string &AsmStr = IA->getAsmString(); | |||
41566 | ||||
41567 | IntegerType *Ty = dyn_cast<IntegerType>(CI->getType()); | |||
41568 | if (!Ty || Ty->getBitWidth() % 16 != 0) | |||
41569 | return false; | |||
41570 | ||||
41571 | // TODO: should remove alternatives from the asmstring: "foo {a|b}" -> "foo a" | |||
41572 | SmallVector<StringRef, 4> AsmPieces; | |||
41573 | SplitString(AsmStr, AsmPieces, ";\n"); | |||
41574 | ||||
41575 | switch (AsmPieces.size()) { | |||
41576 | default: return false; | |||
41577 | case 1: | |||
41578 | // FIXME: this should verify that we are targeting a 486 or better. If not, | |||
41579 | // we will turn this bswap into something that will be lowered to logical | |||
41580 | // ops instead of emitting the bswap asm. For now, we don't support 486 or | |||
41581 | // lower so don't worry about this. | |||
41582 | // bswap $0 | |||
41583 | if (matchAsm(AsmPieces[0], {"bswap", "$0"}) || | |||
41584 | matchAsm(AsmPieces[0], {"bswapl", "$0"}) || | |||
41585 | matchAsm(AsmPieces[0], {"bswapq", "$0"}) || | |||
41586 | matchAsm(AsmPieces[0], {"bswap", "${0:q}"}) || | |||
41587 | matchAsm(AsmPieces[0], {"bswapl", "${0:q}"}) || | |||
41588 | matchAsm(AsmPieces[0], {"bswapq", "${0:q}"})) { | |||
41589 | // No need to check constraints, nothing other than the equivalent of | |||
41590 | // "=r,0" would be valid here. | |||
41591 | return IntrinsicLowering::LowerToByteSwap(CI); | |||
41592 | } | |||
41593 | ||||
41594 | // rorw $$8, ${0:w} --> llvm.bswap.i16 | |||
41595 | if (CI->getType()->isIntegerTy(16) && | |||
41596 | IA->getConstraintString().compare(0, 5, "=r,0,") == 0 && | |||
41597 | (matchAsm(AsmPieces[0], {"rorw", "$$8,", "${0:w}"}) || | |||
41598 | matchAsm(AsmPieces[0], {"rolw", "$$8,", "${0:w}"}))) { | |||
41599 | AsmPieces.clear(); | |||
41600 | StringRef ConstraintsStr = IA->getConstraintString(); | |||
41601 | SplitString(StringRef(ConstraintsStr).substr(5), AsmPieces, ","); | |||
41602 | array_pod_sort(AsmPieces.begin(), AsmPieces.end()); | |||
41603 | if (clobbersFlagRegisters(AsmPieces)) | |||
41604 | return IntrinsicLowering::LowerToByteSwap(CI); | |||
41605 | } | |||
41606 | break; | |||
41607 | case 3: | |||
41608 | if (CI->getType()->isIntegerTy(32) && | |||
41609 | IA->getConstraintString().compare(0, 5, "=r,0,") == 0 && | |||
41610 | matchAsm(AsmPieces[0], {"rorw", "$$8,", "${0:w}"}) && | |||
41611 | matchAsm(AsmPieces[1], {"rorl", "$$16,", "$0"}) && | |||
41612 | matchAsm(AsmPieces[2], {"rorw", "$$8,", "${0:w}"})) { | |||
41613 | AsmPieces.clear(); | |||
41614 | StringRef ConstraintsStr = IA->getConstraintString(); | |||
41615 | SplitString(StringRef(ConstraintsStr).substr(5), AsmPieces, ","); | |||
41616 | array_pod_sort(AsmPieces.begin(), AsmPieces.end()); | |||
41617 | if (clobbersFlagRegisters(AsmPieces)) | |||
41618 | return IntrinsicLowering::LowerToByteSwap(CI); | |||
41619 | } | |||
41620 | ||||
41621 | if (CI->getType()->isIntegerTy(64)) { | |||
41622 | InlineAsm::ConstraintInfoVector Constraints = IA->ParseConstraints(); | |||
41623 | if (Constraints.size() >= 2 && | |||
41624 | Constraints[0].Codes.size() == 1 && Constraints[0].Codes[0] == "A" && | |||
41625 | Constraints[1].Codes.size() == 1 && Constraints[1].Codes[0] == "0") { | |||
41626 | // bswap %eax / bswap %edx / xchgl %eax, %edx -> llvm.bswap.i64 | |||
41627 | if (matchAsm(AsmPieces[0], {"bswap", "%eax"}) && | |||
41628 | matchAsm(AsmPieces[1], {"bswap", "%edx"}) && | |||
41629 | matchAsm(AsmPieces[2], {"xchgl", "%eax,", "%edx"})) | |||
41630 | return IntrinsicLowering::LowerToByteSwap(CI); | |||
41631 | } | |||
41632 | } | |||
41633 | break; | |||
41634 | } | |||
41635 | return false; | |||
41636 | } | |||
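// Editor's illustration (hedged): the IR-level effect of the rewrite above.
// An inline-asm call such as
//   %r = call i32 asm "bswap $0", "=r,0"(i32 %x)
// matches the single-piece "bswap $0" pattern and is replaced through
// IntrinsicLowering::LowerToByteSwap with
//   %r = call i32 @llvm.bswap.i32(i32 %x)
// so the backend can pick the best byte-swap lowering itself.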
41637 | ||||
41638 | /// Given a constraint letter, return the type of constraint for this target. | |||
41639 | X86TargetLowering::ConstraintType | |||
41640 | X86TargetLowering::getConstraintType(StringRef Constraint) const { | |||
41641 | if (Constraint.size() == 1) { | |||
41642 | switch (Constraint[0]) { | |||
41643 | case 'R': | |||
41644 | case 'q': | |||
41645 | case 'Q': | |||
41646 | case 'f': | |||
41647 | case 't': | |||
41648 | case 'u': | |||
41649 | case 'y': | |||
41650 | case 'x': | |||
41651 | case 'v': | |||
41652 | case 'Y': | |||
41653 | case 'l': | |||
41654 | case 'k': // AVX512 masking registers. | |||
41655 | return C_RegisterClass; | |||
41656 | case 'a': | |||
41657 | case 'b': | |||
41658 | case 'c': | |||
41659 | case 'd': | |||
41660 | case 'S': | |||
41661 | case 'D': | |||
41662 | case 'A': | |||
41663 | return C_Register; | |||
41664 | case 'I': | |||
41665 | case 'J': | |||
41666 | case 'K': | |||
41667 | case 'L': | |||
41668 | case 'M': | |||
41669 | case 'N': | |||
41670 | case 'G': | |||
41671 | case 'C': | |||
41672 | case 'e': | |||
41673 | case 'Z': | |||
41674 | return C_Other; | |||
41675 | default: | |||
41676 | break; | |||
41677 | } | |||
41678 | } | |||
41679 | else if (Constraint.size() == 2) { | |||
41680 | switch (Constraint[0]) { | |||
41681 | default: | |||
41682 | break; | |||
41683 | case 'Y': | |||
41684 | switch (Constraint[1]) { | |||
41685 | default: | |||
41686 | break; | |||
41687 | case 'z': | |||
41688 | case '0': | |||
41689 | return C_Register; | |||
41690 | case 'i': | |||
41691 | case 'm': | |||
41692 | case 'k': | |||
41693 | case 't': | |||
41694 | case '2': | |||
41695 | return C_RegisterClass; | |||
41696 | } | |||
41697 | } | |||
41698 | } | |||
41699 | return TargetLowering::getConstraintType(Constraint); | |||
41700 | } | |||
41701 | ||||
41702 | /// Examine constraint type and operand type and determine a weight value. | |||
41703 | /// This object must already have been set up with the operand type | |||
41704 | /// and the current alternative constraint selected. | |||
41705 | TargetLowering::ConstraintWeight | |||
41706 | X86TargetLowering::getSingleConstraintMatchWeight( | |||
41707 | AsmOperandInfo &info, const char *constraint) const { | |||
41708 | ConstraintWeight weight = CW_Invalid; | |||
41709 | Value *CallOperandVal = info.CallOperandVal; | |||
41710 | // If we don't have a value, we can't do a match, | |||
41711 | // but allow it at the lowest weight. | |||
41712 | if (!CallOperandVal) | |||
41713 | return CW_Default; | |||
41714 | Type *type = CallOperandVal->getType(); | |||
41715 | // Look at the constraint type. | |||
41716 | switch (*constraint) { | |||
41717 | default: | |||
41718 | weight = TargetLowering::getSingleConstraintMatchWeight(info, constraint); | |||
41719 | LLVM_FALLTHROUGH; | |||
41720 | case 'R': | |||
41721 | case 'q': | |||
41722 | case 'Q': | |||
41723 | case 'a': | |||
41724 | case 'b': | |||
41725 | case 'c': | |||
41726 | case 'd': | |||
41727 | case 'S': | |||
41728 | case 'D': | |||
41729 | case 'A': | |||
41730 | if (CallOperandVal->getType()->isIntegerTy()) | |||
41731 | weight = CW_SpecificReg; | |||
41732 | break; | |||
41733 | case 'f': | |||
41734 | case 't': | |||
41735 | case 'u': | |||
41736 | if (type->isFloatingPointTy()) | |||
41737 | weight = CW_SpecificReg; | |||
41738 | break; | |||
41739 | case 'y': | |||
41740 | if (type->isX86_MMXTy() && Subtarget.hasMMX()) | |||
41741 | weight = CW_SpecificReg; | |||
41742 | break; | |||
41743 | case 'Y': { | |||
41744 | unsigned Size = StringRef(constraint).size(); | |||
41745 | // When matching plain 'Y', pick 'i' as the next char: 'Y' and 'Yi' are synonymous. | |||
41746 | char NextChar = Size == 2 ? constraint[1] : 'i'; | |||
41747 | if (Size > 2) | |||
41748 | break; | |||
41749 | switch (NextChar) { | |||
41750 | default: | |||
41751 | return CW_Invalid; | |||
41752 | // XMM0 | |||
41753 | case 'z': | |||
41754 | case '0': | |||
41755 | if ((type->getPrimitiveSizeInBits() == 128) && Subtarget.hasSSE1()) | |||
41756 | return CW_SpecificReg; | |||
41757 | return CW_Invalid; | |||
41758 | // Conditional OpMask regs (AVX512) | |||
41759 | case 'k': | |||
41760 | if ((type->getPrimitiveSizeInBits() == 64) && Subtarget.hasAVX512()) | |||
41761 | return CW_Register; | |||
41762 | return CW_Invalid; | |||
41763 | // Any MMX reg | |||
41764 | case 'm': | |||
41765 | if (type->isX86_MMXTy() && Subtarget.hasMMX()) | |||
41766 | return weight; | |||
41767 | return CW_Invalid; | |||
41768 | // Any SSE reg when ISA >= SSE2, same as 'Y' | |||
41769 | case 'i': | |||
41770 | case 't': | |||
41771 | case '2': | |||
41772 | if (!Subtarget.hasSSE2()) | |||
41773 | return CW_Invalid; | |||
41774 | break; | |||
41775 | } | |||
41776 | // Fall through (handle "Y" constraint). | |||
41777 | LLVM_FALLTHROUGH; | |||
41778 | } | |||
41779 | case 'v': | |||
41780 | if ((type->getPrimitiveSizeInBits() == 512) && Subtarget.hasAVX512()) | |||
41781 | weight = CW_Register; | |||
41782 | LLVM_FALLTHROUGH; | |||
41783 | case 'x': | |||
41784 | if (((type->getPrimitiveSizeInBits() == 128) && Subtarget.hasSSE1()) || | |||
41785 | ((type->getPrimitiveSizeInBits() == 256) && Subtarget.hasAVX())) | |||
41786 | weight = CW_Register; | |||
41787 | break; | |||
41788 | case 'k': | |||
41789 | // Enable conditional vector operations using %k<#> registers. | |||
41790 | if ((type->getPrimitiveSizeInBits() == 64) && Subtarget.hasAVX512()) | |||
41791 | weight = CW_Register; | |||
41792 | break; | |||
41793 | case 'I': | |||
41794 | if (ConstantInt *C = dyn_cast<ConstantInt>(info.CallOperandVal)) { | |||
41795 | if (C->getZExtValue() <= 31) | |||
41796 | weight = CW_Constant; | |||
41797 | } | |||
41798 | break; | |||
41799 | case 'J': | |||
41800 | if (ConstantInt *C = dyn_cast<ConstantInt>(CallOperandVal)) { | |||
41801 | if (C->getZExtValue() <= 63) | |||
41802 | weight = CW_Constant; | |||
41803 | } | |||
41804 | break; | |||
41805 | case 'K': | |||
41806 | if (ConstantInt *C = dyn_cast<ConstantInt>(CallOperandVal)) { | |||
41807 | if ((C->getSExtValue() >= -0x80) && (C->getSExtValue() <= 0x7f)) | |||
41808 | weight = CW_Constant; | |||
41809 | } | |||
41810 | break; | |||
41811 | case 'L': | |||
41812 | if (ConstantInt *C = dyn_cast<ConstantInt>(CallOperandVal)) { | |||
41813 | if ((C->getZExtValue() == 0xff) || (C->getZExtValue() == 0xffff)) | |||
41814 | weight = CW_Constant; | |||
41815 | } | |||
41816 | break; | |||
41817 | case 'M': | |||
41818 | if (ConstantInt *C = dyn_cast<ConstantInt>(CallOperandVal)) { | |||
41819 | if (C->getZExtValue() <= 3) | |||
41820 | weight = CW_Constant; | |||
41821 | } | |||
41822 | break; | |||
41823 | case 'N': | |||
41824 | if (ConstantInt *C = dyn_cast<ConstantInt>(CallOperandVal)) { | |||
41825 | if (C->getZExtValue() <= 0xff) | |||
41826 | weight = CW_Constant; | |||
41827 | } | |||
41828 | break; | |||
41829 | case 'G': | |||
41830 | case 'C': | |||
41831 | if (isa<ConstantFP>(CallOperandVal)) { | |||
41832 | weight = CW_Constant; | |||
41833 | } | |||
41834 | break; | |||
41835 | case 'e': | |||
41836 | if (ConstantInt *C = dyn_cast<ConstantInt>(CallOperandVal)) { | |||
41837 | if ((C->getSExtValue() >= -0x80000000LL) && | |||
41838 | (C->getSExtValue() <= 0x7fffffffLL)) | |||
41839 | weight = CW_Constant; | |||
41840 | } | |||
41841 | break; | |||
41842 | case 'Z': | |||
41843 | if (ConstantInt *C = dyn_cast<ConstantInt>(CallOperandVal)) { | |||
41844 | if (C->getZExtValue() <= 0xffffffff) | |||
41845 | weight = CW_Constant; | |||
41846 | } | |||
41847 | break; | |||
41848 | } | |||
41849 | return weight; | |||
41850 | } | |||
41851 | ||||
41852 | /// Try to replace an X constraint, which matches anything, with another that | |||
41853 | /// has more specific requirements based on the type of the corresponding | |||
41854 | /// operand. | |||
41855 | const char *X86TargetLowering:: | |||
41856 | LowerXConstraint(EVT ConstraintVT) const { | |||
41857 | // FP X constraints get lowered to SSE1/2 registers if available, otherwise | |||
41858 | // 'f' like normal targets. | |||
41859 | if (ConstraintVT.isFloatingPoint()) { | |||
41860 | if (Subtarget.hasSSE2()) | |||
41861 | return "Y"; | |||
41862 | if (Subtarget.hasSSE1()) | |||
41863 | return "x"; | |||
41864 | } | |||
41865 | ||||
41866 | return TargetLowering::LowerXConstraint(ConstraintVT); | |||
41867 | } | |||
41868 | ||||
41869 | /// Lower the specified operand into the Ops vector. | |||
41870 | /// If it is invalid, don't add anything to Ops. | |||
41871 | void X86TargetLowering::LowerAsmOperandForConstraint(SDValue Op, | |||
41872 | std::string &Constraint, | |||
41873 | std::vector<SDValue>&Ops, | |||
41874 | SelectionDAG &DAG) const { | |||
41875 | SDValue Result; | |||
41876 | ||||
41877 | // Only support length 1 constraints for now. | |||
41878 | if (Constraint.length() > 1) return; | |||
41879 | ||||
41880 | char ConstraintLetter = Constraint[0]; | |||
41881 | switch (ConstraintLetter) { | |||
41882 | default: break; | |||
41883 | case 'I': | |||
41884 | if (ConstantSDNode *C = dyn_cast<ConstantSDNode>(Op)) { | |||
41885 | if (C->getZExtValue() <= 31) { | |||
41886 | Result = DAG.getTargetConstant(C->getZExtValue(), SDLoc(Op), | |||
41887 | Op.getValueType()); | |||
41888 | break; | |||
41889 | } | |||
41890 | } | |||
41891 | return; | |||
41892 | case 'J': | |||
41893 | if (ConstantSDNode *C = dyn_cast<ConstantSDNode>(Op)) { | |||
41894 | if (C->getZExtValue() <= 63) { | |||
41895 | Result = DAG.getTargetConstant(C->getZExtValue(), SDLoc(Op), | |||
41896 | Op.getValueType()); | |||
41897 | break; | |||
41898 | } | |||
41899 | } | |||
41900 | return; | |||
41901 | case 'K': | |||
41902 | if (ConstantSDNode *C = dyn_cast<ConstantSDNode>(Op)) { | |||
41903 | if (isInt<8>(C->getSExtValue())) { | |||
41904 | Result = DAG.getTargetConstant(C->getZExtValue(), SDLoc(Op), | |||
41905 | Op.getValueType()); | |||
41906 | break; | |||
41907 | } | |||
41908 | } | |||
41909 | return; | |||
41910 | case 'L': | |||
41911 | if (ConstantSDNode *C = dyn_cast<ConstantSDNode>(Op)) { | |||
41912 | if (C->getZExtValue() == 0xff || C->getZExtValue() == 0xffff || | |||
41913 | (Subtarget.is64Bit() && C->getZExtValue() == 0xffffffff)) { | |||
41914 | Result = DAG.getTargetConstant(C->getSExtValue(), SDLoc(Op), | |||
41915 | Op.getValueType()); | |||
41916 | break; | |||
41917 | } | |||
41918 | } | |||
41919 | return; | |||
41920 | case 'M': | |||
41921 | if (ConstantSDNode *C = dyn_cast<ConstantSDNode>(Op)) { | |||
41922 | if (C->getZExtValue() <= 3) { | |||
41923 | Result = DAG.getTargetConstant(C->getZExtValue(), SDLoc(Op), | |||
41924 | Op.getValueType()); | |||
41925 | break; | |||
41926 | } | |||
41927 | } | |||
41928 | return; | |||
41929 | case 'N': | |||
41930 | if (ConstantSDNode *C = dyn_cast<ConstantSDNode>(Op)) { | |||
41931 | if (C->getZExtValue() <= 255) { | |||
41932 | Result = DAG.getTargetConstant(C->getZExtValue(), SDLoc(Op), | |||
41933 | Op.getValueType()); | |||
41934 | break; | |||
41935 | } | |||
41936 | } | |||
41937 | return; | |||
41938 | case 'O': | |||
41939 | if (ConstantSDNode *C = dyn_cast<ConstantSDNode>(Op)) { | |||
41940 | if (C->getZExtValue() <= 127) { | |||
41941 | Result = DAG.getTargetConstant(C->getZExtValue(), SDLoc(Op), | |||
41942 | Op.getValueType()); | |||
41943 | break; | |||
41944 | } | |||
41945 | } | |||
41946 | return; | |||
41947 | case 'e': { | |||
41948 | // 32-bit signed value | |||
41949 | if (ConstantSDNode *C = dyn_cast<ConstantSDNode>(Op)) { | |||
41950 | if (ConstantInt::isValueValidForType(Type::getInt32Ty(*DAG.getContext()), | |||
41951 | C->getSExtValue())) { | |||
41952 | // Widen to 64 bits here to get it sign extended. | |||
41953 | Result = DAG.getTargetConstant(C->getSExtValue(), SDLoc(Op), MVT::i64); | |||
41954 | break; | |||
41955 | } | |||
41956 | // FIXME gcc accepts some relocatable values here too, but only in certain | |||
41957 | // memory models; it's complicated. | |||
41958 | } | |||
41959 | return; | |||
41960 | } | |||
41961 | case 'Z': { | |||
41962 | // 32-bit unsigned value | |||
41963 | if (ConstantSDNode *C = dyn_cast<ConstantSDNode>(Op)) { | |||
41964 | if (ConstantInt::isValueValidForType(Type::getInt32Ty(*DAG.getContext()), | |||
41965 | C->getZExtValue())) { | |||
41966 | Result = DAG.getTargetConstant(C->getZExtValue(), SDLoc(Op), | |||
41967 | Op.getValueType()); | |||
41968 | break; | |||
41969 | } | |||
41970 | } | |||
41971 | // FIXME gcc accepts some relocatable values here too, but only in certain | |||
41972 | // memory models; it's complicated. | |||
41973 | return; | |||
41974 | } | |||
41975 | case 'i': { | |||
41976 | // Literal immediates are always ok. | |||
41977 | if (ConstantSDNode *CST = dyn_cast<ConstantSDNode>(Op)) { | |||
41978 | // Widen to 64 bits here to get it sign extended. | |||
41979 | Result = DAG.getTargetConstant(CST->getSExtValue(), SDLoc(Op), MVT::i64); | |||
41980 | break; | |||
41981 | } | |||
41982 | ||||
41983 | // In any sort of PIC mode addresses need to be computed at runtime by | |||
41984 | // adding in a register or some sort of table lookup. These can't | |||
41985 | // be used as immediates. | |||
41986 | if (Subtarget.isPICStyleGOT() || Subtarget.isPICStyleStubPIC()) | |||
41987 | return; | |||
41988 | ||||
41989 | // If we are in non-pic codegen mode, we allow the address of a global (with | |||
41990 | // an optional displacement) to be used with 'i'. | |||
41991 | GlobalAddressSDNode *GA = nullptr; | |||
41992 | int64_t Offset = 0; | |||
41993 | ||||
41994 | // Match either (GA), (GA+C), (GA+C1+C2), etc. | |||
41995 | while (1) { | |||
41996 | if ((GA = dyn_cast<GlobalAddressSDNode>(Op))) { | |||
41997 | Offset += GA->getOffset(); | |||
41998 | break; | |||
41999 | } else if (Op.getOpcode() == ISD::ADD) { | |||
42000 | if (ConstantSDNode *C = dyn_cast<ConstantSDNode>(Op.getOperand(1))) { | |||
42001 | Offset += C->getZExtValue(); | |||
42002 | Op = Op.getOperand(0); | |||
42003 | continue; | |||
42004 | } | |||
42005 | } else if (Op.getOpcode() == ISD::SUB) { | |||
42006 | if (ConstantSDNode *C = dyn_cast<ConstantSDNode>(Op.getOperand(1))) { | |||
42007 | Offset += -C->getZExtValue(); | |||
42008 | Op = Op.getOperand(0); | |||
42009 | continue; | |||
42010 | } | |||
42011 | } | |||
42012 | ||||
42013 | // Otherwise, this isn't something we can handle, reject it. | |||
42014 | return; | |||
42015 | } | |||
42016 | ||||
42017 | const GlobalValue *GV = GA->getGlobal(); | |||
42018 | // If we require an extra load to get this address, as in PIC mode, we | |||
42019 | // can't accept it. | |||
42020 | if (isGlobalStubReference(Subtarget.classifyGlobalReference(GV))) | |||
42021 | return; | |||
42022 | ||||
42023 | Result = DAG.getTargetGlobalAddress(GV, SDLoc(Op), | |||
42024 | GA->getValueType(0), Offset); | |||
42025 | break; | |||
42026 | } | |||
42027 | } | |||
42028 | ||||
42029 | if (Result.getNode()) { | |||
42030 | Ops.push_back(Result); | |||
42031 | return; | |||
42032 | } | |||
42033 | return TargetLowering::LowerAsmOperandForConstraint(Op, Constraint, Ops, DAG); | |||
42034 | } | |||
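// Editor's worked example (hedged) for the 'i' constraint loop above: given
// (add (add (GlobalAddress @g + 4), 8), 16), the loop peels the constants,
// accumulating Offset = 16 + 8, then the GlobalAddressSDNode match adds its
// own offset of 4, producing a target global address of @g + 28.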
42035 | ||||
42036 | /// Check if \p RC is a general purpose register class. | |||
42037 | /// I.e., GR* or one of their variant. | |||
42038 | static bool isGRClass(const TargetRegisterClass &RC) { | |||
42039 | return RC.hasSuperClassEq(&X86::GR8RegClass) || | |||
42040 | RC.hasSuperClassEq(&X86::GR16RegClass) || | |||
42041 | RC.hasSuperClassEq(&X86::GR32RegClass) || | |||
42042 | RC.hasSuperClassEq(&X86::GR64RegClass) || | |||
42043 | RC.hasSuperClassEq(&X86::LOW32_ADDR_ACCESS_RBPRegClass); | |||
42044 | } | |||
42045 | ||||
42046 | /// Check if \p RC is a vector register class. | |||
42047 | /// I.e., FR* / VR* or one of their variant. | |||
42048 | static bool isFRClass(const TargetRegisterClass &RC) { | |||
42049 | return RC.hasSuperClassEq(&X86::FR32XRegClass) || | |||
42050 | RC.hasSuperClassEq(&X86::FR64XRegClass) || | |||
42051 | RC.hasSuperClassEq(&X86::VR128XRegClass) || | |||
42052 | RC.hasSuperClassEq(&X86::VR256XRegClass) || | |||
42053 | RC.hasSuperClassEq(&X86::VR512RegClass); | |||
42054 | } | |||
42055 | ||||
42056 | std::pair<unsigned, const TargetRegisterClass *> | |||
42057 | X86TargetLowering::getRegForInlineAsmConstraint(const TargetRegisterInfo *TRI, | |||
42058 | StringRef Constraint, | |||
42059 | MVT VT) const { | |||
42060 | // First, see if this is a constraint that directly corresponds to an LLVM | |||
42061 | // register class. | |||
42062 | if (Constraint.size() == 1) { | |||
42063 | // GCC Constraint Letters | |||
42064 | switch (Constraint[0]) { | |||
42065 | default: break; | |||
42066 | // TODO: Slight differences here in allocation order and leaving | |||
42067 | // RIP in the class. Do they matter any more here than they do | |||
42068 | // in the normal allocation? | |||
42069 | case 'k': | |||
42070 | if (Subtarget.hasAVX512()) { | |||
42071 | // Only supported in AVX512 or later. | |||
42072 | switch (VT.SimpleTy) { | |||
42073 | default: break; | |||
42074 | case MVT::i32: | |||
42075 | return std::make_pair(0U, &X86::VK32RegClass); | |||
42076 | case MVT::i16: | |||
42077 | return std::make_pair(0U, &X86::VK16RegClass); | |||
42078 | case MVT::i8: | |||
42079 | return std::make_pair(0U, &X86::VK8RegClass); | |||
42080 | case MVT::i1: | |||
42081 | return std::make_pair(0U, &X86::VK1RegClass); | |||
42082 | case MVT::i64: | |||
42083 | return std::make_pair(0U, &X86::VK64RegClass); | |||
42084 | } | |||
42085 | } | |||
42086 | break; | |||
42087 | case 'q': // GENERAL_REGS in 64-bit mode, Q_REGS in 32-bit mode. | |||
42088 | if (Subtarget.is64Bit()) { | |||
42089 | if (VT == MVT::i32 || VT == MVT::f32) | |||
42090 | return std::make_pair(0U, &X86::GR32RegClass); | |||
42091 | if (VT == MVT::i16) | |||
42092 | return std::make_pair(0U, &X86::GR16RegClass); | |||
42093 | if (VT == MVT::i8 || VT == MVT::i1) | |||
42094 | return std::make_pair(0U, &X86::GR8RegClass); | |||
42095 | if (VT == MVT::i64 || VT == MVT::f64) | |||
42096 | return std::make_pair(0U, &X86::GR64RegClass); | |||
42097 | break; | |||
42098 | } | |||
42099 | LLVM_FALLTHROUGH; | |||
42100 | // 32-bit fallthrough | |||
42101 | case 'Q': // Q_REGS | |||
42102 | if (VT == MVT::i32 || VT == MVT::f32) | |||
42103 | return std::make_pair(0U, &X86::GR32_ABCDRegClass); | |||
42104 | if (VT == MVT::i16) | |||
42105 | return std::make_pair(0U, &X86::GR16_ABCDRegClass); | |||
42106 | if (VT == MVT::i8 || VT == MVT::i1) | |||
42107 | return std::make_pair(0U, &X86::GR8_ABCD_LRegClass); | |||
42108 | if (VT == MVT::i64) | |||
42109 | return std::make_pair(0U, &X86::GR64_ABCDRegClass); | |||
42110 | break; | |||
42111 | case 'r': // GENERAL_REGS | |||
42112 | case 'l': // INDEX_REGS | |||
42113 | if (VT == MVT::i8 || VT == MVT::i1) | |||
42114 | return std::make_pair(0U, &X86::GR8RegClass); | |||
42115 | if (VT == MVT::i16) | |||
42116 | return std::make_pair(0U, &X86::GR16RegClass); | |||
42117 | if (VT == MVT::i32 || VT == MVT::f32 || !Subtarget.is64Bit()) | |||
42118 | return std::make_pair(0U, &X86::GR32RegClass); | |||
42119 | return std::make_pair(0U, &X86::GR64RegClass); | |||
42120 | case 'R': // LEGACY_REGS | |||
42121 | if (VT == MVT::i8 || VT == MVT::i1) | |||
42122 | return std::make_pair(0U, &X86::GR8_NOREXRegClass); | |||
42123 | if (VT == MVT::i16) | |||
42124 | return std::make_pair(0U, &X86::GR16_NOREXRegClass); | |||
42125 | if (VT == MVT::i32 || !Subtarget.is64Bit()) | |||
42126 | return std::make_pair(0U, &X86::GR32_NOREXRegClass); | |||
42127 | return std::make_pair(0U, &X86::GR64_NOREXRegClass); | |||
42128 | case 'f': // FP Stack registers. | |||
42129 | // If SSE is enabled for this VT, use f80 to ensure the isel moves the | |||
42130 | // value to the correct fpstack register class. | |||
42131 | if (VT == MVT::f32 && !isScalarFPTypeInSSEReg(VT)) | |||
42132 | return std::make_pair(0U, &X86::RFP32RegClass); | |||
42133 | if (VT == MVT::f64 && !isScalarFPTypeInSSEReg(VT)) | |||
42134 | return std::make_pair(0U, &X86::RFP64RegClass); | |||
42135 | return std::make_pair(0U, &X86::RFP80RegClass); | |||
42136 | case 'y': // MMX_REGS if MMX allowed. | |||
42137 | if (!Subtarget.hasMMX()) break; | |||
42138 | return std::make_pair(0U, &X86::VR64RegClass); | |||
42139 | case 'Y': // SSE_REGS if SSE2 allowed | |||
42140 | if (!Subtarget.hasSSE2()) break; | |||
42141 | LLVM_FALLTHROUGH; | |||
42142 | case 'v': | |||
42143 | case 'x': // SSE_REGS if SSE1 allowed or AVX_REGS if AVX allowed | |||
42144 | if (!Subtarget.hasSSE1()) break; | |||
42145 | bool VConstraint = (Constraint[0] == 'v'); | |||
42146 | ||||
42147 | switch (VT.SimpleTy) { | |||
42148 | default: break; | |||
42149 | // Scalar SSE types. | |||
42150 | case MVT::f32: | |||
42151 | case MVT::i32: | |||
42152 | if (VConstraint && Subtarget.hasAVX512() && Subtarget.hasVLX()) | |||
42153 | return std::make_pair(0U, &X86::FR32XRegClass); | |||
42154 | return std::make_pair(0U, &X86::FR32RegClass); | |||
42155 | case MVT::f64: | |||
42156 | case MVT::i64: | |||
42157 | if (VConstraint && Subtarget.hasVLX()) | |||
42158 | return std::make_pair(0U, &X86::FR64XRegClass); | |||
42159 | return std::make_pair(0U, &X86::FR64RegClass); | |||
42160 | // TODO: Handle f128 and i128 in FR128RegClass after it is tested well. | |||
42161 | // Vector types. | |||
42162 | case MVT::v16i8: | |||
42163 | case MVT::v8i16: | |||
42164 | case MVT::v4i32: | |||
42165 | case MVT::v2i64: | |||
42166 | case MVT::v4f32: | |||
42167 | case MVT::v2f64: | |||
42168 | if (VConstraint && Subtarget.hasVLX()) | |||
42169 | return std::make_pair(0U, &X86::VR128XRegClass); | |||
42170 | return std::make_pair(0U, &X86::VR128RegClass); | |||
42171 | // AVX types. | |||
42172 | case MVT::v32i8: | |||
42173 | case MVT::v16i16: | |||
42174 | case MVT::v8i32: | |||
42175 | case MVT::v4i64: | |||
42176 | case MVT::v8f32: | |||
42177 | case MVT::v4f64: | |||
42178 | if (VConstraint && Subtarget.hasVLX()) | |||
42179 | return std::make_pair(0U, &X86::VR256XRegClass); | |||
42180 | return std::make_pair(0U, &X86::VR256RegClass); | |||
42181 | case MVT::v8f64: | |||
42182 | case MVT::v16f32: | |||
42183 | case MVT::v16i32: | |||
42184 | case MVT::v8i64: | |||
42185 | return std::make_pair(0U, &X86::VR512RegClass); | |||
42186 | } | |||
42187 | break; | |||
42188 | } | |||
42189 | } else if (Constraint.size() == 2 && Constraint[0] == 'Y') { | |||
42190 | switch (Constraint[1]) { | |||
42191 | default: | |||
42192 | break; | |||
42193 | case 'i': | |||
42194 | case 't': | |||
42195 | case '2': | |||
42196 | return getRegForInlineAsmConstraint(TRI, "Y", VT); | |||
42197 | case 'm': | |||
42198 | if (!Subtarget.hasMMX()) break; | |||
42199 | return std::make_pair(0U, &X86::VR64RegClass); | |||
42200 | case 'z': | |||
42201 | case '0': | |||
42202 | if (!Subtarget.hasSSE1()) break; | |||
42203 | return std::make_pair(X86::XMM0, &X86::VR128RegClass); | |||
42204 | case 'k': | |||
42205 | // This register class doesn't allocate k0 for masked vector operations. | |||
42206 | if (Subtarget.hasAVX512()) { // Only supported in AVX512. | |||
42207 | switch (VT.SimpleTy) { | |||
42208 | default: break; | |||
42209 | case MVT::i32: | |||
42210 | return std::make_pair(0U, &X86::VK32WMRegClass); | |||
42211 | case MVT::i16: | |||
42212 | return std::make_pair(0U, &X86::VK16WMRegClass); | |||
42213 | case MVT::i8: | |||
42214 | return std::make_pair(0U, &X86::VK8WMRegClass); | |||
42215 | case MVT::i1: | |||
42216 | return std::make_pair(0U, &X86::VK1WMRegClass); | |||
42217 | case MVT::i64: | |||
42218 | return std::make_pair(0U, &X86::VK64WMRegClass); | |||
42219 | } | |||
42220 | } | |||
42221 | break; | |||
42222 | } | |||
42223 | } | |||
42224 | ||||
42225 | // Use the default implementation in TargetLowering to convert the register | |||
42226 | // constraint into a member of a register class. | |||
42227 | std::pair<unsigned, const TargetRegisterClass*> Res; | |||
42228 | Res = TargetLowering::getRegForInlineAsmConstraint(TRI, Constraint, VT); | |||
42229 | ||||
42230 | // Not found as a standard register? | |||
42231 | if (!Res.second) { | |||
42232 | // Map st(0) -> st(7) -> ST0 | |||
42233 | if (Constraint.size() == 7 && Constraint[0] == '{' && | |||
42234 | tolower(Constraint[1]) == 's' && | |||
42235 | tolower(Constraint[2]) == 't' && | |||
42236 | Constraint[3] == '(' && | |||
42237 | (Constraint[4] >= '0' && Constraint[4] <= '7') && | |||
42238 | Constraint[5] == ')' && | |||
42239 | Constraint[6] == '}') | |||
42240 | return std::make_pair(X86::FP0 + Constraint[4] - '0', | |||
42241 | &X86::RFP80RegClass); | |||
42242 | ||||
42243 | // GCC allows "st(0)" to be called just plain "st". | |||
42244 | if (StringRef("{st}").equals_lower(Constraint)) | |||
42245 | return std::make_pair(X86::FP0, &X86::RFP80RegClass); | |||
42246 | ||||
42247 | // flags -> EFLAGS | |||
42248 | if (StringRef("{flags}").equals_lower(Constraint)) | |||
42249 | return std::make_pair(X86::EFLAGS, &X86::CCRRegClass); | |||
42250 | ||||
42251 | // 'A' means [ER]AX + [ER]DX. | |||
42252 | if (Constraint == "A") { | |||
42253 | if (Subtarget.is64Bit()) | |||
42254 | return std::make_pair(X86::RAX, &X86::GR64_ADRegClass); | |||
42255 | assert((Subtarget.is32Bit() || Subtarget.is16Bit()) && | |||
42256 | "Expecting 64, 32 or 16 bit subtarget"); | |||
42257 | return std::make_pair(X86::EAX, &X86::GR32_ADRegClass); | |||
42258 | } | |||
42259 | return Res; | |||
42260 | } | |||
42261 | ||||
42262 | // Make sure it isn't a register that requires 64-bit mode. | |||
42263 | if (!Subtarget.is64Bit() && | |||
42264 | (isFRClass(*Res.second) || isGRClass(*Res.second)) && | |||
42265 | TRI->getEncodingValue(Res.first) >= 8) { | |||
42266 | // Register requires REX prefix, but we're in 32-bit mode. | |||
42267 | return std::make_pair(0, nullptr); | |||
42268 | } | |||
42269 | ||||
42270 | // Make sure it isn't a register that requires AVX512. | |||
42271 | if (!Subtarget.hasAVX512() && isFRClass(*Res.second) && | |||
42272 | TRI->getEncodingValue(Res.first) & 0x10) { | |||
42273 | // Register requires EVEX prefix. | |||
42274 | return std::make_pair(0, nullptr); | |||
42275 | } | |||
42276 | ||||
42277 | // Otherwise, check to see if this is a register class of the wrong value | |||
42278 | // type. For example, we want to map "{ax},i32" -> {eax}, we don't want it to | |||
42279 | // turn into {ax},{dx}. | |||
42280 | // MVT::Other is used to specify clobber names. | |||
42281 | if (TRI->isTypeLegalForClass(*Res.second, VT) || VT == MVT::Other) | |||
42282 | return Res; // Correct type already, nothing to do. | |||
42283 | ||||
42284 | // Get a matching integer of the correct size. i.e. "ax" with MVT::i32 should | |||
42285 | // return "eax". This should even work for things like getting 64-bit integer | |||
42286 | // registers when given an f64 type. | |||
42287 | const TargetRegisterClass *Class = Res.second; | |||
42288 | // The generic code will match the first register class that contains the | |||
42289 | // given register. Thus, based on the ordering of the tablegened file, | |||
42290 | // the "plain" GR classes might not come first. | |||
42291 | // Therefore, use a helper method. | |||
42292 | if (isGRClass(*Class)) { | |||
42293 | unsigned Size = VT.getSizeInBits(); | |||
42294 | if (Size == 1) Size = 8; | |||
42295 | unsigned DestReg = getX86SubSuperRegisterOrZero(Res.first, Size); | |||
42296 | if (DestReg > 0) { | |||
42297 | bool is64Bit = Subtarget.is64Bit(); | |||
42298 | const TargetRegisterClass *RC = | |||
42299 | Size == 8 ? (is64Bit ? &X86::GR8RegClass : &X86::GR8_NOREXRegClass) | |||
42300 | : Size == 16 ? (is64Bit ? &X86::GR16RegClass : &X86::GR16_NOREXRegClass) | |||
42301 | : Size == 32 ? (is64Bit ? &X86::GR32RegClass : &X86::GR32_NOREXRegClass) | |||
42302 | : Size == 64 ? (is64Bit ? &X86::GR64RegClass : nullptr) | |||
42303 | : nullptr; | |||
42304 | if (Size == 64 && !is64Bit) { | |||
42305 | // Model GCC's behavior here and select a fixed pair of 32-bit | |||
42306 | // registers. | |||
42307 | switch (Res.first) { | |||
42308 | case X86::EAX: | |||
42309 | return std::make_pair(X86::EAX, &X86::GR32_ADRegClass); | |||
42310 | case X86::EDX: | |||
42311 | return std::make_pair(X86::EDX, &X86::GR32_DCRegClass); | |||
42312 | case X86::ECX: | |||
42313 | return std::make_pair(X86::ECX, &X86::GR32_CBRegClass); | |||
42314 | case X86::EBX: | |||
42315 | return std::make_pair(X86::EBX, &X86::GR32_BSIRegClass); | |||
42316 | case X86::ESI: | |||
42317 | return std::make_pair(X86::ESI, &X86::GR32_SIDIRegClass); | |||
42318 | case X86::EDI: | |||
42319 | return std::make_pair(X86::EDI, &X86::GR32_DIBPRegClass); | |||
42320 | case X86::EBP: | |||
42321 | return std::make_pair(X86::EBP, &X86::GR32_BPSPRegClass); | |||
42322 | default: | |||
42323 | return std::make_pair(0, nullptr); | |||
42324 | } | |||
42325 | } | |||
42326 | if (RC && RC->contains(DestReg)) | |||
42327 | return std::make_pair(DestReg, RC); | |||
42328 | return Res; | |||
42329 | } | |||
42330 | // No register found/type mismatch. | |||
42331 | return std::make_pair(0, nullptr); | |||
42332 | } else if (isFRClass(*Class)) { | |||
42333 | // Handle references to XMM physical registers that got mapped into the | |||
42334 | // wrong class. This can happen with constraints like {xmm0} where the | |||
42335 | // target independent register mapper will just pick the first match it can | |||
42336 | // find, ignoring the required type. | |||
42337 | ||||
42338 | // TODO: Handle f128 and i128 in FR128RegClass after it is tested well. | |||
42339 | if (VT == MVT::f32 || VT == MVT::i32) | |||
42340 | Res.second = &X86::FR32RegClass; | |||
42341 | else if (VT == MVT::f64 || VT == MVT::i64) | |||
42342 | Res.second = &X86::FR64RegClass; | |||
42343 | else if (TRI->isTypeLegalForClass(X86::VR128RegClass, VT)) | |||
42344 | Res.second = &X86::VR128RegClass; | |||
42345 | else if (TRI->isTypeLegalForClass(X86::VR256RegClass, VT)) | |||
42346 | Res.second = &X86::VR256RegClass; | |||
42347 | else if (TRI->isTypeLegalForClass(X86::VR512RegClass, VT)) | |||
42348 | Res.second = &X86::VR512RegClass; | |||
42349 | else { | |||
42350 | // Type mismatch and not a clobber: Return an error; | |||
42351 | Res.first = 0; | |||
42352 | Res.second = nullptr; | |||
42353 | } | |||
42354 | } | |||
42355 | ||||
42356 | return Res; | |||
42357 | } | |||
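// Editor's illustration (hedged): a few mappings the routine above produces.
//   "{st(3)}"            -> (X86::FP0 + 3, RFP80) via the st(N) parser.
//   "{ax}" with MVT::i32 -> (X86::EAX, GR32) after resizing through
//                           getX86SubSuperRegisterOrZero.
//   "v" with MVT::v8f64  -> (0, VR512) on an AVX-512 subtarget.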
42358 | ||||
42359 | int X86TargetLowering::getScalingFactorCost(const DataLayout &DL, | |||
42360 | const AddrMode &AM, Type *Ty, | |||
42361 | unsigned AS) const { | |||
42362 | // Scaling factors are not free at all. | |||
42363 | // An indexed folded instruction, i.e., inst (reg1, reg2, scale), | |||
42364 | // will take 2 allocations in the out of order engine instead of 1 | |||
42365 | // for plain addressing mode, i.e. inst (reg1). | |||
42366 | // E.g., | |||
42367 | // vaddps (%rsi,%rdx), %ymm0, %ymm1 | |||
42368 | // Requires two allocations (one for the load, one for the computation) | |||
42369 | // whereas: | |||
42370 | // vaddps (%rsi), %ymm0, %ymm1 | |||
42371 | // Requires just 1 allocation, i.e., freeing allocations for other operations | |||
42372 | // and having less micro operations to execute. | |||
42373 | // | |||
42374 | // For some X86 architectures, this is even worse because for instance for | |||
42375 | // stores, the complex addressing mode forces the instruction to use the | |||
42376 | // "load" ports instead of the dedicated "store" port. | |||
42377 | // E.g., on Haswell: | |||
42378 | // vmovaps %ymm1, (%r8, %rdi) can use port 2 or 3. | |||
42379 | // vmovaps %ymm1, (%r8) can use port 2, 3, or 7. | |||
42380 | if (isLegalAddressingMode(DL, AM, Ty, AS)) | |||
42381 | // Scale represents reg2 * scale, thus account for 1 | |||
42382 | // as soon as we use a second register. | |||
42383 | return AM.Scale != 0; | |||
42384 | return -1; | |||
42385 | } | |||
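// Editor's usage sketch (hedged): for a legal addressing mode the cost is
// just "is an index register used". A mode like {BaseReg, Scale = 2, Index}
// returns 1, {BaseReg} alone returns 0, and anything failing
// isLegalAddressingMode returns -1.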
42386 | ||||
42387 | bool X86TargetLowering::isIntDivCheap(EVT VT, AttributeList Attr) const { | |||
42388 | // Integer division on x86 is expensive. However, when aggressively optimizing | |||
42389 | // for code size, we prefer to use a div instruction, as it is usually smaller | |||
42390 | // than the alternative sequence. | |||
42391 | // The exception to this is vector division. Since x86 doesn't have vector | |||
42392 | // integer division, leaving the division as-is is a loss even in terms of | |||
42393 | // size, because it will have to be scalarized, while the alternative code | |||
42394 | // sequence can be performed in vector form. | |||
42395 | bool OptSize = | |||
42396 | Attr.hasAttribute(AttributeList::FunctionIndex, Attribute::MinSize); | |||
42397 | return OptSize && !VT.isVector(); | |||
42398 | } | |||
42399 | ||||
42400 | void X86TargetLowering::initializeSplitCSR(MachineBasicBlock *Entry) const { | |||
42401 | if (!Subtarget.is64Bit()) | |||
42402 | return; | |||
42403 | ||||
42404 | // Update IsSplitCSR in X86MachineFunctionInfo. | |||
42405 | X86MachineFunctionInfo *AFI = | |||
42406 | Entry->getParent()->getInfo<X86MachineFunctionInfo>(); | |||
42407 | AFI->setIsSplitCSR(true); | |||
42408 | } | |||
42409 | ||||
42410 | void X86TargetLowering::insertCopiesSplitCSR( | |||
42411 | MachineBasicBlock *Entry, | |||
42412 | const SmallVectorImpl<MachineBasicBlock *> &Exits) const { | |||
42413 | const X86RegisterInfo *TRI = Subtarget.getRegisterInfo(); | |||
42414 | const MCPhysReg *IStart = TRI->getCalleeSavedRegsViaCopy(Entry->getParent()); | |||
42415 | if (!IStart) | |||
42416 | return; | |||
42417 | ||||
42418 | const TargetInstrInfo *TII = Subtarget.getInstrInfo(); | |||
42419 | MachineRegisterInfo *MRI = &Entry->getParent()->getRegInfo(); | |||
42420 | MachineBasicBlock::iterator MBBI = Entry->begin(); | |||
42421 | for (const MCPhysReg *I = IStart; *I; ++I) { | |||
42422 | const TargetRegisterClass *RC = nullptr; | |||
42423 | if (X86::GR64RegClass.contains(*I)) | |||
42424 | RC = &X86::GR64RegClass; | |||
42425 | else | |||
42426 | llvm_unreachable("Unexpected register class in CSRsViaCopy!"); | |||
42427 | ||||
42428 | unsigned NewVR = MRI->createVirtualRegister(RC); | |||
42429 | // Create copy from CSR to a virtual register. | |||
42430 | // FIXME: this currently does not emit CFI pseudo-instructions, it works | |||
42431 | // fine for CXX_FAST_TLS since the C++-style TLS access functions should be | |||
42432 | // nounwind. If we want to generalize this later, we may need to emit | |||
42433 | // CFI pseudo-instructions. | |||
42434 | assert(Entry->getParent()->getFunction().hasFnAttribute( | |||
42435 | Attribute::NoUnwind) && | |||
42436 | "Function should be nounwind in insertCopiesSplitCSR!"); | |||
42437 | Entry->addLiveIn(*I); | |||
42438 | BuildMI(*Entry, MBBI, DebugLoc(), TII->get(TargetOpcode::COPY), NewVR) | |||
42439 | .addReg(*I); | |||
42440 | ||||
42441 | // Insert the copy-back instructions right before the terminator. | |||
42442 | for (auto *Exit : Exits) | |||
42443 | BuildMI(*Exit, Exit->getFirstTerminator(), DebugLoc(), | |||
42444 | TII->get(TargetOpcode::COPY), *I) | |||
42445 | .addReg(NewVR); | |||
42446 | } | |||
42447 | } | |||
42448 | ||||
42449 | bool X86TargetLowering::supportSwiftError() const { | |||
42450 | return Subtarget.is64Bit(); | |||
42451 | } | |||
42452 | ||||
42453 | /// Returns the name of the symbol used to emit stack probes or the empty | |||
42454 | /// string if not applicable. | |||
42455 | StringRef X86TargetLowering::getStackProbeSymbolName(MachineFunction &MF) const { | |||
42456 | // If the function specifically requests stack probes, emit them. | |||
42457 | if (MF.getFunction().hasFnAttribute("probe-stack")) | |||
42458 | return MF.getFunction().getFnAttribute("probe-stack").getValueAsString(); | |||
42459 | ||||
42460 | // Generally, if we aren't on Windows, the platform ABI does not include | |||
42461 | // support for stack probes, so don't emit them. | |||
42462 | if (!Subtarget.isOSWindows() || Subtarget.isTargetMachO() || | |||
42463 | MF.getFunction().hasFnAttribute("no-stack-arg-probe")) | |||
42464 | return ""; | |||
42465 | ||||
42466 | // We need a stack probe to conform to the Windows ABI. Choose the right | |||
42467 | // symbol. | |||
42468 | if (Subtarget.is64Bit()) | |||
42469 | return Subtarget.isTargetCygMing() ? "___chkstk_ms" : "__chkstk"; | |||
42470 | return Subtarget.isTargetCygMing() ? "_alloca" : "_chkstk"; | |||
42471 | } |
File: | include/llvm/ADT/SmallBitVector.h |
1 | //===- llvm/ADT/SmallBitVector.h - 'Normally small' bit vectors -*- C++ -*-===//
2 | // |
3 | // The LLVM Compiler Infrastructure |
4 | // |
5 | // This file is distributed under the University of Illinois Open Source |
6 | // License. See LICENSE.TXT for details. |
7 | // |
8 | //===----------------------------------------------------------------------===// |
9 | // |
10 | // This file implements the SmallBitVector class. |
11 | // |
12 | //===----------------------------------------------------------------------===// |
13 | |
14 | #ifndef LLVM_ADT_SMALLBITVECTOR_H |
15 | #define LLVM_ADT_SMALLBITVECTOR_H |
16 | |
17 | #include "llvm/ADT/BitVector.h" |
18 | #include "llvm/ADT/iterator_range.h" |
19 | #include "llvm/Support/MathExtras.h" |
20 | #include <algorithm> |
21 | #include <cassert> |
22 | #include <climits> |
23 | #include <cstddef> |
24 | #include <cstdint> |
25 | #include <limits> |
26 | #include <utility> |
27 | |
28 | namespace llvm { |
29 | |
30 | /// This is a 'bitvector' (really, a variable-sized bit array), optimized for |
31 | /// the case when the array is small. It contains one pointer-sized field, which |
32 | /// is directly used as a plain collection of bits when possible, or as a |
33 | /// pointer to a larger heap-allocated array when necessary. This allows normal |
34 | /// "small" cases to be fast without losing generality for large inputs. |
35 | class SmallBitVector { |
36 | // TODO: In "large" mode, a pointer to a BitVector is used, leading to an |
37 | // unnecessary level of indirection. It would be more efficient to use a |
38 | // pointer to memory containing size, allocation size, and the array of bits. |
39 | uintptr_t X = 1; |
40 | |
41 | enum { |
42 | // The number of bits in this class. |
43 | NumBaseBits = sizeof(uintptr_t) * CHAR_BIT,
44 | |
45 | // One bit is used to discriminate between small and large mode. The |
46 | // remaining bits are used for the small-mode representation. |
47 | SmallNumRawBits = NumBaseBits - 1, |
48 | |
49 | // A few more bits are used to store the size of the bit set in small mode. |
50 | // Theoretically this is a ceil-log2. These bits are encoded in the most |
51 | // significant bits of the raw bits. |
52 | SmallNumSizeBits = (NumBaseBits == 32 ? 5 : |
53 | NumBaseBits == 64 ? 6 : |
54 | SmallNumRawBits), |
55 | |
56 | // The remaining bits are used to store the actual set in small mode. |
57 | SmallNumDataBits = SmallNumRawBits - SmallNumSizeBits |
58 | }; |
59 | |
60 | static_assert(NumBaseBits == 64 || NumBaseBits == 32, |
61 | "Unsupported word size"); |
62 | |
63 | public: |
64 | using size_type = unsigned; |
65 | |
66 | // Encapsulation of a single bit. |
67 | class reference { |
68 | SmallBitVector &TheVector; |
69 | unsigned BitPos; |
70 | |
71 | public: |
72 | reference(SmallBitVector &b, unsigned Idx) : TheVector(b), BitPos(Idx) {} |
73 | |
74 | reference(const reference&) = default; |
75 | |
76 | reference& operator=(reference t) { |
77 | *this = bool(t); |
78 | return *this; |
79 | } |
80 | |
81 | reference& operator=(bool t) { |
82 | if (t) |
83 | TheVector.set(BitPos); |
84 | else |
85 | TheVector.reset(BitPos); |
86 | return *this; |
87 | } |
88 | |
89 | operator bool() const { |
90 | return const_cast<const SmallBitVector &>(TheVector).operator[](BitPos); |
91 | } |
92 | }; |
93 | |
94 | private: |
95 | BitVector *getPointer() const { |
96 | assert(!isSmall());
97 | return reinterpret_cast<BitVector *>(X); |
98 | } |
99 | |
100 | void switchToSmall(uintptr_t NewSmallBits, size_t NewSize) { |
101 | X = 1; |
102 | setSmallSize(NewSize); |
103 | setSmallBits(NewSmallBits); |
104 | } |
105 | |
106 | void switchToLarge(BitVector *BV) { |
107 | X = reinterpret_cast<uintptr_t>(BV); |
108 | assert(!isSmall() && "Tried to use an unaligned pointer");
109 | } |
110 | |
111 | // Return all the bits used for the "small" representation; this includes |
112 | // bits for the size as well as the element bits. |
113 | uintptr_t getSmallRawBits() const { |
114 | assert(isSmall());
115 | return X >> 1; |
116 | } |
117 | |
118 | void setSmallRawBits(uintptr_t NewRawBits) { |
119 | assert(isSmall());
120 | X = (NewRawBits << 1) | uintptr_t(1); |
121 | } |
122 | |
123 | // Return the size. |
124 | size_t getSmallSize() const { return getSmallRawBits() >> SmallNumDataBits; } |
125 | |
126 | void setSmallSize(size_t Size) { |
127 | setSmallRawBits(getSmallBits() | (Size << SmallNumDataBits)); |
128 | } |
129 | |
130 | // Return the element bits. |
131 | uintptr_t getSmallBits() const { |
132 | return getSmallRawBits() & ~(~uintptr_t(0) << getSmallSize()); |
133 | } |
134 | |
135 | void setSmallBits(uintptr_t NewBits) { |
136 | setSmallRawBits((NewBits & ~(~uintptr_t(0) << getSmallSize())) | |
137 | (getSmallSize() << SmallNumDataBits)); |
138 | } |
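// Editor's worked example (hedged, 64-bit host): a small vector of size 5
// holding bits 0b10110 keeps RawBits = (5 << 57) | 0b10110, and the stored
// word is X = (RawBits << 1) | 1, so the low bit always flags small mode.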
139 | |
140 | public: |
141 | /// Creates an empty bitvector. |
142 | SmallBitVector() = default; |
143 | |
144 | /// Creates a bitvector of specified number of bits. All bits are initialized |
145 | /// to the specified value. |
146 | explicit SmallBitVector(unsigned s, bool t = false) { |
147 | if (s <= SmallNumDataBits) |
148 | switchToSmall(t ? ~uintptr_t(0) : 0, s); |
149 | else |
150 | switchToLarge(new BitVector(s, t)); |
151 | } |
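// Editor's usage sketch (hedged, 64-bit host, hypothetical values):
//   SmallBitVector A(16, true); // 16 <= 57 data bits: stays inline.
//   SmallBitVector B(128);      // exceeds 57 bits: heap-allocated BitVector.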
152 | |
153 | /// SmallBitVector copy ctor. |
154 | SmallBitVector(const SmallBitVector &RHS) { |
155 | if (RHS.isSmall()) |
156 | X = RHS.X; |
157 | else |
158 | switchToLarge(new BitVector(*RHS.getPointer())); |
159 | } |
160 | |
161 | SmallBitVector(SmallBitVector &&RHS) : X(RHS.X) { |
162 | RHS.X = 1; |
163 | } |
164 | |
165 | ~SmallBitVector() { |
166 | if (!isSmall()) |
167 | delete getPointer(); |
168 | } |
169 | |
170 | using const_set_bits_iterator = const_set_bits_iterator_impl<SmallBitVector>; |
171 | using set_iterator = const_set_bits_iterator; |
172 | |
173 | const_set_bits_iterator set_bits_begin() const { |
174 | return const_set_bits_iterator(*this); |
175 | } |
176 | |
177 | const_set_bits_iterator set_bits_end() const { |
178 | return const_set_bits_iterator(*this, -1); |
179 | } |
180 | |
181 | iterator_range<const_set_bits_iterator> set_bits() const { |
182 | return make_range(set_bits_begin(), set_bits_end()); |
183 | } |
184 | |
185 | bool isSmall() const { return X & uintptr_t(1); } |
186 | |
187 | /// Tests whether there are no bits in this bitvector. |
188 | bool empty() const { |
189 | return isSmall() ? getSmallSize() == 0 : getPointer()->empty(); |
190 | } |
191 | |
192 | /// Returns the number of bits in this bitvector. |
193 | size_t size() const { |
194 | return isSmall() ? getSmallSize() : getPointer()->size(); |
195 | } |
196 | |
197 | /// Returns the number of bits which are set. |
198 | size_type count() const { |
199 | if (isSmall()) { |
200 | uintptr_t Bits = getSmallBits(); |
201 | return countPopulation(Bits); |
202 | } |
203 | return getPointer()->count(); |
204 | } |
205 | |
206 | /// Returns true if any bit is set. |
207 | bool any() const { |
208 | if (isSmall()) |
209 | return getSmallBits() != 0; |
210 | return getPointer()->any(); |
211 | } |
212 | |
213 | /// Returns true if all bits are set. |
214 | bool all() const { |
215 | if (isSmall()) |
216 | return getSmallBits() == (uintptr_t(1) << getSmallSize()) - 1; |
217 | return getPointer()->all(); |
218 | } |
219 | |
220 | /// Returns true if none of the bits are set. |
221 | bool none() const { |
222 | if (isSmall()) |
223 | return getSmallBits() == 0; |
224 | return getPointer()->none(); |
225 | } |
226 | |
227 | /// Returns the index of the first set bit, -1 if none of the bits are set. |
228 | int find_first() const { |
229 | if (isSmall()) { |
230 | uintptr_t Bits = getSmallBits(); |
231 | if (Bits == 0) |
232 | return -1; |
233 | return countTrailingZeros(Bits); |
234 | } |
235 | return getPointer()->find_first(); |
236 | } |
237 | |
238 | int find_last() const { |
239 | if (isSmall()) { |
240 | uintptr_t Bits = getSmallBits(); |
241 | if (Bits == 0) |
242 | return -1; |
243 | return NumBaseBits - countLeadingZeros(Bits) - 1; |
244 | } |
245 | return getPointer()->find_last(); |
246 | } |
247 | |
248 | /// Returns the index of the first unset bit, -1 if all of the bits are set. |
249 | int find_first_unset() const { |
250 | if (isSmall()) { |
251 | if (count() == getSmallSize()) |
252 | return -1; |
253 | |
254 | uintptr_t Bits = getSmallBits(); |
255 | return countTrailingOnes(Bits); |
256 | } |
257 | return getPointer()->find_first_unset(); |
258 | } |
259 | |
260 | int find_last_unset() const { |
261 | if (isSmall()) { |
262 | if (count() == getSmallSize()) |
263 | return -1; |
264 | |
265 | uintptr_t Bits = getSmallBits(); |
266 | // Set unused bits. |
267 | Bits |= ~uintptr_t(0) << getSmallSize(); |
268 | return NumBaseBits - countLeadingOnes(Bits) - 1; |
269 | } |
270 | return getPointer()->find_last_unset(); |
271 | } |
272 | |
273 | /// Returns the index of the next set bit following the "Prev" bit. |
274 | /// Returns -1 if the next set bit is not found. |
275 | int find_next(unsigned Prev) const { |
276 | if (isSmall()) { |
277 | uintptr_t Bits = getSmallBits(); |
278 | // Mask off previous bits. |
279 | Bits &= ~uintptr_t(0) << (Prev + 1); |
280 | if (Bits == 0 || Prev + 1 >= getSmallSize()) |
281 | return -1; |
282 | return countTrailingZeros(Bits); |
283 | } |
284 | return getPointer()->find_next(Prev); |
285 | } |
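// Editor's worked example (hedged): with small bits 0b10100101 (set bits at
// 0, 2, 5 and 7) and size 8, find_next(2) masks with ~0 << 3, leaving
// 0b10100000, and countTrailingZeros then returns 5.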
286 | |
287 | /// Returns the index of the next unset bit following the "Prev" bit. |
288 | /// Returns -1 if the next unset bit is not found. |
289 | int find_next_unset(unsigned Prev) const { |
290 | if (isSmall()) { |
291 | ++Prev; |
292 | uintptr_t Bits = getSmallBits(); |
293 | // Mask in previous bits. |
294 | uintptr_t Mask = (uintptr_t(1) << Prev) - 1; // Widen before shifting; "1 << Prev" is UB once Prev reaches the bit width of int.
295 | Bits |= Mask; |
296 | |
297 | if (Bits == ~uintptr_t(0) || Prev + 1 >= getSmallSize()) |
298 | return -1; |
299 | return countTrailingOnes(Bits); |
300 | } |
301 | return getPointer()->find_next_unset(Prev); |
302 | } |
303 | |
304 | /// find_prev - Returns the index of the first set bit that precedes
305 | /// the bit at \p PriorTo. Returns -1 if all previous bits are unset.
306 | int find_prev(unsigned PriorTo) const { |
307 | if (isSmall()) { |
308 | if (PriorTo == 0) |
309 | return -1; |
310 | |
311 | --PriorTo; |
312 | uintptr_t Bits = getSmallBits(); |
313 | Bits &= maskTrailingOnes<uintptr_t>(PriorTo + 1); |
314 | if (Bits == 0) |
315 | return -1; |
316 | |
317 | return NumBaseBits - countLeadingZeros(Bits) - 1; |
318 | } |
319 | return getPointer()->find_prev(PriorTo); |
320 | } |
321 | |
322 | /// Clear all bits. |
323 | void clear() { |
324 | if (!isSmall()) |
325 | delete getPointer(); |
326 | switchToSmall(0, 0); |
327 | } |
328 | |
329 | /// Grow or shrink the bitvector. |
330 | void resize(unsigned N, bool t = false) { |
331 | if (!isSmall()) { |
332 | getPointer()->resize(N, t); |
333 | } else if (SmallNumDataBits >= N) { |
334 | uintptr_t NewBits = t ? ~uintptr_t(0) << getSmallSize() : 0; |
335 | setSmallSize(N); |
336 | setSmallBits(NewBits | getSmallBits()); |
337 | } else { |
338 | BitVector *BV = new BitVector(N, t); |
339 | uintptr_t OldBits = getSmallBits(); |
340 | for (size_t i = 0, e = getSmallSize(); i != e; ++i) |
341 | (*BV)[i] = (OldBits >> i) & 1; |
342 | switchToLarge(BV); |
343 | } |
344 | } |
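// Sketch of the small-to-heap promotion (illustrative; on a typical
// 64-bit host the inline capacity SmallNumDataBits is 63):
//
//   llvm::SmallBitVector BV(8, true); // small mode: bits live inline
//   BV.resize(100, true);             // exceeds the inline capacity,
//                                     // so a heap BitVector is built
//                                     // and the 8 old bits are copied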
345 | |
346 | void reserve(unsigned N) { |
347 | if (isSmall()) { |
348 | if (N > SmallNumDataBits) { |
349 | uintptr_t OldBits = getSmallRawBits(); |
350 | size_t SmallSize = getSmallSize(); |
351 | BitVector *BV = new BitVector(SmallSize); |
352 | for (size_t i = 0; i < SmallSize; ++i) |
353 | if ((OldBits >> i) & 1) |
354 | BV->set(i); |
355 | BV->reserve(N); |
356 | switchToLarge(BV); |
357 | } |
358 | } else { |
359 | getPointer()->reserve(N); |
360 | } |
361 | } |
362 | |
363 | // Set, reset, flip |
364 | SmallBitVector &set() { |
365 | if (isSmall()) |
366 | setSmallBits(~uintptr_t(0)); |
367 | else |
368 | getPointer()->set(); |
369 | return *this; |
370 | } |
371 | |
372 | SmallBitVector &set(unsigned Idx) { |
373 | if (isSmall()) { |
374 | assert(Idx <= static_cast<unsigned>( |
375 | std::numeric_limits<uintptr_t>::digits) && |
376 | "undefined behavior"); |
377 | setSmallBits(getSmallBits() | (uintptr_t(1) << Idx)); |
378 | } |
379 | else |
380 | getPointer()->set(Idx); |
381 | return *this; |
382 | } |
383 | |
384 | /// Efficiently set a range of bits in [I, E) |
385 | SmallBitVector &set(unsigned I, unsigned E) { |
386 | assert(I <= E && "Attempted to set backwards range!"); |
387 | assert(E <= size() && "Attempted to set out-of-bounds range!"); |
388 | if (I == E) return *this; |
389 | if (isSmall()) { |
390 | uintptr_t EMask = ((uintptr_t)1) << E; |
391 | uintptr_t IMask = ((uintptr_t)1) << I; |
392 | uintptr_t Mask = EMask - IMask; |
393 | setSmallBits(getSmallBits() | Mask); |
394 | } else |
395 | getPointer()->set(I, E); |
396 | return *this; |
397 | } |
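// Worked example of the mask arithmetic above (illustrative only):
// for set(1, 4), EMask == 0b10000 and IMask == 0b00010, so
// EMask - IMask == 0b01110 -- exactly bits [1, 4) -- which is then
// OR-ed into the small word.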
398 | |
399 | SmallBitVector &reset() { |
400 | if (isSmall()) |
401 | setSmallBits(0); |
402 | else |
403 | getPointer()->reset(); |
404 | return *this; |
405 | } |
406 | |
407 | SmallBitVector &reset(unsigned Idx) { |
408 | if (isSmall()) |
409 | setSmallBits(getSmallBits() & ~(uintptr_t(1) << Idx)); |
410 | else |
411 | getPointer()->reset(Idx); |
412 | return *this; |
413 | } |
414 | |
415 | /// Efficiently reset a range of bits in [I, E) |
416 | SmallBitVector &reset(unsigned I, unsigned E) { |
417 | assert(I <= E && "Attempted to reset backwards range!"); |
418 | assert(E <= size() && "Attempted to reset out-of-bounds range!"); |
419 | if (I == E) return *this; |
420 | if (isSmall()) { |
421 | uintptr_t EMask = ((uintptr_t)1) << E; |
422 | uintptr_t IMask = ((uintptr_t)1) << I; |
423 | uintptr_t Mask = EMask - IMask; |
424 | setSmallBits(getSmallBits() & ~Mask); |
425 | } else |
426 | getPointer()->reset(I, E); |
427 | return *this; |
428 | } |
429 | |
430 | SmallBitVector &flip() { |
431 | if (isSmall()) |
432 | setSmallBits(~getSmallBits()); |
433 | else |
434 | getPointer()->flip(); |
435 | return *this; |
436 | } |
437 | |
438 | SmallBitVector &flip(unsigned Idx) { |
439 | if (isSmall()) |
440 | setSmallBits(getSmallBits() ^ (uintptr_t(1) << Idx)); |
441 | else |
442 | getPointer()->flip(Idx); |
443 | return *this; |
444 | } |
445 | |
446 | // No argument flip. |
447 | SmallBitVector operator~() const { |
448 | return SmallBitVector(*this).flip(); |
449 | } |
450 | |
451 | // Indexing. |
452 | reference operator[](unsigned Idx) { |
453 | assert(Idx < size() && "Out-of-bounds Bit access."); |
454 | return reference(*this, Idx); |
455 | } |
456 | |
457 | bool operator[](unsigned Idx) const { |
458 | assert(Idx < size() && "Out-of-bounds Bit access."); |
459 | if (isSmall()) |
460 | return ((getSmallBits() >> Idx) & 1) != 0; |
461 | return getPointer()->operator[](Idx); |
462 | } |
463 | |
464 | bool test(unsigned Idx) const { |
465 | return (*this)[Idx]; |
466 | } |
467 | |
468 | // Push single bit to end of vector. |
469 | void push_back(bool Val) { |
470 | resize(size() + 1, Val); |
471 | } |
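// Building a vector bit by bit (illustrative only):
//
//   llvm::SmallBitVector BV;        // empty
//   for (bool B : {true, false, true})
//     BV.push_back(B);              // size() == 3; bits 0 and 2 set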
472 | |
473 | /// Test if any common bits are set. |
474 | bool anyCommon(const SmallBitVector &RHS) const { |
475 | if (isSmall() && RHS.isSmall()) |
476 | return (getSmallBits() & RHS.getSmallBits()) != 0; |
477 | if (!isSmall() && !RHS.isSmall()) |
478 | return getPointer()->anyCommon(*RHS.getPointer()); |
479 | |
480 | for (unsigned i = 0, e = std::min(size(), RHS.size()); i != e; ++i) |
481 | if (test(i) && RHS.test(i)) |
482 | return true; |
483 | return false; |
484 | } |
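// Illustrative use: anyCommon() answers "do these sets intersect?"
// without materializing the intersection:
//
//   llvm::SmallBitVector A(8), B(8);
//   A.set(1); B.set(6);
//   assert(!A.anyCommon(B));        // disjoint
//   B.set(1);
//   assert(A.anyCommon(B));         // both contain bit 1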
485 | |
486 | // Comparison operators. |
487 | bool operator==(const SmallBitVector &RHS) const { |
488 | if (size() != RHS.size()) |
489 | return false; |
490 | if (isSmall() && RHS.isSmall()) |
491 | return getSmallBits() == RHS.getSmallBits(); |
492 | else if (!isSmall() && !RHS.isSmall()) |
493 | return *getPointer() == *RHS.getPointer(); |
494 | else { |
495 | for (size_t i = 0, e = size(); i != e; ++i) { |
496 | if ((*this)[i] != RHS[i]) |
497 | return false; |
498 | } |
499 | return true; |
500 | } |
501 | } |
502 | |
503 | bool operator!=(const SmallBitVector &RHS) const { |
504 | return !(*this == RHS); |
505 | } |
506 | |
507 | // Intersection, union, symmetric difference. |
508 | // FIXME: BitVector::operator&= does not resize the LHS, but this does. |
509 | SmallBitVector &operator&=(const SmallBitVector &RHS) { |
510 | resize(std::max(size(), RHS.size())); |
511 | if (isSmall() && RHS.isSmall()) |
512 | setSmallBits(getSmallBits() & RHS.getSmallBits()); |
513 | else if (!isSmall() && !RHS.isSmall()) |
514 | getPointer()->operator&=(*RHS.getPointer()); |
515 | else { |
516 | size_t i, e; |
517 | for (i = 0, e = std::min(size(), RHS.size()); i != e; ++i) |
518 | (*this)[i] = test(i) && RHS.test(i); |
519 | for (e = size(); i != e; ++i) |
520 | reset(i); |
521 | } |
522 | return *this; |
523 | } |
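// Consequence of the FIXME above (illustrative only): the LHS is first
// resized to max(size(), RHS.size()), so
//
//   llvm::SmallBitVector A(4, true), B(8);
//   A &= B;                         // A.size() == 8, all bits clear
//
// whereas BitVector::operator&= would leave A at its old size.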
524 | |
525 | /// Reset bits that are set in RHS. Same as *this &= ~RHS. |
526 | SmallBitVector &reset(const SmallBitVector &RHS) { |
527 | if (isSmall() && RHS.isSmall()) |
528 | setSmallBits(getSmallBits() & ~RHS.getSmallBits()); |
529 | else if (!isSmall() && !RHS.isSmall()) |
530 | getPointer()->reset(*RHS.getPointer()); |
531 | else |
532 | for (unsigned i = 0, e = std::min(size(), RHS.size()); i != e; ++i) |
533 | if (RHS.test(i)) |
534 | reset(i); |
535 | |
536 | return *this; |
537 | } |
538 | |
539 | /// Check whether (*this - RHS) is non-empty: the same as reset(RHS) followed by any(). |
540 | bool test(const SmallBitVector &RHS) const { |
541 | if (isSmall() && RHS.isSmall()) |
542 | return (getSmallBits() & ~RHS.getSmallBits()) != 0; |
543 | if (!isSmall() && !RHS.isSmall()) |
544 | return getPointer()->test(*RHS.getPointer()); |
545 | |
546 | unsigned i, e; |
547 | for (i = 0, e = std::min(size(), RHS.size()); i != e; ++i) |
548 | if (test(i) && !RHS.test(i)) |
549 | return true; |
550 | |
551 | for (e = size(); i != e; ++i) |
552 | if (test(i)) |
553 | return true; |
554 | |
555 | return false; |
556 | } |
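// Illustrative reading of test(RHS) as a negated subset check
// (hypothetical names):
//
//   llvm::SmallBitVector Needed(8), Provided(8);
//   Needed.set(2); Needed.set(5);
//   Provided.set(2);
//   assert(Needed.test(Provided));  // bit 5 is needed but not provided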
557 | |
558 | SmallBitVector &operator|=(const SmallBitVector &RHS) { |
559 | resize(std::max(size(), RHS.size())); |
560 | if (isSmall() && RHS.isSmall()) |
561 | setSmallBits(getSmallBits() | RHS.getSmallBits()); |
562 | else if (!isSmall() && !RHS.isSmall()) |
563 | getPointer()->operator|=(*RHS.getPointer()); |
564 | else { |
565 | for (size_t i = 0, e = RHS.size(); i != e; ++i) |
566 | (*this)[i] = test(i) || RHS.test(i); |
567 | } |
568 | return *this; |
569 | } |
570 | |
571 | SmallBitVector &operator^=(const SmallBitVector &RHS) { |
572 | resize(std::max(size(), RHS.size())); |
573 | if (isSmall() && RHS.isSmall()) |
574 | setSmallBits(getSmallBits() ^ RHS.getSmallBits()); |
575 | else if (!isSmall() && !RHS.isSmall()) |
576 | getPointer()->operator^=(*RHS.getPointer()); |
577 | else { |
578 | for (size_t i = 0, e = RHS.size(); i != e; ++i) |
579 | (*this)[i] = test(i) != RHS.test(i); |
580 | } |
581 | return *this; |
582 | } |
583 | |
584 | SmallBitVector &operator<<=(unsigned N) { |
585 | if (isSmall()) |
586 | setSmallBits(getSmallBits() << N); |
587 | else |
588 | getPointer()->operator<<=(N); |
589 | return *this; |
590 | } |
591 | |
592 | SmallBitVector &operator>>=(unsigned N) { |
593 | if (isSmall()) |
594 | setSmallBits(getSmallBits() >> N); |
595 | else |
596 | getPointer()->operator>>=(N); |
597 | return *this; |
598 | } |
599 | |
600 | // Assignment operators. |
601 | const SmallBitVector &operator=(const SmallBitVector &RHS) { |
602 | if (isSmall()) { |
603 | if (RHS.isSmall()) |
604 | X = RHS.X; |
605 | else |
606 | switchToLarge(new BitVector(*RHS.getPointer())); |
607 | } else { |
608 | if (!RHS.isSmall()) |
609 | *getPointer() = *RHS.getPointer(); |
610 | else { |
611 | delete getPointer(); |
612 | X = RHS.X; |
613 | } |
614 | } |
615 | return *this; |
616 | } |
617 | |
618 | const SmallBitVector &operator=(SmallBitVector &&RHS) { |
619 | if (this != &RHS) { |
620 | clear(); |
621 | swap(RHS); |
622 | } |
623 | return *this; |
624 | } |
625 | |
626 | void swap(SmallBitVector &RHS) { |
627 | std::swap(X, RHS.X); |
628 | } |
629 | |
630 | /// Add '1' bits from Mask to this vector. Don't resize. |
631 | /// This computes "*this |= Mask". |
632 | void setBitsInMask(const uint32_t *Mask, unsigned MaskWords = ~0u) { |
633 | if (isSmall()) |
634 | applyMask<true, false>(Mask, MaskWords); |
635 | else |
636 | getPointer()->setBitsInMask(Mask, MaskWords); |
637 | } |
638 | |
639 | /// Clear any bits in this vector that are set in Mask. Don't resize. |
640 | /// This computes "*this &= ~Mask". |
641 | void clearBitsInMask(const uint32_t *Mask, unsigned MaskWords = ~0u) { |
642 | if (isSmall()) |
643 | applyMask<false, false>(Mask, MaskWords); |
644 | else |
645 | getPointer()->clearBitsInMask(Mask, MaskWords); |
646 | } |
647 | |
648 | /// Set a bit in this vector for every '0' bit in Mask. Don't resize. |
649 | /// This computes "*this |= ~Mask". |
650 | void setBitsNotInMask(const uint32_t *Mask, unsigned MaskWords = ~0u) { |
651 | if (isSmall()) |
652 | applyMask<true, true>(Mask, MaskWords); |
653 | else |
654 | getPointer()->setBitsNotInMask(Mask, MaskWords); |
655 | } |
656 | |
657 | /// Clear a bit in this vector for every '0' bit in Mask. Don't resize. |
658 | /// This computes "*this &= Mask". |
659 | void clearBitsNotInMask(const uint32_t *Mask, unsigned MaskWords = ~0u) { |
660 | if (isSmall()) |
661 | applyMask<false, true>(Mask, MaskWords); |
662 | else |
663 | getPointer()->clearBitsNotInMask(Mask, MaskWords); |
664 | } |
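// The Mask arguments above are arrays of 32-bit words, least
// significant word first. Illustrative use (hypothetical values):
//
//   llvm::SmallBitVector BV(64);
//   uint32_t Mask[2] = {0x1u, 0x80000000u}; // bits 0 and 63
//   BV.setBitsInMask(Mask, 2);
//   assert(BV.test(0) && BV.test(63));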
665 | |
666 | private: |
667 | template <bool AddBits, bool InvertMask> |
668 | void applyMask(const uint32_t *Mask, unsigned MaskWords) { |
669 | assert(MaskWords <= sizeof(uintptr_t) && "Mask is larger than base!"); |
670 | uintptr_t M = Mask[0]; |
671 | if (NumBaseBits == 64 && MaskWords > 1) // don't read past a 1-word mask |
672 | M |= uint64_t(Mask[1]) << 32; |
673 | if (InvertMask) |
674 | M = ~M; |
675 | if (AddBits) |
676 | setSmallBits(getSmallBits() | M); |
677 | else |
678 | setSmallBits(getSmallBits() & ~M); |
679 | } |
680 | }; |
681 | |
682 | inline SmallBitVector |
683 | operator&(const SmallBitVector &LHS, const SmallBitVector &RHS) { |
684 | SmallBitVector Result(LHS); |
685 | Result &= RHS; |
686 | return Result; |
687 | } |
688 | |
689 | inline SmallBitVector |
690 | operator|(const SmallBitVector &LHS, const SmallBitVector &RHS) { |
691 | SmallBitVector Result(LHS); |
692 | Result |= RHS; |
693 | return Result; |
694 | } |
695 | |
696 | inline SmallBitVector |
697 | operator^(const SmallBitVector &LHS, const SmallBitVector &RHS) { |
698 | SmallBitVector Result(LHS); |
699 | Result ^= RHS; |
700 | return Result; |
701 | } |
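// The non-mutating forms compose without touching their operands, e.g. a
// classic dataflow transfer function (illustrative; GenBits, InBits and
// KillBits are hypothetical same-size vectors):
//
//   llvm::SmallBitVector Live = (GenBits | InBits) & ~KillBits;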
702 | |
703 | } // end namespace llvm |
704 | |
705 | namespace std { |
706 | |
707 | /// Implement std::swap in terms of BitVector swap. |
708 | inline void |
709 | swap(llvm::SmallBitVector &LHS, llvm::SmallBitVector &RHS) { |
710 | LHS.swap(RHS); |
711 | } |
712 | |
713 | } // end namespace std |
714 | |
715 | #endif // LLVM_ADT_SMALLBITVECTOR_H |