File: llvm/lib/Target/X86/X86TargetTransformInfo.cpp
Warning: line 3052, column 15: Division by zero
1 | //===-- X86TargetTransformInfo.cpp - X86 specific TTI pass ----------------===// | ||||||
2 | // | ||||||
3 | // Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions. | ||||||
4 | // See https://llvm.org/LICENSE.txt for license information. | ||||||
5 | // SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception | ||||||
6 | // | ||||||
7 | //===----------------------------------------------------------------------===// | ||||||
8 | /// \file | ||||||
9 | /// This file implements a TargetTransformInfo analysis pass specific to the | ||||||
10 | /// X86 target machine. It uses the target's detailed information to provide | ||||||
11 | /// more precise answers to certain TTI queries, while letting the target | ||||||
12 | /// independent and default TTI implementations handle the rest. | ||||||
13 | /// | ||||||
14 | //===----------------------------------------------------------------------===// | ||||||
15 | /// A note about the Cost Model numbers used below: the numbers correspond to
16 | /// some "generic" X86 CPU rather than to a concrete CPU model. Usually the
17 | /// numbers correspond to the CPU where the feature first appeared. For example,
18 | /// if we do Subtarget.hasSSE42() in the lookups below, the cost is based on
19 | /// Nehalem, as that was the first CPU to support that feature level and thus
20 | /// most likely has the worst-case cost.
21 | /// Some examples of other technologies/CPUs: | ||||||
22 | /// SSE 3 - Pentium4 / Athlon64 | ||||||
23 | /// SSE 4.1 - Penryn | ||||||
24 | /// SSE 4.2 - Nehalem | ||||||
25 | /// AVX - Sandy Bridge | ||||||
26 | /// AVX2 - Haswell | ||||||
27 | /// AVX-512 - Xeon Phi / Skylake | ||||||
28 | /// And some examples of instruction target dependent costs (latency) | ||||||
29 | ///                     divss     sqrtss    rsqrtss
30 | ///   AMD K7            11-16     19        3
31 | ///   Piledriver        9-24      13-15     5
32 | ///   Jaguar            14        16        2
33 | ///   Pentium II,III    18        30        2
34 | ///   Nehalem           7-14      7-18      3
35 | ///   Haswell           10-13     11        5
36 | /// TODO: Develop and implement the target dependent cost model and | ||||||
37 | /// specialize cost numbers for different Cost Model Targets such as throughput, | ||||||
38 | /// code size, latency and uop count. | ||||||
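     | /// As a concrete illustration, the SSE42CostTable below charges ISD::FDIV on
     | /// MVT::f32 a cost of 14, matching the worst case of the Nehalem divss
     | /// latency range listed above.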
39 | //===----------------------------------------------------------------------===// | ||||||
40 | |||||||
41 | #include "X86TargetTransformInfo.h" | ||||||
42 | #include "llvm/Analysis/TargetTransformInfo.h" | ||||||
43 | #include "llvm/CodeGen/BasicTTIImpl.h" | ||||||
44 | #include "llvm/CodeGen/CostTable.h" | ||||||
45 | #include "llvm/CodeGen/TargetLowering.h" | ||||||
46 | #include "llvm/IR/IntrinsicInst.h" | ||||||
47 | #include "llvm/Support/Debug.h" | ||||||
48 | |||||||
49 | using namespace llvm; | ||||||
50 | |||||||
51 | #define DEBUG_TYPE "x86tti"
52 | |||||||
53 | //===----------------------------------------------------------------------===// | ||||||
54 | // | ||||||
55 | // X86 cost model. | ||||||
56 | // | ||||||
57 | //===----------------------------------------------------------------------===// | ||||||
58 | |||||||
59 | TargetTransformInfo::PopcntSupportKind | ||||||
60 | X86TTIImpl::getPopcntSupport(unsigned TyWidth) { | ||||||
61 |   assert(isPowerOf2_32(TyWidth) && "Ty width must be power of 2");
62 | // TODO: Currently the __builtin_popcount() implementation using SSE3 | ||||||
63 | // instructions is inefficient. Once the problem is fixed, we should | ||||||
64 | // call ST->hasSSE3() instead of ST->hasPOPCNT(). | ||||||
65 | return ST->hasPOPCNT() ? TTI::PSK_FastHardware : TTI::PSK_Software; | ||||||
66 | } | ||||||
67 | |||||||
68 | llvm::Optional<unsigned> X86TTIImpl::getCacheSize( | ||||||
69 | TargetTransformInfo::CacheLevel Level) const { | ||||||
70 | switch (Level) { | ||||||
71 | case TargetTransformInfo::CacheLevel::L1D: | ||||||
72 | // - Penryn | ||||||
73 | // - Nehalem | ||||||
74 | // - Westmere | ||||||
75 | // - Sandy Bridge | ||||||
76 | // - Ivy Bridge | ||||||
77 | // - Haswell | ||||||
78 | // - Broadwell | ||||||
79 | // - Skylake | ||||||
80 | // - Kabylake | ||||||
81 | return 32 * 1024; // 32 KByte | ||||||
82 | case TargetTransformInfo::CacheLevel::L2D: | ||||||
83 | // - Penryn | ||||||
84 | // - Nehalem | ||||||
85 | // - Westmere | ||||||
86 | // - Sandy Bridge | ||||||
87 | // - Ivy Bridge | ||||||
88 | // - Haswell | ||||||
89 | // - Broadwell | ||||||
90 | // - Skylake | ||||||
91 | // - Kabylake | ||||||
92 | return 256 * 1024; // 256 KByte | ||||||
93 | } | ||||||
94 | |||||||
95 |   llvm_unreachable("Unknown TargetTransformInfo::CacheLevel");
96 | } | ||||||
97 | |||||||
98 | llvm::Optional<unsigned> X86TTIImpl::getCacheAssociativity( | ||||||
99 | TargetTransformInfo::CacheLevel Level) const { | ||||||
100 | // - Penryn | ||||||
101 | // - Nehalem | ||||||
102 | // - Westmere | ||||||
103 | // - Sandy Bridge | ||||||
104 | // - Ivy Bridge | ||||||
105 | // - Haswell | ||||||
106 | // - Broadwell | ||||||
107 | // - Skylake | ||||||
108 | // - Kabylake | ||||||
109 | switch (Level) { | ||||||
110 | case TargetTransformInfo::CacheLevel::L1D: | ||||||
111 |     LLVM_FALLTHROUGH;
112 | case TargetTransformInfo::CacheLevel::L2D: | ||||||
113 | return 8; | ||||||
114 | } | ||||||
115 | |||||||
116 |   llvm_unreachable("Unknown TargetTransformInfo::CacheLevel");
117 | } | ||||||
118 | |||||||
119 | unsigned X86TTIImpl::getNumberOfRegisters(unsigned ClassID) const { | ||||||
120 | bool Vector = (ClassID == 1); | ||||||
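     |   // ClassID 0 requests the scalar (GPR) class and ClassID 1 the vector class;
     |   // e.g. a 64-bit target with AVX-512 reports 32 vector registers below.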
121 | if (Vector && !ST->hasSSE1()) | ||||||
122 | return 0; | ||||||
123 | |||||||
124 | if (ST->is64Bit()) { | ||||||
125 | if (Vector && ST->hasAVX512()) | ||||||
126 | return 32; | ||||||
127 | return 16; | ||||||
128 | } | ||||||
129 | return 8; | ||||||
130 | } | ||||||
131 | |||||||
132 | TypeSize | ||||||
133 | X86TTIImpl::getRegisterBitWidth(TargetTransformInfo::RegisterKind K) const { | ||||||
134 | unsigned PreferVectorWidth = ST->getPreferVectorWidth(); | ||||||
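     |   // PreferVectorWidth caps the width reported for fixed vectors: e.g. with
     |   // -mprefer-vector-width=256 an AVX-512 capable target still answers 256.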
135 | switch (K) { | ||||||
136 | case TargetTransformInfo::RGK_Scalar: | ||||||
137 | return TypeSize::getFixed(ST->is64Bit() ? 64 : 32); | ||||||
138 | case TargetTransformInfo::RGK_FixedWidthVector: | ||||||
139 | if (ST->hasAVX512() && PreferVectorWidth >= 512) | ||||||
140 | return TypeSize::getFixed(512); | ||||||
141 | if (ST->hasAVX() && PreferVectorWidth >= 256) | ||||||
142 | return TypeSize::getFixed(256); | ||||||
143 | if (ST->hasSSE1() && PreferVectorWidth >= 128) | ||||||
144 | return TypeSize::getFixed(128); | ||||||
145 | return TypeSize::getFixed(0); | ||||||
146 | case TargetTransformInfo::RGK_ScalableVector: | ||||||
147 | return TypeSize::getScalable(0); | ||||||
148 | } | ||||||
149 | |||||||
150 |   llvm_unreachable("Unsupported register kind");
151 | } | ||||||
152 | |||||||
153 | unsigned X86TTIImpl::getLoadStoreVecRegBitWidth(unsigned) const { | ||||||
154 | return getRegisterBitWidth(TargetTransformInfo::RGK_FixedWidthVector) | ||||||
155 | .getFixedSize(); | ||||||
156 | } | ||||||
157 | |||||||
158 | unsigned X86TTIImpl::getMaxInterleaveFactor(unsigned VF) { | ||||||
159 |   // If the loop will not be vectorized, don't interleave the loop.
160 |   // Let the regular unroller handle it instead, which saves the overflow
161 |   // check and memory check cost.
162 | if (VF == 1) | ||||||
163 | return 1; | ||||||
164 | |||||||
165 | if (ST->isAtom()) | ||||||
166 | return 1; | ||||||
167 | |||||||
168 | // Sandybridge and Haswell have multiple execution ports and pipelined | ||||||
169 | // vector units. | ||||||
170 | if (ST->hasAVX()) | ||||||
171 | return 4; | ||||||
172 | |||||||
173 | return 2; | ||||||
174 | } | ||||||
175 | |||||||
176 | int X86TTIImpl::getArithmeticInstrCost(unsigned Opcode, Type *Ty, | ||||||
177 | TTI::TargetCostKind CostKind, | ||||||
178 | TTI::OperandValueKind Op1Info, | ||||||
179 | TTI::OperandValueKind Op2Info, | ||||||
180 | TTI::OperandValueProperties Opd1PropInfo, | ||||||
181 | TTI::OperandValueProperties Opd2PropInfo, | ||||||
182 | ArrayRef<const Value *> Args, | ||||||
183 | const Instruction *CxtI) { | ||||||
184 | // TODO: Handle more cost kinds. | ||||||
185 | if (CostKind != TTI::TCK_RecipThroughput) | ||||||
186 | return BaseT::getArithmeticInstrCost(Opcode, Ty, CostKind, Op1Info, | ||||||
187 | Op2Info, Opd1PropInfo, | ||||||
188 | Opd2PropInfo, Args, CxtI); | ||||||
189 | // Legalize the type. | ||||||
190 | std::pair<int, MVT> LT = TLI->getTypeLegalizationCost(DL, Ty); | ||||||
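     |   // LT.first is the legalization cost factor (how many legal-type operations
     |   // one original operation splits into) and LT.second is the legalized MVT;
     |   // e.g. on SSE2 a v8f32 legalizes to 2 x v4f32, so LT.first == 2 and
     |   // LT.second == MVT::v4f32.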
191 | |||||||
192 | int ISD = TLI->InstructionOpcodeToISD(Opcode); | ||||||
193 |   assert(ISD && "Invalid opcode");
194 | |||||||
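     |   // The rest of this function consults per-subtarget cost tables keyed by ISD
     |   // opcode and legalized MVT; a matching entry's cost is scaled by the
     |   // LT.first split factor computed above.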
195 | static const CostTblEntry GLMCostTable[] = { | ||||||
196 | { ISD::FDIV, MVT::f32, 18 }, // divss | ||||||
197 | { ISD::FDIV, MVT::v4f32, 35 }, // divps | ||||||
198 | { ISD::FDIV, MVT::f64, 33 }, // divsd | ||||||
199 | { ISD::FDIV, MVT::v2f64, 65 }, // divpd | ||||||
200 | }; | ||||||
201 | |||||||
202 | if (ST->useGLMDivSqrtCosts()) | ||||||
203 | if (const auto *Entry = CostTableLookup(GLMCostTable, ISD, | ||||||
204 | LT.second)) | ||||||
205 | return LT.first * Entry->Cost; | ||||||
206 | |||||||
207 | static const CostTblEntry SLMCostTable[] = { | ||||||
208 | { ISD::MUL, MVT::v4i32, 11 }, // pmulld | ||||||
209 | { ISD::MUL, MVT::v8i16, 2 }, // pmullw | ||||||
210 | { ISD::MUL, MVT::v16i8, 14 }, // extend/pmullw/trunc sequence. | ||||||
211 | { ISD::FMUL, MVT::f64, 2 }, // mulsd | ||||||
212 | { ISD::FMUL, MVT::v2f64, 4 }, // mulpd | ||||||
213 | { ISD::FMUL, MVT::v4f32, 2 }, // mulps | ||||||
214 | { ISD::FDIV, MVT::f32, 17 }, // divss | ||||||
215 | { ISD::FDIV, MVT::v4f32, 39 }, // divps | ||||||
216 | { ISD::FDIV, MVT::f64, 32 }, // divsd | ||||||
217 | { ISD::FDIV, MVT::v2f64, 69 }, // divpd | ||||||
218 | { ISD::FADD, MVT::v2f64, 2 }, // addpd | ||||||
219 | { ISD::FSUB, MVT::v2f64, 2 }, // subpd | ||||||
220 | // v2i64/v4i64 mul is custom lowered as a series of long: | ||||||
221 | // multiplies(3), shifts(3) and adds(2) | ||||||
222 | // slm muldq version throughput is 2 and addq throughput 4 | ||||||
223 | // thus: 3X2 (muldq throughput) + 3X1 (shift throughput) + | ||||||
224 | // 3X4 (addq throughput) = 17 | ||||||
225 | { ISD::MUL, MVT::v2i64, 17 }, | ||||||
226 | // slm addq\subq throughput is 4 | ||||||
227 | { ISD::ADD, MVT::v2i64, 4 }, | ||||||
228 | { ISD::SUB, MVT::v2i64, 4 }, | ||||||
229 | }; | ||||||
230 | |||||||
231 | if (ST->isSLM()) { | ||||||
232 | if (Args.size() == 2 && ISD == ISD::MUL && LT.second == MVT::v4i32) { | ||||||
233 |       // Check if the operands can be shrunk into a smaller datatype.
234 | bool Op1Signed = false; | ||||||
235 | unsigned Op1MinSize = BaseT::minRequiredElementSize(Args[0], Op1Signed); | ||||||
236 | bool Op2Signed = false; | ||||||
237 | unsigned Op2MinSize = BaseT::minRequiredElementSize(Args[1], Op2Signed); | ||||||
238 | |||||||
239 | bool SignedMode = Op1Signed || Op2Signed; | ||||||
240 | unsigned OpMinSize = std::max(Op1MinSize, Op2MinSize); | ||||||
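     |       // If both multiply operands need at most ~8 bits, a pmullw plus
     |       // extension (cost 3) suffices; at most ~16 bits needs a
     |       // pmullw/pmulhw/pshuf sequence (cost 5). Both beat the pmulld cost of
     |       // 11 in SLMCostTable below.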
241 | |||||||
242 | if (OpMinSize <= 7) | ||||||
243 | return LT.first * 3; // pmullw/sext | ||||||
244 | if (!SignedMode && OpMinSize <= 8) | ||||||
245 | return LT.first * 3; // pmullw/zext | ||||||
246 | if (OpMinSize <= 15) | ||||||
247 | return LT.first * 5; // pmullw/pmulhw/pshuf | ||||||
248 | if (!SignedMode && OpMinSize <= 16) | ||||||
249 | return LT.first * 5; // pmullw/pmulhw/pshuf | ||||||
250 | } | ||||||
251 | |||||||
252 | if (const auto *Entry = CostTableLookup(SLMCostTable, ISD, | ||||||
253 | LT.second)) { | ||||||
254 | return LT.first * Entry->Cost; | ||||||
255 | } | ||||||
256 | } | ||||||
257 | |||||||
258 | if ((ISD == ISD::SDIV || ISD == ISD::SREM || ISD == ISD::UDIV || | ||||||
259 | ISD == ISD::UREM) && | ||||||
260 | (Op2Info == TargetTransformInfo::OK_UniformConstantValue || | ||||||
261 | Op2Info == TargetTransformInfo::OK_NonUniformConstantValue) && | ||||||
262 | Opd2PropInfo == TargetTransformInfo::OP_PowerOf2) { | ||||||
263 | if (ISD == ISD::SDIV || ISD == ISD::SREM) { | ||||||
264 |       // On X86, vector signed division by a power-of-two constant is
265 |       // normally expanded to the sequence SRA + SRL + ADD + SRA.
266 |       // The OperandValue properties may not be the same as that of the
267 |       // previous operation; conservatively assume OP_None.
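     |       // For example, X sdiv 8 on i32 lanes expands roughly to:
     |       //   T = sra X, 31;  T = srl T, 29;  T = add X, T;  Res = sra T, 3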
268 | int Cost = | ||||||
269 | 2 * getArithmeticInstrCost(Instruction::AShr, Ty, CostKind, Op1Info, | ||||||
270 | Op2Info, | ||||||
271 | TargetTransformInfo::OP_None, | ||||||
272 | TargetTransformInfo::OP_None); | ||||||
273 | Cost += getArithmeticInstrCost(Instruction::LShr, Ty, CostKind, Op1Info, | ||||||
274 | Op2Info, | ||||||
275 | TargetTransformInfo::OP_None, | ||||||
276 | TargetTransformInfo::OP_None); | ||||||
277 | Cost += getArithmeticInstrCost(Instruction::Add, Ty, CostKind, Op1Info, | ||||||
278 | Op2Info, | ||||||
279 | TargetTransformInfo::OP_None, | ||||||
280 | TargetTransformInfo::OP_None); | ||||||
281 | |||||||
282 | if (ISD == ISD::SREM) { | ||||||
283 | // For SREM: (X % C) is the equivalent of (X - (X/C)*C) | ||||||
284 | Cost += getArithmeticInstrCost(Instruction::Mul, Ty, CostKind, Op1Info, | ||||||
285 | Op2Info); | ||||||
286 | Cost += getArithmeticInstrCost(Instruction::Sub, Ty, CostKind, Op1Info, | ||||||
287 | Op2Info); | ||||||
288 | } | ||||||
289 | |||||||
290 | return Cost; | ||||||
291 | } | ||||||
292 | |||||||
293 | // Vector unsigned division/remainder will be simplified to shifts/masks. | ||||||
294 | if (ISD == ISD::UDIV) | ||||||
295 | return getArithmeticInstrCost(Instruction::LShr, Ty, CostKind, | ||||||
296 | Op1Info, Op2Info, | ||||||
297 | TargetTransformInfo::OP_None, | ||||||
298 | TargetTransformInfo::OP_None); | ||||||
299 | |||||||
300 | else // UREM | ||||||
301 | return getArithmeticInstrCost(Instruction::And, Ty, CostKind, | ||||||
302 | Op1Info, Op2Info, | ||||||
303 | TargetTransformInfo::OP_None, | ||||||
304 | TargetTransformInfo::OP_None); | ||||||
305 | } | ||||||
306 | |||||||
307 | static const CostTblEntry AVX512BWUniformConstCostTable[] = { | ||||||
308 | { ISD::SHL, MVT::v64i8, 2 }, // psllw + pand. | ||||||
309 | { ISD::SRL, MVT::v64i8, 2 }, // psrlw + pand. | ||||||
310 | { ISD::SRA, MVT::v64i8, 4 }, // psrlw, pand, pxor, psubb. | ||||||
311 | }; | ||||||
312 | |||||||
313 | if (Op2Info == TargetTransformInfo::OK_UniformConstantValue && | ||||||
314 | ST->hasBWI()) { | ||||||
315 | if (const auto *Entry = CostTableLookup(AVX512BWUniformConstCostTable, ISD, | ||||||
316 | LT.second)) | ||||||
317 | return LT.first * Entry->Cost; | ||||||
318 | } | ||||||
319 | |||||||
320 | static const CostTblEntry AVX512UniformConstCostTable[] = { | ||||||
321 | { ISD::SRA, MVT::v2i64, 1 }, | ||||||
322 | { ISD::SRA, MVT::v4i64, 1 }, | ||||||
323 | { ISD::SRA, MVT::v8i64, 1 }, | ||||||
324 | |||||||
325 | { ISD::SHL, MVT::v64i8, 4 }, // psllw + pand. | ||||||
326 | { ISD::SRL, MVT::v64i8, 4 }, // psrlw + pand. | ||||||
327 | { ISD::SRA, MVT::v64i8, 8 }, // psrlw, pand, pxor, psubb. | ||||||
328 | |||||||
329 | { ISD::SDIV, MVT::v16i32, 6 }, // pmuludq sequence | ||||||
330 | { ISD::SREM, MVT::v16i32, 8 }, // pmuludq+mul+sub sequence | ||||||
331 | { ISD::UDIV, MVT::v16i32, 5 }, // pmuludq sequence | ||||||
332 | { ISD::UREM, MVT::v16i32, 7 }, // pmuludq+mul+sub sequence | ||||||
333 | }; | ||||||
334 | |||||||
335 | if (Op2Info == TargetTransformInfo::OK_UniformConstantValue && | ||||||
336 | ST->hasAVX512()) { | ||||||
337 | if (const auto *Entry = CostTableLookup(AVX512UniformConstCostTable, ISD, | ||||||
338 | LT.second)) | ||||||
339 | return LT.first * Entry->Cost; | ||||||
340 | } | ||||||
341 | |||||||
342 | static const CostTblEntry AVX2UniformConstCostTable[] = { | ||||||
343 | { ISD::SHL, MVT::v32i8, 2 }, // psllw + pand. | ||||||
344 | { ISD::SRL, MVT::v32i8, 2 }, // psrlw + pand. | ||||||
345 | { ISD::SRA, MVT::v32i8, 4 }, // psrlw, pand, pxor, psubb. | ||||||
346 | |||||||
347 | { ISD::SRA, MVT::v4i64, 4 }, // 2 x psrad + shuffle. | ||||||
348 | |||||||
349 | { ISD::SDIV, MVT::v8i32, 6 }, // pmuludq sequence | ||||||
350 | { ISD::SREM, MVT::v8i32, 8 }, // pmuludq+mul+sub sequence | ||||||
351 | { ISD::UDIV, MVT::v8i32, 5 }, // pmuludq sequence | ||||||
352 | { ISD::UREM, MVT::v8i32, 7 }, // pmuludq+mul+sub sequence | ||||||
353 | }; | ||||||
354 | |||||||
355 | if (Op2Info == TargetTransformInfo::OK_UniformConstantValue && | ||||||
356 | ST->hasAVX2()) { | ||||||
357 | if (const auto *Entry = CostTableLookup(AVX2UniformConstCostTable, ISD, | ||||||
358 | LT.second)) | ||||||
359 | return LT.first * Entry->Cost; | ||||||
360 | } | ||||||
361 | |||||||
362 | static const CostTblEntry SSE2UniformConstCostTable[] = { | ||||||
363 | { ISD::SHL, MVT::v16i8, 2 }, // psllw + pand. | ||||||
364 | { ISD::SRL, MVT::v16i8, 2 }, // psrlw + pand. | ||||||
365 | { ISD::SRA, MVT::v16i8, 4 }, // psrlw, pand, pxor, psubb. | ||||||
366 | |||||||
367 | { ISD::SHL, MVT::v32i8, 4+2 }, // 2*(psllw + pand) + split. | ||||||
368 | { ISD::SRL, MVT::v32i8, 4+2 }, // 2*(psrlw + pand) + split. | ||||||
369 | { ISD::SRA, MVT::v32i8, 8+2 }, // 2*(psrlw, pand, pxor, psubb) + split. | ||||||
370 | |||||||
371 | { ISD::SDIV, MVT::v8i32, 12+2 }, // 2*pmuludq sequence + split. | ||||||
372 | { ISD::SREM, MVT::v8i32, 16+2 }, // 2*pmuludq+mul+sub sequence + split. | ||||||
373 | { ISD::SDIV, MVT::v4i32, 6 }, // pmuludq sequence | ||||||
374 | { ISD::SREM, MVT::v4i32, 8 }, // pmuludq+mul+sub sequence | ||||||
375 | { ISD::UDIV, MVT::v8i32, 10+2 }, // 2*pmuludq sequence + split. | ||||||
376 | { ISD::UREM, MVT::v8i32, 14+2 }, // 2*pmuludq+mul+sub sequence + split. | ||||||
377 | { ISD::UDIV, MVT::v4i32, 5 }, // pmuludq sequence | ||||||
378 | { ISD::UREM, MVT::v4i32, 7 }, // pmuludq+mul+sub sequence | ||||||
379 | }; | ||||||
380 | |||||||
381 | // XOP has faster vXi8 shifts. | ||||||
382 | if (Op2Info == TargetTransformInfo::OK_UniformConstantValue && | ||||||
383 | ST->hasSSE2() && !ST->hasXOP()) { | ||||||
384 | if (const auto *Entry = | ||||||
385 | CostTableLookup(SSE2UniformConstCostTable, ISD, LT.second)) | ||||||
386 | return LT.first * Entry->Cost; | ||||||
387 | } | ||||||
388 | |||||||
389 | static const CostTblEntry AVX512BWConstCostTable[] = { | ||||||
390 | { ISD::SDIV, MVT::v64i8, 14 }, // 2*ext+2*pmulhw sequence | ||||||
391 | { ISD::SREM, MVT::v64i8, 16 }, // 2*ext+2*pmulhw+mul+sub sequence | ||||||
392 | { ISD::UDIV, MVT::v64i8, 14 }, // 2*ext+2*pmulhw sequence | ||||||
393 | { ISD::UREM, MVT::v64i8, 16 }, // 2*ext+2*pmulhw+mul+sub sequence | ||||||
394 | { ISD::SDIV, MVT::v32i16, 6 }, // vpmulhw sequence | ||||||
395 | { ISD::SREM, MVT::v32i16, 8 }, // vpmulhw+mul+sub sequence | ||||||
396 | { ISD::UDIV, MVT::v32i16, 6 }, // vpmulhuw sequence | ||||||
397 | { ISD::UREM, MVT::v32i16, 8 }, // vpmulhuw+mul+sub sequence | ||||||
398 | }; | ||||||
399 | |||||||
400 | if ((Op2Info == TargetTransformInfo::OK_UniformConstantValue || | ||||||
401 | Op2Info == TargetTransformInfo::OK_NonUniformConstantValue) && | ||||||
402 | ST->hasBWI()) { | ||||||
403 | if (const auto *Entry = | ||||||
404 | CostTableLookup(AVX512BWConstCostTable, ISD, LT.second)) | ||||||
405 | return LT.first * Entry->Cost; | ||||||
406 | } | ||||||
407 | |||||||
408 | static const CostTblEntry AVX512ConstCostTable[] = { | ||||||
409 | { ISD::SDIV, MVT::v16i32, 15 }, // vpmuldq sequence | ||||||
410 | { ISD::SREM, MVT::v16i32, 17 }, // vpmuldq+mul+sub sequence | ||||||
411 | { ISD::UDIV, MVT::v16i32, 15 }, // vpmuludq sequence | ||||||
412 | { ISD::UREM, MVT::v16i32, 17 }, // vpmuludq+mul+sub sequence | ||||||
413 | { ISD::SDIV, MVT::v64i8, 28 }, // 4*ext+4*pmulhw sequence | ||||||
414 | { ISD::SREM, MVT::v64i8, 32 }, // 4*ext+4*pmulhw+mul+sub sequence | ||||||
415 | { ISD::UDIV, MVT::v64i8, 28 }, // 4*ext+4*pmulhw sequence | ||||||
416 | { ISD::UREM, MVT::v64i8, 32 }, // 4*ext+4*pmulhw+mul+sub sequence | ||||||
417 | { ISD::SDIV, MVT::v32i16, 12 }, // 2*vpmulhw sequence | ||||||
418 | { ISD::SREM, MVT::v32i16, 16 }, // 2*vpmulhw+mul+sub sequence | ||||||
419 | { ISD::UDIV, MVT::v32i16, 12 }, // 2*vpmulhuw sequence | ||||||
420 | { ISD::UREM, MVT::v32i16, 16 }, // 2*vpmulhuw+mul+sub sequence | ||||||
421 | }; | ||||||
422 | |||||||
423 | if ((Op2Info == TargetTransformInfo::OK_UniformConstantValue || | ||||||
424 | Op2Info == TargetTransformInfo::OK_NonUniformConstantValue) && | ||||||
425 | ST->hasAVX512()) { | ||||||
426 | if (const auto *Entry = | ||||||
427 | CostTableLookup(AVX512ConstCostTable, ISD, LT.second)) | ||||||
428 | return LT.first * Entry->Cost; | ||||||
429 | } | ||||||
430 | |||||||
431 | static const CostTblEntry AVX2ConstCostTable[] = { | ||||||
432 | { ISD::SDIV, MVT::v32i8, 14 }, // 2*ext+2*pmulhw sequence | ||||||
433 | { ISD::SREM, MVT::v32i8, 16 }, // 2*ext+2*pmulhw+mul+sub sequence | ||||||
434 | { ISD::UDIV, MVT::v32i8, 14 }, // 2*ext+2*pmulhw sequence | ||||||
435 | { ISD::UREM, MVT::v32i8, 16 }, // 2*ext+2*pmulhw+mul+sub sequence | ||||||
436 | { ISD::SDIV, MVT::v16i16, 6 }, // vpmulhw sequence | ||||||
437 | { ISD::SREM, MVT::v16i16, 8 }, // vpmulhw+mul+sub sequence | ||||||
438 | { ISD::UDIV, MVT::v16i16, 6 }, // vpmulhuw sequence | ||||||
439 | { ISD::UREM, MVT::v16i16, 8 }, // vpmulhuw+mul+sub sequence | ||||||
440 | { ISD::SDIV, MVT::v8i32, 15 }, // vpmuldq sequence | ||||||
441 | { ISD::SREM, MVT::v8i32, 19 }, // vpmuldq+mul+sub sequence | ||||||
442 | { ISD::UDIV, MVT::v8i32, 15 }, // vpmuludq sequence | ||||||
443 | { ISD::UREM, MVT::v8i32, 19 }, // vpmuludq+mul+sub sequence | ||||||
444 | }; | ||||||
445 | |||||||
446 | if ((Op2Info == TargetTransformInfo::OK_UniformConstantValue || | ||||||
447 | Op2Info == TargetTransformInfo::OK_NonUniformConstantValue) && | ||||||
448 | ST->hasAVX2()) { | ||||||
449 | if (const auto *Entry = CostTableLookup(AVX2ConstCostTable, ISD, LT.second)) | ||||||
450 | return LT.first * Entry->Cost; | ||||||
451 | } | ||||||
452 | |||||||
453 | static const CostTblEntry SSE2ConstCostTable[] = { | ||||||
454 | { ISD::SDIV, MVT::v32i8, 28+2 }, // 4*ext+4*pmulhw sequence + split. | ||||||
455 | { ISD::SREM, MVT::v32i8, 32+2 }, // 4*ext+4*pmulhw+mul+sub sequence + split. | ||||||
456 | { ISD::SDIV, MVT::v16i8, 14 }, // 2*ext+2*pmulhw sequence | ||||||
457 | { ISD::SREM, MVT::v16i8, 16 }, // 2*ext+2*pmulhw+mul+sub sequence | ||||||
458 | { ISD::UDIV, MVT::v32i8, 28+2 }, // 4*ext+4*pmulhw sequence + split. | ||||||
459 | { ISD::UREM, MVT::v32i8, 32+2 }, // 4*ext+4*pmulhw+mul+sub sequence + split. | ||||||
460 | { ISD::UDIV, MVT::v16i8, 14 }, // 2*ext+2*pmulhw sequence | ||||||
461 | { ISD::UREM, MVT::v16i8, 16 }, // 2*ext+2*pmulhw+mul+sub sequence | ||||||
462 | { ISD::SDIV, MVT::v16i16, 12+2 }, // 2*pmulhw sequence + split. | ||||||
463 | { ISD::SREM, MVT::v16i16, 16+2 }, // 2*pmulhw+mul+sub sequence + split. | ||||||
464 | { ISD::SDIV, MVT::v8i16, 6 }, // pmulhw sequence | ||||||
465 | { ISD::SREM, MVT::v8i16, 8 }, // pmulhw+mul+sub sequence | ||||||
466 | { ISD::UDIV, MVT::v16i16, 12+2 }, // 2*pmulhuw sequence + split. | ||||||
467 | { ISD::UREM, MVT::v16i16, 16+2 }, // 2*pmulhuw+mul+sub sequence + split. | ||||||
468 | { ISD::UDIV, MVT::v8i16, 6 }, // pmulhuw sequence | ||||||
469 | { ISD::UREM, MVT::v8i16, 8 }, // pmulhuw+mul+sub sequence | ||||||
470 | { ISD::SDIV, MVT::v8i32, 38+2 }, // 2*pmuludq sequence + split. | ||||||
471 | { ISD::SREM, MVT::v8i32, 48+2 }, // 2*pmuludq+mul+sub sequence + split. | ||||||
472 | { ISD::SDIV, MVT::v4i32, 19 }, // pmuludq sequence | ||||||
473 | { ISD::SREM, MVT::v4i32, 24 }, // pmuludq+mul+sub sequence | ||||||
474 | { ISD::UDIV, MVT::v8i32, 30+2 }, // 2*pmuludq sequence + split. | ||||||
475 | { ISD::UREM, MVT::v8i32, 40+2 }, // 2*pmuludq+mul+sub sequence + split. | ||||||
476 | { ISD::UDIV, MVT::v4i32, 15 }, // pmuludq sequence | ||||||
477 | { ISD::UREM, MVT::v4i32, 20 }, // pmuludq+mul+sub sequence | ||||||
478 | }; | ||||||
479 | |||||||
480 | if ((Op2Info == TargetTransformInfo::OK_UniformConstantValue || | ||||||
481 | Op2Info == TargetTransformInfo::OK_NonUniformConstantValue) && | ||||||
482 | ST->hasSSE2()) { | ||||||
483 | // pmuldq sequence. | ||||||
484 | if (ISD == ISD::SDIV && LT.second == MVT::v8i32 && ST->hasAVX()) | ||||||
485 | return LT.first * 32; | ||||||
486 | if (ISD == ISD::SREM && LT.second == MVT::v8i32 && ST->hasAVX()) | ||||||
487 | return LT.first * 38; | ||||||
488 | if (ISD == ISD::SDIV && LT.second == MVT::v4i32 && ST->hasSSE41()) | ||||||
489 | return LT.first * 15; | ||||||
490 | if (ISD == ISD::SREM && LT.second == MVT::v4i32 && ST->hasSSE41()) | ||||||
491 | return LT.first * 20; | ||||||
492 | |||||||
493 | if (const auto *Entry = CostTableLookup(SSE2ConstCostTable, ISD, LT.second)) | ||||||
494 | return LT.first * Entry->Cost; | ||||||
495 | } | ||||||
496 | |||||||
497 | static const CostTblEntry AVX512BWShiftCostTable[] = { | ||||||
498 | { ISD::SHL, MVT::v8i16, 1 }, // vpsllvw | ||||||
499 | { ISD::SRL, MVT::v8i16, 1 }, // vpsrlvw | ||||||
500 | { ISD::SRA, MVT::v8i16, 1 }, // vpsravw | ||||||
501 | |||||||
502 | { ISD::SHL, MVT::v16i16, 1 }, // vpsllvw | ||||||
503 | { ISD::SRL, MVT::v16i16, 1 }, // vpsrlvw | ||||||
504 | { ISD::SRA, MVT::v16i16, 1 }, // vpsravw | ||||||
505 | |||||||
506 | { ISD::SHL, MVT::v32i16, 1 }, // vpsllvw | ||||||
507 | { ISD::SRL, MVT::v32i16, 1 }, // vpsrlvw | ||||||
508 | { ISD::SRA, MVT::v32i16, 1 }, // vpsravw | ||||||
509 | }; | ||||||
510 | |||||||
511 | if (ST->hasBWI()) | ||||||
512 | if (const auto *Entry = CostTableLookup(AVX512BWShiftCostTable, ISD, LT.second)) | ||||||
513 | return LT.first * Entry->Cost; | ||||||
514 | |||||||
515 | static const CostTblEntry AVX2UniformCostTable[] = { | ||||||
516 | // Uniform splats are cheaper for the following instructions. | ||||||
517 | { ISD::SHL, MVT::v16i16, 1 }, // psllw. | ||||||
518 | { ISD::SRL, MVT::v16i16, 1 }, // psrlw. | ||||||
519 | { ISD::SRA, MVT::v16i16, 1 }, // psraw. | ||||||
520 | { ISD::SHL, MVT::v32i16, 2 }, // 2*psllw. | ||||||
521 | { ISD::SRL, MVT::v32i16, 2 }, // 2*psrlw. | ||||||
522 | { ISD::SRA, MVT::v32i16, 2 }, // 2*psraw. | ||||||
523 | }; | ||||||
524 | |||||||
525 | if (ST->hasAVX2() && | ||||||
526 | ((Op2Info == TargetTransformInfo::OK_UniformConstantValue) || | ||||||
527 | (Op2Info == TargetTransformInfo::OK_UniformValue))) { | ||||||
528 | if (const auto *Entry = | ||||||
529 | CostTableLookup(AVX2UniformCostTable, ISD, LT.second)) | ||||||
530 | return LT.first * Entry->Cost; | ||||||
531 | } | ||||||
532 | |||||||
533 | static const CostTblEntry SSE2UniformCostTable[] = { | ||||||
534 | // Uniform splats are cheaper for the following instructions. | ||||||
535 | { ISD::SHL, MVT::v8i16, 1 }, // psllw. | ||||||
536 | { ISD::SHL, MVT::v4i32, 1 }, // pslld | ||||||
537 | { ISD::SHL, MVT::v2i64, 1 }, // psllq. | ||||||
538 | |||||||
539 | { ISD::SRL, MVT::v8i16, 1 }, // psrlw. | ||||||
540 | { ISD::SRL, MVT::v4i32, 1 }, // psrld. | ||||||
541 | { ISD::SRL, MVT::v2i64, 1 }, // psrlq. | ||||||
542 | |||||||
543 | { ISD::SRA, MVT::v8i16, 1 }, // psraw. | ||||||
544 | { ISD::SRA, MVT::v4i32, 1 }, // psrad. | ||||||
545 | }; | ||||||
546 | |||||||
547 | if (ST->hasSSE2() && | ||||||
548 | ((Op2Info == TargetTransformInfo::OK_UniformConstantValue) || | ||||||
549 | (Op2Info == TargetTransformInfo::OK_UniformValue))) { | ||||||
550 | if (const auto *Entry = | ||||||
551 | CostTableLookup(SSE2UniformCostTable, ISD, LT.second)) | ||||||
552 | return LT.first * Entry->Cost; | ||||||
553 | } | ||||||
554 | |||||||
555 | static const CostTblEntry AVX512DQCostTable[] = { | ||||||
556 | { ISD::MUL, MVT::v2i64, 1 }, | ||||||
557 | { ISD::MUL, MVT::v4i64, 1 }, | ||||||
558 | { ISD::MUL, MVT::v8i64, 1 } | ||||||
559 | }; | ||||||
560 | |||||||
561 | // Look for AVX512DQ lowering tricks for custom cases. | ||||||
562 | if (ST->hasDQI()) | ||||||
563 | if (const auto *Entry = CostTableLookup(AVX512DQCostTable, ISD, LT.second)) | ||||||
564 | return LT.first * Entry->Cost; | ||||||
565 | |||||||
566 | static const CostTblEntry AVX512BWCostTable[] = { | ||||||
567 | { ISD::SHL, MVT::v64i8, 11 }, // vpblendvb sequence. | ||||||
568 | { ISD::SRL, MVT::v64i8, 11 }, // vpblendvb sequence. | ||||||
569 | { ISD::SRA, MVT::v64i8, 24 }, // vpblendvb sequence. | ||||||
570 | |||||||
571 | { ISD::MUL, MVT::v64i8, 11 }, // extend/pmullw/trunc sequence. | ||||||
572 | { ISD::MUL, MVT::v32i8, 4 }, // extend/pmullw/trunc sequence. | ||||||
573 | { ISD::MUL, MVT::v16i8, 4 }, // extend/pmullw/trunc sequence. | ||||||
574 | }; | ||||||
575 | |||||||
576 | // Look for AVX512BW lowering tricks for custom cases. | ||||||
577 | if (ST->hasBWI()) | ||||||
578 | if (const auto *Entry = CostTableLookup(AVX512BWCostTable, ISD, LT.second)) | ||||||
579 | return LT.first * Entry->Cost; | ||||||
580 | |||||||
581 | static const CostTblEntry AVX512CostTable[] = { | ||||||
582 | { ISD::SHL, MVT::v16i32, 1 }, | ||||||
583 | { ISD::SRL, MVT::v16i32, 1 }, | ||||||
584 | { ISD::SRA, MVT::v16i32, 1 }, | ||||||
585 | |||||||
586 | { ISD::SHL, MVT::v8i64, 1 }, | ||||||
587 | { ISD::SRL, MVT::v8i64, 1 }, | ||||||
588 | |||||||
589 | { ISD::SRA, MVT::v2i64, 1 }, | ||||||
590 | { ISD::SRA, MVT::v4i64, 1 }, | ||||||
591 | { ISD::SRA, MVT::v8i64, 1 }, | ||||||
592 | |||||||
593 | { ISD::MUL, MVT::v64i8, 26 }, // extend/pmullw/trunc sequence. | ||||||
594 | { ISD::MUL, MVT::v32i8, 13 }, // extend/pmullw/trunc sequence. | ||||||
595 | { ISD::MUL, MVT::v16i8, 5 }, // extend/pmullw/trunc sequence. | ||||||
596 | { ISD::MUL, MVT::v16i32, 1 }, // pmulld (Skylake from agner.org) | ||||||
597 | { ISD::MUL, MVT::v8i32, 1 }, // pmulld (Skylake from agner.org) | ||||||
598 | { ISD::MUL, MVT::v4i32, 1 }, // pmulld (Skylake from agner.org) | ||||||
599 | { ISD::MUL, MVT::v8i64, 8 }, // 3*pmuludq/3*shift/2*add | ||||||
600 | |||||||
601 | { ISD::FADD, MVT::v8f64, 1 }, // Skylake from http://www.agner.org/ | ||||||
602 | { ISD::FSUB, MVT::v8f64, 1 }, // Skylake from http://www.agner.org/ | ||||||
603 | { ISD::FMUL, MVT::v8f64, 1 }, // Skylake from http://www.agner.org/ | ||||||
604 | |||||||
605 | { ISD::FADD, MVT::v16f32, 1 }, // Skylake from http://www.agner.org/ | ||||||
606 | { ISD::FSUB, MVT::v16f32, 1 }, // Skylake from http://www.agner.org/ | ||||||
607 | { ISD::FMUL, MVT::v16f32, 1 }, // Skylake from http://www.agner.org/ | ||||||
608 | }; | ||||||
609 | |||||||
610 | if (ST->hasAVX512()) | ||||||
611 | if (const auto *Entry = CostTableLookup(AVX512CostTable, ISD, LT.second)) | ||||||
612 | return LT.first * Entry->Cost; | ||||||
613 | |||||||
614 | static const CostTblEntry AVX2ShiftCostTable[] = { | ||||||
615 |     // Shifts on v4i64/v8i32 are legal on AVX2 even though we declare them as
616 |     // custom, in order to detect the cases where the shift amount is a scalar.
617 | { ISD::SHL, MVT::v4i32, 1 }, | ||||||
618 | { ISD::SRL, MVT::v4i32, 1 }, | ||||||
619 | { ISD::SRA, MVT::v4i32, 1 }, | ||||||
620 | { ISD::SHL, MVT::v8i32, 1 }, | ||||||
621 | { ISD::SRL, MVT::v8i32, 1 }, | ||||||
622 | { ISD::SRA, MVT::v8i32, 1 }, | ||||||
623 | { ISD::SHL, MVT::v2i64, 1 }, | ||||||
624 | { ISD::SRL, MVT::v2i64, 1 }, | ||||||
625 | { ISD::SHL, MVT::v4i64, 1 }, | ||||||
626 | { ISD::SRL, MVT::v4i64, 1 }, | ||||||
627 | }; | ||||||
628 | |||||||
629 | if (ST->hasAVX512()) { | ||||||
630 | if (ISD == ISD::SHL && LT.second == MVT::v32i16 && | ||||||
631 | (Op2Info == TargetTransformInfo::OK_UniformConstantValue || | ||||||
632 | Op2Info == TargetTransformInfo::OK_NonUniformConstantValue)) | ||||||
633 | // On AVX512, a packed v32i16 shift left by a constant build_vector | ||||||
634 | // is lowered into a vector multiply (vpmullw). | ||||||
635 | return getArithmeticInstrCost(Instruction::Mul, Ty, CostKind, | ||||||
636 | Op1Info, Op2Info, | ||||||
637 | TargetTransformInfo::OP_None, | ||||||
638 | TargetTransformInfo::OP_None); | ||||||
639 | } | ||||||
640 | |||||||
641 | // Look for AVX2 lowering tricks. | ||||||
642 | if (ST->hasAVX2()) { | ||||||
643 | if (ISD == ISD::SHL && LT.second == MVT::v16i16 && | ||||||
644 | (Op2Info == TargetTransformInfo::OK_UniformConstantValue || | ||||||
645 | Op2Info == TargetTransformInfo::OK_NonUniformConstantValue)) | ||||||
646 | // On AVX2, a packed v16i16 shift left by a constant build_vector | ||||||
647 | // is lowered into a vector multiply (vpmullw). | ||||||
648 | return getArithmeticInstrCost(Instruction::Mul, Ty, CostKind, | ||||||
649 | Op1Info, Op2Info, | ||||||
650 | TargetTransformInfo::OP_None, | ||||||
651 | TargetTransformInfo::OP_None); | ||||||
652 | |||||||
653 | if (const auto *Entry = CostTableLookup(AVX2ShiftCostTable, ISD, LT.second)) | ||||||
654 | return LT.first * Entry->Cost; | ||||||
655 | } | ||||||
656 | |||||||
657 | static const CostTblEntry XOPShiftCostTable[] = { | ||||||
658 | // 128bit shifts take 1cy, but right shifts require negation beforehand. | ||||||
659 | { ISD::SHL, MVT::v16i8, 1 }, | ||||||
660 | { ISD::SRL, MVT::v16i8, 2 }, | ||||||
661 | { ISD::SRA, MVT::v16i8, 2 }, | ||||||
662 | { ISD::SHL, MVT::v8i16, 1 }, | ||||||
663 | { ISD::SRL, MVT::v8i16, 2 }, | ||||||
664 | { ISD::SRA, MVT::v8i16, 2 }, | ||||||
665 | { ISD::SHL, MVT::v4i32, 1 }, | ||||||
666 | { ISD::SRL, MVT::v4i32, 2 }, | ||||||
667 | { ISD::SRA, MVT::v4i32, 2 }, | ||||||
668 | { ISD::SHL, MVT::v2i64, 1 }, | ||||||
669 | { ISD::SRL, MVT::v2i64, 2 }, | ||||||
670 | { ISD::SRA, MVT::v2i64, 2 }, | ||||||
671 | // 256bit shifts require splitting if AVX2 didn't catch them above. | ||||||
672 | { ISD::SHL, MVT::v32i8, 2+2 }, | ||||||
673 | { ISD::SRL, MVT::v32i8, 4+2 }, | ||||||
674 | { ISD::SRA, MVT::v32i8, 4+2 }, | ||||||
675 | { ISD::SHL, MVT::v16i16, 2+2 }, | ||||||
676 | { ISD::SRL, MVT::v16i16, 4+2 }, | ||||||
677 | { ISD::SRA, MVT::v16i16, 4+2 }, | ||||||
678 | { ISD::SHL, MVT::v8i32, 2+2 }, | ||||||
679 | { ISD::SRL, MVT::v8i32, 4+2 }, | ||||||
680 | { ISD::SRA, MVT::v8i32, 4+2 }, | ||||||
681 | { ISD::SHL, MVT::v4i64, 2+2 }, | ||||||
682 | { ISD::SRL, MVT::v4i64, 4+2 }, | ||||||
683 | { ISD::SRA, MVT::v4i64, 4+2 }, | ||||||
684 | }; | ||||||
685 | |||||||
686 | // Look for XOP lowering tricks. | ||||||
687 | if (ST->hasXOP()) { | ||||||
688 | // If the right shift is constant then we'll fold the negation so | ||||||
689 | // it's as cheap as a left shift. | ||||||
690 | int ShiftISD = ISD; | ||||||
691 | if ((ShiftISD == ISD::SRL || ShiftISD == ISD::SRA) && | ||||||
692 | (Op2Info == TargetTransformInfo::OK_UniformConstantValue || | ||||||
693 | Op2Info == TargetTransformInfo::OK_NonUniformConstantValue)) | ||||||
694 | ShiftISD = ISD::SHL; | ||||||
695 | if (const auto *Entry = | ||||||
696 | CostTableLookup(XOPShiftCostTable, ShiftISD, LT.second)) | ||||||
697 | return LT.first * Entry->Cost; | ||||||
698 | } | ||||||
699 | |||||||
700 | static const CostTblEntry SSE2UniformShiftCostTable[] = { | ||||||
701 | // Uniform splats are cheaper for the following instructions. | ||||||
702 | { ISD::SHL, MVT::v16i16, 2+2 }, // 2*psllw + split. | ||||||
703 | { ISD::SHL, MVT::v8i32, 2+2 }, // 2*pslld + split. | ||||||
704 | { ISD::SHL, MVT::v4i64, 2+2 }, // 2*psllq + split. | ||||||
705 | |||||||
706 | { ISD::SRL, MVT::v16i16, 2+2 }, // 2*psrlw + split. | ||||||
707 | { ISD::SRL, MVT::v8i32, 2+2 }, // 2*psrld + split. | ||||||
708 | { ISD::SRL, MVT::v4i64, 2+2 }, // 2*psrlq + split. | ||||||
709 | |||||||
710 | { ISD::SRA, MVT::v16i16, 2+2 }, // 2*psraw + split. | ||||||
711 | { ISD::SRA, MVT::v8i32, 2+2 }, // 2*psrad + split. | ||||||
712 | { ISD::SRA, MVT::v2i64, 4 }, // 2*psrad + shuffle. | ||||||
713 | { ISD::SRA, MVT::v4i64, 8+2 }, // 2*(2*psrad + shuffle) + split. | ||||||
714 | }; | ||||||
715 | |||||||
716 | if (ST->hasSSE2() && | ||||||
717 | ((Op2Info == TargetTransformInfo::OK_UniformConstantValue) || | ||||||
718 | (Op2Info == TargetTransformInfo::OK_UniformValue))) { | ||||||
719 | |||||||
720 | // Handle AVX2 uniform v4i64 ISD::SRA, it's not worth a table. | ||||||
721 | if (ISD == ISD::SRA && LT.second == MVT::v4i64 && ST->hasAVX2()) | ||||||
722 | return LT.first * 4; // 2*psrad + shuffle. | ||||||
723 | |||||||
724 | if (const auto *Entry = | ||||||
725 | CostTableLookup(SSE2UniformShiftCostTable, ISD, LT.second)) | ||||||
726 | return LT.first * Entry->Cost; | ||||||
727 | } | ||||||
728 | |||||||
729 | if (ISD == ISD::SHL && | ||||||
730 | Op2Info == TargetTransformInfo::OK_NonUniformConstantValue) { | ||||||
731 | MVT VT = LT.second; | ||||||
732 |     // A vector shift left by a non-uniform constant can be lowered
733 |     // into a vector multiply.
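     |     // E.g. shifting i32 lanes left by <0, 1, 2, 3> is equivalent to
     |     // multiplying them by <1, 2, 4, 8>, a constant that can be built at
     |     // compile time.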
734 | if (((VT == MVT::v8i16 || VT == MVT::v4i32) && ST->hasSSE2()) || | ||||||
735 | ((VT == MVT::v16i16 || VT == MVT::v8i32) && ST->hasAVX())) | ||||||
736 | ISD = ISD::MUL; | ||||||
737 | } | ||||||
738 | |||||||
739 | static const CostTblEntry AVX2CostTable[] = { | ||||||
740 | { ISD::SHL, MVT::v32i8, 11 }, // vpblendvb sequence. | ||||||
741 | { ISD::SHL, MVT::v64i8, 22 }, // 2*vpblendvb sequence. | ||||||
742 | { ISD::SHL, MVT::v16i16, 10 }, // extend/vpsrlvd/pack sequence. | ||||||
743 | { ISD::SHL, MVT::v32i16, 20 }, // 2*extend/vpsrlvd/pack sequence. | ||||||
744 | |||||||
745 | { ISD::SRL, MVT::v32i8, 11 }, // vpblendvb sequence. | ||||||
746 | { ISD::SRL, MVT::v64i8, 22 }, // 2*vpblendvb sequence. | ||||||
747 | { ISD::SRL, MVT::v16i16, 10 }, // extend/vpsrlvd/pack sequence. | ||||||
748 | { ISD::SRL, MVT::v32i16, 20 }, // 2*extend/vpsrlvd/pack sequence. | ||||||
749 | |||||||
750 | { ISD::SRA, MVT::v32i8, 24 }, // vpblendvb sequence. | ||||||
751 | { ISD::SRA, MVT::v64i8, 48 }, // 2*vpblendvb sequence. | ||||||
752 | { ISD::SRA, MVT::v16i16, 10 }, // extend/vpsravd/pack sequence. | ||||||
753 | { ISD::SRA, MVT::v32i16, 20 }, // 2*extend/vpsravd/pack sequence. | ||||||
754 | { ISD::SRA, MVT::v2i64, 4 }, // srl/xor/sub sequence. | ||||||
755 | { ISD::SRA, MVT::v4i64, 4 }, // srl/xor/sub sequence. | ||||||
756 | |||||||
757 | { ISD::SUB, MVT::v32i8, 1 }, // psubb | ||||||
758 | { ISD::ADD, MVT::v32i8, 1 }, // paddb | ||||||
759 | { ISD::SUB, MVT::v16i16, 1 }, // psubw | ||||||
760 | { ISD::ADD, MVT::v16i16, 1 }, // paddw | ||||||
761 | { ISD::SUB, MVT::v8i32, 1 }, // psubd | ||||||
762 | { ISD::ADD, MVT::v8i32, 1 }, // paddd | ||||||
763 | { ISD::SUB, MVT::v4i64, 1 }, // psubq | ||||||
764 | { ISD::ADD, MVT::v4i64, 1 }, // paddq | ||||||
765 | |||||||
766 | { ISD::MUL, MVT::v32i8, 17 }, // extend/pmullw/trunc sequence. | ||||||
767 | { ISD::MUL, MVT::v16i8, 7 }, // extend/pmullw/trunc sequence. | ||||||
768 | { ISD::MUL, MVT::v16i16, 1 }, // pmullw | ||||||
769 | { ISD::MUL, MVT::v8i32, 2 }, // pmulld (Haswell from agner.org) | ||||||
770 | { ISD::MUL, MVT::v4i64, 8 }, // 3*pmuludq/3*shift/2*add | ||||||
771 | |||||||
772 | { ISD::FADD, MVT::v4f64, 1 }, // Haswell from http://www.agner.org/ | ||||||
773 | { ISD::FADD, MVT::v8f32, 1 }, // Haswell from http://www.agner.org/ | ||||||
774 | { ISD::FSUB, MVT::v4f64, 1 }, // Haswell from http://www.agner.org/ | ||||||
775 | { ISD::FSUB, MVT::v8f32, 1 }, // Haswell from http://www.agner.org/ | ||||||
776 | { ISD::FMUL, MVT::v4f64, 1 }, // Haswell from http://www.agner.org/ | ||||||
777 | { ISD::FMUL, MVT::v8f32, 1 }, // Haswell from http://www.agner.org/ | ||||||
778 | |||||||
779 | { ISD::FDIV, MVT::f32, 7 }, // Haswell from http://www.agner.org/ | ||||||
780 | { ISD::FDIV, MVT::v4f32, 7 }, // Haswell from http://www.agner.org/ | ||||||
781 | { ISD::FDIV, MVT::v8f32, 14 }, // Haswell from http://www.agner.org/ | ||||||
782 | { ISD::FDIV, MVT::f64, 14 }, // Haswell from http://www.agner.org/ | ||||||
783 | { ISD::FDIV, MVT::v2f64, 14 }, // Haswell from http://www.agner.org/ | ||||||
784 | { ISD::FDIV, MVT::v4f64, 28 }, // Haswell from http://www.agner.org/ | ||||||
785 | }; | ||||||
786 | |||||||
787 | // Look for AVX2 lowering tricks for custom cases. | ||||||
788 | if (ST->hasAVX2()) | ||||||
789 | if (const auto *Entry = CostTableLookup(AVX2CostTable, ISD, LT.second)) | ||||||
790 | return LT.first * Entry->Cost; | ||||||
791 | |||||||
792 | static const CostTblEntry AVX1CostTable[] = { | ||||||
793 | // We don't have to scalarize unsupported ops. We can issue two half-sized | ||||||
794 | // operations and we only need to extract the upper YMM half. | ||||||
795 | // Two ops + 1 extract + 1 insert = 4. | ||||||
796 | { ISD::MUL, MVT::v16i16, 4 }, | ||||||
797 | { ISD::MUL, MVT::v8i32, 4 }, | ||||||
798 | { ISD::SUB, MVT::v32i8, 4 }, | ||||||
799 | { ISD::ADD, MVT::v32i8, 4 }, | ||||||
800 | { ISD::SUB, MVT::v16i16, 4 }, | ||||||
801 | { ISD::ADD, MVT::v16i16, 4 }, | ||||||
802 | { ISD::SUB, MVT::v8i32, 4 }, | ||||||
803 | { ISD::ADD, MVT::v8i32, 4 }, | ||||||
804 | { ISD::SUB, MVT::v4i64, 4 }, | ||||||
805 | { ISD::ADD, MVT::v4i64, 4 }, | ||||||
806 | |||||||
807 | // A v4i64 multiply is custom lowered as two split v2i64 vectors that then | ||||||
808 | // are lowered as a series of long multiplies(3), shifts(3) and adds(2) | ||||||
809 | // Because we believe v4i64 to be a legal type, we must also include the | ||||||
810 | // extract+insert in the cost table. Therefore, the cost here is 18 | ||||||
811 | // instead of 8. | ||||||
812 | { ISD::MUL, MVT::v4i64, 18 }, | ||||||
813 | |||||||
814 | { ISD::MUL, MVT::v32i8, 26 }, // extend/pmullw/trunc sequence. | ||||||
815 | |||||||
816 | { ISD::FDIV, MVT::f32, 14 }, // SNB from http://www.agner.org/ | ||||||
817 | { ISD::FDIV, MVT::v4f32, 14 }, // SNB from http://www.agner.org/ | ||||||
818 | { ISD::FDIV, MVT::v8f32, 28 }, // SNB from http://www.agner.org/ | ||||||
819 | { ISD::FDIV, MVT::f64, 22 }, // SNB from http://www.agner.org/ | ||||||
820 | { ISD::FDIV, MVT::v2f64, 22 }, // SNB from http://www.agner.org/ | ||||||
821 | { ISD::FDIV, MVT::v4f64, 44 }, // SNB from http://www.agner.org/ | ||||||
822 | }; | ||||||
823 | |||||||
824 | if (ST->hasAVX()) | ||||||
825 | if (const auto *Entry = CostTableLookup(AVX1CostTable, ISD, LT.second)) | ||||||
826 | return LT.first * Entry->Cost; | ||||||
827 | |||||||
828 | static const CostTblEntry SSE42CostTable[] = { | ||||||
829 | { ISD::FADD, MVT::f64, 1 }, // Nehalem from http://www.agner.org/ | ||||||
830 | { ISD::FADD, MVT::f32, 1 }, // Nehalem from http://www.agner.org/ | ||||||
831 | { ISD::FADD, MVT::v2f64, 1 }, // Nehalem from http://www.agner.org/ | ||||||
832 | { ISD::FADD, MVT::v4f32, 1 }, // Nehalem from http://www.agner.org/ | ||||||
833 | |||||||
834 | { ISD::FSUB, MVT::f64, 1 }, // Nehalem from http://www.agner.org/ | ||||||
835 | { ISD::FSUB, MVT::f32 , 1 }, // Nehalem from http://www.agner.org/ | ||||||
836 | { ISD::FSUB, MVT::v2f64, 1 }, // Nehalem from http://www.agner.org/ | ||||||
837 | { ISD::FSUB, MVT::v4f32, 1 }, // Nehalem from http://www.agner.org/ | ||||||
838 | |||||||
839 | { ISD::FMUL, MVT::f64, 1 }, // Nehalem from http://www.agner.org/ | ||||||
840 | { ISD::FMUL, MVT::f32, 1 }, // Nehalem from http://www.agner.org/ | ||||||
841 | { ISD::FMUL, MVT::v2f64, 1 }, // Nehalem from http://www.agner.org/ | ||||||
842 | { ISD::FMUL, MVT::v4f32, 1 }, // Nehalem from http://www.agner.org/ | ||||||
843 | |||||||
844 | { ISD::FDIV, MVT::f32, 14 }, // Nehalem from http://www.agner.org/ | ||||||
845 | { ISD::FDIV, MVT::v4f32, 14 }, // Nehalem from http://www.agner.org/ | ||||||
846 | { ISD::FDIV, MVT::f64, 22 }, // Nehalem from http://www.agner.org/ | ||||||
847 | { ISD::FDIV, MVT::v2f64, 22 }, // Nehalem from http://www.agner.org/ | ||||||
848 | }; | ||||||
849 | |||||||
850 | if (ST->hasSSE42()) | ||||||
851 | if (const auto *Entry = CostTableLookup(SSE42CostTable, ISD, LT.second)) | ||||||
852 | return LT.first * Entry->Cost; | ||||||
853 | |||||||
854 | static const CostTblEntry SSE41CostTable[] = { | ||||||
855 | { ISD::SHL, MVT::v16i8, 11 }, // pblendvb sequence. | ||||||
856 | { ISD::SHL, MVT::v32i8, 2*11+2 }, // pblendvb sequence + split. | ||||||
857 | { ISD::SHL, MVT::v8i16, 14 }, // pblendvb sequence. | ||||||
858 | { ISD::SHL, MVT::v16i16, 2*14+2 }, // pblendvb sequence + split. | ||||||
859 | { ISD::SHL, MVT::v4i32, 4 }, // pslld/paddd/cvttps2dq/pmulld | ||||||
860 | { ISD::SHL, MVT::v8i32, 2*4+2 }, // pslld/paddd/cvttps2dq/pmulld + split | ||||||
861 | |||||||
862 | { ISD::SRL, MVT::v16i8, 12 }, // pblendvb sequence. | ||||||
863 | { ISD::SRL, MVT::v32i8, 2*12+2 }, // pblendvb sequence + split. | ||||||
864 | { ISD::SRL, MVT::v8i16, 14 }, // pblendvb sequence. | ||||||
865 | { ISD::SRL, MVT::v16i16, 2*14+2 }, // pblendvb sequence + split. | ||||||
866 | { ISD::SRL, MVT::v4i32, 11 }, // Shift each lane + blend. | ||||||
867 | { ISD::SRL, MVT::v8i32, 2*11+2 }, // Shift each lane + blend + split. | ||||||
868 | |||||||
869 | { ISD::SRA, MVT::v16i8, 24 }, // pblendvb sequence. | ||||||
870 | { ISD::SRA, MVT::v32i8, 2*24+2 }, // pblendvb sequence + split. | ||||||
871 | { ISD::SRA, MVT::v8i16, 14 }, // pblendvb sequence. | ||||||
872 | { ISD::SRA, MVT::v16i16, 2*14+2 }, // pblendvb sequence + split. | ||||||
873 | { ISD::SRA, MVT::v4i32, 12 }, // Shift each lane + blend. | ||||||
874 | { ISD::SRA, MVT::v8i32, 2*12+2 }, // Shift each lane + blend + split. | ||||||
875 | |||||||
876 | { ISD::MUL, MVT::v4i32, 2 } // pmulld (Nehalem from agner.org) | ||||||
877 | }; | ||||||
878 | |||||||
879 | if (ST->hasSSE41()) | ||||||
880 | if (const auto *Entry = CostTableLookup(SSE41CostTable, ISD, LT.second)) | ||||||
881 | return LT.first * Entry->Cost; | ||||||
882 | |||||||
883 | static const CostTblEntry SSE2CostTable[] = { | ||||||
884 | // We don't correctly identify costs of casts because they are marked as | ||||||
885 | // custom. | ||||||
886 | { ISD::SHL, MVT::v16i8, 26 }, // cmpgtb sequence. | ||||||
887 | { ISD::SHL, MVT::v8i16, 32 }, // cmpgtb sequence. | ||||||
888 | { ISD::SHL, MVT::v4i32, 2*5 }, // We optimized this using mul. | ||||||
889 | { ISD::SHL, MVT::v2i64, 4 }, // splat+shuffle sequence. | ||||||
890 | { ISD::SHL, MVT::v4i64, 2*4+2 }, // splat+shuffle sequence + split. | ||||||
891 | |||||||
892 | { ISD::SRL, MVT::v16i8, 26 }, // cmpgtb sequence. | ||||||
893 | { ISD::SRL, MVT::v8i16, 32 }, // cmpgtb sequence. | ||||||
894 | { ISD::SRL, MVT::v4i32, 16 }, // Shift each lane + blend. | ||||||
895 | { ISD::SRL, MVT::v2i64, 4 }, // splat+shuffle sequence. | ||||||
896 | { ISD::SRL, MVT::v4i64, 2*4+2 }, // splat+shuffle sequence + split. | ||||||
897 | |||||||
898 | { ISD::SRA, MVT::v16i8, 54 }, // unpacked cmpgtb sequence. | ||||||
899 | { ISD::SRA, MVT::v8i16, 32 }, // cmpgtb sequence. | ||||||
900 | { ISD::SRA, MVT::v4i32, 16 }, // Shift each lane + blend. | ||||||
901 | { ISD::SRA, MVT::v2i64, 12 }, // srl/xor/sub sequence. | ||||||
902 | { ISD::SRA, MVT::v4i64, 2*12+2 }, // srl/xor/sub sequence+split. | ||||||
903 | |||||||
904 | { ISD::MUL, MVT::v16i8, 12 }, // extend/pmullw/trunc sequence. | ||||||
905 | { ISD::MUL, MVT::v8i16, 1 }, // pmullw | ||||||
906 | { ISD::MUL, MVT::v4i32, 6 }, // 3*pmuludq/4*shuffle | ||||||
907 | { ISD::MUL, MVT::v2i64, 8 }, // 3*pmuludq/3*shift/2*add | ||||||
908 | |||||||
909 | { ISD::FDIV, MVT::f32, 23 }, // Pentium IV from http://www.agner.org/ | ||||||
910 | { ISD::FDIV, MVT::v4f32, 39 }, // Pentium IV from http://www.agner.org/ | ||||||
911 | { ISD::FDIV, MVT::f64, 38 }, // Pentium IV from http://www.agner.org/ | ||||||
912 | { ISD::FDIV, MVT::v2f64, 69 }, // Pentium IV from http://www.agner.org/ | ||||||
913 | |||||||
914 | { ISD::FADD, MVT::f32, 2 }, // Pentium IV from http://www.agner.org/ | ||||||
915 | { ISD::FADD, MVT::f64, 2 }, // Pentium IV from http://www.agner.org/ | ||||||
916 | |||||||
917 | { ISD::FSUB, MVT::f32, 2 }, // Pentium IV from http://www.agner.org/ | ||||||
918 | { ISD::FSUB, MVT::f64, 2 }, // Pentium IV from http://www.agner.org/ | ||||||
919 | }; | ||||||
920 | |||||||
921 | if (ST->hasSSE2()) | ||||||
922 | if (const auto *Entry = CostTableLookup(SSE2CostTable, ISD, LT.second)) | ||||||
923 | return LT.first * Entry->Cost; | ||||||
924 | |||||||
925 | static const CostTblEntry SSE1CostTable[] = { | ||||||
926 | { ISD::FDIV, MVT::f32, 17 }, // Pentium III from http://www.agner.org/ | ||||||
927 | { ISD::FDIV, MVT::v4f32, 34 }, // Pentium III from http://www.agner.org/ | ||||||
928 | |||||||
929 | { ISD::FADD, MVT::f32, 1 }, // Pentium III from http://www.agner.org/ | ||||||
930 | { ISD::FADD, MVT::v4f32, 2 }, // Pentium III from http://www.agner.org/ | ||||||
931 | |||||||
932 | { ISD::FSUB, MVT::f32, 1 }, // Pentium III from http://www.agner.org/ | ||||||
933 | { ISD::FSUB, MVT::v4f32, 2 }, // Pentium III from http://www.agner.org/ | ||||||
934 | |||||||
935 | { ISD::ADD, MVT::i8, 1 }, // Pentium III from http://www.agner.org/ | ||||||
936 | { ISD::ADD, MVT::i16, 1 }, // Pentium III from http://www.agner.org/ | ||||||
937 | { ISD::ADD, MVT::i32, 1 }, // Pentium III from http://www.agner.org/ | ||||||
938 | |||||||
939 | { ISD::SUB, MVT::i8, 1 }, // Pentium III from http://www.agner.org/ | ||||||
940 | { ISD::SUB, MVT::i16, 1 }, // Pentium III from http://www.agner.org/ | ||||||
941 | { ISD::SUB, MVT::i32, 1 }, // Pentium III from http://www.agner.org/ | ||||||
942 | }; | ||||||
943 | |||||||
944 | if (ST->hasSSE1()) | ||||||
945 | if (const auto *Entry = CostTableLookup(SSE1CostTable, ISD, LT.second)) | ||||||
946 | return LT.first * Entry->Cost; | ||||||
947 | |||||||
948 |   // It is not a good idea to vectorize division. We have to scalarize it and
949 |   // in the process we will often end up having to spill regular
950 |   // registers. The overhead of division is going to dominate most kernels
951 |   // anyway, so try hard to prevent vectorization of division - it is
952 |   // generally a bad idea. Assume somewhat arbitrarily that we have to be able
953 |   // to hide "20 cycles" for each lane.
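     |   // E.g. a v4i32 sdiv under SSE4.1 (LT.first == 1, 4 lanes) is charged
     |   // 20 * 1 * 4 = 80 times the scalar sdiv cost.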
954 | if (LT.second.isVector() && (ISD == ISD::SDIV || ISD == ISD::SREM || | ||||||
955 | ISD == ISD::UDIV || ISD == ISD::UREM)) { | ||||||
956 | int ScalarCost = getArithmeticInstrCost( | ||||||
957 | Opcode, Ty->getScalarType(), CostKind, Op1Info, Op2Info, | ||||||
958 | TargetTransformInfo::OP_None, TargetTransformInfo::OP_None); | ||||||
959 | return 20 * LT.first * LT.second.getVectorNumElements() * ScalarCost; | ||||||
960 | } | ||||||
961 | |||||||
962 | // Fallback to the default implementation. | ||||||
963 | return BaseT::getArithmeticInstrCost(Opcode, Ty, CostKind, Op1Info, Op2Info); | ||||||
964 | } | ||||||
965 | |||||||
966 | int X86TTIImpl::getShuffleCost(TTI::ShuffleKind Kind, VectorType *BaseTp, | ||||||
967 | ArrayRef<int> Mask, int Index, | ||||||
968 | VectorType *SubTp) { | ||||||
969 | // 64-bit packed float vectors (v2f32) are widened to type v4f32. | ||||||
970 | // 64-bit packed integer vectors (v2i32) are widened to type v4i32. | ||||||
971 | std::pair<int, MVT> LT = TLI->getTypeLegalizationCost(DL, BaseTp); | ||||||
972 | |||||||
973 | // Treat Transpose as 2-op shuffles - there's no difference in lowering. | ||||||
974 | if (Kind == TTI::SK_Transpose) | ||||||
975 | Kind = TTI::SK_PermuteTwoSrc; | ||||||
976 | |||||||
977 |   // For broadcasts we are splatting the first element from the first input
978 |   // register, so we only need to reference that input, and all the output
979 |   // registers are the same.
980 | if (Kind == TTI::SK_Broadcast) | ||||||
981 | LT.first = 1; | ||||||
982 | |||||||
983 | // Subvector extractions are free if they start at the beginning of a | ||||||
984 | // vector and cheap if the subvectors are aligned. | ||||||
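     |   // E.g. with AVX, extracting the low v2f64 half of a v4f64 (Index 0) is
     |   // free, while extracting the upper half (Index 2) costs SubLT.first, i.e. a
     |   // single vextractf128.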
985 | if (Kind == TTI::SK_ExtractSubvector && LT.second.isVector()) { | ||||||
986 | int NumElts = LT.second.getVectorNumElements(); | ||||||
987 | if ((Index % NumElts) == 0) | ||||||
988 | return 0; | ||||||
989 | std::pair<int, MVT> SubLT = TLI->getTypeLegalizationCost(DL, SubTp); | ||||||
990 | if (SubLT.second.isVector()) { | ||||||
991 | int NumSubElts = SubLT.second.getVectorNumElements(); | ||||||
992 | if ((Index % NumSubElts) == 0 && (NumElts % NumSubElts) == 0) | ||||||
993 | return SubLT.first; | ||||||
994 | // Handle some cases for widening legalization. For now we only handle | ||||||
995 | // cases where the original subvector was naturally aligned and evenly | ||||||
996 | // fit in its legalized subvector type. | ||||||
997 | // FIXME: Remove some of the alignment restrictions. | ||||||
998 | // FIXME: We can use permq for 64-bit or larger extracts from 256-bit | ||||||
999 | // vectors. | ||||||
1000 | int OrigSubElts = cast<FixedVectorType>(SubTp)->getNumElements(); | ||||||
1001 | if (NumSubElts > OrigSubElts && (Index % OrigSubElts) == 0 && | ||||||
1002 | (NumSubElts % OrigSubElts) == 0 && | ||||||
1003 | LT.second.getVectorElementType() == | ||||||
1004 | SubLT.second.getVectorElementType() && | ||||||
1005 | LT.second.getVectorElementType().getSizeInBits() == | ||||||
1006 | BaseTp->getElementType()->getPrimitiveSizeInBits()) { | ||||||
1007 |         assert(NumElts >= NumSubElts && NumElts > OrigSubElts &&
1008 |                "Unexpected number of elements!");
1009 | auto *VecTy = FixedVectorType::get(BaseTp->getElementType(), | ||||||
1010 | LT.second.getVectorNumElements()); | ||||||
1011 | auto *SubTy = FixedVectorType::get(BaseTp->getElementType(), | ||||||
1012 | SubLT.second.getVectorNumElements()); | ||||||
1013 | int ExtractIndex = alignDown((Index % NumElts), NumSubElts); | ||||||
1014 | int ExtractCost = getShuffleCost(TTI::SK_ExtractSubvector, VecTy, None, | ||||||
1015 | ExtractIndex, SubTy); | ||||||
1016 | |||||||
1017 | // If the original size is 32-bits or more, we can use pshufd. Otherwise | ||||||
1018 | // if we have SSSE3 we can use pshufb. | ||||||
1019 | if (SubTp->getPrimitiveSizeInBits() >= 32 || ST->hasSSSE3()) | ||||||
1020 | return ExtractCost + 1; // pshufd or pshufb | ||||||
1021 | |||||||
1022 |         assert(SubTp->getPrimitiveSizeInBits() == 16 &&
1023 |                "Unexpected vector size");
1024 | |||||||
1025 | return ExtractCost + 2; // worst case pshufhw + pshufd | ||||||
1026 | } | ||||||
1027 | } | ||||||
1028 | } | ||||||
1029 | |||||||
1030 | // Handle some common (illegal) sub-vector types as they are often very cheap | ||||||
1031 | // to shuffle even on targets without PSHUFB. | ||||||
1032 | EVT VT = TLI->getValueType(DL, BaseTp); | ||||||
1033 | if (VT.isSimple() && VT.isVector() && VT.getSizeInBits() < 128 && | ||||||
1034 | !ST->hasSSSE3()) { | ||||||
1035 | static const CostTblEntry SSE2SubVectorShuffleTbl[] = { | ||||||
1036 | {TTI::SK_Broadcast, MVT::v4i16, 1}, // pshuflw | ||||||
1037 | {TTI::SK_Broadcast, MVT::v2i16, 1}, // pshuflw | ||||||
1038 | {TTI::SK_Broadcast, MVT::v8i8, 2}, // punpck/pshuflw | ||||||
1039 | {TTI::SK_Broadcast, MVT::v4i8, 2}, // punpck/pshuflw | ||||||
1040 | {TTI::SK_Broadcast, MVT::v2i8, 1}, // punpck | ||||||
1041 | |||||||
1042 | {TTI::SK_Reverse, MVT::v4i16, 1}, // pshuflw | ||||||
1043 | {TTI::SK_Reverse, MVT::v2i16, 1}, // pshuflw | ||||||
1044 | {TTI::SK_Reverse, MVT::v4i8, 3}, // punpck/pshuflw/packus | ||||||
1045 | {TTI::SK_Reverse, MVT::v2i8, 1}, // punpck | ||||||
1046 | |||||||
1047 | {TTI::SK_PermuteTwoSrc, MVT::v4i16, 2}, // punpck/pshuflw | ||||||
1048 | {TTI::SK_PermuteTwoSrc, MVT::v2i16, 2}, // punpck/pshuflw | ||||||
1049 | {TTI::SK_PermuteTwoSrc, MVT::v8i8, 7}, // punpck/pshuflw | ||||||
1050 | {TTI::SK_PermuteTwoSrc, MVT::v4i8, 4}, // punpck/pshuflw | ||||||
1051 | {TTI::SK_PermuteTwoSrc, MVT::v2i8, 2}, // punpck | ||||||
1052 | |||||||
1053 | {TTI::SK_PermuteSingleSrc, MVT::v4i16, 1}, // pshuflw | ||||||
1054 | {TTI::SK_PermuteSingleSrc, MVT::v2i16, 1}, // pshuflw | ||||||
1055 | {TTI::SK_PermuteSingleSrc, MVT::v8i8, 5}, // punpck/pshuflw | ||||||
1056 | {TTI::SK_PermuteSingleSrc, MVT::v4i8, 3}, // punpck/pshuflw | ||||||
1057 | {TTI::SK_PermuteSingleSrc, MVT::v2i8, 1}, // punpck | ||||||
1058 | }; | ||||||
1059 | |||||||
1060 | if (ST->hasSSE2()) | ||||||
1061 | if (const auto *Entry = | ||||||
1062 | CostTableLookup(SSE2SubVectorShuffleTbl, Kind, VT.getSimpleVT())) | ||||||
1063 | return Entry->Cost; | ||||||
1064 | } | ||||||
1065 | |||||||
1066 | // We are going to permute multiple sources, and the result will end up in | ||||||
1067 | // multiple destinations. We provide an accurate cost only for splits where | ||||||
1068 | // the element type remains the same. | ||||||
1069 | if (Kind == TTI::SK_PermuteSingleSrc && LT.first != 1) { | ||||||
1070 | MVT LegalVT = LT.second; | ||||||
1071 | if (LegalVT.isVector() && | ||||||
1072 | LegalVT.getVectorElementType().getSizeInBits() == | ||||||
1073 | BaseTp->getElementType()->getPrimitiveSizeInBits() && | ||||||
1074 | LegalVT.getVectorNumElements() < | ||||||
1075 | cast<FixedVectorType>(BaseTp)->getNumElements()) { | ||||||
1076 | |||||||
1077 | unsigned VecTySize = DL.getTypeStoreSize(BaseTp); | ||||||
1078 | unsigned LegalVTSize = LegalVT.getStoreSize(); | ||||||
1079 | // Number of source vectors after legalization: | ||||||
1080 | unsigned NumOfSrcs = (VecTySize + LegalVTSize - 1) / LegalVTSize; | ||||||
1081 | // Number of destination vectors after legalization: | ||||||
1082 | unsigned NumOfDests = LT.first; | ||||||
1083 | |||||||
1084 | auto *SingleOpTy = FixedVectorType::get(BaseTp->getElementType(), | ||||||
1085 | LegalVT.getVectorNumElements()); | ||||||
1086 | |||||||
1087 | unsigned NumOfShuffles = (NumOfSrcs - 1) * NumOfDests; | ||||||
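| // For example, a v32i8 single-source shuffle on an SSSE3 (pre-AVX) target | ||||||
| // splits into two v16i8 halves: NumOfSrcs == 2 and NumOfDests == 2, so we | ||||||
| // count (2 - 1) * 2 == 2 two-source v16i8 shuffles at cost 3 each, 6 in total. | ||||||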
1088 | return NumOfShuffles * getShuffleCost(TTI::SK_PermuteTwoSrc, SingleOpTy, | ||||||
1089 | None, 0, nullptr); | ||||||
1090 | } | ||||||
1091 | |||||||
1092 | return BaseT::getShuffleCost(Kind, BaseTp, Mask, Index, SubTp); | ||||||
1093 | } | ||||||
1094 | |||||||
1095 | // For 2-input shuffles, we must account for splitting the 2 inputs into many. | ||||||
1096 | if (Kind == TTI::SK_PermuteTwoSrc && LT.first != 1) { | ||||||
1097 | // We assume that source and destination have the same vector type. | ||||||
1098 | int NumOfDests = LT.first; | ||||||
1099 | int NumOfShufflesPerDest = LT.first * 2 - 1; | ||||||
1100 | LT.first = NumOfDests * NumOfShufflesPerDest; | ||||||
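| // For example, a v16i32 two-source shuffle on an AVX2 (no AVX-512) target | ||||||
| // legalizes to two v8i32 halves: NumOfDests == 2 and NumOfShufflesPerDest == 3, | ||||||
| // so LT.first becomes 6 and the v8i32 entry below (cost 3) gives a total of 18. | ||||||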
1101 | } | ||||||
1102 | |||||||
1103 | static const CostTblEntry AVX512VBMIShuffleTbl[] = { | ||||||
1104 | {TTI::SK_Reverse, MVT::v64i8, 1}, // vpermb | ||||||
1105 | {TTI::SK_Reverse, MVT::v32i8, 1}, // vpermb | ||||||
1106 | |||||||
1107 | {TTI::SK_PermuteSingleSrc, MVT::v64i8, 1}, // vpermb | ||||||
1108 | {TTI::SK_PermuteSingleSrc, MVT::v32i8, 1}, // vpermb | ||||||
1109 | |||||||
1110 | {TTI::SK_PermuteTwoSrc, MVT::v64i8, 2}, // vpermt2b | ||||||
1111 | {TTI::SK_PermuteTwoSrc, MVT::v32i8, 2}, // vpermt2b | ||||||
1112 | {TTI::SK_PermuteTwoSrc, MVT::v16i8, 2} // vpermt2b | ||||||
1113 | }; | ||||||
1114 | |||||||
1115 | if (ST->hasVBMI()) | ||||||
1116 | if (const auto *Entry = | ||||||
1117 | CostTableLookup(AVX512VBMIShuffleTbl, Kind, LT.second)) | ||||||
1118 | return LT.first * Entry->Cost; | ||||||
1119 | |||||||
1120 | static const CostTblEntry AVX512BWShuffleTbl[] = { | ||||||
1121 | {TTI::SK_Broadcast, MVT::v32i16, 1}, // vpbroadcastw | ||||||
1122 | {TTI::SK_Broadcast, MVT::v64i8, 1}, // vpbroadcastb | ||||||
1123 | |||||||
1124 | {TTI::SK_Reverse, MVT::v32i16, 2}, // vpermw | ||||||
1125 | {TTI::SK_Reverse, MVT::v16i16, 2}, // vpermw | ||||||
1126 | {TTI::SK_Reverse, MVT::v64i8, 2}, // pshufb + vshufi64x2 | ||||||
1127 | |||||||
1128 | {TTI::SK_PermuteSingleSrc, MVT::v32i16, 2}, // vpermw | ||||||
1129 | {TTI::SK_PermuteSingleSrc, MVT::v16i16, 2}, // vpermw | ||||||
1130 | {TTI::SK_PermuteSingleSrc, MVT::v64i8, 8}, // extend to v32i16 | ||||||
1131 | |||||||
1132 | {TTI::SK_PermuteTwoSrc, MVT::v32i16, 2}, // vpermt2w | ||||||
1133 | {TTI::SK_PermuteTwoSrc, MVT::v16i16, 2}, // vpermt2w | ||||||
1134 | {TTI::SK_PermuteTwoSrc, MVT::v8i16, 2}, // vpermt2w | ||||||
1135 | {TTI::SK_PermuteTwoSrc, MVT::v64i8, 19}, // 6 * v32i8 + 1 | ||||||
1136 | |||||||
1137 | {TTI::SK_Select, MVT::v32i16, 1}, // vblendmw | ||||||
1138 | {TTI::SK_Select, MVT::v64i8, 1}, // vblendmb | ||||||
1139 | }; | ||||||
1140 | |||||||
1141 | if (ST->hasBWI()) | ||||||
1142 | if (const auto *Entry = | ||||||
1143 | CostTableLookup(AVX512BWShuffleTbl, Kind, LT.second)) | ||||||
1144 | return LT.first * Entry->Cost; | ||||||
1145 | |||||||
1146 | static const CostTblEntry AVX512ShuffleTbl[] = { | ||||||
1147 | {TTI::SK_Broadcast, MVT::v8f64, 1}, // vbroadcastpd | ||||||
1148 | {TTI::SK_Broadcast, MVT::v16f32, 1}, // vbroadcastps | ||||||
1149 | {TTI::SK_Broadcast, MVT::v8i64, 1}, // vpbroadcastq | ||||||
1150 | {TTI::SK_Broadcast, MVT::v16i32, 1}, // vpbroadcastd | ||||||
1151 | {TTI::SK_Broadcast, MVT::v32i16, 1}, // vpbroadcastw | ||||||
1152 | {TTI::SK_Broadcast, MVT::v64i8, 1}, // vpbroadcastb | ||||||
1153 | |||||||
1154 | {TTI::SK_Reverse, MVT::v8f64, 1}, // vpermpd | ||||||
1155 | {TTI::SK_Reverse, MVT::v16f32, 1}, // vpermps | ||||||
1156 | {TTI::SK_Reverse, MVT::v8i64, 1}, // vpermq | ||||||
1157 | {TTI::SK_Reverse, MVT::v16i32, 1}, // vpermd | ||||||
1158 | |||||||
1159 | {TTI::SK_PermuteSingleSrc, MVT::v8f64, 1}, // vpermpd | ||||||
1160 | {TTI::SK_PermuteSingleSrc, MVT::v4f64, 1}, // vpermpd | ||||||
1161 | {TTI::SK_PermuteSingleSrc, MVT::v2f64, 1}, // vpermpd | ||||||
1162 | {TTI::SK_PermuteSingleSrc, MVT::v16f32, 1}, // vpermps | ||||||
1163 | {TTI::SK_PermuteSingleSrc, MVT::v8f32, 1}, // vpermps | ||||||
1164 | {TTI::SK_PermuteSingleSrc, MVT::v4f32, 1}, // vpermps | ||||||
1165 | {TTI::SK_PermuteSingleSrc, MVT::v8i64, 1}, // vpermq | ||||||
1166 | {TTI::SK_PermuteSingleSrc, MVT::v4i64, 1}, // vpermq | ||||||
1167 | {TTI::SK_PermuteSingleSrc, MVT::v2i64, 1}, // vpermq | ||||||
1168 | {TTI::SK_PermuteSingleSrc, MVT::v16i32, 1}, // vpermd | ||||||
1169 | {TTI::SK_PermuteSingleSrc, MVT::v8i32, 1}, // vpermd | ||||||
1170 | {TTI::SK_PermuteSingleSrc, MVT::v4i32, 1}, // vpermd | ||||||
1171 | {TTI::SK_PermuteSingleSrc, MVT::v16i8, 1}, // pshufb | ||||||
1172 | |||||||
1173 | {TTI::SK_PermuteTwoSrc, MVT::v8f64, 1}, // vpermt2pd | ||||||
1174 | {TTI::SK_PermuteTwoSrc, MVT::v16f32, 1}, // vpermt2ps | ||||||
1175 | {TTI::SK_PermuteTwoSrc, MVT::v8i64, 1}, // vpermt2q | ||||||
1176 | {TTI::SK_PermuteTwoSrc, MVT::v16i32, 1}, // vpermt2d | ||||||
1177 | {TTI::SK_PermuteTwoSrc, MVT::v4f64, 1}, // vpermt2pd | ||||||
1178 | {TTI::SK_PermuteTwoSrc, MVT::v8f32, 1}, // vpermt2ps | ||||||
1179 | {TTI::SK_PermuteTwoSrc, MVT::v4i64, 1}, // vpermt2q | ||||||
1180 | {TTI::SK_PermuteTwoSrc, MVT::v8i32, 1}, // vpermt2d | ||||||
1181 | {TTI::SK_PermuteTwoSrc, MVT::v2f64, 1}, // vpermt2pd | ||||||
1182 | {TTI::SK_PermuteTwoSrc, MVT::v4f32, 1}, // vpermt2ps | ||||||
1183 | {TTI::SK_PermuteTwoSrc, MVT::v2i64, 1}, // vpermt2q | ||||||
1184 | {TTI::SK_PermuteTwoSrc, MVT::v4i32, 1}, // vpermt2d | ||||||
1185 | |||||||
1186 | // FIXME: This just applies the type legalization cost rules above | ||||||
1187 | // assuming these completely split. | ||||||
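| // (Without BWI these types split into two 256-bit halves, so the | ||||||
| // single-source entries are 2 * 7 and the two-source entries are 6 * 7, | ||||||
| // using the AVX2 v16i16/v32i8 costs.) | ||||||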
1188 | {TTI::SK_PermuteSingleSrc, MVT::v32i16, 14}, | ||||||
1189 | {TTI::SK_PermuteSingleSrc, MVT::v64i8, 14}, | ||||||
1190 | {TTI::SK_PermuteTwoSrc, MVT::v32i16, 42}, | ||||||
1191 | {TTI::SK_PermuteTwoSrc, MVT::v64i8, 42}, | ||||||
1192 | |||||||
1193 | {TTI::SK_Select, MVT::v32i16, 1}, // vpternlogq | ||||||
1194 | {TTI::SK_Select, MVT::v64i8, 1}, // vpternlogq | ||||||
1195 | {TTI::SK_Select, MVT::v8f64, 1}, // vblendmpd | ||||||
1196 | {TTI::SK_Select, MVT::v16f32, 1}, // vblendmps | ||||||
1197 | {TTI::SK_Select, MVT::v8i64, 1}, // vblendmq | ||||||
1198 | {TTI::SK_Select, MVT::v16i32, 1}, // vblendmd | ||||||
1199 | }; | ||||||
1200 | |||||||
1201 | if (ST->hasAVX512()) | ||||||
1202 | if (const auto *Entry = CostTableLookup(AVX512ShuffleTbl, Kind, LT.second)) | ||||||
1203 | return LT.first * Entry->Cost; | ||||||
1204 | |||||||
1205 | static const CostTblEntry AVX2ShuffleTbl[] = { | ||||||
1206 | {TTI::SK_Broadcast, MVT::v4f64, 1}, // vbroadcastpd | ||||||
1207 | {TTI::SK_Broadcast, MVT::v8f32, 1}, // vbroadcastps | ||||||
1208 | {TTI::SK_Broadcast, MVT::v4i64, 1}, // vpbroadcastq | ||||||
1209 | {TTI::SK_Broadcast, MVT::v8i32, 1}, // vpbroadcastd | ||||||
1210 | {TTI::SK_Broadcast, MVT::v16i16, 1}, // vpbroadcastw | ||||||
1211 | {TTI::SK_Broadcast, MVT::v32i8, 1}, // vpbroadcastb | ||||||
1212 | |||||||
1213 | {TTI::SK_Reverse, MVT::v4f64, 1}, // vpermpd | ||||||
1214 | {TTI::SK_Reverse, MVT::v8f32, 1}, // vpermps | ||||||
1215 | {TTI::SK_Reverse, MVT::v4i64, 1}, // vpermq | ||||||
1216 | {TTI::SK_Reverse, MVT::v8i32, 1}, // vpermd | ||||||
1217 | {TTI::SK_Reverse, MVT::v16i16, 2}, // vperm2i128 + pshufb | ||||||
1218 | {TTI::SK_Reverse, MVT::v32i8, 2}, // vperm2i128 + pshufb | ||||||
1219 | |||||||
1220 | {TTI::SK_Select, MVT::v16i16, 1}, // vpblendvb | ||||||
1221 | {TTI::SK_Select, MVT::v32i8, 1}, // vpblendvb | ||||||
1222 | |||||||
1223 | {TTI::SK_PermuteSingleSrc, MVT::v4f64, 1}, // vpermpd | ||||||
1224 | {TTI::SK_PermuteSingleSrc, MVT::v8f32, 1}, // vpermps | ||||||
1225 | {TTI::SK_PermuteSingleSrc, MVT::v4i64, 1}, // vpermq | ||||||
1226 | {TTI::SK_PermuteSingleSrc, MVT::v8i32, 1}, // vpermd | ||||||
1227 | {TTI::SK_PermuteSingleSrc, MVT::v16i16, 4}, // vperm2i128 + 2*vpshufb | ||||||
1228 | // + vpblendvb | ||||||
1229 | {TTI::SK_PermuteSingleSrc, MVT::v32i8, 4}, // vperm2i128 + 2*vpshufb | ||||||
1230 | // + vpblendvb | ||||||
1231 | |||||||
1232 | {TTI::SK_PermuteTwoSrc, MVT::v4f64, 3}, // 2*vpermpd + vblendpd | ||||||
1233 | {TTI::SK_PermuteTwoSrc, MVT::v8f32, 3}, // 2*vpermps + vblendps | ||||||
1234 | {TTI::SK_PermuteTwoSrc, MVT::v4i64, 3}, // 2*vpermq + vpblendd | ||||||
1235 | {TTI::SK_PermuteTwoSrc, MVT::v8i32, 3}, // 2*vpermd + vpblendd | ||||||
1236 | {TTI::SK_PermuteTwoSrc, MVT::v16i16, 7}, // 2*vperm2i128 + 4*vpshufb | ||||||
1237 | // + vpblendvb | ||||||
1238 | {TTI::SK_PermuteTwoSrc, MVT::v32i8, 7}, // 2*vperm2i128 + 4*vpshufb | ||||||
1239 | // + vpblendvb | ||||||
1240 | }; | ||||||
1241 | |||||||
1242 | if (ST->hasAVX2()) | ||||||
1243 | if (const auto *Entry = CostTableLookup(AVX2ShuffleTbl, Kind, LT.second)) | ||||||
1244 | return LT.first * Entry->Cost; | ||||||
1245 | |||||||
1246 | static const CostTblEntry XOPShuffleTbl[] = { | ||||||
1247 | {TTI::SK_PermuteSingleSrc, MVT::v4f64, 2}, // vperm2f128 + vpermil2pd | ||||||
1248 | {TTI::SK_PermuteSingleSrc, MVT::v8f32, 2}, // vperm2f128 + vpermil2ps | ||||||
1249 | {TTI::SK_PermuteSingleSrc, MVT::v4i64, 2}, // vperm2f128 + vpermil2pd | ||||||
1250 | {TTI::SK_PermuteSingleSrc, MVT::v8i32, 2}, // vperm2f128 + vpermil2ps | ||||||
1251 | {TTI::SK_PermuteSingleSrc, MVT::v16i16, 4}, // vextractf128 + 2*vpperm | ||||||
1252 | // + vinsertf128 | ||||||
1253 | {TTI::SK_PermuteSingleSrc, MVT::v32i8, 4}, // vextractf128 + 2*vpperm | ||||||
1254 | // + vinsertf128 | ||||||
1255 | |||||||
1256 | {TTI::SK_PermuteTwoSrc, MVT::v16i16, 9}, // 2*vextractf128 + 6*vpperm | ||||||
1257 | // + vinsertf128 | ||||||
1258 | {TTI::SK_PermuteTwoSrc, MVT::v8i16, 1}, // vpperm | ||||||
1259 | {TTI::SK_PermuteTwoSrc, MVT::v32i8, 9}, // 2*vextractf128 + 6*vpperm | ||||||
1260 | // + vinsertf128 | ||||||
1261 | {TTI::SK_PermuteTwoSrc, MVT::v16i8, 1}, // vpperm | ||||||
1262 | }; | ||||||
1263 | |||||||
1264 | if (ST->hasXOP()) | ||||||
1265 | if (const auto *Entry = CostTableLookup(XOPShuffleTbl, Kind, LT.second)) | ||||||
1266 | return LT.first * Entry->Cost; | ||||||
1267 | |||||||
1268 | static const CostTblEntry AVX1ShuffleTbl[] = { | ||||||
1269 | {TTI::SK_Broadcast, MVT::v4f64, 2}, // vperm2f128 + vpermilpd | ||||||
1270 | {TTI::SK_Broadcast, MVT::v8f32, 2}, // vperm2f128 + vpermilps | ||||||
1271 | {TTI::SK_Broadcast, MVT::v4i64, 2}, // vperm2f128 + vpermilpd | ||||||
1272 | {TTI::SK_Broadcast, MVT::v8i32, 2}, // vperm2f128 + vpermilps | ||||||
1273 | {TTI::SK_Broadcast, MVT::v16i16, 3}, // vpshuflw + vpshufd + vinsertf128 | ||||||
1274 | {TTI::SK_Broadcast, MVT::v32i8, 2}, // vpshufb + vinsertf128 | ||||||
1275 | |||||||
1276 | {TTI::SK_Reverse, MVT::v4f64, 2}, // vperm2f128 + vpermilpd | ||||||
1277 | {TTI::SK_Reverse, MVT::v8f32, 2}, // vperm2f128 + vpermilps | ||||||
1278 | {TTI::SK_Reverse, MVT::v4i64, 2}, // vperm2f128 + vpermilpd | ||||||
1279 | {TTI::SK_Reverse, MVT::v8i32, 2}, // vperm2f128 + vpermilps | ||||||
1280 | {TTI::SK_Reverse, MVT::v16i16, 4}, // vextractf128 + 2*pshufb | ||||||
1281 | // + vinsertf128 | ||||||
1282 | {TTI::SK_Reverse, MVT::v32i8, 4}, // vextractf128 + 2*pshufb | ||||||
1283 | // + vinsertf128 | ||||||
1284 | |||||||
1285 | {TTI::SK_Select, MVT::v4i64, 1}, // vblendpd | ||||||
1286 | {TTI::SK_Select, MVT::v4f64, 1}, // vblendpd | ||||||
1287 | {TTI::SK_Select, MVT::v8i32, 1}, // vblendps | ||||||
1288 | {TTI::SK_Select, MVT::v8f32, 1}, // vblendps | ||||||
1289 | {TTI::SK_Select, MVT::v16i16, 3}, // vpand + vpandn + vpor | ||||||
1290 | {TTI::SK_Select, MVT::v32i8, 3}, // vpand + vpandn + vpor | ||||||
1291 | |||||||
1292 | {TTI::SK_PermuteSingleSrc, MVT::v4f64, 2}, // vperm2f128 + vshufpd | ||||||
1293 | {TTI::SK_PermuteSingleSrc, MVT::v4i64, 2}, // vperm2f128 + vshufpd | ||||||
1294 | {TTI::SK_PermuteSingleSrc, MVT::v8f32, 4}, // 2*vperm2f128 + 2*vshufps | ||||||
1295 | {TTI::SK_PermuteSingleSrc, MVT::v8i32, 4}, // 2*vperm2f128 + 2*vshufps | ||||||
1296 | {TTI::SK_PermuteSingleSrc, MVT::v16i16, 8}, // vextractf128 + 4*pshufb | ||||||
1297 | // + 2*por + vinsertf128 | ||||||
1298 | {TTI::SK_PermuteSingleSrc, MVT::v32i8, 8}, // vextractf128 + 4*pshufb | ||||||
1299 | // + 2*por + vinsertf128 | ||||||
1300 | |||||||
1301 | {TTI::SK_PermuteTwoSrc, MVT::v4f64, 3}, // 2*vperm2f128 + vshufpd | ||||||
1302 | {TTI::SK_PermuteTwoSrc, MVT::v4i64, 3}, // 2*vperm2f128 + vshufpd | ||||||
1303 | {TTI::SK_PermuteTwoSrc, MVT::v8f32, 4}, // 2*vperm2f128 + 2*vshufps | ||||||
1304 | {TTI::SK_PermuteTwoSrc, MVT::v8i32, 4}, // 2*vperm2f128 + 2*vshufps | ||||||
1305 | {TTI::SK_PermuteTwoSrc, MVT::v16i16, 15}, // 2*vextractf128 + 8*pshufb | ||||||
1306 | // + 4*por + vinsertf128 | ||||||
1307 | {TTI::SK_PermuteTwoSrc, MVT::v32i8, 15}, // 2*vextractf128 + 8*pshufb | ||||||
1308 | // + 4*por + vinsertf128 | ||||||
1309 | }; | ||||||
1310 | |||||||
1311 | if (ST->hasAVX()) | ||||||
1312 | if (const auto *Entry = CostTableLookup(AVX1ShuffleTbl, Kind, LT.second)) | ||||||
1313 | return LT.first * Entry->Cost; | ||||||
1314 | |||||||
1315 | static const CostTblEntry SSE41ShuffleTbl[] = { | ||||||
1316 | {TTI::SK_Select, MVT::v2i64, 1}, // pblendw | ||||||
1317 | {TTI::SK_Select, MVT::v2f64, 1}, // movsd | ||||||
1318 | {TTI::SK_Select, MVT::v4i32, 1}, // pblendw | ||||||
1319 | {TTI::SK_Select, MVT::v4f32, 1}, // blendps | ||||||
1320 | {TTI::SK_Select, MVT::v8i16, 1}, // pblendw | ||||||
1321 | {TTI::SK_Select, MVT::v16i8, 1} // pblendvb | ||||||
1322 | }; | ||||||
1323 | |||||||
1324 | if (ST->hasSSE41()) | ||||||
1325 | if (const auto *Entry = CostTableLookup(SSE41ShuffleTbl, Kind, LT.second)) | ||||||
1326 | return LT.first * Entry->Cost; | ||||||
1327 | |||||||
1328 | static const CostTblEntry SSSE3ShuffleTbl[] = { | ||||||
1329 | {TTI::SK_Broadcast, MVT::v8i16, 1}, // pshufb | ||||||
1330 | {TTI::SK_Broadcast, MVT::v16i8, 1}, // pshufb | ||||||
1331 | |||||||
1332 | {TTI::SK_Reverse, MVT::v8i16, 1}, // pshufb | ||||||
1333 | {TTI::SK_Reverse, MVT::v16i8, 1}, // pshufb | ||||||
1334 | |||||||
1335 | {TTI::SK_Select, MVT::v8i16, 3}, // 2*pshufb + por | ||||||
1336 | {TTI::SK_Select, MVT::v16i8, 3}, // 2*pshufb + por | ||||||
1337 | |||||||
1338 | {TTI::SK_PermuteSingleSrc, MVT::v8i16, 1}, // pshufb | ||||||
1339 | {TTI::SK_PermuteSingleSrc, MVT::v16i8, 1}, // pshufb | ||||||
1340 | |||||||
1341 | {TTI::SK_PermuteTwoSrc, MVT::v8i16, 3}, // 2*pshufb + por | ||||||
1342 | {TTI::SK_PermuteTwoSrc, MVT::v16i8, 3}, // 2*pshufb + por | ||||||
1343 | }; | ||||||
1344 | |||||||
1345 | if (ST->hasSSSE3()) | ||||||
1346 | if (const auto *Entry = CostTableLookup(SSSE3ShuffleTbl, Kind, LT.second)) | ||||||
1347 | return LT.first * Entry->Cost; | ||||||
1348 | |||||||
1349 | static const CostTblEntry SSE2ShuffleTbl[] = { | ||||||
1350 | {TTI::SK_Broadcast, MVT::v2f64, 1}, // shufpd | ||||||
1351 | {TTI::SK_Broadcast, MVT::v2i64, 1}, // pshufd | ||||||
1352 | {TTI::SK_Broadcast, MVT::v4i32, 1}, // pshufd | ||||||
1353 | {TTI::SK_Broadcast, MVT::v8i16, 2}, // pshuflw + pshufd | ||||||
1354 | {TTI::SK_Broadcast, MVT::v16i8, 3}, // unpck + pshuflw + pshufd | ||||||
1355 | |||||||
1356 | {TTI::SK_Reverse, MVT::v2f64, 1}, // shufpd | ||||||
1357 | {TTI::SK_Reverse, MVT::v2i64, 1}, // pshufd | ||||||
1358 | {TTI::SK_Reverse, MVT::v4i32, 1}, // pshufd | ||||||
1359 | {TTI::SK_Reverse, MVT::v8i16, 3}, // pshuflw + pshufhw + pshufd | ||||||
1360 | {TTI::SK_Reverse, MVT::v16i8, 9}, // 2*pshuflw + 2*pshufhw | ||||||
1361 | // + 2*pshufd + 2*unpck + packus | ||||||
1362 | |||||||
1363 | {TTI::SK_Select, MVT::v2i64, 1}, // movsd | ||||||
1364 | {TTI::SK_Select, MVT::v2f64, 1}, // movsd | ||||||
1365 | {TTI::SK_Select, MVT::v4i32, 2}, // 2*shufps | ||||||
1366 | {TTI::SK_Select, MVT::v8i16, 3}, // pand + pandn + por | ||||||
1367 | {TTI::SK_Select, MVT::v16i8, 3}, // pand + pandn + por | ||||||
1368 | |||||||
1369 | {TTI::SK_PermuteSingleSrc, MVT::v2f64, 1}, // shufpd | ||||||
1370 | {TTI::SK_PermuteSingleSrc, MVT::v2i64, 1}, // pshufd | ||||||
1371 | {TTI::SK_PermuteSingleSrc, MVT::v4i32, 1}, // pshufd | ||||||
1372 | {TTI::SK_PermuteSingleSrc, MVT::v8i16, 5}, // 2*pshuflw + 2*pshufhw | ||||||
1373 | // + pshufd/unpck | ||||||
1374 | { TTI::SK_PermuteSingleSrc, MVT::v16i8, 10 }, // 2*pshuflw + 2*pshufhw | ||||||
1375 | // + 2*pshufd + 2*unpck + 2*packus | ||||||
1376 | |||||||
1377 | { TTI::SK_PermuteTwoSrc, MVT::v2f64, 1 }, // shufpd | ||||||
1378 | { TTI::SK_PermuteTwoSrc, MVT::v2i64, 1 }, // shufpd | ||||||
1379 | { TTI::SK_PermuteTwoSrc, MVT::v4i32, 2 }, // 2*{unpck,movsd,pshufd} | ||||||
1380 | { TTI::SK_PermuteTwoSrc, MVT::v8i16, 8 }, // blend+permute | ||||||
1381 | { TTI::SK_PermuteTwoSrc, MVT::v16i8, 13 }, // blend+permute | ||||||
1382 | }; | ||||||
1383 | |||||||
1384 | if (ST->hasSSE2()) | ||||||
1385 | if (const auto *Entry = CostTableLookup(SSE2ShuffleTbl, Kind, LT.second)) | ||||||
1386 | return LT.first * Entry->Cost; | ||||||
1387 | |||||||
1388 | static const CostTblEntry SSE1ShuffleTbl[] = { | ||||||
1389 | { TTI::SK_Broadcast, MVT::v4f32, 1 }, // shufps | ||||||
1390 | { TTI::SK_Reverse, MVT::v4f32, 1 }, // shufps | ||||||
1391 | { TTI::SK_Select, MVT::v4f32, 2 }, // 2*shufps | ||||||
1392 | { TTI::SK_PermuteSingleSrc, MVT::v4f32, 1 }, // shufps | ||||||
1393 | { TTI::SK_PermuteTwoSrc, MVT::v4f32, 2 }, // 2*shufps | ||||||
1394 | }; | ||||||
1395 | |||||||
1396 | if (ST->hasSSE1()) | ||||||
1397 | if (const auto *Entry = CostTableLookup(SSE1ShuffleTbl, Kind, LT.second)) | ||||||
1398 | return LT.first * Entry->Cost; | ||||||
1399 | |||||||
1400 | return BaseT::getShuffleCost(Kind, BaseTp, Mask, Index, SubTp); | ||||||
1401 | } | ||||||
1402 | |||||||
1403 | InstructionCost X86TTIImpl::getCastInstrCost(unsigned Opcode, Type *Dst, | ||||||
1404 | Type *Src, | ||||||
1405 | TTI::CastContextHint CCH, | ||||||
1406 | TTI::TargetCostKind CostKind, | ||||||
1407 | const Instruction *I) { | ||||||
1408 | int ISD = TLI->InstructionOpcodeToISD(Opcode); | ||||||
1409 | assert(ISD && "Invalid opcode"); | ||||||
1410 | |||||||
1411 | // TODO: Allow non-throughput costs that aren't binary. | ||||||
1412 | auto AdjustCost = [&CostKind](InstructionCost Cost) -> InstructionCost { | ||||||
1413 | if (CostKind != TTI::TCK_RecipThroughput) | ||||||
1414 | return Cost == 0 ? 0 : 1; | ||||||
1415 | return Cost; | ||||||
1416 | }; | ||||||
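| // With this clamp, any cost kind other than reciprocal throughput reports | ||||||
| // each table hit below as cost 1 (a zero cost stays 0). | ||||||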
1417 | |||||||
1418 | // FIXME: Need a better design of the cost table to handle non-simple types and | ||||||
1419 | // potentially massive numbers of combinations (elem_num x src_type x dst_type). | ||||||
1420 | |||||||
1421 | static const TypeConversionCostTblEntry AVX512BWConversionTbl[] { | ||||||
1422 | { ISD::SIGN_EXTEND, MVT::v32i16, MVT::v32i8, 1 }, | ||||||
1423 | { ISD::ZERO_EXTEND, MVT::v32i16, MVT::v32i8, 1 }, | ||||||
1424 | |||||||
1425 | // Mask sign extend has an instruction. | ||||||
1426 | { ISD::SIGN_EXTEND, MVT::v2i8, MVT::v2i1, 1 }, | ||||||
1427 | { ISD::SIGN_EXTEND, MVT::v2i16, MVT::v2i1, 1 }, | ||||||
1428 | { ISD::SIGN_EXTEND, MVT::v4i8, MVT::v4i1, 1 }, | ||||||
1429 | { ISD::SIGN_EXTEND, MVT::v4i16, MVT::v4i1, 1 }, | ||||||
1430 | { ISD::SIGN_EXTEND, MVT::v8i8, MVT::v8i1, 1 }, | ||||||
1431 | { ISD::SIGN_EXTEND, MVT::v8i16, MVT::v8i1, 1 }, | ||||||
1432 | { ISD::SIGN_EXTEND, MVT::v16i8, MVT::v16i1, 1 }, | ||||||
1433 | { ISD::SIGN_EXTEND, MVT::v16i16, MVT::v16i1, 1 }, | ||||||
1434 | { ISD::SIGN_EXTEND, MVT::v32i8, MVT::v32i1, 1 }, | ||||||
1435 | { ISD::SIGN_EXTEND, MVT::v32i16, MVT::v32i1, 1 }, | ||||||
1436 | { ISD::SIGN_EXTEND, MVT::v64i8, MVT::v64i1, 1 }, | ||||||
1437 | |||||||
1438 | // Mask zero extend is a sext + shift. | ||||||
1439 | { ISD::ZERO_EXTEND, MVT::v2i8, MVT::v2i1, 2 }, | ||||||
1440 | { ISD::ZERO_EXTEND, MVT::v2i16, MVT::v2i1, 2 }, | ||||||
1441 | { ISD::ZERO_EXTEND, MVT::v4i8, MVT::v4i1, 2 }, | ||||||
1442 | { ISD::ZERO_EXTEND, MVT::v4i16, MVT::v4i1, 2 }, | ||||||
1443 | { ISD::ZERO_EXTEND, MVT::v8i8, MVT::v8i1, 2 }, | ||||||
1444 | { ISD::ZERO_EXTEND, MVT::v8i16, MVT::v8i1, 2 }, | ||||||
1445 | { ISD::ZERO_EXTEND, MVT::v16i8, MVT::v16i1, 2 }, | ||||||
1446 | { ISD::ZERO_EXTEND, MVT::v16i16, MVT::v16i1, 2 }, | ||||||
1447 | { ISD::ZERO_EXTEND, MVT::v32i8, MVT::v32i1, 2 }, | ||||||
1448 | { ISD::ZERO_EXTEND, MVT::v32i16, MVT::v32i1, 2 }, | ||||||
1449 | { ISD::ZERO_EXTEND, MVT::v64i8, MVT::v64i1, 2 }, | ||||||
1450 | |||||||
1451 | { ISD::TRUNCATE, MVT::v32i8, MVT::v32i16, 2 }, | ||||||
1452 | { ISD::TRUNCATE, MVT::v16i8, MVT::v16i16, 2 }, // widen to zmm | ||||||
1453 | { ISD::TRUNCATE, MVT::v2i1, MVT::v2i8, 2 }, // widen to zmm | ||||||
1454 | { ISD::TRUNCATE, MVT::v2i1, MVT::v2i16, 2 }, // widen to zmm | ||||||
1455 | { ISD::TRUNCATE, MVT::v4i1, MVT::v4i8, 2 }, // widen to zmm | ||||||
1456 | { ISD::TRUNCATE, MVT::v4i1, MVT::v4i16, 2 }, // widen to zmm | ||||||
1457 | { ISD::TRUNCATE, MVT::v8i1, MVT::v8i8, 2 }, // widen to zmm | ||||||
1458 | { ISD::TRUNCATE, MVT::v8i1, MVT::v8i16, 2 }, // widen to zmm | ||||||
1459 | { ISD::TRUNCATE, MVT::v16i1, MVT::v16i8, 2 }, // widen to zmm | ||||||
1460 | { ISD::TRUNCATE, MVT::v16i1, MVT::v16i16, 2 }, // widen to zmm | ||||||
1461 | { ISD::TRUNCATE, MVT::v32i1, MVT::v32i8, 2 }, // widen to zmm | ||||||
1462 | { ISD::TRUNCATE, MVT::v32i1, MVT::v32i16, 2 }, | ||||||
1463 | { ISD::TRUNCATE, MVT::v64i1, MVT::v64i8, 2 }, | ||||||
1464 | }; | ||||||
1465 | |||||||
1466 | static const TypeConversionCostTblEntry AVX512DQConversionTbl[] = { | ||||||
1467 | { ISD::SINT_TO_FP, MVT::v8f32, MVT::v8i64, 1 }, | ||||||
1468 | { ISD::SINT_TO_FP, MVT::v8f64, MVT::v8i64, 1 }, | ||||||
1469 | |||||||
1470 | { ISD::UINT_TO_FP, MVT::v8f32, MVT::v8i64, 1 }, | ||||||
1471 | { ISD::UINT_TO_FP, MVT::v8f64, MVT::v8i64, 1 }, | ||||||
1472 | |||||||
1473 | { ISD::FP_TO_SINT, MVT::v8i64, MVT::v8f32, 1 }, | ||||||
1474 | { ISD::FP_TO_SINT, MVT::v8i64, MVT::v8f64, 1 }, | ||||||
1475 | |||||||
1476 | { ISD::FP_TO_UINT, MVT::v8i64, MVT::v8f32, 1 }, | ||||||
1477 | { ISD::FP_TO_UINT, MVT::v8i64, MVT::v8f64, 1 }, | ||||||
1478 | }; | ||||||
1479 | |||||||
1480 | // TODO: For AVX512DQ + AVX512VL, we also have cheap casts for 128-bit and | ||||||
1481 | // 256-bit wide vectors. | ||||||
1482 | |||||||
1483 | static const TypeConversionCostTblEntry AVX512FConversionTbl[] = { | ||||||
1484 | { ISD::FP_EXTEND, MVT::v8f64, MVT::v8f32, 1 }, | ||||||
1485 | { ISD::FP_EXTEND, MVT::v8f64, MVT::v16f32, 3 }, | ||||||
1486 | { ISD::FP_ROUND, MVT::v8f32, MVT::v8f64, 1 }, | ||||||
1487 | |||||||
1488 | { ISD::TRUNCATE, MVT::v2i1, MVT::v2i8, 3 }, // sext+vpslld+vptestmd | ||||||
1489 | { ISD::TRUNCATE, MVT::v4i1, MVT::v4i8, 3 }, // sext+vpslld+vptestmd | ||||||
1490 | { ISD::TRUNCATE, MVT::v8i1, MVT::v8i8, 3 }, // sext+vpslld+vptestmd | ||||||
1491 | { ISD::TRUNCATE, MVT::v16i1, MVT::v16i8, 3 }, // sext+vpslld+vptestmd | ||||||
1492 | { ISD::TRUNCATE, MVT::v2i1, MVT::v2i16, 3 }, // sext+vpsllq+vptestmq | ||||||
1493 | { ISD::TRUNCATE, MVT::v4i1, MVT::v4i16, 3 }, // sext+vpsllq+vptestmq | ||||||
1494 | { ISD::TRUNCATE, MVT::v8i1, MVT::v8i16, 3 }, // sext+vpsllq+vptestmq | ||||||
1495 | { ISD::TRUNCATE, MVT::v16i1, MVT::v16i16, 3 }, // sext+vpslld+vptestmd | ||||||
1496 | { ISD::TRUNCATE, MVT::v2i1, MVT::v2i32, 2 }, // zmm vpslld+vptestmd | ||||||
1497 | { ISD::TRUNCATE, MVT::v4i1, MVT::v4i32, 2 }, // zmm vpslld+vptestmd | ||||||
1498 | { ISD::TRUNCATE, MVT::v8i1, MVT::v8i32, 2 }, // zmm vpslld+vptestmd | ||||||
1499 | { ISD::TRUNCATE, MVT::v16i1, MVT::v16i32, 2 }, // vpslld+vptestmd | ||||||
1500 | { ISD::TRUNCATE, MVT::v2i1, MVT::v2i64, 2 }, // zmm vpsllq+vptestmq | ||||||
1501 | { ISD::TRUNCATE, MVT::v4i1, MVT::v4i64, 2 }, // zmm vpsllq+vptestmq | ||||||
1502 | { ISD::TRUNCATE, MVT::v8i1, MVT::v8i64, 2 }, // vpsllq+vptestmq | ||||||
1503 | { ISD::TRUNCATE, MVT::v16i8, MVT::v16i32, 2 }, | ||||||
1504 | { ISD::TRUNCATE, MVT::v16i16, MVT::v16i32, 2 }, | ||||||
1505 | { ISD::TRUNCATE, MVT::v8i8, MVT::v8i64, 2 }, | ||||||
1506 | { ISD::TRUNCATE, MVT::v8i16, MVT::v8i64, 2 }, | ||||||
1507 | { ISD::TRUNCATE, MVT::v8i32, MVT::v8i64, 1 }, | ||||||
1508 | { ISD::TRUNCATE, MVT::v4i32, MVT::v4i64, 1 }, // zmm vpmovqd | ||||||
1509 | { ISD::TRUNCATE, MVT::v16i8, MVT::v16i64, 5 },// 2*vpmovqd+concat+vpmovdb | ||||||
1510 | |||||||
1511 | { ISD::TRUNCATE, MVT::v16i8, MVT::v16i16, 3 }, // extend to v16i32 | ||||||
1512 | { ISD::TRUNCATE, MVT::v32i8, MVT::v32i16, 8 }, | ||||||
1513 | |||||||
1514 | // Sign extend is zmm vpternlogd+vptruncdb. | ||||||
1515 | // Zero extend is zmm broadcast load+vptruncdw. | ||||||
1516 | { ISD::SIGN_EXTEND, MVT::v2i8, MVT::v2i1, 3 }, | ||||||
1517 | { ISD::ZERO_EXTEND, MVT::v2i8, MVT::v2i1, 4 }, | ||||||
1518 | { ISD::SIGN_EXTEND, MVT::v4i8, MVT::v4i1, 3 }, | ||||||
1519 | { ISD::ZERO_EXTEND, MVT::v4i8, MVT::v4i1, 4 }, | ||||||
1520 | { ISD::SIGN_EXTEND, MVT::v8i8, MVT::v8i1, 3 }, | ||||||
1521 | { ISD::ZERO_EXTEND, MVT::v8i8, MVT::v8i1, 4 }, | ||||||
1522 | { ISD::SIGN_EXTEND, MVT::v16i8, MVT::v16i1, 3 }, | ||||||
1523 | { ISD::ZERO_EXTEND, MVT::v16i8, MVT::v16i1, 4 }, | ||||||
1524 | |||||||
1525 | // Sign extend is zmm vpternlogd+vptruncdw. | ||||||
1526 | // Zero extend is zmm vpternlogd+vptruncdw+vpsrlw. | ||||||
1527 | { ISD::SIGN_EXTEND, MVT::v2i16, MVT::v2i1, 3 }, | ||||||
1528 | { ISD::ZERO_EXTEND, MVT::v2i16, MVT::v2i1, 4 }, | ||||||
1529 | { ISD::SIGN_EXTEND, MVT::v4i16, MVT::v4i1, 3 }, | ||||||
1530 | { ISD::ZERO_EXTEND, MVT::v4i16, MVT::v4i1, 4 }, | ||||||
1531 | { ISD::SIGN_EXTEND, MVT::v8i16, MVT::v8i1, 3 }, | ||||||
1532 | { ISD::ZERO_EXTEND, MVT::v8i16, MVT::v8i1, 4 }, | ||||||
1533 | { ISD::SIGN_EXTEND, MVT::v16i16, MVT::v16i1, 3 }, | ||||||
1534 | { ISD::ZERO_EXTEND, MVT::v16i16, MVT::v16i1, 4 }, | ||||||
1535 | |||||||
1536 | { ISD::SIGN_EXTEND, MVT::v2i32, MVT::v2i1, 1 }, // zmm vpternlogd | ||||||
1537 | { ISD::ZERO_EXTEND, MVT::v2i32, MVT::v2i1, 2 }, // zmm vpternlogd+psrld | ||||||
1538 | { ISD::SIGN_EXTEND, MVT::v4i32, MVT::v4i1, 1 }, // zmm vpternlogd | ||||||
1539 | { ISD::ZERO_EXTEND, MVT::v4i32, MVT::v4i1, 2 }, // zmm vpternlogd+psrld | ||||||
1540 | { ISD::SIGN_EXTEND, MVT::v8i32, MVT::v8i1, 1 }, // zmm vpternlogd | ||||||
1541 | { ISD::ZERO_EXTEND, MVT::v8i32, MVT::v8i1, 2 }, // zmm vpternlogd+psrld | ||||||
1542 | { ISD::SIGN_EXTEND, MVT::v2i64, MVT::v2i1, 1 }, // zmm vpternlogq | ||||||
1543 | { ISD::ZERO_EXTEND, MVT::v2i64, MVT::v2i1, 2 }, // zmm vpternlogq+psrlq | ||||||
1544 | { ISD::SIGN_EXTEND, MVT::v4i64, MVT::v4i1, 1 }, // zmm vpternlogq | ||||||
1545 | { ISD::ZERO_EXTEND, MVT::v4i64, MVT::v4i1, 2 }, // zmm vpternlogq+psrlq | ||||||
1546 | |||||||
1547 | { ISD::SIGN_EXTEND, MVT::v16i32, MVT::v16i1, 1 }, // vpternlogd | ||||||
1548 | { ISD::ZERO_EXTEND, MVT::v16i32, MVT::v16i1, 2 }, // vpternlogd+psrld | ||||||
1549 | { ISD::SIGN_EXTEND, MVT::v8i64, MVT::v8i1, 1 }, // vpternlogq | ||||||
1550 | { ISD::ZERO_EXTEND, MVT::v8i64, MVT::v8i1, 2 }, // vpternlogq+psrlq | ||||||
1551 | |||||||
1552 | { ISD::SIGN_EXTEND, MVT::v16i32, MVT::v16i8, 1 }, | ||||||
1553 | { ISD::ZERO_EXTEND, MVT::v16i32, MVT::v16i8, 1 }, | ||||||
1554 | { ISD::SIGN_EXTEND, MVT::v16i32, MVT::v16i16, 1 }, | ||||||
1555 | { ISD::ZERO_EXTEND, MVT::v16i32, MVT::v16i16, 1 }, | ||||||
1556 | { ISD::SIGN_EXTEND, MVT::v8i64, MVT::v8i8, 1 }, | ||||||
1557 | { ISD::ZERO_EXTEND, MVT::v8i64, MVT::v8i8, 1 }, | ||||||
1558 | { ISD::SIGN_EXTEND, MVT::v8i64, MVT::v8i16, 1 }, | ||||||
1559 | { ISD::ZERO_EXTEND, MVT::v8i64, MVT::v8i16, 1 }, | ||||||
1560 | { ISD::SIGN_EXTEND, MVT::v8i64, MVT::v8i32, 1 }, | ||||||
1561 | { ISD::ZERO_EXTEND, MVT::v8i64, MVT::v8i32, 1 }, | ||||||
1562 | |||||||
1563 | { ISD::SIGN_EXTEND, MVT::v32i16, MVT::v32i8, 3 }, // FIXME: May not be right | ||||||
1564 | { ISD::ZERO_EXTEND, MVT::v32i16, MVT::v32i8, 3 }, // FIXME: May not be right | ||||||
1565 | |||||||
1566 | { ISD::SINT_TO_FP, MVT::v8f64, MVT::v8i1, 4 }, | ||||||
1567 | { ISD::SINT_TO_FP, MVT::v16f32, MVT::v16i1, 3 }, | ||||||
1568 | { ISD::SINT_TO_FP, MVT::v8f64, MVT::v8i8, 2 }, | ||||||
1569 | { ISD::SINT_TO_FP, MVT::v16f32, MVT::v16i8, 2 }, | ||||||
1570 | { ISD::SINT_TO_FP, MVT::v8f64, MVT::v8i16, 2 }, | ||||||
1571 | { ISD::SINT_TO_FP, MVT::v16f32, MVT::v16i16, 2 }, | ||||||
1572 | { ISD::SINT_TO_FP, MVT::v16f32, MVT::v16i32, 1 }, | ||||||
1573 | { ISD::SINT_TO_FP, MVT::v8f64, MVT::v8i32, 1 }, | ||||||
1574 | |||||||
1575 | { ISD::UINT_TO_FP, MVT::v8f64, MVT::v8i1, 4 }, | ||||||
1576 | { ISD::UINT_TO_FP, MVT::v16f32, MVT::v16i1, 3 }, | ||||||
1577 | { ISD::UINT_TO_FP, MVT::v8f64, MVT::v8i8, 2 }, | ||||||
1578 | { ISD::UINT_TO_FP, MVT::v16f32, MVT::v16i8, 2 }, | ||||||
1579 | { ISD::UINT_TO_FP, MVT::v8f64, MVT::v8i16, 2 }, | ||||||
1580 | { ISD::UINT_TO_FP, MVT::v16f32, MVT::v16i16, 2 }, | ||||||
1581 | { ISD::UINT_TO_FP, MVT::v8f64, MVT::v8i32, 1 }, | ||||||
1582 | { ISD::UINT_TO_FP, MVT::v16f32, MVT::v16i32, 1 }, | ||||||
1583 | { ISD::UINT_TO_FP, MVT::v8f32, MVT::v8i64, 26 }, | ||||||
1584 | { ISD::UINT_TO_FP, MVT::v8f64, MVT::v8i64, 5 }, | ||||||
1585 | |||||||
1586 | { ISD::FP_TO_SINT, MVT::v8i8, MVT::v8f64, 3 }, | ||||||
1587 | { ISD::FP_TO_SINT, MVT::v8i16, MVT::v8f64, 3 }, | ||||||
1588 | { ISD::FP_TO_SINT, MVT::v16i8, MVT::v16f32, 3 }, | ||||||
1589 | { ISD::FP_TO_SINT, MVT::v16i16, MVT::v16f32, 3 }, | ||||||
1590 | |||||||
1591 | { ISD::FP_TO_UINT, MVT::v8i32, MVT::v8f64, 1 }, | ||||||
1592 | { ISD::FP_TO_UINT, MVT::v8i16, MVT::v8f64, 3 }, | ||||||
1593 | { ISD::FP_TO_UINT, MVT::v8i8, MVT::v8f64, 3 }, | ||||||
1594 | { ISD::FP_TO_UINT, MVT::v16i32, MVT::v16f32, 1 }, | ||||||
1595 | { ISD::FP_TO_UINT, MVT::v16i16, MVT::v16f32, 3 }, | ||||||
1596 | { ISD::FP_TO_UINT, MVT::v16i8, MVT::v16f32, 3 }, | ||||||
1597 | }; | ||||||
1598 | |||||||
1599 | static const TypeConversionCostTblEntry AVX512BWVLConversionTbl[] { | ||||||
1600 | // Mask sign extend has an instruction. | ||||||
1601 | { ISD::SIGN_EXTEND, MVT::v2i8, MVT::v2i1, 1 }, | ||||||
1602 | { ISD::SIGN_EXTEND, MVT::v2i16, MVT::v2i1, 1 }, | ||||||
1603 | { ISD::SIGN_EXTEND, MVT::v4i8, MVT::v4i1, 1 }, | ||||||
1604 | { ISD::SIGN_EXTEND, MVT::v4i16, MVT::v4i1, 1 }, | ||||||
1605 | { ISD::SIGN_EXTEND, MVT::v8i8, MVT::v8i1, 1 }, | ||||||
1606 | { ISD::SIGN_EXTEND, MVT::v8i16, MVT::v8i1, 1 }, | ||||||
1607 | { ISD::SIGN_EXTEND, MVT::v16i8, MVT::v16i1, 1 }, | ||||||
1608 | { ISD::SIGN_EXTEND, MVT::v16i16, MVT::v16i1, 1 }, | ||||||
1609 | { ISD::SIGN_EXTEND, MVT::v32i8, MVT::v32i1, 1 }, | ||||||
1610 | |||||||
1611 | // Mask zero extend is a sext + shift. | ||||||
1612 | { ISD::ZERO_EXTEND, MVT::v2i8, MVT::v2i1, 2 }, | ||||||
1613 | { ISD::ZERO_EXTEND, MVT::v2i16, MVT::v2i1, 2 }, | ||||||
1614 | { ISD::ZERO_EXTEND, MVT::v4i8, MVT::v4i1, 2 }, | ||||||
1615 | { ISD::ZERO_EXTEND, MVT::v4i16, MVT::v4i1, 2 }, | ||||||
1616 | { ISD::ZERO_EXTEND, MVT::v8i8, MVT::v8i1, 2 }, | ||||||
1617 | { ISD::ZERO_EXTEND, MVT::v8i16, MVT::v8i1, 2 }, | ||||||
1618 | { ISD::ZERO_EXTEND, MVT::v16i8, MVT::v16i1, 2 }, | ||||||
1619 | { ISD::ZERO_EXTEND, MVT::v16i16, MVT::v16i1, 2 }, | ||||||
1620 | { ISD::ZERO_EXTEND, MVT::v32i8, MVT::v32i1, 2 }, | ||||||
1621 | |||||||
1622 | { ISD::TRUNCATE, MVT::v16i8, MVT::v16i16, 2 }, | ||||||
1623 | { ISD::TRUNCATE, MVT::v2i1, MVT::v2i8, 2 }, // vpsllw+vptestmb | ||||||
1624 | { ISD::TRUNCATE, MVT::v2i1, MVT::v2i16, 2 }, // vpsllw+vptestmw | ||||||
1625 | { ISD::TRUNCATE, MVT::v4i1, MVT::v4i8, 2 }, // vpsllw+vptestmb | ||||||
1626 | { ISD::TRUNCATE, MVT::v4i1, MVT::v4i16, 2 }, // vpsllw+vptestmw | ||||||
1627 | { ISD::TRUNCATE, MVT::v8i1, MVT::v8i8, 2 }, // vpsllw+vptestmb | ||||||
1628 | { ISD::TRUNCATE, MVT::v8i1, MVT::v8i16, 2 }, // vpsllw+vptestmw | ||||||
1629 | { ISD::TRUNCATE, MVT::v16i1, MVT::v16i8, 2 }, // vpsllw+vptestmb | ||||||
1630 | { ISD::TRUNCATE, MVT::v16i1, MVT::v16i16, 2 }, // vpsllw+vptestmw | ||||||
1631 | { ISD::TRUNCATE, MVT::v32i1, MVT::v32i8, 2 }, // vpsllw+vptestmb | ||||||
1632 | }; | ||||||
1633 | |||||||
1634 | static const TypeConversionCostTblEntry AVX512DQVLConversionTbl[] = { | ||||||
1635 | { ISD::SINT_TO_FP, MVT::v2f32, MVT::v2i64, 1 }, | ||||||
1636 | { ISD::SINT_TO_FP, MVT::v2f64, MVT::v2i64, 1 }, | ||||||
1637 | { ISD::SINT_TO_FP, MVT::v4f32, MVT::v4i64, 1 }, | ||||||
1638 | { ISD::SINT_TO_FP, MVT::v4f64, MVT::v4i64, 1 }, | ||||||
1639 | |||||||
1640 | { ISD::UINT_TO_FP, MVT::v2f32, MVT::v2i64, 1 }, | ||||||
1641 | { ISD::UINT_TO_FP, MVT::v2f64, MVT::v2i64, 1 }, | ||||||
1642 | { ISD::UINT_TO_FP, MVT::v4f32, MVT::v4i64, 1 }, | ||||||
1643 | { ISD::UINT_TO_FP, MVT::v4f64, MVT::v4i64, 1 }, | ||||||
1644 | |||||||
1645 | { ISD::FP_TO_SINT, MVT::v2i64, MVT::v2f32, 1 }, | ||||||
1646 | { ISD::FP_TO_SINT, MVT::v4i64, MVT::v4f32, 1 }, | ||||||
1647 | { ISD::FP_TO_SINT, MVT::v2i64, MVT::v2f64, 1 }, | ||||||
1648 | { ISD::FP_TO_SINT, MVT::v4i64, MVT::v4f64, 1 }, | ||||||
1649 | |||||||
1650 | { ISD::FP_TO_UINT, MVT::v2i64, MVT::v2f32, 1 }, | ||||||
1651 | { ISD::FP_TO_UINT, MVT::v4i64, MVT::v4f32, 1 }, | ||||||
1652 | { ISD::FP_TO_UINT, MVT::v2i64, MVT::v2f64, 1 }, | ||||||
1653 | { ISD::FP_TO_UINT, MVT::v4i64, MVT::v4f64, 1 }, | ||||||
1654 | }; | ||||||
1655 | |||||||
1656 | static const TypeConversionCostTblEntry AVX512VLConversionTbl[] = { | ||||||
1657 | { ISD::TRUNCATE, MVT::v2i1, MVT::v2i8, 3 }, // sext+vpslld+vptestmd | ||||||
1658 | { ISD::TRUNCATE, MVT::v4i1, MVT::v4i8, 3 }, // sext+vpslld+vptestmd | ||||||
1659 | { ISD::TRUNCATE, MVT::v8i1, MVT::v8i8, 3 }, // sext+vpslld+vptestmd | ||||||
1660 | { ISD::TRUNCATE, MVT::v16i1, MVT::v16i8, 8 }, // split+2*v8i8 | ||||||
1661 | { ISD::TRUNCATE, MVT::v2i1, MVT::v2i16, 3 }, // sext+vpsllq+vptestmq | ||||||
1662 | { ISD::TRUNCATE, MVT::v4i1, MVT::v4i16, 3 }, // sext+vpsllq+vptestmq | ||||||
1663 | { ISD::TRUNCATE, MVT::v8i1, MVT::v8i16, 3 }, // sext+vpsllq+vptestmq | ||||||
1664 | { ISD::TRUNCATE, MVT::v16i1, MVT::v16i16, 8 }, // split+2*v8i16 | ||||||
1665 | { ISD::TRUNCATE, MVT::v2i1, MVT::v2i32, 2 }, // vpslld+vptestmd | ||||||
1666 | { ISD::TRUNCATE, MVT::v4i1, MVT::v4i32, 2 }, // vpslld+vptestmd | ||||||
1667 | { ISD::TRUNCATE, MVT::v8i1, MVT::v8i32, 2 }, // vpslld+vptestmd | ||||||
1668 | { ISD::TRUNCATE, MVT::v2i1, MVT::v2i64, 2 }, // vpsllq+vptestmq | ||||||
1669 | { ISD::TRUNCATE, MVT::v4i1, MVT::v4i64, 2 }, // vpsllq+vptestmq | ||||||
1670 | { ISD::TRUNCATE, MVT::v4i32, MVT::v4i64, 1 }, // vpmovqd | ||||||
1671 | |||||||
1672 | // sign extend is vpcmpeq+maskedmove+vpmovdw+vpacksswb | ||||||
1673 | // zero extend is vpcmpeq+maskedmove+vpmovdw+vpsrlw+vpackuswb | ||||||
1674 | { ISD::SIGN_EXTEND, MVT::v2i8, MVT::v2i1, 5 }, | ||||||
1675 | { ISD::ZERO_EXTEND, MVT::v2i8, MVT::v2i1, 6 }, | ||||||
1676 | { ISD::SIGN_EXTEND, MVT::v4i8, MVT::v4i1, 5 }, | ||||||
1677 | { ISD::ZERO_EXTEND, MVT::v4i8, MVT::v4i1, 6 }, | ||||||
1678 | { ISD::SIGN_EXTEND, MVT::v8i8, MVT::v8i1, 5 }, | ||||||
1679 | { ISD::ZERO_EXTEND, MVT::v8i8, MVT::v8i1, 6 }, | ||||||
1680 | { ISD::SIGN_EXTEND, MVT::v16i8, MVT::v16i1, 10 }, | ||||||
1681 | { ISD::ZERO_EXTEND, MVT::v16i8, MVT::v16i1, 12 }, | ||||||
1682 | |||||||
1683 | // sign extend is vpcmpeq+maskedmove+vpmovdw | ||||||
1684 | // zero extend is vpcmpeq+maskedmove+vpmovdw+vpsrlw | ||||||
1685 | { ISD::SIGN_EXTEND, MVT::v2i16, MVT::v2i1, 4 }, | ||||||
1686 | { ISD::ZERO_EXTEND, MVT::v2i16, MVT::v2i1, 5 }, | ||||||
1687 | { ISD::SIGN_EXTEND, MVT::v4i16, MVT::v4i1, 4 }, | ||||||
1688 | { ISD::ZERO_EXTEND, MVT::v4i16, MVT::v4i1, 5 }, | ||||||
1689 | { ISD::SIGN_EXTEND, MVT::v8i16, MVT::v8i1, 4 }, | ||||||
1690 | { ISD::ZERO_EXTEND, MVT::v8i16, MVT::v8i1, 5 }, | ||||||
1691 | { ISD::SIGN_EXTEND, MVT::v16i16, MVT::v16i1, 10 }, | ||||||
1692 | { ISD::ZERO_EXTEND, MVT::v16i16, MVT::v16i1, 12 }, | ||||||
1693 | |||||||
1694 | { ISD::SIGN_EXTEND, MVT::v2i32, MVT::v2i1, 1 }, // vpternlogd | ||||||
1695 | { ISD::ZERO_EXTEND, MVT::v2i32, MVT::v2i1, 2 }, // vpternlogd+psrld | ||||||
1696 | { ISD::SIGN_EXTEND, MVT::v4i32, MVT::v4i1, 1 }, // vpternlogd | ||||||
1697 | { ISD::ZERO_EXTEND, MVT::v4i32, MVT::v4i1, 2 }, // vpternlogd+psrld | ||||||
1698 | { ISD::SIGN_EXTEND, MVT::v8i32, MVT::v8i1, 1 }, // vpternlogd | ||||||
1699 | { ISD::ZERO_EXTEND, MVT::v8i32, MVT::v8i1, 2 }, // vpternlogd+psrld | ||||||
1700 | { ISD::SIGN_EXTEND, MVT::v2i64, MVT::v2i1, 1 }, // vpternlogq | ||||||
1701 | { ISD::ZERO_EXTEND, MVT::v2i64, MVT::v2i1, 2 }, // vpternlogq+psrlq | ||||||
1702 | { ISD::SIGN_EXTEND, MVT::v4i64, MVT::v4i1, 1 }, // vpternlogq | ||||||
1703 | { ISD::ZERO_EXTEND, MVT::v4i64, MVT::v4i1, 2 }, // vpternlogq+psrlq | ||||||
1704 | |||||||
1705 | { ISD::UINT_TO_FP, MVT::v2f64, MVT::v2i8, 2 }, | ||||||
1706 | { ISD::UINT_TO_FP, MVT::v4f64, MVT::v4i8, 2 }, | ||||||
1707 | { ISD::UINT_TO_FP, MVT::v8f32, MVT::v8i8, 2 }, | ||||||
1708 | { ISD::UINT_TO_FP, MVT::v2f64, MVT::v2i16, 5 }, | ||||||
1709 | { ISD::UINT_TO_FP, MVT::v4f64, MVT::v4i16, 2 }, | ||||||
1710 | { ISD::UINT_TO_FP, MVT::v8f32, MVT::v8i16, 2 }, | ||||||
1711 | { ISD::UINT_TO_FP, MVT::v2f32, MVT::v2i32, 2 }, | ||||||
1712 | { ISD::UINT_TO_FP, MVT::v2f64, MVT::v2i32, 1 }, | ||||||
1713 | { ISD::UINT_TO_FP, MVT::v4f32, MVT::v4i32, 1 }, | ||||||
1714 | { ISD::UINT_TO_FP, MVT::v4f64, MVT::v4i32, 1 }, | ||||||
1715 | { ISD::UINT_TO_FP, MVT::v8f32, MVT::v8i32, 1 }, | ||||||
1716 | { ISD::UINT_TO_FP, MVT::v2f32, MVT::v2i64, 5 }, | ||||||
1717 | { ISD::UINT_TO_FP, MVT::v2f64, MVT::v2i64, 5 }, | ||||||
1718 | { ISD::UINT_TO_FP, MVT::v4f64, MVT::v4i64, 5 }, | ||||||
1719 | |||||||
1720 | { ISD::UINT_TO_FP, MVT::f32, MVT::i64, 1 }, | ||||||
1721 | { ISD::UINT_TO_FP, MVT::f64, MVT::i64, 1 }, | ||||||
1722 | |||||||
1723 | { ISD::FP_TO_SINT, MVT::v8i8, MVT::v8f32, 3 }, | ||||||
1724 | { ISD::FP_TO_UINT, MVT::v8i8, MVT::v8f32, 3 }, | ||||||
1725 | |||||||
1726 | { ISD::FP_TO_UINT, MVT::i64, MVT::f32, 1 }, | ||||||
1727 | { ISD::FP_TO_UINT, MVT::i64, MVT::f64, 1 }, | ||||||
1728 | |||||||
1729 | { ISD::FP_TO_UINT, MVT::v2i32, MVT::v2f32, 1 }, | ||||||
1730 | { ISD::FP_TO_UINT, MVT::v4i32, MVT::v4f32, 1 }, | ||||||
1731 | { ISD::FP_TO_UINT, MVT::v2i32, MVT::v2f64, 1 }, | ||||||
1732 | { ISD::FP_TO_UINT, MVT::v4i32, MVT::v4f64, 1 }, | ||||||
1733 | { ISD::FP_TO_UINT, MVT::v8i32, MVT::v8f32, 1 }, | ||||||
1734 | }; | ||||||
1735 | |||||||
1736 | static const TypeConversionCostTblEntry AVX2ConversionTbl[] = { | ||||||
1737 | { ISD::SIGN_EXTEND, MVT::v4i64, MVT::v4i1, 3 }, | ||||||
1738 | { ISD::ZERO_EXTEND, MVT::v4i64, MVT::v4i1, 3 }, | ||||||
1739 | { ISD::SIGN_EXTEND, MVT::v8i32, MVT::v8i1, 3 }, | ||||||
1740 | { ISD::ZERO_EXTEND, MVT::v8i32, MVT::v8i1, 3 }, | ||||||
1741 | { ISD::SIGN_EXTEND, MVT::v4i64, MVT::v4i8, 1 }, | ||||||
1742 | { ISD::ZERO_EXTEND, MVT::v4i64, MVT::v4i8, 1 }, | ||||||
1743 | { ISD::SIGN_EXTEND, MVT::v8i32, MVT::v8i8, 1 }, | ||||||
1744 | { ISD::ZERO_EXTEND, MVT::v8i32, MVT::v8i8, 1 }, | ||||||
1745 | { ISD::SIGN_EXTEND, MVT::v16i16, MVT::v16i1, 1 }, | ||||||
1746 | { ISD::ZERO_EXTEND, MVT::v16i16, MVT::v16i1, 1 }, | ||||||
1747 | { ISD::SIGN_EXTEND, MVT::v16i16, MVT::v16i8, 1 }, | ||||||
1748 | { ISD::ZERO_EXTEND, MVT::v16i16, MVT::v16i8, 1 }, | ||||||
1749 | { ISD::SIGN_EXTEND, MVT::v4i64, MVT::v4i16, 1 }, | ||||||
1750 | { ISD::ZERO_EXTEND, MVT::v4i64, MVT::v4i16, 1 }, | ||||||
1751 | { ISD::SIGN_EXTEND, MVT::v8i32, MVT::v8i16, 1 }, | ||||||
1752 | { ISD::ZERO_EXTEND, MVT::v8i32, MVT::v8i16, 1 }, | ||||||
1753 | { ISD::SIGN_EXTEND, MVT::v4i64, MVT::v4i32, 1 }, | ||||||
1754 | { ISD::ZERO_EXTEND, MVT::v4i64, MVT::v4i32, 1 }, | ||||||
1755 | { ISD::ZERO_EXTEND, MVT::v16i32, MVT::v16i16, 3 }, | ||||||
1756 | { ISD::SIGN_EXTEND, MVT::v16i32, MVT::v16i16, 3 }, | ||||||
1757 | |||||||
1758 | { ISD::TRUNCATE, MVT::v4i32, MVT::v4i64, 2 }, | ||||||
1759 | { ISD::TRUNCATE, MVT::v8i1, MVT::v8i32, 2 }, | ||||||
1760 | |||||||
1761 | { ISD::TRUNCATE, MVT::v4i8, MVT::v4i64, 2 }, | ||||||
1762 | { ISD::TRUNCATE, MVT::v4i16, MVT::v4i64, 2 }, | ||||||
1763 | { ISD::TRUNCATE, MVT::v8i8, MVT::v8i32, 2 }, | ||||||
1764 | { ISD::TRUNCATE, MVT::v8i16, MVT::v8i32, 2 }, | ||||||
1765 | |||||||
1766 | { ISD::FP_EXTEND, MVT::v8f64, MVT::v8f32, 3 }, | ||||||
1767 | { ISD::FP_ROUND, MVT::v8f32, MVT::v8f64, 3 }, | ||||||
1768 | |||||||
1769 | { ISD::UINT_TO_FP, MVT::v8f32, MVT::v8i32, 8 }, | ||||||
1770 | }; | ||||||
1771 | |||||||
1772 | static const TypeConversionCostTblEntry AVXConversionTbl[] = { | ||||||
1773 | { ISD::SIGN_EXTEND, MVT::v4i64, MVT::v4i1, 6 }, | ||||||
1774 | { ISD::ZERO_EXTEND, MVT::v4i64, MVT::v4i1, 4 }, | ||||||
1775 | { ISD::SIGN_EXTEND, MVT::v8i32, MVT::v8i1, 7 }, | ||||||
1776 | { ISD::ZERO_EXTEND, MVT::v8i32, MVT::v8i1, 4 }, | ||||||
1777 | { ISD::SIGN_EXTEND, MVT::v4i64, MVT::v4i8, 4 }, | ||||||
1778 | { ISD::ZERO_EXTEND, MVT::v4i64, MVT::v4i8, 4 }, | ||||||
1779 | { ISD::SIGN_EXTEND, MVT::v8i32, MVT::v8i8, 4 }, | ||||||
1780 | { ISD::ZERO_EXTEND, MVT::v8i32, MVT::v8i8, 4 }, | ||||||
1781 | { ISD::SIGN_EXTEND, MVT::v16i16, MVT::v16i1, 4 }, | ||||||
1782 | { ISD::ZERO_EXTEND, MVT::v16i16, MVT::v16i1, 4 }, | ||||||
1783 | { ISD::SIGN_EXTEND, MVT::v16i16, MVT::v16i8, 4 }, | ||||||
1784 | { ISD::ZERO_EXTEND, MVT::v16i16, MVT::v16i8, 4 }, | ||||||
1785 | { ISD::SIGN_EXTEND, MVT::v4i64, MVT::v4i16, 4 }, | ||||||
1786 | { ISD::ZERO_EXTEND, MVT::v4i64, MVT::v4i16, 3 }, | ||||||
1787 | { ISD::SIGN_EXTEND, MVT::v8i32, MVT::v8i16, 4 }, | ||||||
1788 | { ISD::ZERO_EXTEND, MVT::v8i32, MVT::v8i16, 4 }, | ||||||
1789 | { ISD::SIGN_EXTEND, MVT::v4i64, MVT::v4i32, 4 }, | ||||||
1790 | { ISD::ZERO_EXTEND, MVT::v4i64, MVT::v4i32, 4 }, | ||||||
1791 | |||||||
1792 | { ISD::TRUNCATE, MVT::v4i1, MVT::v4i64, 4 }, | ||||||
1793 | { ISD::TRUNCATE, MVT::v8i1, MVT::v8i32, 5 }, | ||||||
1794 | { ISD::TRUNCATE, MVT::v16i1, MVT::v16i16, 4 }, | ||||||
1795 | { ISD::TRUNCATE, MVT::v8i1, MVT::v8i64, 9 }, | ||||||
1796 | { ISD::TRUNCATE, MVT::v16i1, MVT::v16i64, 11 }, | ||||||
1797 | |||||||
1798 | { ISD::TRUNCATE, MVT::v16i8, MVT::v16i16, 4 }, | ||||||
1799 | { ISD::TRUNCATE, MVT::v8i8, MVT::v8i32, 4 }, | ||||||
1800 | { ISD::TRUNCATE, MVT::v8i16, MVT::v8i32, 5 }, | ||||||
1801 | { ISD::TRUNCATE, MVT::v4i8, MVT::v4i64, 4 }, | ||||||
1802 | { ISD::TRUNCATE, MVT::v4i16, MVT::v4i64, 4 }, | ||||||
1803 | { ISD::TRUNCATE, MVT::v4i32, MVT::v4i64, 2 }, | ||||||
1804 | { ISD::TRUNCATE, MVT::v8i8, MVT::v8i64, 11 }, | ||||||
1805 | { ISD::TRUNCATE, MVT::v8i16, MVT::v8i64, 9 }, | ||||||
1806 | { ISD::TRUNCATE, MVT::v8i32, MVT::v8i64, 3 }, | ||||||
1807 | { ISD::TRUNCATE, MVT::v16i8, MVT::v16i64, 11 }, | ||||||
1808 | |||||||
1809 | { ISD::SINT_TO_FP, MVT::v4f32, MVT::v4i1, 3 }, | ||||||
1810 | { ISD::SINT_TO_FP, MVT::v4f64, MVT::v4i1, 3 }, | ||||||
1811 | { ISD::SINT_TO_FP, MVT::v8f32, MVT::v8i1, 8 }, | ||||||
1812 | { ISD::SINT_TO_FP, MVT::v4f32, MVT::v4i8, 3 }, | ||||||
1813 | { ISD::SINT_TO_FP, MVT::v4f64, MVT::v4i8, 3 }, | ||||||
1814 | { ISD::SINT_TO_FP, MVT::v8f32, MVT::v8i8, 8 }, | ||||||
1815 | { ISD::SINT_TO_FP, MVT::v4f32, MVT::v4i16, 3 }, | ||||||
1816 | { ISD::SINT_TO_FP, MVT::v4f64, MVT::v4i16, 3 }, | ||||||
1817 | { ISD::SINT_TO_FP, MVT::v8f32, MVT::v8i16, 5 }, | ||||||
1818 | { ISD::SINT_TO_FP, MVT::v4f32, MVT::v4i32, 1 }, | ||||||
1819 | { ISD::SINT_TO_FP, MVT::v4f64, MVT::v4i32, 1 }, | ||||||
1820 | { ISD::SINT_TO_FP, MVT::v8f32, MVT::v8i32, 1 }, | ||||||
1821 | |||||||
1822 | { ISD::UINT_TO_FP, MVT::v4f32, MVT::v4i1, 7 }, | ||||||
1823 | { ISD::UINT_TO_FP, MVT::v4f64, MVT::v4i1, 7 }, | ||||||
1824 | { ISD::UINT_TO_FP, MVT::v8f32, MVT::v8i1, 6 }, | ||||||
1825 | { ISD::UINT_TO_FP, MVT::v4f32, MVT::v4i8, 2 }, | ||||||
1826 | { ISD::UINT_TO_FP, MVT::v4f64, MVT::v4i8, 2 }, | ||||||
1827 | { ISD::UINT_TO_FP, MVT::v8f32, MVT::v8i8, 5 }, | ||||||
1828 | { ISD::UINT_TO_FP, MVT::v4f32, MVT::v4i16, 2 }, | ||||||
1829 | { ISD::UINT_TO_FP, MVT::v4f64, MVT::v4i16, 2 }, | ||||||
1830 | { ISD::UINT_TO_FP, MVT::v8f32, MVT::v8i16, 5 }, | ||||||
1831 | { ISD::UINT_TO_FP, MVT::v2f64, MVT::v2i32, 6 }, | ||||||
1832 | { ISD::UINT_TO_FP, MVT::v4f32, MVT::v4i32, 6 }, | ||||||
1833 | { ISD::UINT_TO_FP, MVT::v4f64, MVT::v4i32, 6 }, | ||||||
1834 | { ISD::UINT_TO_FP, MVT::v8f32, MVT::v8i32, 9 }, | ||||||
1835 | { ISD::UINT_TO_FP, MVT::v2f64, MVT::v2i64, 5 }, | ||||||
1836 | { ISD::UINT_TO_FP, MVT::v4f64, MVT::v4i64, 6 }, | ||||||
1837 | // The generic code to compute the scalar overhead is currently broken. | ||||||
1838 | // Work around this limitation by estimating the scalarization overhead | ||||||
1839 | // here: we have roughly 10 instructions per scalar element, multiplied | ||||||
1840 | // by the vector width. | ||||||
1841 | // FIXME: remove this when PR19268 is fixed. | ||||||
1842 | { ISD::SINT_TO_FP, MVT::v4f64, MVT::v4i64, 13 }, | ||||||
1843 | { ISD::SINT_TO_FP, MVT::v4f64, MVT::v4i64, 13 }, | ||||||
1844 | |||||||
1845 | { ISD::FP_TO_SINT, MVT::v8i8, MVT::v8f32, 4 }, | ||||||
1846 | { ISD::FP_TO_SINT, MVT::v4i8, MVT::v4f64, 3 }, | ||||||
1847 | { ISD::FP_TO_SINT, MVT::v4i16, MVT::v4f64, 2 }, | ||||||
1848 | { ISD::FP_TO_SINT, MVT::v8i16, MVT::v8f32, 3 }, | ||||||
1849 | |||||||
1850 | { ISD::FP_TO_UINT, MVT::v4i8, MVT::v4f64, 3 }, | ||||||
1851 | { ISD::FP_TO_UINT, MVT::v4i16, MVT::v4f64, 2 }, | ||||||
1852 | { ISD::FP_TO_UINT, MVT::v8i8, MVT::v8f32, 4 }, | ||||||
1853 | { ISD::FP_TO_UINT, MVT::v8i16, MVT::v8f32, 3 }, | ||||||
1854 | // This node is expanded into scalarized operations, but BasicTTI is overly | ||||||
1855 | // optimistic when estimating its cost: it computes 3 per element (one | ||||||
1856 | // vector-extract, one scalar conversion and one vector-insert). The | ||||||
1857 | // problem is that the inserts form a read-modify-write chain, so latency | ||||||
1858 | // should be factored in too. We therefore inflate the cost per element by 1. | ||||||
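| // That is, (3 + 1) per element: 8 elements * 4 and 4 elements * 4 below. | ||||||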
1859 | { ISD::FP_TO_UINT, MVT::v8i32, MVT::v8f32, 8*4 }, | ||||||
1860 | { ISD::FP_TO_UINT, MVT::v4i32, MVT::v4f64, 4*4 }, | ||||||
1861 | |||||||
1862 | { ISD::FP_EXTEND, MVT::v4f64, MVT::v4f32, 1 }, | ||||||
1863 | { ISD::FP_ROUND, MVT::v4f32, MVT::v4f64, 1 }, | ||||||
1864 | }; | ||||||
1865 | |||||||
1866 | static const TypeConversionCostTblEntry SSE41ConversionTbl[] = { | ||||||
1867 | { ISD::ZERO_EXTEND, MVT::v4i64, MVT::v4i8, 2 }, | ||||||
1868 | { ISD::SIGN_EXTEND, MVT::v4i64, MVT::v4i8, 2 }, | ||||||
1869 | { ISD::ZERO_EXTEND, MVT::v4i64, MVT::v4i16, 2 }, | ||||||
1870 | { ISD::SIGN_EXTEND, MVT::v4i64, MVT::v4i16, 2 }, | ||||||
1871 | { ISD::ZERO_EXTEND, MVT::v4i64, MVT::v4i32, 2 }, | ||||||
1872 | { ISD::SIGN_EXTEND, MVT::v4i64, MVT::v4i32, 2 }, | ||||||
1873 | |||||||
1874 | { ISD::ZERO_EXTEND, MVT::v4i16, MVT::v4i8, 1 }, | ||||||
1875 | { ISD::SIGN_EXTEND, MVT::v4i16, MVT::v4i8, 2 }, | ||||||
1876 | { ISD::ZERO_EXTEND, MVT::v4i32, MVT::v4i8, 1 }, | ||||||
1877 | { ISD::SIGN_EXTEND, MVT::v4i32, MVT::v4i8, 1 }, | ||||||
1878 | { ISD::ZERO_EXTEND, MVT::v8i16, MVT::v8i8, 1 }, | ||||||
1879 | { ISD::SIGN_EXTEND, MVT::v8i16, MVT::v8i8, 1 }, | ||||||
1880 | { ISD::ZERO_EXTEND, MVT::v8i32, MVT::v8i8, 2 }, | ||||||
1881 | { ISD::SIGN_EXTEND, MVT::v8i32, MVT::v8i8, 2 }, | ||||||
1882 | { ISD::ZERO_EXTEND, MVT::v16i16, MVT::v16i8, 2 }, | ||||||
1883 | { ISD::SIGN_EXTEND, MVT::v16i16, MVT::v16i8, 2 }, | ||||||
1884 | { ISD::ZERO_EXTEND, MVT::v16i32, MVT::v16i8, 4 }, | ||||||
1885 | { ISD::SIGN_EXTEND, MVT::v16i32, MVT::v16i8, 4 }, | ||||||
1886 | { ISD::ZERO_EXTEND, MVT::v4i32, MVT::v4i16, 1 }, | ||||||
1887 | { ISD::SIGN_EXTEND, MVT::v4i32, MVT::v4i16, 1 }, | ||||||
1888 | { ISD::ZERO_EXTEND, MVT::v8i32, MVT::v8i16, 2 }, | ||||||
1889 | { ISD::SIGN_EXTEND, MVT::v8i32, MVT::v8i16, 2 }, | ||||||
1890 | { ISD::ZERO_EXTEND, MVT::v16i32, MVT::v16i16, 4 }, | ||||||
1891 | { ISD::SIGN_EXTEND, MVT::v16i32, MVT::v16i16, 4 }, | ||||||
1892 | |||||||
1893 | // These truncates end up widening elements. | ||||||
1894 | { ISD::TRUNCATE, MVT::v2i1, MVT::v2i8, 1 }, // PMOVZXBQ | ||||||
1895 | { ISD::TRUNCATE, MVT::v2i1, MVT::v2i16, 1 }, // PMOVZXWQ | ||||||
1896 | { ISD::TRUNCATE, MVT::v4i1, MVT::v4i8, 1 }, // PMOVZXBD | ||||||
1897 | |||||||
1898 | { ISD::TRUNCATE, MVT::v2i8, MVT::v2i16, 1 }, | ||||||
1899 | { ISD::TRUNCATE, MVT::v4i8, MVT::v4i16, 1 }, | ||||||
1900 | { ISD::TRUNCATE, MVT::v8i8, MVT::v8i16, 1 }, | ||||||
1901 | { ISD::TRUNCATE, MVT::v4i8, MVT::v4i32, 1 }, | ||||||
1902 | { ISD::TRUNCATE, MVT::v4i16, MVT::v4i32, 1 }, | ||||||
1903 | { ISD::TRUNCATE, MVT::v8i8, MVT::v8i32, 3 }, | ||||||
1904 | { ISD::TRUNCATE, MVT::v8i16, MVT::v8i32, 3 }, | ||||||
1905 | { ISD::TRUNCATE, MVT::v16i16, MVT::v16i32, 6 }, | ||||||
1906 | { ISD::TRUNCATE, MVT::v2i8, MVT::v2i64, 1 }, // PSHUFB | ||||||
1907 | |||||||
1908 | { ISD::UINT_TO_FP, MVT::f32, MVT::i64, 4 }, | ||||||
1909 | { ISD::UINT_TO_FP, MVT::f64, MVT::i64, 4 }, | ||||||
1910 | |||||||
1911 | { ISD::FP_TO_SINT, MVT::v2i8, MVT::v2f32, 3 }, | ||||||
1912 | { ISD::FP_TO_SINT, MVT::v2i8, MVT::v2f64, 3 }, | ||||||
1913 | |||||||
1914 | { ISD::FP_TO_UINT, MVT::v2i8, MVT::v2f32, 3 }, | ||||||
1915 | { ISD::FP_TO_UINT, MVT::v2i8, MVT::v2f64, 3 }, | ||||||
1916 | { ISD::FP_TO_UINT, MVT::v4i16, MVT::v4f32, 2 }, | ||||||
1917 | }; | ||||||
1918 | |||||||
1919 | static const TypeConversionCostTblEntry SSE2ConversionTbl[] = { | ||||||
1920 | // These are somewhat magic numbers, justified by looking at the output of | ||||||
1921 | // Intel's IACA, running some kernels, and making sure that, once we take | ||||||
1922 | // legalization into account, the throughput will be overestimated. | ||||||
1923 | { ISD::SINT_TO_FP, MVT::v4f32, MVT::v16i8, 8 }, | ||||||
1924 | { ISD::SINT_TO_FP, MVT::v2f64, MVT::v16i8, 16*10 }, | ||||||
1925 | { ISD::SINT_TO_FP, MVT::v4f32, MVT::v8i16, 15 }, | ||||||
1926 | { ISD::SINT_TO_FP, MVT::v2f64, MVT::v8i16, 8*10 }, | ||||||
1927 | { ISD::SINT_TO_FP, MVT::v4f32, MVT::v4i32, 5 }, | ||||||
1928 | { ISD::SINT_TO_FP, MVT::v2f64, MVT::v4i32, 2*10 }, | ||||||
1929 | { ISD::SINT_TO_FP, MVT::v2f64, MVT::v2i32, 2*10 }, | ||||||
1930 | { ISD::SINT_TO_FP, MVT::v4f32, MVT::v2i64, 15 }, | ||||||
1931 | { ISD::SINT_TO_FP, MVT::v2f64, MVT::v2i64, 2*10 }, | ||||||
1932 | |||||||
1933 | { ISD::UINT_TO_FP, MVT::v2f64, MVT::v16i8, 16*10 }, | ||||||
1934 | { ISD::UINT_TO_FP, MVT::v4f32, MVT::v16i8, 8 }, | ||||||
1935 | { ISD::UINT_TO_FP, MVT::v4f32, MVT::v8i16, 15 }, | ||||||
1936 | { ISD::UINT_TO_FP, MVT::v2f64, MVT::v8i16, 8*10 }, | ||||||
1937 | { ISD::UINT_TO_FP, MVT::v2f64, MVT::v4i32, 4*10 }, | ||||||
1938 | { ISD::UINT_TO_FP, MVT::v4f32, MVT::v4i32, 8 }, | ||||||
1939 | { ISD::UINT_TO_FP, MVT::v2f64, MVT::v2i64, 6 }, | ||||||
1940 | { ISD::UINT_TO_FP, MVT::v4f32, MVT::v2i64, 15 }, | ||||||
1941 | |||||||
1942 | { ISD::FP_TO_SINT, MVT::v2i8, MVT::v2f32, 4 }, | ||||||
1943 | { ISD::FP_TO_SINT, MVT::v2i16, MVT::v2f32, 2 }, | ||||||
1944 | { ISD::FP_TO_SINT, MVT::v4i8, MVT::v4f32, 3 }, | ||||||
1945 | { ISD::FP_TO_SINT, MVT::v4i16, MVT::v4f32, 2 }, | ||||||
1946 | { ISD::FP_TO_SINT, MVT::v2i16, MVT::v2f64, 2 }, | ||||||
1947 | { ISD::FP_TO_SINT, MVT::v2i8, MVT::v2f64, 4 }, | ||||||
1948 | |||||||
1949 | { ISD::FP_TO_SINT, MVT::v2i32, MVT::v2f64, 1 }, | ||||||
1950 | |||||||
1951 | { ISD::UINT_TO_FP, MVT::f32, MVT::i64, 6 }, | ||||||
1952 | { ISD::UINT_TO_FP, MVT::f64, MVT::i64, 6 }, | ||||||
1953 | |||||||
1954 | { ISD::FP_TO_UINT, MVT::i64, MVT::f32, 4 }, | ||||||
1955 | { ISD::FP_TO_UINT, MVT::i64, MVT::f64, 4 }, | ||||||
1956 | { ISD::FP_TO_UINT, MVT::v2i8, MVT::v2f32, 4 }, | ||||||
1957 | { ISD::FP_TO_UINT, MVT::v2i8, MVT::v2f64, 4 }, | ||||||
1958 | { ISD::FP_TO_UINT, MVT::v4i8, MVT::v4f32, 3 }, | ||||||
1959 | { ISD::FP_TO_UINT, MVT::v2i16, MVT::v2f32, 2 }, | ||||||
1960 | { ISD::FP_TO_UINT, MVT::v2i16, MVT::v2f64, 2 }, | ||||||
1961 | { ISD::FP_TO_UINT, MVT::v4i16, MVT::v4f32, 4 }, | ||||||
1962 | |||||||
1963 | { ISD::ZERO_EXTEND, MVT::v4i16, MVT::v4i8, 1 }, | ||||||
1964 | { ISD::SIGN_EXTEND, MVT::v4i16, MVT::v4i8, 6 }, | ||||||
1965 | { ISD::ZERO_EXTEND, MVT::v4i32, MVT::v4i8, 2 }, | ||||||
1966 | { ISD::SIGN_EXTEND, MVT::v4i32, MVT::v4i8, 3 }, | ||||||
1967 | { ISD::ZERO_EXTEND, MVT::v4i64, MVT::v4i8, 4 }, | ||||||
1968 | { ISD::SIGN_EXTEND, MVT::v4i64, MVT::v4i8, 8 }, | ||||||
1969 | { ISD::ZERO_EXTEND, MVT::v8i16, MVT::v8i8, 1 }, | ||||||
1970 | { ISD::SIGN_EXTEND, MVT::v8i16, MVT::v8i8, 2 }, | ||||||
1971 | { ISD::ZERO_EXTEND, MVT::v8i32, MVT::v8i8, 6 }, | ||||||
1972 | { ISD::SIGN_EXTEND, MVT::v8i32, MVT::v8i8, 6 }, | ||||||
1973 | { ISD::ZERO_EXTEND, MVT::v16i16, MVT::v16i8, 3 }, | ||||||
1974 | { ISD::SIGN_EXTEND, MVT::v16i16, MVT::v16i8, 4 }, | ||||||
1975 | { ISD::ZERO_EXTEND, MVT::v16i32, MVT::v16i8, 9 }, | ||||||
1976 | { ISD::SIGN_EXTEND, MVT::v16i32, MVT::v16i8, 12 }, | ||||||
1977 | { ISD::ZERO_EXTEND, MVT::v4i32, MVT::v4i16, 1 }, | ||||||
1978 | { ISD::SIGN_EXTEND, MVT::v4i32, MVT::v4i16, 2 }, | ||||||
1979 | { ISD::ZERO_EXTEND, MVT::v4i64, MVT::v4i16, 3 }, | ||||||
1980 | { ISD::SIGN_EXTEND, MVT::v4i64, MVT::v4i16, 10 }, | ||||||
1981 | { ISD::ZERO_EXTEND, MVT::v8i32, MVT::v8i16, 3 }, | ||||||
1982 | { ISD::SIGN_EXTEND, MVT::v8i32, MVT::v8i16, 4 }, | ||||||
1983 | { ISD::ZERO_EXTEND, MVT::v16i32, MVT::v16i16, 6 }, | ||||||
1984 | { ISD::SIGN_EXTEND, MVT::v16i32, MVT::v16i16, 8 }, | ||||||
1985 | { ISD::ZERO_EXTEND, MVT::v4i64, MVT::v4i32, 3 }, | ||||||
1986 | { ISD::SIGN_EXTEND, MVT::v4i64, MVT::v4i32, 5 }, | ||||||
1987 | |||||||
1988 | // These truncates are really widening elements. | ||||||
1989 | { ISD::TRUNCATE, MVT::v2i1, MVT::v2i32, 1 }, // PSHUFD | ||||||
1990 | { ISD::TRUNCATE, MVT::v2i1, MVT::v2i16, 2 }, // PUNPCKLWD+DQ | ||||||
1991 | { ISD::TRUNCATE, MVT::v2i1, MVT::v2i8, 3 }, // PUNPCKLBW+WD+PSHUFD | ||||||
1992 | { ISD::TRUNCATE, MVT::v4i1, MVT::v4i16, 1 }, // PUNPCKLWD | ||||||
1993 | { ISD::TRUNCATE, MVT::v4i1, MVT::v4i8, 2 }, // PUNPCKLBW+WD | ||||||
1994 | { ISD::TRUNCATE, MVT::v8i1, MVT::v8i8, 1 }, // PUNPCKLBW | ||||||
1995 | |||||||
1996 | { ISD::TRUNCATE, MVT::v2i8, MVT::v2i16, 2 }, // PAND+PACKUSWB | ||||||
1997 | { ISD::TRUNCATE, MVT::v4i8, MVT::v4i16, 2 }, // PAND+PACKUSWB | ||||||
1998 | { ISD::TRUNCATE, MVT::v8i8, MVT::v8i16, 2 }, // PAND+PACKUSWB | ||||||
1999 | { ISD::TRUNCATE, MVT::v16i8, MVT::v16i16, 3 }, | ||||||
2000 | { ISD::TRUNCATE, MVT::v2i8, MVT::v2i32, 3 }, // PAND+2*PACKUSWB | ||||||
2001 | { ISD::TRUNCATE, MVT::v2i16, MVT::v2i32, 1 }, | ||||||
2002 | { ISD::TRUNCATE, MVT::v4i8, MVT::v4i32, 3 }, | ||||||
2003 | { ISD::TRUNCATE, MVT::v4i16, MVT::v4i32, 3 }, | ||||||
2004 | { ISD::TRUNCATE, MVT::v8i8, MVT::v8i32, 4 }, | ||||||
2005 | { ISD::TRUNCATE, MVT::v16i8, MVT::v16i32, 7 }, | ||||||
2006 | { ISD::TRUNCATE, MVT::v8i16, MVT::v8i32, 5 }, | ||||||
2007 | { ISD::TRUNCATE, MVT::v16i16, MVT::v16i32, 10 }, | ||||||
2008 | { ISD::TRUNCATE, MVT::v2i8, MVT::v2i64, 4 }, // PAND+3*PACKUSWB | ||||||
2009 | { ISD::TRUNCATE, MVT::v2i16, MVT::v2i64, 2 }, // PSHUFD+PSHUFLW | ||||||
2010 | { ISD::TRUNCATE, MVT::v2i32, MVT::v2i64, 1 }, // PSHUFD | ||||||
2011 | }; | ||||||
2012 | |||||||
2013 | std::pair<int, MVT> LTSrc = TLI->getTypeLegalizationCost(DL, Src); | ||||||
2014 | std::pair<int, MVT> LTDest = TLI->getTypeLegalizationCost(DL, Dst); | ||||||
2015 | |||||||
2016 | if (ST->hasSSE2() && !ST->hasAVX()) { | ||||||
2017 | if (const auto *Entry = ConvertCostTableLookup(SSE2ConversionTbl, ISD, | ||||||
2018 | LTDest.second, LTSrc.second)) | ||||||
2019 | return AdjustCost(LTSrc.first * Entry->Cost); | ||||||
2020 | } | ||||||
2021 | |||||||
2022 | EVT SrcTy = TLI->getValueType(DL, Src); | ||||||
2023 | EVT DstTy = TLI->getValueType(DL, Dst); | ||||||
2024 | |||||||
2025 | // The function getSimpleVT only handles simple value types. | ||||||
2026 | if (!SrcTy.isSimple() || !DstTy.isSimple()) | ||||||
2027 | return AdjustCost(BaseT::getCastInstrCost(Opcode, Dst, Src, CCH, CostKind)); | ||||||
2028 | |||||||
2029 | MVT SimpleSrcTy = SrcTy.getSimpleVT(); | ||||||
2030 | MVT SimpleDstTy = DstTy.getSimpleVT(); | ||||||
2031 | |||||||
2032 | if (ST->useAVX512Regs()) { | ||||||
2033 | if (ST->hasBWI()) | ||||||
2034 | if (const auto *Entry = ConvertCostTableLookup(AVX512BWConversionTbl, ISD, | ||||||
2035 | SimpleDstTy, SimpleSrcTy)) | ||||||
2036 | return AdjustCost(Entry->Cost); | ||||||
2037 | |||||||
2038 | if (ST->hasDQI()) | ||||||
2039 | if (const auto *Entry = ConvertCostTableLookup(AVX512DQConversionTbl, ISD, | ||||||
2040 | SimpleDstTy, SimpleSrcTy)) | ||||||
2041 | return AdjustCost(Entry->Cost); | ||||||
2042 | |||||||
2043 | if (ST->hasAVX512()) | ||||||
2044 | if (const auto *Entry = ConvertCostTableLookup(AVX512FConversionTbl, ISD, | ||||||
2045 | SimpleDstTy, SimpleSrcTy)) | ||||||
2046 | return AdjustCost(Entry->Cost); | ||||||
2047 | } | ||||||
2048 | |||||||
2049 | if (ST->hasBWI()) | ||||||
2050 | if (const auto *Entry = ConvertCostTableLookup(AVX512BWVLConversionTbl, ISD, | ||||||
2051 | SimpleDstTy, SimpleSrcTy)) | ||||||
2052 | return AdjustCost(Entry->Cost); | ||||||
2053 | |||||||
2054 | if (ST->hasDQI()) | ||||||
2055 | if (const auto *Entry = ConvertCostTableLookup(AVX512DQVLConversionTbl, ISD, | ||||||
2056 | SimpleDstTy, SimpleSrcTy)) | ||||||
2057 | return AdjustCost(Entry->Cost); | ||||||
2058 | |||||||
2059 | if (ST->hasAVX512()) | ||||||
2060 | if (const auto *Entry = ConvertCostTableLookup(AVX512VLConversionTbl, ISD, | ||||||
2061 | SimpleDstTy, SimpleSrcTy)) | ||||||
2062 | return AdjustCost(Entry->Cost); | ||||||
2063 | |||||||
2064 | if (ST->hasAVX2()) { | ||||||
2065 | if (const auto *Entry = ConvertCostTableLookup(AVX2ConversionTbl, ISD, | ||||||
2066 | SimpleDstTy, SimpleSrcTy)) | ||||||
2067 | return AdjustCost(Entry->Cost); | ||||||
2068 | } | ||||||
2069 | |||||||
2070 | if (ST->hasAVX()) { | ||||||
2071 | if (const auto *Entry = ConvertCostTableLookup(AVXConversionTbl, ISD, | ||||||
2072 | SimpleDstTy, SimpleSrcTy)) | ||||||
2073 | return AdjustCost(Entry->Cost); | ||||||
2074 | } | ||||||
2075 | |||||||
2076 | if (ST->hasSSE41()) { | ||||||
2077 | if (const auto *Entry = ConvertCostTableLookup(SSE41ConversionTbl, ISD, | ||||||
2078 | SimpleDstTy, SimpleSrcTy)) | ||||||
2079 | return AdjustCost(Entry->Cost); | ||||||
2080 | } | ||||||
2081 | |||||||
2082 | if (ST->hasSSE2()) { | ||||||
2083 | if (const auto *Entry = ConvertCostTableLookup(SSE2ConversionTbl, ISD, | ||||||
2084 | SimpleDstTy, SimpleSrcTy)) | ||||||
2085 | return AdjustCost(Entry->Cost); | ||||||
2086 | } | ||||||
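// A rough sketch of how the lookups above compose (assuming an SSE2-only
// target, i.e. none of the earlier tables apply): a zext from <8 x i8> to
// <8 x i32> matches the SSE2 table's ZERO_EXTEND v8i32 <- v8i8 entry
// (cost 6) via the simple-type lookup, and that value is returned through
// AdjustCost.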
2087 | |||||||
2088 | return AdjustCost( | ||||||
2089 | BaseT::getCastInstrCost(Opcode, Dst, Src, CCH, CostKind, I)); | ||||||
2090 | } | ||||||
2091 | |||||||
2092 | InstructionCost X86TTIImpl::getCmpSelInstrCost(unsigned Opcode, Type *ValTy, | ||||||
2093 | Type *CondTy, | ||||||
2094 | CmpInst::Predicate VecPred, | ||||||
2095 | TTI::TargetCostKind CostKind, | ||||||
2096 | const Instruction *I) { | ||||||
2097 | // TODO: Handle other cost kinds. | ||||||
2098 | if (CostKind != TTI::TCK_RecipThroughput) | ||||||
2099 | return BaseT::getCmpSelInstrCost(Opcode, ValTy, CondTy, VecPred, CostKind, | ||||||
2100 | I); | ||||||
2101 | |||||||
2102 | // Legalize the type. | ||||||
2103 | std::pair<int, MVT> LT = TLI->getTypeLegalizationCost(DL, ValTy); | ||||||
2104 | |||||||
2105 | MVT MTy = LT.second; | ||||||
2106 | |||||||
2107 | int ISD = TLI->InstructionOpcodeToISD(Opcode); | ||||||
2108 | assert(ISD && "Invalid opcode"); | ||||||
2109 | |||||||
2110 | unsigned ExtraCost = 0; | ||||||
2111 | if (I && (Opcode == Instruction::ICmp || Opcode == Instruction::FCmp)) { | ||||||
2112 | // Some vector comparison predicates cost extra instructions. | ||||||
2113 | if (MTy.isVector() && | ||||||
2114 | !((ST->hasXOP() && (!ST->hasAVX2() || MTy.is128BitVector())) || | ||||||
2115 | (ST->hasAVX512() && 32 <= MTy.getScalarSizeInBits()) || | ||||||
2116 | ST->hasBWI())) { | ||||||
2117 | switch (cast<CmpInst>(I)->getPredicate()) { | ||||||
2118 | case CmpInst::Predicate::ICMP_NE: | ||||||
2119 | // xor(cmpeq(x,y),-1) | ||||||
2120 | ExtraCost = 1; | ||||||
2121 | break; | ||||||
2122 | case CmpInst::Predicate::ICMP_SGE: | ||||||
2123 | case CmpInst::Predicate::ICMP_SLE: | ||||||
2124 | // xor(cmpgt(x,y),-1) | ||||||
2125 | ExtraCost = 1; | ||||||
2126 | break; | ||||||
2127 | case CmpInst::Predicate::ICMP_ULT: | ||||||
2128 | case CmpInst::Predicate::ICMP_UGT: | ||||||
2129 | // cmpgt(xor(x,signbit),xor(y,signbit)) | ||||||
2130 | // xor(cmpeq(pmaxu(x,y),x),-1) | ||||||
2131 | ExtraCost = 2; | ||||||
2132 | break; | ||||||
2133 | case CmpInst::Predicate::ICMP_ULE: | ||||||
2134 | case CmpInst::Predicate::ICMP_UGE: | ||||||
2135 | if ((ST->hasSSE41() && MTy.getScalarSizeInBits() == 32) || | ||||||
2136 | (ST->hasSSE2() && MTy.getScalarSizeInBits() < 32)) { | ||||||
2137 | // cmpeq(psubus(x,y),0) | ||||||
2138 | // cmpeq(pminu(x,y),x) | ||||||
2139 | ExtraCost = 1; | ||||||
2140 | } else { | ||||||
2141 | // xor(cmpgt(xor(x,signbit),xor(y,signbit)),-1) | ||||||
2142 | ExtraCost = 3; | ||||||
2143 | } | ||||||
2144 | break; | ||||||
2145 | default: | ||||||
2146 | break; | ||||||
2147 | } | ||||||
2148 | } | ||||||
2149 | } | ||||||
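// Rough illustration of how ExtraCost composes with the tables below
// (a sketch, assuming a plain SSE2 target and a provided icmp instruction):
// an ICMP_UGT on <4 x i32> takes the ICMP_ULT/ICMP_UGT case above
// (ExtraCost = 2), the SSE2 SETCC v4i32 entry below is 1, so the returned
// cost is LT.first * (ExtraCost + Entry->Cost) = 1 * (2 + 1) = 3.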
2150 | |||||||
2151 | static const CostTblEntry SLMCostTbl[] = { | ||||||
2152 | // slm pcmpeq/pcmpgt throughput is 2 | ||||||
2153 | { ISD::SETCC, MVT::v2i64, 2 }, | ||||||
2154 | }; | ||||||
2155 | |||||||
2156 | static const CostTblEntry AVX512BWCostTbl[] = { | ||||||
2157 | { ISD::SETCC, MVT::v32i16, 1 }, | ||||||
2158 | { ISD::SETCC, MVT::v64i8, 1 }, | ||||||
2159 | |||||||
2160 | { ISD::SELECT, MVT::v32i16, 1 }, | ||||||
2161 | { ISD::SELECT, MVT::v64i8, 1 }, | ||||||
2162 | }; | ||||||
2163 | |||||||
2164 | static const CostTblEntry AVX512CostTbl[] = { | ||||||
2165 | { ISD::SETCC, MVT::v8i64, 1 }, | ||||||
2166 | { ISD::SETCC, MVT::v16i32, 1 }, | ||||||
2167 | { ISD::SETCC, MVT::v8f64, 1 }, | ||||||
2168 | { ISD::SETCC, MVT::v16f32, 1 }, | ||||||
2169 | |||||||
2170 | { ISD::SELECT, MVT::v8i64, 1 }, | ||||||
2171 | { ISD::SELECT, MVT::v16i32, 1 }, | ||||||
2172 | { ISD::SELECT, MVT::v8f64, 1 }, | ||||||
2173 | { ISD::SELECT, MVT::v16f32, 1 }, | ||||||
2174 | |||||||
2175 | { ISD::SETCC, MVT::v32i16, 2 }, // FIXME: should probably be 4 | ||||||
2176 | { ISD::SETCC, MVT::v64i8, 2 }, // FIXME: should probably be 4 | ||||||
2177 | |||||||
2178 | { ISD::SELECT, MVT::v32i16, 2 }, // FIXME: should be 3 | ||||||
2179 | { ISD::SELECT, MVT::v64i8, 2 }, // FIXME: should be 3 | ||||||
2180 | }; | ||||||
2181 | |||||||
2182 | static const CostTblEntry AVX2CostTbl[] = { | ||||||
2183 | { ISD::SETCC, MVT::v4i64, 1 }, | ||||||
2184 | { ISD::SETCC, MVT::v8i32, 1 }, | ||||||
2185 | { ISD::SETCC, MVT::v16i16, 1 }, | ||||||
2186 | { ISD::SETCC, MVT::v32i8, 1 }, | ||||||
2187 | |||||||
2188 | { ISD::SELECT, MVT::v4i64, 1 }, // pblendvb | ||||||
2189 | { ISD::SELECT, MVT::v8i32, 1 }, // pblendvb | ||||||
2190 | { ISD::SELECT, MVT::v16i16, 1 }, // pblendvb | ||||||
2191 | { ISD::SELECT, MVT::v32i8, 1 }, // pblendvb | ||||||
2192 | }; | ||||||
2193 | |||||||
2194 | static const CostTblEntry AVX1CostTbl[] = { | ||||||
2195 | { ISD::SETCC, MVT::v4f64, 1 }, | ||||||
2196 | { ISD::SETCC, MVT::v8f32, 1 }, | ||||||
2197 | // AVX1 does not support 8-wide integer compare. | ||||||
2198 | { ISD::SETCC, MVT::v4i64, 4 }, | ||||||
2199 | { ISD::SETCC, MVT::v8i32, 4 }, | ||||||
2200 | { ISD::SETCC, MVT::v16i16, 4 }, | ||||||
2201 | { ISD::SETCC, MVT::v32i8, 4 }, | ||||||
2202 | |||||||
2203 | { ISD::SELECT, MVT::v4f64, 1 }, // vblendvpd | ||||||
2204 | { ISD::SELECT, MVT::v8f32, 1 }, // vblendvps | ||||||
2205 | { ISD::SELECT, MVT::v4i64, 1 }, // vblendvpd | ||||||
2206 | { ISD::SELECT, MVT::v8i32, 1 }, // vblendvps | ||||||
2207 | { ISD::SELECT, MVT::v16i16, 3 }, // vandps + vandnps + vorps | ||||||
2208 | { ISD::SELECT, MVT::v32i8, 3 }, // vandps + vandnps + vorps | ||||||
2209 | }; | ||||||
2210 | |||||||
2211 | static const CostTblEntry SSE42CostTbl[] = { | ||||||
2212 | { ISD::SETCC, MVT::v2f64, 1 }, | ||||||
2213 | { ISD::SETCC, MVT::v4f32, 1 }, | ||||||
2214 | { ISD::SETCC, MVT::v2i64, 1 }, | ||||||
2215 | }; | ||||||
2216 | |||||||
2217 | static const CostTblEntry SSE41CostTbl[] = { | ||||||
2218 | { ISD::SELECT, MVT::v2f64, 1 }, // blendvpd | ||||||
2219 | { ISD::SELECT, MVT::v4f32, 1 }, // blendvps | ||||||
2220 | { ISD::SELECT, MVT::v2i64, 1 }, // pblendvb | ||||||
2221 | { ISD::SELECT, MVT::v4i32, 1 }, // pblendvb | ||||||
2222 | { ISD::SELECT, MVT::v8i16, 1 }, // pblendvb | ||||||
2223 | { ISD::SELECT, MVT::v16i8, 1 }, // pblendvb | ||||||
2224 | }; | ||||||
2225 | |||||||
2226 | static const CostTblEntry SSE2CostTbl[] = { | ||||||
2227 | { ISD::SETCC, MVT::v2f64, 2 }, | ||||||
2228 | { ISD::SETCC, MVT::f64, 1 }, | ||||||
2229 | { ISD::SETCC, MVT::v2i64, 8 }, | ||||||
2230 | { ISD::SETCC, MVT::v4i32, 1 }, | ||||||
2231 | { ISD::SETCC, MVT::v8i16, 1 }, | ||||||
2232 | { ISD::SETCC, MVT::v16i8, 1 }, | ||||||
2233 | |||||||
2234 | { ISD::SELECT, MVT::v2f64, 3 }, // andpd + andnpd + orpd | ||||||
2235 | { ISD::SELECT, MVT::v2i64, 3 }, // pand + pandn + por | ||||||
2236 | { ISD::SELECT, MVT::v4i32, 3 }, // pand + pandn + por | ||||||
2237 | { ISD::SELECT, MVT::v8i16, 3 }, // pand + pandn + por | ||||||
2238 | { ISD::SELECT, MVT::v16i8, 3 }, // pand + pandn + por | ||||||
2239 | }; | ||||||
2240 | |||||||
2241 | static const CostTblEntry SSE1CostTbl[] = { | ||||||
2242 | { ISD::SETCC, MVT::v4f32, 2 }, | ||||||
2243 | { ISD::SETCC, MVT::f32, 1 }, | ||||||
2244 | |||||||
2245 | { ISD::SELECT, MVT::v4f32, 3 }, // andps + andnps + orps | ||||||
2246 | }; | ||||||
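// Example of how legalization splitting feeds into the lookups below
// (a sketch, assuming an SSE2-only target): an ICMP_EQ on <16 x i32>
// legalizes to four v4i32 compares, so LT.first = 4 and MTy = v4i32, and
// the SSE2 SETCC v4i32 entry gives 4 * (0 + 1) = 4.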
2247 | |||||||
2248 | if (ST->isSLM()) | ||||||
2249 | if (const auto *Entry = CostTableLookup(SLMCostTbl, ISD, MTy)) | ||||||
2250 | return LT.first * (ExtraCost + Entry->Cost); | ||||||
2251 | |||||||
2252 | if (ST->hasBWI()) | ||||||
2253 | if (const auto *Entry = CostTableLookup(AVX512BWCostTbl, ISD, MTy)) | ||||||
2254 | return LT.first * (ExtraCost + Entry->Cost); | ||||||
2255 | |||||||
2256 | if (ST->hasAVX512()) | ||||||
2257 | if (const auto *Entry = CostTableLookup(AVX512CostTbl, ISD, MTy)) | ||||||
2258 | return LT.first * (ExtraCost + Entry->Cost); | ||||||
2259 | |||||||
2260 | if (ST->hasAVX2()) | ||||||
2261 | if (const auto *Entry = CostTableLookup(AVX2CostTbl, ISD, MTy)) | ||||||
2262 | return LT.first * (ExtraCost + Entry->Cost); | ||||||
2263 | |||||||
2264 | if (ST->hasAVX()) | ||||||
2265 | if (const auto *Entry = CostTableLookup(AVX1CostTbl, ISD, MTy)) | ||||||
2266 | return LT.first * (ExtraCost + Entry->Cost); | ||||||
2267 | |||||||
2268 | if (ST->hasSSE42()) | ||||||
2269 | if (const auto *Entry = CostTableLookup(SSE42CostTbl, ISD, MTy)) | ||||||
2270 | return LT.first * (ExtraCost + Entry->Cost); | ||||||
2271 | |||||||
2272 | if (ST->hasSSE41()) | ||||||
2273 | if (const auto *Entry = CostTableLookup(SSE41CostTbl, ISD, MTy)) | ||||||
2274 | return LT.first * (ExtraCost + Entry->Cost); | ||||||
2275 | |||||||
2276 | if (ST->hasSSE2()) | ||||||
2277 | if (const auto *Entry = CostTableLookup(SSE2CostTbl, ISD, MTy)) | ||||||
2278 | return LT.first * (ExtraCost + Entry->Cost); | ||||||
2279 | |||||||
2280 | if (ST->hasSSE1()) | ||||||
2281 | if (const auto *Entry = CostTableLookup(SSE1CostTbl, ISD, MTy)) | ||||||
2282 | return LT.first * (ExtraCost + Entry->Cost); | ||||||
2283 | |||||||
2284 | return BaseT::getCmpSelInstrCost(Opcode, ValTy, CondTy, VecPred, CostKind, I); | ||||||
2285 | } | ||||||
2286 | |||||||
2287 | unsigned X86TTIImpl::getAtomicMemIntrinsicMaxElementSize() const { return 16; } | ||||||
2288 | |||||||
2289 | InstructionCost | ||||||
2290 | X86TTIImpl::getTypeBasedIntrinsicInstrCost(const IntrinsicCostAttributes &ICA, | ||||||
2291 | TTI::TargetCostKind CostKind) { | ||||||
2292 | |||||||
2293 | // Costs should match the codegen from: | ||||||
2294 | // BITREVERSE: llvm\test\CodeGen\X86\vector-bitreverse.ll | ||||||
2295 | // BSWAP: llvm\test\CodeGen\X86\bswap-vector.ll | ||||||
2296 | // CTLZ: llvm\test\CodeGen\X86\vector-lzcnt-*.ll | ||||||
2297 | // CTPOP: llvm\test\CodeGen\X86\vector-popcnt-*.ll | ||||||
2298 | // CTTZ: llvm\test\CodeGen\X86\vector-tzcnt-*.ll | ||||||
2299 | |||||||
2300 | // TODO: Overflow intrinsics (*ADDO, *SUBO, *MULO) with vector types are not | ||||||
2301 | // specialized in these tables yet. | ||||||
2302 | static const CostTblEntry AVX512CDCostTbl[] = { | ||||||
2303 | { ISD::CTLZ, MVT::v8i64, 1 }, | ||||||
2304 | { ISD::CTLZ, MVT::v16i32, 1 }, | ||||||
2305 | { ISD::CTLZ, MVT::v32i16, 8 }, | ||||||
2306 | { ISD::CTLZ, MVT::v64i8, 20 }, | ||||||
2307 | { ISD::CTLZ, MVT::v4i64, 1 }, | ||||||
2308 | { ISD::CTLZ, MVT::v8i32, 1 }, | ||||||
2309 | { ISD::CTLZ, MVT::v16i16, 4 }, | ||||||
2310 | { ISD::CTLZ, MVT::v32i8, 10 }, | ||||||
2311 | { ISD::CTLZ, MVT::v2i64, 1 }, | ||||||
2312 | { ISD::CTLZ, MVT::v4i32, 1 }, | ||||||
2313 | { ISD::CTLZ, MVT::v8i16, 4 }, | ||||||
2314 | { ISD::CTLZ, MVT::v16i8, 4 }, | ||||||
2315 | }; | ||||||
2316 | static const CostTblEntry AVX512BWCostTbl[] = { | ||||||
2317 | { ISD::ABS, MVT::v32i16, 1 }, | ||||||
2318 | { ISD::ABS, MVT::v64i8, 1 }, | ||||||
2319 | { ISD::BITREVERSE, MVT::v8i64, 5 }, | ||||||
2320 | { ISD::BITREVERSE, MVT::v16i32, 5 }, | ||||||
2321 | { ISD::BITREVERSE, MVT::v32i16, 5 }, | ||||||
2322 | { ISD::BITREVERSE, MVT::v64i8, 5 }, | ||||||
2323 | { ISD::CTLZ, MVT::v8i64, 23 }, | ||||||
2324 | { ISD::CTLZ, MVT::v16i32, 22 }, | ||||||
2325 | { ISD::CTLZ, MVT::v32i16, 18 }, | ||||||
2326 | { ISD::CTLZ, MVT::v64i8, 17 }, | ||||||
2327 | { ISD::CTPOP, MVT::v8i64, 7 }, | ||||||
2328 | { ISD::CTPOP, MVT::v16i32, 11 }, | ||||||
2329 | { ISD::CTPOP, MVT::v32i16, 9 }, | ||||||
2330 | { ISD::CTPOP, MVT::v64i8, 6 }, | ||||||
2331 | { ISD::CTTZ, MVT::v8i64, 10 }, | ||||||
2332 | { ISD::CTTZ, MVT::v16i32, 14 }, | ||||||
2333 | { ISD::CTTZ, MVT::v32i16, 12 }, | ||||||
2334 | { ISD::CTTZ, MVT::v64i8, 9 }, | ||||||
2335 | { ISD::SADDSAT, MVT::v32i16, 1 }, | ||||||
2336 | { ISD::SADDSAT, MVT::v64i8, 1 }, | ||||||
2337 | { ISD::SMAX, MVT::v32i16, 1 }, | ||||||
2338 | { ISD::SMAX, MVT::v64i8, 1 }, | ||||||
2339 | { ISD::SMIN, MVT::v32i16, 1 }, | ||||||
2340 | { ISD::SMIN, MVT::v64i8, 1 }, | ||||||
2341 | { ISD::SSUBSAT, MVT::v32i16, 1 }, | ||||||
2342 | { ISD::SSUBSAT, MVT::v64i8, 1 }, | ||||||
2343 | { ISD::UADDSAT, MVT::v32i16, 1 }, | ||||||
2344 | { ISD::UADDSAT, MVT::v64i8, 1 }, | ||||||
2345 | { ISD::UMAX, MVT::v32i16, 1 }, | ||||||
2346 | { ISD::UMAX, MVT::v64i8, 1 }, | ||||||
2347 | { ISD::UMIN, MVT::v32i16, 1 }, | ||||||
2348 | { ISD::UMIN, MVT::v64i8, 1 }, | ||||||
2349 | { ISD::USUBSAT, MVT::v32i16, 1 }, | ||||||
2350 | { ISD::USUBSAT, MVT::v64i8, 1 }, | ||||||
2351 | }; | ||||||
2352 | static const CostTblEntry AVX512CostTbl[] = { | ||||||
2353 | { ISD::ABS, MVT::v8i64, 1 }, | ||||||
2354 | { ISD::ABS, MVT::v16i32, 1 }, | ||||||
2355 | { ISD::ABS, MVT::v32i16, 2 }, // FIXME: include split | ||||||
2356 | { ISD::ABS, MVT::v64i8, 2 }, // FIXME: include split | ||||||
2357 | { ISD::ABS, MVT::v4i64, 1 }, | ||||||
2358 | { ISD::ABS, MVT::v2i64, 1 }, | ||||||
2359 | { ISD::BITREVERSE, MVT::v8i64, 36 }, | ||||||
2360 | { ISD::BITREVERSE, MVT::v16i32, 24 }, | ||||||
2361 | { ISD::BITREVERSE, MVT::v32i16, 10 }, | ||||||
2362 | { ISD::BITREVERSE, MVT::v64i8, 10 }, | ||||||
2363 | { ISD::CTLZ, MVT::v8i64, 29 }, | ||||||
2364 | { ISD::CTLZ, MVT::v16i32, 35 }, | ||||||
2365 | { ISD::CTLZ, MVT::v32i16, 28 }, | ||||||
2366 | { ISD::CTLZ, MVT::v64i8, 18 }, | ||||||
2367 | { ISD::CTPOP, MVT::v8i64, 16 }, | ||||||
2368 | { ISD::CTPOP, MVT::v16i32, 24 }, | ||||||
2369 | { ISD::CTPOP, MVT::v32i16, 18 }, | ||||||
2370 | { ISD::CTPOP, MVT::v64i8, 12 }, | ||||||
2371 | { ISD::CTTZ, MVT::v8i64, 20 }, | ||||||
2372 | { ISD::CTTZ, MVT::v16i32, 28 }, | ||||||
2373 | { ISD::CTTZ, MVT::v32i16, 24 }, | ||||||
2374 | { ISD::CTTZ, MVT::v64i8, 18 }, | ||||||
2375 | { ISD::SMAX, MVT::v8i64, 1 }, | ||||||
2376 | { ISD::SMAX, MVT::v16i32, 1 }, | ||||||
2377 | { ISD::SMAX, MVT::v32i16, 2 }, // FIXME: include split | ||||||
2378 | { ISD::SMAX, MVT::v64i8, 2 }, // FIXME: include split | ||||||
2379 | { ISD::SMAX, MVT::v4i64, 1 }, | ||||||
2380 | { ISD::SMAX, MVT::v2i64, 1 }, | ||||||
2381 | { ISD::SMIN, MVT::v8i64, 1 }, | ||||||
2382 | { ISD::SMIN, MVT::v16i32, 1 }, | ||||||
2383 | { ISD::SMIN, MVT::v32i16, 2 }, // FIXME: include split | ||||||
2384 | { ISD::SMIN, MVT::v64i8, 2 }, // FIXME: include split | ||||||
2385 | { ISD::SMIN, MVT::v4i64, 1 }, | ||||||
2386 | { ISD::SMIN, MVT::v2i64, 1 }, | ||||||
2387 | { ISD::UMAX, MVT::v8i64, 1 }, | ||||||
2388 | { ISD::UMAX, MVT::v16i32, 1 }, | ||||||
2389 | { ISD::UMAX, MVT::v32i16, 2 }, // FIXME: include split | ||||||
2390 | { ISD::UMAX, MVT::v64i8, 2 }, // FIXME: include split | ||||||
2391 | { ISD::UMAX, MVT::v4i64, 1 }, | ||||||
2392 | { ISD::UMAX, MVT::v2i64, 1 }, | ||||||
2393 | { ISD::UMIN, MVT::v8i64, 1 }, | ||||||
2394 | { ISD::UMIN, MVT::v16i32, 1 }, | ||||||
2395 | { ISD::UMIN, MVT::v32i16, 2 }, // FIXME: include split | ||||||
2396 | { ISD::UMIN, MVT::v64i8, 2 }, // FIXME: include split | ||||||
2397 | { ISD::UMIN, MVT::v4i64, 1 }, | ||||||
2398 | { ISD::UMIN, MVT::v2i64, 1 }, | ||||||
2399 | { ISD::USUBSAT, MVT::v16i32, 2 }, // pmaxud + psubd | ||||||
2400 | { ISD::USUBSAT, MVT::v2i64, 2 }, // pmaxuq + psubq | ||||||
2401 | { ISD::USUBSAT, MVT::v4i64, 2 }, // pmaxuq + psubq | ||||||
2402 | { ISD::USUBSAT, MVT::v8i64, 2 }, // pmaxuq + psubq | ||||||
2403 | { ISD::UADDSAT, MVT::v16i32, 3 }, // not + pminud + paddd | ||||||
2404 | { ISD::UADDSAT, MVT::v2i64, 3 }, // not + pminuq + paddq | ||||||
2405 | { ISD::UADDSAT, MVT::v4i64, 3 }, // not + pminuq + paddq | ||||||
2406 | { ISD::UADDSAT, MVT::v8i64, 3 }, // not + pminuq + paddq | ||||||
2407 | { ISD::SADDSAT, MVT::v32i16, 2 }, // FIXME: include split | ||||||
2408 | { ISD::SADDSAT, MVT::v64i8, 2 }, // FIXME: include split | ||||||
2409 | { ISD::SSUBSAT, MVT::v32i16, 2 }, // FIXME: include split | ||||||
2410 | { ISD::SSUBSAT, MVT::v64i8, 2 }, // FIXME: include split | ||||||
2411 | { ISD::UADDSAT, MVT::v32i16, 2 }, // FIXME: include split | ||||||
2412 | { ISD::UADDSAT, MVT::v64i8, 2 }, // FIXME: include split | ||||||
2413 | { ISD::USUBSAT, MVT::v32i16, 2 }, // FIXME: include split | ||||||
2414 | { ISD::USUBSAT, MVT::v64i8, 2 }, // FIXME: include split | ||||||
2415 | { ISD::FMAXNUM, MVT::f32, 2 }, | ||||||
2416 | { ISD::FMAXNUM, MVT::v4f32, 2 }, | ||||||
2417 | { ISD::FMAXNUM, MVT::v8f32, 2 }, | ||||||
2418 | { ISD::FMAXNUM, MVT::v16f32, 2 }, | ||||||
2419 | { ISD::FMAXNUM, MVT::f64, 2 }, | ||||||
2420 | { ISD::FMAXNUM, MVT::v2f64, 2 }, | ||||||
2421 | { ISD::FMAXNUM, MVT::v4f64, 2 }, | ||||||
2422 | { ISD::FMAXNUM, MVT::v8f64, 2 }, | ||||||
2423 | }; | ||||||
2424 | static const CostTblEntry XOPCostTbl[] = { | ||||||
2425 | { ISD::BITREVERSE, MVT::v4i64, 4 }, | ||||||
2426 | { ISD::BITREVERSE, MVT::v8i32, 4 }, | ||||||
2427 | { ISD::BITREVERSE, MVT::v16i16, 4 }, | ||||||
2428 | { ISD::BITREVERSE, MVT::v32i8, 4 }, | ||||||
2429 | { ISD::BITREVERSE, MVT::v2i64, 1 }, | ||||||
2430 | { ISD::BITREVERSE, MVT::v4i32, 1 }, | ||||||
2431 | { ISD::BITREVERSE, MVT::v8i16, 1 }, | ||||||
2432 | { ISD::BITREVERSE, MVT::v16i8, 1 }, | ||||||
2433 | { ISD::BITREVERSE, MVT::i64, 3 }, | ||||||
2434 | { ISD::BITREVERSE, MVT::i32, 3 }, | ||||||
2435 | { ISD::BITREVERSE, MVT::i16, 3 }, | ||||||
2436 | { ISD::BITREVERSE, MVT::i8, 3 } | ||||||
2437 | }; | ||||||
2438 | static const CostTblEntry AVX2CostTbl[] = { | ||||||
2439 | { ISD::ABS, MVT::v4i64, 2 }, // VBLENDVPD(X,VPSUBQ(0,X),X) | ||||||
2440 | { ISD::ABS, MVT::v8i32, 1 }, | ||||||
2441 | { ISD::ABS, MVT::v16i16, 1 }, | ||||||
2442 | { ISD::ABS, MVT::v32i8, 1 }, | ||||||
2443 | { ISD::BITREVERSE, MVT::v4i64, 5 }, | ||||||
2444 | { ISD::BITREVERSE, MVT::v8i32, 5 }, | ||||||
2445 | { ISD::BITREVERSE, MVT::v16i16, 5 }, | ||||||
2446 | { ISD::BITREVERSE, MVT::v32i8, 5 }, | ||||||
2447 | { ISD::BSWAP, MVT::v4i64, 1 }, | ||||||
2448 | { ISD::BSWAP, MVT::v8i32, 1 }, | ||||||
2449 | { ISD::BSWAP, MVT::v16i16, 1 }, | ||||||
2450 | { ISD::CTLZ, MVT::v4i64, 23 }, | ||||||
2451 | { ISD::CTLZ, MVT::v8i32, 18 }, | ||||||
2452 | { ISD::CTLZ, MVT::v16i16, 14 }, | ||||||
2453 | { ISD::CTLZ, MVT::v32i8, 9 }, | ||||||
2454 | { ISD::CTPOP, MVT::v4i64, 7 }, | ||||||
2455 | { ISD::CTPOP, MVT::v8i32, 11 }, | ||||||
2456 | { ISD::CTPOP, MVT::v16i16, 9 }, | ||||||
2457 | { ISD::CTPOP, MVT::v32i8, 6 }, | ||||||
2458 | { ISD::CTTZ, MVT::v4i64, 10 }, | ||||||
2459 | { ISD::CTTZ, MVT::v8i32, 14 }, | ||||||
2460 | { ISD::CTTZ, MVT::v16i16, 12 }, | ||||||
2461 | { ISD::CTTZ, MVT::v32i8, 9 }, | ||||||
2462 | { ISD::SADDSAT, MVT::v16i16, 1 }, | ||||||
2463 | { ISD::SADDSAT, MVT::v32i8, 1 }, | ||||||
2464 | { ISD::SMAX, MVT::v8i32, 1 }, | ||||||
2465 | { ISD::SMAX, MVT::v16i16, 1 }, | ||||||
2466 | { ISD::SMAX, MVT::v32i8, 1 }, | ||||||
2467 | { ISD::SMIN, MVT::v8i32, 1 }, | ||||||
2468 | { ISD::SMIN, MVT::v16i16, 1 }, | ||||||
2469 | { ISD::SMIN, MVT::v32i8, 1 }, | ||||||
2470 | { ISD::SSUBSAT, MVT::v16i16, 1 }, | ||||||
2471 | { ISD::SSUBSAT, MVT::v32i8, 1 }, | ||||||
2472 | { ISD::UADDSAT, MVT::v16i16, 1 }, | ||||||
2473 | { ISD::UADDSAT, MVT::v32i8, 1 }, | ||||||
2474 | { ISD::UADDSAT, MVT::v8i32, 3 }, // not + pminud + paddd | ||||||
2475 | { ISD::UMAX, MVT::v8i32, 1 }, | ||||||
2476 | { ISD::UMAX, MVT::v16i16, 1 }, | ||||||
2477 | { ISD::UMAX, MVT::v32i8, 1 }, | ||||||
2478 | { ISD::UMIN, MVT::v8i32, 1 }, | ||||||
2479 | { ISD::UMIN, MVT::v16i16, 1 }, | ||||||
2480 | { ISD::UMIN, MVT::v32i8, 1 }, | ||||||
2481 | { ISD::USUBSAT, MVT::v16i16, 1 }, | ||||||
2482 | { ISD::USUBSAT, MVT::v32i8, 1 }, | ||||||
2483 | { ISD::USUBSAT, MVT::v8i32, 2 }, // pmaxud + psubd | ||||||
2484 | { ISD::FMAXNUM, MVT::v8f32, 3 }, // MAXPS + CMPUNORDPS + BLENDVPS | ||||||
2485 | { ISD::FMAXNUM, MVT::v4f64, 3 }, // MAXPD + CMPUNORDPD + BLENDVPD | ||||||
2486 | { ISD::FSQRT, MVT::f32, 7 }, // Haswell from http://www.agner.org/ | ||||||
2487 | { ISD::FSQRT, MVT::v4f32, 7 }, // Haswell from http://www.agner.org/ | ||||||
2488 | { ISD::FSQRT, MVT::v8f32, 14 }, // Haswell from http://www.agner.org/ | ||||||
2489 | { ISD::FSQRT, MVT::f64, 14 }, // Haswell from http://www.agner.org/ | ||||||
2490 | { ISD::FSQRT, MVT::v2f64, 14 }, // Haswell from http://www.agner.org/ | ||||||
2491 | { ISD::FSQRT, MVT::v4f64, 28 }, // Haswell from http://www.agner.org/ | ||||||
2492 | }; | ||||||
2493 | static const CostTblEntry AVX1CostTbl[] = { | ||||||
2494 | { ISD::ABS, MVT::v4i64, 5 }, // VBLENDVPD(X,VPSUBQ(0,X),X) | ||||||
2495 | { ISD::ABS, MVT::v8i32, 3 }, | ||||||
2496 | { ISD::ABS, MVT::v16i16, 3 }, | ||||||
2497 | { ISD::ABS, MVT::v32i8, 3 }, | ||||||
2498 | { ISD::BITREVERSE, MVT::v4i64, 12 }, // 2 x 128-bit Op + extract/insert | ||||||
2499 | { ISD::BITREVERSE, MVT::v8i32, 12 }, // 2 x 128-bit Op + extract/insert | ||||||
2500 | { ISD::BITREVERSE, MVT::v16i16, 12 }, // 2 x 128-bit Op + extract/insert | ||||||
2501 | { ISD::BITREVERSE, MVT::v32i8, 12 }, // 2 x 128-bit Op + extract/insert | ||||||
2502 | { ISD::BSWAP, MVT::v4i64, 4 }, | ||||||
2503 | { ISD::BSWAP, MVT::v8i32, 4 }, | ||||||
2504 | { ISD::BSWAP, MVT::v16i16, 4 }, | ||||||
2505 | { ISD::CTLZ, MVT::v4i64, 48 }, // 2 x 128-bit Op + extract/insert | ||||||
2506 | { ISD::CTLZ, MVT::v8i32, 38 }, // 2 x 128-bit Op + extract/insert | ||||||
2507 | { ISD::CTLZ, MVT::v16i16, 30 }, // 2 x 128-bit Op + extract/insert | ||||||
2508 | { ISD::CTLZ, MVT::v32i8, 20 }, // 2 x 128-bit Op + extract/insert | ||||||
2509 | { ISD::CTPOP, MVT::v4i64, 16 }, // 2 x 128-bit Op + extract/insert | ||||||
2510 | { ISD::CTPOP, MVT::v8i32, 24 }, // 2 x 128-bit Op + extract/insert | ||||||
2511 | { ISD::CTPOP, MVT::v16i16, 20 }, // 2 x 128-bit Op + extract/insert | ||||||
2512 | { ISD::CTPOP, MVT::v32i8, 14 }, // 2 x 128-bit Op + extract/insert | ||||||
2513 | { ISD::CTTZ, MVT::v4i64, 22 }, // 2 x 128-bit Op + extract/insert | ||||||
2514 | { ISD::CTTZ, MVT::v8i32, 30 }, // 2 x 128-bit Op + extract/insert | ||||||
2515 | { ISD::CTTZ, MVT::v16i16, 26 }, // 2 x 128-bit Op + extract/insert | ||||||
2516 | { ISD::CTTZ, MVT::v32i8, 20 }, // 2 x 128-bit Op + extract/insert | ||||||
2517 | { ISD::SADDSAT, MVT::v16i16, 4 }, // 2 x 128-bit Op + extract/insert | ||||||
2518 | { ISD::SADDSAT, MVT::v32i8, 4 }, // 2 x 128-bit Op + extract/insert | ||||||
2519 | { ISD::SMAX, MVT::v8i32, 4 }, // 2 x 128-bit Op + extract/insert | ||||||
2520 | { ISD::SMAX, MVT::v16i16, 4 }, // 2 x 128-bit Op + extract/insert | ||||||
2521 | { ISD::SMAX, MVT::v32i8, 4 }, // 2 x 128-bit Op + extract/insert | ||||||
2522 | { ISD::SMIN, MVT::v8i32, 4 }, // 2 x 128-bit Op + extract/insert | ||||||
2523 | { ISD::SMIN, MVT::v16i16, 4 }, // 2 x 128-bit Op + extract/insert | ||||||
2524 | { ISD::SMIN, MVT::v32i8, 4 }, // 2 x 128-bit Op + extract/insert | ||||||
2525 | { ISD::SSUBSAT, MVT::v16i16, 4 }, // 2 x 128-bit Op + extract/insert | ||||||
2526 | { ISD::SSUBSAT, MVT::v32i8, 4 }, // 2 x 128-bit Op + extract/insert | ||||||
2527 | { ISD::UADDSAT, MVT::v16i16, 4 }, // 2 x 128-bit Op + extract/insert | ||||||
2528 | { ISD::UADDSAT, MVT::v32i8, 4 }, // 2 x 128-bit Op + extract/insert | ||||||
2529 | { ISD::UADDSAT, MVT::v8i32, 8 }, // 2 x 128-bit Op + extract/insert | ||||||
2530 | { ISD::UMAX, MVT::v8i32, 4 }, // 2 x 128-bit Op + extract/insert | ||||||
2531 | { ISD::UMAX, MVT::v16i16, 4 }, // 2 x 128-bit Op + extract/insert | ||||||
2532 | { ISD::UMAX, MVT::v32i8, 4 }, // 2 x 128-bit Op + extract/insert | ||||||
2533 | { ISD::UMIN, MVT::v8i32, 4 }, // 2 x 128-bit Op + extract/insert | ||||||
2534 | { ISD::UMIN, MVT::v16i16, 4 }, // 2 x 128-bit Op + extract/insert | ||||||
2535 | { ISD::UMIN, MVT::v32i8, 4 }, // 2 x 128-bit Op + extract/insert | ||||||
2536 | { ISD::USUBSAT, MVT::v16i16, 4 }, // 2 x 128-bit Op + extract/insert | ||||||
2537 | { ISD::USUBSAT, MVT::v32i8, 4 }, // 2 x 128-bit Op + extract/insert | ||||||
2538 | { ISD::USUBSAT, MVT::v8i32, 6 }, // 2 x 128-bit Op + extract/insert | ||||||
2539 | { ISD::FMAXNUM, MVT::f32, 3 }, // MAXSS + CMPUNORDSS + BLENDVPS | ||||||
2540 | { ISD::FMAXNUM, MVT::v4f32, 3 }, // MAXPS + CMPUNORDPS + BLENDVPS | ||||||
2541 | { ISD::FMAXNUM, MVT::v8f32, 5 }, // MAXPS + CMPUNORDPS + BLENDVPS + ? | ||||||
2542 | { ISD::FMAXNUM, MVT::f64, 3 }, // MAXSD + CMPUNORDSD + BLENDVPD | ||||||
2543 | { ISD::FMAXNUM, MVT::v2f64, 3 }, // MAXPD + CMPUNORDPD + BLENDVPD | ||||||
2544 | { ISD::FMAXNUM, MVT::v4f64, 5 }, // MAXPD + CMPUNORDPD + BLENDVPD + ? | ||||||
2545 | { ISD::FSQRT, MVT::f32, 14 }, // SNB from http://www.agner.org/ | ||||||
2546 | { ISD::FSQRT, MVT::v4f32, 14 }, // SNB from http://www.agner.org/ | ||||||
2547 | { ISD::FSQRT, MVT::v8f32, 28 }, // SNB from http://www.agner.org/ | ||||||
2548 | { ISD::FSQRT, MVT::f64, 21 }, // SNB from http://www.agner.org/ | ||||||
2549 | { ISD::FSQRT, MVT::v2f64, 21 }, // SNB from http://www.agner.org/ | ||||||
2550 | { ISD::FSQRT, MVT::v4f64, 43 }, // SNB from http://www.agner.org/ | ||||||
2551 | }; | ||||||
2552 | static const CostTblEntry GLMCostTbl[] = { | ||||||
2553 | { ISD::FSQRT, MVT::f32, 19 }, // sqrtss | ||||||
2554 | { ISD::FSQRT, MVT::v4f32, 37 }, // sqrtps | ||||||
2555 | { ISD::FSQRT, MVT::f64, 34 }, // sqrtsd | ||||||
2556 | { ISD::FSQRT, MVT::v2f64, 67 }, // sqrtpd | ||||||
2557 | }; | ||||||
2558 | static const CostTblEntry SLMCostTbl[] = { | ||||||
2559 | { ISD::FSQRT, MVT::f32, 20 }, // sqrtss | ||||||
2560 | { ISD::FSQRT, MVT::v4f32, 40 }, // sqrtps | ||||||
2561 | { ISD::FSQRT, MVT::f64, 35 }, // sqrtsd | ||||||
2562 | { ISD::FSQRT, MVT::v2f64, 70 }, // sqrtpd | ||||||
2563 | }; | ||||||
2564 | static const CostTblEntry SSE42CostTbl[] = { | ||||||
2565 | { ISD::USUBSAT, MVT::v4i32, 2 }, // pmaxud + psubd | ||||||
2566 | { ISD::UADDSAT, MVT::v4i32, 3 }, // not + pminud + paddd | ||||||
2567 | { ISD::FSQRT, MVT::f32, 18 }, // Nehalem from http://www.agner.org/ | ||||||
2568 | { ISD::FSQRT, MVT::v4f32, 18 }, // Nehalem from http://www.agner.org/ | ||||||
2569 | }; | ||||||
2570 | static const CostTblEntry SSE41CostTbl[] = { | ||||||
2571 | { ISD::ABS, MVT::v2i64, 2 }, // BLENDVPD(X,PSUBQ(0,X),X) | ||||||
2572 | { ISD::SMAX, MVT::v4i32, 1 }, | ||||||
2573 | { ISD::SMAX, MVT::v16i8, 1 }, | ||||||
2574 | { ISD::SMIN, MVT::v4i32, 1 }, | ||||||
2575 | { ISD::SMIN, MVT::v16i8, 1 }, | ||||||
2576 | { ISD::UMAX, MVT::v4i32, 1 }, | ||||||
2577 | { ISD::UMAX, MVT::v8i16, 1 }, | ||||||
2578 | { ISD::UMIN, MVT::v4i32, 1 }, | ||||||
2579 | { ISD::UMIN, MVT::v8i16, 1 }, | ||||||
2580 | }; | ||||||
2581 | static const CostTblEntry SSSE3CostTbl[] = { | ||||||
2582 | { ISD::ABS, MVT::v4i32, 1 }, | ||||||
2583 | { ISD::ABS, MVT::v8i16, 1 }, | ||||||
2584 | { ISD::ABS, MVT::v16i8, 1 }, | ||||||
2585 | { ISD::BITREVERSE, MVT::v2i64, 5 }, | ||||||
2586 | { ISD::BITREVERSE, MVT::v4i32, 5 }, | ||||||
2587 | { ISD::BITREVERSE, MVT::v8i16, 5 }, | ||||||
2588 | { ISD::BITREVERSE, MVT::v16i8, 5 }, | ||||||
2589 | { ISD::BSWAP, MVT::v2i64, 1 }, | ||||||
2590 | { ISD::BSWAP, MVT::v4i32, 1 }, | ||||||
2591 | { ISD::BSWAP, MVT::v8i16, 1 }, | ||||||
2592 | { ISD::CTLZ, MVT::v2i64, 23 }, | ||||||
2593 | { ISD::CTLZ, MVT::v4i32, 18 }, | ||||||
2594 | { ISD::CTLZ, MVT::v8i16, 14 }, | ||||||
2595 | { ISD::CTLZ, MVT::v16i8, 9 }, | ||||||
2596 | { ISD::CTPOP, MVT::v2i64, 7 }, | ||||||
2597 | { ISD::CTPOP, MVT::v4i32, 11 }, | ||||||
2598 | { ISD::CTPOP, MVT::v8i16, 9 }, | ||||||
2599 | { ISD::CTPOP, MVT::v16i8, 6 }, | ||||||
2600 | { ISD::CTTZ, MVT::v2i64, 10 }, | ||||||
2601 | { ISD::CTTZ, MVT::v4i32, 14 }, | ||||||
2602 | { ISD::CTTZ, MVT::v8i16, 12 }, | ||||||
2603 | { ISD::CTTZ, MVT::v16i8, 9 } | ||||||
2604 | }; | ||||||
2605 | static const CostTblEntry SSE2CostTbl[] = { | ||||||
2606 | { ISD::ABS, MVT::v2i64, 4 }, | ||||||
2607 | { ISD::ABS, MVT::v4i32, 3 }, | ||||||
2608 | { ISD::ABS, MVT::v8i16, 2 }, | ||||||
2609 | { ISD::ABS, MVT::v16i8, 2 }, | ||||||
2610 | { ISD::BITREVERSE, MVT::v2i64, 29 }, | ||||||
2611 | { ISD::BITREVERSE, MVT::v4i32, 27 }, | ||||||
2612 | { ISD::BITREVERSE, MVT::v8i16, 27 }, | ||||||
2613 | { ISD::BITREVERSE, MVT::v16i8, 20 }, | ||||||
2614 | { ISD::BSWAP, MVT::v2i64, 7 }, | ||||||
2615 | { ISD::BSWAP, MVT::v4i32, 7 }, | ||||||
2616 | { ISD::BSWAP, MVT::v8i16, 7 }, | ||||||
2617 | { ISD::CTLZ, MVT::v2i64, 25 }, | ||||||
2618 | { ISD::CTLZ, MVT::v4i32, 26 }, | ||||||
2619 | { ISD::CTLZ, MVT::v8i16, 20 }, | ||||||
2620 | { ISD::CTLZ, MVT::v16i8, 17 }, | ||||||
2621 | { ISD::CTPOP, MVT::v2i64, 12 }, | ||||||
2622 | { ISD::CTPOP, MVT::v4i32, 15 }, | ||||||
2623 | { ISD::CTPOP, MVT::v8i16, 13 }, | ||||||
2624 | { ISD::CTPOP, MVT::v16i8, 10 }, | ||||||
2625 | { ISD::CTTZ, MVT::v2i64, 14 }, | ||||||
2626 | { ISD::CTTZ, MVT::v4i32, 18 }, | ||||||
2627 | { ISD::CTTZ, MVT::v8i16, 16 }, | ||||||
2628 | { ISD::CTTZ, MVT::v16i8, 13 }, | ||||||
2629 | { ISD::SADDSAT, MVT::v8i16, 1 }, | ||||||
2630 | { ISD::SADDSAT, MVT::v16i8, 1 }, | ||||||
2631 | { ISD::SMAX, MVT::v8i16, 1 }, | ||||||
2632 | { ISD::SMIN, MVT::v8i16, 1 }, | ||||||
2633 | { ISD::SSUBSAT, MVT::v8i16, 1 }, | ||||||
2634 | { ISD::SSUBSAT, MVT::v16i8, 1 }, | ||||||
2635 | { ISD::UADDSAT, MVT::v8i16, 1 }, | ||||||
2636 | { ISD::UADDSAT, MVT::v16i8, 1 }, | ||||||
2637 | { ISD::UMAX, MVT::v8i16, 2 }, | ||||||
2638 | { ISD::UMAX, MVT::v16i8, 1 }, | ||||||
2639 | { ISD::UMIN, MVT::v8i16, 2 }, | ||||||
2640 | { ISD::UMIN, MVT::v16i8, 1 }, | ||||||
2641 | { ISD::USUBSAT, MVT::v8i16, 1 }, | ||||||
2642 | { ISD::USUBSAT, MVT::v16i8, 1 }, | ||||||
2643 | { ISD::FMAXNUM, MVT::f64, 4 }, | ||||||
2644 | { ISD::FMAXNUM, MVT::v2f64, 4 }, | ||||||
2645 | { ISD::FSQRT, MVT::f64, 32 }, // Nehalem from http://www.agner.org/ | ||||||
2646 | { ISD::FSQRT, MVT::v2f64, 32 }, // Nehalem from http://www.agner.org/ | ||||||
2647 | }; | ||||||
2648 | static const CostTblEntry SSE1CostTbl[] = { | ||||||
2649 | { ISD::FMAXNUM, MVT::f32, 4 }, | ||||||
2650 | { ISD::FMAXNUM, MVT::v4f32, 4 }, | ||||||
2651 | { ISD::FSQRT, MVT::f32, 28 }, // Pentium III from http://www.agner.org/ | ||||||
2652 | { ISD::FSQRT, MVT::v4f32, 56 }, // Pentium III from http://www.agner.org/ | ||||||
2653 | }; | ||||||
2654 | static const CostTblEntry BMI64CostTbl[] = { // 64-bit targets | ||||||
2655 | { ISD::CTTZ, MVT::i64, 1 }, | ||||||
2656 | }; | ||||||
2657 | static const CostTblEntry BMI32CostTbl[] = { // 32 or 64-bit targets | ||||||
2658 | { ISD::CTTZ, MVT::i32, 1 }, | ||||||
2659 | { ISD::CTTZ, MVT::i16, 1 }, | ||||||
2660 | { ISD::CTTZ, MVT::i8, 1 }, | ||||||
2661 | }; | ||||||
2662 | static const CostTblEntry LZCNT64CostTbl[] = { // 64-bit targets | ||||||
2663 | { ISD::CTLZ, MVT::i64, 1 }, | ||||||
2664 | }; | ||||||
2665 | static const CostTblEntry LZCNT32CostTbl[] = { // 32 or 64-bit targets | ||||||
2666 | { ISD::CTLZ, MVT::i32, 1 }, | ||||||
2667 | { ISD::CTLZ, MVT::i16, 1 }, | ||||||
2668 | { ISD::CTLZ, MVT::i8, 1 }, | ||||||
2669 | }; | ||||||
2670 | static const CostTblEntry POPCNT64CostTbl[] = { // 64-bit targets | ||||||
2671 | { ISD::CTPOP, MVT::i64, 1 }, | ||||||
2672 | }; | ||||||
2673 | static const CostTblEntry POPCNT32CostTbl[] = { // 32 or 64-bit targets | ||||||
2674 | { ISD::CTPOP, MVT::i32, 1 }, | ||||||
2675 | { ISD::CTPOP, MVT::i16, 1 }, | ||||||
2676 | { ISD::CTPOP, MVT::i8, 1 }, | ||||||
2677 | }; | ||||||
2678 | static const CostTblEntry X64CostTbl[] = { // 64-bit targets | ||||||
2679 | { ISD::ABS, MVT::i64, 2 }, // SUB+CMOV | ||||||
2680 | { ISD::BITREVERSE, MVT::i64, 14 }, | ||||||
2681 | { ISD::CTLZ, MVT::i64, 4 }, // BSR+XOR or BSR+XOR+CMOV | ||||||
2682 | { ISD::CTTZ, MVT::i64, 3 }, // TEST+BSF+CMOV/BRANCH | ||||||
2683 | { ISD::CTPOP, MVT::i64, 10 }, | ||||||
2684 | { ISD::SADDO, MVT::i64, 1 }, | ||||||
2685 | { ISD::UADDO, MVT::i64, 1 }, | ||||||
2686 | { ISD::UMULO, MVT::i64, 2 }, // mulq + seto | ||||||
2687 | }; | ||||||
2688 | static const CostTblEntry X86CostTbl[] = { // 32 or 64-bit targets | ||||||
2689 | { ISD::ABS, MVT::i32, 2 }, // SUB+CMOV | ||||||
2690 | { ISD::ABS, MVT::i16, 2 }, // SUB+CMOV | ||||||
2691 | { ISD::BITREVERSE, MVT::i32, 14 }, | ||||||
2692 | { ISD::BITREVERSE, MVT::i16, 14 }, | ||||||
2693 | { ISD::BITREVERSE, MVT::i8, 11 }, | ||||||
2694 | { ISD::CTLZ, MVT::i32, 4 }, // BSR+XOR or BSR+XOR+CMOV | ||||||
2695 | { ISD::CTLZ, MVT::i16, 4 }, // BSR+XOR or BSR+XOR+CMOV | ||||||
2696 | { ISD::CTLZ, MVT::i8, 4 }, // BSR+XOR or BSR+XOR+CMOV | ||||||
2697 | { ISD::CTTZ, MVT::i32, 3 }, // TEST+BSF+CMOV/BRANCH | ||||||
2698 | { ISD::CTTZ, MVT::i16, 3 }, // TEST+BSF+CMOV/BRANCH | ||||||
2699 | { ISD::CTTZ, MVT::i8, 3 }, // TEST+BSF+CMOV/BRANCH | ||||||
2700 | { ISD::CTPOP, MVT::i32, 8 }, | ||||||
2701 | { ISD::CTPOP, MVT::i16, 9 }, | ||||||
2702 | { ISD::CTPOP, MVT::i8, 7 }, | ||||||
2703 | { ISD::SADDO, MVT::i32, 1 }, | ||||||
2704 | { ISD::SADDO, MVT::i16, 1 }, | ||||||
2705 | { ISD::SADDO, MVT::i8, 1 }, | ||||||
2706 | { ISD::UADDO, MVT::i32, 1 }, | ||||||
2707 | { ISD::UADDO, MVT::i16, 1 }, | ||||||
2708 | { ISD::UADDO, MVT::i8, 1 }, | ||||||
2709 | { ISD::UMULO, MVT::i32, 2 }, // mul + seto | ||||||
2710 | { ISD::UMULO, MVT::i16, 2 }, | ||||||
2711 | { ISD::UMULO, MVT::i8, 2 }, | ||||||
2712 | }; | ||||||
2713 | |||||||
2714 | Type *RetTy = ICA.getReturnType(); | ||||||
2715 | Type *OpTy = RetTy; | ||||||
2716 | Intrinsic::ID IID = ICA.getID(); | ||||||
2717 | unsigned ISD = ISD::DELETED_NODE; | ||||||
2718 | switch (IID) { | ||||||
2719 | default: | ||||||
2720 | break; | ||||||
2721 | case Intrinsic::abs: | ||||||
2722 | ISD = ISD::ABS; | ||||||
2723 | break; | ||||||
2724 | case Intrinsic::bitreverse: | ||||||
2725 | ISD = ISD::BITREVERSE; | ||||||
2726 | break; | ||||||
2727 | case Intrinsic::bswap: | ||||||
2728 | ISD = ISD::BSWAP; | ||||||
2729 | break; | ||||||
2730 | case Intrinsic::ctlz: | ||||||
2731 | ISD = ISD::CTLZ; | ||||||
2732 | break; | ||||||
2733 | case Intrinsic::ctpop: | ||||||
2734 | ISD = ISD::CTPOP; | ||||||
2735 | break; | ||||||
2736 | case Intrinsic::cttz: | ||||||
2737 | ISD = ISD::CTTZ; | ||||||
2738 | break; | ||||||
2739 | case Intrinsic::maxnum: | ||||||
2740 | case Intrinsic::minnum: | ||||||
2741 | // FMINNUM has the same costs so don't duplicate. | ||||||
2742 | ISD = ISD::FMAXNUM; | ||||||
2743 | break; | ||||||
2744 | case Intrinsic::sadd_sat: | ||||||
2745 | ISD = ISD::SADDSAT; | ||||||
2746 | break; | ||||||
2747 | case Intrinsic::smax: | ||||||
2748 | ISD = ISD::SMAX; | ||||||
2749 | break; | ||||||
2750 | case Intrinsic::smin: | ||||||
2751 | ISD = ISD::SMIN; | ||||||
2752 | break; | ||||||
2753 | case Intrinsic::ssub_sat: | ||||||
2754 | ISD = ISD::SSUBSAT; | ||||||
2755 | break; | ||||||
2756 | case Intrinsic::uadd_sat: | ||||||
2757 | ISD = ISD::UADDSAT; | ||||||
2758 | break; | ||||||
2759 | case Intrinsic::umax: | ||||||
2760 | ISD = ISD::UMAX; | ||||||
2761 | break; | ||||||
2762 | case Intrinsic::umin: | ||||||
2763 | ISD = ISD::UMIN; | ||||||
2764 | break; | ||||||
2765 | case Intrinsic::usub_sat: | ||||||
2766 | ISD = ISD::USUBSAT; | ||||||
2767 | break; | ||||||
2768 | case Intrinsic::sqrt: | ||||||
2769 | ISD = ISD::FSQRT; | ||||||
2770 | break; | ||||||
2771 | case Intrinsic::sadd_with_overflow: | ||||||
2772 | case Intrinsic::ssub_with_overflow: | ||||||
2773 | // SSUBO has the same costs so don't duplicate. | ||||||
2774 | ISD = ISD::SADDO; | ||||||
2775 | OpTy = RetTy->getContainedType(0); | ||||||
2776 | break; | ||||||
2777 | case Intrinsic::uadd_with_overflow: | ||||||
2778 | case Intrinsic::usub_with_overflow: | ||||||
2779 | // USUBO has the same costs so don't duplicate. | ||||||
2780 | ISD = ISD::UADDO; | ||||||
2781 | OpTy = RetTy->getContainedType(0); | ||||||
2782 | break; | ||||||
2783 | case Intrinsic::umul_with_overflow: | ||||||
2784 | case Intrinsic::smul_with_overflow: | ||||||
2785 | // SMULO has the same costs so don't duplicate. | ||||||
2786 | ISD = ISD::UMULO; | ||||||
2787 | OpTy = RetTy->getContainedType(0); | ||||||
2788 | break; | ||||||
2789 | } | ||||||
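// Note: the *_with_overflow intrinsics return a {iN, i1} struct, so OpTy is
// switched above to the contained value type (element 0) before it is
// legalized below.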
2790 | |||||||
2791 | if (ISD != ISD::DELETED_NODE) { | ||||||
2792 | // Legalize the type. | ||||||
2793 | std::pair<int, MVT> LT = TLI->getTypeLegalizationCost(DL, OpTy); | ||||||
2794 | MVT MTy = LT.second; | ||||||
2795 | |||||||
2796 | // Attempt to lookup cost. | ||||||
2797 | if (ISD == ISD::BITREVERSE && ST->hasGFNI() && ST->hasSSSE3() && | ||||||
2798 | MTy.isVector()) { | ||||||
2799 | // With PSHUFB the code is very similar for all types. If we have integer | ||||||
2800 | // byte operations, we just need a GF2P8AFFINEQB for vXi8. For other types | ||||||
2801 | // we also need a PSHUFB. | ||||||
2802 | unsigned Cost = MTy.getVectorElementType() == MVT::i8 ? 1 : 2; | ||||||
2803 | |||||||
2804 | // Without byte operations, we need twice as many GF2P8AFFINEQB and PSHUFB | ||||||
2805 | // instructions. We also need an extract and an insert. | ||||||
2806 | if (!(MTy.is128BitVector() || (ST->hasAVX2() && MTy.is256BitVector()) || | ||||||
2807 | (ST->hasBWI() && MTy.is512BitVector()))) | ||||||
2808 | Cost = Cost * 2 + 2; | ||||||
2809 | |||||||
2810 | return LT.first * Cost; | ||||||
2811 | } | ||||||
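// Illustrative numbers for the GFNI path above: a legal 128-bit vXi8
// (e.g. v16i8) costs 1 (a single GF2P8AFFINEQB), a 128-bit v4i32 costs 2
// (GF2P8AFFINEQB + PSHUFB), and a vector wider than the natively supported
// width doubles that and adds an extract + insert (Cost * 2 + 2).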
2812 | |||||||
2813 | auto adjustTableCost = [](const CostTblEntry &Entry, int LegalizationCost, | ||||||
2814 | FastMathFlags FMF) { | ||||||
2815 | // If there are no NANs to deal with, then these are reduced to a | ||||||
2816 | // single MIN** or MAX** instruction instead of the MIN/CMP/SELECT that we | ||||||
2817 | // assume is used in the non-fast case. | ||||||
2818 | if (Entry.ISD == ISD::FMAXNUM || Entry.ISD == ISD::FMINNUM) { | ||||||
2819 | if (FMF.noNaNs()) | ||||||
2820 | return LegalizationCost * 1; | ||||||
2821 | } | ||||||
2822 | return LegalizationCost * (int)Entry.Cost; | ||||||
2823 | }; | ||||||
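// For example (a sketch, assuming an SSE1-only target): llvm.maxnum on
// <4 x float> hits the SSE1 FMAXNUM v4f32 entry below (cost 4), but if the
// call carries the nnan fast-math flag the lambda above returns
// LT.first * 1, since a single MAXPS suffices when NaNs need no handling.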
2824 | |||||||
2825 | if (ST->useGLMDivSqrtCosts()) | ||||||
2826 | if (const auto *Entry = CostTableLookup(GLMCostTbl, ISD, MTy)) | ||||||
2827 | return adjustTableCost(*Entry, LT.first, ICA.getFlags()); | ||||||
2828 | |||||||
2829 | if (ST->isSLM()) | ||||||
2830 | if (const auto *Entry = CostTableLookup(SLMCostTbl, ISD, MTy)) | ||||||
2831 | return adjustTableCost(*Entry, LT.first, ICA.getFlags()); | ||||||
2832 | |||||||
2833 | if (ST->hasCDI()) | ||||||
2834 | if (const auto *Entry = CostTableLookup(AVX512CDCostTbl, ISD, MTy)) | ||||||
2835 | return adjustTableCost(*Entry, LT.first, ICA.getFlags()); | ||||||
2836 | |||||||
2837 | if (ST->hasBWI()) | ||||||
2838 | if (const auto *Entry = CostTableLookup(AVX512BWCostTbl, ISD, MTy)) | ||||||
2839 | return adjustTableCost(*Entry, LT.first, ICA.getFlags()); | ||||||
2840 | |||||||
2841 | if (ST->hasAVX512()) | ||||||
2842 | if (const auto *Entry = CostTableLookup(AVX512CostTbl, ISD, MTy)) | ||||||
2843 | return adjustTableCost(*Entry, LT.first, ICA.getFlags()); | ||||||
2844 | |||||||
2845 | if (ST->hasXOP()) | ||||||
2846 | if (const auto *Entry = CostTableLookup(XOPCostTbl, ISD, MTy)) | ||||||
2847 | return adjustTableCost(*Entry, LT.first, ICA.getFlags()); | ||||||
2848 | |||||||
2849 | if (ST->hasAVX2()) | ||||||
2850 | if (const auto *Entry = CostTableLookup(AVX2CostTbl, ISD, MTy)) | ||||||
2851 | return adjustTableCost(*Entry, LT.first, ICA.getFlags()); | ||||||
2852 | |||||||
2853 | if (ST->hasAVX()) | ||||||
2854 | if (const auto *Entry = CostTableLookup(AVX1CostTbl, ISD, MTy)) | ||||||
2855 | return adjustTableCost(*Entry, LT.first, ICA.getFlags()); | ||||||
2856 | |||||||
2857 | if (ST->hasSSE42()) | ||||||
2858 | if (const auto *Entry = CostTableLookup(SSE42CostTbl, ISD, MTy)) | ||||||
2859 | return adjustTableCost(*Entry, LT.first, ICA.getFlags()); | ||||||
2860 | |||||||
2861 | if (ST->hasSSE41()) | ||||||
2862 | if (const auto *Entry = CostTableLookup(SSE41CostTbl, ISD, MTy)) | ||||||
2863 | return adjustTableCost(*Entry, LT.first, ICA.getFlags()); | ||||||
2864 | |||||||
2865 | if (ST->hasSSSE3()) | ||||||
2866 | if (const auto *Entry = CostTableLookup(SSSE3CostTbl, ISD, MTy)) | ||||||
2867 | return adjustTableCost(*Entry, LT.first, ICA.getFlags()); | ||||||
2868 | |||||||
2869 | if (ST->hasSSE2()) | ||||||
2870 | if (const auto *Entry = CostTableLookup(SSE2CostTbl, ISD, MTy)) | ||||||
2871 | return adjustTableCost(*Entry, LT.first, ICA.getFlags()); | ||||||
2872 | |||||||
2873 | if (ST->hasSSE1()) | ||||||
2874 | if (const auto *Entry = CostTableLookup(SSE1CostTbl, ISD, MTy)) | ||||||
2875 | return adjustTableCost(*Entry, LT.first, ICA.getFlags()); | ||||||
2876 | |||||||
2877 | if (ST->hasBMI()) { | ||||||
2878 | if (ST->is64Bit()) | ||||||
2879 | if (const auto *Entry = CostTableLookup(BMI64CostTbl, ISD, MTy)) | ||||||
2880 | return adjustTableCost(*Entry, LT.first, ICA.getFlags()); | ||||||
2881 | |||||||
2882 | if (const auto *Entry = CostTableLookup(BMI32CostTbl, ISD, MTy)) | ||||||
2883 | return adjustTableCost(*Entry, LT.first, ICA.getFlags()); | ||||||
2884 | } | ||||||
2885 | |||||||
2886 | if (ST->hasLZCNT()) { | ||||||
2887 | if (ST->is64Bit()) | ||||||
2888 | if (const auto *Entry = CostTableLookup(LZCNT64CostTbl, ISD, MTy)) | ||||||
2889 | return adjustTableCost(*Entry, LT.first, ICA.getFlags()); | ||||||
2890 | |||||||
2891 | if (const auto *Entry = CostTableLookup(LZCNT32CostTbl, ISD, MTy)) | ||||||
2892 | return adjustTableCost(*Entry, LT.first, ICA.getFlags()); | ||||||
2893 | } | ||||||
2894 | |||||||
2895 | if (ST->hasPOPCNT()) { | ||||||
2896 | if (ST->is64Bit()) | ||||||
2897 | if (const auto *Entry = CostTableLookup(POPCNT64CostTbl, ISD, MTy)) | ||||||
2898 | return adjustTableCost(*Entry, LT.first, ICA.getFlags()); | ||||||
2899 | |||||||
2900 | if (const auto *Entry = CostTableLookup(POPCNT32CostTbl, ISD, MTy)) | ||||||
2901 | return adjustTableCost(*Entry, LT.first, ICA.getFlags()); | ||||||
2902 | } | ||||||
2903 | |||||||
2904 | // TODO - add BMI (TZCNT) scalar handling | ||||||
2905 | |||||||
2906 | if (ST->is64Bit()) | ||||||
2907 | if (const auto *Entry = CostTableLookup(X64CostTbl, ISD, MTy)) | ||||||
2908 | return adjustTableCost(*Entry, LT.first, ICA.getFlags()); | ||||||
2909 | |||||||
2910 | if (const auto *Entry = CostTableLookup(X86CostTbl, ISD, MTy)) | ||||||
2911 | return adjustTableCost(*Entry, LT.first, ICA.getFlags()); | ||||||
2912 | } | ||||||
2913 | |||||||
2914 | return BaseT::getIntrinsicInstrCost(ICA, CostKind); | ||||||
2915 | } | ||||||
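// Rough illustration of the lookups above (assuming AVX512BW): llvm.ctpop
// on <8 x i64> maps to the AVX512BW CTPOP v8i64 entry (cost 7), while a
// <16 x i64> operand legalizes to two v8i64 halves (LT.first = 2), giving
// an estimated cost of 2 * 7 = 14.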
2916 | |||||||
2917 | InstructionCost | ||||||
2918 | X86TTIImpl::getIntrinsicInstrCost(const IntrinsicCostAttributes &ICA, | ||||||
2919 | TTI::TargetCostKind CostKind) { | ||||||
2920 | if (ICA.isTypeBasedOnly()) | ||||||
2921 | return getTypeBasedIntrinsicInstrCost(ICA, CostKind); | ||||||
2922 | |||||||
2923 | static const CostTblEntry AVX512CostTbl[] = { | ||||||
2924 | { ISD::ROTL, MVT::v8i64, 1 }, | ||||||
2925 | { ISD::ROTL, MVT::v4i64, 1 }, | ||||||
2926 | { ISD::ROTL, MVT::v2i64, 1 }, | ||||||
2927 | { ISD::ROTL, MVT::v16i32, 1 }, | ||||||
2928 | { ISD::ROTL, MVT::v8i32, 1 }, | ||||||
2929 | { ISD::ROTL, MVT::v4i32, 1 }, | ||||||
2930 | { ISD::ROTR, MVT::v8i64, 1 }, | ||||||
2931 | { ISD::ROTR, MVT::v4i64, 1 }, | ||||||
2932 | { ISD::ROTR, MVT::v2i64, 1 }, | ||||||
2933 | { ISD::ROTR, MVT::v16i32, 1 }, | ||||||
2934 | { ISD::ROTR, MVT::v8i32, 1 }, | ||||||
2935 | { ISD::ROTR, MVT::v4i32, 1 } | ||||||
2936 | }; | ||||||
2937 | // XOP: ROTL = VPROT(X,Y), ROTR = VPROT(X,SUB(0,Y)) | ||||||
2938 | static const CostTblEntry XOPCostTbl[] = { | ||||||
2939 | { ISD::ROTL, MVT::v4i64, 4 }, | ||||||
2940 | { ISD::ROTL, MVT::v8i32, 4 }, | ||||||
2941 | { ISD::ROTL, MVT::v16i16, 4 }, | ||||||
2942 | { ISD::ROTL, MVT::v32i8, 4 }, | ||||||
2943 | { ISD::ROTL, MVT::v2i64, 1 }, | ||||||
2944 | { ISD::ROTL, MVT::v4i32, 1 }, | ||||||
2945 | { ISD::ROTL, MVT::v8i16, 1 }, | ||||||
2946 | { ISD::ROTL, MVT::v16i8, 1 }, | ||||||
2947 | { ISD::ROTR, MVT::v4i64, 6 }, | ||||||
2948 | { ISD::ROTR, MVT::v8i32, 6 }, | ||||||
2949 | { ISD::ROTR, MVT::v16i16, 6 }, | ||||||
2950 | { ISD::ROTR, MVT::v32i8, 6 }, | ||||||
2951 | { ISD::ROTR, MVT::v2i64, 2 }, | ||||||
2952 | { ISD::ROTR, MVT::v4i32, 2 }, | ||||||
2953 | { ISD::ROTR, MVT::v8i16, 2 }, | ||||||
2954 | { ISD::ROTR, MVT::v16i8, 2 } | ||||||
2955 | }; | ||||||
2956 | static const CostTblEntry X64CostTbl[] = { // 64-bit targets | ||||||
2957 | { ISD::ROTL, MVT::i64, 1 }, | ||||||
2958 | { ISD::ROTR, MVT::i64, 1 }, | ||||||
2959 | { ISD::FSHL, MVT::i64, 4 } | ||||||
2960 | }; | ||||||
2961 | static const CostTblEntry X86CostTbl[] = { // 32 or 64-bit targets | ||||||
2962 | { ISD::ROTL, MVT::i32, 1 }, | ||||||
2963 | { ISD::ROTL, MVT::i16, 1 }, | ||||||
2964 | { ISD::ROTL, MVT::i8, 1 }, | ||||||
2965 | { ISD::ROTR, MVT::i32, 1 }, | ||||||
2966 | { ISD::ROTR, MVT::i16, 1 }, | ||||||
2967 | { ISD::ROTR, MVT::i8, 1 }, | ||||||
2968 | { ISD::FSHL, MVT::i32, 4 }, | ||||||
2969 | { ISD::FSHL, MVT::i16, 4 }, | ||||||
2970 | { ISD::FSHL, MVT::i8, 4 } | ||||||
2971 | }; | ||||||
2972 | |||||||
2973 | Intrinsic::ID IID = ICA.getID(); | ||||||
2974 | Type *RetTy = ICA.getReturnType(); | ||||||
2975 | const SmallVectorImpl<const Value *> &Args = ICA.getArgs(); | ||||||
2976 | unsigned ISD = ISD::DELETED_NODE; | ||||||
2977 | switch (IID) { | ||||||
2978 | default: | ||||||
2979 | break; | ||||||
2980 | case Intrinsic::fshl: | ||||||
2981 | ISD = ISD::FSHL; | ||||||
2982 | if (Args[0] == Args[1]) | ||||||
2983 | ISD = ISD::ROTL; | ||||||
2984 | break; | ||||||
2985 | case Intrinsic::fshr: | ||||||
2986 | // FSHR has the same costs so don't duplicate. | ||||||
2987 | ISD = ISD::FSHL; | ||||||
2988 | if (Args[0] == Args[1]) | ||||||
2989 | ISD = ISD::ROTR; | ||||||
2990 | break; | ||||||
2991 | } | ||||||
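// Note: a funnel shift whose two value operands are the same value is a
// rotate, i.e. fshl(x, x, z) == rotl(x, z) and fshr(x, x, z) == rotr(x, z),
// which is why Args[0] == Args[1] selects the ROTL/ROTR tables above.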
2992 | |||||||
2993 | if (ISD != ISD::DELETED_NODE) { | ||||||
2994 | // Legalize the type. | ||||||
2995 | std::pair<int, MVT> LT = TLI->getTypeLegalizationCost(DL, RetTy); | ||||||
2996 | MVT MTy = LT.second; | ||||||
2997 | |||||||
2998 | // Attempt to lookup cost. | ||||||
2999 | if (ST->hasAVX512()) | ||||||
3000 | if (const auto *Entry = CostTableLookup(AVX512CostTbl, ISD, MTy)) | ||||||
3001 | return LT.first * Entry->Cost; | ||||||
3002 | |||||||
3003 | if (ST->hasXOP()) | ||||||
3004 | if (const auto *Entry = CostTableLookup(XOPCostTbl, ISD, MTy)) | ||||||
3005 | return LT.first * Entry->Cost; | ||||||
3006 | |||||||
3007 | if (ST->is64Bit()) | ||||||
3008 | if (const auto *Entry = CostTableLookup(X64CostTbl, ISD, MTy)) | ||||||
3009 | return LT.first * Entry->Cost; | ||||||
3010 | |||||||
3011 | if (const auto *Entry = CostTableLookup(X86CostTbl, ISD, MTy)) | ||||||
3012 | return LT.first * Entry->Cost; | ||||||
3013 | } | ||||||
3014 | |||||||
3015 | return BaseT::getIntrinsicInstrCost(ICA, CostKind); | ||||||
3016 | } | ||||||
3017 | |||||||
3018 | int X86TTIImpl::getVectorInstrCost(unsigned Opcode, Type *Val, unsigned Index) { | ||||||
3019 | static const CostTblEntry SLMCostTbl[] = { | ||||||
3020 | { ISD::EXTRACT_VECTOR_ELT, MVT::i8, 4 }, | ||||||
3021 | { ISD::EXTRACT_VECTOR_ELT, MVT::i16, 4 }, | ||||||
3022 | { ISD::EXTRACT_VECTOR_ELT, MVT::i32, 4 }, | ||||||
3023 | { ISD::EXTRACT_VECTOR_ELT, MVT::i64, 7 } | ||||||
3024 | }; | ||||||
3025 | |||||||
3026 | assert(Val->isVectorTy() && "This must be a vector type"); | ||||||
3027 | Type *ScalarType = Val->getScalarType(); | ||||||
3028 | int RegisterFileMoveCost = 0; | ||||||
3029 | |||||||
3030 | if (Index != -1U && (Opcode == Instruction::ExtractElement || | ||||||
3031 | Opcode == Instruction::InsertElement)) { | ||||||
3032 | // Legalize the type. | ||||||
3033 | std::pair<int, MVT> LT = TLI->getTypeLegalizationCost(DL, Val); | ||||||
3034 | |||||||
3035 | // This type is legalized to a scalar type. | ||||||
3036 | if (!LT.second.isVector()) | ||||||
3037 | return 0; | ||||||
3038 | |||||||
3039 | // The type may be split. Normalize the index to the new type. | ||||||
3040 | unsigned NumElts = LT.second.getVectorNumElements(); | ||||||
3041 | unsigned SubNumElts = NumElts; | ||||||
3042 | Index = Index % NumElts; | ||||||
3043 | |||||||
3044 | // For >128-bit vectors, we need to extract higher 128-bit subvectors. | ||||||
3045 | // For inserts, we also need to insert the subvector back. | ||||||
3046 | if (LT.second.getSizeInBits() > 128) { | ||||||
3047 | assert((LT.second.getSizeInBits() % 128) == 0 && "Illegal vector"); | ||||||
3048 | unsigned NumSubVecs = LT.second.getSizeInBits() / 128; | ||||||
3049 | SubNumElts = NumElts / NumSubVecs; | ||||||
3050 | if (SubNumElts <= Index) { | ||||||
3051 | RegisterFileMoveCost += (Opcode == Instruction::InsertElement ? 2 : 1); | ||||||
3052 | Index %= SubNumElts; | ||||||
3053 | } | ||||||
3054 | } | ||||||
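// Worked example (a sketch): extracting element 5 from a legal 256-bit
// v8i32 gives NumSubVecs = 2 and SubNumElts = 4; since 4 <= 5, one extract
// of the upper 128-bit subvector is charged (RegisterFileMoveCost += 1 for
// extracts, 2 for inserts because the subvector must also be reinserted),
// and Index becomes 5 % 4 = 1.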
3055 | |||||||
3056 | if (Index == 0) { | ||||||
3057 | // Floating point scalars are already located in index #0. | ||||||
3058 | // Many insertions to #0 can fold away for scalar fp-ops, so let's assume | ||||||
3059 | // true for all. | ||||||
3060 | if (ScalarType->isFloatingPointTy()) | ||||||
3061 | return RegisterFileMoveCost; | ||||||
3062 | |||||||
3063 | // Assume movd/movq XMM -> GPR is relatively cheap on all targets. | ||||||
3064 | if (ScalarType->isIntegerTy() && Opcode == Instruction::ExtractElement) | ||||||
3065 | return 1 + RegisterFileMoveCost; | ||||||
3066 | } | ||||||
3067 | |||||||
3068 | int ISD = TLI->InstructionOpcodeToISD(Opcode); | ||||||
3069 | assert(ISD && "Unexpected vector opcode"); | ||||||
3070 | MVT MScalarTy = LT.second.getScalarType(); | ||||||
3071 | if (ST->isSLM()) | ||||||
3072 | if (auto *Entry = CostTableLookup(SLMCostTbl, ISD, MScalarTy)) | ||||||
3073 | return Entry->Cost + RegisterFileMoveCost; | ||||||
3074 | |||||||
3075 | // Assume pinsr/pextr XMM <-> GPR is relatively cheap on all targets. | ||||||
3076 | if ((MScalarTy == MVT::i16 && ST->hasSSE2()) || | ||||||
3077 | (MScalarTy.isInteger() && ST->hasSSE41())) | ||||||
3078 | return 1 + RegisterFileMoveCost; | ||||||
3079 | |||||||
3080 | // Assume insertps is relatively cheap on all targets. | ||||||
3081 | if (MScalarTy == MVT::f32 && ST->hasSSE41() && | ||||||
3082 | Opcode == Instruction::InsertElement) | ||||||
3083 | return 1 + RegisterFileMoveCost; | ||||||
3084 | |||||||
3085 | // For extractions we just need to shuffle the element to index 0, which | ||||||
3086 | // should be very cheap (assume cost = 1). For insertions we need to shuffle | ||||||
3087 | // the element to its destination. In both cases we must handle the | ||||||
3088 | // subvector move(s). | ||||||
3089 | // If the vector type is already less than 128-bits then don't reduce it. | ||||||
3090 | // TODO: Under what circumstances should we shuffle using the full width? | ||||||
3091 | int ShuffleCost = 1; | ||||||
3092 | if (Opcode == Instruction::InsertElement) { | ||||||
3093 | auto *SubTy = cast<VectorType>(Val); | ||||||
3094 | EVT VT = TLI->getValueType(DL, Val); | ||||||
3095 | if (VT.getScalarType() != MScalarTy || VT.getSizeInBits() >= 128) | ||||||
3096 | SubTy = FixedVectorType::get(ScalarType, SubNumElts); | ||||||
3097 | ShuffleCost = | ||||||
3098 | getShuffleCost(TTI::SK_PermuteTwoSrc, SubTy, None, 0, SubTy); | ||||||
3099 | } | ||||||
3100 | int IntOrFpCost = ScalarType->isFloatingPointTy() ? 0 : 1; | ||||||
3101 | return ShuffleCost + IntOrFpCost + RegisterFileMoveCost; | ||||||
3102 | } | ||||||
3103 | |||||||
3104 | // Add to the base cost if we know that the extracted element of a vector is | ||||||
3105 | // destined to be moved to and used in the integer register file. | ||||||
3106 | if (Opcode == Instruction::ExtractElement && ScalarType->isPointerTy()) | ||||||
3107 | RegisterFileMoveCost += 1; | ||||||
3108 | |||||||
3109 | return BaseT::getVectorInstrCost(Opcode, Val, Index) + RegisterFileMoveCost; | ||||||
3110 | } | ||||||
3111 | |||||||
3112 | unsigned X86TTIImpl::getScalarizationOverhead(VectorType *Ty, | ||||||
3113 | const APInt &DemandedElts, | ||||||
3114 | bool Insert, bool Extract) { | ||||||
3115 | unsigned Cost = 0; | ||||||
3116 | |||||||
3117 | // For insertions, an ISD::BUILD_VECTOR style vector initialization can be much | ||||||
3118 | // cheaper than an accumulation of ISD::INSERT_VECTOR_ELT. | ||||||
3119 | if (Insert) { | ||||||
3120 | std::pair<int, MVT> LT = TLI->getTypeLegalizationCost(DL, Ty); | ||||||
3121 | MVT MScalarTy = LT.second.getScalarType(); | ||||||
3122 | |||||||
3123 | if ((MScalarTy == MVT::i16 && ST->hasSSE2()) || | ||||||
3124 | (MScalarTy.isInteger() && ST->hasSSE41()) || | ||||||
3125 | (MScalarTy == MVT::f32 && ST->hasSSE41())) { | ||||||
3126 | // For types we can insert directly, insertion into 128-bit subvectors is | ||||||
3127 | // cheap, followed by a cheap chain of concatenations. | ||||||
3128 | if (LT.second.getSizeInBits() <= 128) { | ||||||
3129 | Cost += | ||||||
3130 | BaseT::getScalarizationOverhead(Ty, DemandedElts, Insert, false); | ||||||
3131 | } else { | ||||||
3132 | // In each 128-bit lane, if at least one index is demanded but not all | ||||||
3133 | // indices are demanded, and this 128-bit lane is not the first 128-bit | ||||||
3134 | // lane of the legalized vector, then this lane needs an extracti128; if a | ||||||
3135 | // 128-bit lane has at least one demanded index, that lane also needs an | ||||||
3136 | // inserti128. | ||||||
3137 | |||||||
3138 | // The following cases will help you build a better understanding: | ||||||
3139 | // Assume we insert several elements into a v8i32 vector with AVX2: | ||||||
3140 | // Case#1: inserting into index 1 needs vpinsrd + inserti128. | ||||||
3141 | // Case#2: inserting into index 5 needs extracti128 + vpinsrd + | ||||||
3142 | // inserti128. | ||||||
3143 | // Case#3: inserting into indices 4,5,6,7 needs 4*vpinsrd + inserti128. | ||||||
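// Rough worked example under those assumptions: if only element 5 of a
// v8i32 is demanded on AVX2, the loop below adds 1 (extracti128, lane 1 is
// partially demanded and is not the first lane) + 1 (inserti128), and the
// popcount term adds 1 (vpinsrd), for a total of 3 as in Case#2.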
3144 | unsigned Num128Lanes = LT.second.getSizeInBits() / 128 * LT.first; | ||||||
3145 | unsigned NumElts = LT.second.getVectorNumElements() * LT.first; | ||||||
3146 | APInt WidenedDemandedElts = DemandedElts.zextOrSelf(NumElts); | ||||||
3147 | unsigned Scale = NumElts / Num128Lanes; | ||||||
3148 | // We iterate each 128-lane, and check if we need a | ||||||
3149 | // extracti128/inserti128 for this 128-lane. | ||||||
3150 | for (unsigned I = 0; I < NumElts; I += Scale) { | ||||||
3151 | APInt Mask = WidenedDemandedElts.getBitsSet(NumElts, I, I + Scale); | ||||||
3152 | APInt MaskedDE = Mask & WidenedDemandedElts; | ||||||
3153 | unsigned Population = MaskedDE.countPopulation(); | ||||||
3154 | Cost += (Population > 0 && Population != Scale && | ||||||
3155 | I % LT.second.getVectorNumElements() != 0); | ||||||
3156 | Cost += Population > 0; | ||||||
3157 | } | ||||||
3158 | Cost += DemandedElts.countPopulation(); | ||||||
3159 | |||||||
3160 | // For vXf32 cases, insertion into the 0'th index in each v4f32 | ||||||
3161 | // 128-bit vector is free. | ||||||
3162 | // NOTE: This assumes legalization widens vXf32 vectors. | ||||||
3163 | if (MScalarTy == MVT::f32) | ||||||
3164 | for (unsigned i = 0, e = cast<FixedVectorType>(Ty)->getNumElements(); | ||||||
3165 | i < e; i += 4) | ||||||
3166 | if (DemandedElts[i]) | ||||||
3167 | Cost--; | ||||||
3168 | } | ||||||
3169 | } else if (LT.second.isVector()) { | ||||||
3170 | // Without fast insertion, we need to use MOVD/MOVQ to pass each demanded | ||||||
3171 | // integer element as a SCALAR_TO_VECTOR, then we build the vector as a | ||||||
3172 | // series of UNPCK followed by CONCAT_VECTORS - all of these can be | ||||||
3173 | // considered cheap. | ||||||
3174 | if (Ty->isIntOrIntVectorTy()) | ||||||
3175 | Cost += DemandedElts.countPopulation(); | ||||||
3176 | |||||||
3177 | // Get the smaller of the legalized or original pow2-extended number of | ||||||
3178 | // vector elements, which represents the number of unpacks we'll end up | ||||||
3179 | // performing. | ||||||
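// Hypothetical example: building a <3 x i32> on plain SSE2 costs 3 movd-style
// scalar-to-vector moves from the popcount above, plus
// min(4 legal elts, PowerOf2Ceil(3)) - 1 = 3 unpack/concat steps.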
3180 | unsigned NumElts = LT.second.getVectorNumElements(); | ||||||
3181 | unsigned Pow2Elts = | ||||||
3182 | PowerOf2Ceil(cast<FixedVectorType>(Ty)->getNumElements()); | ||||||
3183 | Cost += (std::min<unsigned>(NumElts, Pow2Elts) - 1) * LT.first; | ||||||
3184 | } | ||||||
3185 | } | ||||||
3186 | |||||||
3187 | // TODO: Use default extraction for now, but we should investigate extending this | ||||||
3188 | // to handle repeated subvector extraction. | ||||||
3189 | if (Extract) | ||||||
3190 | Cost += BaseT::getScalarizationOverhead(Ty, DemandedElts, false, Extract); | ||||||
3191 | |||||||
3192 | return Cost; | ||||||
3193 | } | ||||||
3194 | |||||||
3195 | InstructionCost X86TTIImpl::getMemoryOpCost(unsigned Opcode, Type *Src, | ||||||
3196 | MaybeAlign Alignment, | ||||||
3197 | unsigned AddressSpace, | ||||||
3198 | TTI::TargetCostKind CostKind, | ||||||
3199 | const Instruction *I) { | ||||||
3200 | // TODO: Handle other cost kinds. | ||||||
3201 | if (CostKind != TTI::TCK_RecipThroughput) { | ||||||
3202 | if (auto *SI = dyn_cast_or_null<StoreInst>(I)) { | ||||||
3203 | // Store instruction with index and scale costs 2 Uops. | ||||||
3204 | // Check the preceding GEP to identify non-const indices. | ||||||
3205 | if (auto *GEP = dyn_cast<GetElementPtrInst>(SI->getPointerOperand())) { | ||||||
3206 | if (!all_of(GEP->indices(), [](Value *V) { return isa<Constant>(V); })) | ||||||
3207 | return TTI::TCC_Basic * 2; | ||||||
3208 | } | ||||||
3209 | } | ||||||
3210 | return TTI::TCC_Basic; | ||||||
3211 | } | ||||||
3212 | |||||||
3213 | // Handle non-power-of-two vectors such as <3 x float> | ||||||
3214 | if (auto *VTy = dyn_cast<FixedVectorType>(Src)) { | ||||||
3215 | unsigned NumElem = VTy->getNumElements(); | ||||||
3216 | |||||||
3217 | // Handle a few common cases: | ||||||
3218 | // <3 x float> | ||||||
3219 | if (NumElem == 3 && VTy->getScalarSizeInBits() == 32) | ||||||
3220 | // Cost = 64 bit store + extract + 32 bit store. | ||||||
3221 | return 3; | ||||||
3222 | |||||||
3223 | // <3 x double> | ||||||
3224 | if (NumElem == 3 && VTy->getScalarSizeInBits() == 64) | ||||||
3225 | // Cost = 128 bit store + unpack + 64 bit store. | ||||||
3226 | return 3; | ||||||
3227 | |||||||
3228 | // Assume that all other non-power-of-two numbers are scalarized. | ||||||
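// Sketch (not an exact figure): a <5 x i32> load would be costed as 5 scalar
// loads plus the insertion overhead returned by getScalarizationOverhead.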
3229 | if (!isPowerOf2_32(NumElem)) { | ||||||
3230 | APInt DemandedElts = APInt::getAllOnesValue(NumElem); | ||||||
3231 | InstructionCost Cost = BaseT::getMemoryOpCost( | ||||||
3232 | Opcode, VTy->getScalarType(), Alignment, AddressSpace, CostKind); | ||||||
3233 | int SplitCost = getScalarizationOverhead(VTy, DemandedElts, | ||||||
3234 | Opcode == Instruction::Load, | ||||||
3235 | Opcode == Instruction::Store); | ||||||
3236 | return NumElem * Cost + SplitCost; | ||||||
3237 | } | ||||||
3238 | } | ||||||
3239 | |||||||
3240 | // Type legalization can't handle structs | ||||||
3241 | if (TLI->getValueType(DL, Src, true) == MVT::Other) | ||||||
3242 | return BaseT::getMemoryOpCost(Opcode, Src, Alignment, AddressSpace, | ||||||
3243 | CostKind); | ||||||
3244 | |||||||
3245 | // Legalize the type. | ||||||
3246 | std::pair<int, MVT> LT = TLI->getTypeLegalizationCost(DL, Src); | ||||||
3247 | assert((Opcode == Instruction::Load || Opcode == Instruction::Store) && | ||||||
3248 | "Invalid Opcode"); | ||||||
3249 | |||||||
3250 | // Each load/store unit costs 1. | ||||||
3251 | int Cost = LT.first * 1; | ||||||
3252 | |||||||
3253 | // This isn't exactly right. We're using slow unaligned 32-byte accesses as a | ||||||
3254 | // proxy for a double-pumped AVX memory interface such as on Sandybridge. | ||||||
3255 | if (LT.second.getStoreSize() == 32 && ST->isUnalignedMem32Slow()) | ||||||
3256 | Cost *= 2; | ||||||
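// Sketch: an unaligned 32-byte store of <8 x float> (LT.first == 1) on such
// a target is therefore costed as 2 rather than 1.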
3257 | |||||||
3258 | return Cost; | ||||||
3259 | } | ||||||
3260 | |||||||
3261 | InstructionCost | ||||||
3262 | X86TTIImpl::getMaskedMemoryOpCost(unsigned Opcode, Type *SrcTy, Align Alignment, | ||||||
3263 | unsigned AddressSpace, | ||||||
3264 | TTI::TargetCostKind CostKind) { | ||||||
3265 | bool IsLoad = (Instruction::Load == Opcode); | ||||||
3266 | bool IsStore = (Instruction::Store == Opcode); | ||||||
3267 | |||||||
3268 | auto *SrcVTy = dyn_cast<FixedVectorType>(SrcTy); | ||||||
3269 | if (!SrcVTy) | ||||||
3270 | // To calculate the scalar cost, take the regular memory op cost without a mask. | ||||||
3271 | return getMemoryOpCost(Opcode, SrcTy, Alignment, AddressSpace, CostKind); | ||||||
3272 | |||||||
3273 | unsigned NumElem = SrcVTy->getNumElements(); | ||||||
3274 | auto *MaskTy = | ||||||
3275 | FixedVectorType::get(Type::getInt8Ty(SrcVTy->getContext()), NumElem); | ||||||
3276 | if ((IsLoad && !isLegalMaskedLoad(SrcVTy, Alignment)) || | ||||||
3277 | (IsStore && !isLegalMaskedStore(SrcVTy, Alignment)) || | ||||||
3278 | !isPowerOf2_32(NumElem)) { | ||||||
3279 | // Scalarization | ||||||
3280 | APInt DemandedElts = APInt::getAllOnesValue(NumElem); | ||||||
3281 | InstructionCost MaskSplitCost = | ||||||
3282 | getScalarizationOverhead(MaskTy, DemandedElts, false, true); | ||||||
3283 | InstructionCost ScalarCompareCost = getCmpSelInstrCost( | ||||||
3284 | Instruction::ICmp, Type::getInt8Ty(SrcVTy->getContext()), nullptr, | ||||||
3285 | CmpInst::BAD_ICMP_PREDICATE, CostKind); | ||||||
3286 | InstructionCost BranchCost = getCFInstrCost(Instruction::Br, CostKind); | ||||||
3287 | InstructionCost MaskCmpCost = NumElem * (BranchCost + ScalarCompareCost); | ||||||
3288 | InstructionCost ValueSplitCost = | ||||||
3289 | getScalarizationOverhead(SrcVTy, DemandedElts, IsLoad, IsStore); | ||||||
3290 | InstructionCost MemopCost = | ||||||
3291 | NumElem * BaseT::getMemoryOpCost(Opcode, SrcVTy->getScalarType(), | ||||||
3292 | Alignment, AddressSpace, CostKind); | ||||||
3293 | return MemopCost + ValueSplitCost + MaskSplitCost + MaskCmpCost; | ||||||
3294 | } | ||||||
3295 | |||||||
3296 | // Legalize the type. | ||||||
3297 | std::pair<int, MVT> LT = TLI->getTypeLegalizationCost(DL, SrcVTy); | ||||||
3298 | auto VT = TLI->getValueType(DL, SrcVTy); | ||||||
3299 | int Cost = 0; | ||||||
3300 | if (VT.isSimple() && LT.second != VT.getSimpleVT() && | ||||||
3301 | LT.second.getVectorNumElements() == NumElem) | ||||||
3302 | // Promotion requires expand/truncate for data and a shuffle for mask. | ||||||
3303 | Cost += getShuffleCost(TTI::SK_PermuteTwoSrc, SrcVTy, None, 0, nullptr) + | ||||||
3304 | getShuffleCost(TTI::SK_PermuteTwoSrc, MaskTy, None, 0, nullptr); | ||||||
3305 | |||||||
3306 | else if (LT.second.getVectorNumElements() > NumElem) { | ||||||
3307 | auto *NewMaskTy = FixedVectorType::get(MaskTy->getElementType(), | ||||||
3308 | LT.second.getVectorNumElements()); | ||||||
3309 | // Expanding requires filling the mask with zeroes. | ||||||
3310 | Cost += getShuffleCost(TTI::SK_InsertSubvector, NewMaskTy, None, 0, MaskTy); | ||||||
3311 | } | ||||||
3312 | |||||||
3313 | // Pre-AVX512 - each maskmov load costs 2 + store costs ~8. | ||||||
3314 | if (!ST->hasAVX512()) | ||||||
3315 | return Cost + LT.first * (IsLoad ? 2 : 8); | ||||||
3316 | |||||||
3317 | // AVX-512 masked load/store is cheaper. | ||||||
3318 | return Cost + LT.first; | ||||||
3319 | } | ||||||
3320 | |||||||
3321 | int X86TTIImpl::getAddressComputationCost(Type *Ty, ScalarEvolution *SE, | ||||||
3322 | const SCEV *Ptr) { | ||||||
3323 | // Address computations in vectorized code with non-consecutive addresses will | ||||||
3324 | // likely result in more instructions compared to scalar code where the | ||||||
3325 | // computation can more often be merged into the index mode. The resulting | ||||||
3326 | // extra micro-ops can significantly decrease throughput. | ||||||
3327 | const unsigned NumVectorInstToHideOverhead = 10; | ||||||
3328 | |||||||
3329 | // Cost modeling of Strided Access Computation is hidden by the indexing | ||||||
3330 | // modes of X86 regardless of the stride value. We don't believe that there | ||||||
3331 | // is a difference between constant strided access in general and a constant | ||||||
3332 | // stride value which is less than or equal to 64. | ||||||
3333 | // Even in the case of (loop invariant) stride whose value is not known at | ||||||
3334 | // compile time, the address computation will not incur more than one extra | ||||||
3335 | // ADD instruction. | ||||||
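// Resulting behaviour, roughly: vector accesses with no identifiable stride
// return NumVectorInstToHideOverhead (10), while accesses whose stride is
// loop invariant but unknown at compile time return 1 (a single extra ADD).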
3336 | if (Ty->isVectorTy() && SE) { | ||||||
3337 | if (!BaseT::isStridedAccess(Ptr)) | ||||||
3338 | return NumVectorInstToHideOverhead; | ||||||
3339 | if (!BaseT::getConstantStrideStep(SE, Ptr)) | ||||||
3340 | return 1; | ||||||
3341 | } | ||||||
3342 | |||||||
3343 | return BaseT::getAddressComputationCost(Ty, SE, Ptr); | ||||||
3344 | } | ||||||
3345 | |||||||
3346 | InstructionCost | ||||||
3347 | X86TTIImpl::getArithmeticReductionCost(unsigned Opcode, VectorType *ValTy, | ||||||
3348 | bool IsPairwise, | ||||||
3349 | TTI::TargetCostKind CostKind) { | ||||||
3350 | // Just use the default implementation for pair reductions. | ||||||
3351 | if (IsPairwise) | ||||||
3352 | return BaseT::getArithmeticReductionCost(Opcode, ValTy, IsPairwise, CostKind); | ||||||
3353 | |||||||
3354 | // We use the Intel Architecture Code Analyzer (IACA) to measure the throughput | ||||||
3355 | // and use that as the cost. | ||||||
3356 | |||||||
3357 | static const CostTblEntry SLMCostTblNoPairWise[] = { | ||||||
3358 | { ISD::FADD, MVT::v2f64, 3 }, | ||||||
3359 | { ISD::ADD, MVT::v2i64, 5 }, | ||||||
3360 | }; | ||||||
3361 | |||||||
3362 | static const CostTblEntry SSE2CostTblNoPairWise[] = { | ||||||
3363 | { ISD::FADD, MVT::v2f64, 2 }, | ||||||
3364 | { ISD::FADD, MVT::v4f32, 4 }, | ||||||
3365 | { ISD::ADD, MVT::v2i64, 2 }, // The data reported by the IACA tool is "1.6". | ||||||
3366 | { ISD::ADD, MVT::v2i32, 2 }, // FIXME: chosen to be less than v4i32 | ||||||
3367 | { ISD::ADD, MVT::v4i32, 3 }, // The data reported by the IACA tool is "3.3". | ||||||
3368 | { ISD::ADD, MVT::v2i16, 2 }, // The data reported by the IACA tool is "4.3". | ||||||
3369 | { ISD::ADD, MVT::v4i16, 3 }, // The data reported by the IACA tool is "4.3". | ||||||
3370 | { ISD::ADD, MVT::v8i16, 4 }, // The data reported by the IACA tool is "4.3". | ||||||
3371 | { ISD::ADD, MVT::v2i8, 2 }, | ||||||
3372 | { ISD::ADD, MVT::v4i8, 2 }, | ||||||
3373 | { ISD::ADD, MVT::v8i8, 2 }, | ||||||
3374 | { ISD::ADD, MVT::v16i8, 3 }, | ||||||
3375 | }; | ||||||
3376 | |||||||
3377 | static const CostTblEntry AVX1CostTblNoPairWise[] = { | ||||||
3378 | { ISD::FADD, MVT::v4f64, 3 }, | ||||||
3379 | { ISD::FADD, MVT::v4f32, 3 }, | ||||||
3380 | { ISD::FADD, MVT::v8f32, 4 }, | ||||||
3381 | { ISD::ADD, MVT::v2i64, 1 }, // The data reported by the IACA tool is "1.5". | ||||||
3382 | { ISD::ADD, MVT::v4i64, 3 }, | ||||||
3383 | { ISD::ADD, MVT::v8i32, 5 }, | ||||||
3384 | { ISD::ADD, MVT::v16i16, 5 }, | ||||||
3385 | { ISD::ADD, MVT::v32i8, 4 }, | ||||||
3386 | }; | ||||||
3387 | |||||||
3388 | int ISD = TLI->InstructionOpcodeToISD(Opcode); | ||||||
3389 | assert(ISD && "Invalid opcode"); | ||||||
3390 | |||||||
3391 | // Before legalizing the type, give a chance to look up illegal narrow types | ||||||
3392 | // in the table. | ||||||
3393 | // FIXME: Is there a better way to do this? | ||||||
3394 | EVT VT = TLI->getValueType(DL, ValTy); | ||||||
3395 | if (VT.isSimple()) { | ||||||
3396 | MVT MTy = VT.getSimpleVT(); | ||||||
3397 | if (ST->isSLM()) | ||||||
3398 | if (const auto *Entry = CostTableLookup(SLMCostTblNoPairWise, ISD, MTy)) | ||||||
3399 | return Entry->Cost; | ||||||
3400 | |||||||
3401 | if (ST->hasAVX()) | ||||||
3402 | if (const auto *Entry = CostTableLookup(AVX1CostTblNoPairWise, ISD, MTy)) | ||||||
3403 | return Entry->Cost; | ||||||
3404 | |||||||
3405 | if (ST->hasSSE2()) | ||||||
3406 | if (const auto *Entry = CostTableLookup(SSE2CostTblNoPairWise, ISD, MTy)) | ||||||
3407 | return Entry->Cost; | ||||||
3408 | } | ||||||
3409 | |||||||
3410 | std::pair<int, MVT> LT = TLI->getTypeLegalizationCost(DL, ValTy); | ||||||
3411 | |||||||
3412 | MVT MTy = LT.second; | ||||||
3413 | |||||||
3414 | auto *ValVTy = cast<FixedVectorType>(ValTy); | ||||||
3415 | |||||||
3416 | // Special case: vXi8 mul reductions are performed as vXi16. | ||||||
3417 | if (ISD == ISD::MUL && MTy.getScalarType() == MVT::i8) { | ||||||
3418 | auto *WideSclTy = IntegerType::get(ValVTy->getContext(), 16); | ||||||
3419 | auto *WideVecTy = FixedVectorType::get(WideSclTy, ValVTy->getNumElements()); | ||||||
3420 | return *getCastInstrCost(Instruction::ZExt, WideVecTy, ValTy, | ||||||
3421 | TargetTransformInfo::CastContextHint::None, | ||||||
3422 | CostKind) | ||||||
3423 | .getValue() + | ||||||
3424 | getArithmeticReductionCost(Opcode, WideVecTy, IsPairwise, CostKind); | ||||||
3425 | } | ||||||
3426 | |||||||
3427 | unsigned ArithmeticCost = 0; | ||||||
3428 | if (LT.first != 1 && MTy.isVector() && | ||||||
3429 | MTy.getVectorNumElements() < ValVTy->getNumElements()) { | ||||||
3430 | // Type needs to be split. We need LT.first - 1 arithmetic ops. | ||||||
3431 | auto *SingleOpTy = FixedVectorType::get(ValVTy->getElementType(), | ||||||
3432 | MTy.getVectorNumElements()); | ||||||
3433 | ArithmeticCost = getArithmeticInstrCost(Opcode, SingleOpTy, CostKind); | ||||||
3434 | ArithmeticCost *= LT.first - 1; | ||||||
3435 | } | ||||||
3436 | |||||||
3437 | if (ST->isSLM()) | ||||||
3438 | if (const auto *Entry = CostTableLookup(SLMCostTblNoPairWise, ISD, MTy)) | ||||||
3439 | return ArithmeticCost + Entry->Cost; | ||||||
3440 | |||||||
3441 | if (ST->hasAVX()) | ||||||
3442 | if (const auto *Entry = CostTableLookup(AVX1CostTblNoPairWise, ISD, MTy)) | ||||||
3443 | return ArithmeticCost + Entry->Cost; | ||||||
3444 | |||||||
3445 | if (ST->hasSSE2()) | ||||||
3446 | if (const auto *Entry = CostTableLookup(SSE2CostTblNoPairWise, ISD, MTy)) | ||||||
3447 | return ArithmeticCost + Entry->Cost; | ||||||
3448 | |||||||
3449 | // FIXME: These assume a naive kshift+binop lowering, which is probably | ||||||
3450 | // conservative in most cases. | ||||||
3451 | static const CostTblEntry AVX512BoolReduction[] = { | ||||||
3452 | { ISD::AND, MVT::v2i1, 3 }, | ||||||
3453 | { ISD::AND, MVT::v4i1, 5 }, | ||||||
3454 | { ISD::AND, MVT::v8i1, 7 }, | ||||||
3455 | { ISD::AND, MVT::v16i1, 9 }, | ||||||
3456 | { ISD::AND, MVT::v32i1, 11 }, | ||||||
3457 | { ISD::AND, MVT::v64i1, 13 }, | ||||||
3458 | { ISD::OR, MVT::v2i1, 3 }, | ||||||
3459 | { ISD::OR, MVT::v4i1, 5 }, | ||||||
3460 | { ISD::OR, MVT::v8i1, 7 }, | ||||||
3461 | { ISD::OR, MVT::v16i1, 9 }, | ||||||
3462 | { ISD::OR, MVT::v32i1, 11 }, | ||||||
3463 | { ISD::OR, MVT::v64i1, 13 }, | ||||||
3464 | }; | ||||||
3465 | |||||||
3466 | static const CostTblEntry AVX2BoolReduction[] = { | ||||||
3467 | { ISD::AND, MVT::v16i16, 2 }, // vpmovmskb + cmp | ||||||
3468 | { ISD::AND, MVT::v32i8, 2 }, // vpmovmskb + cmp | ||||||
3469 | { ISD::OR, MVT::v16i16, 2 }, // vpmovmskb + cmp | ||||||
3470 | { ISD::OR, MVT::v32i8, 2 }, // vpmovmskb + cmp | ||||||
3471 | }; | ||||||
3472 | |||||||
3473 | static const CostTblEntry AVX1BoolReduction[] = { | ||||||
3474 | { ISD::AND, MVT::v4i64, 2 }, // vmovmskpd + cmp | ||||||
3475 | { ISD::AND, MVT::v8i32, 2 }, // vmovmskps + cmp | ||||||
3476 | { ISD::AND, MVT::v16i16, 4 }, // vextractf128 + vpand + vpmovmskb + cmp | ||||||
3477 | { ISD::AND, MVT::v32i8, 4 }, // vextractf128 + vpand + vpmovmskb + cmp | ||||||
3478 | { ISD::OR, MVT::v4i64, 2 }, // vmovmskpd + cmp | ||||||
3479 | { ISD::OR, MVT::v8i32, 2 }, // vmovmskps + cmp | ||||||
3480 | { ISD::OR, MVT::v16i16, 4 }, // vextractf128 + vpor + vpmovmskb + cmp | ||||||
3481 | { ISD::OR, MVT::v32i8, 4 }, // vextractf128 + vpor + vpmovmskb + cmp | ||||||
3482 | }; | ||||||
3483 | |||||||
3484 | static const CostTblEntry SSE2BoolReduction[] = { | ||||||
3485 | { ISD::AND, MVT::v2i64, 2 }, // movmskpd + cmp | ||||||
3486 | { ISD::AND, MVT::v4i32, 2 }, // movmskps + cmp | ||||||
3487 | { ISD::AND, MVT::v8i16, 2 }, // pmovmskb + cmp | ||||||
3488 | { ISD::AND, MVT::v16i8, 2 }, // pmovmskb + cmp | ||||||
3489 | { ISD::OR, MVT::v2i64, 2 }, // movmskpd + cmp | ||||||
3490 | { ISD::OR, MVT::v4i32, 2 }, // movmskps + cmp | ||||||
3491 | { ISD::OR, MVT::v8i16, 2 }, // pmovmskb + cmp | ||||||
3492 | { ISD::OR, MVT::v16i8, 2 }, // pmovmskb + cmp | ||||||
3493 | }; | ||||||
3494 | |||||||
3495 | // Handle bool allof/anyof patterns. | ||||||
3496 | if (ValVTy->getElementType()->isIntegerTy(1)) { | ||||||
3497 | unsigned ArithmeticCost = 0; | ||||||
3498 | if (LT.first != 1 && MTy.isVector() && | ||||||
3499 | MTy.getVectorNumElements() < ValVTy->getNumElements()) { | ||||||
3500 | // Type needs to be split. We need LT.first - 1 arithmetic ops. | ||||||
3501 | auto *SingleOpTy = FixedVectorType::get(ValVTy->getElementType(), | ||||||
3502 | MTy.getVectorNumElements()); | ||||||
3503 | ArithmeticCost = getArithmeticInstrCost(Opcode, SingleOpTy, CostKind); | ||||||
3504 | ArithmeticCost *= LT.first - 1; | ||||||
3505 | } | ||||||
3506 | |||||||
3507 | if (ST->hasAVX512()) | ||||||
3508 | if (const auto *Entry = CostTableLookup(AVX512BoolReduction, ISD, MTy)) | ||||||
3509 | return ArithmeticCost + Entry->Cost; | ||||||
3510 | if (ST->hasAVX2()) | ||||||
3511 | if (const auto *Entry = CostTableLookup(AVX2BoolReduction, ISD, MTy)) | ||||||
3512 | return ArithmeticCost + Entry->Cost; | ||||||
3513 | if (ST->hasAVX()) | ||||||
3514 | if (const auto *Entry = CostTableLookup(AVX1BoolReduction, ISD, MTy)) | ||||||
3515 | return ArithmeticCost + Entry->Cost; | ||||||
3516 | if (ST->hasSSE2()) | ||||||
3517 | if (const auto *Entry = CostTableLookup(SSE2BoolReduction, ISD, MTy)) | ||||||
3518 | return ArithmeticCost + Entry->Cost; | ||||||
3519 | |||||||
3520 | return BaseT::getArithmeticReductionCost(Opcode, ValVTy, IsPairwise, | ||||||
3521 | CostKind); | ||||||
3522 | } | ||||||
3523 | |||||||
3524 | unsigned NumVecElts = ValVTy->getNumElements(); | ||||||
3525 | unsigned ScalarSize = ValVTy->getScalarSizeInBits(); | ||||||
3526 | |||||||
3527 | // Special case power of 2 reductions where the scalar type isn't changed | ||||||
3528 | // by type legalization. | ||||||
3529 | if (!isPowerOf2_32(NumVecElts) || ScalarSize != MTy.getScalarSizeInBits()) | ||||||
3530 | return BaseT::getArithmeticReductionCost(Opcode, ValVTy, IsPairwise, | ||||||
3531 | CostKind); | ||||||
3532 | |||||||
3533 | unsigned ReductionCost = 0; | ||||||
3534 | |||||||
3535 | auto *Ty = ValVTy; | ||||||
3536 | if (LT.first != 1 && MTy.isVector() && | ||||||
3537 | MTy.getVectorNumElements() < ValVTy->getNumElements()) { | ||||||
3538 | // Type needs to be split. We need LT.first - 1 arithmetic ops. | ||||||
3539 | Ty = FixedVectorType::get(ValVTy->getElementType(), | ||||||
3540 | MTy.getVectorNumElements()); | ||||||
3541 | ReductionCost = getArithmeticInstrCost(Opcode, Ty, CostKind); | ||||||
3542 | ReductionCost *= LT.first - 1; | ||||||
3543 | NumVecElts = MTy.getVectorNumElements(); | ||||||
3544 | } | ||||||
3545 | |||||||
3546 | // Now handle reduction with the legal type, taking into account size changes | ||||||
3547 | // at each level. | ||||||
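// Illustrative walk-through (assuming unit shuffle/add costs): a v8i32 add
// reduction does extract_subvector + add (256 -> 128 bits), permute + add
// (128 -> 64 bits), shuffle + add (64 -> 32 bits), then the final
// extractelement added below.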
3548 | while (NumVecElts > 1) { | ||||||
3549 | // Determine the size of the remaining vector we need to reduce. | ||||||
3550 | unsigned Size = NumVecElts * ScalarSize; | ||||||
3551 | NumVecElts /= 2; | ||||||
3552 | // If we're reducing from 256/512 bits, use an extract_subvector. | ||||||
3553 | if (Size > 128) { | ||||||
3554 | auto *SubTy = FixedVectorType::get(ValVTy->getElementType(), NumVecElts); | ||||||
3555 | ReductionCost += | ||||||
3556 | getShuffleCost(TTI::SK_ExtractSubvector, Ty, None, NumVecElts, SubTy); | ||||||
3557 | Ty = SubTy; | ||||||
3558 | } else if (Size == 128) { | ||||||
3559 | // Reducing from 128 bits is a permute of v2f64/v2i64. | ||||||
3560 | FixedVectorType *ShufTy; | ||||||
3561 | if (ValVTy->isFloatingPointTy()) | ||||||
3562 | ShufTy = | ||||||
3563 | FixedVectorType::get(Type::getDoubleTy(ValVTy->getContext()), 2); | ||||||
3564 | else | ||||||
3565 | ShufTy = | ||||||
3566 | FixedVectorType::get(Type::getInt64Ty(ValVTy->getContext()), 2); | ||||||
3567 | ReductionCost += | ||||||
3568 | getShuffleCost(TTI::SK_PermuteSingleSrc, ShufTy, None, 0, nullptr); | ||||||
3569 | } else if (Size == 64) { | ||||||
3570 | // Reducing from 64 bits is a shuffle of v4f32/v4i32. | ||||||
3571 | FixedVectorType *ShufTy; | ||||||
3572 | if (ValVTy->isFloatingPointTy()) | ||||||
3573 | ShufTy = | ||||||
3574 | FixedVectorType::get(Type::getFloatTy(ValVTy->getContext()), 4); | ||||||
3575 | else | ||||||
3576 | ShufTy = | ||||||
3577 | FixedVectorType::get(Type::getInt32Ty(ValVTy->getContext()), 4); | ||||||
3578 | ReductionCost += | ||||||
3579 | getShuffleCost(TTI::SK_PermuteSingleSrc, ShufTy, None, 0, nullptr); | ||||||
3580 | } else { | ||||||
3581 | // Reducing from smaller size is a shift by immediate. | ||||||
3582 | auto *ShiftTy = FixedVectorType::get( | ||||||
3583 | Type::getIntNTy(ValVTy->getContext(), Size), 128 / Size); | ||||||
3584 | ReductionCost += getArithmeticInstrCost( | ||||||
3585 | Instruction::LShr, ShiftTy, CostKind, | ||||||
3586 | TargetTransformInfo::OK_AnyValue, | ||||||
3587 | TargetTransformInfo::OK_UniformConstantValue, | ||||||
3588 | TargetTransformInfo::OP_None, TargetTransformInfo::OP_None); | ||||||
3589 | } | ||||||
3590 | |||||||
3591 | // Add the arithmetic op for this level. | ||||||
3592 | ReductionCost += getArithmeticInstrCost(Opcode, Ty, CostKind); | ||||||
3593 | } | ||||||
3594 | |||||||
3595 | // Add the final extract element to the cost. | ||||||
3596 | return ReductionCost + getVectorInstrCost(Instruction::ExtractElement, Ty, 0); | ||||||
3597 | } | ||||||
3598 | |||||||
3599 | InstructionCost X86TTIImpl::getMinMaxCost(Type *Ty, Type *CondTy, | ||||||
3600 | bool IsUnsigned) { | ||||||
3601 | std::pair<int, MVT> LT = TLI->getTypeLegalizationCost(DL, Ty); | ||||||
3602 | |||||||
3603 | MVT MTy = LT.second; | ||||||
3604 | |||||||
3605 | int ISD; | ||||||
3606 | if (Ty->isIntOrIntVectorTy()) { | ||||||
3607 | ISD = IsUnsigned ? ISD::UMIN : ISD::SMIN; | ||||||
3608 | } else { | ||||||
3609 | assert(Ty->isFPOrFPVectorTy() && | ||||||
3610 | "Expected floating point or integer vector type."); | ||||||
3611 | ISD = ISD::FMINNUM; | ||||||
3612 | } | ||||||
3613 | |||||||
3614 | static const CostTblEntry SSE1CostTbl[] = { | ||||||
3615 | {ISD::FMINNUM, MVT::v4f32, 1}, | ||||||
3616 | }; | ||||||
3617 | |||||||
3618 | static const CostTblEntry SSE2CostTbl[] = { | ||||||
3619 | {ISD::FMINNUM, MVT::v2f64, 1}, | ||||||
3620 | {ISD::SMIN, MVT::v8i16, 1}, | ||||||
3621 | {ISD::UMIN, MVT::v16i8, 1}, | ||||||
3622 | }; | ||||||
3623 | |||||||
3624 | static const CostTblEntry SSE41CostTbl[] = { | ||||||
3625 | {ISD::SMIN, MVT::v4i32, 1}, | ||||||
3626 | {ISD::UMIN, MVT::v4i32, 1}, | ||||||
3627 | {ISD::UMIN, MVT::v8i16, 1}, | ||||||
3628 | {ISD::SMIN, MVT::v16i8, 1}, | ||||||
3629 | }; | ||||||
3630 | |||||||
3631 | static const CostTblEntry SSE42CostTbl[] = { | ||||||
3632 | {ISD::UMIN, MVT::v2i64, 3}, // xor+pcmpgtq+blendvpd | ||||||
3633 | }; | ||||||
3634 | |||||||
3635 | static const CostTblEntry AVX1CostTbl[] = { | ||||||
3636 | {ISD::FMINNUM, MVT::v8f32, 1}, | ||||||
3637 | {ISD::FMINNUM, MVT::v4f64, 1}, | ||||||
3638 | {ISD::SMIN, MVT::v8i32, 3}, | ||||||
3639 | {ISD::UMIN, MVT::v8i32, 3}, | ||||||
3640 | {ISD::SMIN, MVT::v16i16, 3}, | ||||||
3641 | {ISD::UMIN, MVT::v16i16, 3}, | ||||||
3642 | {ISD::SMIN, MVT::v32i8, 3}, | ||||||
3643 | {ISD::UMIN, MVT::v32i8, 3}, | ||||||
3644 | }; | ||||||
3645 | |||||||
3646 | static const CostTblEntry AVX2CostTbl[] = { | ||||||
3647 | {ISD::SMIN, MVT::v8i32, 1}, | ||||||
3648 | {ISD::UMIN, MVT::v8i32, 1}, | ||||||
3649 | {ISD::SMIN, MVT::v16i16, 1}, | ||||||
3650 | {ISD::UMIN, MVT::v16i16, 1}, | ||||||
3651 | {ISD::SMIN, MVT::v32i8, 1}, | ||||||
3652 | {ISD::UMIN, MVT::v32i8, 1}, | ||||||
3653 | }; | ||||||
3654 | |||||||
3655 | static const CostTblEntry AVX512CostTbl[] = { | ||||||
3656 | {ISD::FMINNUM, MVT::v16f32, 1}, | ||||||
3657 | {ISD::FMINNUM, MVT::v8f64, 1}, | ||||||
3658 | {ISD::SMIN, MVT::v2i64, 1}, | ||||||
3659 | {ISD::UMIN, MVT::v2i64, 1}, | ||||||
3660 | {ISD::SMIN, MVT::v4i64, 1}, | ||||||
3661 | {ISD::UMIN, MVT::v4i64, 1}, | ||||||
3662 | {ISD::SMIN, MVT::v8i64, 1}, | ||||||
3663 | {ISD::UMIN, MVT::v8i64, 1}, | ||||||
3664 | {ISD::SMIN, MVT::v16i32, 1}, | ||||||
3665 | {ISD::UMIN, MVT::v16i32, 1}, | ||||||
3666 | }; | ||||||
3667 | |||||||
3668 | static const CostTblEntry AVX512BWCostTbl[] = { | ||||||
3669 | {ISD::SMIN, MVT::v32i16, 1}, | ||||||
3670 | {ISD::UMIN, MVT::v32i16, 1}, | ||||||
3671 | {ISD::SMIN, MVT::v64i8, 1}, | ||||||
3672 | {ISD::UMIN, MVT::v64i8, 1}, | ||||||
3673 | }; | ||||||
3674 | |||||||
3675 | // If we have a native MIN/MAX instruction for this type, use it. | ||||||
3676 | if (ST->hasBWI()) | ||||||
3677 | if (const auto *Entry = CostTableLookup(AVX512BWCostTbl, ISD, MTy)) | ||||||
3678 | return LT.first * Entry->Cost; | ||||||
3679 | |||||||
3680 | if (ST->hasAVX512()) | ||||||
3681 | if (const auto *Entry = CostTableLookup(AVX512CostTbl, ISD, MTy)) | ||||||
3682 | return LT.first * Entry->Cost; | ||||||
3683 | |||||||
3684 | if (ST->hasAVX2()) | ||||||
3685 | if (const auto *Entry = CostTableLookup(AVX2CostTbl, ISD, MTy)) | ||||||
3686 | return LT.first * Entry->Cost; | ||||||
3687 | |||||||
3688 | if (ST->hasAVX()) | ||||||
3689 | if (const auto *Entry = CostTableLookup(AVX1CostTbl, ISD, MTy)) | ||||||
3690 | return LT.first * Entry->Cost; | ||||||
3691 | |||||||
3692 | if (ST->hasSSE42()) | ||||||
3693 | if (const auto *Entry = CostTableLookup(SSE42CostTbl, ISD, MTy)) | ||||||
3694 | return LT.first * Entry->Cost; | ||||||
3695 | |||||||
3696 | if (ST->hasSSE41()) | ||||||
3697 | if (const auto *Entry = CostTableLookup(SSE41CostTbl, ISD, MTy)) | ||||||
3698 | return LT.first * Entry->Cost; | ||||||
3699 | |||||||
3700 | if (ST->hasSSE2()) | ||||||
3701 | if (const auto *Entry = CostTableLookup(SSE2CostTbl, ISD, MTy)) | ||||||
3702 | return LT.first * Entry->Cost; | ||||||
3703 | |||||||
3704 | if (ST->hasSSE1()) | ||||||
3705 | if (const auto *Entry = CostTableLookup(SSE1CostTbl, ISD, MTy)) | ||||||
3706 | return LT.first * Entry->Cost; | ||||||
3707 | |||||||
3708 | unsigned CmpOpcode; | ||||||
3709 | if (Ty->isFPOrFPVectorTy()) { | ||||||
3710 | CmpOpcode = Instruction::FCmp; | ||||||
3711 | } else { | ||||||
3712 | assert(Ty->isIntOrIntVectorTy() && | ||||||
3713 | "expecting floating point or integer type for min/max reduction"); | ||||||
3714 | CmpOpcode = Instruction::ICmp; | ||||||
3715 | } | ||||||
3716 | |||||||
3717 | TTI::TargetCostKind CostKind = TTI::TCK_RecipThroughput; | ||||||
3718 | // Otherwise fall back to cmp+select. | ||||||
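// E.g. (sketch): smin of <8 x i32> on an SSE2-only target has no table entry
// above, so it is costed as an icmp plus a select of the same vector type.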
3719 | InstructionCost Result = | ||||||
3720 | getCmpSelInstrCost(CmpOpcode, Ty, CondTy, CmpInst::BAD_ICMP_PREDICATE, | ||||||
3721 | CostKind) + | ||||||
3722 | getCmpSelInstrCost(Instruction::Select, Ty, CondTy, | ||||||
3723 | CmpInst::BAD_ICMP_PREDICATE, CostKind); | ||||||
3724 | return Result; | ||||||
3725 | } | ||||||
3726 | |||||||
3727 | InstructionCost | ||||||
3728 | X86TTIImpl::getMinMaxReductionCost(VectorType *ValTy, VectorType *CondTy, | ||||||
3729 | bool IsPairwise, bool IsUnsigned, | ||||||
3730 | TTI::TargetCostKind CostKind) { | ||||||
3731 | // Just use the default implementation for pair reductions. | ||||||
3732 | if (IsPairwise) | ||||||
3733 | return BaseT::getMinMaxReductionCost(ValTy, CondTy, IsPairwise, IsUnsigned, | ||||||
3734 | CostKind); | ||||||
3735 | |||||||
3736 | std::pair<int, MVT> LT = TLI->getTypeLegalizationCost(DL, ValTy); | ||||||
3737 | |||||||
3738 | MVT MTy = LT.second; | ||||||
3739 | |||||||
3740 | int ISD; | ||||||
3741 | if (ValTy->isIntOrIntVectorTy()) { | ||||||
3742 | ISD = IsUnsigned ? ISD::UMIN : ISD::SMIN; | ||||||
3743 | } else { | ||||||
3744 | assert(ValTy->isFPOrFPVectorTy() && | ||||||
3745 | "Expected floating point or integer vector type."); | ||||||
3746 | ISD = ISD::FMINNUM; | ||||||
3747 | } | ||||||
3748 | |||||||
3749 | // We use the Intel Architecture Code Analyzer (IACA) to measure the throughput | ||||||
3750 | // and use that as the cost. | ||||||
3751 | |||||||
3752 | static const CostTblEntry SSE2CostTblNoPairWise[] = { | ||||||
3753 | {ISD::UMIN, MVT::v2i16, 5}, // need pxors to use pminsw/pmaxsw | ||||||
3754 | {ISD::UMIN, MVT::v4i16, 7}, // need pxors to use pminsw/pmaxsw | ||||||
3755 | {ISD::UMIN, MVT::v8i16, 9}, // need pxors to use pminsw/pmaxsw | ||||||
3756 | }; | ||||||
3757 | |||||||
3758 | static const CostTblEntry SSE41CostTblNoPairWise[] = { | ||||||
3759 | {ISD::SMIN, MVT::v2i16, 3}, // same as sse2 | ||||||
3760 | {ISD::SMIN, MVT::v4i16, 5}, // same as sse2 | ||||||
3761 | {ISD::UMIN, MVT::v2i16, 5}, // same as sse2 | ||||||
3762 | {ISD::UMIN, MVT::v4i16, 7}, // same as sse2 | ||||||
3763 | {ISD::SMIN, MVT::v8i16, 4}, // phminposuw+xor | ||||||
3764 | {ISD::UMIN, MVT::v8i16, 4}, // FIXME: umin is cheaper than umax | ||||||
3765 | {ISD::SMIN, MVT::v2i8, 3}, // pminsb | ||||||
3766 | {ISD::SMIN, MVT::v4i8, 5}, // pminsb | ||||||
3767 | {ISD::SMIN, MVT::v8i8, 7}, // pminsb | ||||||
3768 | {ISD::SMIN, MVT::v16i8, 6}, | ||||||
3769 | {ISD::UMIN, MVT::v2i8, 3}, // same as sse2 | ||||||
3770 | {ISD::UMIN, MVT::v4i8, 5}, // same as sse2 | ||||||
3771 | {ISD::UMIN, MVT::v8i8, 7}, // same as sse2 | ||||||
3772 | {ISD::UMIN, MVT::v16i8, 6}, // FIXME: umin is cheaper than umax | ||||||
3773 | }; | ||||||
3774 | |||||||
3775 | static const CostTblEntry AVX1CostTblNoPairWise[] = { | ||||||
3776 | {ISD::SMIN, MVT::v16i16, 6}, | ||||||
3777 | {ISD::UMIN, MVT::v16i16, 6}, // FIXME: umin is cheaper than umax | ||||||
3778 | {ISD::SMIN, MVT::v32i8, 8}, | ||||||
3779 | {ISD::UMIN, MVT::v32i8, 8}, | ||||||
3780 | }; | ||||||
3781 | |||||||
3782 | static const CostTblEntry AVX512BWCostTblNoPairWise[] = { | ||||||
3783 | {ISD::SMIN, MVT::v32i16, 8}, | ||||||
3784 | {ISD::UMIN, MVT::v32i16, 8}, // FIXME: umin is cheaper than umax | ||||||
3785 | {ISD::SMIN, MVT::v64i8, 10}, | ||||||
3786 | {ISD::UMIN, MVT::v64i8, 10}, | ||||||
3787 | }; | ||||||
3788 | |||||||
3789 | // Before legalizing the type, give a chance to look up illegal narrow types | ||||||
3790 | // in the table. | ||||||
3791 | // FIXME: Is there a better way to do this? | ||||||
3792 | EVT VT = TLI->getValueType(DL, ValTy); | ||||||
3793 | if (VT.isSimple()) { | ||||||
3794 | MVT MTy = VT.getSimpleVT(); | ||||||
3795 | if (ST->hasBWI()) | ||||||
3796 | if (const auto *Entry = CostTableLookup(AVX512BWCostTblNoPairWise, ISD, MTy)) | ||||||
3797 | return Entry->Cost; | ||||||
3798 | |||||||
3799 | if (ST->hasAVX()) | ||||||
3800 | if (const auto *Entry = CostTableLookup(AVX1CostTblNoPairWise, ISD, MTy)) | ||||||
3801 | return Entry->Cost; | ||||||
3802 | |||||||
3803 | if (ST->hasSSE41()) | ||||||
3804 | if (const auto *Entry = CostTableLookup(SSE41CostTblNoPairWise, ISD, MTy)) | ||||||
3805 | return Entry->Cost; | ||||||
3806 | |||||||
3807 | if (ST->hasSSE2()) | ||||||
3808 | if (const auto *Entry = CostTableLookup(SSE2CostTblNoPairWise, ISD, MTy)) | ||||||
3809 | return Entry->Cost; | ||||||
3810 | } | ||||||
3811 | |||||||
3812 | auto *ValVTy = cast<FixedVectorType>(ValTy); | ||||||
3813 | unsigned NumVecElts = ValVTy->getNumElements(); | ||||||
3814 | |||||||
3815 | auto *Ty = ValVTy; | ||||||
3816 | InstructionCost MinMaxCost = 0; | ||||||
3817 | if (LT.first != 1 && MTy.isVector() && | ||||||
3818 | MTy.getVectorNumElements() < ValVTy->getNumElements()) { | ||||||
3819 | // Type needs to be split. We need LT.first - 1 operations. | ||||||
3820 | Ty = FixedVectorType::get(ValVTy->getElementType(), | ||||||
3821 | MTy.getVectorNumElements()); | ||||||
3822 | auto *SubCondTy = FixedVectorType::get(CondTy->getElementType(), | ||||||
3823 | MTy.getVectorNumElements()); | ||||||
3824 | MinMaxCost = getMinMaxCost(Ty, SubCondTy, IsUnsigned); | ||||||
3825 | MinMaxCost *= LT.first - 1; | ||||||
3826 | NumVecElts = MTy.getVectorNumElements(); | ||||||
3827 | } | ||||||
3828 | |||||||
3829 | if (ST->hasBWI()) | ||||||
3830 | if (const auto *Entry = CostTableLookup(AVX512BWCostTblNoPairWise, ISD, MTy)) | ||||||
3831 | return MinMaxCost + Entry->Cost; | ||||||
3832 | |||||||
3833 | if (ST->hasAVX()) | ||||||
3834 | if (const auto *Entry = CostTableLookup(AVX1CostTblNoPairWise, ISD, MTy)) | ||||||
3835 | return MinMaxCost + Entry->Cost; | ||||||
3836 | |||||||
3837 | if (ST->hasSSE41()) | ||||||
3838 | if (const auto *Entry = CostTableLookup(SSE41CostTblNoPairWise, ISD, MTy)) | ||||||
3839 | return MinMaxCost + Entry->Cost; | ||||||
3840 | |||||||
3841 | if (ST->hasSSE2()) | ||||||
3842 | if (const auto *Entry = CostTableLookup(SSE2CostTblNoPairWise, ISD, MTy)) | ||||||
3843 | return MinMaxCost + Entry->Cost; | ||||||
3844 | |||||||
3845 | unsigned ScalarSize = ValTy->getScalarSizeInBits(); | ||||||
3846 | |||||||
3847 | // Special case power of 2 reductions where the scalar type isn't changed | ||||||
3848 | // by type legalization. | ||||||
3849 | if (!isPowerOf2_32(ValVTy->getNumElements()) || | ||||||
3850 | ScalarSize != MTy.getScalarSizeInBits()) | ||||||
3851 | return BaseT::getMinMaxReductionCost(ValTy, CondTy, IsPairwise, IsUnsigned, | ||||||
3852 | CostKind); | ||||||
3853 | |||||||
3854 | // Now handle reduction with the legal type, taking into account size changes | ||||||
3855 | // at each level. | ||||||
3856 | while (NumVecElts > 1) { | ||||||
3857 | // Determine the size of the remaining vector we need to reduce. | ||||||
3858 | unsigned Size = NumVecElts * ScalarSize; | ||||||
3859 | NumVecElts /= 2; | ||||||
3860 | // If we're reducing from 256/512 bits, use an extract_subvector. | ||||||
3861 | if (Size > 128) { | ||||||
3862 | auto *SubTy = FixedVectorType::get(ValVTy->getElementType(), NumVecElts); | ||||||
3863 | MinMaxCost += | ||||||
3864 | getShuffleCost(TTI::SK_ExtractSubvector, Ty, None, NumVecElts, SubTy); | ||||||
3865 | Ty = SubTy; | ||||||
3866 | } else if (Size == 128) { | ||||||
3867 | // Reducing from 128 bits is a permute of v2f64/v2i64. | ||||||
3868 | VectorType *ShufTy; | ||||||
3869 | if (ValTy->isFloatingPointTy()) | ||||||
3870 | ShufTy = | ||||||
3871 | FixedVectorType::get(Type::getDoubleTy(ValTy->getContext()), 2); | ||||||
3872 | else | ||||||
3873 | ShufTy = FixedVectorType::get(Type::getInt64Ty(ValTy->getContext()), 2); | ||||||
3874 | MinMaxCost += | ||||||
3875 | getShuffleCost(TTI::SK_PermuteSingleSrc, ShufTy, None, 0, nullptr); | ||||||
3876 | } else if (Size == 64) { | ||||||
3877 | // Reducing from 64 bits is a shuffle of v4f32/v4i32. | ||||||
3878 | FixedVectorType *ShufTy; | ||||||
3879 | if (ValTy->isFloatingPointTy()) | ||||||
3880 | ShufTy = FixedVectorType::get(Type::getFloatTy(ValTy->getContext()), 4); | ||||||
3881 | else | ||||||
3882 | ShufTy = FixedVectorType::get(Type::getInt32Ty(ValTy->getContext()), 4); | ||||||
3883 | MinMaxCost += | ||||||
3884 | getShuffleCost(TTI::SK_PermuteSingleSrc, ShufTy, None, 0, nullptr); | ||||||
3885 | } else { | ||||||
3886 | // Reducing from smaller size is a shift by immediate. | ||||||
3887 | auto *ShiftTy = FixedVectorType::get( | ||||||
3888 | Type::getIntNTy(ValTy->getContext(), Size), 128 / Size); | ||||||
3889 | MinMaxCost += getArithmeticInstrCost( | ||||||
3890 | Instruction::LShr, ShiftTy, TTI::TCK_RecipThroughput, | ||||||
3891 | TargetTransformInfo::OK_AnyValue, | ||||||
3892 | TargetTransformInfo::OK_UniformConstantValue, | ||||||
3893 | TargetTransformInfo::OP_None, TargetTransformInfo::OP_None); | ||||||
3894 | } | ||||||
3895 | |||||||
3896 | // Add the min/max op for this level. | ||||||
3897 | auto *SubCondTy = | ||||||
3898 | FixedVectorType::get(CondTy->getElementType(), Ty->getNumElements()); | ||||||
3899 | MinMaxCost += getMinMaxCost(Ty, SubCondTy, IsUnsigned); | ||||||
3900 | } | ||||||
3901 | |||||||
3902 | // Add the final extract element to the cost. | ||||||
3903 | return MinMaxCost + getVectorInstrCost(Instruction::ExtractElement, Ty, 0); | ||||||
3904 | } | ||||||
3905 | |||||||
3906 | /// Calculate the cost of materializing a 64-bit value. This helper | ||||||
3907 | /// method might only calculate a fraction of a larger immediate. Therefore it | ||||||
3908 | /// is valid to return a cost of ZERO. | ||||||
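/// For illustration only: a value of 0 is TCC_Free, any value that fits in a
/// signed 32-bit immediate is TCC_Basic, and a wider value costs
/// 2 * TCC_Basic (roughly a movabs-style materialization).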
3909 | int X86TTIImpl::getIntImmCost(int64_t Val) { | ||||||
3910 | if (Val == 0) | ||||||
3911 | return TTI::TCC_Free; | ||||||
3912 | |||||||
3913 | if (isInt<32>(Val)) | ||||||
3914 | return TTI::TCC_Basic; | ||||||
3915 | |||||||
3916 | return 2 * TTI::TCC_Basic; | ||||||
3917 | } | ||||||
3918 | |||||||
3919 | int X86TTIImpl::getIntImmCost(const APInt &Imm, Type *Ty, | ||||||
3920 | TTI::TargetCostKind CostKind) { | ||||||
3921 | assert(Ty->isIntegerTy()); | ||||||
3922 | |||||||
3923 | unsigned BitSize = Ty->getPrimitiveSizeInBits(); | ||||||
3924 | if (BitSize == 0) | ||||||
3925 | return ~0U; | ||||||
3926 | |||||||
3927 | // Never hoist constants larger than 128 bits, because this might lead to | ||||||
3928 | // incorrect code generation or assertions in codegen. | ||||||
3929 | // FIXME: Create a cost model for types larger than i128 once the codegen | ||||||
3930 | // issues have been fixed. | ||||||
3931 | if (BitSize > 128) | ||||||
3932 | return TTI::TCC_Free; | ||||||
3933 | |||||||
3934 | if (Imm == 0) | ||||||
3935 | return TTI::TCC_Free; | ||||||
3936 | |||||||
3937 | // Sign-extend all constants to a multiple of 64-bit. | ||||||
3938 | APInt ImmVal = Imm; | ||||||
3939 | if (BitSize % 64 != 0) | ||||||
3940 | ImmVal = Imm.sext(alignTo(BitSize, 64)); | ||||||
3941 | |||||||
3942 | // Split the constant into 64-bit chunks and calculate the cost for each | ||||||
3943 | // chunk. | ||||||
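// Worked example (hypothetical): a 128-bit immediate equal to 1 << 64 splits
// into a zero low chunk (TCC_Free) and a high chunk of 1 (TCC_Basic), so the
// overall cost is std::max(1, 0 + 1) = 1.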
3944 | int Cost = 0; | ||||||
3945 | for (unsigned ShiftVal = 0; ShiftVal < BitSize; ShiftVal += 64) { | ||||||
3946 | APInt Tmp = ImmVal.ashr(ShiftVal).sextOrTrunc(64); | ||||||
3947 | int64_t Val = Tmp.getSExtValue(); | ||||||
3948 | Cost += getIntImmCost(Val); | ||||||
3949 | } | ||||||
3950 | // We need at least one instruction to materialize the constant. | ||||||
3951 | return std::max(1, Cost); | ||||||
3952 | } | ||||||
3953 | |||||||
3954 | int X86TTIImpl::getIntImmCostInst(unsigned Opcode, unsigned Idx, | ||||||
3955 | const APInt &Imm, Type *Ty, | ||||||
3956 | TTI::TargetCostKind CostKind, | ||||||
3957 | Instruction *Inst) { | ||||||
3958 | assert(Ty->isIntegerTy()); | ||||||
3959 | |||||||
3960 | unsigned BitSize = Ty->getPrimitiveSizeInBits(); | ||||||
3961 | // There is no cost model for constants with a bit size of 0. Return TCC_Free | ||||||
3962 | // here, so that constant hoisting will ignore this constant. | ||||||
3963 | if (BitSize == 0) | ||||||
3964 | return TTI::TCC_Free; | ||||||
3965 | |||||||
3966 | unsigned ImmIdx = ~0U; | ||||||
3967 | switch (Opcode) { | ||||||
3968 | default: | ||||||
3969 | return TTI::TCC_Free; | ||||||
3970 | case Instruction::GetElementPtr: | ||||||
3971 | // Always hoist the base address of a GetElementPtr. This prevents the | ||||||
3972 | // creation of new constants for every base constant that gets constant | ||||||
3973 | // folded with the offset. | ||||||
3974 | if (Idx == 0) | ||||||
3975 | return 2 * TTI::TCC_Basic; | ||||||
3976 | return TTI::TCC_Free; | ||||||
3977 | case Instruction::Store: | ||||||
3978 | ImmIdx = 0; | ||||||
3979 | break; | ||||||
3980 | case Instruction::ICmp: | ||||||
3981 | // This is an imperfect hack to prevent constant hoisting of | ||||||
3982 | // compares that might be trying to check if a 64-bit value fits in | ||||||
3983 | // 32-bits. The backend can optimize these cases using a right shift by 32. | ||||||
3984 | // Ideally we would check the compare predicate here. There are also other | ||||||
3985 | // similar immediates the backend can use shifts for. | ||||||
3986 | if (Idx == 1 && Imm.getBitWidth() == 64) { | ||||||
3987 | uint64_t ImmVal = Imm.getZExtValue(); | ||||||
3988 | if (ImmVal == 0x100000000ULL || ImmVal == 0xffffffff) | ||||||
3989 | return TTI::TCC_Free; | ||||||
3990 | } | ||||||
3991 | ImmIdx = 1; | ||||||
3992 | break; | ||||||
3993 | case Instruction::And: | ||||||
3994 | // We support 64-bit ANDs with immediates with 32-bits of leading zeroes | ||||||
3995 | // by using a 32-bit operation with implicit zero extension. Detect such | ||||||
3996 | // immediates here as the normal path expects bit 31 to be sign extended. | ||||||
3997 | if (Idx == 1 && Imm.getBitWidth() == 64 && isUInt<32>(Imm.getZExtValue())) | ||||||
3998 | return TTI::TCC_Free; | ||||||
3999 | ImmIdx = 1; | ||||||
4000 | break; | ||||||
4001 | case Instruction::Add: | ||||||
4002 | case Instruction::Sub: | ||||||
4003 | // For add/sub, we can use the opposite instruction for INT32_MIN. | ||||||
4004 | if (Idx == 1 && Imm.getBitWidth() == 64 && Imm.getZExtValue() == 0x80000000) | ||||||
4005 | return TTI::TCC_Free; | ||||||
4006 | ImmIdx = 1; | ||||||
4007 | break; | ||||||
4008 | case Instruction::UDiv: | ||||||
4009 | case Instruction::SDiv: | ||||||
4010 | case Instruction::URem: | ||||||
4011 | case Instruction::SRem: | ||||||
4012 | // Division by constant is typically expanded later into a different | ||||||
4013 | // instruction sequence. This completely changes the constants. | ||||||
4014 | // Report them as "free" to stop ConstantHoist from marking them as opaque. | ||||||
4015 | return TTI::TCC_Free; | ||||||
4016 | case Instruction::Mul: | ||||||
4017 | case Instruction::Or: | ||||||
4018 | case Instruction::Xor: | ||||||
4019 | ImmIdx = 1; | ||||||
4020 | break; | ||||||
4021 | // Always return TCC_Free for the shift value of a shift instruction. | ||||||
4022 | case Instruction::Shl: | ||||||
4023 | case Instruction::LShr: | ||||||
4024 | case Instruction::AShr: | ||||||
4025 | if (Idx == 1) | ||||||
4026 | return TTI::TCC_Free; | ||||||
4027 | break; | ||||||
4028 | case Instruction::Trunc: | ||||||
4029 | case Instruction::ZExt: | ||||||
4030 | case Instruction::SExt: | ||||||
4031 | case Instruction::IntToPtr: | ||||||
4032 | case Instruction::PtrToInt: | ||||||
4033 | case Instruction::BitCast: | ||||||
4034 | case Instruction::PHI: | ||||||
4035 | case Instruction::Call: | ||||||
4036 | case Instruction::Select: | ||||||
4037 | case Instruction::Ret: | ||||||
4038 | case Instruction::Load: | ||||||
4039 | break; | ||||||
4040 | } | ||||||
4041 | |||||||
4042 | if (Idx == ImmIdx) { | ||||||
4043 | int NumConstants = divideCeil(BitSize, 64); | ||||||
4044 | int Cost = X86TTIImpl::getIntImmCost(Imm, Ty, CostKind); | ||||||
4045 | return (Cost <= NumConstants * TTI::TCC_Basic) | ||||||
4046 | ? static_cast<int>(TTI::TCC_Free) | ||||||
4047 | : Cost; | ||||||
4048 | } | ||||||
4049 | |||||||
4050 | return X86TTIImpl::getIntImmCost(Imm, Ty, CostKind); | ||||||
4051 | } | ||||||
4052 | |||||||
4053 | int X86TTIImpl::getIntImmCostIntrin(Intrinsic::ID IID, unsigned Idx, | ||||||
4054 | const APInt &Imm, Type *Ty, | ||||||
4055 | TTI::TargetCostKind CostKind) { | ||||||
4056 | assert(Ty->isIntegerTy()); | ||||||
4057 | |||||||
4058 | unsigned BitSize = Ty->getPrimitiveSizeInBits(); | ||||||
4059 | // There is no cost model for constants with a bit size of 0. Return TCC_Free | ||||||
4060 | // here, so that constant hoisting will ignore this constant. | ||||||
4061 | if (BitSize == 0) | ||||||
4062 | return TTI::TCC_Free; | ||||||
4063 | |||||||
4064 | switch (IID) { | ||||||
4065 | default: | ||||||
4066 | return TTI::TCC_Free; | ||||||
4067 | case Intrinsic::sadd_with_overflow: | ||||||
4068 | case Intrinsic::uadd_with_overflow: | ||||||
4069 | case Intrinsic::ssub_with_overflow: | ||||||
4070 | case Intrinsic::usub_with_overflow: | ||||||
4071 | case Intrinsic::smul_with_overflow: | ||||||
4072 | case Intrinsic::umul_with_overflow: | ||||||
4073 | if ((Idx == 1) && Imm.getBitWidth() <= 64 && isInt<32>(Imm.getSExtValue())) | ||||||
4074 | return TTI::TCC_Free; | ||||||
4075 | break; | ||||||
4076 | case Intrinsic::experimental_stackmap: | ||||||
4077 | if ((Idx < 2) || (Imm.getBitWidth() <= 64 && isInt<64>(Imm.getSExtValue()))) | ||||||
4078 | return TTI::TCC_Free; | ||||||
4079 | break; | ||||||
4080 | case Intrinsic::experimental_patchpoint_void: | ||||||
4081 | case Intrinsic::experimental_patchpoint_i64: | ||||||
4082 | if ((Idx < 4) || (Imm.getBitWidth() <= 64 && isInt<64>(Imm.getSExtValue()))) | ||||||
4083 | return TTI::TCC_Free; | ||||||
4084 | break; | ||||||
4085 | } | ||||||
4086 | return X86TTIImpl::getIntImmCost(Imm, Ty, CostKind); | ||||||
4087 | } | ||||||
4088 | |||||||
4089 | unsigned X86TTIImpl::getCFInstrCost(unsigned Opcode, | ||||||
4090 | TTI::TargetCostKind CostKind, | ||||||
4091 | const Instruction *I) { | ||||||
4092 | if (CostKind != TTI::TCK_RecipThroughput) | ||||||
4093 | return Opcode == Instruction::PHI ? 0 : 1; | ||||||
4094 | // Branches are assumed to be predicted. | ||||||
4095 | return 0; | ||||||
4096 | } | ||||||
4097 | |||||||
4098 | int X86TTIImpl::getGatherOverhead() const { | ||||||
4099 | // Some CPUs have more overhead for gather. The specified overhead is relative | ||||||
4100 | // to the Load operation. "2" is the number provided by Intel architects. This | ||||||
4101 | // parameter is used for cost estimation of Gather Op and comparison with | ||||||
4102 | // other alternatives. | ||||||
4103 | // TODO: Remove the explicit hasAVX512()? That would mean we would only | ||||||
4104 | // enable gather with a -march. | ||||||
4105 | if (ST->hasAVX512() || (ST->hasAVX2() && ST->hasFastGather())) | ||||||
4106 | return 2; | ||||||
4107 | |||||||
4108 | return 1024; | ||||||
4109 | } | ||||||
4110 | |||||||
4111 | int X86TTIImpl::getScatterOverhead() const { | ||||||
4112 | if (ST->hasAVX512()) | ||||||
4113 | return 2; | ||||||
4114 | |||||||
4115 | return 1024; | ||||||
4116 | } | ||||||
4117 | |||||||
4118 | // Return an average cost of a Gather / Scatter instruction; may be improved later. | ||||||
4119 | // FIXME: Add TargetCostKind support. | ||||||
4120 | InstructionCost X86TTIImpl::getGSVectorCost(unsigned Opcode, Type *SrcVTy, | ||||||
4121 | const Value *Ptr, Align Alignment, | ||||||
4122 | unsigned AddressSpace) { | ||||||
4123 | |||||||
4124 | assert(isa<VectorType>(SrcVTy) && "Unexpected type in getGSVectorCost"); | ||||||
4125 | unsigned VF = cast<FixedVectorType>(SrcVTy)->getNumElements(); | ||||||
4126 | |||||||
4127 | // Try to reduce the index size from 64 bits (the default for GEP) to 32 bits. | ||||||
4128 | // This is essential for VF 16. If the index can't be reduced to 32 bits, the | ||||||
4129 | // operation will use 16 x 64 indices, which do not fit in a zmm register and | ||||||
4130 | // need to be split. Also check that the base pointer is the same for all lanes, | ||||||
4131 | // and that there's at most one variable index. | ||||||
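// Sketch under those assumptions: a gather of <16 x float> whose GEP has a
// scalar base and a single variable index that is 32-bit (or a sign-extended
// 64-bit index) can keep 32-bit indices, so all 16 fit in one zmm; with
// 64-bit indices the same gather would have to be split.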
4132 | auto getIndexSizeInBits = [](const Value *Ptr, const DataLayout &DL) { | ||||||
4133 | unsigned IndexSize = DL.getPointerSizeInBits(); | ||||||
4134 | const GetElementPtrInst *GEP = dyn_cast<GetElementPtrInst>(Ptr); | ||||||
4135 | if (IndexSize < 64 || !GEP) | ||||||
4136 | return IndexSize; | ||||||
4137 | |||||||
4138 | unsigned NumOfVarIndices = 0; | ||||||
4139 | const Value *Ptrs = GEP->getPointerOperand(); | ||||||
4140 | if (Ptrs->getType()->isVectorTy() && !getSplatValue(Ptrs)) | ||||||
4141 | return IndexSize; | ||||||
4142 | for (unsigned i = 1; i < GEP->getNumOperands(); ++i) { | ||||||
4143 | if (isa<Constant>(GEP->getOperand(i))) | ||||||
4144 | continue; | ||||||
4145 | Type *IndxTy = GEP->getOperand(i)->getType(); | ||||||
4146 | if (auto *IndexVTy = dyn_cast<VectorType>(IndxTy)) | ||||||
4147 | IndxTy = IndexVTy->getElementType(); | ||||||
4148 | if ((IndxTy->getPrimitiveSizeInBits() == 64 && | ||||||
4149 | !isa<SExtInst>(GEP->getOperand(i))) || | ||||||
4150 | ++NumOfVarIndices > 1) | ||||||
4151 | return IndexSize; // 64 | ||||||
4152 | } | ||||||
4153 | return (unsigned)32; | ||||||
4154 | }; | ||||||
4155 | |||||||
4156 | // Try to reduce IndexSize to 32 bits for VF >= 16. | ||||||
4157 | // By default the IndexSize is equal to the pointer size. | ||||||
4158 | unsigned IndexSize = (ST->hasAVX512() && VF >= 16) | ||||||
4159 | ? getIndexSizeInBits(Ptr, DL) | ||||||
4160 | : DL.getPointerSizeInBits(); | ||||||
4161 | |||||||
4162 | auto *IndexVTy = FixedVectorType::get( | ||||||
4163 | IntegerType::get(SrcVTy->getContext(), IndexSize), VF); | ||||||
4164 | std::pair<int, MVT> IdxsLT = TLI->getTypeLegalizationCost(DL, IndexVTy); | ||||||
4165 | std::pair<int, MVT> SrcLT = TLI->getTypeLegalizationCost(DL, SrcVTy); | ||||||
4166 | int SplitFactor = std::max(IdxsLT.first, SrcLT.first); | ||||||
4167 | if (SplitFactor > 1) { | ||||||
4168 | // Handle splitting of vector of pointers | ||||||
4169 | auto *SplitSrcTy = | ||||||
4170 | FixedVectorType::get(SrcVTy->getScalarType(), VF / SplitFactor); | ||||||
4171 | return SplitFactor * getGSVectorCost(Opcode, SplitSrcTy, Ptr, Alignment, | ||||||
4172 | AddressSpace); | ||||||
4173 | } | ||||||
4174 | |||||||
4175 | // The gather / scatter cost is given by Intel architects. It is a rough | ||||||
4176 | // number since we are looking at one instruction at a time. | ||||||
4177 | const int GSOverhead = (Opcode == Instruction::Load) | ||||||
4178 | ? getGatherOverhead() | ||||||
4179 | : getScatterOverhead(); | ||||||
4180 | return GSOverhead + VF * getMemoryOpCost(Opcode, SrcVTy->getScalarType(), | ||||||
4181 | MaybeAlign(Alignment), AddressSpace, | ||||||
4182 | TTI::TCK_RecipThroughput); | ||||||
4183 | } | ||||||
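// Worked example (illustrative numbers only, not taken from the cost tables):
// for a gather of <16 x float> that does not need splitting, assuming
// getGatherOverhead() == 2 and a per-element scalar load cost of 1, the
// function above would return 2 + 16 * 1 = 18.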
4184 | |||||||
4185 | /// Return the cost of full scalarization of gather / scatter operation. | ||||||
4186 | /// | ||||||
4187 | /// Opcode - Load or Store instruction. | ||||||
4188 | /// SrcVTy - The type of the data vector that should be gathered or scattered. | ||||||
4189 | /// VariableMask - The mask is non-constant at compile time. | ||||||
4190 | /// Alignment - Alignment for one element. | ||||||
4191 | /// AddressSpace - pointer[s] address space. | ||||||
4192 | /// | ||||||
4193 | /// FIXME: Add TargetCostKind support. | ||||||
4194 | InstructionCost X86TTIImpl::getGSScalarCost(unsigned Opcode, Type *SrcVTy, | ||||||
4195 | bool VariableMask, Align Alignment, | ||||||
4196 | unsigned AddressSpace) { | ||||||
4197 | unsigned VF = cast<FixedVectorType>(SrcVTy)->getNumElements(); | ||||||
4198 | APInt DemandedElts = APInt::getAllOnesValue(VF); | ||||||
4199 | TTI::TargetCostKind CostKind = TTI::TCK_RecipThroughput; | ||||||
4200 | |||||||
4201 | InstructionCost MaskUnpackCost = 0; | ||||||
4202 | if (VariableMask) { | ||||||
4203 | auto *MaskTy = | ||||||
4204 | FixedVectorType::get(Type::getInt1Ty(SrcVTy->getContext()), VF); | ||||||
4205 | MaskUnpackCost = | ||||||
4206 | getScalarizationOverhead(MaskTy, DemandedElts, false, true); | ||||||
4207 | InstructionCost ScalarCompareCost = getCmpSelInstrCost( | ||||||
4208 | Instruction::ICmp, Type::getInt1Ty(SrcVTy->getContext()), nullptr, | ||||||
4209 | CmpInst::BAD_ICMP_PREDICATE, CostKind); | ||||||
4210 | int BranchCost = getCFInstrCost(Instruction::Br, CostKind); | ||||||
4211 | MaskUnpackCost += VF * (BranchCost + ScalarCompareCost); | ||||||
4212 | } | ||||||
4213 | |||||||
4214 | // The cost of the scalar loads/stores. | ||||||
4215 | InstructionCost MemoryOpCost = | ||||||
4216 | VF * getMemoryOpCost(Opcode, SrcVTy->getScalarType(), | ||||||
4217 | MaybeAlign(Alignment), AddressSpace, CostKind); | ||||||
4218 | |||||||
4219 | int InsertExtractCost = 0; | ||||||
4220 | if (Opcode == Instruction::Load) | ||||||
4221 | for (unsigned i = 0; i < VF; ++i) | ||||||
4222 | // Add the cost of inserting each scalar load into the vector | ||||||
4223 | InsertExtractCost += | ||||||
4224 | getVectorInstrCost(Instruction::InsertElement, SrcVTy, i); | ||||||
4225 | else | ||||||
4226 | for (unsigned i = 0; i < VF; ++i) | ||||||
4227 | // Add the cost of extracting each element out of the data vector | ||||||
4228 | InsertExtractCost += | ||||||
4229 | getVectorInstrCost(Instruction::ExtractElement, SrcVTy, i); | ||||||
4230 | |||||||
4231 | return MemoryOpCost + MaskUnpackCost + InsertExtractCost; | ||||||
4232 | } | ||||||
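// Worked example (illustrative numbers only): scalarizing a variable-mask
// gather of <4 x i32>, assuming a mask scalarization overhead of 4, a branch
// cost of 1, a scalar compare cost of 1, a scalar load cost of 1 and an
// insert cost of 1, gives MaskUnpackCost = 4 + 4 * (1 + 1) = 12,
// MemoryOpCost = 4 and InsertExtractCost = 4, i.e. a total of 20.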
4233 | |||||||
4234 | /// Calculate the cost of Gather / Scatter operation | ||||||
4235 | InstructionCost X86TTIImpl::getGatherScatterOpCost( | ||||||
4236 | unsigned Opcode, Type *SrcVTy, const Value *Ptr, bool VariableMask, | ||||||
4237 | Align Alignment, TTI::TargetCostKind CostKind, | ||||||
4238 | const Instruction *I = nullptr) { | ||||||
4239 | if (CostKind != TTI::TCK_RecipThroughput) { | ||||||
4240 | if ((Opcode == Instruction::Load && | ||||||
4241 | isLegalMaskedGather(SrcVTy, Align(Alignment))) || | ||||||
4242 | (Opcode == Instruction::Store && | ||||||
4243 | isLegalMaskedScatter(SrcVTy, Align(Alignment)))) | ||||||
4244 | return 1; | ||||||
4245 | return BaseT::getGatherScatterOpCost(Opcode, SrcVTy, Ptr, VariableMask, | ||||||
4246 | Alignment, CostKind, I); | ||||||
4247 | } | ||||||
4248 | |||||||
4249 | assert(SrcVTy->isVectorTy() && "Unexpected data type for Gather/Scatter");
4250 | unsigned VF = cast<FixedVectorType>(SrcVTy)->getNumElements(); | ||||||
4251 | PointerType *PtrTy = dyn_cast<PointerType>(Ptr->getType()); | ||||||
4252 | if (!PtrTy && Ptr->getType()->isVectorTy()) | ||||||
4253 | PtrTy = dyn_cast<PointerType>( | ||||||
4254 | cast<VectorType>(Ptr->getType())->getElementType()); | ||||||
4255 | assert(PtrTy && "Unexpected type for Ptr argument");
4256 | unsigned AddressSpace = PtrTy->getAddressSpace(); | ||||||
4257 | |||||||
4258 | bool Scalarize = false; | ||||||
4259 | if ((Opcode == Instruction::Load && | ||||||
4260 | !isLegalMaskedGather(SrcVTy, Align(Alignment))) || | ||||||
4261 | (Opcode == Instruction::Store && | ||||||
4262 | !isLegalMaskedScatter(SrcVTy, Align(Alignment)))) | ||||||
4263 | Scalarize = true; | ||||||
4264 | // Gather / Scatter for 2-element vectors is not profitable on KNL / SKX.
4265 | // A 4-element gather/scatter instruction does not exist on KNL.
4266 | // We could extend it to 8 elements, but zeroing the upper bits of
4267 | // the mask vector would add more instructions. Right now we give the scalar
4268 | // cost of vector-4 for KNL. TODO: Check whether the gather/scatter instruction
4269 | // is better in the VariableMask case.
4270 | if (ST->hasAVX512() && (VF == 2 || (VF == 4 && !ST->hasVLX()))) | ||||||
4271 | Scalarize = true; | ||||||
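// For example, a masked gather of <4 x float> on KNL (AVX-512 without VLX)
// is costed through getGSScalarCost below, while on SKX (which has VLX) it
// is costed as a real gather via getGSVectorCost.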
4272 | |||||||
4273 | if (Scalarize) | ||||||
4274 | return getGSScalarCost(Opcode, SrcVTy, VariableMask, Alignment, | ||||||
4275 | AddressSpace); | ||||||
4276 | |||||||
4277 | return getGSVectorCost(Opcode, SrcVTy, Ptr, Alignment, AddressSpace); | ||||||
4278 | } | ||||||
4279 | |||||||
4280 | bool X86TTIImpl::isLSRCostLess(TargetTransformInfo::LSRCost &C1, | ||||||
4281 | TargetTransformInfo::LSRCost &C2) { | ||||||
4282 | // The X86-specific part here is that instruction count gets first priority.
4283 | return std::tie(C1.Insns, C1.NumRegs, C1.AddRecCost, | ||||||
4284 | C1.NumIVMuls, C1.NumBaseAdds, | ||||||
4285 | C1.ScaleCost, C1.ImmCost, C1.SetupCost) < | ||||||
4286 | std::tie(C2.Insns, C2.NumRegs, C2.AddRecCost, | ||||||
4287 | C2.NumIVMuls, C2.NumBaseAdds, | ||||||
4288 | C2.ScaleCost, C2.ImmCost, C2.SetupCost); | ||||||
4289 | } | ||||||
4290 | |||||||
4291 | bool X86TTIImpl::canMacroFuseCmp() { | ||||||
4292 | return ST->hasMacroFusion() || ST->hasBranchFusion(); | ||||||
4293 | } | ||||||
4294 | |||||||
4295 | bool X86TTIImpl::isLegalMaskedLoad(Type *DataTy, Align Alignment) { | ||||||
4296 | if (!ST->hasAVX()) | ||||||
4297 | return false; | ||||||
4298 | |||||||
4299 | // The backend can't handle a single element vector. | ||||||
4300 | if (isa<VectorType>(DataTy) && | ||||||
4301 | cast<FixedVectorType>(DataTy)->getNumElements() == 1) | ||||||
4302 | return false; | ||||||
4303 | Type *ScalarTy = DataTy->getScalarType(); | ||||||
4304 | |||||||
4305 | if (ScalarTy->isPointerTy()) | ||||||
4306 | return true; | ||||||
4307 | |||||||
4308 | if (ScalarTy->isFloatTy() || ScalarTy->isDoubleTy()) | ||||||
4309 | return true; | ||||||
4310 | |||||||
4311 | if (!ScalarTy->isIntegerTy()) | ||||||
4312 | return false; | ||||||
4313 | |||||||
4314 | unsigned IntWidth = ScalarTy->getIntegerBitWidth(); | ||||||
4315 | return IntWidth == 32 || IntWidth == 64 || | ||||||
4316 | ((IntWidth == 8 || IntWidth == 16) && ST->hasBWI()); | ||||||
4317 | } | ||||||
4318 | |||||||
4319 | bool X86TTIImpl::isLegalMaskedStore(Type *DataType, Align Alignment) { | ||||||
4320 | return isLegalMaskedLoad(DataType, Alignment); | ||||||
4321 | } | ||||||
4322 | |||||||
4323 | bool X86TTIImpl::isLegalNTLoad(Type *DataType, Align Alignment) { | ||||||
4324 | unsigned DataSize = DL.getTypeStoreSize(DataType); | ||||||
4325 | // The only supported nontemporal loads are for aligned vectors of 16 or 32 | ||||||
4326 | // bytes. Note that 32-byte nontemporal vector loads are supported by AVX2 | ||||||
4327 | // (the equivalent stores only require AVX). | ||||||
4328 | if (Alignment >= DataSize && (DataSize == 16 || DataSize == 32)) | ||||||
4329 | return DataSize == 16 ? ST->hasSSE1() : ST->hasAVX2(); | ||||||
4330 | |||||||
4331 | return false; | ||||||
4332 | } | ||||||
4333 | |||||||
4334 | bool X86TTIImpl::isLegalNTStore(Type *DataType, Align Alignment) { | ||||||
4335 | unsigned DataSize = DL.getTypeStoreSize(DataType); | ||||||
4336 | |||||||
4337 | // SSE4A supports nontemporal stores of float and double at arbitrary | ||||||
4338 | // alignment. | ||||||
4339 | if (ST->hasSSE4A() && (DataType->isFloatTy() || DataType->isDoubleTy())) | ||||||
4340 | return true; | ||||||
4341 | |||||||
4342 | // Besides the SSE4A subtarget exception above, only aligned stores are
4343 | // available nontemporally on any other subtarget. And only stores with a size
4344 | // of 4..32 bytes (powers of 2 only) are permitted.
4345 | if (Alignment < DataSize || DataSize < 4 || DataSize > 32 || | ||||||
4346 | !isPowerOf2_32(DataSize)) | ||||||
4347 | return false; | ||||||
4348 | |||||||
4349 | // 32-byte vector nontemporal stores are supported by AVX (the equivalent | ||||||
4350 | // loads require AVX2). | ||||||
4351 | if (DataSize == 32) | ||||||
4352 | return ST->hasAVX(); | ||||||
4353 | else if (DataSize == 16) | ||||||
4354 | return ST->hasSSE1(); | ||||||
4355 | return true; | ||||||
4356 | } | ||||||
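// Examples of what the checks above accept: a nontemporal store of double is
// legal at any alignment on SSE4A; a 16-byte <4 x float> store needs SSE1 and
// 16-byte alignment; a 32-byte <8 x float> store needs AVX and 32-byte
// alignment; anything larger than 32 bytes is rejected by the size check.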
4357 | |||||||
4358 | bool X86TTIImpl::isLegalMaskedExpandLoad(Type *DataTy) { | ||||||
4359 | if (!isa<VectorType>(DataTy)) | ||||||
4360 | return false; | ||||||
4361 | |||||||
4362 | if (!ST->hasAVX512()) | ||||||
4363 | return false; | ||||||
4364 | |||||||
4365 | // The backend can't handle a single element vector. | ||||||
4366 | if (cast<FixedVectorType>(DataTy)->getNumElements() == 1) | ||||||
4367 | return false; | ||||||
4368 | |||||||
4369 | Type *ScalarTy = cast<VectorType>(DataTy)->getElementType(); | ||||||
4370 | |||||||
4371 | if (ScalarTy->isFloatTy() || ScalarTy->isDoubleTy()) | ||||||
4372 | return true; | ||||||
4373 | |||||||
4374 | if (!ScalarTy->isIntegerTy()) | ||||||
4375 | return false; | ||||||
4376 | |||||||
4377 | unsigned IntWidth = ScalarTy->getIntegerBitWidth(); | ||||||
4378 | return IntWidth == 32 || IntWidth == 64 || | ||||||
4379 | ((IntWidth == 8 || IntWidth == 16) && ST->hasVBMI2()); | ||||||
4380 | } | ||||||
4381 | |||||||
4382 | bool X86TTIImpl::isLegalMaskedCompressStore(Type *DataTy) { | ||||||
4383 | return isLegalMaskedExpandLoad(DataTy); | ||||||
4384 | } | ||||||
4385 | |||||||
4386 | bool X86TTIImpl::isLegalMaskedGather(Type *DataTy, Align Alignment) { | ||||||
4387 | // Some CPUs have better gather performance than others. | ||||||
4388 | // TODO: Remove the explicit ST->hasAVX512()? That would mean we would only
4389 | // enable gather with a -march. | ||||||
4390 | if (!(ST->hasAVX512() || (ST->hasFastGather() && ST->hasAVX2()))) | ||||||
4391 | return false; | ||||||
4392 | |||||||
4393 | // This function is currently called in two cases: from the Loop Vectorizer
4394 | // and from the Scalarizer. | ||||||
4395 | // When the Loop Vectorizer asks about legality of the feature, | ||||||
4396 | // the vectorization factor is not calculated yet. The Loop Vectorizer | ||||||
4397 | // sends a scalar type and the decision is based on the width of the | ||||||
4398 | // scalar element. | ||||||
4399 | // Later on, the cost model will estimate usage of this intrinsic based on
4400 | // the vector type. | ||||||
4401 | // The Scalarizer asks again about legality. It sends a vector type. | ||||||
4402 | // In this case we can reject non-power-of-2 vectors. | ||||||
4403 | // We also reject single element vectors as the type legalizer can't | ||||||
4404 | // scalarize them.
4405 | if (auto *DataVTy = dyn_cast<FixedVectorType>(DataTy)) { | ||||||
4406 | unsigned NumElts = DataVTy->getNumElements(); | ||||||
4407 | if (NumElts == 1) | ||||||
4408 | return false; | ||||||
4409 | } | ||||||
4410 | Type *ScalarTy = DataTy->getScalarType(); | ||||||
4411 | if (ScalarTy->isPointerTy()) | ||||||
4412 | return true; | ||||||
4413 | |||||||
4414 | if (ScalarTy->isFloatTy() || ScalarTy->isDoubleTy()) | ||||||
4415 | return true; | ||||||
4416 | |||||||
4417 | if (!ScalarTy->isIntegerTy()) | ||||||
4418 | return false; | ||||||
4419 | |||||||
4420 | unsigned IntWidth = ScalarTy->getIntegerBitWidth(); | ||||||
4421 | return IntWidth == 32 || IntWidth == 64; | ||||||
4422 | } | ||||||
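// Per the checks above, gathers of pointer, float, double, i32 and i64
// elements are considered legal (given AVX-512, or AVX2 with fast gathers),
// while i8/i16 elements and single-element vectors are rejected.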
4423 | |||||||
4424 | bool X86TTIImpl::isLegalMaskedScatter(Type *DataType, Align Alignment) { | ||||||
4425 | // AVX2 doesn't support scatter | ||||||
4426 | if (!ST->hasAVX512()) | ||||||
4427 | return false; | ||||||
4428 | return isLegalMaskedGather(DataType, Alignment); | ||||||
4429 | } | ||||||
4430 | |||||||
4431 | bool X86TTIImpl::hasDivRemOp(Type *DataType, bool IsSigned) { | ||||||
4432 | EVT VT = TLI->getValueType(DL, DataType); | ||||||
4433 | return TLI->isOperationLegal(IsSigned ? ISD::SDIVREM : ISD::UDIVREM, VT); | ||||||
4434 | } | ||||||
4435 | |||||||
4436 | bool X86TTIImpl::isFCmpOrdCheaperThanFCmpZero(Type *Ty) { | ||||||
4437 | return false; | ||||||
4438 | } | ||||||
4439 | |||||||
4440 | bool X86TTIImpl::areInlineCompatible(const Function *Caller, | ||||||
4441 | const Function *Callee) const { | ||||||
4442 | const TargetMachine &TM = getTLI()->getTargetMachine(); | ||||||
4443 | |||||||
4444 | // Work this as a subsetting of subtarget features. | ||||||
4445 | const FeatureBitset &CallerBits = | ||||||
4446 | TM.getSubtargetImpl(*Caller)->getFeatureBits(); | ||||||
4447 | const FeatureBitset &CalleeBits = | ||||||
4448 | TM.getSubtargetImpl(*Callee)->getFeatureBits(); | ||||||
4449 | |||||||
4450 | FeatureBitset RealCallerBits = CallerBits & ~InlineFeatureIgnoreList; | ||||||
4451 | FeatureBitset RealCalleeBits = CalleeBits & ~InlineFeatureIgnoreList; | ||||||
4452 | return (RealCallerBits & RealCalleeBits) == RealCalleeBits; | ||||||
4453 | } | ||||||
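// In other words, inlining is allowed when the callee's (non-ignored) feature
// bits are a subset of the caller's. For example, a caller compiled with AVX2
// and BMI may inline a callee compiled with only AVX2, but not one that
// additionally requires AVX-512.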
4454 | |||||||
4455 | bool X86TTIImpl::areFunctionArgsABICompatible( | ||||||
4456 | const Function *Caller, const Function *Callee, | ||||||
4457 | SmallPtrSetImpl<Argument *> &Args) const { | ||||||
4458 | if (!BaseT::areFunctionArgsABICompatible(Caller, Callee, Args)) | ||||||
4459 | return false; | ||||||
4460 | |||||||
4461 | // If we get here, we know the target features match. If one function | ||||||
4462 | // considers 512-bit vectors legal and the other does not, consider them | ||||||
4463 | // incompatible. | ||||||
4464 | const TargetMachine &TM = getTLI()->getTargetMachine(); | ||||||
4465 | |||||||
4466 | if (TM.getSubtarget<X86Subtarget>(*Caller).useAVX512Regs() == | ||||||
4467 | TM.getSubtarget<X86Subtarget>(*Callee).useAVX512Regs()) | ||||||
4468 | return true; | ||||||
4469 | |||||||
4470 | // Consider the arguments compatible if they aren't vectors or aggregates. | ||||||
4471 | // FIXME: Look at the size of vectors. | ||||||
4472 | // FIXME: Look at the element types of aggregates to see if there are vectors. | ||||||
4473 | // FIXME: The API of this function seems intended to allow arguments | ||||||
4474 | // to be removed from the set, but the caller doesn't check if the set | ||||||
4475 | // becomes empty so that may not work in practice. | ||||||
4476 | return llvm::none_of(Args, [](Argument *A) { | ||||||
4477 | auto *EltTy = cast<PointerType>(A->getType())->getElementType(); | ||||||
4478 | return EltTy->isVectorTy() || EltTy->isAggregateType(); | ||||||
4479 | }); | ||||||
4480 | } | ||||||
4481 | |||||||
4482 | X86TTIImpl::TTI::MemCmpExpansionOptions | ||||||
4483 | X86TTIImpl::enableMemCmpExpansion(bool OptSize, bool IsZeroCmp) const { | ||||||
4484 | TTI::MemCmpExpansionOptions Options; | ||||||
4485 | Options.MaxNumLoads = TLI->getMaxExpandSizeMemcmp(OptSize); | ||||||
4486 | Options.NumLoadsPerBlock = 2; | ||||||
4487 | // All GPR and vector loads can be unaligned. | ||||||
4488 | Options.AllowOverlappingLoads = true; | ||||||
4489 | if (IsZeroCmp) { | ||||||
4490 | // Only enable vector loads for equality comparison. Right now the vector | ||||||
4491 | // version is not as fast for three way compare (see #33329). | ||||||
4492 | const unsigned PreferredWidth = ST->getPreferVectorWidth(); | ||||||
4493 | if (PreferredWidth >= 512 && ST->hasAVX512()) Options.LoadSizes.push_back(64); | ||||||
4494 | if (PreferredWidth >= 256 && ST->hasAVX()) Options.LoadSizes.push_back(32); | ||||||
4495 | if (PreferredWidth >= 128 && ST->hasSSE2()) Options.LoadSizes.push_back(16); | ||||||
4496 | } | ||||||
4497 | if (ST->is64Bit()) { | ||||||
4498 | Options.LoadSizes.push_back(8); | ||||||
4499 | } | ||||||
4500 | Options.LoadSizes.push_back(4); | ||||||
4501 | Options.LoadSizes.push_back(2); | ||||||
4502 | Options.LoadSizes.push_back(1); | ||||||
4503 | return Options; | ||||||
4504 | } | ||||||
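// Example, reading off the checks above: on a 64-bit AVX-512 target whose
// preferred vector width is 512 bits, a zero-equality memcmp expansion gets
// LoadSizes = {64, 32, 16, 8, 4, 2, 1}, i.e. it may compare up to 64 bytes
// per vector load.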
4505 | |||||||
4506 | bool X86TTIImpl::enableInterleavedAccessVectorization() { | ||||||
4507 | // TODO: We expect this to be beneficial regardless of arch, | ||||||
4508 | // but there are currently some unexplained performance artifacts on Atom. | ||||||
4509 | // As a temporary solution, disable on Atom. | ||||||
4510 | return !(ST->isAtom()); | ||||||
4511 | } | ||||||
4512 | |||||||
4513 | // Get estimation for interleaved load/store operations for AVX2. | ||||||
4514 | // \p Factor is the interleaved-access factor (stride) - number of | ||||||
4515 | // (interleaved) elements in the group. | ||||||
4516 | // \p Indices contains the indices for a strided load: when the | ||||||
4517 | // interleaved load has gaps they indicate which elements are used. | ||||||
4518 | // If Indices is empty (or if the number of indices is equal to the size | ||||||
4519 | // of the interleaved-access as given in \p Factor) the access has no gaps. | ||||||
4520 | // | ||||||
4521 | // As opposed to AVX-512, AVX2 does not have generic shuffles that allow | ||||||
4522 | // computing the cost using a generic formula as a function of generic | ||||||
4523 | // shuffles. We therefore use a lookup table instead, filled according to | ||||||
4524 | // the instruction sequences that codegen currently generates. | ||||||
4525 | InstructionCost X86TTIImpl::getInterleavedMemoryOpCostAVX2( | ||||||
4526 | unsigned Opcode, FixedVectorType *VecTy, unsigned Factor, | ||||||
4527 | ArrayRef<unsigned> Indices, Align Alignment, unsigned AddressSpace, | ||||||
4528 | TTI::TargetCostKind CostKind, bool UseMaskForCond, bool UseMaskForGaps) { | ||||||
4529 | |||||||
4530 | if (UseMaskForCond || UseMaskForGaps) | ||||||
4531 | return BaseT::getInterleavedMemoryOpCost(Opcode, VecTy, Factor, Indices, | ||||||
4532 | Alignment, AddressSpace, CostKind, | ||||||
4533 | UseMaskForCond, UseMaskForGaps); | ||||||
4534 | |||||||
4535 | // We currently support only fully-interleaved groups, with no gaps.
4536 | // TODO: Also support strided loads (interleaved groups with gaps).
4537 | if (Indices.size() && Indices.size() != Factor) | ||||||
4538 | return BaseT::getInterleavedMemoryOpCost(Opcode, VecTy, Factor, Indices, | ||||||
4539 | Alignment, AddressSpace, | ||||||
4540 | CostKind); | ||||||
4541 | |||||||
4542 | // VecTy for interleave memop is <VF*Factor x Elt>. | ||||||
4543 | // So, for VF=4, Interleave Factor = 3, Element type = i32 we have | ||||||
4544 | // VecTy = <12 x i32>. | ||||||
4545 | MVT LegalVT = getTLI()->getTypeLegalizationCost(DL, VecTy).second; | ||||||
4546 | |||||||
4547 | // This function can be called with VecTy=<6xi128>, Factor=3, in which case | ||||||
4548 | // the VF=2, while v2i128 is an unsupported MVT vector type | ||||||
4549 | // (see MachineValueType.h::getVectorVT()). | ||||||
4550 | if (!LegalVT.isVector()) | ||||||
4551 | return BaseT::getInterleavedMemoryOpCost(Opcode, VecTy, Factor, Indices, | ||||||
4552 | Alignment, AddressSpace, | ||||||
4553 | CostKind); | ||||||
4554 | |||||||
4555 | unsigned VF = VecTy->getNumElements() / Factor; | ||||||
4556 | Type *ScalarTy = VecTy->getElementType(); | ||||||
4557 | |||||||
4558 | // Calculate the number of memory operations (NumOfMemOps), required | ||||||
4559 | // for load/store the VecTy. | ||||||
4560 | unsigned VecTySize = DL.getTypeStoreSize(VecTy); | ||||||
4561 | unsigned LegalVTSize = LegalVT.getStoreSize(); | ||||||
4562 | unsigned NumOfMemOps = (VecTySize + LegalVTSize - 1) / LegalVTSize; | ||||||
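// E.g., with VecTySize = 48 bytes and LegalVTSize = 32 bytes (illustrative
// values), NumOfMemOps = (48 + 32 - 1) / 32 = 2 memory operations.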
4563 | |||||||
4564 | // Get the cost of one memory operation. | ||||||
4565 | auto *SingleMemOpTy = FixedVectorType::get(VecTy->getElementType(), | ||||||
4566 | LegalVT.getVectorNumElements()); | ||||||
4567 | InstructionCost MemOpCost = getMemoryOpCost( | ||||||
4568 | Opcode, SingleMemOpTy, MaybeAlign(Alignment), AddressSpace, CostKind); | ||||||
4569 | |||||||
4570 | auto *VT = FixedVectorType::get(ScalarTy, VF); | ||||||
4571 | EVT ETy = TLI->getValueType(DL, VT); | ||||||
4572 | if (!ETy.isSimple()) | ||||||
4573 | return BaseT::getInterleavedMemoryOpCost(Opcode, VecTy, Factor, Indices, | ||||||
4574 | Alignment, AddressSpace, | ||||||
4575 | CostKind); | ||||||
4576 | |||||||
4577 | // TODO: Complete for other data types and strides.
4578 | // Each combination of Stride, ElementTy and VF results in a different
4579 | // sequence; the cost tables are therefore accessed with
4580 | // Factor (stride) and VectorType = VF x ElemType.
4581 | // The cost accounts only for the shuffle sequence;
4582 | // the cost of the loads/stores is accounted for separately.
4583 | // | ||||||
4584 | static const CostTblEntry AVX2InterleavedLoadTbl[] = { | ||||||
4585 | { 2, MVT::v4i64, 6 }, //(load 8i64 and) deinterleave into 2 x 4i64 | ||||||
4586 | { 2, MVT::v4f64, 6 }, //(load 8f64 and) deinterleave into 2 x 4f64 | ||||||
4587 | |||||||
4588 | { 3, MVT::v2i8, 10 }, //(load 6i8 and) deinterleave into 3 x 2i8 | ||||||
4589 | { 3, MVT::v4i8, 4 }, //(load 12i8 and) deinterleave into 3 x 4i8 | ||||||
4590 | { 3, MVT::v8i8, 9 }, //(load 24i8 and) deinterleave into 3 x 8i8 | ||||||
4591 | { 3, MVT::v16i8, 11}, //(load 48i8 and) deinterleave into 3 x 16i8 | ||||||
4592 | { 3, MVT::v32i8, 13}, //(load 96i8 and) deinterleave into 3 x 32i8 | ||||||
4593 | { 3, MVT::v8f32, 17 }, //(load 24f32 and)deinterleave into 3 x 8f32 | ||||||
4594 | |||||||
4595 | { 4, MVT::v2i8, 12 }, //(load 8i8 and) deinterleave into 4 x 2i8 | ||||||
4596 | { 4, MVT::v4i8, 4 }, //(load 16i8 and) deinterleave into 4 x 4i8 | ||||||
4597 | { 4, MVT::v8i8, 20 }, //(load 32i8 and) deinterleave into 4 x 8i8 | ||||||
4598 | { 4, MVT::v16i8, 39 }, //(load 64i8 and) deinterleave into 4 x 16i8 | ||||||
4599 | { 4, MVT::v32i8, 80 }, //(load 128i8 and) deinterleave into 4 x 32i8 | ||||||
4600 | |||||||
4601 | { 8, MVT::v8f32, 40 } //(load 64f32 and)deinterleave into 8 x 8f32 | ||||||
4602 | }; | ||||||
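// For instance, the { 3, MVT::v16i8, 11 } entry above models a stride-3
// deinterleave of 48 i8 elements into 3 x v16i8 as an 11-instruction shuffle
// sequence; the loads themselves are added separately below as
// NumOfMemOps * MemOpCost.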
4603 | |||||||
4604 | static const CostTblEntry AVX2InterleavedStoreTbl[] = { | ||||||
4605 | { 2, MVT::v4i64, 6 }, //interleave into 2 x 4i64 into 8i64 (and store) | ||||||
4606 | { 2, MVT::v4f64, 6 }, //interleave into 2 x 4f64 into 8f64 (and store) | ||||||
4607 | |||||||
4608 | { 3, MVT::v2i8, 7 }, //interleave 3 x 2i8 into 6i8 (and store) | ||||||
4609 | { 3, MVT::v4i8, 8 }, //interleave 3 x 4i8 into 12i8 (and store) | ||||||
4610 | { 3, MVT::v8i8, 11 }, //interleave 3 x 8i8 into 24i8 (and store) | ||||||
4611 | { 3, MVT::v16i8, 11 }, //interleave 3 x 16i8 into 48i8 (and store) | ||||||
4612 | { 3, MVT::v32i8, 13 }, //interleave 3 x 32i8 into 96i8 (and store) | ||||||
4613 | |||||||
4614 | { 4, MVT::v2i8, 12 }, //interleave 4 x 2i8 into 8i8 (and store) | ||||||
4615 | { 4, MVT::v4i8, 9 }, //interleave 4 x 4i8 into 16i8 (and store) | ||||||
4616 | { 4, MVT::v8i8, 10 }, //interleave 4 x 8i8 into 32i8 (and store) | ||||||
4617 | { 4, MVT::v16i8, 10 }, //interleave 4 x 16i8 into 64i8 (and store) | ||||||
4618 | { 4, MVT::v32i8, 12 } //interleave 4 x 32i8 into 128i8 (and store) | ||||||
4619 | }; | ||||||
4620 | |||||||
4621 | if (Opcode == Instruction::Load) { | ||||||
4622 | if (const auto *Entry = | ||||||
4623 | CostTableLookup(AVX2InterleavedLoadTbl, Factor, ETy.getSimpleVT())) | ||||||
4624 | return NumOfMemOps * MemOpCost + Entry->Cost; | ||||||
4625 | } else { | ||||||
4626 | assert(Opcode == Instruction::Store &&
4627 |        "Expected Store Instruction at this point");
4628 | if (const auto *Entry = | ||||||
4629 | CostTableLookup(AVX2InterleavedStoreTbl, Factor, ETy.getSimpleVT())) | ||||||
4630 | return NumOfMemOps * MemOpCost + Entry->Cost; | ||||||
4631 | } | ||||||
4632 | |||||||
4633 | return BaseT::getInterleavedMemoryOpCost(Opcode, VecTy, Factor, Indices, | ||||||
4634 | Alignment, AddressSpace, CostKind); | ||||||
4635 | } | ||||||
4636 | |||||||
4637 | // Get an estimate for interleaved load/store operations and strided loads.
4638 | // \p Indices contains the indices for a strided load.
4639 | // \p Factor - the interleaving factor.
4640 | // AVX-512 provides 3-source shuffles that significantly reduce the cost.
4641 | InstructionCost X86TTIImpl::getInterleavedMemoryOpCostAVX512( | ||||||
4642 | unsigned Opcode, FixedVectorType *VecTy, unsigned Factor, | ||||||
4643 | ArrayRef<unsigned> Indices, Align Alignment, unsigned AddressSpace, | ||||||
4644 | TTI::TargetCostKind CostKind, bool UseMaskForCond, bool UseMaskForGaps) { | ||||||
4645 | |||||||
4646 | if (UseMaskForCond || UseMaskForGaps) | ||||||
4647 | return BaseT::getInterleavedMemoryOpCost(Opcode, VecTy, Factor, Indices, | ||||||
4648 | Alignment, AddressSpace, CostKind, | ||||||
4649 | UseMaskForCond, UseMaskForGaps); | ||||||
4650 | |||||||
4651 | // VecTy for interleave memop is <VF*Factor x Elt>. | ||||||
4652 | // So, for VF=4, Interleave Factor = 3, Element type = i32 we have | ||||||
4653 | // VecTy = <12 x i32>. | ||||||
4654 | |||||||
4655 | // Calculate the number of memory operations (NumOfMemOps), required | ||||||
4656 | // for load/store the VecTy. | ||||||
4657 | MVT LegalVT = getTLI()->getTypeLegalizationCost(DL, VecTy).second; | ||||||
4658 | unsigned VecTySize = DL.getTypeStoreSize(VecTy); | ||||||
4659 | unsigned LegalVTSize = LegalVT.getStoreSize(); | ||||||
4660 | unsigned NumOfMemOps = (VecTySize + LegalVTSize - 1) / LegalVTSize; | ||||||
4661 | |||||||
4662 | // Get the cost of one memory operation. | ||||||
4663 | auto *SingleMemOpTy = FixedVectorType::get(VecTy->getElementType(), | ||||||
4664 | LegalVT.getVectorNumElements()); | ||||||
4665 | InstructionCost MemOpCost = getMemoryOpCost( | ||||||
4666 | Opcode, SingleMemOpTy, MaybeAlign(Alignment), AddressSpace, CostKind); | ||||||
4667 | |||||||
4668 | unsigned VF = VecTy->getNumElements() / Factor; | ||||||
4669 | MVT VT = MVT::getVectorVT(MVT::getVT(VecTy->getScalarType()), VF); | ||||||
4670 | |||||||
4671 | if (Opcode == Instruction::Load) { | ||||||
4672 | // The tables (AVX512InterleavedLoadTbl and AVX512InterleavedStoreTbl) | ||||||
4673 | // contain the cost of the optimized shuffle sequence that the | ||||||
4674 | // X86InterleavedAccess pass will generate. | ||||||
4675 | // The cost of loads and stores are computed separately from the table. | ||||||
4676 | |||||||
4677 | // X86InterleavedAccess supports only the following interleaved-access groups.
4678 | static const CostTblEntry AVX512InterleavedLoadTbl[] = { | ||||||
4679 | {3, MVT::v16i8, 12}, //(load 48i8 and) deinterleave into 3 x 16i8 | ||||||
4680 | {3, MVT::v32i8, 14}, //(load 96i8 and) deinterleave into 3 x 32i8 | ||||||
4681 | {3, MVT::v64i8, 22}, //(load 192i8 and) deinterleave into 3 x 64i8
4682 | }; | ||||||
4683 | |||||||
4684 | if (const auto *Entry = | ||||||
4685 | CostTableLookup(AVX512InterleavedLoadTbl, Factor, VT)) | ||||||
4686 | return NumOfMemOps * MemOpCost + Entry->Cost; | ||||||
4687 | // If an entry does not exist, fall back to the default implementation.
4688 | |||||||
4689 | // The kind of shuffle depends on the number of loaded values.
4690 | // If we load the entire data in one register, we can use a one-source shuffle.
4691 | // Otherwise, we'll merge two sources in each operation.
4692 | TTI::ShuffleKind ShuffleKind = | ||||||
4693 | (NumOfMemOps > 1) ? TTI::SK_PermuteTwoSrc : TTI::SK_PermuteSingleSrc; | ||||||
4694 | |||||||
4695 | unsigned ShuffleCost = | ||||||
4696 | getShuffleCost(ShuffleKind, SingleMemOpTy, None, 0, nullptr); | ||||||
4697 | |||||||
4698 | unsigned NumOfLoadsInInterleaveGrp = | ||||||
4699 | Indices.size() ? Indices.size() : Factor; | ||||||
4700 | auto *ResultTy = FixedVectorType::get(VecTy->getElementType(), | ||||||
4701 | VecTy->getNumElements() / Factor); | ||||||
4702 | unsigned NumOfResults = | ||||||
4703 | getTLI()->getTypeLegalizationCost(DL, ResultTy).first * | ||||||
4704 | NumOfLoadsInInterleaveGrp; | ||||||
4705 | |||||||
4706 | // About half of the loads may be folded into shuffles when we have only
4707 | // one result. If we have more than one result, we do not fold loads at all.
4708 | unsigned NumOfUnfoldedLoads = | ||||||
4709 | NumOfResults > 1 ? NumOfMemOps : NumOfMemOps / 2; | ||||||
4710 | |||||||
4711 | // Get a number of shuffle operations per result. | ||||||
4712 | unsigned NumOfShufflesPerResult = | ||||||
4713 | std::max((unsigned)1, (unsigned)(NumOfMemOps - 1)); | ||||||
4714 | |||||||
4715 | // The SK_PermuteTwoSrc shuffle clobbers one of its source operands.
4716 | // When we have more than one destination, we need additional instructions
4717 | // to preserve the sources.
4718 | unsigned NumOfMoves = 0; | ||||||
4719 | if (NumOfResults > 1 && ShuffleKind == TTI::SK_PermuteTwoSrc) | ||||||
4720 | NumOfMoves = NumOfResults * NumOfShufflesPerResult / 2; | ||||||
4721 | |||||||
4722 | InstructionCost Cost = NumOfResults * NumOfShufflesPerResult * ShuffleCost + | ||||||
4723 | NumOfUnfoldedLoads * MemOpCost + NumOfMoves; | ||||||
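// Worked example (illustrative values): with NumOfResults = 2,
// NumOfShufflesPerResult = 3, ShuffleCost = 1, NumOfMemOps = 4 and
// MemOpCost = 1, we get NumOfUnfoldedLoads = 4, NumOfMoves = 2 * 3 / 2 = 3,
// and Cost = 2 * 3 * 1 + 4 * 1 + 3 = 13.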
4724 | |||||||
4725 | return Cost; | ||||||
4726 | } | ||||||
4727 | |||||||
4728 | // Store. | ||||||
4729 | assert(Opcode == Instruction::Store &&
4730 |        "Expected Store Instruction at this point");
4731 | // X86InterleavedAccess supports only the following interleaved-access groups.
4732 | static const CostTblEntry AVX512InterleavedStoreTbl[] = { | ||||||
4733 | {3, MVT::v16i8, 12}, // interleave 3 x 16i8 into 48i8 (and store) | ||||||
4734 | {3, MVT::v32i8, 14}, // interleave 3 x 32i8 into 96i8 (and store) | ||||||
4735 | {3, MVT::v64i8, 26}, // interleave 3 x 64i8 into 192i8 (and store)
4736 | |||||||
4737 | {4, MVT::v8i8, 10}, // interleave 4 x 8i8 into 32i8 (and store) | ||||||
4738 | {4, MVT::v16i8, 11}, // interleave 4 x 16i8 into 64i8 (and store) | ||||||
4739 | {4, MVT::v32i8, 14}, // interleave 4 x 32i8 into 128i8 (and store) | ||||||
4740 | {4, MVT::v64i8, 24} // interleave 4 x 64i8 into 256i8 (and store)
4741 | }; | ||||||
4742 | |||||||
4743 | if (const auto *Entry = | ||||||
4744 | CostTableLookup(AVX512InterleavedStoreTbl, Factor, VT)) | ||||||
4745 | return NumOfMemOps * MemOpCost + Entry->Cost; | ||||||
4746 | // If an entry does not exist, fall back to the default implementation.
4747 | |||||||
4748 | // There are no strided stores at the moment, and a store can't be folded
4749 | // into a shuffle.
4750 | unsigned NumOfSources = Factor; // The number of values to be merged. | ||||||
4751 | unsigned ShuffleCost = | ||||||
4752 | getShuffleCost(TTI::SK_PermuteTwoSrc, SingleMemOpTy, None, 0, nullptr); | ||||||
4753 | unsigned NumOfShufflesPerStore = NumOfSources - 1; | ||||||
4754 | |||||||
4755 | // The SK_PermuteTwoSrc shuffle clobbers one of its source operands.
4756 | // We need additional instructions to preserve the sources.
4757 | unsigned NumOfMoves = NumOfMemOps * NumOfShufflesPerStore / 2; | ||||||
4758 | InstructionCost Cost = | ||||||
4759 | NumOfMemOps * (MemOpCost + NumOfShufflesPerStore * ShuffleCost) + | ||||||
4760 | NumOfMoves; | ||||||
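// Worked example (illustrative values): for Factor = 3, NumOfMemOps = 2,
// MemOpCost = 1 and ShuffleCost = 1, we get NumOfShufflesPerStore = 2,
// NumOfMoves = 2 * 2 / 2 = 2, and Cost = 2 * (1 + 2 * 1) + 2 = 8.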
4761 | return Cost; | ||||||
4762 | } | ||||||
4763 | |||||||
4764 | InstructionCost X86TTIImpl::getInterleavedMemoryOpCost( | ||||||
4765 | unsigned Opcode, Type *VecTy, unsigned Factor, ArrayRef<unsigned> Indices, | ||||||
4766 | Align Alignment, unsigned AddressSpace, TTI::TargetCostKind CostKind, | ||||||
4767 | bool UseMaskForCond, bool UseMaskForGaps) { | ||||||
4768 | auto isSupportedOnAVX512 = [](Type *VecTy, bool HasBW) { | ||||||
4769 | Type *EltTy = cast<VectorType>(VecTy)->getElementType(); | ||||||
4770 | if (EltTy->isFloatTy() || EltTy->isDoubleTy() || EltTy->isIntegerTy(64) || | ||||||
4771 | EltTy->isIntegerTy(32) || EltTy->isPointerTy()) | ||||||
4772 | return true; | ||||||
4773 | if (EltTy->isIntegerTy(16) || EltTy->isIntegerTy(8)) | ||||||
4774 | return HasBW; | ||||||
4775 | return false; | ||||||
4776 | }; | ||||||
4777 | if (ST->hasAVX512() && isSupportedOnAVX512(VecTy, ST->hasBWI())) | ||||||
4778 | return getInterleavedMemoryOpCostAVX512( | ||||||
4779 | Opcode, cast<FixedVectorType>(VecTy), Factor, Indices, Alignment, | ||||||
4780 | AddressSpace, CostKind, UseMaskForCond, UseMaskForGaps); | ||||||
4781 | if (ST->hasAVX2()) | ||||||
| |||||||
4782 | return getInterleavedMemoryOpCostAVX2( | ||||||
4783 | Opcode, cast<FixedVectorType>(VecTy), Factor, Indices, Alignment, | ||||||
4784 | AddressSpace, CostKind, UseMaskForCond, UseMaskForGaps); | ||||||
4785 | |||||||
4786 | return BaseT::getInterleavedMemoryOpCost(Opcode, VecTy, Factor, Indices, | ||||||
4787 | Alignment, AddressSpace, CostKind, | ||||||
4788 | UseMaskForCond, UseMaskForGaps); | ||||||
4789 | } |
1 | //===- BasicTTIImpl.h -------------------------------------------*- C++ -*-===// | ||||||||||||
2 | // | ||||||||||||
3 | // Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions. | ||||||||||||
4 | // See https://llvm.org/LICENSE.txt for license information. | ||||||||||||
5 | // SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception | ||||||||||||
6 | // | ||||||||||||
7 | //===----------------------------------------------------------------------===// | ||||||||||||
8 | // | ||||||||||||
9 | /// \file | ||||||||||||
10 | /// This file provides a helper that implements much of the TTI interface in | ||||||||||||
11 | /// terms of the target-independent code generator and TargetLowering | ||||||||||||
12 | /// interfaces. | ||||||||||||
13 | // | ||||||||||||
14 | //===----------------------------------------------------------------------===// | ||||||||||||
15 | |||||||||||||
16 | #ifndef LLVM_CODEGEN_BASICTTIIMPL_H | ||||||||||||
17 | #define LLVM_CODEGEN_BASICTTIIMPL_H | ||||||||||||
18 | |||||||||||||
19 | #include "llvm/ADT/APInt.h" | ||||||||||||
20 | #include "llvm/ADT/ArrayRef.h" | ||||||||||||
21 | #include "llvm/ADT/BitVector.h" | ||||||||||||
22 | #include "llvm/ADT/SmallPtrSet.h" | ||||||||||||
23 | #include "llvm/ADT/SmallVector.h" | ||||||||||||
24 | #include "llvm/Analysis/LoopInfo.h" | ||||||||||||
25 | #include "llvm/Analysis/TargetTransformInfo.h" | ||||||||||||
26 | #include "llvm/Analysis/TargetTransformInfoImpl.h" | ||||||||||||
27 | #include "llvm/CodeGen/ISDOpcodes.h" | ||||||||||||
28 | #include "llvm/CodeGen/TargetLowering.h" | ||||||||||||
29 | #include "llvm/CodeGen/TargetSubtargetInfo.h" | ||||||||||||
30 | #include "llvm/CodeGen/ValueTypes.h" | ||||||||||||
31 | #include "llvm/IR/BasicBlock.h" | ||||||||||||
32 | #include "llvm/IR/Constant.h" | ||||||||||||
33 | #include "llvm/IR/Constants.h" | ||||||||||||
34 | #include "llvm/IR/DataLayout.h" | ||||||||||||
35 | #include "llvm/IR/DerivedTypes.h" | ||||||||||||
36 | #include "llvm/IR/InstrTypes.h" | ||||||||||||
37 | #include "llvm/IR/Instruction.h" | ||||||||||||
38 | #include "llvm/IR/Instructions.h" | ||||||||||||
39 | #include "llvm/IR/Intrinsics.h" | ||||||||||||
40 | #include "llvm/IR/Operator.h" | ||||||||||||
41 | #include "llvm/IR/Type.h" | ||||||||||||
42 | #include "llvm/IR/Value.h" | ||||||||||||
43 | #include "llvm/Support/Casting.h" | ||||||||||||
44 | #include "llvm/Support/CommandLine.h" | ||||||||||||
45 | #include "llvm/Support/ErrorHandling.h" | ||||||||||||
46 | #include "llvm/Support/MachineValueType.h" | ||||||||||||
47 | #include "llvm/Support/MathExtras.h" | ||||||||||||
48 | #include "llvm/Target/TargetMachine.h" | ||||||||||||
49 | #include <algorithm> | ||||||||||||
50 | #include <cassert> | ||||||||||||
51 | #include <cstdint> | ||||||||||||
52 | #include <limits> | ||||||||||||
53 | #include <utility> | ||||||||||||
54 | |||||||||||||
55 | namespace llvm { | ||||||||||||
56 | |||||||||||||
57 | class Function; | ||||||||||||
58 | class GlobalValue; | ||||||||||||
59 | class LLVMContext; | ||||||||||||
60 | class ScalarEvolution; | ||||||||||||
61 | class SCEV; | ||||||||||||
62 | class TargetMachine; | ||||||||||||
63 | |||||||||||||
64 | extern cl::opt<unsigned> PartialUnrollingThreshold; | ||||||||||||
65 | |||||||||||||
66 | /// Base class which can be used to help build a TTI implementation. | ||||||||||||
67 | /// | ||||||||||||
68 | /// This class provides as much implementation of the TTI interface as is | ||||||||||||
69 | /// possible using the target independent parts of the code generator. | ||||||||||||
70 | /// | ||||||||||||
71 | /// In order to subclass it, your class must implement a getST() method to | ||||||||||||
72 | /// return the subtarget, and a getTLI() method to return the target lowering. | ||||||||||||
73 | /// We need these methods implemented in the derived class so that this class | ||||||||||||
74 | /// doesn't have to duplicate storage for them. | ||||||||||||
75 | template <typename T> | ||||||||||||
76 | class BasicTTIImplBase : public TargetTransformInfoImplCRTPBase<T> { | ||||||||||||
77 | private: | ||||||||||||
78 | using BaseT = TargetTransformInfoImplCRTPBase<T>; | ||||||||||||
79 | using TTI = TargetTransformInfo; | ||||||||||||
80 | |||||||||||||
81 | /// Helper function to access this as a T. | ||||||||||||
82 | T *thisT() { return static_cast<T *>(this); } | ||||||||||||
83 | |||||||||||||
84 | /// Estimate a cost of Broadcast as an extract and sequence of insert | ||||||||||||
85 | /// operations. | ||||||||||||
86 | unsigned getBroadcastShuffleOverhead(FixedVectorType *VTy) { | ||||||||||||
87 | unsigned Cost = 0; | ||||||||||||
88 | // Broadcast cost is equal to the cost of extracting the zero'th element | ||||||||||||
89 | // plus the cost of inserting it into every element of the result vector. | ||||||||||||
90 | Cost += thisT()->getVectorInstrCost(Instruction::ExtractElement, VTy, 0); | ||||||||||||
91 | |||||||||||||
92 | for (int i = 0, e = VTy->getNumElements(); i < e; ++i) { | ||||||||||||
93 | Cost += thisT()->getVectorInstrCost(Instruction::InsertElement, VTy, i); | ||||||||||||
94 | } | ||||||||||||
95 | return Cost; | ||||||||||||
96 | } | ||||||||||||
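// For example, broadcasting a <4 x float> is modeled above as one
// extractelement (lane 0) plus four insertelements, so with unit per-element
// costs the overhead would be 1 + 4 = 5.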
97 | |||||||||||||
98 | /// Estimate a cost of shuffle as a sequence of extract and insert | ||||||||||||
99 | /// operations. | ||||||||||||
100 | unsigned getPermuteShuffleOverhead(FixedVectorType *VTy) { | ||||||||||||
101 | unsigned Cost = 0; | ||||||||||||
102 | // Shuffle cost is equal to the cost of extracting elements from its arguments
103 | // plus the cost of inserting them into the result vector.
104 | |||||||||||||
105 | // e.g. a <4 x float> shuffle with a mask of <0,5,2,7> needs to extract from
106 | // index 0 of the first vector, index 1 of the second vector, index 2 of the
107 | // first vector and finally index 3 of the second vector, and insert them at
108 | // indices <0,1,2,3> of the result vector.
109 | for (int i = 0, e = VTy->getNumElements(); i < e; ++i) { | ||||||||||||
110 | Cost += thisT()->getVectorInstrCost(Instruction::InsertElement, VTy, i); | ||||||||||||
111 | Cost += thisT()->getVectorInstrCost(Instruction::ExtractElement, VTy, i); | ||||||||||||
112 | } | ||||||||||||
113 | return Cost; | ||||||||||||
114 | } | ||||||||||||
115 | |||||||||||||
116 | /// Estimate a cost of subvector extraction as a sequence of extract and | ||||||||||||
117 | /// insert operations. | ||||||||||||
118 | unsigned getExtractSubvectorOverhead(VectorType *VTy, int Index, | ||||||||||||
119 | FixedVectorType *SubVTy) { | ||||||||||||
120 | assert(VTy && SubVTy &&
121 |        "Can only extract subvectors from vectors");
122 | int NumSubElts = SubVTy->getNumElements(); | ||||||||||||
123 | assert((!isa<FixedVectorType>(VTy) ||
124 |         (Index + NumSubElts) <=
125 |             (int)cast<FixedVectorType>(VTy)->getNumElements()) &&
126 |        "SK_ExtractSubvector index out of range");
127 | |||||||||||||
128 | unsigned Cost = 0; | ||||||||||||
129 | // Subvector extraction cost is equal to the cost of extracting elements from
130 | // the source vector type plus the cost of inserting them into the subvector
131 | // result type.
132 | for (int i = 0; i != NumSubElts; ++i) { | ||||||||||||
133 | Cost += thisT()->getVectorInstrCost(Instruction::ExtractElement, VTy, | ||||||||||||
134 | i + Index); | ||||||||||||
135 | Cost += | ||||||||||||
136 | thisT()->getVectorInstrCost(Instruction::InsertElement, SubVTy, i); | ||||||||||||
137 | } | ||||||||||||
138 | return Cost; | ||||||||||||
139 | } | ||||||||||||
140 | |||||||||||||
141 | /// Estimate a cost of subvector insertion as a sequence of extract and | ||||||||||||
142 | /// insert operations. | ||||||||||||
143 | unsigned getInsertSubvectorOverhead(VectorType *VTy, int Index, | ||||||||||||
144 | FixedVectorType *SubVTy) { | ||||||||||||
145 | assert(VTy && SubVTy &&
146 |        "Can only insert subvectors into vectors");
147 | int NumSubElts = SubVTy->getNumElements(); | ||||||||||||
148 | assert((!isa<FixedVectorType>(VTy) ||
149 |         (Index + NumSubElts) <=
150 |             (int)cast<FixedVectorType>(VTy)->getNumElements()) &&
151 |        "SK_InsertSubvector index out of range");
152 | |||||||||||||
153 | unsigned Cost = 0; | ||||||||||||
154 | // Subvector insertion cost is equal to the cost of extracting elements from
155 | // the subvector type plus the cost of inserting them into the result vector
156 | // type.
157 | for (int i = 0; i != NumSubElts; ++i) { | ||||||||||||
158 | Cost += | ||||||||||||
159 | thisT()->getVectorInstrCost(Instruction::ExtractElement, SubVTy, i); | ||||||||||||
160 | Cost += thisT()->getVectorInstrCost(Instruction::InsertElement, VTy, | ||||||||||||
161 | i + Index); | ||||||||||||
162 | } | ||||||||||||
163 | return Cost; | ||||||||||||
164 | } | ||||||||||||
165 | |||||||||||||
166 | /// Local query method delegates up to T which *must* implement this! | ||||||||||||
167 | const TargetSubtargetInfo *getST() const { | ||||||||||||
168 | return static_cast<const T *>(this)->getST(); | ||||||||||||
169 | } | ||||||||||||
170 | |||||||||||||
171 | /// Local query method delegates up to T which *must* implement this! | ||||||||||||
172 | const TargetLoweringBase *getTLI() const { | ||||||||||||
173 | return static_cast<const T *>(this)->getTLI(); | ||||||||||||
174 | } | ||||||||||||
175 | |||||||||||||
176 | static ISD::MemIndexedMode getISDIndexedMode(TTI::MemIndexedMode M) { | ||||||||||||
177 | switch (M) { | ||||||||||||
178 | case TTI::MIM_Unindexed: | ||||||||||||
179 | return ISD::UNINDEXED; | ||||||||||||
180 | case TTI::MIM_PreInc: | ||||||||||||
181 | return ISD::PRE_INC; | ||||||||||||
182 | case TTI::MIM_PreDec: | ||||||||||||
183 | return ISD::PRE_DEC; | ||||||||||||
184 | case TTI::MIM_PostInc: | ||||||||||||
185 | return ISD::POST_INC; | ||||||||||||
186 | case TTI::MIM_PostDec: | ||||||||||||
187 | return ISD::POST_DEC; | ||||||||||||
188 | } | ||||||||||||
189 | llvm_unreachable("Unexpected MemIndexedMode");
190 | } | ||||||||||||
191 | |||||||||||||
192 | protected: | ||||||||||||
193 | explicit BasicTTIImplBase(const TargetMachine *TM, const DataLayout &DL) | ||||||||||||
194 | : BaseT(DL) {} | ||||||||||||
195 | virtual ~BasicTTIImplBase() = default; | ||||||||||||
196 | |||||||||||||
197 | using TargetTransformInfoImplBase::DL; | ||||||||||||
198 | |||||||||||||
199 | public: | ||||||||||||
200 | /// \name Scalar TTI Implementations | ||||||||||||
201 | /// @{ | ||||||||||||
202 | bool allowsMisalignedMemoryAccesses(LLVMContext &Context, unsigned BitWidth, | ||||||||||||
203 | unsigned AddressSpace, Align Alignment, | ||||||||||||
204 | bool *Fast) const { | ||||||||||||
205 | EVT E = EVT::getIntegerVT(Context, BitWidth); | ||||||||||||
206 | return getTLI()->allowsMisalignedMemoryAccesses( | ||||||||||||
207 | E, AddressSpace, Alignment, MachineMemOperand::MONone, Fast); | ||||||||||||
208 | } | ||||||||||||
209 | |||||||||||||
210 | bool hasBranchDivergence() { return false; } | ||||||||||||
211 | |||||||||||||
212 | bool useGPUDivergenceAnalysis() { return false; } | ||||||||||||
213 | |||||||||||||
214 | bool isSourceOfDivergence(const Value *V) { return false; } | ||||||||||||
215 | |||||||||||||
216 | bool isAlwaysUniform(const Value *V) { return false; } | ||||||||||||
217 | |||||||||||||
218 | unsigned getFlatAddressSpace() { | ||||||||||||
219 | // Return an invalid address space. | ||||||||||||
220 | return -1; | ||||||||||||
221 | } | ||||||||||||
222 | |||||||||||||
223 | bool collectFlatAddressOperands(SmallVectorImpl<int> &OpIndexes, | ||||||||||||
224 | Intrinsic::ID IID) const { | ||||||||||||
225 | return false; | ||||||||||||
226 | } | ||||||||||||
227 | |||||||||||||
228 | bool isNoopAddrSpaceCast(unsigned FromAS, unsigned ToAS) const { | ||||||||||||
229 | return getTLI()->getTargetMachine().isNoopAddrSpaceCast(FromAS, ToAS); | ||||||||||||
230 | } | ||||||||||||
231 | |||||||||||||
232 | unsigned getAssumedAddrSpace(const Value *V) const { | ||||||||||||
233 | return getTLI()->getTargetMachine().getAssumedAddrSpace(V); | ||||||||||||
234 | } | ||||||||||||
235 | |||||||||||||
236 | Value *rewriteIntrinsicWithAddressSpace(IntrinsicInst *II, Value *OldV, | ||||||||||||
237 | Value *NewV) const { | ||||||||||||
238 | return nullptr; | ||||||||||||
239 | } | ||||||||||||
240 | |||||||||||||
241 | bool isLegalAddImmediate(int64_t imm) { | ||||||||||||
242 | return getTLI()->isLegalAddImmediate(imm); | ||||||||||||
243 | } | ||||||||||||
244 | |||||||||||||
245 | bool isLegalICmpImmediate(int64_t imm) { | ||||||||||||
246 | return getTLI()->isLegalICmpImmediate(imm); | ||||||||||||
247 | } | ||||||||||||
248 | |||||||||||||
249 | bool isLegalAddressingMode(Type *Ty, GlobalValue *BaseGV, int64_t BaseOffset, | ||||||||||||
250 | bool HasBaseReg, int64_t Scale, | ||||||||||||
251 | unsigned AddrSpace, Instruction *I = nullptr) { | ||||||||||||
252 | TargetLoweringBase::AddrMode AM; | ||||||||||||
253 | AM.BaseGV = BaseGV; | ||||||||||||
254 | AM.BaseOffs = BaseOffset; | ||||||||||||
255 | AM.HasBaseReg = HasBaseReg; | ||||||||||||
256 | AM.Scale = Scale; | ||||||||||||
257 | return getTLI()->isLegalAddressingMode(DL, AM, Ty, AddrSpace, I); | ||||||||||||
258 | } | ||||||||||||
259 | |||||||||||||
260 | bool isIndexedLoadLegal(TTI::MemIndexedMode M, Type *Ty, | ||||||||||||
261 | const DataLayout &DL) const { | ||||||||||||
262 | EVT VT = getTLI()->getValueType(DL, Ty); | ||||||||||||
263 | return getTLI()->isIndexedLoadLegal(getISDIndexedMode(M), VT); | ||||||||||||
264 | } | ||||||||||||
265 | |||||||||||||
266 | bool isIndexedStoreLegal(TTI::MemIndexedMode M, Type *Ty, | ||||||||||||
267 | const DataLayout &DL) const { | ||||||||||||
268 | EVT VT = getTLI()->getValueType(DL, Ty); | ||||||||||||
269 | return getTLI()->isIndexedStoreLegal(getISDIndexedMode(M), VT); | ||||||||||||
270 | } | ||||||||||||
271 | |||||||||||||
272 | bool isLSRCostLess(TTI::LSRCost C1, TTI::LSRCost C2) { | ||||||||||||
273 | return TargetTransformInfoImplBase::isLSRCostLess(C1, C2); | ||||||||||||
274 | } | ||||||||||||
275 | |||||||||||||
276 | bool isNumRegsMajorCostOfLSR() { | ||||||||||||
277 | return TargetTransformInfoImplBase::isNumRegsMajorCostOfLSR(); | ||||||||||||
278 | } | ||||||||||||
279 | |||||||||||||
280 | bool isProfitableLSRChainElement(Instruction *I) { | ||||||||||||
281 | return TargetTransformInfoImplBase::isProfitableLSRChainElement(I); | ||||||||||||
282 | } | ||||||||||||
283 | |||||||||||||
284 | int getScalingFactorCost(Type *Ty, GlobalValue *BaseGV, int64_t BaseOffset, | ||||||||||||
285 | bool HasBaseReg, int64_t Scale, unsigned AddrSpace) { | ||||||||||||
286 | TargetLoweringBase::AddrMode AM; | ||||||||||||
287 | AM.BaseGV = BaseGV; | ||||||||||||
288 | AM.BaseOffs = BaseOffset; | ||||||||||||
289 | AM.HasBaseReg = HasBaseReg; | ||||||||||||
290 | AM.Scale = Scale; | ||||||||||||
291 | return getTLI()->getScalingFactorCost(DL, AM, Ty, AddrSpace); | ||||||||||||
292 | } | ||||||||||||
293 | |||||||||||||
294 | bool isTruncateFree(Type *Ty1, Type *Ty2) { | ||||||||||||
295 | return getTLI()->isTruncateFree(Ty1, Ty2); | ||||||||||||
296 | } | ||||||||||||
297 | |||||||||||||
298 | bool isProfitableToHoist(Instruction *I) { | ||||||||||||
299 | return getTLI()->isProfitableToHoist(I); | ||||||||||||
300 | } | ||||||||||||
301 | |||||||||||||
302 | bool useAA() const { return getST()->useAA(); } | ||||||||||||
303 | |||||||||||||
304 | bool isTypeLegal(Type *Ty) { | ||||||||||||
305 | EVT VT = getTLI()->getValueType(DL, Ty); | ||||||||||||
306 | return getTLI()->isTypeLegal(VT); | ||||||||||||
307 | } | ||||||||||||
308 | |||||||||||||
309 | unsigned getRegUsageForType(Type *Ty) { | ||||||||||||
310 | return getTLI()->getTypeLegalizationCost(DL, Ty).first; | ||||||||||||
311 | } | ||||||||||||
312 | |||||||||||||
313 | int getGEPCost(Type *PointeeType, const Value *Ptr, | ||||||||||||
314 | ArrayRef<const Value *> Operands) { | ||||||||||||
315 | return BaseT::getGEPCost(PointeeType, Ptr, Operands); | ||||||||||||
316 | } | ||||||||||||
317 | |||||||||||||
318 | unsigned getEstimatedNumberOfCaseClusters(const SwitchInst &SI, | ||||||||||||
319 | unsigned &JumpTableSize, | ||||||||||||
320 | ProfileSummaryInfo *PSI, | ||||||||||||
321 | BlockFrequencyInfo *BFI) { | ||||||||||||
322 | /// Try to find the estimated number of clusters. Note that the number of | ||||||||||||
323 | /// clusters identified in this function could be different from the actual | ||||||||||||
324 | /// numbers found in lowering. This function ignores switches that are | ||||||||||||
325 | /// lowered with a mix of jump table / bit test / BTree. This function was | ||||||||||||
326 | /// initially intended to be used when estimating the cost of a switch in | ||||||||||||
327 | /// the inline cost heuristic, but it's a generic cost model to be used in other | ||||||||||||
328 | /// places (e.g., in loop unrolling). | ||||||||||||
329 | unsigned N = SI.getNumCases(); | ||||||||||||
330 | const TargetLoweringBase *TLI = getTLI(); | ||||||||||||
331 | const DataLayout &DL = this->getDataLayout(); | ||||||||||||
332 | |||||||||||||
333 | JumpTableSize = 0; | ||||||||||||
334 | bool IsJTAllowed = TLI->areJTsAllowed(SI.getParent()->getParent()); | ||||||||||||
335 | |||||||||||||
336 | // Early exit if both a jump table and bit test are not allowed. | ||||||||||||
337 | if (N < 1 || (!IsJTAllowed && DL.getIndexSizeInBits(0u) < N)) | ||||||||||||
338 | return N; | ||||||||||||
339 | |||||||||||||
340 | APInt MaxCaseVal = SI.case_begin()->getCaseValue()->getValue(); | ||||||||||||
341 | APInt MinCaseVal = MaxCaseVal; | ||||||||||||
342 | for (auto CI : SI.cases()) { | ||||||||||||
343 | const APInt &CaseVal = CI.getCaseValue()->getValue(); | ||||||||||||
344 | if (CaseVal.sgt(MaxCaseVal)) | ||||||||||||
345 | MaxCaseVal = CaseVal; | ||||||||||||
346 | if (CaseVal.slt(MinCaseVal)) | ||||||||||||
347 | MinCaseVal = CaseVal; | ||||||||||||
348 | } | ||||||||||||
349 | |||||||||||||
350 | // Check if suitable for a bit test | ||||||||||||
351 | if (N <= DL.getIndexSizeInBits(0u)) { | ||||||||||||
352 | SmallPtrSet<const BasicBlock *, 4> Dests; | ||||||||||||
353 | for (auto I : SI.cases()) | ||||||||||||
354 | Dests.insert(I.getCaseSuccessor()); | ||||||||||||
355 | |||||||||||||
356 | if (TLI->isSuitableForBitTests(Dests.size(), N, MinCaseVal, MaxCaseVal, | ||||||||||||
357 | DL)) | ||||||||||||
358 | return 1; | ||||||||||||
359 | } | ||||||||||||
360 | |||||||||||||
361 | // Check if suitable for a jump table. | ||||||||||||
362 | if (IsJTAllowed) { | ||||||||||||
363 | if (N < 2 || N < TLI->getMinimumJumpTableEntries()) | ||||||||||||
364 | return N; | ||||||||||||
365 | uint64_t Range = | ||||||||||||
366 | (MaxCaseVal - MinCaseVal) | ||||||||||||
367 | .getLimitedValue(std::numeric_limits<uint64_t>::max() - 1) + 1; | ||||||||||||
368 | // Check whether a range of clusters is dense enough for a jump table | ||||||||||||
369 | if (TLI->isSuitableForJumpTable(&SI, N, Range, PSI, BFI)) { | ||||||||||||
370 | JumpTableSize = Range; | ||||||||||||
371 | return 1; | ||||||||||||
372 | } | ||||||||||||
373 | } | ||||||||||||
374 | return N; | ||||||||||||
375 | } | ||||||||||||
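
To make the two suitability checks above concrete, here is a small standalone sketch (not LLVM code; the index width, minimum entry count, and density threshold are illustrative assumptions, and the helper names are hypothetical):

// Standalone illustration only; it mirrors the shape of the checks above with
// made-up thresholds and does not call any LLVM APIs.
#include <cstdint>
#include <iostream>

// A switch is a bit-test candidate when the case range fits in a machine word
// (hypothetical 64-bit index width) and it reaches few distinct destinations.
bool suitableForBitTest(unsigned NumDests, unsigned NumCases, uint64_t Range) {
  const unsigned IndexWidth = 64; // assumed DL.getIndexSizeInBits(0u)
  return Range < IndexWidth && NumDests <= 3 && NumCases >= 3;
}

// A jump table is attractive when the cases are dense enough over the range;
// the 40% density is an assumed stand-in for the target's real policy.
bool suitableForJumpTable(unsigned NumCases, uint64_t Range) {
  const unsigned MinEntries = 4; // assumed getMinimumJumpTableEntries()
  const double MinDensity = 0.4; // assumed density threshold
  return NumCases >= MinEntries &&
         (double)NumCases / (double)Range >= MinDensity;
}

int main() {
  // 8 cases spread over values 10..24, so Range = 15 and density is ~0.53.
  std::cout << suitableForJumpTable(8, 15) << '\n';  // 1: one jump-table cluster
  std::cout << suitableForBitTest(2, 8, 15) << '\n'; // 1: also a bit-test candidate
}
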
376 | |||||||||||||
377 | bool shouldBuildLookupTables() { | ||||||||||||
378 | const TargetLoweringBase *TLI = getTLI(); | ||||||||||||
379 | return TLI->isOperationLegalOrCustom(ISD::BR_JT, MVT::Other) || | ||||||||||||
380 | TLI->isOperationLegalOrCustom(ISD::BRIND, MVT::Other); | ||||||||||||
381 | } | ||||||||||||
382 | |||||||||||||
383 | bool shouldBuildRelLookupTables() { | ||||||||||||
384 | const TargetMachine &TM = getTLI()->getTargetMachine(); | ||||||||||||
385 | // If non-PIC mode, do not generate a relative lookup table. | ||||||||||||
386 | if (!TM.isPositionIndependent()) | ||||||||||||
387 | return false; | ||||||||||||
388 | |||||||||||||
389 | /// Relative lookup table entries consist of 32-bit offsets. | ||||||||||||
390 | /// Do not generate relative lookup tables for large code models | ||||||||||||
391 | /// in 64-bit architectures where 32-bit offsets might not be enough. | ||||||||||||
392 | if (TM.getCodeModel() == CodeModel::Medium || | ||||||||||||
393 | TM.getCodeModel() == CodeModel::Large) | ||||||||||||
394 | return false; | ||||||||||||
395 | |||||||||||||
396 | Triple TargetTriple = TM.getTargetTriple(); | ||||||||||||
397 | if (!TargetTriple.isArch64Bit()) | ||||||||||||
398 | return false; | ||||||||||||
399 | |||||||||||||
400 | // TODO: Triggers an issue in aarch64, so temporarily disable it. | ||||||||||||
401 | // See https://reviews.llvm.org/D99572 for more information. | ||||||||||||
402 | if (TargetTriple.getArch() == Triple::aarch64) | ||||||||||||
403 | return false; | ||||||||||||
404 | |||||||||||||
405 | return true; | ||||||||||||
406 | } | ||||||||||||
407 | |||||||||||||
408 | bool haveFastSqrt(Type *Ty) { | ||||||||||||
409 | const TargetLoweringBase *TLI = getTLI(); | ||||||||||||
410 | EVT VT = TLI->getValueType(DL, Ty); | ||||||||||||
411 | return TLI->isTypeLegal(VT) && | ||||||||||||
412 | TLI->isOperationLegalOrCustom(ISD::FSQRT, VT); | ||||||||||||
413 | } | ||||||||||||
414 | |||||||||||||
415 | bool isFCmpOrdCheaperThanFCmpZero(Type *Ty) { | ||||||||||||
416 | return true; | ||||||||||||
417 | } | ||||||||||||
418 | |||||||||||||
419 | unsigned getFPOpCost(Type *Ty) { | ||||||||||||
420 | // Check whether FADD is available, as a proxy for floating-point in | ||||||||||||
421 | // general. | ||||||||||||
422 | const TargetLoweringBase *TLI = getTLI(); | ||||||||||||
423 | EVT VT = TLI->getValueType(DL, Ty); | ||||||||||||
424 | if (TLI->isOperationLegalOrCustomOrPromote(ISD::FADD, VT)) | ||||||||||||
425 | return TargetTransformInfo::TCC_Basic; | ||||||||||||
426 | return TargetTransformInfo::TCC_Expensive; | ||||||||||||
427 | } | ||||||||||||
428 | |||||||||||||
429 | unsigned getInliningThresholdMultiplier() { return 1; } | ||||||||||||
430 | unsigned adjustInliningThreshold(const CallBase *CB) { return 0; } | ||||||||||||
431 | |||||||||||||
432 | int getInlinerVectorBonusPercent() { return 150; } | ||||||||||||
433 | |||||||||||||
434 | void getUnrollingPreferences(Loop *L, ScalarEvolution &SE, | ||||||||||||
435 | TTI::UnrollingPreferences &UP) { | ||||||||||||
436 | // This unrolling functionality is target independent, but to provide some | ||||||||||||
437 | // motivation for its intended use, for x86: | ||||||||||||
438 | |||||||||||||
439 | // According to the Intel 64 and IA-32 Architectures Optimization Reference | ||||||||||||
440 | // Manual, Intel Core models and later have a loop stream detector (and | ||||||||||||
441 | // associated uop queue) that can benefit from partial unrolling. | ||||||||||||
442 | // The relevant requirements are: | ||||||||||||
443 | // - The loop must have no more than 4 (8 for Nehalem and later) branches | ||||||||||||
444 | // taken, and none of them may be calls. | ||||||||||||
445 | // - The loop can have no more than 18 (28 for Nehalem and later) uops. | ||||||||||||
446 | |||||||||||||
447 | // According to the Software Optimization Guide for AMD Family 15h | ||||||||||||
448 | // Processors, models 30h-4fh (Steamroller and later) have a loop predictor | ||||||||||||
449 | // and loop buffer which can benefit from partial unrolling. | ||||||||||||
450 | // The relevant requirements are: | ||||||||||||
451 | // - The loop must have fewer than 16 branches | ||||||||||||
452 | // - The loop must have fewer than 40 uops in all executed loop branches | ||||||||||||
453 | |||||||||||||
454 | // The number of taken branches in a loop is hard to estimate here, and | ||||||||||||
455 | // benchmarking has revealed that it is better not to be conservative when | ||||||||||||
456 | // estimating the branch count. As a result, we'll ignore the branch limits | ||||||||||||
457 | // until someone finds a case where it matters in practice. | ||||||||||||
458 | |||||||||||||
459 | unsigned MaxOps; | ||||||||||||
460 | const TargetSubtargetInfo *ST = getST(); | ||||||||||||
461 | if (PartialUnrollingThreshold.getNumOccurrences() > 0) | ||||||||||||
462 | MaxOps = PartialUnrollingThreshold; | ||||||||||||
463 | else if (ST->getSchedModel().LoopMicroOpBufferSize > 0) | ||||||||||||
464 | MaxOps = ST->getSchedModel().LoopMicroOpBufferSize; | ||||||||||||
465 | else | ||||||||||||
466 | return; | ||||||||||||
467 | |||||||||||||
468 | // Scan the loop: don't unroll loops with calls. | ||||||||||||
469 | for (BasicBlock *BB : L->blocks()) { | ||||||||||||
470 | for (Instruction &I : *BB) { | ||||||||||||
471 | if (isa<CallInst>(I) || isa<InvokeInst>(I)) { | ||||||||||||
472 | if (const Function *F = cast<CallBase>(I).getCalledFunction()) { | ||||||||||||
473 | if (!thisT()->isLoweredToCall(F)) | ||||||||||||
474 | continue; | ||||||||||||
475 | } | ||||||||||||
476 | |||||||||||||
477 | return; | ||||||||||||
478 | } | ||||||||||||
479 | } | ||||||||||||
480 | } | ||||||||||||
481 | |||||||||||||
482 | // Enable runtime and partial unrolling up to the specified size. | ||||||||||||
483 | // Enable using trip count upper bound to unroll loops. | ||||||||||||
484 | UP.Partial = UP.Runtime = UP.UpperBound = true; | ||||||||||||
485 | UP.PartialThreshold = MaxOps; | ||||||||||||
486 | |||||||||||||
487 | // Avoid unrolling when optimizing for size. | ||||||||||||
488 | UP.OptSizeThreshold = 0; | ||||||||||||
489 | UP.PartialOptSizeThreshold = 0; | ||||||||||||
490 | |||||||||||||
491 | // Set the number of instructions optimized away when a "back edge" | ||||||||||||
492 | // becomes a "fall through" to the default value of 2. | ||||||||||||
493 | UP.BEInsns = 2; | ||||||||||||
494 | } | ||||||||||||
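
A rough worked example of how the op budget above is chosen, using a hypothetical subtarget whose loop buffer holds 28 micro-ops (the helper and all numbers are illustrative, not taken from a real scheduling model):

// Illustrative only: picks an unroll op budget the same way the code above
// does, from an explicit command-line override or a (made-up) loop micro-op
// buffer size reported by the scheduling model.
#include <iostream>

unsigned pickMaxOps(int CmdLineThreshold /* < 0 means unset */,
                    unsigned LoopMicroOpBufferSize) {
  if (CmdLineThreshold >= 0)
    return (unsigned)CmdLineThreshold; // explicit command-line override
  return LoopMicroOpBufferSize;        // else fall back to the sched model
}

int main() {
  // Assume a Nehalem-like loop stream detector that holds 28 uops.
  unsigned MaxOps = pickMaxOps(/*CmdLineThreshold=*/-1, /*BufferSize=*/28);
  // A 7-uop loop body could then be partially unrolled by ~4x and still fit.
  std::cout << "PartialThreshold = " << MaxOps << ", unroll factor ~ "
            << MaxOps / 7 << '\n';
}
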
495 | |||||||||||||
496 | void getPeelingPreferences(Loop *L, ScalarEvolution &SE, | ||||||||||||
497 | TTI::PeelingPreferences &PP) { | ||||||||||||
498 | PP.PeelCount = 0; | ||||||||||||
499 | PP.AllowPeeling = true; | ||||||||||||
500 | PP.AllowLoopNestsPeeling = false; | ||||||||||||
501 | PP.PeelProfiledIterations = true; | ||||||||||||
502 | } | ||||||||||||
503 | |||||||||||||
504 | bool isHardwareLoopProfitable(Loop *L, ScalarEvolution &SE, | ||||||||||||
505 | AssumptionCache &AC, | ||||||||||||
506 | TargetLibraryInfo *LibInfo, | ||||||||||||
507 | HardwareLoopInfo &HWLoopInfo) { | ||||||||||||
508 | return BaseT::isHardwareLoopProfitable(L, SE, AC, LibInfo, HWLoopInfo); | ||||||||||||
509 | } | ||||||||||||
510 | |||||||||||||
511 | bool preferPredicateOverEpilogue(Loop *L, LoopInfo *LI, ScalarEvolution &SE, | ||||||||||||
512 | AssumptionCache &AC, TargetLibraryInfo *TLI, | ||||||||||||
513 | DominatorTree *DT, | ||||||||||||
514 | const LoopAccessInfo *LAI) { | ||||||||||||
515 | return BaseT::preferPredicateOverEpilogue(L, LI, SE, AC, TLI, DT, LAI); | ||||||||||||
516 | } | ||||||||||||
517 | |||||||||||||
518 | bool emitGetActiveLaneMask() { | ||||||||||||
519 | return BaseT::emitGetActiveLaneMask(); | ||||||||||||
520 | } | ||||||||||||
521 | |||||||||||||
522 | Optional<Instruction *> instCombineIntrinsic(InstCombiner &IC, | ||||||||||||
523 | IntrinsicInst &II) { | ||||||||||||
524 | return BaseT::instCombineIntrinsic(IC, II); | ||||||||||||
525 | } | ||||||||||||
526 | |||||||||||||
527 | Optional<Value *> simplifyDemandedUseBitsIntrinsic(InstCombiner &IC, | ||||||||||||
528 | IntrinsicInst &II, | ||||||||||||
529 | APInt DemandedMask, | ||||||||||||
530 | KnownBits &Known, | ||||||||||||
531 | bool &KnownBitsComputed) { | ||||||||||||
532 | return BaseT::simplifyDemandedUseBitsIntrinsic(IC, II, DemandedMask, Known, | ||||||||||||
533 | KnownBitsComputed); | ||||||||||||
534 | } | ||||||||||||
535 | |||||||||||||
536 | Optional<Value *> simplifyDemandedVectorEltsIntrinsic( | ||||||||||||
537 | InstCombiner &IC, IntrinsicInst &II, APInt DemandedElts, APInt &UndefElts, | ||||||||||||
538 | APInt &UndefElts2, APInt &UndefElts3, | ||||||||||||
539 | std::function<void(Instruction *, unsigned, APInt, APInt &)> | ||||||||||||
540 | SimplifyAndSetOp) { | ||||||||||||
541 | return BaseT::simplifyDemandedVectorEltsIntrinsic( | ||||||||||||
542 | IC, II, DemandedElts, UndefElts, UndefElts2, UndefElts3, | ||||||||||||
543 | SimplifyAndSetOp); | ||||||||||||
544 | } | ||||||||||||
545 | |||||||||||||
546 | InstructionCost getInstructionLatency(const Instruction *I) { | ||||||||||||
547 | if (isa<LoadInst>(I)) | ||||||||||||
548 | return getST()->getSchedModel().DefaultLoadLatency; | ||||||||||||
549 | |||||||||||||
550 | return BaseT::getInstructionLatency(I); | ||||||||||||
551 | } | ||||||||||||
552 | |||||||||||||
553 | virtual Optional<unsigned> | ||||||||||||
554 | getCacheSize(TargetTransformInfo::CacheLevel Level) const { | ||||||||||||
555 | return Optional<unsigned>( | ||||||||||||
556 | getST()->getCacheSize(static_cast<unsigned>(Level))); | ||||||||||||
557 | } | ||||||||||||
558 | |||||||||||||
559 | virtual Optional<unsigned> | ||||||||||||
560 | getCacheAssociativity(TargetTransformInfo::CacheLevel Level) const { | ||||||||||||
561 | Optional<unsigned> TargetResult = | ||||||||||||
562 | getST()->getCacheAssociativity(static_cast<unsigned>(Level)); | ||||||||||||
563 | |||||||||||||
564 | if (TargetResult) | ||||||||||||
565 | return TargetResult; | ||||||||||||
566 | |||||||||||||
567 | return BaseT::getCacheAssociativity(Level); | ||||||||||||
568 | } | ||||||||||||
569 | |||||||||||||
570 | virtual unsigned getCacheLineSize() const { | ||||||||||||
571 | return getST()->getCacheLineSize(); | ||||||||||||
572 | } | ||||||||||||
573 | |||||||||||||
574 | virtual unsigned getPrefetchDistance() const { | ||||||||||||
575 | return getST()->getPrefetchDistance(); | ||||||||||||
576 | } | ||||||||||||
577 | |||||||||||||
578 | virtual unsigned getMinPrefetchStride(unsigned NumMemAccesses, | ||||||||||||
579 | unsigned NumStridedMemAccesses, | ||||||||||||
580 | unsigned NumPrefetches, | ||||||||||||
581 | bool HasCall) const { | ||||||||||||
582 | return getST()->getMinPrefetchStride(NumMemAccesses, NumStridedMemAccesses, | ||||||||||||
583 | NumPrefetches, HasCall); | ||||||||||||
584 | } | ||||||||||||
585 | |||||||||||||
586 | virtual unsigned getMaxPrefetchIterationsAhead() const { | ||||||||||||
587 | return getST()->getMaxPrefetchIterationsAhead(); | ||||||||||||
588 | } | ||||||||||||
589 | |||||||||||||
590 | virtual bool enableWritePrefetching() const { | ||||||||||||
591 | return getST()->enableWritePrefetching(); | ||||||||||||
592 | } | ||||||||||||
593 | |||||||||||||
594 | /// @} | ||||||||||||
595 | |||||||||||||
596 | /// \name Vector TTI Implementations | ||||||||||||
597 | /// @{ | ||||||||||||
598 | |||||||||||||
599 | TypeSize getRegisterBitWidth(TargetTransformInfo::RegisterKind K) const { | ||||||||||||
600 | return TypeSize::getFixed(32); | ||||||||||||
601 | } | ||||||||||||
602 | |||||||||||||
603 | Optional<unsigned> getMaxVScale() const { return None; } | ||||||||||||
604 | |||||||||||||
605 | /// Estimate the overhead of scalarizing an instruction. Insert and Extract | ||||||||||||
606 | /// are set if the demanded result elements need to be inserted and/or | ||||||||||||
607 | /// extracted from vectors. | ||||||||||||
608 | unsigned getScalarizationOverhead(VectorType *InTy, const APInt &DemandedElts, | ||||||||||||
609 | bool Insert, bool Extract) { | ||||||||||||
610 | /// FIXME: a bitfield is not a reasonable abstraction for talking about | ||||||||||||
611 | /// which elements are needed from a scalable vector | ||||||||||||
612 | auto *Ty = cast<FixedVectorType>(InTy); | ||||||||||||
613 | |||||||||||||
614 | assert(DemandedElts.getBitWidth() == Ty->getNumElements() && | ||||||||||||
615 | "Vector size mismatch"); | ||||||||||||
616 | |||||||||||||
617 | unsigned Cost = 0; | ||||||||||||
618 | |||||||||||||
619 | for (int i = 0, e = Ty->getNumElements(); i < e; ++i) { | ||||||||||||
620 | if (!DemandedElts[i]) | ||||||||||||
621 | continue; | ||||||||||||
622 | if (Insert) | ||||||||||||
623 | Cost += thisT()->getVectorInstrCost(Instruction::InsertElement, Ty, i); | ||||||||||||
624 | if (Extract) | ||||||||||||
625 | Cost += thisT()->getVectorInstrCost(Instruction::ExtractElement, Ty, i); | ||||||||||||
626 | } | ||||||||||||
627 | |||||||||||||
628 | return Cost; | ||||||||||||
629 | } | ||||||||||||
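
The demanded-elements loop above reduces to simple per-lane accounting; a minimal standalone sketch, assuming a unit cost for each insert and extract (real targets report different per-lane costs):

// Sketch of the loop above: per-element insert/extract costs are assumed to
// be 1 each, which is not what a real target would report.
#include <bitset>
#include <iostream>

unsigned scalarizationOverhead(std::bitset<32> DemandedElts, unsigned NumElts,
                               bool Insert, bool Extract) {
  const unsigned InsertCost = 1, ExtractCost = 1; // assumed per-lane costs
  unsigned Cost = 0;
  for (unsigned i = 0; i < NumElts; ++i) {
    if (!DemandedElts[i])
      continue; // undemanded lanes are free
    if (Insert)
      Cost += InsertCost;
    if (Extract)
      Cost += ExtractCost;
  }
  return Cost;
}

int main() {
  // A 4-element vector with only lanes 0 and 2 demanded, result inserted.
  std::cout << scalarizationOverhead(0b0101, 4, /*Insert=*/true,
                                     /*Extract=*/false) << '\n'; // prints 2
}
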
630 | |||||||||||||
631 | /// Helper wrapper for the DemandedElts variant of getScalarizationOverhead. | ||||||||||||
632 | unsigned getScalarizationOverhead(VectorType *InTy, bool Insert, | ||||||||||||
633 | bool Extract) { | ||||||||||||
634 | auto *Ty = cast<FixedVectorType>(InTy); | ||||||||||||
635 | |||||||||||||
636 | APInt DemandedElts = APInt::getAllOnesValue(Ty->getNumElements()); | ||||||||||||
637 | return thisT()->getScalarizationOverhead(Ty, DemandedElts, Insert, Extract); | ||||||||||||
638 | } | ||||||||||||
639 | |||||||||||||
640 | /// Estimate the overhead of scalarizing an instruction's unique | ||||||||||||
641 | /// non-constant operands. The (potentially vector) types to use for each | ||||||||||||
642 | /// argument are passed via Tys. | ||||||||||||
643 | unsigned getOperandsScalarizationOverhead(ArrayRef<const Value *> Args, | ||||||||||||
644 | ArrayRef<Type *> Tys) { | ||||||||||||
645 | assert(Args.size() == Tys.size() && "Expected matching Args and Tys"); | ||||||||||||
646 | |||||||||||||
647 | unsigned Cost = 0; | ||||||||||||
648 | SmallPtrSet<const Value*, 4> UniqueOperands; | ||||||||||||
649 | for (int I = 0, E = Args.size(); I != E; I++) { | ||||||||||||
650 | // Disregard things like metadata arguments. | ||||||||||||
651 | const Value *A = Args[I]; | ||||||||||||
652 | Type *Ty = Tys[I]; | ||||||||||||
653 | if (!Ty->isIntOrIntVectorTy() && !Ty->isFPOrFPVectorTy() && | ||||||||||||
654 | !Ty->isPtrOrPtrVectorTy()) | ||||||||||||
655 | continue; | ||||||||||||
656 | |||||||||||||
657 | if (!isa<Constant>(A) && UniqueOperands.insert(A).second) { | ||||||||||||
658 | if (auto *VecTy = dyn_cast<VectorType>(Ty)) | ||||||||||||
659 | Cost += getScalarizationOverhead(VecTy, false, true); | ||||||||||||
660 | } | ||||||||||||
661 | } | ||||||||||||
662 | |||||||||||||
663 | return Cost; | ||||||||||||
664 | } | ||||||||||||
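
The operand walk above only charges unique, non-constant vector operands; a small standalone sketch with a hypothetical Operand record and an assumed flat per-operand cost:

// Sketch of the operand walk above: duplicates and constants contribute
// nothing; each remaining vector operand is charged one assumed flat cost.
#include <iostream>
#include <set>
#include <string>
#include <vector>

struct Operand { std::string Name; bool IsConstant; bool IsVector; };

unsigned operandsOverhead(const std::vector<Operand> &Args,
                          unsigned PerOperandCost /* assumed */) {
  std::set<std::string> Unique;
  unsigned Cost = 0;
  for (const Operand &A : Args)
    if (!A.IsConstant && A.IsVector && Unique.insert(A.Name).second)
      Cost += PerOperandCost;
  return Cost;
}

int main() {
  std::vector<Operand> Args = {{"x", false, true}, {"x", false, true},
                               {"c", true, true}};
  std::cout << operandsOverhead(Args, 4) << '\n'; // 4: "x" once, "c" skipped
}
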
665 | |||||||||||||
666 | /// Estimate the overhead of scalarizing the inputs and outputs of an | ||||||||||||
667 | /// instruction, with return type RetTy and arguments Args of type Tys. If | ||||||||||||
668 | /// Args are unknown (empty), then the cost associated with one argument is | ||||||||||||
669 | /// added as a heuristic. | ||||||||||||
670 | unsigned getScalarizationOverhead(VectorType *RetTy, | ||||||||||||
671 | ArrayRef<const Value *> Args, | ||||||||||||
672 | ArrayRef<Type *> Tys) { | ||||||||||||
673 | unsigned Cost = 0; | ||||||||||||
674 | |||||||||||||
675 | Cost += getScalarizationOverhead(RetTy, true, false); | ||||||||||||
676 | if (!Args.empty()) | ||||||||||||
677 | Cost += getOperandsScalarizationOverhead(Args, Tys); | ||||||||||||
678 | else | ||||||||||||
679 | // When no information on arguments is provided, we add the cost | ||||||||||||
680 | // associated with one argument as a heuristic. | ||||||||||||
681 | Cost += getScalarizationOverhead(RetTy, false, true); | ||||||||||||
682 | |||||||||||||
683 | return Cost; | ||||||||||||
684 | } | ||||||||||||
685 | |||||||||||||
686 | unsigned getMaxInterleaveFactor(unsigned VF) { return 1; } | ||||||||||||
687 | |||||||||||||
688 | unsigned getArithmeticInstrCost( | ||||||||||||
689 | unsigned Opcode, Type *Ty, | ||||||||||||
690 | TTI::TargetCostKind CostKind = TTI::TCK_RecipThroughput, | ||||||||||||
691 | TTI::OperandValueKind Opd1Info = TTI::OK_AnyValue, | ||||||||||||
692 | TTI::OperandValueKind Opd2Info = TTI::OK_AnyValue, | ||||||||||||
693 | TTI::OperandValueProperties Opd1PropInfo = TTI::OP_None, | ||||||||||||
694 | TTI::OperandValueProperties Opd2PropInfo = TTI::OP_None, | ||||||||||||
695 | ArrayRef<const Value *> Args = ArrayRef<const Value *>(), | ||||||||||||
696 | const Instruction *CxtI = nullptr) { | ||||||||||||
697 | // Check if any of the operands are vector operands. | ||||||||||||
698 | const TargetLoweringBase *TLI = getTLI(); | ||||||||||||
699 | int ISD = TLI->InstructionOpcodeToISD(Opcode); | ||||||||||||
700 | assert(ISD && "Invalid opcode"); | ||||||||||||
701 | |||||||||||||
702 | // TODO: Handle more cost kinds. | ||||||||||||
703 | if (CostKind != TTI::TCK_RecipThroughput) | ||||||||||||
704 | return BaseT::getArithmeticInstrCost(Opcode, Ty, CostKind, | ||||||||||||
705 | Opd1Info, Opd2Info, | ||||||||||||
706 | Opd1PropInfo, Opd2PropInfo, | ||||||||||||
707 | Args, CxtI); | ||||||||||||
708 | |||||||||||||
709 | std::pair<unsigned, MVT> LT = TLI->getTypeLegalizationCost(DL, Ty); | ||||||||||||
710 | |||||||||||||
711 | bool IsFloat = Ty->isFPOrFPVectorTy(); | ||||||||||||
712 | // Assume that floating point arithmetic operations cost twice as much as | ||||||||||||
713 | // integer operations. | ||||||||||||
714 | unsigned OpCost = (IsFloat ? 2 : 1); | ||||||||||||
715 | |||||||||||||
716 | if (TLI->isOperationLegalOrPromote(ISD, LT.second)) { | ||||||||||||
717 | // The operation is legal. Assume it costs 1. | ||||||||||||
718 | // TODO: Once we have extract/insert subvector cost we need to use them. | ||||||||||||
719 | return LT.first * OpCost; | ||||||||||||
720 | } | ||||||||||||
721 | |||||||||||||
722 | if (!TLI->isOperationExpand(ISD, LT.second)) { | ||||||||||||
723 | // If the operation is custom lowered, then assume that the code is twice | ||||||||||||
724 | // as expensive. | ||||||||||||
725 | return LT.first * 2 * OpCost; | ||||||||||||
726 | } | ||||||||||||
727 | |||||||||||||
728 | // Else, assume that we need to scalarize this op. | ||||||||||||
729 | // TODO: If one of the types get legalized by splitting, handle this | ||||||||||||
730 | // similarly to what getCastInstrCost() does. | ||||||||||||
731 | if (auto *VTy = dyn_cast<VectorType>(Ty)) { | ||||||||||||
732 | unsigned Num = cast<FixedVectorType>(VTy)->getNumElements(); | ||||||||||||
733 | unsigned Cost = thisT()->getArithmeticInstrCost( | ||||||||||||
734 | Opcode, VTy->getScalarType(), CostKind, Opd1Info, Opd2Info, | ||||||||||||
735 | Opd1PropInfo, Opd2PropInfo, Args, CxtI); | ||||||||||||
736 | // Return the cost of multiple scalar invocations plus the cost of | ||||||||||||
737 | // inserting and extracting the values. | ||||||||||||
738 | SmallVector<Type *> Tys(Args.size(), Ty); | ||||||||||||
739 | return getScalarizationOverhead(VTy, Args, Tys) + Num * Cost; | ||||||||||||
740 | } | ||||||||||||
741 | |||||||||||||
742 | // We don't know anything about this scalar instruction. | ||||||||||||
743 | return OpCost; | ||||||||||||
744 | } | ||||||||||||
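
As a standalone illustration of the three tiers above (legal, custom, expand), with invented legalization results and the same assumption that floating point costs twice as much as integer:

// Standalone sketch: a legal op costs LT.first * OpCost, a custom-lowered op
// twice that, and an expanded vector op is charged per scalar lane plus an
// (assumed) scalarization overhead.
#include <iostream>

enum class Action { Legal, Custom, Expand };

unsigned arithmeticCost(Action A, unsigned LTFirst, bool IsFloat,
                        unsigned NumElts, unsigned ScalarizationOverhead) {
  unsigned OpCost = IsFloat ? 2 : 1; // FP assumed 2x integer
  if (A == Action::Legal)
    return LTFirst * OpCost;
  if (A == Action::Custom)
    return LTFirst * 2 * OpCost;
  // Expand: one scalar op per lane plus (assumed) insert/extract overhead.
  return NumElts * OpCost + ScalarizationOverhead;
}

int main() {
  // e.g. an 8-lane float add that legalizes into two legal registers: 2 * 2 = 4.
  std::cout << arithmeticCost(Action::Legal, /*LTFirst=*/2, /*IsFloat=*/true,
                              8, 0) << '\n';
}
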
745 | |||||||||||||
746 | unsigned getShuffleCost(TTI::ShuffleKind Kind, VectorType *Tp, | ||||||||||||
747 | ArrayRef<int> Mask, int Index, VectorType *SubTp) { | ||||||||||||
748 | |||||||||||||
749 | switch (Kind) { | ||||||||||||
750 | case TTI::SK_Broadcast: | ||||||||||||
751 | return getBroadcastShuffleOverhead(cast<FixedVectorType>(Tp)); | ||||||||||||
752 | case TTI::SK_Select: | ||||||||||||
753 | case TTI::SK_Reverse: | ||||||||||||
754 | case TTI::SK_Transpose: | ||||||||||||
755 | case TTI::SK_PermuteSingleSrc: | ||||||||||||
756 | case TTI::SK_PermuteTwoSrc: | ||||||||||||
757 | return getPermuteShuffleOverhead(cast<FixedVectorType>(Tp)); | ||||||||||||
758 | case TTI::SK_ExtractSubvector: | ||||||||||||
759 | return getExtractSubvectorOverhead(Tp, Index, | ||||||||||||
760 | cast<FixedVectorType>(SubTp)); | ||||||||||||
761 | case TTI::SK_InsertSubvector: | ||||||||||||
762 | return getInsertSubvectorOverhead(Tp, Index, | ||||||||||||
763 | cast<FixedVectorType>(SubTp)); | ||||||||||||
764 | } | ||||||||||||
765 | llvm_unreachable("Unknown TTI::ShuffleKind"); | ||||||||||||
766 | } | ||||||||||||
767 | |||||||||||||
768 | InstructionCost getCastInstrCost(unsigned Opcode, Type *Dst, Type *Src, | ||||||||||||
769 | TTI::CastContextHint CCH, | ||||||||||||
770 | TTI::TargetCostKind CostKind, | ||||||||||||
771 | const Instruction *I = nullptr) { | ||||||||||||
772 | if (BaseT::getCastInstrCost(Opcode, Dst, Src, CCH, CostKind, I) == 0) | ||||||||||||
773 | return 0; | ||||||||||||
774 | |||||||||||||
775 | const TargetLoweringBase *TLI = getTLI(); | ||||||||||||
776 | int ISD = TLI->InstructionOpcodeToISD(Opcode); | ||||||||||||
777 | assert(ISD && "Invalid opcode"); | ||||||||||||
778 | std::pair<unsigned, MVT> SrcLT = TLI->getTypeLegalizationCost(DL, Src); | ||||||||||||
779 | std::pair<unsigned, MVT> DstLT = TLI->getTypeLegalizationCost(DL, Dst); | ||||||||||||
780 | |||||||||||||
781 | TypeSize SrcSize = SrcLT.second.getSizeInBits(); | ||||||||||||
782 | TypeSize DstSize = DstLT.second.getSizeInBits(); | ||||||||||||
783 | bool IntOrPtrSrc = Src->isIntegerTy() || Src->isPointerTy(); | ||||||||||||
784 | bool IntOrPtrDst = Dst->isIntegerTy() || Dst->isPointerTy(); | ||||||||||||
785 | |||||||||||||
786 | switch (Opcode) { | ||||||||||||
787 | default: | ||||||||||||
788 | break; | ||||||||||||
789 | case Instruction::Trunc: | ||||||||||||
790 | // Check for NOOP conversions. | ||||||||||||
791 | if (TLI->isTruncateFree(SrcLT.second, DstLT.second)) | ||||||||||||
792 | return 0; | ||||||||||||
793 | LLVM_FALLTHROUGH; | ||||||||||||
794 | case Instruction::BitCast: | ||||||||||||
795 | // Bitcasts between types that are legalized to the same type are free, and | ||||||||||||
796 | // int to/from ptr casts of the same size are also assumed to be free. | ||||||||||||
797 | if (SrcLT.first == DstLT.first && IntOrPtrSrc == IntOrPtrDst && | ||||||||||||
798 | SrcSize == DstSize) | ||||||||||||
799 | return 0; | ||||||||||||
800 | break; | ||||||||||||
801 | case Instruction::FPExt: | ||||||||||||
802 | if (I && getTLI()->isExtFree(I)) | ||||||||||||
803 | return 0; | ||||||||||||
804 | break; | ||||||||||||
805 | case Instruction::ZExt: | ||||||||||||
806 | if (TLI->isZExtFree(SrcLT.second, DstLT.second)) | ||||||||||||
807 | return 0; | ||||||||||||
808 | LLVM_FALLTHROUGH; | ||||||||||||
809 | case Instruction::SExt: | ||||||||||||
810 | if (I && getTLI()->isExtFree(I)) | ||||||||||||
811 | return 0; | ||||||||||||
812 | |||||||||||||
813 | // If this is a zext/sext of a load, return 0 if the corresponding | ||||||||||||
814 | // extending load exists on target and the result type is legal. | ||||||||||||
815 | if (CCH == TTI::CastContextHint::Normal) { | ||||||||||||
816 | EVT ExtVT = EVT::getEVT(Dst); | ||||||||||||
817 | EVT LoadVT = EVT::getEVT(Src); | ||||||||||||
818 | unsigned LType = | ||||||||||||
819 | ((Opcode == Instruction::ZExt) ? ISD::ZEXTLOAD : ISD::SEXTLOAD); | ||||||||||||
820 | if (DstLT.first == SrcLT.first && | ||||||||||||
821 | TLI->isLoadExtLegal(LType, ExtVT, LoadVT)) | ||||||||||||
822 | return 0; | ||||||||||||
823 | } | ||||||||||||
824 | break; | ||||||||||||
825 | case Instruction::AddrSpaceCast: | ||||||||||||
826 | if (TLI->isFreeAddrSpaceCast(Src->getPointerAddressSpace(), | ||||||||||||
827 | Dst->getPointerAddressSpace())) | ||||||||||||
828 | return 0; | ||||||||||||
829 | break; | ||||||||||||
830 | } | ||||||||||||
831 | |||||||||||||
832 | auto *SrcVTy = dyn_cast<VectorType>(Src); | ||||||||||||
833 | auto *DstVTy = dyn_cast<VectorType>(Dst); | ||||||||||||
834 | |||||||||||||
835 | // If the cast is marked as legal (or promote) then assume low cost. | ||||||||||||
836 | if (SrcLT.first == DstLT.first && | ||||||||||||
837 | TLI->isOperationLegalOrPromote(ISD, DstLT.second)) | ||||||||||||
838 | return SrcLT.first; | ||||||||||||
839 | |||||||||||||
840 | // Handle scalar conversions. | ||||||||||||
841 | if (!SrcVTy && !DstVTy) { | ||||||||||||
842 | // Just check the op cost. If the operation is legal then assume it costs | ||||||||||||
843 | // 1. | ||||||||||||
844 | if (!TLI->isOperationExpand(ISD, DstLT.second)) | ||||||||||||
845 | return 1; | ||||||||||||
846 | |||||||||||||
847 | // Assume that illegal scalar instructions are expensive. | ||||||||||||
848 | return 4; | ||||||||||||
849 | } | ||||||||||||
850 | |||||||||||||
851 | // Check vector-to-vector casts. | ||||||||||||
852 | if (DstVTy && SrcVTy) { | ||||||||||||
853 | // If the cast is between same-sized registers, then the check is simple. | ||||||||||||
854 | if (SrcLT.first == DstLT.first && SrcSize == DstSize) { | ||||||||||||
855 | |||||||||||||
856 | // Assume that Zext is done using AND. | ||||||||||||
857 | if (Opcode == Instruction::ZExt) | ||||||||||||
858 | return SrcLT.first; | ||||||||||||
859 | |||||||||||||
860 | // Assume that sext is done using SHL and SRA. | ||||||||||||
861 | if (Opcode == Instruction::SExt) | ||||||||||||
862 | return SrcLT.first * 2; | ||||||||||||
863 | |||||||||||||
864 | // Just check the op cost. | ||||||||||||
865 | // If the operation is legal then assume it costs 1 and multiply by the | ||||||||||||
866 | // type-legalization overhead. | ||||||||||||
867 | if (!TLI->isOperationExpand(ISD, DstLT.second)) | ||||||||||||
868 | return SrcLT.first * 1; | ||||||||||||
869 | } | ||||||||||||
870 | |||||||||||||
871 | // If we are legalizing by splitting, query the concrete TTI for the cost | ||||||||||||
872 | // of casting the original vector twice. We also need to factor in the | ||||||||||||
873 | // cost of the split itself. Count that as 1, to be consistent with | ||||||||||||
874 | // TLI->getTypeLegalizationCost(). | ||||||||||||
875 | bool SplitSrc = | ||||||||||||
876 | TLI->getTypeAction(Src->getContext(), TLI->getValueType(DL, Src)) == | ||||||||||||
877 | TargetLowering::TypeSplitVector; | ||||||||||||
878 | bool SplitDst = | ||||||||||||
879 | TLI->getTypeAction(Dst->getContext(), TLI->getValueType(DL, Dst)) == | ||||||||||||
880 | TargetLowering::TypeSplitVector; | ||||||||||||
881 | if ((SplitSrc || SplitDst) && SrcVTy->getElementCount().isVector() && | ||||||||||||
882 | DstVTy->getElementCount().isVector()) { | ||||||||||||
883 | Type *SplitDstTy = VectorType::getHalfElementsVectorType(DstVTy); | ||||||||||||
884 | Type *SplitSrcTy = VectorType::getHalfElementsVectorType(SrcVTy); | ||||||||||||
885 | T *TTI = static_cast<T *>(this); | ||||||||||||
886 | // If both types need to be split then the split is free. | ||||||||||||
887 | InstructionCost SplitCost = | ||||||||||||
888 | (!SplitSrc || !SplitDst) ? TTI->getVectorSplitCost() : 0; | ||||||||||||
889 | return SplitCost + | ||||||||||||
890 | (2 * TTI->getCastInstrCost(Opcode, SplitDstTy, SplitSrcTy, CCH, | ||||||||||||
891 | CostKind, I)); | ||||||||||||
892 | } | ||||||||||||
893 | |||||||||||||
894 | // In other cases where the source or destination is illegal, assume | ||||||||||||
895 | // the operation will get scalarized. | ||||||||||||
896 | unsigned Num = cast<FixedVectorType>(DstVTy)->getNumElements(); | ||||||||||||
897 | InstructionCost Cost = thisT()->getCastInstrCost( | ||||||||||||
898 | Opcode, Dst->getScalarType(), Src->getScalarType(), CCH, CostKind, I); | ||||||||||||
899 | |||||||||||||
900 | // Return the cost of multiple scalar invocations plus the cost of | ||||||||||||
901 | // inserting and extracting the values. | ||||||||||||
902 | return getScalarizationOverhead(DstVTy, true, true) + Num * Cost; | ||||||||||||
903 | } | ||||||||||||
904 | |||||||||||||
905 | // We already handled vector-to-vector and scalar-to-scalar conversions. | ||||||||||||
906 | // This is where we handle bitcast between vectors and scalars. | ||||||||||||
907 | // We need to assume that the conversion is scalarized in one way or | ||||||||||||
908 | // another. | ||||||||||||
909 | if (Opcode == Instruction::BitCast) { | ||||||||||||
910 | // Illegal bitcasts are done by storing and loading from a stack slot. | ||||||||||||
911 | return (SrcVTy ? getScalarizationOverhead(SrcVTy, false, true) : 0) + | ||||||||||||
912 | (DstVTy ? getScalarizationOverhead(DstVTy, true, false) : 0); | ||||||||||||
913 | } | ||||||||||||
914 | |||||||||||||
915 | llvm_unreachable("Unhandled cast"); | ||||||||||||
916 | } | ||||||||||||
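
For the same-register-size vector branch above, the cost reduces to a per-part multiplier; a minimal sketch, where the 4x fallback for expanded casts is an assumption rather than what the code above returns (it would split or scalarize instead):

// Sketch: zext modeled as one AND per legalized register part, sext as
// SHL + SRA, anything else as one op per part when not expanded.
#include <iostream>

unsigned sameSizeVectorCastCost(bool IsZExt, bool IsSExt, bool Expands,
                                unsigned SrcLTFirst) {
  if (IsZExt)
    return SrcLTFirst;     // AND with a mask per legalized part
  if (IsSExt)
    return SrcLTFirst * 2; // shift left + arithmetic shift right
  if (!Expands)
    return SrcLTFirst;     // legal/custom conversion, one op per part
  return 4 * SrcLTFirst;   // assumed "expensive" placeholder fallback
}

int main() {
  // A sext whose operands legalize into 2 parts: 2 * 2 = 4.
  std::cout << sameSizeVectorCastCost(false, true, false, 2) << '\n';
}
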
917 | |||||||||||||
918 | InstructionCost getExtractWithExtendCost(unsigned Opcode, Type *Dst, | ||||||||||||
919 | VectorType *VecTy, unsigned Index) { | ||||||||||||
920 | return thisT()->getVectorInstrCost(Instruction::ExtractElement, VecTy, | ||||||||||||
921 | Index) + | ||||||||||||
922 | thisT()->getCastInstrCost(Opcode, Dst, VecTy->getElementType(), | ||||||||||||
923 | TTI::CastContextHint::None, TTI::TCK_RecipThroughput); | ||||||||||||
924 | } | ||||||||||||
925 | |||||||||||||
926 | unsigned getCFInstrCost(unsigned Opcode, TTI::TargetCostKind CostKind, | ||||||||||||
927 | const Instruction *I = nullptr) { | ||||||||||||
928 | return BaseT::getCFInstrCost(Opcode, CostKind, I); | ||||||||||||
929 | } | ||||||||||||
930 | |||||||||||||
931 | InstructionCost getCmpSelInstrCost(unsigned Opcode, Type *ValTy, Type *CondTy, | ||||||||||||
932 | CmpInst::Predicate VecPred, | ||||||||||||
933 | TTI::TargetCostKind CostKind, | ||||||||||||
934 | const Instruction *I = nullptr) { | ||||||||||||
935 | const TargetLoweringBase *TLI = getTLI(); | ||||||||||||
936 | int ISD = TLI->InstructionOpcodeToISD(Opcode); | ||||||||||||
937 | assert(ISD && "Invalid opcode"); | ||||||||||||
938 | |||||||||||||
939 | // TODO: Handle other cost kinds. | ||||||||||||
940 | if (CostKind != TTI::TCK_RecipThroughput) | ||||||||||||
941 | return BaseT::getCmpSelInstrCost(Opcode, ValTy, CondTy, VecPred, CostKind, | ||||||||||||
942 | I); | ||||||||||||
943 | |||||||||||||
944 | // Selects on vectors are actually vector selects. | ||||||||||||
945 | if (ISD == ISD::SELECT) { | ||||||||||||
946 | assert(CondTy && "CondTy must exist"); | ||||||||||||
947 | if (CondTy->isVectorTy()) | ||||||||||||
948 | ISD = ISD::VSELECT; | ||||||||||||
949 | } | ||||||||||||
950 | std::pair<unsigned, MVT> LT = TLI->getTypeLegalizationCost(DL, ValTy); | ||||||||||||
951 | |||||||||||||
952 | if (!(ValTy->isVectorTy() && !LT.second.isVector()) && | ||||||||||||
953 | !TLI->isOperationExpand(ISD, LT.second)) { | ||||||||||||
954 | // The operation is legal. Assume it costs 1. Multiply | ||||||||||||
955 | // by the type-legalization overhead. | ||||||||||||
956 | return LT.first * 1; | ||||||||||||
957 | } | ||||||||||||
958 | |||||||||||||
959 | // Otherwise, assume that the cast is scalarized. | ||||||||||||
960 | // TODO: If one of the types get legalized by splitting, handle this | ||||||||||||
961 | // similarly to what getCastInstrCost() does. | ||||||||||||
962 | if (auto *ValVTy = dyn_cast<VectorType>(ValTy)) { | ||||||||||||
963 | unsigned Num = cast<FixedVectorType>(ValVTy)->getNumElements(); | ||||||||||||
964 | if (CondTy) | ||||||||||||
965 | CondTy = CondTy->getScalarType(); | ||||||||||||
966 | InstructionCost Cost = thisT()->getCmpSelInstrCost( | ||||||||||||
967 | Opcode, ValVTy->getScalarType(), CondTy, VecPred, CostKind, I); | ||||||||||||
968 | |||||||||||||
969 | // Return the cost of multiple scalar invocations plus the cost of | ||||||||||||
970 | // inserting and extracting the values. | ||||||||||||
971 | return getScalarizationOverhead(ValVTy, true, false) + Num * Cost; | ||||||||||||
972 | } | ||||||||||||
973 | |||||||||||||
974 | // Unknown scalar opcode. | ||||||||||||
975 | return 1; | ||||||||||||
976 | } | ||||||||||||
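
A tiny numeric illustration of the scalarization fallback above, assuming a compare on a 4-lane i64 vector that a hypothetical target cannot issue as a single vector compare (all unit costs are assumptions):

#include <iostream>

int main() {
  unsigned NumElts = 4;
  unsigned ScalarCmpCost = 1;  // assumed per-element compare cost
  unsigned InsertOverhead = 4; // assumed cost of inserting the four i1 results
  // Matches the shape of: getScalarizationOverhead(...) + Num * Cost.
  std::cout << InsertOverhead + NumElts * ScalarCmpCost << '\n'; // 8
}
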
977 | |||||||||||||
978 | unsigned getVectorInstrCost(unsigned Opcode, Type *Val, unsigned Index) { | ||||||||||||
979 | std::pair<unsigned, MVT> LT = | ||||||||||||
980 | getTLI()->getTypeLegalizationCost(DL, Val->getScalarType()); | ||||||||||||
981 | |||||||||||||
982 | return LT.first; | ||||||||||||
983 | } | ||||||||||||
984 | |||||||||||||
985 | InstructionCost getMemoryOpCost(unsigned Opcode, Type *Src, | ||||||||||||
986 | MaybeAlign Alignment, unsigned AddressSpace, | ||||||||||||
987 | TTI::TargetCostKind CostKind, | ||||||||||||
988 | const Instruction *I = nullptr) { | ||||||||||||
989 | assert(!Src->isVoidTy() && "Invalid type"); | ||||||||||||
990 | // Assume types, such as structs, are expensive. | ||||||||||||
991 | if (getTLI()->getValueType(DL, Src, true) == MVT::Other) | ||||||||||||
992 | return 4; | ||||||||||||
993 | std::pair<unsigned, MVT> LT = getTLI()->getTypeLegalizationCost(DL, Src); | ||||||||||||
994 | |||||||||||||
995 | // Assuming that all loads of legal types cost 1. | ||||||||||||
996 | InstructionCost Cost = LT.first; | ||||||||||||
997 | if (CostKind != TTI::TCK_RecipThroughput) | ||||||||||||
998 | return Cost; | ||||||||||||
999 | |||||||||||||
1000 | if (Src->isVectorTy() && | ||||||||||||
1001 | // In practice it's not currently possible to have a change in lane | ||||||||||||
1002 | // length for extending loads or truncating stores so both types should | ||||||||||||
1003 | // have the same scalable property. | ||||||||||||
1004 | TypeSize::isKnownLT(Src->getPrimitiveSizeInBits(), | ||||||||||||
1005 | LT.second.getSizeInBits())) { | ||||||||||||
1006 | // This is a vector load that legalizes to a larger type than the vector | ||||||||||||
1007 | // itself. Unless the corresponding extending load or truncating store is | ||||||||||||
1008 | // legal, then this will scalarize. | ||||||||||||
1009 | TargetLowering::LegalizeAction LA = TargetLowering::Expand; | ||||||||||||
1010 | EVT MemVT = getTLI()->getValueType(DL, Src); | ||||||||||||
1011 | if (Opcode == Instruction::Store) | ||||||||||||
1012 | LA = getTLI()->getTruncStoreAction(LT.second, MemVT); | ||||||||||||
1013 | else | ||||||||||||
1014 | LA = getTLI()->getLoadExtAction(ISD::EXTLOAD, LT.second, MemVT); | ||||||||||||
1015 | |||||||||||||
1016 | if (LA != TargetLowering::Legal && LA != TargetLowering::Custom) { | ||||||||||||
1017 | // This is a vector load/store for some illegal type that is scalarized. | ||||||||||||
1018 | // We must account for the cost of building or decomposing the vector. | ||||||||||||
1019 | Cost += getScalarizationOverhead(cast<VectorType>(Src), | ||||||||||||
1020 | Opcode != Instruction::Store, | ||||||||||||
1021 | Opcode == Instruction::Store); | ||||||||||||
1022 | } | ||||||||||||
1023 | } | ||||||||||||
1024 | |||||||||||||
1025 | return Cost; | ||||||||||||
1026 | } | ||||||||||||
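
A sketch of the memory-op costing above with invented numbers: the base cost is one unit per legalized register, and scalarization is added only when the needed extending load or truncating store is neither legal nor custom:

#include <iostream>

// Illustrative helper only; the flags stand in for the LegalizeAction query
// made above, and the overhead value is an assumption.
unsigned memoryOpCost(unsigned LTFirst, bool NeedsExtOrTrunc,
                      bool ExtTruncLegalOrCustom,
                      unsigned ScalarizationOverhead) {
  unsigned Cost = LTFirst;                 // base: 1 per legalized part
  if (NeedsExtOrTrunc && !ExtTruncLegalOrCustom)
    Cost += ScalarizationOverhead;         // built/decomposed element-wise
  return Cost;
}

int main() {
  // e.g. a v4i8 load widened to a v4i32 register without a matching extending
  // load: 1 + (assumed) 4 insert operations.
  std::cout << memoryOpCost(1, true, false, 4) << '\n'; // 5
}
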
1027 | |||||||||||||
1028 | InstructionCost getGatherScatterOpCost(unsigned Opcode, Type *DataTy, | ||||||||||||
1029 | const Value *Ptr, bool VariableMask, | ||||||||||||
1030 | Align Alignment, | ||||||||||||
1031 | TTI::TargetCostKind CostKind, | ||||||||||||
1032 | const Instruction *I = nullptr) { | ||||||||||||
1033 | auto *VT = cast<FixedVectorType>(DataTy); | ||||||||||||
1034 | // Assume the target does not have support for gather/scatter operations | ||||||||||||
1035 | // and provide a rough estimate. | ||||||||||||
1036 | // | ||||||||||||
1037 | // First, compute the cost of extracting the individual addresses and the | ||||||||||||
1038 | // individual memory operations. | ||||||||||||
1039 | InstructionCost LoadCost = | ||||||||||||
1040 | VT->getNumElements() * | ||||||||||||
1041 | (getVectorInstrCost( | ||||||||||||
1042 | Instruction::ExtractElement, | ||||||||||||
1043 | FixedVectorType::get(PointerType::get(VT->getElementType(), 0), | ||||||||||||
1044 | VT->getNumElements()), | ||||||||||||
1045 | -1) + | ||||||||||||
1046 | getMemoryOpCost(Opcode, VT->getElementType(), Alignment, 0, CostKind)); | ||||||||||||
1047 | |||||||||||||
1048 | // Next, compute the cost of packing the result in a vector. | ||||||||||||
1049 | int PackingCost = getScalarizationOverhead(VT, Opcode != Instruction::Store, | ||||||||||||
1050 | Opcode == Instruction::Store); | ||||||||||||
1051 | |||||||||||||
1052 | int ConditionalCost = 0; | ||||||||||||
1053 | if (VariableMask) { | ||||||||||||
1054 | // Compute the cost of conditionally executing the memory operations with | ||||||||||||
1055 | // variable masks. This includes extracting the individual conditions, a | ||||||||||||
1056 | // branches and PHIs to combine the results. | ||||||||||||
1057 | // NOTE: Estimating the cost of conditionally executing the memory | ||||||||||||
1058 | // operations accurately is quite difficult and the current solution | ||||||||||||
1059 | // provides a very rough estimate only. | ||||||||||||
1060 | ConditionalCost = | ||||||||||||
1061 | VT->getNumElements() * | ||||||||||||
1062 | (getVectorInstrCost( | ||||||||||||
1063 | Instruction::ExtractElement, | ||||||||||||
1064 | FixedVectorType::get(Type::getInt1Ty(DataTy->getContext()), | ||||||||||||
1065 | VT->getNumElements()), | ||||||||||||
1066 | -1) + | ||||||||||||
1067 | getCFInstrCost(Instruction::Br, CostKind) + | ||||||||||||
1068 | getCFInstrCost(Instruction::PHI, CostKind)); | ||||||||||||
1069 | } | ||||||||||||
1070 | |||||||||||||
1071 | return LoadCost + PackingCost + ConditionalCost; | ||||||||||||
1072 | } | ||||||||||||
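
The estimate above is additive; a numeric sketch with assumed unit costs for the address extract, scalar memory op, branch, and PHI components:

#include <iostream>

unsigned gatherScatterEstimate(unsigned NumElts, bool VariableMask) {
  unsigned ExtractAddr = 1, ScalarMemOp = 1; // assumed unit costs
  unsigned Packing = NumElts;                // assumed scalarization cost
  unsigned LoadCost = NumElts * (ExtractAddr + ScalarMemOp);
  unsigned CondCost = 0;
  if (VariableMask) {
    unsigned ExtractMaskBit = 1, Br = 1, Phi = 1; // assumed unit costs
    CondCost = NumElts * (ExtractMaskBit + Br + Phi);
  }
  return LoadCost + Packing + CondCost;
}

int main() {
  // 4 lanes with a variable mask: 4*(1+1) + 4 + 4*(1+1+1) = 24.
  std::cout << gatherScatterEstimate(4, /*VariableMask=*/true) << '\n';
}
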
1073 | |||||||||||||
1074 | InstructionCost getInterleavedMemoryOpCost( | ||||||||||||
1075 | unsigned Opcode, Type *VecTy, unsigned Factor, ArrayRef<unsigned> Indices, | ||||||||||||
1076 | Align Alignment, unsigned AddressSpace, TTI::TargetCostKind CostKind, | ||||||||||||
1077 | bool UseMaskForCond = false, bool UseMaskForGaps = false) { | ||||||||||||
1078 | auto *VT = cast<FixedVectorType>(VecTy); | ||||||||||||
1079 | |||||||||||||
1080 | unsigned NumElts = VT->getNumElements(); | ||||||||||||
1081 | assert(Factor > 1 && NumElts % Factor == 0 && "Invalid interleave factor"); | ||||||||||||
1082 | |||||||||||||
1083 | unsigned NumSubElts = NumElts / Factor; | ||||||||||||
1084 | auto *SubVT = FixedVectorType::get(VT->getElementType(), NumSubElts); | ||||||||||||
1085 | |||||||||||||
1086 | // First, the cost of the load/store operation. | ||||||||||||
1087 | InstructionCost Cost; | ||||||||||||
1088 | if (UseMaskForCond
| ||||||||||||