Bug Summary

File: build/llvm-toolchain-snapshot-16~++20221003111214+1fa2019828ca/llvm/lib/Target/X86/X86TargetTransformInfo.cpp
Warning: line 4325, column 15
Division by zero
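
For orientation, the flagged statement at line 4325 falls beyond the portion of the file reproduced below. As a minimal, hypothetical sketch of the pattern the core.DivideZero checker reports (illustrative identifiers, not the actual flagged code):

  // The checker flags a division when it can construct a feasible path
  // on which the divisor is zero at the point of the division.
  unsigned costPerElement(unsigned TotalCost, unsigned NumElts) {
    return TotalCost / NumElts; // flagged if NumElts can be 0 on some path
  }

  // Guarding the divisor on that path is the usual fix:
  unsigned costPerElementSafe(unsigned TotalCost, unsigned NumElts) {
    return NumElts ? TotalCost / NumElts : 0;
  }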

Annotated Source Code


clang -cc1 -cc1 -triple x86_64-pc-linux-gnu -analyze -disable-free -clear-ast-before-backend -disable-llvm-verifier -discard-value-names -main-file-name X86TargetTransformInfo.cpp -analyzer-checker=core -analyzer-checker=apiModeling -analyzer-checker=unix -analyzer-checker=deadcode -analyzer-checker=cplusplus -analyzer-checker=security.insecureAPI.UncheckedReturn -analyzer-checker=security.insecureAPI.getpw -analyzer-checker=security.insecureAPI.gets -analyzer-checker=security.insecureAPI.mktemp -analyzer-checker=security.insecureAPI.mkstemp -analyzer-checker=security.insecureAPI.vfork -analyzer-checker=nullability.NullPassedToNonnull -analyzer-checker=nullability.NullReturnedFromNonnull -analyzer-output plist -w -setup-static-analyzer -analyzer-config-compatibility-mode=true -mrelocation-model pic -pic-level 2 -mframe-pointer=none -fmath-errno -ffp-contract=on -fno-rounding-math -mconstructor-aliases -funwind-tables=2 -target-cpu x86-64 -tune-cpu generic -debugger-tuning=gdb -ffunction-sections -fdata-sections -fcoverage-compilation-dir=/build/llvm-toolchain-snapshot-16~++20221003111214+1fa2019828ca/build-llvm -resource-dir /usr/lib/llvm-16/lib/clang/16.0.0 -D _DEBUG -D _GNU_SOURCE -D __STDC_CONSTANT_MACROS -D __STDC_FORMAT_MACROS -D __STDC_LIMIT_MACROS -I lib/Target/X86 -I /build/llvm-toolchain-snapshot-16~++20221003111214+1fa2019828ca/llvm/lib/Target/X86 -I include -I /build/llvm-toolchain-snapshot-16~++20221003111214+1fa2019828ca/llvm/include -D _FORTIFY_SOURCE=2 -D NDEBUG -U NDEBUG -internal-isystem /usr/lib/gcc/x86_64-linux-gnu/10/../../../../include/c++/10 -internal-isystem /usr/lib/gcc/x86_64-linux-gnu/10/../../../../include/x86_64-linux-gnu/c++/10 -internal-isystem /usr/lib/gcc/x86_64-linux-gnu/10/../../../../include/c++/10/backward -internal-isystem /usr/lib/llvm-16/lib/clang/16.0.0/include -internal-isystem /usr/local/include -internal-isystem /usr/lib/gcc/x86_64-linux-gnu/10/../../../../x86_64-linux-gnu/include -internal-externc-isystem /usr/include/x86_64-linux-gnu -internal-externc-isystem /include -internal-externc-isystem /usr/include -fmacro-prefix-map=/build/llvm-toolchain-snapshot-16~++20221003111214+1fa2019828ca/build-llvm=build-llvm -fmacro-prefix-map=/build/llvm-toolchain-snapshot-16~++20221003111214+1fa2019828ca/= -fcoverage-prefix-map=/build/llvm-toolchain-snapshot-16~++20221003111214+1fa2019828ca/build-llvm=build-llvm -fcoverage-prefix-map=/build/llvm-toolchain-snapshot-16~++20221003111214+1fa2019828ca/= -O3 -Wno-unused-command-line-argument -Wno-unused-parameter -Wwrite-strings -Wno-missing-field-initializers -Wno-long-long -Wno-maybe-uninitialized -Wno-class-memaccess -Wno-redundant-move -Wno-pessimizing-move -Wno-noexcept-type -Wno-comment -Wno-misleading-indentation -std=c++17 -fdeprecated-macro -fdebug-compilation-dir=/build/llvm-toolchain-snapshot-16~++20221003111214+1fa2019828ca/build-llvm -fdebug-prefix-map=/build/llvm-toolchain-snapshot-16~++20221003111214+1fa2019828ca/build-llvm=build-llvm -fdebug-prefix-map=/build/llvm-toolchain-snapshot-16~++20221003111214+1fa2019828ca/= -ferror-limit 19 -fvisibility=hidden -fvisibility-inlines-hidden -stack-protector 2 -fgnuc-version=4.2.1 -fcolor-diagnostics -vectorize-loops -vectorize-slp -analyzer-output=html -analyzer-config stable-report-filename=true -faddrsig -D__GCC_HAVE_DWARF2_CFI_ASM=1 -o /tmp/scan-build-2022-10-03-140002-15933-1 -x c++ /build/llvm-toolchain-snapshot-16~++20221003111214+1fa2019828ca/llvm/lib/Target/X86/X86TargetTransformInfo.cpp

/build/llvm-toolchain-snapshot-16~++20221003111214+1fa2019828ca/llvm/lib/Target/X86/X86TargetTransformInfo.cpp

1//===-- X86TargetTransformInfo.cpp - X86 specific TTI pass ----------------===//
2//
3// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
4// See https://llvm.org/LICENSE.txt for license information.
5// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
6//
7//===----------------------------------------------------------------------===//
8/// \file
9/// This file implements a TargetTransformInfo analysis pass specific to the
10/// X86 target machine. It uses the target's detailed information to provide
11/// more precise answers to certain TTI queries, while letting the target
12/// independent and default TTI implementations handle the rest.
13///
14//===----------------------------------------------------------------------===//
15/// A note on the Cost Model numbers used below:
16/// the numbers correspond to some "generic" X86 CPU rather than to a
17/// specific CPU model. Usually the numbers correspond to the CPU where the
18/// feature first appeared. For example, if we check Subtarget.hasSSE42() in
19/// the lookups below, the cost is based on Nehalem, as that was the first CPU
20/// to support that feature level and thus most likely has the worst case cost,
21/// although we may discard an outlying worst cost from one CPU (e.g. Atom).
22///
23/// Some examples of other technologies/CPUs:
24/// SSE 3 - Pentium4 / Athlon64
25/// SSE 4.1 - Penryn
26/// SSE 4.2 - Nehalem / Silvermont
27/// AVX - Sandy Bridge / Jaguar / Bulldozer
28/// AVX2 - Haswell / Ryzen
29/// AVX-512 - Xeon Phi / Skylake
30///
31/// And some examples of instruction target dependent costs (latency)
32///                     divss   sqrtss   rsqrtss
33///   AMD K7            11-16   19       3
34///   Piledriver        9-24    13-15    5
35///   Jaguar            14      16       2
36///   Pentium II,III    18      30       2
37///   Nehalem           7-14    7-18     3
38///   Haswell           10-13   11       5
39///
40/// Interpreting the 4 TargetCostKind types:
41/// TCK_RecipThroughput and TCK_Latency should try to match the worst case
42/// values reported by the CPU scheduler models (and llvm-mca).
43/// TCK_CodeSize should match the instruction count (e.g. divss = 1), NOT the
44/// actual encoding size of the instruction.
45/// TCK_SizeAndLatency should match the worst case micro-op counts reported
46/// by the CPU scheduler models (and llvm-mca), to ensure that they are
47/// compatible with the MicroOpBufferSize and LoopMicroOpBufferSize values,
48/// which are often used as the cost thresholds where TCK_SizeAndLatency is requested.
49//===----------------------------------------------------------------------===//
50
51#include "X86TargetTransformInfo.h"
52#include "llvm/Analysis/TargetTransformInfo.h"
53#include "llvm/CodeGen/BasicTTIImpl.h"
54#include "llvm/CodeGen/CostTable.h"
55#include "llvm/CodeGen/TargetLowering.h"
56#include "llvm/IR/InstIterator.h"
57#include "llvm/IR/IntrinsicInst.h"
58#include "llvm/Support/Debug.h"
59
60using namespace llvm;
61
62#define DEBUG_TYPE "x86tti"
63
64//===----------------------------------------------------------------------===//
65//
66// X86 cost model.
67//
68//===----------------------------------------------------------------------===//
69
70// Helper struct to store/access costs for each cost kind.
71// TODO: Move this to allow other targets to use it?
72struct CostKindCosts {
73 unsigned RecipThroughputCost = ~0U;
74 unsigned LatencyCost = ~0U;
75 unsigned CodeSizeCost = ~0U;
76 unsigned SizeAndLatencyCost = ~0U;
77
78 llvm::Optional<unsigned>
79 operator[](TargetTransformInfo::TargetCostKind Kind) const {
80 unsigned Cost = ~0U;
81 switch (Kind) {
82 case TargetTransformInfo::TCK_RecipThroughput:
83 Cost = RecipThroughputCost;
84 break;
85 case TargetTransformInfo::TCK_Latency:
86 Cost = LatencyCost;
87 break;
88 case TargetTransformInfo::TCK_CodeSize:
89 Cost = CodeSizeCost;
90 break;
91 case TargetTransformInfo::TCK_SizeAndLatency:
92 Cost = SizeAndLatencyCost;
93 break;
94 }
95 if (Cost == ~0U)
96 return None;
97 return Cost;
98 }
99};
100using CostKindTblEntry = CostTblEntryT<CostKindCosts>;
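
Each { a, b, c, d } initializer in the cost tables below fills CostKindCosts in declaration order: { RecipThroughput, Latency, CodeSize, SizeAndLatency }. Shorter initializers such as { 6 } leave the remaining kinds at ~0U, so operator[] yields None for them and the lookup falls through to later tables. A sketch of the lookup idiom used throughout this file (the table contents are illustrative, not real cost data):

  // Illustrative only: how every table below is consulted. Mirrors the
  // pattern at the CostTableLookup call sites in this function.
  static const CostKindTblEntry DemoTable[] = {
    // { RecipThroughput, Latency, CodeSize, SizeAndLatency }
    { ISD::SHL,  MVT::v4i32, { 1, 2, 1, 2 } },
    { ISD::SDIV, MVT::v4i32, { 6 } }, // only throughput given; rest stay ~0U
  };
  if (const auto *Entry = CostTableLookup(DemoTable, ISD, LT.second))
    if (auto KindCost = Entry->Cost[CostKind]) // None when that kind is unset
      return LT.first * KindCost.value();      // scale by legalization factor
  // ...otherwise fall through to the next cost table.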
101
102TargetTransformInfo::PopcntSupportKind
103X86TTIImpl::getPopcntSupport(unsigned TyWidth) {
104 assert(isPowerOf2_32(TyWidth) && "Ty width must be power of 2");
105 // TODO: Currently the __builtin_popcount() implementation using SSE3
106 // instructions is inefficient. Once the problem is fixed, we should
107 // call ST->hasSSE3() instead of ST->hasPOPCNT().
108 return ST->hasPOPCNT() ? TTI::PSK_FastHardware : TTI::PSK_Software;
109}
110
111llvm::Optional<unsigned> X86TTIImpl::getCacheSize(
112 TargetTransformInfo::CacheLevel Level) const {
113 switch (Level) {
114 case TargetTransformInfo::CacheLevel::L1D:
115 // - Penryn
116 // - Nehalem
117 // - Westmere
118 // - Sandy Bridge
119 // - Ivy Bridge
120 // - Haswell
121 // - Broadwell
122 // - Skylake
123 // - Kabylake
124 return 32 * 1024; // 32 KByte
125 case TargetTransformInfo::CacheLevel::L2D:
126 // - Penryn
127 // - Nehalem
128 // - Westmere
129 // - Sandy Bridge
130 // - Ivy Bridge
131 // - Haswell
132 // - Broadwell
133 // - Skylake
134 // - Kabylake
135 return 256 * 1024; // 256 KByte
136 }
137
138 llvm_unreachable("Unknown TargetTransformInfo::CacheLevel")::llvm::llvm_unreachable_internal("Unknown TargetTransformInfo::CacheLevel"
, "llvm/lib/Target/X86/X86TargetTransformInfo.cpp", 138)
;
139}
140
141llvm::Optional<unsigned> X86TTIImpl::getCacheAssociativity(
142 TargetTransformInfo::CacheLevel Level) const {
143 // - Penryn
144 // - Nehalem
145 // - Westmere
146 // - Sandy Bridge
147 // - Ivy Bridge
148 // - Haswell
149 // - Broadwell
150 // - Skylake
151 // - Kabylake
152 switch (Level) {
153 case TargetTransformInfo::CacheLevel::L1D:
154 [[fallthrough]];
155 case TargetTransformInfo::CacheLevel::L2D:
156 return 8;
157 }
158
159 llvm_unreachable("Unknown TargetTransformInfo::CacheLevel")::llvm::llvm_unreachable_internal("Unknown TargetTransformInfo::CacheLevel"
, "llvm/lib/Target/X86/X86TargetTransformInfo.cpp", 159)
;
160}
161
162unsigned X86TTIImpl::getNumberOfRegisters(unsigned ClassID) const {
163 bool Vector = (ClassID == 1);
164 if (Vector && !ST->hasSSE1())
165 return 0;
166
167 if (ST->is64Bit()) {
168 if (Vector && ST->hasAVX512())
169 return 32;
170 return 16;
171 }
172 return 8;
173}
174
175TypeSize
176X86TTIImpl::getRegisterBitWidth(TargetTransformInfo::RegisterKind K) const {
177 unsigned PreferVectorWidth = ST->getPreferVectorWidth();
178 switch (K) {
179 case TargetTransformInfo::RGK_Scalar:
180 return TypeSize::getFixed(ST->is64Bit() ? 64 : 32);
181 case TargetTransformInfo::RGK_FixedWidthVector:
182 if (ST->hasAVX512() && PreferVectorWidth >= 512)
183 return TypeSize::getFixed(512);
184 if (ST->hasAVX() && PreferVectorWidth >= 256)
185 return TypeSize::getFixed(256);
186 if (ST->hasSSE1() && PreferVectorWidth >= 128)
187 return TypeSize::getFixed(128);
188 return TypeSize::getFixed(0);
189 case TargetTransformInfo::RGK_ScalableVector:
190 return TypeSize::getScalable(0);
191 }
192
193 llvm_unreachable("Unsupported register kind")::llvm::llvm_unreachable_internal("Unsupported register kind"
, "llvm/lib/Target/X86/X86TargetTransformInfo.cpp", 193)
;
194}
195
196unsigned X86TTIImpl::getLoadStoreVecRegBitWidth(unsigned) const {
197 return getRegisterBitWidth(TargetTransformInfo::RGK_FixedWidthVector)
198 .getFixedSize();
199}
200
201unsigned X86TTIImpl::getMaxInterleaveFactor(unsigned VF) {
202 // If the loop will not be vectorized, don't interleave the loop.
203 // Let the regular unroller unroll the loop, which saves the overflow
204 // check and memory check cost.
205 if (VF == 1)
206 return 1;
207
208 if (ST->isAtom())
209 return 1;
210
211 // Sandybridge and Haswell have multiple execution ports and pipelined
212 // vector units.
213 if (ST->hasAVX())
214 return 4;
215
216 return 2;
217}
218
219InstructionCost X86TTIImpl::getArithmeticInstrCost(
220 unsigned Opcode, Type *Ty, TTI::TargetCostKind CostKind,
221 TTI::OperandValueInfo Op1Info, TTI::OperandValueInfo Op2Info,
222 ArrayRef<const Value *> Args,
223 const Instruction *CxtI) {
224
225 // vXi8 multiplications are always promoted to vXi16.
226 if (Opcode == Instruction::Mul && Ty->isVectorTy() &&
227 Ty->getScalarSizeInBits() == 8) {
228 Type *WideVecTy =
229 VectorType::getExtendedElementVectorType(cast<VectorType>(Ty));
230 return getCastInstrCost(Instruction::ZExt, WideVecTy, Ty,
231 TargetTransformInfo::CastContextHint::None,
232 CostKind) +
233 getCastInstrCost(Instruction::Trunc, Ty, WideVecTy,
234 TargetTransformInfo::CastContextHint::None,
235 CostKind) +
236 getArithmeticInstrCost(Opcode, WideVecTy, CostKind, Op1Info, Op2Info);
237 }
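
Put as an equation, the model prices a vXi8 multiply as cost(zext vXi8 -> vXi16) + cost(trunc vXi16 -> vXi8) + cost(mul vXi16), matching the order of the three terms returned above and the way codegen actually promotes the operation.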
238
239 // Legalize the type.
240 std::pair<InstructionCost, MVT> LT = getTypeLegalizationCost(Ty);
241
242 int ISD = TLI->InstructionOpcodeToISD(Opcode);
243 assert(ISD && "Invalid opcode");
244
245 if (ISD == ISD::MUL && Args.size() == 2 && LT.second.isVector() &&
246 LT.second.getScalarType() == MVT::i32) {
247 // Check if the operands can be represented as a smaller datatype.
248 bool Op1Signed = false, Op2Signed = false;
249 unsigned Op1MinSize = BaseT::minRequiredElementSize(Args[0], Op1Signed);
250 unsigned Op2MinSize = BaseT::minRequiredElementSize(Args[1], Op2Signed);
251 unsigned OpMinSize = std::max(Op1MinSize, Op2MinSize);
252 bool SignedMode = Op1Signed || Op2Signed;
253
254 // If both are representable as i15 and at least one is constant,
255 // zero-extended, or sign-extended from vXi16 (or less pre-SSE41) then we
256 // can treat this as PMADDWD which has the same costs as a vXi16 multiply.
257 if (OpMinSize <= 15 && !ST->isPMADDWDSlow()) {
258 bool Op1Constant =
259 isa<ConstantDataVector>(Args[0]) || isa<ConstantVector>(Args[0]);
260 bool Op2Constant =
261 isa<ConstantDataVector>(Args[1]) || isa<ConstantVector>(Args[1]);
262 bool Op1Sext = isa<SExtInst>(Args[0]) &&
263 (Op1MinSize == 15 || (Op1MinSize < 15 && !ST->hasSSE41()));
264 bool Op2Sext = isa<SExtInst>(Args[1]) &&
265 (Op2MinSize == 15 || (Op2MinSize < 15 && !ST->hasSSE41()));
266
267 bool IsZeroExtended = !Op1Signed || !Op2Signed;
268 bool IsConstant = Op1Constant || Op2Constant;
269 bool IsSext = Op1Sext || Op2Sext;
270 if (IsConstant || IsZeroExtended || IsSext)
271 LT.second =
272 MVT::getVectorVT(MVT::i16, 2 * LT.second.getVectorNumElements());
273 }
274
275 // Check if the vXi32 operands can be shrunk into a smaller datatype.
276 // This should match the codegen from reduceVMULWidth.
277 // TODO: Make this generic (!ST->SSE41 || ST->isPMULLDSlow()).
278 if (ST->useSLMArithCosts() && LT.second == MVT::v4i32) {
279 if (OpMinSize <= 7)
280 return LT.first * 3; // pmullw/sext
281 if (!SignedMode && OpMinSize <= 8)
282 return LT.first * 3; // pmullw/zext
283 if (OpMinSize <= 15)
284 return LT.first * 5; // pmullw/pmulhw/pshuf
285 if (!SignedMode && OpMinSize <= 16)
286 return LT.first * 5; // pmullw/pmulhw/pshuf
287 }
288 }
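
A worked bound behind the i15 requirement above: PMADDWD multiplies signed 16-bit lanes into 32-bit products and then adds adjacent pairs. With both operands representable in 15 signed bits, each |a_i * b_i| <= 2^14 * 2^14 = 2^28, so an adjacent-pair sum is at most 2^29 < 2^31 and the 32-bit result lanes cannot overflow.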
289
290 // Vector multiply by pow2 will be simplified to shifts.
291 // Vector multiply by -pow2 will be simplified to shifts/negates.
292 if (ISD == ISD::MUL && Op2Info.isConstant() &&
293 (Op2Info.isPowerOf2() || Op2Info.isNegatedPowerOf2())) {
294 InstructionCost Cost =
295 getArithmeticInstrCost(Instruction::Shl, Ty, CostKind,
296 Op1Info.getNoProps(), Op2Info.getNoProps());
297 if (Op2Info.isNegatedPowerOf2())
298 Cost += getArithmeticInstrCost(Instruction::Sub, Ty, CostKind);
299 return Cost;
300 }
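
The scalar identities behind this pricing, which the vector lowering applies lane-wise (a sketch, not the lowering code itself):

  unsigned mulPow2(unsigned X) { return X << 3; } // X * 8 becomes a shift
  int mulNegPow2(int X) { return -(X << 3); }     // X * -8: shift, then the extra Sub priced above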
301
302 // On X86, vector signed division by a power-of-two constant is
303 // normally expanded to the sequence SRA + SRL + ADD + SRA.
304 // The OperandValue properties may not be the same as that of the previous
305 // operation; conservatively assume OP_None.
306 if ((ISD == ISD::SDIV || ISD == ISD::SREM) &&
307 Op2Info.isConstant() && Op2Info.isPowerOf2()) {
308 InstructionCost Cost =
309 2 * getArithmeticInstrCost(Instruction::AShr, Ty, CostKind,
310 Op1Info.getNoProps(), Op2Info.getNoProps());
311 Cost += getArithmeticInstrCost(Instruction::LShr, Ty, CostKind,
312 Op1Info.getNoProps(), Op2Info.getNoProps());
313 Cost += getArithmeticInstrCost(Instruction::Add, Ty, CostKind,
314 Op1Info.getNoProps(), Op2Info.getNoProps());
315
316 if (ISD == ISD::SREM) {
317 // For SREM: (X % C) is the equivalent of (X - (X/C)*C)
318 Cost += getArithmeticInstrCost(Instruction::Mul, Ty, CostKind, Op1Info.getNoProps(),
319 Op2Info.getNoProps());
320 Cost += getArithmeticInstrCost(Instruction::Sub, Ty, CostKind, Op1Info.getNoProps(),
321 Op2Info.getNoProps());
322 }
323
324 return Cost;
325 }
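
A scalar sketch of that expansion for X / (1 << K), 0 < K < 32 (signed division rounds toward zero), plus the SREM identity from the comment above:

  int sdivPow2(int X, unsigned K) {
    int Sign = X >> 31;                         // SRA: 0 or -1
    unsigned Bias = (unsigned)Sign >> (32 - K); // SRL: (1 << K) - 1 when X < 0
    return (X + (int)Bias) >> K;                // ADD + SRA
  }

  int sremPow2(int X, unsigned K) {             // X % C == X - (X / C) * C
    return X - sdivPow2(X, K) * (1 << K);
  }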
326
327 // Vector unsigned division/remainder will be simplified to shifts/masks.
328 if ((ISD == ISD::UDIV || ISD == ISD::UREM) &&
329 Op2Info.isConstant() && Op2Info.isPowerOf2()) {
330 if (ISD == ISD::UDIV)
331 return getArithmeticInstrCost(Instruction::LShr, Ty, CostKind,
332 Op1Info.getNoProps(), Op2Info.getNoProps());
333 // UREM
334 return getArithmeticInstrCost(Instruction::And, Ty, CostKind,
335 Op1Info.getNoProps(), Op2Info.getNoProps());
336 }
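
The unsigned forms reduce to a single shift or mask, hence the single-instruction pricing:

  unsigned udivPow2(unsigned X, unsigned K) { return X >> K; }              // LShr
  unsigned uremPow2(unsigned X, unsigned K) { return X & ((1u << K) - 1); } // And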
337
338 static const CostKindTblEntry AVX512BWUniformConstCostTable[] = {
339 { ISD::SHL, MVT::v16i8, { 1, 7, 2, 3 } }, // psllw + pand.
340 { ISD::SRL, MVT::v16i8, { 1, 7, 2, 3 } }, // psrlw + pand.
341 { ISD::SRA, MVT::v16i8, { 1, 8, 4, 5 } }, // psrlw, pand, pxor, psubb.
342 { ISD::SHL, MVT::v32i8, { 1, 8, 2, 3 } }, // psllw + pand.
343 { ISD::SRL, MVT::v32i8, { 1, 8, 2, 3 } }, // psrlw + pand.
344 { ISD::SRA, MVT::v32i8, { 1, 9, 4, 5 } }, // psrlw, pand, pxor, psubb.
345 { ISD::SHL, MVT::v64i8, { 1, 8, 2, 3 } }, // psllw + pand.
346 { ISD::SRL, MVT::v64i8, { 1, 8, 2, 3 } }, // psrlw + pand.
347 { ISD::SRA, MVT::v64i8, { 1, 9, 4, 6 } }, // psrlw, pand, pxor, psubb.
348
349 { ISD::SHL, MVT::v16i16, { 1, 1, 1, 1 } }, // psllw
350 { ISD::SRL, MVT::v16i16, { 1, 1, 1, 1 } }, // psrlw
351 { ISD::SRA, MVT::v16i16, { 1, 1, 1, 1 } }, // psraw
352 { ISD::SHL, MVT::v32i16, { 1, 1, 1, 1 } }, // psllw
353 { ISD::SRL, MVT::v32i16, { 1, 1, 1, 1 } }, // psrlw
354 { ISD::SRA, MVT::v32i16, { 1, 1, 1, 1 } }, // psraw
355 };
356
357 if (Op2Info.isUniform() && Op2Info.isConstant() && ST->hasBWI())
358 if (const auto *Entry =
359 CostTableLookup(AVX512BWUniformConstCostTable, ISD, LT.second))
360 if (auto KindCost = Entry->Cost[CostKind])
361 return LT.first * KindCost.value();
362
363 static const CostKindTblEntry AVX512UniformConstCostTable[] = {
364 { ISD::SHL, MVT::v64i8, { 2, 12, 5, 6 } }, // psllw + pand.
365 { ISD::SRL, MVT::v64i8, { 2, 12, 5, 6 } }, // psrlw + pand.
366 { ISD::SRA, MVT::v64i8, { 3, 10, 12, 12 } }, // psrlw, pand, pxor, psubb.
367
368 { ISD::SHL, MVT::v16i16, { 2, 7, 4, 4 } }, // psllw + split.
369 { ISD::SRL, MVT::v16i16, { 2, 7, 4, 4 } }, // psrlw + split.
370 { ISD::SRA, MVT::v16i16, { 2, 7, 4, 4 } }, // psraw + split.
371
372 { ISD::SHL, MVT::v8i32, { 1, 1, 1, 1 } }, // pslld
373 { ISD::SRL, MVT::v8i32, { 1, 1, 1, 1 } }, // psrld
374 { ISD::SRA, MVT::v8i32, { 1, 1, 1, 1 } }, // psrad
375 { ISD::SHL, MVT::v16i32, { 1, 1, 1, 1 } }, // pslld
376 { ISD::SRL, MVT::v16i32, { 1, 1, 1, 1 } }, // psrld
377 { ISD::SRA, MVT::v16i32, { 1, 1, 1, 1 } }, // psrad
378
379 { ISD::SRA, MVT::v2i64, { 1, 1, 1, 1 } }, // psraq
380 { ISD::SHL, MVT::v4i64, { 1, 1, 1, 1 } }, // psllq
381 { ISD::SRL, MVT::v4i64, { 1, 1, 1, 1 } }, // psrlq
382 { ISD::SRA, MVT::v4i64, { 1, 1, 1, 1 } }, // psraq
383 { ISD::SHL, MVT::v8i64, { 1, 1, 1, 1 } }, // psllq
384 { ISD::SRL, MVT::v8i64, { 1, 1, 1, 1 } }, // psrlq
385 { ISD::SRA, MVT::v8i64, { 1, 1, 1, 1 } }, // psraq
386
387 { ISD::SDIV, MVT::v16i32, { 6 } }, // pmuludq sequence
388 { ISD::SREM, MVT::v16i32, { 8 } }, // pmuludq+mul+sub sequence
389 { ISD::UDIV, MVT::v16i32, { 5 } }, // pmuludq sequence
390 { ISD::UREM, MVT::v16i32, { 7 } }, // pmuludq+mul+sub sequence
391 };
392
393 if (Op2Info.isUniform() && Op2Info.isConstant() && ST->hasAVX512())
394 if (const auto *Entry =
395 CostTableLookup(AVX512UniformConstCostTable, ISD, LT.second))
396 if (auto KindCost = Entry->Cost[CostKind])
397 return LT.first * KindCost.value();
398
399 static const CostKindTblEntry AVX2UniformConstCostTable[] = {
400 { ISD::SHL, MVT::v16i8, { 1, 8, 2, 3 } }, // psllw + pand.
401 { ISD::SRL, MVT::v16i8, { 1, 8, 2, 3 } }, // psrlw + pand.
402 { ISD::SRA, MVT::v16i8, { 2, 10, 5, 6 } }, // psrlw, pand, pxor, psubb.
403 { ISD::SHL, MVT::v32i8, { 2, 8, 2, 4 } }, // psllw + pand.
404 { ISD::SRL, MVT::v32i8, { 2, 8, 2, 4 } }, // psrlw + pand.
405 { ISD::SRA, MVT::v32i8, { 3, 10, 5, 9 } }, // psrlw, pand, pxor, psubb.
406
407 { ISD::SHL, MVT::v8i16, { 1, 1, 1, 1 } }, // psllw
408 { ISD::SRL, MVT::v8i16, { 1, 1, 1, 1 } }, // psrlw
409 { ISD::SRA, MVT::v8i16, { 1, 1, 1, 1 } }, // psraw
410 { ISD::SHL, MVT::v16i16,{ 2, 2, 1, 2 } }, // psllw
411 { ISD::SRL, MVT::v16i16,{ 2, 2, 1, 2 } }, // psrlw
412 { ISD::SRA, MVT::v16i16,{ 2, 2, 1, 2 } }, // psraw
413
414 { ISD::SHL, MVT::v4i32, { 1, 1, 1, 1 } }, // pslld
415 { ISD::SRL, MVT::v4i32, { 1, 1, 1, 1 } }, // psrld
416 { ISD::SRA, MVT::v4i32, { 1, 1, 1, 1 } }, // psrad
417 { ISD::SHL, MVT::v8i32, { 2, 2, 1, 2 } }, // pslld
418 { ISD::SRL, MVT::v8i32, { 2, 2, 1, 2 } }, // psrld
419 { ISD::SRA, MVT::v8i32, { 2, 2, 1, 2 } }, // psrad
420
421 { ISD::SHL, MVT::v2i64, { 1, 1, 1, 1 } }, // psllq
422 { ISD::SRL, MVT::v2i64, { 1, 1, 1, 1 } }, // psrlq
423 { ISD::SRA, MVT::v2i64, { 2, 3, 3, 3 } }, // psrad + shuffle.
424 { ISD::SHL, MVT::v4i64, { 2, 2, 1, 2 } }, // psllq
425 { ISD::SRL, MVT::v4i64, { 2, 2, 1, 2 } }, // psrlq
426 { ISD::SRA, MVT::v4i64, { 4, 4, 3, 6 } }, // psrad + shuffle + split.
427
428 { ISD::SDIV, MVT::v8i32, { 6 } }, // pmuludq sequence
429 { ISD::SREM, MVT::v8i32, { 8 } }, // pmuludq+mul+sub sequence
430 { ISD::UDIV, MVT::v8i32, { 5 } }, // pmuludq sequence
431 { ISD::UREM, MVT::v8i32, { 7 } }, // pmuludq+mul+sub sequence
432 };
433
434 if (Op2Info.isUniform() && Op2Info.isConstant() && ST->hasAVX2())
435 if (const auto *Entry =
436 CostTableLookup(AVX2UniformConstCostTable, ISD, LT.second))
437 if (auto KindCost = Entry->Cost[CostKind])
438 return LT.first * KindCost.value();
439
440 static const CostKindTblEntry AVXUniformConstCostTable[] = {
441 { ISD::SHL, MVT::v16i8, { 2, 7, 2, 3 } }, // psllw + pand.
442 { ISD::SRL, MVT::v16i8, { 2, 7, 2, 3 } }, // psrlw + pand.
443 { ISD::SRA, MVT::v16i8, { 3, 9, 5, 6 } }, // psrlw, pand, pxor, psubb.
444 { ISD::SHL, MVT::v32i8, { 4, 7, 7, 8 } }, // 2*(psllw + pand) + split.
445 { ISD::SRL, MVT::v32i8, { 4, 7, 7, 8 } }, // 2*(psrlw + pand) + split.
446 { ISD::SRA, MVT::v32i8, { 7, 7, 12, 13 } }, // 2*(psrlw, pand, pxor, psubb) + split.
447
448 { ISD::SHL, MVT::v8i16, { 1, 2, 1, 1 } }, // psllw.
449 { ISD::SRL, MVT::v8i16, { 1, 2, 1, 1 } }, // psrlw.
450 { ISD::SRA, MVT::v8i16, { 1, 2, 1, 1 } }, // psraw.
451 { ISD::SHL, MVT::v16i16,{ 3, 6, 4, 5 } }, // psllw + split.
452 { ISD::SRL, MVT::v16i16,{ 3, 6, 4, 5 } }, // psrlw + split.
453 { ISD::SRA, MVT::v16i16,{ 3, 6, 4, 5 } }, // psraw + split.
454
455 { ISD::SHL, MVT::v4i32, { 1, 2, 1, 1 } }, // pslld.
456 { ISD::SRL, MVT::v4i32, { 1, 2, 1, 1 } }, // psrld.
457 { ISD::SRA, MVT::v4i32, { 1, 2, 1, 1 } }, // psrad.
458 { ISD::SHL, MVT::v8i32, { 3, 6, 4, 5 } }, // pslld + split.
459 { ISD::SRL, MVT::v8i32, { 3, 6, 4, 5 } }, // psrld + split.
460 { ISD::SRA, MVT::v8i32, { 3, 6, 4, 5 } }, // psrad + split.
461
462 { ISD::SHL, MVT::v2i64, { 1, 2, 1, 1 } }, // psllq.
463 { ISD::SRL, MVT::v2i64, { 1, 2, 1, 1 } }, // psrlq.
464 { ISD::SRA, MVT::v2i64, { 2, 3, 3, 3 } }, // psrad + shuffle.
465 { ISD::SHL, MVT::v4i64, { 3, 6, 4, 5 } }, // 2 x psllq + split.
466 { ISD::SRL, MVT::v4i64, { 3, 6, 4, 5 } }, // 2 x psrlq + split.
467 { ISD::SRA, MVT::v4i64, { 5, 7, 8, 9 } }, // 2 x psrad + shuffle + split.
468
469 { ISD::SDIV, MVT::v8i32, { 14 } }, // 2*pmuludq sequence + split.
470 { ISD::SREM, MVT::v8i32, { 18 } }, // 2*pmuludq+mul+sub sequence + split.
471 { ISD::UDIV, MVT::v8i32, { 12 } }, // 2*pmuludq sequence + split.
472 { ISD::UREM, MVT::v8i32, { 16 } }, // 2*pmuludq+mul+sub sequence + split.
473 };
474
475 // XOP has faster vXi8 shifts.
476 if (Op2Info.isUniform() && Op2Info.isConstant() && ST->hasAVX() &&
477 (!ST->hasXOP() || LT.second.getScalarSizeInBits() != 8))
478 if (const auto *Entry =
479 CostTableLookup(AVXUniformConstCostTable, ISD, LT.second))
480 if (auto KindCost = Entry->Cost[CostKind])
481 return LT.first * KindCost.value();
482
483 static const CostKindTblEntry SSE2UniformConstCostTable[] = {
484 { ISD::SHL, MVT::v16i8, { 1, 7, 2, 3 } }, // psllw + pand.
485 { ISD::SRL, MVT::v16i8, { 1, 7, 2, 3 } }, // psrlw + pand.
486 { ISD::SRA, MVT::v16i8, { 3, 9, 5, 6 } }, // psrlw, pand, pxor, psubb.
487
488 { ISD::SHL, MVT::v8i16, { 1, 1, 1, 1 } }, // psllw.
489 { ISD::SRL, MVT::v8i16, { 1, 1, 1, 1 } }, // psrlw.
490 { ISD::SRA, MVT::v8i16, { 1, 1, 1, 1 } }, // psraw.
491
492 { ISD::SHL, MVT::v4i32, { 1, 1, 1, 1 } }, // pslld
493 { ISD::SRL, MVT::v4i32, { 1, 1, 1, 1 } }, // psrld.
494 { ISD::SRA, MVT::v4i32, { 1, 1, 1, 1 } }, // psrad.
495
496 { ISD::SHL, MVT::v2i64, { 1, 1, 1, 1 } }, // psllq.
497 { ISD::SRL, MVT::v2i64, { 1, 1, 1, 1 } }, // psrlq.
498 { ISD::SRA, MVT::v2i64, { 3, 5, 6, 6 } }, // 2 x psrad + shuffle.
499
500 { ISD::SDIV, MVT::v4i32, { 6 } }, // pmuludq sequence
501 { ISD::SREM, MVT::v4i32, { 8 } }, // pmuludq+mul+sub sequence
502 { ISD::UDIV, MVT::v4i32, { 5 } }, // pmuludq sequence
503 { ISD::UREM, MVT::v4i32, { 7 } }, // pmuludq+mul+sub sequence
504 };
505
506 // XOP has faster vXi8 shifts.
507 if (Op2Info.isUniform() && Op2Info.isConstant() && ST->hasSSE2() &&
508 (!ST->hasXOP() || LT.second.getScalarSizeInBits() != 8))
509 if (const auto *Entry =
510 CostTableLookup(SSE2UniformConstCostTable, ISD, LT.second))
511 if (auto KindCost = Entry->Cost[CostKind])
512 return LT.first * KindCost.value();
513
514 static const CostKindTblEntry AVX512BWConstCostTable[] = {
515 { ISD::SDIV, MVT::v64i8, { 14 } }, // 2*ext+2*pmulhw sequence
516 { ISD::SREM, MVT::v64i8, { 16 } }, // 2*ext+2*pmulhw+mul+sub sequence
517 { ISD::UDIV, MVT::v64i8, { 14 } }, // 2*ext+2*pmulhw sequence
518 { ISD::UREM, MVT::v64i8, { 16 } }, // 2*ext+2*pmulhw+mul+sub sequence
519
520 { ISD::SDIV, MVT::v32i16, { 6 } }, // vpmulhw sequence
521 { ISD::SREM, MVT::v32i16, { 8 } }, // vpmulhw+mul+sub sequence
522 { ISD::UDIV, MVT::v32i16, { 6 } }, // vpmulhuw sequence
523 { ISD::UREM, MVT::v32i16, { 8 } }, // vpmulhuw+mul+sub sequence
524 };
525
526 if (Op2Info.isConstant() && ST->hasBWI())
527 if (const auto *Entry =
528 CostTableLookup(AVX512BWConstCostTable, ISD, LT.second))
529 if (auto KindCost = Entry->Cost[CostKind])
530 return LT.first * KindCost.value();
531
532 static const CostKindTblEntry AVX512ConstCostTable[] = {
533 { ISD::SDIV, MVT::v64i8, { 28 } }, // 4*ext+4*pmulhw sequence
534 { ISD::SREM, MVT::v64i8, { 32 } }, // 4*ext+4*pmulhw+mul+sub sequence
535 { ISD::UDIV, MVT::v64i8, { 28 } }, // 4*ext+4*pmulhw sequence
536 { ISD::UREM, MVT::v64i8, { 32 } }, // 4*ext+4*pmulhw+mul+sub sequence
537
538 { ISD::SDIV, MVT::v32i16, { 12 } }, // 2*vpmulhw sequence
539 { ISD::SREM, MVT::v32i16, { 16 } }, // 2*vpmulhw+mul+sub sequence
540 { ISD::UDIV, MVT::v32i16, { 12 } }, // 2*vpmulhuw sequence
541 { ISD::UREM, MVT::v32i16, { 16 } }, // 2*vpmulhuw+mul+sub sequence
542
543 { ISD::SDIV, MVT::v16i32, { 15 } }, // vpmuldq sequence
544 { ISD::SREM, MVT::v16i32, { 17 } }, // vpmuldq+mul+sub sequence
545 { ISD::UDIV, MVT::v16i32, { 15 } }, // vpmuludq sequence
546 { ISD::UREM, MVT::v16i32, { 17 } }, // vpmuludq+mul+sub sequence
547 };
548
549 if (Op2Info.isConstant() && ST->hasAVX512())
550 if (const auto *Entry =
551 CostTableLookup(AVX512ConstCostTable, ISD, LT.second))
552 if (auto KindCost = Entry->Cost[CostKind])
553 return LT.first * KindCost.value();
554
555 static const CostKindTblEntry AVX2ConstCostTable[] = {
556 { ISD::SDIV, MVT::v32i8, { 14 } }, // 2*ext+2*pmulhw sequence
557 { ISD::SREM, MVT::v32i8, { 16 } }, // 2*ext+2*pmulhw+mul+sub sequence
558 { ISD::UDIV, MVT::v32i8, { 14 } }, // 2*ext+2*pmulhw sequence
559 { ISD::UREM, MVT::v32i8, { 16 } }, // 2*ext+2*pmulhw+mul+sub sequence
560
561 { ISD::SDIV, MVT::v16i16, { 6 } }, // vpmulhw sequence
562 { ISD::SREM, MVT::v16i16, { 8 } }, // vpmulhw+mul+sub sequence
563 { ISD::UDIV, MVT::v16i16, { 6 } }, // vpmulhuw sequence
564 { ISD::UREM, MVT::v16i16, { 8 } }, // vpmulhuw+mul+sub sequence
565
566 { ISD::SDIV, MVT::v8i32, { 15 } }, // vpmuldq sequence
567 { ISD::SREM, MVT::v8i32, { 19 } }, // vpmuldq+mul+sub sequence
568 { ISD::UDIV, MVT::v8i32, { 15 } }, // vpmuludq sequence
569 { ISD::UREM, MVT::v8i32, { 19 } }, // vpmuludq+mul+sub sequence
570 };
571
572 if (Op2Info.isConstant() && ST->hasAVX2())
573 if (const auto *Entry = CostTableLookup(AVX2ConstCostTable, ISD, LT.second))
574 if (auto KindCost = Entry->Cost[CostKind])
575 return LT.first * KindCost.value();
576
577 static const CostKindTblEntry AVXConstCostTable[] = {
578 { ISD::SDIV, MVT::v32i8, { 30 } }, // 4*ext+4*pmulhw sequence + split.
579 { ISD::SREM, MVT::v32i8, { 34 } }, // 4*ext+4*pmulhw+mul+sub sequence + split.
580 { ISD::UDIV, MVT::v32i8, { 30 } }, // 4*ext+4*pmulhw sequence + split.
581 { ISD::UREM, MVT::v32i8, { 34 } }, // 4*ext+4*pmulhw+mul+sub sequence + split.
582
583 { ISD::SDIV, MVT::v16i16, { 14 } }, // 2*pmulhw sequence + split.
584 { ISD::SREM, MVT::v16i16, { 18 } }, // 2*pmulhw+mul+sub sequence + split.
585 { ISD::UDIV, MVT::v16i16, { 14 } }, // 2*pmulhuw sequence + split.
586 { ISD::UREM, MVT::v16i16, { 18 } }, // 2*pmulhuw+mul+sub sequence + split.
587
588 { ISD::SDIV, MVT::v8i32, { 32 } }, // vpmuludq sequence
589 { ISD::SREM, MVT::v8i32, { 38 } }, // vpmuludq+mul+sub sequence
590 { ISD::UDIV, MVT::v8i32, { 32 } }, // 2*pmuludq sequence + split.
591 { ISD::UREM, MVT::v8i32, { 42 } }, // 2*pmuludq+mul+sub sequence + split.
592 };
593
594 if (Op2Info.isConstant() && ST->hasAVX())
595 if (const auto *Entry = CostTableLookup(AVXConstCostTable, ISD, LT.second))
596 if (auto KindCost = Entry->Cost[CostKind])
597 return LT.first * KindCost.value();
598
599 static const CostKindTblEntry SSE41ConstCostTable[] = {
600 { ISD::SDIV, MVT::v4i32, { 15 } }, // vpmuludq sequence
601 { ISD::SREM, MVT::v4i32, { 20 } }, // vpmuludq+mul+sub sequence
602 };
603
604 if (Op2Info.isConstant() && ST->hasSSE41())
605 if (const auto *Entry =
606 CostTableLookup(SSE41ConstCostTable, ISD, LT.second))
607 if (auto KindCost = Entry->Cost[CostKind])
608 return LT.first * KindCost.value();
609
610 static const CostKindTblEntry SSE2ConstCostTable[] = {
611 { ISD::SDIV, MVT::v16i8, { 14 } }, // 2*ext+2*pmulhw sequence
612 { ISD::SREM, MVT::v16i8, { 16 } }, // 2*ext+2*pmulhw+mul+sub sequence
613 { ISD::UDIV, MVT::v16i8, { 14 } }, // 2*ext+2*pmulhw sequence
614 { ISD::UREM, MVT::v16i8, { 16 } }, // 2*ext+2*pmulhw+mul+sub sequence
615
616 { ISD::SDIV, MVT::v8i16, { 6 } }, // pmulhw sequence
617 { ISD::SREM, MVT::v8i16, { 8 } }, // pmulhw+mul+sub sequence
618 { ISD::UDIV, MVT::v8i16, { 6 } }, // pmulhuw sequence
619 { ISD::UREM, MVT::v8i16, { 8 } }, // pmulhuw+mul+sub sequence
620
621 { ISD::SDIV, MVT::v4i32, { 19 } }, // pmuludq sequence
622 { ISD::SREM, MVT::v4i32, { 24 } }, // pmuludq+mul+sub sequence
623 { ISD::UDIV, MVT::v4i32, { 15 } }, // pmuludq sequence
624 { ISD::UREM, MVT::v4i32, { 20 } }, // pmuludq+mul+sub sequence
625 };
626
627 if (Op2Info.isConstant() && ST->hasSSE2())
628 if (const auto *Entry = CostTableLookup(SSE2ConstCostTable, ISD, LT.second))
629 if (auto KindCost = Entry->Cost[CostKind])
630 return LT.first * KindCost.value();
631
632 static const CostKindTblEntry AVX512BWUniformCostTable[] = {
633 { ISD::SHL, MVT::v16i8, { 3, 5, 5, 7 } }, // psllw + pand.
634 { ISD::SRL, MVT::v16i8, { 3,10, 5, 8 } }, // psrlw + pand.
635 { ISD::SRA, MVT::v16i8, { 4,12, 8,12 } }, // psrlw, pand, pxor, psubb.
636 { ISD::SHL, MVT::v32i8, { 4, 7, 6, 8 } }, // psllw + pand.
637 { ISD::SRL, MVT::v32i8, { 4, 8, 7, 9 } }, // psrlw + pand.
638 { ISD::SRA, MVT::v32i8, { 5,10,10,13 } }, // psrlw, pand, pxor, psubb.
639 { ISD::SHL, MVT::v64i8, { 4, 7, 6, 8 } }, // psllw + pand.
640 { ISD::SRL, MVT::v64i8, { 4, 8, 7,10 } }, // psrlw + pand.
641 { ISD::SRA, MVT::v64i8, { 5,10,10,15 } }, // psrlw, pand, pxor, psubb.
642
643 { ISD::SHL, MVT::v32i16, { 2, 4, 2, 3 } }, // psllw
644 { ISD::SRL, MVT::v32i16, { 2, 4, 2, 3 } }, // psrlw
645 { ISD::SRA, MVT::v32i16, { 2, 4, 2, 3 } }, // psraw
646 };
647
648 if (ST->hasBWI() && Op2Info.isUniform())
649 if (const auto *Entry =
650 CostTableLookup(AVX512BWUniformCostTable, ISD, LT.second))
651 if (auto KindCost = Entry->Cost[CostKind])
652 return LT.first * KindCost.value();
653
654 static const CostKindTblEntry AVX512UniformCostTable[] = {
655 { ISD::SHL, MVT::v32i16, { 5,10, 5, 7 } }, // psllw + split.
656 { ISD::SRL, MVT::v32i16, { 5,10, 5, 7 } }, // psrlw + split.
657 { ISD::SRA, MVT::v32i16, { 5,10, 5, 7 } }, // psraw + split.
658
659 { ISD::SHL, MVT::v16i32, { 2, 4, 2, 3 } }, // pslld
660 { ISD::SRL, MVT::v16i32, { 2, 4, 2, 3 } }, // psrld
661 { ISD::SRA, MVT::v16i32, { 2, 4, 2, 3 } }, // psrad
662
663 { ISD::SRA, MVT::v2i64, { 1, 2, 1, 2 } }, // psraq
664 { ISD::SHL, MVT::v4i64, { 1, 4, 1, 2 } }, // psllq
665 { ISD::SRL, MVT::v4i64, { 1, 4, 1, 2 } }, // psrlq
666 { ISD::SRA, MVT::v4i64, { 1, 4, 1, 2 } }, // psraq
667 { ISD::SHL, MVT::v8i64, { 1, 4, 1, 2 } }, // psllq
668 { ISD::SRL, MVT::v8i64, { 1, 4, 1, 2 } }, // psrlq
669 { ISD::SRA, MVT::v8i64, { 1, 4, 1, 2 } }, // psraq
670 };
671
672 if (ST->hasAVX512() && Op2Info.isUniform())
673 if (const auto *Entry =
674 CostTableLookup(AVX512UniformCostTable, ISD, LT.second))
675 if (auto KindCost = Entry->Cost[CostKind])
676 return LT.first * KindCost.value();
677
678 static const CostKindTblEntry AVX2UniformCostTable[] = {
679 // Uniform splats are cheaper for the following instructions.
680 { ISD::SHL, MVT::v16i8, { 3, 5, 5, 7 } }, // psllw + pand.
681 { ISD::SRL, MVT::v16i8, { 3, 9, 5, 8 } }, // psrlw + pand.
682 { ISD::SRA, MVT::v16i8, { 4, 5, 9,13 } }, // psrlw, pand, pxor, psubb.
683 { ISD::SHL, MVT::v32i8, { 4, 7, 6, 8 } }, // psllw + pand.
684 { ISD::SRL, MVT::v32i8, { 4, 8, 7, 9 } }, // psrlw + pand.
685 { ISD::SRA, MVT::v32i8, { 6, 9,11,16 } }, // psrlw, pand, pxor, psubb.
686
687 { ISD::SHL, MVT::v8i16, { 1, 2, 1, 2 } }, // psllw.
688 { ISD::SRL, MVT::v8i16, { 1, 2, 1, 2 } }, // psrlw.
689 { ISD::SRA, MVT::v8i16, { 1, 2, 1, 2 } }, // psraw.
690 { ISD::SHL, MVT::v16i16, { 2, 4, 2, 3 } }, // psllw.
691 { ISD::SRL, MVT::v16i16, { 2, 4, 2, 3 } }, // psrlw.
692 { ISD::SRA, MVT::v16i16, { 2, 4, 2, 3 } }, // psraw.
693
694 { ISD::SHL, MVT::v4i32, { 1, 2, 1, 2 } }, // pslld
695 { ISD::SRL, MVT::v4i32, { 1, 2, 1, 2 } }, // psrld
696 { ISD::SRA, MVT::v4i32, { 1, 2, 1, 2 } }, // psrad
697 { ISD::SHL, MVT::v8i32, { 2, 4, 2, 3 } }, // pslld
698 { ISD::SRL, MVT::v8i32, { 2, 4, 2, 3 } }, // psrld
699 { ISD::SRA, MVT::v8i32, { 2, 4, 2, 3 } }, // psrad
700
701 { ISD::SHL, MVT::v2i64, { 1, 2, 1, 2 } }, // psllq
702 { ISD::SRL, MVT::v2i64, { 1, 2, 1, 2 } }, // psrlq
703 { ISD::SRA, MVT::v2i64, { 2, 4, 5, 7 } }, // 2 x psrad + shuffle.
704 { ISD::SHL, MVT::v4i64, { 2, 4, 1, 2 } }, // psllq
705 { ISD::SRL, MVT::v4i64, { 2, 4, 1, 2 } }, // psrlq
706 { ISD::SRA, MVT::v4i64, { 4, 6, 5, 9 } }, // 2 x psrad + shuffle.
707 };
708
709 if (ST->hasAVX2() && Op2Info.isUniform())
710 if (const auto *Entry =
711 CostTableLookup(AVX2UniformCostTable, ISD, LT.second))
712 if (auto KindCost = Entry->Cost[CostKind])
713 return LT.first * KindCost.value();
714
715 static const CostKindTblEntry AVXUniformCostTable[] = {
716 { ISD::SHL, MVT::v16i8, { 4, 4, 6, 8 } }, // psllw + pand.
717 { ISD::SRL, MVT::v16i8, { 4, 8, 5, 8 } }, // psrlw + pand.
718 { ISD::SRA, MVT::v16i8, { 6, 6, 9,13 } }, // psrlw, pand, pxor, psubb.
719 { ISD::SHL, MVT::v32i8, { 7, 8,11,14 } }, // psllw + pand + split.
720 { ISD::SRL, MVT::v32i8, { 7, 9,10,14 } }, // psrlw + pand + split.
721 { ISD::SRA, MVT::v32i8, { 10,11,16,21 } }, // psrlw, pand, pxor, psubb + split.
722
723 { ISD::SHL, MVT::v8i16, { 1, 3, 1, 2 } }, // psllw.
724 { ISD::SRL, MVT::v8i16, { 1, 3, 1, 2 } }, // psrlw.
725 { ISD::SRA, MVT::v8i16, { 1, 3, 1, 2 } }, // psraw.
726 { ISD::SHL, MVT::v16i16, { 3, 7, 5, 7 } }, // psllw + split.
727 { ISD::SRL, MVT::v16i16, { 3, 7, 5, 7 } }, // psrlw + split.
728 { ISD::SRA, MVT::v16i16, { 3, 7, 5, 7 } }, // psraw + split.
729
730 { ISD::SHL, MVT::v4i32, { 1, 3, 1, 2 } }, // pslld.
731 { ISD::SRL, MVT::v4i32, { 1, 3, 1, 2 } }, // psrld.
732 { ISD::SRA, MVT::v4i32, { 1, 3, 1, 2 } }, // psrad.
733 { ISD::SHL, MVT::v8i32, { 3, 7, 5, 7 } }, // pslld + split.
734 { ISD::SRL, MVT::v8i32, { 3, 7, 5, 7 } }, // psrld + split.
735 { ISD::SRA, MVT::v8i32, { 3, 7, 5, 7 } }, // psrad + split.
736
737 { ISD::SHL, MVT::v2i64, { 1, 3, 1, 2 } }, // psllq.
738 { ISD::SRL, MVT::v2i64, { 1, 3, 1, 2 } }, // psrlq.
739 { ISD::SRA, MVT::v2i64, { 3, 4, 5, 7 } }, // 2 x psrad + shuffle.
740 { ISD::SHL, MVT::v4i64, { 3, 7, 4, 6 } }, // psllq + split.
741 { ISD::SRL, MVT::v4i64, { 3, 7, 4, 6 } }, // psrlq + split.
742 { ISD::SRA, MVT::v4i64, { 6, 7,10,13 } }, // 2 x (2 x psrad + shuffle) + split.
743 };
744
745 // XOP has faster vXi8 shifts.
746 if (ST->hasAVX() && Op2Info.isUniform() &&
747 (!ST->hasXOP() || LT.second.getScalarSizeInBits() != 8))
748 if (const auto *Entry =
749 CostTableLookup(AVXUniformCostTable, ISD, LT.second))
750 if (auto KindCost = Entry->Cost[CostKind])
751 return LT.first * KindCost.value();
752
753 static const CostKindTblEntry SSE2UniformCostTable[] = {
754 // Uniform splats are cheaper for the following instructions.
755 { ISD::SHL, MVT::v16i8, { 9, 10, 6, 9 } }, // psllw + pand.
756 { ISD::SRL, MVT::v16i8, { 9, 13, 5, 9 } }, // psrlw + pand.
757 { ISD::SRA, MVT::v16i8, { 11, 15, 9,13 } }, // pcmpgtb sequence.
758
759 { ISD::SHL, MVT::v8i16, { 2, 2, 1, 2 } }, // psllw.
760 { ISD::SRL, MVT::v8i16, { 2, 2, 1, 2 } }, // psrlw.
761 { ISD::SRA, MVT::v8i16, { 2, 2, 1, 2 } }, // psraw.
762
763 { ISD::SHL, MVT::v4i32, { 2, 2, 1, 2 } }, // pslld
764 { ISD::SRL, MVT::v4i32, { 2, 2, 1, 2 } }, // psrld.
765 { ISD::SRA, MVT::v4i32, { 2, 2, 1, 2 } }, // psrad.
766
767 { ISD::SHL, MVT::v2i64, { 2, 2, 1, 2 } }, // psllq.
768 { ISD::SRL, MVT::v2i64, { 2, 2, 1, 2 } }, // psrlq.
769 { ISD::SRA, MVT::v2i64, { 5, 9, 5, 7 } }, // 2*psrlq + xor + sub.
770 };
771
772 if (ST->hasSSE2() && Op2Info.isUniform() &&
773 (!ST->hasXOP() || LT.second.getScalarSizeInBits() != 8))
774 if (const auto *Entry =
775 CostTableLookup(SSE2UniformCostTable, ISD, LT.second))
776 if (auto KindCost = Entry->Cost[CostKind])
777 return LT.first * KindCost.value();
778
779 static const CostKindTblEntry AVX512DQCostTable[] = {
780 { ISD::MUL, MVT::v2i64, { 2, 15, 1, 3 } }, // pmullq
781 { ISD::MUL, MVT::v4i64, { 2, 15, 1, 3 } }, // pmullq
782 { ISD::MUL, MVT::v8i64, { 3, 15, 1, 3 } } // pmullq
783 };
784
785 // Look for AVX512DQ lowering tricks for custom cases.
786 if (ST->hasDQI())
787 if (const auto *Entry = CostTableLookup(AVX512DQCostTable, ISD, LT.second))
788 if (auto KindCost = Entry->Cost[CostKind])
789 return LT.first * KindCost.value();
790
791 static const CostKindTblEntry AVX512BWCostTable[] = {
792 { ISD::SHL, MVT::v16i8, { 4, 8, 4, 5 } }, // extend/vpsllvw/pack sequence.
793 { ISD::SRL, MVT::v16i8, { 4, 8, 4, 5 } }, // extend/vpsrlvw/pack sequence.
794 { ISD::SRA, MVT::v16i8, { 4, 8, 4, 5 } }, // extend/vpsravw/pack sequence.
795 { ISD::SHL, MVT::v32i8, { 4, 23,11,16 } }, // extend/vpsllvw/pack sequence.
796 { ISD::SRL, MVT::v32i8, { 4, 30,12,18 } }, // extend/vpsrlvw/pack sequence.
797 { ISD::SRA, MVT::v32i8, { 6, 13,24,30 } }, // extend/vpsravw/pack sequence.
798 { ISD::SHL, MVT::v64i8, { 6, 19,13,15 } }, // extend/vpsllvw/pack sequence.
799 { ISD::SRL, MVT::v64i8, { 7, 27,15,18 } }, // extend/vpsrlvw/pack sequence.
800 { ISD::SRA, MVT::v64i8, { 15, 15,30,30 } }, // extend/vpsravw/pack sequence.
801
802 { ISD::SHL, MVT::v8i16, { 1, 1, 1, 1 } }, // vpsllvw
803 { ISD::SRL, MVT::v8i16, { 1, 1, 1, 1 } }, // vpsrlvw
804 { ISD::SRA, MVT::v8i16, { 1, 1, 1, 1 } }, // vpsravw
805 { ISD::SHL, MVT::v16i16, { 1, 1, 1, 1 } }, // vpsllvw
806 { ISD::SRL, MVT::v16i16, { 1, 1, 1, 1 } }, // vpsrlvw
807 { ISD::SRA, MVT::v16i16, { 1, 1, 1, 1 } }, // vpsravw
808 { ISD::SHL, MVT::v32i16, { 1, 1, 1, 1 } }, // vpsllvw
809 { ISD::SRL, MVT::v32i16, { 1, 1, 1, 1 } }, // vpsrlvw
810 { ISD::SRA, MVT::v32i16, { 1, 1, 1, 1 } }, // vpsravw
811
812 { ISD::ADD, MVT::v64i8, { 1, 1, 1, 1 } }, // paddb
813 { ISD::ADD, MVT::v32i16, { 1, 1, 1, 1 } }, // paddw
814
815 { ISD::ADD, MVT::v32i8, { 1, 1, 1, 1 } }, // paddb
816 { ISD::ADD, MVT::v16i16, { 1, 1, 1, 1 } }, // paddw
817 { ISD::ADD, MVT::v8i32, { 1, 1, 1, 1 } }, // paddd
818 { ISD::ADD, MVT::v4i64, { 1, 1, 1, 1 } }, // paddq
819
820 { ISD::SUB, MVT::v64i8, { 1, 1, 1, 1 } }, // psubb
821 { ISD::SUB, MVT::v32i16, { 1, 1, 1, 1 } }, // psubw
822
823 { ISD::MUL, MVT::v32i16, { 1, 5, 1, 1 } }, // pmullw
824
825 { ISD::SUB, MVT::v32i8, { 1, 1, 1, 1 } }, // psubb
826 { ISD::SUB, MVT::v16i16, { 1, 1, 1, 1 } }, // psubw
827 { ISD::SUB, MVT::v8i32, { 1, 1, 1, 1 } }, // psubd
828 { ISD::SUB, MVT::v4i64, { 1, 1, 1, 1 } }, // psubq
829 };
830
831 // Look for AVX512BW lowering tricks for custom cases.
832 if (ST->hasBWI())
833 if (const auto *Entry = CostTableLookup(AVX512BWCostTable, ISD, LT.second))
834 if (auto KindCost = Entry->Cost[CostKind])
835 return LT.first * KindCost.value();
836
837 static const CostKindTblEntry AVX512CostTable[] = {
838 { ISD::SHL, MVT::v64i8, { 15, 19,27,33 } }, // vpblendv+split sequence.
839 { ISD::SRL, MVT::v64i8, { 15, 19,30,36 } }, // vpblendv+split sequence.
840 { ISD::SRA, MVT::v64i8, { 37, 37,51,63 } }, // vpblendv+split sequence.
841
842 { ISD::SHL, MVT::v32i16, { 11, 16,11,15 } }, // 2*extend/vpsllvd/pack sequence.
843 { ISD::SRL, MVT::v32i16, { 11, 16,11,15 } }, // 2*extend/vpsrlvd/pack sequence.
844 { ISD::SRA, MVT::v32i16, { 11, 16,11,15 } }, // 2*extend/vpsravd/pack sequence.
845
846 { ISD::SHL, MVT::v4i32, { 1, 1, 1, 1 } },
847 { ISD::SRL, MVT::v4i32, { 1, 1, 1, 1 } },
848 { ISD::SRA, MVT::v4i32, { 1, 1, 1, 1 } },
849 { ISD::SHL, MVT::v8i32, { 1, 1, 1, 1 } },
850 { ISD::SRL, MVT::v8i32, { 1, 1, 1, 1 } },
851 { ISD::SRA, MVT::v8i32, { 1, 1, 1, 1 } },
852 { ISD::SHL, MVT::v16i32, { 1, 1, 1, 1 } },
853 { ISD::SRL, MVT::v16i32, { 1, 1, 1, 1 } },
854 { ISD::SRA, MVT::v16i32, { 1, 1, 1, 1 } },
855
856 { ISD::SHL, MVT::v2i64, { 1, 1, 1, 1 } },
857 { ISD::SRL, MVT::v2i64, { 1, 1, 1, 1 } },
858 { ISD::SRA, MVT::v2i64, { 1, 1, 1, 1 } },
859 { ISD::SHL, MVT::v4i64, { 1, 1, 1, 1 } },
860 { ISD::SRL, MVT::v4i64, { 1, 1, 1, 1 } },
861 { ISD::SRA, MVT::v4i64, { 1, 1, 1, 1 } },
862 { ISD::SHL, MVT::v8i64, { 1, 1, 1, 1 } },
863 { ISD::SRL, MVT::v8i64, { 1, 1, 1, 1 } },
864 { ISD::SRA, MVT::v8i64, { 1, 1, 1, 1 } },
865
866 { ISD::ADD, MVT::v64i8, { 3, 7, 5, 5 } }, // 2*paddb + split
867 { ISD::ADD, MVT::v32i16, { 3, 7, 5, 5 } }, // 2*paddw + split
868
869 { ISD::SUB, MVT::v64i8, { 3, 7, 5, 5 } }, // 2*psubb + split
870 { ISD::SUB, MVT::v32i16, { 3, 7, 5, 5 } }, // 2*psubw + split
871
872 { ISD::AND, MVT::v32i8, { 1, 1, 1, 1 } },
873 { ISD::AND, MVT::v16i16, { 1, 1, 1, 1 } },
874 { ISD::AND, MVT::v8i32, { 1, 1, 1, 1 } },
875 { ISD::AND, MVT::v4i64, { 1, 1, 1, 1 } },
876
877 { ISD::OR, MVT::v32i8, { 1, 1, 1, 1 } },
878 { ISD::OR, MVT::v16i16, { 1, 1, 1, 1 } },
879 { ISD::OR, MVT::v8i32, { 1, 1, 1, 1 } },
880 { ISD::OR, MVT::v4i64, { 1, 1, 1, 1 } },
881
882 { ISD::XOR, MVT::v32i8, { 1, 1, 1, 1 } },
883 { ISD::XOR, MVT::v16i16, { 1, 1, 1, 1 } },
884 { ISD::XOR, MVT::v8i32, { 1, 1, 1, 1 } },
885 { ISD::XOR, MVT::v4i64, { 1, 1, 1, 1 } },
886
887 { ISD::MUL, MVT::v16i32, { 1, 10, 1, 2 } }, // pmulld (Skylake from agner.org)
888 { ISD::MUL, MVT::v8i32, { 1, 10, 1, 2 } }, // pmulld (Skylake from agner.org)
889 { ISD::MUL, MVT::v4i32, { 1, 10, 1, 2 } }, // pmulld (Skylake from agner.org)
890 { ISD::MUL, MVT::v8i64, { 6, 9, 8, 8 } }, // 3*pmuludq/3*shift/2*add
891 { ISD::MUL, MVT::i64, { 1 } }, // Skylake from http://www.agner.org/
892
893 { ISD::FNEG, MVT::v8f64, { 1, 1, 1, 2 } }, // Skylake from http://www.agner.org/
894 { ISD::FADD, MVT::v8f64, { 1, 4, 1, 1 } }, // Skylake from http://www.agner.org/
895 { ISD::FADD, MVT::v4f64, { 1, 4, 1, 1 } }, // Skylake from http://www.agner.org/
896 { ISD::FSUB, MVT::v8f64, { 1, 4, 1, 1 } }, // Skylake from http://www.agner.org/
897 { ISD::FSUB, MVT::v4f64, { 1, 4, 1, 1 } }, // Skylake from http://www.agner.org/
898 { ISD::FMUL, MVT::v8f64, { 1, 4, 1, 1 } }, // Skylake from http://www.agner.org/
899 { ISD::FMUL, MVT::v4f64, { 1, 4, 1, 1 } }, // Skylake from http://www.agner.org/
900 { ISD::FMUL, MVT::v2f64, { 1, 4, 1, 1 } }, // Skylake from http://www.agner.org/
901 { ISD::FMUL, MVT::f64, { 1, 4, 1, 1 } }, // Skylake from http://www.agner.org/
902
903 { ISD::FDIV, MVT::f64, { 4, 14, 1, 1 } }, // Skylake from http://www.agner.org/
904 { ISD::FDIV, MVT::v2f64, { 4, 14, 1, 1 } }, // Skylake from http://www.agner.org/
905 { ISD::FDIV, MVT::v4f64, { 8, 14, 1, 1 } }, // Skylake from http://www.agner.org/
906 { ISD::FDIV, MVT::v8f64, { 16, 23, 1, 3 } }, // Skylake from http://www.agner.org/
907
908 { ISD::FNEG, MVT::v16f32, { 1, 1, 1, 2 } }, // Skylake from http://www.agner.org/
909 { ISD::FADD, MVT::v16f32, { 1, 4, 1, 1 } }, // Skylake from http://www.agner.org/
910 { ISD::FADD, MVT::v8f32, { 1, 4, 1, 1 } }, // Skylake from http://www.agner.org/
911 { ISD::FSUB, MVT::v16f32, { 1, 4, 1, 1 } }, // Skylake from http://www.agner.org/
912 { ISD::FSUB, MVT::v8f32, { 1, 4, 1, 1 } }, // Skylake from http://www.agner.org/
913 { ISD::FMUL, MVT::v16f32, { 1, 4, 1, 1 } }, // Skylake from http://www.agner.org/
914 { ISD::FMUL, MVT::v8f32, { 1, 4, 1, 1 } }, // Skylake from http://www.agner.org/
915 { ISD::FMUL, MVT::v4f32, { 1, 4, 1, 1 } }, // Skylake from http://www.agner.org/
916 { ISD::FMUL, MVT::f32, { 1, 4, 1, 1 } }, // Skylake from http://www.agner.org/
917
918 { ISD::FDIV, MVT::f32, { 3, 11, 1, 1 } }, // Skylake from http://www.agner.org/
919 { ISD::FDIV, MVT::v4f32, { 3, 11, 1, 1 } }, // Skylake from http://www.agner.org/
920 { ISD::FDIV, MVT::v8f32, { 5, 11, 1, 1 } }, // Skylake from http://www.agner.org/
921 { ISD::FDIV, MVT::v16f32, { 10, 18, 1, 3 } }, // Skylake from http://www.agner.org/
922 };
923
924 if (ST->hasAVX512())
925 if (const auto *Entry = CostTableLookup(AVX512CostTable, ISD, LT.second))
926 if (auto KindCost = Entry->Cost[CostKind])
927 return LT.first * KindCost.value();
928
929 static const CostKindTblEntry AVX2ShiftCostTable[] = {
930 // Shifts on vXi64/vXi32 are legal on AVX2, even though we declare them
931 // Custom so that we can detect the cases where the shift amount is a scalar.
932 { ISD::SHL, MVT::v4i32, { 2, 3, 1, 3 } }, // vpsllvd (Haswell from agner.org)
933 { ISD::SRL, MVT::v4i32, { 2, 3, 1, 3 } }, // vpsrlvd (Haswell from agner.org)
934 { ISD::SRA, MVT::v4i32, { 2, 3, 1, 3 } }, // vpsravd (Haswell from agner.org)
935 { ISD::SHL, MVT::v8i32, { 4, 4, 1, 3 } }, // vpsllvd (Haswell from agner.org)
936 { ISD::SRL, MVT::v8i32, { 4, 4, 1, 3 } }, // vpsrlvd (Haswell from agner.org)
937 { ISD::SRA, MVT::v8i32, { 4, 4, 1, 3 } }, // vpsravd (Haswell from agner.org)
938 { ISD::SHL, MVT::v2i64, { 2, 3, 1, 1 } }, // vpsllvq (Haswell from agner.org)
939 { ISD::SRL, MVT::v2i64, { 2, 3, 1, 1 } }, // vpsrlvq (Haswell from agner.org)
940 { ISD::SHL, MVT::v4i64, { 4, 4, 1, 2 } }, // vpsllvq (Haswell from agner.org)
941 { ISD::SRL, MVT::v4i64, { 4, 4, 1, 2 } }, // vpsrlvq (Haswell from agner.org)
942 };
943
944 if (ST->hasAVX512()) {
945 if (ISD == ISD::SHL && LT.second == MVT::v32i16 && Op2Info.isConstant())
946 // On AVX512, a packed v32i16 shift left by a constant build_vector
947 // is lowered into a vector multiply (vpmullw).
948 return getArithmeticInstrCost(Instruction::Mul, Ty, CostKind,
949 Op1Info.getNoProps(), Op2Info.getNoProps());
950 }
951
952 // Look for AVX2 lowering tricks (XOP is always better at v4i32 shifts).
953 if (ST->hasAVX2() && !(ST->hasXOP() && LT.second == MVT::v4i32)) {
954 if (ISD == ISD::SHL && LT.second == MVT::v16i16 &&
955 Op2Info.isConstant())
956 // On AVX2, a packed v16i16 shift left by a constant build_vector
957 // is lowered into a vector multiply (vpmullw).
958 return getArithmeticInstrCost(Instruction::Mul, Ty, CostKind,
959 Op1Info.getNoProps(), Op2Info.getNoProps());
960
961 if (const auto *Entry = CostTableLookup(AVX2ShiftCostTable, ISD, LT.second))
962 if (auto KindCost = Entry->Cost[CostKind])
963 return LT.first * KindCost.value();
964 }
965
966 static const CostKindTblEntry XOPShiftCostTable[] = {
967 // 128bit shifts take 1cy, but right shifts require negation beforehand.
968 { ISD::SHL, MVT::v16i8, { 1, 3, 1, 1 } },
969 { ISD::SRL, MVT::v16i8, { 2, 3, 1, 1 } },
970 { ISD::SRA, MVT::v16i8, { 2, 3, 1, 1 } },
971 { ISD::SHL, MVT::v8i16, { 1, 3, 1, 1 } },
972 { ISD::SRL, MVT::v8i16, { 2, 3, 1, 1 } },
973 { ISD::SRA, MVT::v8i16, { 2, 3, 1, 1 } },
974 { ISD::SHL, MVT::v4i32, { 1, 3, 1, 1 } },
975 { ISD::SRL, MVT::v4i32, { 2, 3, 1, 1 } },
976 { ISD::SRA, MVT::v4i32, { 2, 3, 1, 1 } },
977 { ISD::SHL, MVT::v2i64, { 1, 3, 1, 1 } },
978 { ISD::SRL, MVT::v2i64, { 2, 3, 1, 1 } },
979 { ISD::SRA, MVT::v2i64, { 2, 3, 1, 1 } },
980 // 256bit shifts require splitting if AVX2 didn't catch them above.
981 { ISD::SHL, MVT::v32i8, { 4, 7, 5, 6 } },
982 { ISD::SRL, MVT::v32i8, { 6, 7, 5, 6 } },
983 { ISD::SRA, MVT::v32i8, { 6, 7, 5, 6 } },
984 { ISD::SHL, MVT::v16i16, { 4, 7, 5, 6 } },
985 { ISD::SRL, MVT::v16i16, { 6, 7, 5, 6 } },
986 { ISD::SRA, MVT::v16i16, { 6, 7, 5, 6 } },
987 { ISD::SHL, MVT::v8i32, { 4, 7, 5, 6 } },
988 { ISD::SRL, MVT::v8i32, { 6, 7, 5, 6 } },
989 { ISD::SRA, MVT::v8i32, { 6, 7, 5, 6 } },
990 { ISD::SHL, MVT::v4i64, { 4, 7, 5, 6 } },
991 { ISD::SRL, MVT::v4i64, { 6, 7, 5, 6 } },
992 { ISD::SRA, MVT::v4i64, { 6, 7, 5, 6 } },
993 };
994
995 // Look for XOP lowering tricks.
996 if (ST->hasXOP()) {
997 // If the right shift is constant then we'll fold the negation so
998 // it's as cheap as a left shift.
999 int ShiftISD = ISD;
1000 if ((ShiftISD == ISD::SRL || ShiftISD == ISD::SRA) && Op2Info.isConstant())
1001 ShiftISD = ISD::SHL;
1002 if (const auto *Entry =
1003 CostTableLookup(XOPShiftCostTable, ShiftISD, LT.second))
1004 if (auto KindCost = Entry->Cost[CostKind])
1005 return LT.first * KindCost.value();
1006 }
1007
1008 if (ISD == ISD::SHL && !Op2Info.isUniform() && Op2Info.isConstant()) {
1009 MVT VT = LT.second;
1010 // A vector shift left by a non-uniform constant can be lowered
1011 // into a vector multiply.
1012 if (((VT == MVT::v8i16 || VT == MVT::v4i32) && ST->hasSSE2()) ||
1013 ((VT == MVT::v16i16 || VT == MVT::v8i32) && ST->hasAVX()))
1014 ISD = ISD::MUL;
1015 }
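
The equivalence being exploited, lane-wise: a left shift by a constant vector is a multiply by the matching powers of two, e.g. X << <0, 1, 2, 3> == X * <1, 2, 4, 8>, which pmullw/pmulld handle directly. As a scalar sketch:

  int shlAsMul(int X, unsigned C) { return X * (1 << C); } // same result as X << C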
1016
1017 static const CostKindTblEntry GLMCostTable[] = {
1018 { ISD::FDIV, MVT::f32, { 18, 19, 1, 1 } }, // divss
1019 { ISD::FDIV, MVT::v4f32, { 35, 36, 1, 1 } }, // divps
1020 { ISD::FDIV, MVT::f64, { 33, 34, 1, 1 } }, // divsd
1021 { ISD::FDIV, MVT::v2f64, { 65, 66, 1, 1 } }, // divpd
1022 };
1023
1024 if (ST->useGLMDivSqrtCosts())
1025 if (const auto *Entry = CostTableLookup(GLMCostTable, ISD, LT.second))
1026 if (auto KindCost = Entry->Cost[CostKind])
1027 return LT.first * KindCost.value();
1028
1029 static const CostKindTblEntry SLMCostTable[] = {
1030 { ISD::MUL, MVT::v4i32, { 11, 11, 1, 7 } }, // pmulld
1031 { ISD::MUL, MVT::v8i16, { 2, 5, 1, 1 } }, // pmullw
1032 { ISD::FMUL, MVT::f64, { 2, 5, 1, 1 } }, // mulsd
1033 { ISD::FMUL, MVT::f32, { 1, 4, 1, 1 } }, // mulss
1034 { ISD::FMUL, MVT::v2f64, { 4, 7, 1, 1 } }, // mulpd
1035 { ISD::FMUL, MVT::v4f32, { 2, 5, 1, 1 } }, // mulps
1036 { ISD::FDIV, MVT::f32, { 17, 19, 1, 1 } }, // divss
1037 { ISD::FDIV, MVT::v4f32, { 39, 39, 1, 6 } }, // divps
1038 { ISD::FDIV, MVT::f64, { 32, 34, 1, 1 } }, // divsd
1039 { ISD::FDIV, MVT::v2f64, { 69, 69, 1, 6 } }, // divpd
1040 { ISD::FADD, MVT::v2f64, { 2, 4, 1, 1 } }, // addpd
1041 { ISD::FSUB, MVT::v2f64, { 2, 4, 1, 1 } }, // subpd
1042 // v2i64/v4i64 mul is custom lowered as a series of
1043 // long multiplies(3), shifts(3) and adds(2)
1044 // slm muldq version throughput is 2 and addq throughput 4
1045 // thus: 3X2 (muldq throughput) + 3X1 (shift throughput) +
1046 // 2X4 (addq throughput) = 17
1047 { ISD::MUL, MVT::v2i64, { 17, 22, 9, 9 } },
1048 // slm addq\subq throughput is 4
1049 { ISD::ADD, MVT::v2i64, { 4, 2, 1, 2 } },
1050 { ISD::SUB, MVT::v2i64, { 4, 2, 1, 2 } },
1051 };
1052
1053 if (ST->useSLMArithCosts())
1054 if (const auto *Entry = CostTableLookup(SLMCostTable, ISD, LT.second))
1055 if (auto KindCost = Entry->Cost[CostKind])
1056 return LT.first * KindCost.value();
1057
1058 static const CostKindTblEntry AVX2CostTable[] = {
1059 { ISD::SHL, MVT::v16i8, { 6, 21,11,16 } }, // vpblendvb sequence.
1060 { ISD::SHL, MVT::v32i8, { 6, 23,11,22 } }, // vpblendvb sequence.
1061 { ISD::SHL, MVT::v8i16, { 5, 18, 5,10 } }, // extend/vpsllvd/pack sequence.
1062 { ISD::SHL, MVT::v16i16, { 8, 10,10,14 } }, // extend/vpsllvd/pack sequence.
1063
1064 { ISD::SRL, MVT::v16i8, { 6, 27,12,18 } }, // vpblendvb sequence.
1065 { ISD::SRL, MVT::v32i8, { 8, 30,12,24 } }, // vpblendvb sequence.
1066 { ISD::SRL, MVT::v8i16, { 5, 11, 5,10 } }, // extend/vpsrlvd/pack sequence.
1067 { ISD::SRL, MVT::v16i16, { 8, 10,10,14 } }, // extend/vpsrlvd/pack sequence.
1068
1069 { ISD::SRA, MVT::v16i8, { 17, 17,24,30 } }, // vpblendvb sequence.
1070 { ISD::SRA, MVT::v32i8, { 18, 20,24,43 } }, // vpblendvb sequence.
1071 { ISD::SRA, MVT::v8i16, { 5, 11, 5,10 } }, // extend/vpsravd/pack sequence.
1072 { ISD::SRA, MVT::v16i16, { 8, 10,10,14 } }, // extend/vpsravd/pack sequence.
1073 { ISD::SRA, MVT::v2i64, { 4, 5, 5, 5 } }, // srl/xor/sub sequence.
1074 { ISD::SRA, MVT::v4i64, { 8, 8, 5, 9 } }, // srl/xor/sub sequence.
1075
1076 { ISD::SUB, MVT::v32i8, { 1, 1, 1, 2 } }, // psubb
1077 { ISD::ADD, MVT::v32i8, { 1, 1, 1, 2 } }, // paddb
1078 { ISD::SUB, MVT::v16i16, { 1, 1, 1, 2 } }, // psubw
1079 { ISD::ADD, MVT::v16i16, { 1, 1, 1, 2 } }, // paddw
1080 { ISD::SUB, MVT::v8i32, { 1, 1, 1, 2 } }, // psubd
1081 { ISD::ADD, MVT::v8i32, { 1, 1, 1, 2 } }, // paddd
1082 { ISD::SUB, MVT::v4i64, { 1, 1, 1, 2 } }, // psubq
1083 { ISD::ADD, MVT::v4i64, { 1, 1, 1, 2 } }, // paddq
1084
1085 { ISD::MUL, MVT::v16i16, { 2, 5, 1, 1 } }, // pmullw
1086 { ISD::MUL, MVT::v8i32, { 4, 10, 1, 2 } }, // pmulld
1087 { ISD::MUL, MVT::v4i32, { 2, 10, 1, 2 } }, // pmulld
1088 { ISD::MUL, MVT::v4i64, { 6, 10, 8,13 } }, // 3*pmuludq/3*shift/2*add
1089 { ISD::MUL, MVT::v2i64, { 6, 10, 8, 8 } }, // 3*pmuludq/3*shift/2*add
1090
1091 { ISD::FNEG, MVT::v4f64, { 1, 1, 1, 2 } }, // vxorpd
1092 { ISD::FNEG, MVT::v8f32, { 1, 1, 1, 2 } }, // vxorps
1093
1094 { ISD::FADD, MVT::f64, { 1, 4, 1, 1 } }, // vaddsd
1095 { ISD::FADD, MVT::f32, { 1, 4, 1, 1 } }, // vaddss
1096 { ISD::FADD, MVT::v2f64, { 1, 4, 1, 1 } }, // vaddpd
1097 { ISD::FADD, MVT::v4f32, { 1, 4, 1, 1 } }, // vaddps
1098 { ISD::FADD, MVT::v4f64, { 1, 4, 1, 2 } }, // vaddpd
1099 { ISD::FADD, MVT::v8f32, { 1, 4, 1, 2 } }, // vaddps
1100
1101 { ISD::FSUB, MVT::f64, { 1, 4, 1, 1 } }, // vsubsd
1102 { ISD::FSUB, MVT::f32, { 1, 4, 1, 1 } }, // vsubss
1103 { ISD::FSUB, MVT::v2f64, { 1, 4, 1, 1 } }, // vsubpd
1104 { ISD::FSUB, MVT::v4f32, { 1, 4, 1, 1 } }, // vsubps
1105 { ISD::FSUB, MVT::v4f64, { 1, 4, 1, 2 } }, // vsubpd
1106 { ISD::FSUB, MVT::v8f32, { 1, 4, 1, 2 } }, // vsubps
1107
1108 { ISD::FMUL, MVT::f64, { 1, 5, 1, 1 } }, // vmulsd
1109 { ISD::FMUL, MVT::f32, { 1, 5, 1, 1 } }, // vmulss
1110 { ISD::FMUL, MVT::v2f64, { 1, 5, 1, 1 } }, // vmulpd
1111 { ISD::FMUL, MVT::v4f32, { 1, 5, 1, 1 } }, // vmulps
1112 { ISD::FMUL, MVT::v4f64, { 1, 5, 1, 2 } }, // vmulpd
1113 { ISD::FMUL, MVT::v8f32, { 1, 5, 1, 2 } }, // vmulps
1114
1115 { ISD::FDIV, MVT::f32, { 7, 13, 1, 1 } }, // vdivss
1116 { ISD::FDIV, MVT::v4f32, { 7, 13, 1, 1 } }, // vdivps
1117 { ISD::FDIV, MVT::v8f32, { 14, 21, 1, 3 } }, // vdivps
1118 { ISD::FDIV, MVT::f64, { 14, 20, 1, 1 } }, // vdivsd
1119 { ISD::FDIV, MVT::v2f64, { 14, 20, 1, 1 } }, // vdivpd
1120 { ISD::FDIV, MVT::v4f64, { 28, 35, 1, 3 } }, // vdivpd
1121 };
1122
1123 // Look for AVX2 lowering tricks for custom cases.
1124 if (ST->hasAVX2())
1125 if (const auto *Entry = CostTableLookup(AVX2CostTable, ISD, LT.second))
1126 if (auto KindCost = Entry->Cost[CostKind])
1127 return LT.first * KindCost.value();
1128
1129 static const CostKindTblEntry AVX1CostTable[] = {
1130 // We don't have to scalarize unsupported ops. We can issue two half-sized
1131 // operations and we only need to extract the upper YMM half.
1132 // Two ops + 1 extract + 1 insert = 4.
1133 { ISD::MUL, MVT::v16i16, { 4, 8, 5, 6 } }, // pmullw + split
1134 { ISD::MUL, MVT::v8i32, { 5, 8, 5, 10 } }, // pmulld + split
1135 { ISD::MUL, MVT::v4i32, { 2, 5, 1, 3 } }, // pmulld
1136 { ISD::MUL, MVT::v4i64, { 12, 15, 19, 20 } },
1137
1138 { ISD::AND, MVT::v32i8, { 1, 1, 1, 2 } }, // vandps
1139 { ISD::AND, MVT::v16i16, { 1, 1, 1, 2 } }, // vandps
1140 { ISD::AND, MVT::v8i32, { 1, 1, 1, 2 } }, // vandps
1141 { ISD::AND, MVT::v4i64, { 1, 1, 1, 2 } }, // vandps
1142
1143 { ISD::OR, MVT::v32i8, { 1, 1, 1, 2 } }, // vorps
1144 { ISD::OR, MVT::v16i16, { 1, 1, 1, 2 } }, // vorps
1145 { ISD::OR, MVT::v8i32, { 1, 1, 1, 2 } }, // vorps
1146 { ISD::OR, MVT::v4i64, { 1, 1, 1, 2 } }, // vorps
1147
1148 { ISD::XOR, MVT::v32i8, { 1, 1, 1, 2 } }, // vxorps
1149 { ISD::XOR, MVT::v16i16, { 1, 1, 1, 2 } }, // vxorps
1150 { ISD::XOR, MVT::v8i32, { 1, 1, 1, 2 } }, // vxorps
1151 { ISD::XOR, MVT::v4i64, { 1, 1, 1, 2 } }, // vxorps
1152
1153 { ISD::SUB, MVT::v32i8, { 4, 2, 5, 6 } }, // psubb + split
1154 { ISD::ADD, MVT::v32i8, { 4, 2, 5, 6 } }, // paddb + split
1155 { ISD::SUB, MVT::v16i16, { 4, 2, 5, 6 } }, // psubw + split
1156 { ISD::ADD, MVT::v16i16, { 4, 2, 5, 6 } }, // paddw + split
1157 { ISD::SUB, MVT::v8i32, { 4, 2, 5, 6 } }, // psubd + split
1158 { ISD::ADD, MVT::v8i32, { 4, 2, 5, 6 } }, // paddd + split
1159 { ISD::SUB, MVT::v4i64, { 4, 2, 5, 6 } }, // psubq + split
1160 { ISD::ADD, MVT::v4i64, { 4, 2, 5, 6 } }, // paddq + split
1161 { ISD::SUB, MVT::v2i64, { 1, 1, 1, 1 } }, // psubq
1162 { ISD::ADD, MVT::v2i64, { 1, 1, 1, 1 } }, // paddq
1163
1164 { ISD::SHL, MVT::v16i8, { 10, 21,11,17 } }, // pblendvb sequence.
1165 { ISD::SHL, MVT::v32i8, { 22, 22,27,40 } }, // pblendvb sequence + split.
1166 { ISD::SHL, MVT::v8i16, { 6, 9,11,11 } }, // pblendvb sequence.
1167 { ISD::SHL, MVT::v16i16, { 13, 16,24,25 } }, // pblendvb sequence + split.
1168 { ISD::SHL, MVT::v4i32, { 3, 11, 4, 6 } }, // pslld/paddd/cvttps2dq/pmulld
1169 { ISD::SHL, MVT::v8i32, { 9, 11,12,17 } }, // pslld/paddd/cvttps2dq/pmulld + split
1170 { ISD::SHL, MVT::v2i64, { 2, 4, 4, 6 } }, // Shift each lane + blend.
1171 { ISD::SHL, MVT::v4i64, { 6, 7,11,15 } }, // Shift each lane + blend + split.
1172
1173 { ISD::SRL, MVT::v16i8, { 11, 27,12,18 } }, // pblendvb sequence.
1174 { ISD::SRL, MVT::v32i8, { 23, 23,30,43 } }, // pblendvb sequence + split.
1175 { ISD::SRL, MVT::v8i16, { 13, 16,14,22 } }, // pblendvb sequence.
1176 { ISD::SRL, MVT::v16i16, { 28, 30,31,48 } }, // pblendvb sequence + split.
1177 { ISD::SRL, MVT::v4i32, { 6, 7,12,16 } }, // Shift each lane + blend.
1178 { ISD::SRL, MVT::v8i32, { 14, 14,26,34 } }, // Shift each lane + blend + split.
1179 { ISD::SRL, MVT::v2i64, { 2, 4, 4, 6 } }, // Shift each lane + blend.
1180 { ISD::SRL, MVT::v4i64, { 6, 7,11,15 } }, // Shift each lane + blend + split.
1181
1182 { ISD::SRA, MVT::v16i8, { 21, 22,24,36 } }, // pblendvb sequence.
1183 { ISD::SRA, MVT::v32i8, { 44, 45,51,76 } }, // pblendvb sequence + split.
1184 { ISD::SRA, MVT::v8i16, { 13, 16,14,22 } }, // pblendvb sequence.
1185 { ISD::SRA, MVT::v16i16, { 28, 30,31,48 } }, // pblendvb sequence + split.
1186 { ISD::SRA, MVT::v4i32, { 6, 7,12,16 } }, // Shift each lane + blend.
1187 { ISD::SRA, MVT::v8i32, { 14, 14,26,34 } }, // Shift each lane + blend + split.
1188 { ISD::SRA, MVT::v2i64, { 5, 6,10,14 } }, // Shift each lane + blend.
1189 { ISD::SRA, MVT::v4i64, { 12, 12,22,30 } }, // Shift each lane + blend + split.
1190
1191 { ISD::FNEG, MVT::v4f64, { 2, 2, 1, 2 } }, // BTVER2 from http://www.agner.org/
1192 { ISD::FNEG, MVT::v8f32, { 2, 2, 1, 2 } }, // BTVER2 from http://www.agner.org/
1193
1194 { ISD::FADD, MVT::f64, { 1, 5, 1, 1 } }, // BDVER2 from http://www.agner.org/
1195 { ISD::FADD, MVT::f32, { 1, 5, 1, 1 } }, // BDVER2 from http://www.agner.org/
1196 { ISD::FADD, MVT::v2f64, { 1, 5, 1, 1 } }, // BDVER2 from http://www.agner.org/
1197 { ISD::FADD, MVT::v4f32, { 1, 5, 1, 1 } }, // BDVER2 from http://www.agner.org/
1198 { ISD::FADD, MVT::v4f64, { 2, 5, 1, 2 } }, // BDVER2 from http://www.agner.org/
1199 { ISD::FADD, MVT::v8f32, { 2, 5, 1, 2 } }, // BDVER2 from http://www.agner.org/
1200
1201 { ISD::FSUB, MVT::f64, { 1, 5, 1, 1 } }, // BDVER2 from http://www.agner.org/
1202 { ISD::FSUB, MVT::f32, { 1, 5, 1, 1 } }, // BDVER2 from http://www.agner.org/
1203 { ISD::FSUB, MVT::v2f64, { 1, 5, 1, 1 } }, // BDVER2 from http://www.agner.org/
1204 { ISD::FSUB, MVT::v4f32, { 1, 5, 1, 1 } }, // BDVER2 from http://www.agner.org/
1205 { ISD::FSUB, MVT::v4f64, { 2, 5, 1, 2 } }, // BDVER2 from http://www.agner.org/
1206 { ISD::FSUB, MVT::v8f32, { 2, 5, 1, 2 } }, // BDVER2 from http://www.agner.org/
1207
1208 { ISD::FMUL, MVT::f64, { 2, 5, 1, 1 } }, // BTVER2 from http://www.agner.org/
1209 { ISD::FMUL, MVT::f32, { 1, 5, 1, 1 } }, // BTVER2 from http://www.agner.org/
1210 { ISD::FMUL, MVT::v2f64, { 2, 5, 1, 1 } }, // BTVER2 from http://www.agner.org/
1211 { ISD::FMUL, MVT::v4f32, { 1, 5, 1, 1 } }, // BTVER2 from http://www.agner.org/
1212 { ISD::FMUL, MVT::v4f64, { 4, 5, 1, 2 } }, // BTVER2 from http://www.agner.org/
1213 { ISD::FMUL, MVT::v8f32, { 2, 5, 1, 2 } }, // BTVER2 from http://www.agner.org/
1214
1215 { ISD::FDIV, MVT::f32, { 14, 14, 1, 1 } }, // SNB from http://www.agner.org/
1216 { ISD::FDIV, MVT::v4f32, { 14, 14, 1, 1 } }, // SNB from http://www.agner.org/
1217 { ISD::FDIV, MVT::v8f32, { 28, 29, 1, 3 } }, // SNB from http://www.agner.org/
1218 { ISD::FDIV, MVT::f64, { 22, 22, 1, 1 } }, // SNB from http://www.agner.org/
1219 { ISD::FDIV, MVT::v2f64, { 22, 22, 1, 1 } }, // SNB from http://www.agner.org/
1220 { ISD::FDIV, MVT::v4f64, { 44, 45, 1, 3 } }, // SNB from http://www.agner.org/
1221 };
1222
1223 if (ST->hasAVX())
1224 if (const auto *Entry = CostTableLookup(AVX1CostTable, ISD, LT.second))
1225 if (auto KindCost = Entry->Cost[CostKind])
1226 return LT.first * KindCost.value();
1227
1228 static const CostKindTblEntry SSE42CostTable[] = {
1229 { ISD::FADD, MVT::f64, { 1, 3, 1, 1 } }, // Nehalem from http://www.agner.org/
1230 { ISD::FADD, MVT::f32, { 1, 3, 1, 1 } }, // Nehalem from http://www.agner.org/
1231 { ISD::FADD, MVT::v2f64, { 1, 3, 1, 1 } }, // Nehalem from http://www.agner.org/
1232 { ISD::FADD, MVT::v4f32, { 1, 3, 1, 1 } }, // Nehalem from http://www.agner.org/
1233
1234 { ISD::FSUB, MVT::f64, { 1, 3, 1, 1 } }, // Nehalem from http://www.agner.org/
1235 { ISD::FSUB, MVT::f32, { 1, 3, 1, 1 } }, // Nehalem from http://www.agner.org/
1236 { ISD::FSUB, MVT::v2f64, { 1, 3, 1, 1 } }, // Nehalem from http://www.agner.org/
1237 { ISD::FSUB, MVT::v4f32, { 1, 3, 1, 1 } }, // Nehalem from http://www.agner.org/
1238
1239 { ISD::FMUL, MVT::f64, { 1, 5, 1, 1 } }, // Nehalem from http://www.agner.org/
1240 { ISD::FMUL, MVT::f32, { 1, 5, 1, 1 } }, // Nehalem from http://www.agner.org/
1241 { ISD::FMUL, MVT::v2f64, { 1, 5, 1, 1 } }, // Nehalem from http://www.agner.org/
1242 { ISD::FMUL, MVT::v4f32, { 1, 5, 1, 1 } }, // Nehalem from http://www.agner.org/
1243
1244 { ISD::FDIV, MVT::f32, { 14, 14, 1, 1 } }, // Nehalem from http://www.agner.org/
1245 { ISD::FDIV, MVT::v4f32, { 14, 14, 1, 1 } }, // Nehalem from http://www.agner.org/
1246 { ISD::FDIV, MVT::f64, { 22, 22, 1, 1 } }, // Nehalem from http://www.agner.org/
1247 { ISD::FDIV, MVT::v2f64, { 22, 22, 1, 1 } }, // Nehalem from http://www.agner.org/
1248
1249 { ISD::MUL, MVT::v2i64, { 6, 10,10,10 } } // 3*pmuludq/3*shift/2*add
1250 };
1251
1252 if (ST->hasSSE42())
1253 if (const auto *Entry = CostTableLookup(SSE42CostTable, ISD, LT.second))
1254 if (auto KindCost = Entry->Cost[CostKind])
1255 return LT.first * KindCost.value();
1256
1257
1258 static const CostKindTblEntry SSE41CostTable[] = {
1259 { ISD::SHL, MVT::v16i8, { 15, 24,17,22 } }, // pblendvb sequence.
1260 { ISD::SHL, MVT::v8i16, { 11, 14,11,11 } }, // pblendvb sequence.
1261 { ISD::SHL, MVT::v4i32, { 14, 20, 4,10 } }, // pslld/paddd/cvttps2dq/pmulld
1262
1263 { ISD::SRL, MVT::v16i8, { 16, 27,18,24 } }, // pblendvb sequence.
1264 { ISD::SRL, MVT::v8i16, { 22, 26,23,27 } }, // pblendvb sequence.
1265 { ISD::SRL, MVT::v4i32, { 16, 17,15,19 } }, // Shift each lane + blend.
1266 { ISD::SRL, MVT::v2i64, { 4, 6, 5, 7 } }, // splat+shuffle sequence.
1267
1268 { ISD::SRA, MVT::v16i8, { 38, 41,30,36 } }, // pblendvb sequence.
1269 { ISD::SRA, MVT::v8i16, { 22, 26,23,27 } }, // pblendvb sequence.
1270 { ISD::SRA, MVT::v4i32, { 16, 17,15,19 } }, // Shift each lane + blend.
1271 { ISD::SRA, MVT::v2i64, { 8, 17, 5, 7 } }, // splat+shuffle sequence.
1272
1273 { ISD::MUL, MVT::v4i32, { 2, 11, 1, 1 } } // pmulld (Nehalem from agner.org)
1274 };
1275
1276 if (ST->hasSSE41())
1277 if (const auto *Entry = CostTableLookup(SSE41CostTable, ISD, LT.second))
1278 if (auto KindCost = Entry->Cost[CostKind])
1279 return LT.first * KindCost.value();
1280
1281 static const CostKindTblEntry SSE2CostTable[] = {
1282 // We don't correctly identify costs of casts because they are marked as
1283 // custom.
1284 { ISD::SHL, MVT::v16i8, { 13, 21,26,28 } }, // cmpgtb sequence.
1285 { ISD::SHL, MVT::v8i16, { 24, 27,16,20 } }, // cmpgtw sequence.
1286 { ISD::SHL, MVT::v4i32, { 17, 19,10,12 } }, // pslld/paddd/cvttps2dq/pmuludq.
1287 { ISD::SHL, MVT::v2i64, { 4, 6, 5, 7 } }, // splat+shuffle sequence.
1288
1289 { ISD::SRL, MVT::v16i8, { 14, 28,27,30 } }, // cmpgtb sequence.
1290 { ISD::SRL, MVT::v8i16, { 16, 19,31,31 } }, // cmpgtw sequence.
1291 { ISD::SRL, MVT::v4i32, { 12, 12,15,19 } }, // Shift each lane + blend.
1292 { ISD::SRL, MVT::v2i64, { 4, 6, 5, 7 } }, // splat+shuffle sequence.
1293
1294 { ISD::SRA, MVT::v16i8, { 27, 30,54,54 } }, // unpacked cmpgtb sequence.
1295 { ISD::SRA, MVT::v8i16, { 16, 19,31,31 } }, // cmpgtw sequence.
1296 { ISD::SRA, MVT::v4i32, { 12, 12,15,19 } }, // Shift each lane + blend.
1297 { ISD::SRA, MVT::v2i64, { 8, 11,12,16 } }, // srl/xor/sub splat+shuffle sequence.
1298
1299 { ISD::AND, MVT::v16i8, { 1, 1, 1, 1 } }, // pand
1300 { ISD::AND, MVT::v8i16, { 1, 1, 1, 1 } }, // pand
1301 { ISD::AND, MVT::v4i32, { 1, 1, 1, 1 } }, // pand
1302 { ISD::AND, MVT::v2i64, { 1, 1, 1, 1 } }, // pand
1303
1304 { ISD::OR, MVT::v16i8, { 1, 1, 1, 1 } }, // por
1305 { ISD::OR, MVT::v8i16, { 1, 1, 1, 1 } }, // por
1306 { ISD::OR, MVT::v4i32, { 1, 1, 1, 1 } }, // por
1307 { ISD::OR, MVT::v2i64, { 1, 1, 1, 1 } }, // por
1308
1309 { ISD::XOR, MVT::v16i8, { 1, 1, 1, 1 } }, // pxor
1310 { ISD::XOR, MVT::v8i16, { 1, 1, 1, 1 } }, // pxor
1311 { ISD::XOR, MVT::v4i32, { 1, 1, 1, 1 } }, // pxor
1312 { ISD::XOR, MVT::v2i64, { 1, 1, 1, 1 } }, // pxor
1313
1314 { ISD::ADD, MVT::v2i64, { 1, 2, 1, 2 } }, // paddq
1315 { ISD::SUB, MVT::v2i64, { 1, 2, 1, 2 } }, // psubq
1316
1317 { ISD::MUL, MVT::v8i16, { 1, 5, 1, 1 } }, // pmullw
1318 { ISD::MUL, MVT::v4i32, { 6, 8, 7, 7 } }, // 3*pmuludq/4*shuffle
1319 { ISD::MUL, MVT::v2i64, { 8, 10, 8, 8 } }, // 3*pmuludq/3*shift/2*add
1320
1321 { ISD::FDIV, MVT::f32, { 23, 23, 1, 1 } }, // Pentium IV from http://www.agner.org/
1322 { ISD::FDIV, MVT::v4f32, { 39, 39, 1, 1 } }, // Pentium IV from http://www.agner.org/
1323 { ISD::FDIV, MVT::f64, { 38, 38, 1, 1 } }, // Pentium IV from http://www.agner.org/
1324 { ISD::FDIV, MVT::v2f64, { 69, 69, 1, 1 } }, // Pentium IV from http://www.agner.org/
1325
1326 { ISD::FNEG, MVT::f32, { 1, 1, 1, 1 } }, // Pentium IV from http://www.agner.org/
1327 { ISD::FNEG, MVT::f64, { 1, 1, 1, 1 } }, // Pentium IV from http://www.agner.org/
1328 { ISD::FNEG, MVT::v4f32, { 1, 1, 1, 1 } }, // Pentium IV from http://www.agner.org/
1329 { ISD::FNEG, MVT::v2f64, { 1, 1, 1, 1 } }, // Pentium IV from http://www.agner.org/
1330
1331 { ISD::FADD, MVT::f32, { 2, 3, 1, 1 } }, // Pentium IV from http://www.agner.org/
1332 { ISD::FADD, MVT::f64, { 2, 3, 1, 1 } }, // Pentium IV from http://www.agner.org/
1333 { ISD::FADD, MVT::v2f64, { 2, 3, 1, 1 } }, // Pentium IV from http://www.agner.org/
1334
1335 { ISD::FSUB, MVT::f32, { 2, 3, 1, 1 } }, // Pentium IV from http://www.agner.org/
1336 { ISD::FSUB, MVT::f64, { 2, 3, 1, 1 } }, // Pentium IV from http://www.agner.org/
1337 { ISD::FSUB, MVT::v2f64, { 2, 3, 1, 1 } }, // Pentium IV from http://www.agner.org/
1338
1339 { ISD::FMUL, MVT::f64, { 2, 5, 1, 1 } }, // Pentium IV from http://www.agner.org/
1340 { ISD::FMUL, MVT::v2f64, { 2, 5, 1, 1 } }, // Pentium IV from http://www.agner.org/
1341 };
1342
1343 if (ST->hasSSE2())
1344 if (const auto *Entry = CostTableLookup(SSE2CostTable, ISD, LT.second))
1345 if (auto KindCost = Entry->Cost[CostKind])
1346 return LT.first * KindCost.value();
1347
1348 static const CostKindTblEntry SSE1CostTable[] = {
1349 { ISD::FDIV, MVT::f32, { 17, 18, 1, 1 } }, // Pentium III from http://www.agner.org/
1350 { ISD::FDIV, MVT::v4f32, { 34, 48, 1, 1 } }, // Pentium III from http://www.agner.org/
1351
1352 { ISD::FNEG, MVT::f32, { 2, 2, 1, 2 } }, // Pentium III from http://www.agner.org/
1353 { ISD::FNEG, MVT::v4f32, { 2, 2, 1, 2 } }, // Pentium III from http://www.agner.org/
1354
1355 { ISD::FADD, MVT::f32, { 1, 3, 1, 1 } }, // Pentium III from http://www.agner.org/
1356 { ISD::FADD, MVT::v4f32, { 2, 3, 1, 1 } }, // Pentium III from http://www.agner.org/
1357
1358 { ISD::FSUB, MVT::f32, { 1, 3, 1, 1 } }, // Pentium III from http://www.agner.org/
1359 { ISD::FSUB, MVT::v4f32, { 2, 3, 1, 1 } }, // Pentium III from http://www.agner.org/
1360
1361 { ISD::FMUL, MVT::f32, { 2, 5, 1, 1 } }, // Pentium III from http://www.agner.org/
1362 { ISD::FMUL, MVT::v4f32, { 2, 5, 1, 1 } }, // Pentium III from http://www.agner.org/
1363 };
1364
1365 if (ST->hasSSE1())
1366 if (const auto *Entry = CostTableLookup(SSE1CostTable, ISD, LT.second))
1367 if (auto KindCost = Entry->Cost[CostKind])
1368 return LT.first * KindCost.value();
1369
1370 static const CostKindTblEntry X64CostTbl[] = { // 64-bit targets
1371 { ISD::ADD, MVT::i64, { 1 } }, // Core (Merom) from http://www.agner.org/
1372 { ISD::SUB, MVT::i64, { 1 } }, // Core (Merom) from http://www.agner.org/
1373 { ISD::MUL, MVT::i64, { 2 } }, // Nehalem from http://www.agner.org/
1374 };
1375
1376 if (ST->is64Bit())
1377 if (const auto *Entry = CostTableLookup(X64CostTbl, ISD, LT.second))
1378 if (auto KindCost = Entry->Cost[CostKind])
1379 return LT.first * KindCost.value();
1380
1381 static const CostKindTblEntry X86CostTbl[] = { // 32 or 64-bit targets
1382 { ISD::ADD, MVT::i8, { 1 } }, // Pentium III from http://www.agner.org/
1383 { ISD::ADD, MVT::i16, { 1 } }, // Pentium III from http://www.agner.org/
1384 { ISD::ADD, MVT::i32, { 1 } }, // Pentium III from http://www.agner.org/
1385
1386 { ISD::SUB, MVT::i8, { 1 } }, // Pentium III from http://www.agner.org/
1387 { ISD::SUB, MVT::i16, { 1 } }, // Pentium III from http://www.agner.org/
1388 { ISD::SUB, MVT::i32, { 1 } }, // Pentium III from http://www.agner.org/
1389
1390 { ISD::FNEG, MVT::f64, { 2, 2, 1, 3 } }, // (x87)
1391 { ISD::FADD, MVT::f64, { 2, 3, 1, 1 } }, // (x87)
1392 { ISD::FSUB, MVT::f64, { 2, 3, 1, 1 } }, // (x87)
1393 { ISD::FMUL, MVT::f64, { 2, 5, 1, 1 } }, // (x87)
1394 { ISD::FDIV, MVT::f64, { 38, 38, 1, 1 } }, // (x87)
1395 };
1396
1397 if (const auto *Entry = CostTableLookup(X86CostTbl, ISD, LT.second))
1398 if (auto KindCost = Entry->Cost[CostKind])
1399 return LT.first * KindCost.value();
1400
1401 // It is not a good idea to vectorize division. We have to scalarize it and
1402 // in the process we will often end up having to spill regular
1403 // registers. The overhead of division is going to dominate most kernels
1404 // anyway, so try hard to prevent vectorization of division - it is
1405 // generally a bad idea. Assume somewhat arbitrarily that we have to be able
1406 // to hide "20 cycles" for each lane.
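// As a concrete (illustrative) instance of this rule: a v4i32 SDIV that
// legalizes to a single register (LT.first == 1) with scalar i32 SDIV cost C
// is charged 20 * 1 * 4 * C below, which is normally enough to keep the
// vectorizers away from the vector form.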
1407 if (CostKind == TTI::TCK_RecipThroughput && LT.second.isVector() &&
1408 (ISD == ISD::SDIV || ISD == ISD::SREM || ISD == ISD::UDIV ||
1409 ISD == ISD::UREM)) {
1410 InstructionCost ScalarCost =
1411 getArithmeticInstrCost(Opcode, Ty->getScalarType(), CostKind,
1412 Op1Info.getNoProps(), Op2Info.getNoProps());
1413 return 20 * LT.first * LT.second.getVectorNumElements() * ScalarCost;
1414 }
1415
1416 // Handle some basic single instruction code size cases.
1417 if (CostKind == TTI::TCK_CodeSize) {
1418 switch (ISD) {
1419 case ISD::FADD:
1420 case ISD::FSUB:
1421 case ISD::FMUL:
1422 case ISD::FDIV:
1423 case ISD::FNEG:
1424 case ISD::AND:
1425 case ISD::OR:
1426 case ISD::XOR:
1427 return LT.first;
1428 break;
1429 }
1430 }
1431
1432 // Fallback to the default implementation.
1433 return BaseT::getArithmeticInstrCost(Opcode, Ty, CostKind, Op1Info, Op2Info,
1434 Args, CxtI);
1435}
1436
1437InstructionCost X86TTIImpl::getShuffleCost(TTI::ShuffleKind Kind,
1438 VectorType *BaseTp,
1439 ArrayRef<int> Mask,
1440 TTI::TargetCostKind CostKind,
1441 int Index, VectorType *SubTp,
1442 ArrayRef<const Value *> Args) {
1443 // 64-bit packed float vectors (v2f32) are widened to type v4f32.
1444 // 64-bit packed integer vectors (v2i32) are widened to type v4i32.
1445 std::pair<InstructionCost, MVT> LT = getTypeLegalizationCost(BaseTp);
1446
1447 Kind = improveShuffleKindFromMask(Kind, Mask);
1448
1449 // Treat Transpose as 2-op shuffles - there's no difference in lowering.
1450 if (Kind == TTI::SK_Transpose)
1451 Kind = TTI::SK_PermuteTwoSrc;
1452
1453 // For Broadcasts we are splatting the first element from the first input
1454 // register, so we only need to reference that input, and all the output
1455 // registers are the same.
1456 if (Kind == TTI::SK_Broadcast)
1457 LT.first = 1;
1458
1459 // Subvector extractions are free if they start at the beginning of a
1460 // vector and cheap if the subvectors are aligned.
1461 if (Kind == TTI::SK_ExtractSubvector && LT.second.isVector()) {
1462 int NumElts = LT.second.getVectorNumElements();
1463 if ((Index % NumElts) == 0)
1464 return 0;
1465 std::pair<InstructionCost, MVT> SubLT = getTypeLegalizationCost(SubTp);
1466 if (SubLT.second.isVector()) {
1467 int NumSubElts = SubLT.second.getVectorNumElements();
1468 if ((Index % NumSubElts) == 0 && (NumElts % NumSubElts) == 0)
1469 return SubLT.first;
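// Illustrative example of the aligned case above: extracting a v4i32
// subvector from a legalized v16i32 at Index 4 or 8 passes both modulo
// checks and is costed at SubLT.first; Index 0 was already returned as free.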
1470 // Handle some cases for widening legalization. For now we only handle
1471 // cases where the original subvector is naturally aligned and fits evenly
1472 // in its legalized subvector type.
1473 // FIXME: Remove some of the alignment restrictions.
1474 // FIXME: We can use permq for 64-bit or larger extracts from 256-bit
1475 // vectors.
1476 int OrigSubElts = cast<FixedVectorType>(SubTp)->getNumElements();
1477 if (NumSubElts > OrigSubElts && (Index % OrigSubElts) == 0 &&
1478 (NumSubElts % OrigSubElts) == 0 &&
1479 LT.second.getVectorElementType() ==
1480 SubLT.second.getVectorElementType() &&
1481 LT.second.getVectorElementType().getSizeInBits() ==
1482 BaseTp->getElementType()->getPrimitiveSizeInBits()) {
1483 assert(NumElts >= NumSubElts && NumElts > OrigSubElts &&
1484        "Unexpected number of elements!");
1485 auto *VecTy = FixedVectorType::get(BaseTp->getElementType(),
1486 LT.second.getVectorNumElements());
1487 auto *SubTy = FixedVectorType::get(BaseTp->getElementType(),
1488 SubLT.second.getVectorNumElements());
1489 int ExtractIndex = alignDown((Index % NumElts), NumSubElts);
1490 InstructionCost ExtractCost =
1491 getShuffleCost(TTI::SK_ExtractSubvector, VecTy, None, CostKind,
1492 ExtractIndex, SubTy);
1493
1494 // If the original size is 32-bits or more, we can use pshufd. Otherwise
1495 // if we have SSSE3 we can use pshufb.
1496 if (SubTp->getPrimitiveSizeInBits() >= 32 || ST->hasSSSE3())
1497 return ExtractCost + 1; // pshufd or pshufb
1498
1499 assert(SubTp->getPrimitiveSizeInBits() == 16 &&
1500        "Unexpected vector size");
1501
1502 return ExtractCost + 2; // worst case pshufhw + pshufd
1503 }
1504 }
1505 }
1506
1507 // Subvector insertions are cheap if the subvectors are aligned.
1508 // Note that in general, the insertion starting at the beginning of a vector
1509 // isn't free, because we need to preserve the rest of the wide vector.
1510 if (Kind == TTI::SK_InsertSubvector && LT.second.isVector()) {
1511 int NumElts = LT.second.getVectorNumElements();
1512 std::pair<InstructionCost, MVT> SubLT = getTypeLegalizationCost(SubTp);
1513 if (SubLT.second.isVector()) {
1514 int NumSubElts = SubLT.second.getVectorNumElements();
1515 if ((Index % NumSubElts) == 0 && (NumElts % NumSubElts) == 0)
1516 return SubLT.first;
1517 }
1518
1519 // If the insertion isn't aligned, treat it like a 2-op shuffle.
1520 Kind = TTI::SK_PermuteTwoSrc;
1521 }
1522
1523 // Handle some common (illegal) sub-vector types as they are often very cheap
1524 // to shuffle even on targets without PSHUFB.
1525 EVT VT = TLI->getValueType(DL, BaseTp);
1526 if (VT.isSimple() && VT.isVector() && VT.getSizeInBits() < 128 &&
1527 !ST->hasSSSE3()) {
1528 static const CostTblEntry SSE2SubVectorShuffleTbl[] = {
1529 {TTI::SK_Broadcast, MVT::v4i16, 1}, // pshuflw
1530 {TTI::SK_Broadcast, MVT::v2i16, 1}, // pshuflw
1531 {TTI::SK_Broadcast, MVT::v8i8, 2}, // punpck/pshuflw
1532 {TTI::SK_Broadcast, MVT::v4i8, 2}, // punpck/pshuflw
1533 {TTI::SK_Broadcast, MVT::v2i8, 1}, // punpck
1534
1535 {TTI::SK_Reverse, MVT::v4i16, 1}, // pshuflw
1536 {TTI::SK_Reverse, MVT::v2i16, 1}, // pshuflw
1537 {TTI::SK_Reverse, MVT::v4i8, 3}, // punpck/pshuflw/packus
1538 {TTI::SK_Reverse, MVT::v2i8, 1}, // punpck
1539
1540 {TTI::SK_Splice, MVT::v4i16, 2}, // punpck+psrldq
1541 {TTI::SK_Splice, MVT::v2i16, 2}, // punpck+psrldq
1542 {TTI::SK_Splice, MVT::v4i8, 2}, // punpck+psrldq
1543 {TTI::SK_Splice, MVT::v2i8, 2}, // punpck+psrldq
1544
1545 {TTI::SK_PermuteTwoSrc, MVT::v4i16, 2}, // punpck/pshuflw
1546 {TTI::SK_PermuteTwoSrc, MVT::v2i16, 2}, // punpck/pshuflw
1547 {TTI::SK_PermuteTwoSrc, MVT::v8i8, 7}, // punpck/pshuflw
1548 {TTI::SK_PermuteTwoSrc, MVT::v4i8, 4}, // punpck/pshuflw
1549 {TTI::SK_PermuteTwoSrc, MVT::v2i8, 2}, // punpck
1550
1551 {TTI::SK_PermuteSingleSrc, MVT::v4i16, 1}, // pshuflw
1552 {TTI::SK_PermuteSingleSrc, MVT::v2i16, 1}, // pshuflw
1553 {TTI::SK_PermuteSingleSrc, MVT::v8i8, 5}, // punpck/pshuflw
1554 {TTI::SK_PermuteSingleSrc, MVT::v4i8, 3}, // punpck/pshuflw
1555 {TTI::SK_PermuteSingleSrc, MVT::v2i8, 1}, // punpck
1556 };
1557
1558 if (ST->hasSSE2())
1559 if (const auto *Entry =
1560 CostTableLookup(SSE2SubVectorShuffleTbl, Kind, VT.getSimpleVT()))
1561 return Entry->Cost;
1562 }
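// Note that the sub-vector lookup above returns Entry->Cost without the usual
// LT.first multiplier, presumably because these sub-128-bit types are widened
// rather than split by legalization, so there is no register-count factor to
// apply.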
1563
1564 // We are going to permute multiple sources and the result will be in multiple
1565 // destinations. We provide an accurate cost only for splits where the element
1566 // type remains the same.
1567 if (Kind == TTI::SK_PermuteSingleSrc && LT.first != 1) {
1568 MVT LegalVT = LT.second;
1569 if (LegalVT.isVector() &&
1570 LegalVT.getVectorElementType().getSizeInBits() ==
1571 BaseTp->getElementType()->getPrimitiveSizeInBits() &&
1572 LegalVT.getVectorNumElements() <
1573 cast<FixedVectorType>(BaseTp)->getNumElements()) {
1574
1575 unsigned VecTySize = DL.getTypeStoreSize(BaseTp);
1576 unsigned LegalVTSize = LegalVT.getStoreSize();
1577 // Number of source vectors after legalization:
1578 unsigned NumOfSrcs = (VecTySize + LegalVTSize - 1) / LegalVTSize;
1579 // Number of destination vectors after legalization:
1580 InstructionCost NumOfDests = LT.first;
1581
1582 auto *SingleOpTy = FixedVectorType::get(BaseTp->getElementType(),
1583 LegalVT.getVectorNumElements());
1584
1585 if (!Mask.empty() && NumOfDests.isValid()) {
1586 // Try to perform better estimation of the permutation.
1587 // 1. Split the source/destination vectors into real registers.
1588 // 2. Do the mask analysis to identify which real registers are
1589 // permuted. If more than 1 source registers are used for the
1590 // destination register building, the cost for this destination register
1591 // is (Number_of_source_register - 1) * Cost_PermuteTwoSrc. If only one
1592 // source register is used, build mask and calculate the cost as a cost
1593 // of PermuteSingleSrc.
1594 // Also, for the single register permute we try to identify if the
1595 // destination register is just a copy of the source register or the
1596 // copy of the previous destination register (the cost is
1597 // TTI::TCC_Basic). If the source register is just reused, the cost for
1598 // this operation is 0.
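// Illustrative walk-through of the scheme above: a v16i32 single-source
// permute on a target whose legal type is v4i32 gives NumOfSrcRegs ==
// NumOfDestRegs == 4; each destination register built from a single source
// register costs one SK_PermuteSingleSrc (or TTI::TCC_Basic for a plain
// copy), while one that mixes lanes from two source registers is costed by
// the SK_PermuteTwoSrc callback below.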
1599 unsigned E = *NumOfDests.getValue();
1600 unsigned NormalizedVF =
1601 LegalVT.getVectorNumElements() * std::max(NumOfSrcs, E);
1602 unsigned NumOfSrcRegs = NormalizedVF / LegalVT.getVectorNumElements();
1603 unsigned NumOfDestRegs = NormalizedVF / LegalVT.getVectorNumElements();
1604 SmallVector<int> NormalizedMask(NormalizedVF, UndefMaskElem);
1605 copy(Mask, NormalizedMask.begin());
1606 unsigned PrevSrcReg = 0;
1607 ArrayRef<int> PrevRegMask;
1608 InstructionCost Cost = 0;
1609 processShuffleMasks(
1610 NormalizedMask, NumOfSrcRegs, NumOfDestRegs, NumOfDestRegs, []() {},
1611 [this, SingleOpTy, CostKind, &PrevSrcReg, &PrevRegMask,
1612 &Cost](ArrayRef<int> RegMask, unsigned SrcReg, unsigned DestReg) {
1613 if (!ShuffleVectorInst::isIdentityMask(RegMask)) {
1614 // Check if the previous register can be just copied to the next
1615 // one.
1616 if (PrevRegMask.empty() || PrevSrcReg != SrcReg ||
1617 PrevRegMask != RegMask)
1618 Cost += getShuffleCost(TTI::SK_PermuteSingleSrc, SingleOpTy,
1619 RegMask, CostKind, 0, nullptr);
1620 else
1621 // Just a copy of previous destination register.
1622 Cost += TTI::TCC_Basic;
1623 return;
1624 }
1625 if (SrcReg != DestReg &&
1626 any_of(RegMask, [](int I) { return I != UndefMaskElem; })) {
1627 // Just a copy of the source register.
1628 Cost += TTI::TCC_Basic;
1629 }
1630 PrevSrcReg = SrcReg;
1631 PrevRegMask = RegMask;
1632 },
1633 [this, SingleOpTy, CostKind, &Cost](ArrayRef<int> RegMask,
1634 unsigned /*Unused*/,
1635 unsigned /*Unused*/) {
1636 Cost += getShuffleCost(TTI::SK_PermuteTwoSrc, SingleOpTy, RegMask,
1637 CostKind, 0, nullptr);
1638 });
1639 return Cost;
1640 }
1641
1642 InstructionCost NumOfShuffles = (NumOfSrcs - 1) * NumOfDests;
1643 return NumOfShuffles * getShuffleCost(TTI::SK_PermuteTwoSrc, SingleOpTy,
1644 None, CostKind, 0, nullptr);
1645 }
1646
1647 return BaseT::getShuffleCost(Kind, BaseTp, Mask, CostKind, Index, SubTp);
1648 }
1649
1650 // For 2-input shuffles, we must account for splitting the 2 inputs into many.
1651 if (Kind == TTI::SK_PermuteTwoSrc && LT.first != 1) {
1652 // We assume that source and destination have the same vector type.
1653 InstructionCost NumOfDests = LT.first;
1654 InstructionCost NumOfShufflesPerDest = LT.first * 2 - 1;
1655 LT.first = NumOfDests * NumOfShufflesPerDest;
1656 }
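// Illustrative arithmetic for the adjustment above: a type that legalizes
// into two registers (LT.first == 2) is charged 2 destinations * (2 * 2 - 1)
// = 6 two-input shuffles before the per-entry table cost is applied.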
1657
1658 static const CostTblEntry AVX512VBMIShuffleTbl[] = {
1659 {TTI::SK_Reverse, MVT::v64i8, 1}, // vpermb
1660 {TTI::SK_Reverse, MVT::v32i8, 1}, // vpermb
1661
1662 {TTI::SK_PermuteSingleSrc, MVT::v64i8, 1}, // vpermb
1663 {TTI::SK_PermuteSingleSrc, MVT::v32i8, 1}, // vpermb
1664
1665 {TTI::SK_PermuteTwoSrc, MVT::v64i8, 2}, // vpermt2b
1666 {TTI::SK_PermuteTwoSrc, MVT::v32i8, 2}, // vpermt2b
1667 {TTI::SK_PermuteTwoSrc, MVT::v16i8, 2} // vpermt2b
1668 };
1669
1670 if (ST->hasVBMI())
1671 if (const auto *Entry =
1672 CostTableLookup(AVX512VBMIShuffleTbl, Kind, LT.second))
1673 return LT.first * Entry->Cost;
1674
1675 static const CostTblEntry AVX512BWShuffleTbl[] = {
1676 {TTI::SK_Broadcast, MVT::v32i16, 1}, // vpbroadcastw
1677 {TTI::SK_Broadcast, MVT::v32f16, 1}, // vpbroadcastw
1678 {TTI::SK_Broadcast, MVT::v64i8, 1}, // vpbroadcastb
1679
1680 {TTI::SK_Reverse, MVT::v32i16, 2}, // vpermw
1681 {TTI::SK_Reverse, MVT::v32f16, 2}, // vpermw
1682 {TTI::SK_Reverse, MVT::v16i16, 2}, // vpermw
1683 {TTI::SK_Reverse, MVT::v64i8, 2}, // pshufb + vshufi64x2
1684
1685 {TTI::SK_PermuteSingleSrc, MVT::v32i16, 2}, // vpermw
1686 {TTI::SK_PermuteSingleSrc, MVT::v32f16, 2}, // vpermw
1687 {TTI::SK_PermuteSingleSrc, MVT::v16i16, 2}, // vpermw
1688 {TTI::SK_PermuteSingleSrc, MVT::v16f16, 2}, // vpermw
1689 {TTI::SK_PermuteSingleSrc, MVT::v64i8, 8}, // extend to v32i16
1690
1691 {TTI::SK_PermuteTwoSrc, MVT::v32i16, 2}, // vpermt2w
1692 {TTI::SK_PermuteTwoSrc, MVT::v32f16, 2}, // vpermt2w
1693 {TTI::SK_PermuteTwoSrc, MVT::v16i16, 2}, // vpermt2w
1694 {TTI::SK_PermuteTwoSrc, MVT::v8i16, 2}, // vpermt2w
1695 {TTI::SK_PermuteTwoSrc, MVT::v64i8, 19}, // 6 * v32i8 + 1
1696
1697 {TTI::SK_Select, MVT::v32i16, 1}, // vblendmw
1698 {TTI::SK_Select, MVT::v64i8, 1}, // vblendmb
1699
1700 {TTI::SK_Splice, MVT::v32i16, 2}, // vshufi64x2 + palignr
1701 {TTI::SK_Splice, MVT::v32f16, 2}, // vshufi64x2 + palignr
1702 {TTI::SK_Splice, MVT::v64i8, 2}, // vshufi64x2 + palignr
1703 };
1704
1705 if (ST->hasBWI())
1706 if (const auto *Entry =
1707 CostTableLookup(AVX512BWShuffleTbl, Kind, LT.second))
1708 return LT.first * Entry->Cost;
1709
1710 static const CostTblEntry AVX512ShuffleTbl[] = {
1711 {TTI::SK_Broadcast, MVT::v8f64, 1}, // vbroadcastpd
1712 {TTI::SK_Broadcast, MVT::v16f32, 1}, // vbroadcastps
1713 {TTI::SK_Broadcast, MVT::v8i64, 1}, // vpbroadcastq
1714 {TTI::SK_Broadcast, MVT::v16i32, 1}, // vpbroadcastd
1715 {TTI::SK_Broadcast, MVT::v32i16, 1}, // vpbroadcastw
1716 {TTI::SK_Broadcast, MVT::v32f16, 1}, // vpbroadcastw
1717 {TTI::SK_Broadcast, MVT::v64i8, 1}, // vpbroadcastb
1718
1719 {TTI::SK_Reverse, MVT::v8f64, 1}, // vpermpd
1720 {TTI::SK_Reverse, MVT::v16f32, 1}, // vpermps
1721 {TTI::SK_Reverse, MVT::v8i64, 1}, // vpermq
1722 {TTI::SK_Reverse, MVT::v16i32, 1}, // vpermd
1723 {TTI::SK_Reverse, MVT::v32i16, 7}, // per mca
1724 {TTI::SK_Reverse, MVT::v32f16, 7}, // per mca
1725 {TTI::SK_Reverse, MVT::v64i8, 7}, // per mca
1726
1727 {TTI::SK_Splice, MVT::v8f64, 1}, // vpalignd
1728 {TTI::SK_Splice, MVT::v4f64, 1}, // vpalignd
1729 {TTI::SK_Splice, MVT::v16f32, 1}, // vpalignd
1730 {TTI::SK_Splice, MVT::v8f32, 1}, // vpalignd
1731 {TTI::SK_Splice, MVT::v8i64, 1}, // vpalignd
1732 {TTI::SK_Splice, MVT::v4i64, 1}, // vpalignd
1733 {TTI::SK_Splice, MVT::v16i32, 1}, // vpalignd
1734 {TTI::SK_Splice, MVT::v8i32, 1}, // vpalignd
1735 {TTI::SK_Splice, MVT::v32i16, 4}, // split + palignr
1736 {TTI::SK_Splice, MVT::v32f16, 4}, // split + palignr
1737 {TTI::SK_Splice, MVT::v64i8, 4}, // split + palignr
1738
1739 {TTI::SK_PermuteSingleSrc, MVT::v8f64, 1}, // vpermpd
1740 {TTI::SK_PermuteSingleSrc, MVT::v4f64, 1}, // vpermpd
1741 {TTI::SK_PermuteSingleSrc, MVT::v2f64, 1}, // vpermpd
1742 {TTI::SK_PermuteSingleSrc, MVT::v16f32, 1}, // vpermps
1743 {TTI::SK_PermuteSingleSrc, MVT::v8f32, 1}, // vpermps
1744 {TTI::SK_PermuteSingleSrc, MVT::v4f32, 1}, // vpermps
1745 {TTI::SK_PermuteSingleSrc, MVT::v8i64, 1}, // vpermq
1746 {TTI::SK_PermuteSingleSrc, MVT::v4i64, 1}, // vpermq
1747 {TTI::SK_PermuteSingleSrc, MVT::v2i64, 1}, // vpermq
1748 {TTI::SK_PermuteSingleSrc, MVT::v16i32, 1}, // vpermd
1749 {TTI::SK_PermuteSingleSrc, MVT::v8i32, 1}, // vpermd
1750 {TTI::SK_PermuteSingleSrc, MVT::v4i32, 1}, // vpermd
1751 {TTI::SK_PermuteSingleSrc, MVT::v16i8, 1}, // pshufb
1752
1753 {TTI::SK_PermuteTwoSrc, MVT::v8f64, 1}, // vpermt2pd
1754 {TTI::SK_PermuteTwoSrc, MVT::v16f32, 1}, // vpermt2ps
1755 {TTI::SK_PermuteTwoSrc, MVT::v8i64, 1}, // vpermt2q
1756 {TTI::SK_PermuteTwoSrc, MVT::v16i32, 1}, // vpermt2d
1757 {TTI::SK_PermuteTwoSrc, MVT::v4f64, 1}, // vpermt2pd
1758 {TTI::SK_PermuteTwoSrc, MVT::v8f32, 1}, // vpermt2ps
1759 {TTI::SK_PermuteTwoSrc, MVT::v4i64, 1}, // vpermt2q
1760 {TTI::SK_PermuteTwoSrc, MVT::v8i32, 1}, // vpermt2d
1761 {TTI::SK_PermuteTwoSrc, MVT::v2f64, 1}, // vpermt2pd
1762 {TTI::SK_PermuteTwoSrc, MVT::v4f32, 1}, // vpermt2ps
1763 {TTI::SK_PermuteTwoSrc, MVT::v2i64, 1}, // vpermt2q
1764 {TTI::SK_PermuteTwoSrc, MVT::v4i32, 1}, // vpermt2d
1765
1766 // FIXME: This just applies the type legalization cost rules above
1767 // assuming these completely split.
1768 {TTI::SK_PermuteSingleSrc, MVT::v32i16, 14},
1769 {TTI::SK_PermuteSingleSrc, MVT::v32f16, 14},
1770 {TTI::SK_PermuteSingleSrc, MVT::v64i8, 14},
1771 {TTI::SK_PermuteTwoSrc, MVT::v32i16, 42},
1772 {TTI::SK_PermuteTwoSrc, MVT::v32f16, 42},
1773 {TTI::SK_PermuteTwoSrc, MVT::v64i8, 42},
1774
1775 {TTI::SK_Select, MVT::v32i16, 1}, // vpternlogq
1776 {TTI::SK_Select, MVT::v32f16, 1}, // vpternlogq
1777 {TTI::SK_Select, MVT::v64i8, 1}, // vpternlogq
1778 {TTI::SK_Select, MVT::v8f64, 1}, // vblendmpd
1779 {TTI::SK_Select, MVT::v16f32, 1}, // vblendmps
1780 {TTI::SK_Select, MVT::v8i64, 1}, // vblendmq
1781 {TTI::SK_Select, MVT::v16i32, 1}, // vblendmd
1782 };
1783
1784 if (ST->hasAVX512())
1785 if (const auto *Entry = CostTableLookup(AVX512ShuffleTbl, Kind, LT.second))
1786 return LT.first * Entry->Cost;
1787
1788 static const CostTblEntry AVX2ShuffleTbl[] = {
1789 {TTI::SK_Broadcast, MVT::v4f64, 1}, // vbroadcastpd
1790 {TTI::SK_Broadcast, MVT::v8f32, 1}, // vbroadcastps
1791 {TTI::SK_Broadcast, MVT::v4i64, 1}, // vpbroadcastq
1792 {TTI::SK_Broadcast, MVT::v8i32, 1}, // vpbroadcastd
1793 {TTI::SK_Broadcast, MVT::v16i16, 1}, // vpbroadcastw
1794 {TTI::SK_Broadcast, MVT::v16f16, 1}, // vpbroadcastw
1795 {TTI::SK_Broadcast, MVT::v32i8, 1}, // vpbroadcastb
1796
1797 {TTI::SK_Reverse, MVT::v4f64, 1}, // vpermpd
1798 {TTI::SK_Reverse, MVT::v8f32, 1}, // vpermps
1799 {TTI::SK_Reverse, MVT::v4i64, 1}, // vpermq
1800 {TTI::SK_Reverse, MVT::v8i32, 1}, // vpermd
1801 {TTI::SK_Reverse, MVT::v16i16, 2}, // vperm2i128 + pshufb
1802 {TTI::SK_Reverse, MVT::v16f16, 2}, // vperm2i128 + pshufb
1803 {TTI::SK_Reverse, MVT::v32i8, 2}, // vperm2i128 + pshufb
1804
1805 {TTI::SK_Select, MVT::v16i16, 1}, // vpblendvb
1806 {TTI::SK_Select, MVT::v16f16, 1}, // vpblendvb
1807 {TTI::SK_Select, MVT::v32i8, 1}, // vpblendvb
1808
1809 {TTI::SK_Splice, MVT::v8i32, 2}, // vperm2i128 + vpalignr
1810 {TTI::SK_Splice, MVT::v8f32, 2}, // vperm2i128 + vpalignr
1811 {TTI::SK_Splice, MVT::v16i16, 2}, // vperm2i128 + vpalignr
1812 {TTI::SK_Splice, MVT::v16f16, 2}, // vperm2i128 + vpalignr
1813 {TTI::SK_Splice, MVT::v32i8, 2}, // vperm2i128 + vpalignr
1814
1815 {TTI::SK_PermuteSingleSrc, MVT::v4f64, 1}, // vpermpd
1816 {TTI::SK_PermuteSingleSrc, MVT::v8f32, 1}, // vpermps
1817 {TTI::SK_PermuteSingleSrc, MVT::v4i64, 1}, // vpermq
1818 {TTI::SK_PermuteSingleSrc, MVT::v8i32, 1}, // vpermd
1819 {TTI::SK_PermuteSingleSrc, MVT::v16i16, 4}, // vperm2i128 + 2*vpshufb
1820 // + vpblendvb
1821 {TTI::SK_PermuteSingleSrc, MVT::v16f16, 4}, // vperm2i128 + 2*vpshufb
1822 // + vpblendvb
1823 {TTI::SK_PermuteSingleSrc, MVT::v32i8, 4}, // vperm2i128 + 2*vpshufb
1824 // + vpblendvb
1825
1826 {TTI::SK_PermuteTwoSrc, MVT::v4f64, 3}, // 2*vpermpd + vblendpd
1827 {TTI::SK_PermuteTwoSrc, MVT::v8f32, 3}, // 2*vpermps + vblendps
1828 {TTI::SK_PermuteTwoSrc, MVT::v4i64, 3}, // 2*vpermq + vpblendd
1829 {TTI::SK_PermuteTwoSrc, MVT::v8i32, 3}, // 2*vpermd + vpblendd
1830 {TTI::SK_PermuteTwoSrc, MVT::v16i16, 7}, // 2*vperm2i128 + 4*vpshufb
1831 // + vpblendvb
1832 {TTI::SK_PermuteTwoSrc, MVT::v16f16, 7}, // 2*vperm2i128 + 4*vpshufb
1833 // + vpblendvb
1834 {TTI::SK_PermuteTwoSrc, MVT::v32i8, 7}, // 2*vperm2i128 + 4*vpshufb
1835 // + vpblendvb
1836 };
1837
1838 if (ST->hasAVX2())
1839 if (const auto *Entry = CostTableLookup(AVX2ShuffleTbl, Kind, LT.second))
1840 return LT.first * Entry->Cost;
1841
1842 static const CostTblEntry XOPShuffleTbl[] = {
1843 {TTI::SK_PermuteSingleSrc, MVT::v4f64, 2}, // vperm2f128 + vpermil2pd
1844 {TTI::SK_PermuteSingleSrc, MVT::v8f32, 2}, // vperm2f128 + vpermil2ps
1845 {TTI::SK_PermuteSingleSrc, MVT::v4i64, 2}, // vperm2f128 + vpermil2pd
1846 {TTI::SK_PermuteSingleSrc, MVT::v8i32, 2}, // vperm2f128 + vpermil2ps
1847 {TTI::SK_PermuteSingleSrc, MVT::v16i16, 4}, // vextractf128 + 2*vpperm
1848 // + vinsertf128
1849 {TTI::SK_PermuteSingleSrc, MVT::v32i8, 4}, // vextractf128 + 2*vpperm
1850 // + vinsertf128
1851
1852 {TTI::SK_PermuteTwoSrc, MVT::v16i16, 9}, // 2*vextractf128 + 6*vpperm
1853 // + vinsertf128
1854 {TTI::SK_PermuteTwoSrc, MVT::v8i16, 1}, // vpperm
1855 {TTI::SK_PermuteTwoSrc, MVT::v32i8, 9}, // 2*vextractf128 + 6*vpperm
1856 // + vinsertf128
1857 {TTI::SK_PermuteTwoSrc, MVT::v16i8, 1}, // vpperm
1858 };
1859
1860 if (ST->hasXOP())
1861 if (const auto *Entry = CostTableLookup(XOPShuffleTbl, Kind, LT.second))
1862 return LT.first * Entry->Cost;
1863
1864 static const CostTblEntry AVX1ShuffleTbl[] = {
1865 {TTI::SK_Broadcast, MVT::v4f64, 2}, // vperm2f128 + vpermilpd
1866 {TTI::SK_Broadcast, MVT::v8f32, 2}, // vperm2f128 + vpermilps
1867 {TTI::SK_Broadcast, MVT::v4i64, 2}, // vperm2f128 + vpermilpd
1868 {TTI::SK_Broadcast, MVT::v8i32, 2}, // vperm2f128 + vpermilps
1869 {TTI::SK_Broadcast, MVT::v16i16, 3}, // vpshuflw + vpshufd + vinsertf128
1870 {TTI::SK_Broadcast, MVT::v16f16, 3}, // vpshuflw + vpshufd + vinsertf128
1871 {TTI::SK_Broadcast, MVT::v32i8, 2}, // vpshufb + vinsertf128
1872
1873 {TTI::SK_Reverse, MVT::v4f64, 2}, // vperm2f128 + vpermilpd
1874 {TTI::SK_Reverse, MVT::v8f32, 2}, // vperm2f128 + vpermilps
1875 {TTI::SK_Reverse, MVT::v4i64, 2}, // vperm2f128 + vpermilpd
1876 {TTI::SK_Reverse, MVT::v8i32, 2}, // vperm2f128 + vpermilps
1877 {TTI::SK_Reverse, MVT::v16i16, 4}, // vextractf128 + 2*pshufb
1878 // + vinsertf128
1879 {TTI::SK_Reverse, MVT::v16f16, 4}, // vextractf128 + 2*pshufb
1880 // + vinsertf128
1881 {TTI::SK_Reverse, MVT::v32i8, 4}, // vextractf128 + 2*pshufb
1882 // + vinsertf128
1883
1884 {TTI::SK_Select, MVT::v4i64, 1}, // vblendpd
1885 {TTI::SK_Select, MVT::v4f64, 1}, // vblendpd
1886 {TTI::SK_Select, MVT::v8i32, 1}, // vblendps
1887 {TTI::SK_Select, MVT::v8f32, 1}, // vblendps
1888 {TTI::SK_Select, MVT::v16i16, 3}, // vpand + vpandn + vpor
1889 {TTI::SK_Select, MVT::v16f16, 3}, // vpand + vpandn + vpor
1890 {TTI::SK_Select, MVT::v32i8, 3}, // vpand + vpandn + vpor
1891
1892 {TTI::SK_Splice, MVT::v4i64, 2}, // vperm2f128 + shufpd
1893 {TTI::SK_Splice, MVT::v4f64, 2}, // vperm2f128 + shufpd
1894 {TTI::SK_Splice, MVT::v8i32, 4}, // 2*vperm2f128 + 2*vshufps
1895 {TTI::SK_Splice, MVT::v8f32, 4}, // 2*vperm2f128 + 2*vshufps
1896 {TTI::SK_Splice, MVT::v16i16, 5}, // 2*vperm2f128 + 2*vpalignr + vinsertf128
1897 {TTI::SK_Splice, MVT::v16f16, 5}, // 2*vperm2f128 + 2*vpalignr + vinsertf128
1898 {TTI::SK_Splice, MVT::v32i8, 5}, // 2*vperm2f128 + 2*vpalignr + vinsertf128
1899
1900 {TTI::SK_PermuteSingleSrc, MVT::v4f64, 2}, // vperm2f128 + vshufpd
1901 {TTI::SK_PermuteSingleSrc, MVT::v4i64, 2}, // vperm2f128 + vshufpd
1902 {TTI::SK_PermuteSingleSrc, MVT::v8f32, 4}, // 2*vperm2f128 + 2*vshufps
1903 {TTI::SK_PermuteSingleSrc, MVT::v8i32, 4}, // 2*vperm2f128 + 2*vshufps
1904 {TTI::SK_PermuteSingleSrc, MVT::v16i16, 8}, // vextractf128 + 4*pshufb
1905 // + 2*por + vinsertf128
1906 {TTI::SK_PermuteSingleSrc, MVT::v16f16, 8}, // vextractf128 + 4*pshufb
1907 // + 2*por + vinsertf128
1908 {TTI::SK_PermuteSingleSrc, MVT::v32i8, 8}, // vextractf128 + 4*pshufb
1909 // + 2*por + vinsertf128
1910
1911 {TTI::SK_PermuteTwoSrc, MVT::v4f64, 3}, // 2*vperm2f128 + vshufpd
1912 {TTI::SK_PermuteTwoSrc, MVT::v4i64, 3}, // 2*vperm2f128 + vshufpd
1913 {TTI::SK_PermuteTwoSrc, MVT::v8f32, 4}, // 2*vperm2f128 + 2*vshufps
1914 {TTI::SK_PermuteTwoSrc, MVT::v8i32, 4}, // 2*vperm2f128 + 2*vshufps
1915 {TTI::SK_PermuteTwoSrc, MVT::v16i16, 15}, // 2*vextractf128 + 8*pshufb
1916 // + 4*por + vinsertf128
1917 {TTI::SK_PermuteTwoSrc, MVT::v16f16, 15}, // 2*vextractf128 + 8*pshufb
1918 // + 4*por + vinsertf128
1919 {TTI::SK_PermuteTwoSrc, MVT::v32i8, 15}, // 2*vextractf128 + 8*pshufb
1920 // + 4*por + vinsertf128
1921 };
1922
1923 if (ST->hasAVX())
1924 if (const auto *Entry = CostTableLookup(AVX1ShuffleTbl, Kind, LT.second))
1925 return LT.first * Entry->Cost;
1926
1927 static const CostTblEntry SSE41ShuffleTbl[] = {
1928 {TTI::SK_Select, MVT::v2i64, 1}, // pblendw
1929 {TTI::SK_Select, MVT::v2f64, 1}, // movsd
1930 {TTI::SK_Select, MVT::v4i32, 1}, // pblendw
1931 {TTI::SK_Select, MVT::v4f32, 1}, // blendps
1932 {TTI::SK_Select, MVT::v8i16, 1}, // pblendw
1933 {TTI::SK_Select, MVT::v8f16, 1}, // pblendw
1934 {TTI::SK_Select, MVT::v16i8, 1} // pblendvb
1935 };
1936
1937 if (ST->hasSSE41())
1938 if (const auto *Entry = CostTableLookup(SSE41ShuffleTbl, Kind, LT.second))
1939 return LT.first * Entry->Cost;
1940
1941 static const CostTblEntry SSSE3ShuffleTbl[] = {
1942 {TTI::SK_Broadcast, MVT::v8i16, 1}, // pshufb
1943 {TTI::SK_Broadcast, MVT::v8f16, 1}, // pshufb
1944 {TTI::SK_Broadcast, MVT::v16i8, 1}, // pshufb
1945
1946 {TTI::SK_Reverse, MVT::v8i16, 1}, // pshufb
1947 {TTI::SK_Reverse, MVT::v8f16, 1}, // pshufb
1948 {TTI::SK_Reverse, MVT::v16i8, 1}, // pshufb
1949
1950 {TTI::SK_Select, MVT::v8i16, 3}, // 2*pshufb + por
1951 {TTI::SK_Select, MVT::v8f16, 3}, // 2*pshufb + por
1952 {TTI::SK_Select, MVT::v16i8, 3}, // 2*pshufb + por
1953
1954 {TTI::SK_Splice, MVT::v4i32, 1}, // palignr
1955 {TTI::SK_Splice, MVT::v4f32, 1}, // palignr
1956 {TTI::SK_Splice, MVT::v8i16, 1}, // palignr
1957 {TTI::SK_Splice, MVT::v8f16, 1}, // palignr
1958 {TTI::SK_Splice, MVT::v16i8, 1}, // palignr
1959
1960 {TTI::SK_PermuteSingleSrc, MVT::v8i16, 1}, // pshufb
1961 {TTI::SK_PermuteSingleSrc, MVT::v8f16, 1}, // pshufb
1962 {TTI::SK_PermuteSingleSrc, MVT::v16i8, 1}, // pshufb
1963
1964 {TTI::SK_PermuteTwoSrc, MVT::v8i16, 3}, // 2*pshufb + por
1965 {TTI::SK_PermuteTwoSrc, MVT::v8f16, 3}, // 2*pshufb + por
1966 {TTI::SK_PermuteTwoSrc, MVT::v16i8, 3}, // 2*pshufb + por
1967 };
1968
1969 if (ST->hasSSSE3())
1970 if (const auto *Entry = CostTableLookup(SSSE3ShuffleTbl, Kind, LT.second))
1971 return LT.first * Entry->Cost;
1972
1973 static const CostTblEntry SSE2ShuffleTbl[] = {
1974 {TTI::SK_Broadcast, MVT::v2f64, 1}, // shufpd
1975 {TTI::SK_Broadcast, MVT::v2i64, 1}, // pshufd
1976 {TTI::SK_Broadcast, MVT::v4i32, 1}, // pshufd
1977 {TTI::SK_Broadcast, MVT::v8i16, 2}, // pshuflw + pshufd
1978 {TTI::SK_Broadcast, MVT::v8f16, 2}, // pshuflw + pshufd
1979 {TTI::SK_Broadcast, MVT::v16i8, 3}, // unpck + pshuflw + pshufd
1980
1981 {TTI::SK_Reverse, MVT::v2f64, 1}, // shufpd
1982 {TTI::SK_Reverse, MVT::v2i64, 1}, // pshufd
1983 {TTI::SK_Reverse, MVT::v4i32, 1}, // pshufd
1984 {TTI::SK_Reverse, MVT::v8i16, 3}, // pshuflw + pshufhw + pshufd
1985 {TTI::SK_Reverse, MVT::v8f16, 3}, // pshuflw + pshufhw + pshufd
1986 {TTI::SK_Reverse, MVT::v16i8, 9}, // 2*pshuflw + 2*pshufhw
1987 // + 2*pshufd + 2*unpck + packus
1988
1989 {TTI::SK_Select, MVT::v2i64, 1}, // movsd
1990 {TTI::SK_Select, MVT::v2f64, 1}, // movsd
1991 {TTI::SK_Select, MVT::v4i32, 2}, // 2*shufps
1992 {TTI::SK_Select, MVT::v8i16, 3}, // pand + pandn + por
1993 {TTI::SK_Select, MVT::v8f16, 3}, // pand + pandn + por
1994 {TTI::SK_Select, MVT::v16i8, 3}, // pand + pandn + por
1995
1996 {TTI::SK_Splice, MVT::v2i64, 1}, // shufpd
1997 {TTI::SK_Splice, MVT::v2f64, 1}, // shufpd
1998 {TTI::SK_Splice, MVT::v4i32, 2}, // 2*{unpck,movsd,pshufd}
1999 {TTI::SK_Splice, MVT::v8i16, 3}, // psrldq + pslldq + por
2000 {TTI::SK_Splice, MVT::v8f16, 3}, // psrldq + pslldq + por
2001 {TTI::SK_Splice, MVT::v16i8, 3}, // psrldq + pslldq + por
2002
2003 {TTI::SK_PermuteSingleSrc, MVT::v2f64, 1}, // shufpd
2004 {TTI::SK_PermuteSingleSrc, MVT::v2i64, 1}, // pshufd
2005 {TTI::SK_PermuteSingleSrc, MVT::v4i32, 1}, // pshufd
2006 {TTI::SK_PermuteSingleSrc, MVT::v8i16, 5}, // 2*pshuflw + 2*pshufhw
2007 // + pshufd/unpck
2008 {TTI::SK_PermuteSingleSrc, MVT::v8f16, 5}, // 2*pshuflw + 2*pshufhw
2009 // + pshufd/unpck
2010 {TTI::SK_PermuteSingleSrc, MVT::v16i8, 10}, // 2*pshuflw + 2*pshufhw
2011 // + 2*pshufd + 2*unpck + 2*packus
2012
2013 {TTI::SK_PermuteTwoSrc, MVT::v2f64, 1}, // shufpd
2014 {TTI::SK_PermuteTwoSrc, MVT::v2i64, 1}, // shufpd
2015 {TTI::SK_PermuteTwoSrc, MVT::v4i32, 2}, // 2*{unpck,movsd,pshufd}
2016 {TTI::SK_PermuteTwoSrc, MVT::v8i16, 8}, // blend+permute
2017 {TTI::SK_PermuteTwoSrc, MVT::v8f16, 8}, // blend+permute
2018 {TTI::SK_PermuteTwoSrc, MVT::v16i8, 13}, // blend+permute
2019 };
2020
2021 static const CostTblEntry SSE3BroadcastLoadTbl[] = {
2022 {TTI::SK_Broadcast, MVT::v2f64, 0}, // broadcast handled by movddup
2023 };
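// With SSE3, a v2f64 broadcast from memory is modelled as free because
// movddup folds the load and the splat into a single instruction; this is
// why the lookup below is gated on one of the shuffle arguments being a
// LoadInst.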
2024
2025 if (ST->hasSSE2()) {
2026 bool IsLoad =
2027 llvm::any_of(Args, [](const auto &V) { return isa<LoadInst>(V); });
2028 if (ST->hasSSE3() && IsLoad)
2029 if (const auto *Entry =
2030 CostTableLookup(SSE3BroadcastLoadTbl, Kind, LT.second)) {
2031 assert(isLegalBroadcastLoad(BaseTp->getElementType(),
2032                             LT.second.getVectorElementCount()) &&
2033        "Table entry missing from isLegalBroadcastLoad()");
2034 return LT.first * Entry->Cost;
2035 }
2036
2037 if (const auto *Entry = CostTableLookup(SSE2ShuffleTbl, Kind, LT.second))
2038 return LT.first * Entry->Cost;
2039 }
2040
2041 static const CostTblEntry SSE1ShuffleTbl[] = {
2042 { TTI::SK_Broadcast, MVT::v4f32, 1 }, // shufps
2043 { TTI::SK_Reverse, MVT::v4f32, 1 }, // shufps
2044 { TTI::SK_Select, MVT::v4f32, 2 }, // 2*shufps
2045 { TTI::SK_Splice, MVT::v4f32, 2 }, // 2*shufps
2046 { TTI::SK_PermuteSingleSrc, MVT::v4f32, 1 }, // shufps
2047 { TTI::SK_PermuteTwoSrc, MVT::v4f32, 2 }, // 2*shufps
2048 };
2049
2050 if (ST->hasSSE1())
2051 if (const auto *Entry = CostTableLookup(SSE1ShuffleTbl, Kind, LT.second))
2052 return LT.first * Entry->Cost;
2053
2054 return BaseT::getShuffleCost(Kind, BaseTp, Mask, CostKind, Index, SubTp);
2055}
2056
2057InstructionCost X86TTIImpl::getCastInstrCost(unsigned Opcode, Type *Dst,
2058 Type *Src,
2059 TTI::CastContextHint CCH,
2060 TTI::TargetCostKind CostKind,
2061 const Instruction *I) {
2062 int ISD = TLI->InstructionOpcodeToISD(Opcode);
2063 assert(ISD && "Invalid opcode");
2064
2065 // TODO: Allow non-throughput costs that aren't binary.
2066 auto AdjustCost = [&CostKind](InstructionCost Cost) -> InstructionCost {
2067 if (CostKind != TTI::TCK_RecipThroughput)
2068 return Cost == 0 ? 0 : 1;
2069 return Cost;
2070 };
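// Illustrative behaviour of AdjustCost: the conversion tables below hold
// throughput-style costs, so non-throughput queries are collapsed to a
// binary answer for now; e.g. AdjustCost(3) returns 1 under TCK_CodeSize
// while AdjustCost(0) stays 0, which is what the TODO above refers to.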
2071
2072 // The cost tables include both specific, custom (non-legal) src/dst type
2073 // conversions and generic, legalized types. We test for customs first, before
2074 // falling back to legalization.
2075 // FIXME: Need a better design of the cost table to handle non-simple types
2076 // with potentially massive combinations (elem_num x src_type x dst_type).
2077 static const TypeConversionCostTblEntry AVX512BWConversionTbl[] {
2078 { ISD::SIGN_EXTEND, MVT::v32i16, MVT::v32i8, 1 },
2079 { ISD::ZERO_EXTEND, MVT::v32i16, MVT::v32i8, 1 },
2080
2081 // Mask sign extend has an instruction.
2082 { ISD::SIGN_EXTEND, MVT::v2i8, MVT::v2i1, 1 },
2083 { ISD::SIGN_EXTEND, MVT::v16i8, MVT::v2i1, 1 },
2084 { ISD::SIGN_EXTEND, MVT::v2i16, MVT::v2i1, 1 },
2085 { ISD::SIGN_EXTEND, MVT::v8i16, MVT::v2i1, 1 },
2086 { ISD::SIGN_EXTEND, MVT::v4i8, MVT::v4i1, 1 },
2087 { ISD::SIGN_EXTEND, MVT::v16i8, MVT::v4i1, 1 },
2088 { ISD::SIGN_EXTEND, MVT::v4i16, MVT::v4i1, 1 },
2089 { ISD::SIGN_EXTEND, MVT::v8i16, MVT::v4i1, 1 },
2090 { ISD::SIGN_EXTEND, MVT::v8i8, MVT::v8i1, 1 },
2091 { ISD::SIGN_EXTEND, MVT::v16i8, MVT::v8i1, 1 },
2092 { ISD::SIGN_EXTEND, MVT::v8i16, MVT::v8i1, 1 },
2093 { ISD::SIGN_EXTEND, MVT::v16i8, MVT::v16i1, 1 },
2094 { ISD::SIGN_EXTEND, MVT::v16i16, MVT::v16i1, 1 },
2095 { ISD::SIGN_EXTEND, MVT::v32i8, MVT::v32i1, 1 },
2096 { ISD::SIGN_EXTEND, MVT::v32i16, MVT::v32i1, 1 },
2097 { ISD::SIGN_EXTEND, MVT::v64i8, MVT::v64i1, 1 },
2098 { ISD::SIGN_EXTEND, MVT::v32i16, MVT::v64i1, 1 },
2099
2100 // Mask zero extend is a sext + shift.
2101 { ISD::ZERO_EXTEND, MVT::v2i8, MVT::v2i1, 2 },
2102 { ISD::ZERO_EXTEND, MVT::v16i8, MVT::v2i1, 2 },
2103 { ISD::ZERO_EXTEND, MVT::v2i16, MVT::v2i1, 2 },
2104 { ISD::ZERO_EXTEND, MVT::v8i16, MVT::v2i1, 2 },
2105 { ISD::ZERO_EXTEND, MVT::v4i8, MVT::v4i1, 2 },
2106 { ISD::ZERO_EXTEND, MVT::v16i8, MVT::v4i1, 2 },
2107 { ISD::ZERO_EXTEND, MVT::v4i16, MVT::v4i1, 2 },
2108 { ISD::ZERO_EXTEND, MVT::v8i16, MVT::v4i1, 2 },
2109 { ISD::ZERO_EXTEND, MVT::v8i8, MVT::v8i1, 2 },
2110 { ISD::ZERO_EXTEND, MVT::v16i8, MVT::v8i1, 2 },
2111 { ISD::ZERO_EXTEND, MVT::v8i16, MVT::v8i1, 2 },
2112 { ISD::ZERO_EXTEND, MVT::v16i8, MVT::v16i1, 2 },
2113 { ISD::ZERO_EXTEND, MVT::v16i16, MVT::v16i1, 2 },
2114 { ISD::ZERO_EXTEND, MVT::v32i8, MVT::v32i1, 2 },
2115 { ISD::ZERO_EXTEND, MVT::v32i16, MVT::v32i1, 2 },
2116 { ISD::ZERO_EXTEND, MVT::v64i8, MVT::v64i1, 2 },
2117 { ISD::ZERO_EXTEND, MVT::v32i16, MVT::v64i1, 2 },
2118
2119 { ISD::TRUNCATE, MVT::v2i1, MVT::v2i8, 2 },
2120 { ISD::TRUNCATE, MVT::v2i1, MVT::v16i8, 2 },
2121 { ISD::TRUNCATE, MVT::v2i1, MVT::v2i16, 2 },
2122 { ISD::TRUNCATE, MVT::v2i1, MVT::v8i16, 2 },
2123 { ISD::TRUNCATE, MVT::v4i1, MVT::v4i8, 2 },
2124 { ISD::TRUNCATE, MVT::v4i1, MVT::v16i8, 2 },
2125 { ISD::TRUNCATE, MVT::v4i1, MVT::v4i16, 2 },
2126 { ISD::TRUNCATE, MVT::v4i1, MVT::v8i16, 2 },
2127 { ISD::TRUNCATE, MVT::v8i1, MVT::v8i8, 2 },
2128 { ISD::TRUNCATE, MVT::v8i1, MVT::v16i8, 2 },
2129 { ISD::TRUNCATE, MVT::v8i1, MVT::v8i16, 2 },
2130 { ISD::TRUNCATE, MVT::v16i1, MVT::v16i8, 2 },
2131 { ISD::TRUNCATE, MVT::v16i1, MVT::v16i16, 2 },
2132 { ISD::TRUNCATE, MVT::v32i1, MVT::v32i8, 2 },
2133 { ISD::TRUNCATE, MVT::v32i1, MVT::v32i16, 2 },
2134 { ISD::TRUNCATE, MVT::v64i1, MVT::v64i8, 2 },
2135 { ISD::TRUNCATE, MVT::v64i1, MVT::v32i16, 2 },
2136
2137 { ISD::TRUNCATE, MVT::v32i8, MVT::v32i16, 2 },
2138 { ISD::TRUNCATE, MVT::v16i8, MVT::v16i16, 2 }, // widen to zmm
2139 { ISD::TRUNCATE, MVT::v2i8, MVT::v2i16, 2 }, // vpmovwb
2140 { ISD::TRUNCATE, MVT::v4i8, MVT::v4i16, 2 }, // vpmovwb
2141 { ISD::TRUNCATE, MVT::v8i8, MVT::v8i16, 2 }, // vpmovwb
2142 };
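// The mask-extension rows above follow a simple pattern: sign extends from
// vXi1 cost 1 because AVX512BW can expand a mask register directly
// (vpmovm2b / vpmovm2w), while the matching zero extends cost 2 because the
// all-ones sign-extended lanes still need a shift down to 0/1.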
2143
2144 static const TypeConversionCostTblEntry AVX512DQConversionTbl[] = {
2145 // Mask sign extend has an instruction.
2146 { ISD::SIGN_EXTEND, MVT::v2i64, MVT::v2i1, 1 },
2147 { ISD::SIGN_EXTEND, MVT::v4i32, MVT::v2i1, 1 },
2148 { ISD::SIGN_EXTEND, MVT::v4i32, MVT::v4i1, 1 },
2149 { ISD::SIGN_EXTEND, MVT::v4i64, MVT::v4i1, 1 },
2150 { ISD::SIGN_EXTEND, MVT::v8i32, MVT::v8i1, 1 },
2151 { ISD::SIGN_EXTEND, MVT::v8i64, MVT::v16i1, 1 },
2152 { ISD::SIGN_EXTEND, MVT::v8i64, MVT::v8i1, 1 },
2153 { ISD::SIGN_EXTEND, MVT::v16i32, MVT::v16i1, 1 },
2154
2155 // Mask zero extend is a sext + shift.
2156 { ISD::ZERO_EXTEND, MVT::v2i64, MVT::v2i1, 2 },
2157 { ISD::ZERO_EXTEND, MVT::v4i32, MVT::v2i1, 2 },
2158 { ISD::ZERO_EXTEND, MVT::v4i32, MVT::v4i1, 2 },
2159 { ISD::ZERO_EXTEND, MVT::v4i64, MVT::v4i1, 2 },
2160 { ISD::ZERO_EXTEND, MVT::v8i32, MVT::v8i1, 2 },
2161 { ISD::ZERO_EXTEND, MVT::v8i64, MVT::v16i1, 2 },
2162 { ISD::ZERO_EXTEND, MVT::v8i64, MVT::v8i1, 2 },
2163 { ISD::ZERO_EXTEND, MVT::v16i32, MVT::v16i1, 2 },
2164
2165 { ISD::TRUNCATE, MVT::v2i1, MVT::v2i64, 2 },
2166 { ISD::TRUNCATE, MVT::v2i1, MVT::v4i32, 2 },
2167 { ISD::TRUNCATE, MVT::v4i1, MVT::v4i32, 2 },
2168 { ISD::TRUNCATE, MVT::v4i1, MVT::v4i64, 2 },
2169 { ISD::TRUNCATE, MVT::v8i1, MVT::v8i32, 2 },
2170 { ISD::TRUNCATE, MVT::v8i1, MVT::v8i64, 2 },
2171 { ISD::TRUNCATE, MVT::v16i1, MVT::v16i32, 2 },
2172 { ISD::TRUNCATE, MVT::v16i1, MVT::v8i64, 2 },
2173
2174 { ISD::SINT_TO_FP, MVT::v8f32, MVT::v8i64, 1 },
2175 { ISD::SINT_TO_FP, MVT::v8f64, MVT::v8i64, 1 },
2176
2177 { ISD::UINT_TO_FP, MVT::v8f32, MVT::v8i64, 1 },
2178 { ISD::UINT_TO_FP, MVT::v8f64, MVT::v8i64, 1 },
2179
2180 { ISD::FP_TO_SINT, MVT::v8i64, MVT::v8f32, 1 },
2181 { ISD::FP_TO_SINT, MVT::v8i64, MVT::v8f64, 1 },
2182
2183 { ISD::FP_TO_UINT, MVT::v8i64, MVT::v8f32, 1 },
2184 { ISD::FP_TO_UINT, MVT::v8i64, MVT::v8f64, 1 },
2185 };
2186
2187 // TODO: For AVX512DQ + AVX512VL, we also have cheap casts for 128-bit and
2188 // 256-bit wide vectors.
2189
2190 static const TypeConversionCostTblEntry AVX512FConversionTbl[] = {
2191 { ISD::FP_EXTEND, MVT::v8f64, MVT::v8f32, 1 },
2192 { ISD::FP_EXTEND, MVT::v8f64, MVT::v16f32, 3 },
2193 { ISD::FP_ROUND, MVT::v8f32, MVT::v8f64, 1 },
2194
2195 { ISD::TRUNCATE, MVT::v2i1, MVT::v2i8, 3 }, // sext+vpslld+vptestmd
2196 { ISD::TRUNCATE, MVT::v4i1, MVT::v4i8, 3 }, // sext+vpslld+vptestmd
2197 { ISD::TRUNCATE, MVT::v8i1, MVT::v8i8, 3 }, // sext+vpslld+vptestmd
2198 { ISD::TRUNCATE, MVT::v16i1, MVT::v16i8, 3 }, // sext+vpslld+vptestmd
2199 { ISD::TRUNCATE, MVT::v2i1, MVT::v2i16, 3 }, // sext+vpsllq+vptestmq
2200 { ISD::TRUNCATE, MVT::v4i1, MVT::v4i16, 3 }, // sext+vpsllq+vptestmq
2201 { ISD::TRUNCATE, MVT::v8i1, MVT::v8i16, 3 }, // sext+vpsllq+vptestmq
2202 { ISD::TRUNCATE, MVT::v16i1, MVT::v16i16, 3 }, // sext+vpslld+vptestmd
2203 { ISD::TRUNCATE, MVT::v2i1, MVT::v2i32, 2 }, // zmm vpslld+vptestmd
2204 { ISD::TRUNCATE, MVT::v4i1, MVT::v4i32, 2 }, // zmm vpslld+vptestmd
2205 { ISD::TRUNCATE, MVT::v8i1, MVT::v8i32, 2 }, // zmm vpslld+vptestmd
2206 { ISD::TRUNCATE, MVT::v16i1, MVT::v16i32, 2 }, // vpslld+vptestmd
2207 { ISD::TRUNCATE, MVT::v2i1, MVT::v2i64, 2 }, // zmm vpsllq+vptestmq
2208 { ISD::TRUNCATE, MVT::v4i1, MVT::v4i64, 2 }, // zmm vpsllq+vptestmq
2209 { ISD::TRUNCATE, MVT::v8i1, MVT::v8i64, 2 }, // vpsllq+vptestmq
2210 { ISD::TRUNCATE, MVT::v2i8, MVT::v2i32, 2 }, // vpmovdb
2211 { ISD::TRUNCATE, MVT::v4i8, MVT::v4i32, 2 }, // vpmovdb
2212 { ISD::TRUNCATE, MVT::v16i8, MVT::v16i32, 2 }, // vpmovdb
2213 { ISD::TRUNCATE, MVT::v32i8, MVT::v16i32, 2 }, // vpmovdb
2214 { ISD::TRUNCATE, MVT::v64i8, MVT::v16i32, 2 }, // vpmovdb
2215 { ISD::TRUNCATE, MVT::v16i16, MVT::v16i32, 2 }, // vpmovdw
2216 { ISD::TRUNCATE, MVT::v32i16, MVT::v16i32, 2 }, // vpmovdw
2217 { ISD::TRUNCATE, MVT::v2i8, MVT::v2i64, 2 }, // vpmovqb
2218 { ISD::TRUNCATE, MVT::v2i16, MVT::v2i64, 1 }, // vpshufb
2219 { ISD::TRUNCATE, MVT::v8i8, MVT::v8i64, 2 }, // vpmovqb
2220 { ISD::TRUNCATE, MVT::v16i8, MVT::v8i64, 2 }, // vpmovqb
2221 { ISD::TRUNCATE, MVT::v32i8, MVT::v8i64, 2 }, // vpmovqb
2222 { ISD::TRUNCATE, MVT::v64i8, MVT::v8i64, 2 }, // vpmovqb
2223 { ISD::TRUNCATE, MVT::v8i16, MVT::v8i64, 2 }, // vpmovqw
2224 { ISD::TRUNCATE, MVT::v16i16, MVT::v8i64, 2 }, // vpmovqw
2225 { ISD::TRUNCATE, MVT::v32i16, MVT::v8i64, 2 }, // vpmovqw
2226 { ISD::TRUNCATE, MVT::v8i32, MVT::v8i64, 1 }, // vpmovqd
2227 { ISD::TRUNCATE, MVT::v4i32, MVT::v4i64, 1 }, // zmm vpmovqd
2228 { ISD::TRUNCATE, MVT::v16i8, MVT::v16i64, 5 }, // 2*vpmovqd+concat+vpmovdb
2229
2230 { ISD::TRUNCATE, MVT::v16i8, MVT::v16i16, 3 }, // extend to v16i32
2231 { ISD::TRUNCATE, MVT::v32i8, MVT::v32i16, 8 },
2232 { ISD::TRUNCATE, MVT::v64i8, MVT::v32i16, 8 },
2233
2234 // Sign extend is zmm vpternlogd+vptruncdb.
2235 // Zero extend is zmm broadcast load+vptruncdw.
2236 { ISD::SIGN_EXTEND, MVT::v2i8, MVT::v2i1, 3 },
2237 { ISD::ZERO_EXTEND, MVT::v2i8, MVT::v2i1, 4 },
2238 { ISD::SIGN_EXTEND, MVT::v4i8, MVT::v4i1, 3 },
2239 { ISD::ZERO_EXTEND, MVT::v4i8, MVT::v4i1, 4 },
2240 { ISD::SIGN_EXTEND, MVT::v8i8, MVT::v8i1, 3 },
2241 { ISD::ZERO_EXTEND, MVT::v8i8, MVT::v8i1, 4 },
2242 { ISD::SIGN_EXTEND, MVT::v16i8, MVT::v16i1, 3 },
2243 { ISD::ZERO_EXTEND, MVT::v16i8, MVT::v16i1, 4 },
2244
2245 // Sign extend is zmm vpternlogd+vptruncdw.
2246 // Zero extend is zmm vpternlogd+vptruncdw+vpsrlw.
2247 { ISD::SIGN_EXTEND, MVT::v2i16, MVT::v2i1, 3 },
2248 { ISD::ZERO_EXTEND, MVT::v2i16, MVT::v2i1, 4 },
2249 { ISD::SIGN_EXTEND, MVT::v4i16, MVT::v4i1, 3 },
2250 { ISD::ZERO_EXTEND, MVT::v4i16, MVT::v4i1, 4 },
2251 { ISD::SIGN_EXTEND, MVT::v8i16, MVT::v8i1, 3 },
2252 { ISD::ZERO_EXTEND, MVT::v8i16, MVT::v8i1, 4 },
2253 { ISD::SIGN_EXTEND, MVT::v16i16, MVT::v16i1, 3 },
2254 { ISD::ZERO_EXTEND, MVT::v16i16, MVT::v16i1, 4 },
2255
2256 { ISD::SIGN_EXTEND, MVT::v2i32, MVT::v2i1, 1 }, // zmm vpternlogd
2257 { ISD::ZERO_EXTEND, MVT::v2i32, MVT::v2i1, 2 }, // zmm vpternlogd+psrld
2258 { ISD::SIGN_EXTEND, MVT::v4i32, MVT::v4i1, 1 }, // zmm vpternlogd
2259 { ISD::ZERO_EXTEND, MVT::v4i32, MVT::v4i1, 2 }, // zmm vpternlogd+psrld
2260 { ISD::SIGN_EXTEND, MVT::v8i32, MVT::v8i1, 1 }, // zmm vpternlogd
2261 { ISD::ZERO_EXTEND, MVT::v8i32, MVT::v8i1, 2 }, // zmm vpternlogd+psrld
2262 { ISD::SIGN_EXTEND, MVT::v2i64, MVT::v2i1, 1 }, // zmm vpternlogq
2263 { ISD::ZERO_EXTEND, MVT::v2i64, MVT::v2i1, 2 }, // zmm vpternlogq+psrlq
2264 { ISD::SIGN_EXTEND, MVT::v4i64, MVT::v4i1, 1 }, // zmm vpternlogq
2265 { ISD::ZERO_EXTEND, MVT::v4i64, MVT::v4i1, 2 }, // zmm vpternlogq+psrlq
2266
2267 { ISD::SIGN_EXTEND, MVT::v16i32, MVT::v16i1, 1 }, // vpternlogd
2268 { ISD::ZERO_EXTEND, MVT::v16i32, MVT::v16i1, 2 }, // vpternlogd+psrld
2269 { ISD::SIGN_EXTEND, MVT::v8i64, MVT::v8i1, 1 }, // vpternlogq
2270 { ISD::ZERO_EXTEND, MVT::v8i64, MVT::v8i1, 2 }, // vpternlogq+psrlq
2271
2272 { ISD::SIGN_EXTEND, MVT::v16i32, MVT::v16i8, 1 },
2273 { ISD::ZERO_EXTEND, MVT::v16i32, MVT::v16i8, 1 },
2274 { ISD::SIGN_EXTEND, MVT::v16i32, MVT::v16i16, 1 },
2275 { ISD::ZERO_EXTEND, MVT::v16i32, MVT::v16i16, 1 },
2276 { ISD::SIGN_EXTEND, MVT::v8i64, MVT::v8i8, 1 },
2277 { ISD::ZERO_EXTEND, MVT::v8i64, MVT::v8i8, 1 },
2278 { ISD::SIGN_EXTEND, MVT::v8i64, MVT::v8i16, 1 },
2279 { ISD::ZERO_EXTEND, MVT::v8i64, MVT::v8i16, 1 },
2280 { ISD::SIGN_EXTEND, MVT::v8i64, MVT::v8i32, 1 },
2281 { ISD::ZERO_EXTEND, MVT::v8i64, MVT::v8i32, 1 },
2282
2283 { ISD::SIGN_EXTEND, MVT::v32i16, MVT::v32i8, 3 }, // FIXME: May not be right
2284 { ISD::ZERO_EXTEND, MVT::v32i16, MVT::v32i8, 3 }, // FIXME: May not be right
2285
2286 { ISD::SINT_TO_FP, MVT::v8f64, MVT::v8i1, 4 },
2287 { ISD::SINT_TO_FP, MVT::v16f32, MVT::v16i1, 3 },
2288 { ISD::SINT_TO_FP, MVT::v8f64, MVT::v16i8, 2 },
2289 { ISD::SINT_TO_FP, MVT::v16f32, MVT::v16i8, 1 },
2290 { ISD::SINT_TO_FP, MVT::v8f64, MVT::v8i16, 2 },
2291 { ISD::SINT_TO_FP, MVT::v16f32, MVT::v16i16, 1 },
2292 { ISD::SINT_TO_FP, MVT::v8f64, MVT::v8i32, 1 },
2293 { ISD::SINT_TO_FP, MVT::v16f32, MVT::v16i32, 1 },
2294
2295 { ISD::UINT_TO_FP, MVT::v8f64, MVT::v8i1, 4 },
2296 { ISD::UINT_TO_FP, MVT::v16f32, MVT::v16i1, 3 },
2297 { ISD::UINT_TO_FP, MVT::v8f64, MVT::v16i8, 2 },
2298 { ISD::UINT_TO_FP, MVT::v16f32, MVT::v16i8, 1 },
2299 { ISD::UINT_TO_FP, MVT::v8f64, MVT::v8i16, 2 },
2300 { ISD::UINT_TO_FP, MVT::v16f32, MVT::v16i16, 1 },
2301 { ISD::UINT_TO_FP, MVT::v8f64, MVT::v8i32, 1 },
2302 { ISD::UINT_TO_FP, MVT::v16f32, MVT::v16i32, 1 },
2303 { ISD::UINT_TO_FP, MVT::v8f32, MVT::v8i64, 26 },
2304 { ISD::UINT_TO_FP, MVT::v8f64, MVT::v8i64, 5 },
2305
2306 { ISD::FP_TO_SINT, MVT::v16i8, MVT::v16f32, 2 },
2307 { ISD::FP_TO_SINT, MVT::v16i8, MVT::v16f64, 7 },
2308 { ISD::FP_TO_SINT, MVT::v32i8, MVT::v32f64, 15 },
2309 { ISD::FP_TO_SINT, MVT::v64i8, MVT::v64f32, 11 },
2310 { ISD::FP_TO_SINT, MVT::v64i8, MVT::v64f64, 31 },
2311 { ISD::FP_TO_SINT, MVT::v8i16, MVT::v8f64, 3 },
2312 { ISD::FP_TO_SINT, MVT::v16i16, MVT::v16f64, 7 },
2313 { ISD::FP_TO_SINT, MVT::v32i16, MVT::v32f32, 5 },
2314 { ISD::FP_TO_SINT, MVT::v32i16, MVT::v32f64, 15 },
2315 { ISD::FP_TO_SINT, MVT::v8i32, MVT::v8f64, 1 },
2316 { ISD::FP_TO_SINT, MVT::v16i32, MVT::v16f64, 3 },
2317
2318 { ISD::FP_TO_UINT, MVT::v8i32, MVT::v8f64, 1 },
2319 { ISD::FP_TO_UINT, MVT::v8i16, MVT::v8f64, 3 },
2320 { ISD::FP_TO_UINT, MVT::v8i8, MVT::v8f64, 3 },
2321 { ISD::FP_TO_UINT, MVT::v16i32, MVT::v16f32, 1 },
2322 { ISD::FP_TO_UINT, MVT::v16i16, MVT::v16f32, 3 },
2323 { ISD::FP_TO_UINT, MVT::v16i8, MVT::v16f32, 3 },
2324 };
2325
2326 static const TypeConversionCostTblEntry AVX512BWVLConversionTbl[] = {
2327 // Mask sign extend has an instruction.
2328 { ISD::SIGN_EXTEND, MVT::v2i8, MVT::v2i1, 1 },
2329 { ISD::SIGN_EXTEND, MVT::v16i8, MVT::v2i1, 1 },
2330 { ISD::SIGN_EXTEND, MVT::v2i16, MVT::v2i1, 1 },
2331 { ISD::SIGN_EXTEND, MVT::v8i16, MVT::v2i1, 1 },
2332 { ISD::SIGN_EXTEND, MVT::v4i16, MVT::v4i1, 1 },
2333 { ISD::SIGN_EXTEND, MVT::v16i8, MVT::v4i1, 1 },
2334 { ISD::SIGN_EXTEND, MVT::v4i8, MVT::v4i1, 1 },
2335 { ISD::SIGN_EXTEND, MVT::v8i16, MVT::v4i1, 1 },
2336 { ISD::SIGN_EXTEND, MVT::v8i8, MVT::v8i1, 1 },
2337 { ISD::SIGN_EXTEND, MVT::v16i8, MVT::v8i1, 1 },
2338 { ISD::SIGN_EXTEND, MVT::v8i16, MVT::v8i1, 1 },
2339 { ISD::SIGN_EXTEND, MVT::v16i8, MVT::v16i1, 1 },
2340 { ISD::SIGN_EXTEND, MVT::v16i16, MVT::v16i1, 1 },
2341 { ISD::SIGN_EXTEND, MVT::v32i8, MVT::v32i1, 1 },
2342 { ISD::SIGN_EXTEND, MVT::v16i16, MVT::v32i1, 1 },
2343 { ISD::SIGN_EXTEND, MVT::v32i8, MVT::v64i1, 1 },
2344 { ISD::SIGN_EXTEND, MVT::v16i16, MVT::v64i1, 1 },
2345
2346 // Mask zero extend is a sext + shift.
2347 { ISD::ZERO_EXTEND, MVT::v2i8, MVT::v2i1, 2 },
2348 { ISD::ZERO_EXTEND, MVT::v16i8, MVT::v2i1, 2 },
2349 { ISD::ZERO_EXTEND, MVT::v2i16, MVT::v2i1, 2 },
2350 { ISD::ZERO_EXTEND, MVT::v8i16, MVT::v2i1, 2 },
2351 { ISD::ZERO_EXTEND, MVT::v4i8, MVT::v4i1, 2 },
2352 { ISD::ZERO_EXTEND, MVT::v16i8, MVT::v4i1, 2 },
2353 { ISD::ZERO_EXTEND, MVT::v4i16, MVT::v4i1, 2 },
2354 { ISD::ZERO_EXTEND, MVT::v8i16, MVT::v4i1, 2 },
2355 { ISD::ZERO_EXTEND, MVT::v8i8, MVT::v8i1, 2 },
2356 { ISD::ZERO_EXTEND, MVT::v16i8, MVT::v8i1, 2 },
2357 { ISD::ZERO_EXTEND, MVT::v8i16, MVT::v8i1, 2 },
2358 { ISD::ZERO_EXTEND, MVT::v16i8, MVT::v16i1, 2 },
2359 { ISD::ZERO_EXTEND, MVT::v16i16, MVT::v16i1, 2 },
2360 { ISD::ZERO_EXTEND, MVT::v32i8, MVT::v32i1, 2 },
2361 { ISD::ZERO_EXTEND, MVT::v16i16, MVT::v32i1, 2 },
2362 { ISD::ZERO_EXTEND, MVT::v32i8, MVT::v64i1, 2 },
2363 { ISD::ZERO_EXTEND, MVT::v16i16, MVT::v64i1, 2 },
2364
2365 { ISD::TRUNCATE, MVT::v2i1, MVT::v2i8, 2 },
2366 { ISD::TRUNCATE, MVT::v2i1, MVT::v16i8, 2 },
2367 { ISD::TRUNCATE, MVT::v2i1, MVT::v2i16, 2 },
2368 { ISD::TRUNCATE, MVT::v2i1, MVT::v8i16, 2 },
2369 { ISD::TRUNCATE, MVT::v4i1, MVT::v4i8, 2 },
2370 { ISD::TRUNCATE, MVT::v4i1, MVT::v16i8, 2 },
2371 { ISD::TRUNCATE, MVT::v4i1, MVT::v4i16, 2 },
2372 { ISD::TRUNCATE, MVT::v4i1, MVT::v8i16, 2 },
2373 { ISD::TRUNCATE, MVT::v8i1, MVT::v8i8, 2 },
2374 { ISD::TRUNCATE, MVT::v8i1, MVT::v16i8, 2 },
2375 { ISD::TRUNCATE, MVT::v8i1, MVT::v8i16, 2 },
2376 { ISD::TRUNCATE, MVT::v16i1, MVT::v16i8, 2 },
2377 { ISD::TRUNCATE, MVT::v16i1, MVT::v16i16, 2 },
2378 { ISD::TRUNCATE, MVT::v32i1, MVT::v32i8, 2 },
2379 { ISD::TRUNCATE, MVT::v32i1, MVT::v16i16, 2 },
2380 { ISD::TRUNCATE, MVT::v64i1, MVT::v32i8, 2 },
2381 { ISD::TRUNCATE, MVT::v64i1, MVT::v16i16, 2 },
2382
2383 { ISD::TRUNCATE, MVT::v16i8, MVT::v16i16, 2 },
2384 };
2385
2386 static const TypeConversionCostTblEntry AVX512DQVLConversionTbl[] = {
2387 // Mask sign extend has an instruction.
2388 { ISD::SIGN_EXTEND, MVT::v2i64, MVT::v2i1, 1 },
2389 { ISD::SIGN_EXTEND, MVT::v4i32, MVT::v2i1, 1 },
2390 { ISD::SIGN_EXTEND, MVT::v4i32, MVT::v4i1, 1 },
2391 { ISD::SIGN_EXTEND, MVT::v4i64, MVT::v16i1, 1 },
2392 { ISD::SIGN_EXTEND, MVT::v4i64, MVT::v4i1, 1 },
2393 { ISD::SIGN_EXTEND, MVT::v4i64, MVT::v8i1, 1 },
2394 { ISD::SIGN_EXTEND, MVT::v8i32, MVT::v16i1, 1 },
2395 { ISD::SIGN_EXTEND, MVT::v8i32, MVT::v8i1, 1 },
2396
2397 // Mask zero extend is a sext + shift.
2398 { ISD::ZERO_EXTEND, MVT::v2i64, MVT::v2i1, 2 },
2399 { ISD::ZERO_EXTEND, MVT::v4i32, MVT::v2i1, 2 },
2400 { ISD::ZERO_EXTEND, MVT::v4i32, MVT::v4i1, 2 },
2401 { ISD::ZERO_EXTEND, MVT::v4i64, MVT::v16i1, 2 },
2402 { ISD::ZERO_EXTEND, MVT::v4i64, MVT::v4i1, 2 },
2403 { ISD::ZERO_EXTEND, MVT::v4i64, MVT::v8i1, 2 },
2404 { ISD::ZERO_EXTEND, MVT::v8i32, MVT::v16i1, 2 },
2405 { ISD::ZERO_EXTEND, MVT::v8i32, MVT::v8i1, 2 },
2406
2407 { ISD::TRUNCATE, MVT::v16i1, MVT::v4i64, 2 },
2408 { ISD::TRUNCATE, MVT::v16i1, MVT::v8i32, 2 },
2409 { ISD::TRUNCATE, MVT::v2i1, MVT::v2i64, 2 },
2410 { ISD::TRUNCATE, MVT::v2i1, MVT::v4i32, 2 },
2411 { ISD::TRUNCATE, MVT::v4i1, MVT::v4i32, 2 },
2412 { ISD::TRUNCATE, MVT::v4i1, MVT::v4i64, 2 },
2413 { ISD::TRUNCATE, MVT::v8i1, MVT::v4i64, 2 },
2414 { ISD::TRUNCATE, MVT::v8i1, MVT::v8i32, 2 },
2415
2416 { ISD::SINT_TO_FP, MVT::v2f32, MVT::v2i64, 1 },
2417 { ISD::SINT_TO_FP, MVT::v2f64, MVT::v2i64, 1 },
2418 { ISD::SINT_TO_FP, MVT::v4f32, MVT::v4i64, 1 },
2419 { ISD::SINT_TO_FP, MVT::v4f64, MVT::v4i64, 1 },
2420
2421 { ISD::UINT_TO_FP, MVT::v2f32, MVT::v2i64, 1 },
2422 { ISD::UINT_TO_FP, MVT::v2f64, MVT::v2i64, 1 },
2423 { ISD::UINT_TO_FP, MVT::v4f32, MVT::v4i64, 1 },
2424 { ISD::UINT_TO_FP, MVT::v4f64, MVT::v4i64, 1 },
2425
2426 { ISD::FP_TO_SINT, MVT::v2i64, MVT::v4f32, 1 },
2427 { ISD::FP_TO_SINT, MVT::v4i64, MVT::v4f32, 1 },
2428 { ISD::FP_TO_SINT, MVT::v2i64, MVT::v2f64, 1 },
2429 { ISD::FP_TO_SINT, MVT::v4i64, MVT::v4f64, 1 },
2430
2431 { ISD::FP_TO_UINT, MVT::v2i64, MVT::v4f32, 1 },
2432 { ISD::FP_TO_UINT, MVT::v4i64, MVT::v4f32, 1 },
2433 { ISD::FP_TO_UINT, MVT::v2i64, MVT::v2f64, 1 },
2434 { ISD::FP_TO_UINT, MVT::v4i64, MVT::v4f64, 1 },
2435 };
2436
2437 static const TypeConversionCostTblEntry AVX512VLConversionTbl[] = {
2438 { ISD::TRUNCATE, MVT::v2i1, MVT::v2i8, 3 }, // sext+vpslld+vptestmd
2439 { ISD::TRUNCATE, MVT::v4i1, MVT::v4i8, 3 }, // sext+vpslld+vptestmd
2440 { ISD::TRUNCATE, MVT::v8i1, MVT::v8i8, 3 }, // sext+vpslld+vptestmd
2441 { ISD::TRUNCATE, MVT::v16i1, MVT::v16i8, 8 }, // split+2*v8i8
2442 { ISD::TRUNCATE, MVT::v2i1, MVT::v2i16, 3 }, // sext+vpsllq+vptestmq
2443 { ISD::TRUNCATE, MVT::v4i1, MVT::v4i16, 3 }, // sext+vpsllq+vptestmq
2444 { ISD::TRUNCATE, MVT::v8i1, MVT::v8i16, 3 }, // sext+vpsllq+vptestmq
2445 { ISD::TRUNCATE, MVT::v16i1, MVT::v16i16, 8 }, // split+2*v8i16
2446 { ISD::TRUNCATE, MVT::v2i1, MVT::v2i32, 2 }, // vpslld+vptestmd
2447 { ISD::TRUNCATE, MVT::v4i1, MVT::v4i32, 2 }, // vpslld+vptestmd
2448 { ISD::TRUNCATE, MVT::v8i1, MVT::v8i32, 2 }, // vpslld+vptestmd
2449 { ISD::TRUNCATE, MVT::v2i1, MVT::v2i64, 2 }, // vpsllq+vptestmq
2450 { ISD::TRUNCATE, MVT::v4i1, MVT::v4i64, 2 }, // vpsllq+vptestmq
2451 { ISD::TRUNCATE, MVT::v4i32, MVT::v4i64, 1 }, // vpmovqd
2452 { ISD::TRUNCATE, MVT::v4i8, MVT::v4i64, 2 }, // vpmovqb
2453 { ISD::TRUNCATE, MVT::v4i16, MVT::v4i64, 2 }, // vpmovqw
2454 { ISD::TRUNCATE, MVT::v8i8, MVT::v8i32, 2 }, // vpmovwb
2455
2456 // Sign extend is vpcmpeq+maskedmove+vpmovdw+vpacksswb.
2457 // Zero extend is vpcmpeq+maskedmove+vpmovdw+vpsrlw+vpackuswb.
2458 { ISD::SIGN_EXTEND, MVT::v2i8, MVT::v2i1, 5 },
2459 { ISD::ZERO_EXTEND, MVT::v2i8, MVT::v2i1, 6 },
2460 { ISD::SIGN_EXTEND, MVT::v4i8, MVT::v4i1, 5 },
2461 { ISD::ZERO_EXTEND, MVT::v4i8, MVT::v4i1, 6 },
2462 { ISD::SIGN_EXTEND, MVT::v8i8, MVT::v8i1, 5 },
2463 { ISD::ZERO_EXTEND, MVT::v8i8, MVT::v8i1, 6 },
2464 { ISD::SIGN_EXTEND, MVT::v16i8, MVT::v16i1, 10 },
2465 { ISD::ZERO_EXTEND, MVT::v16i8, MVT::v16i1, 12 },
2466
2467 // Sign extend is vpcmpeq+maskedmove+vpmovdw.
2468 // Zero extend is vpcmpeq+maskedmove+vpmovdw+vpsrlw.
2469 { ISD::SIGN_EXTEND, MVT::v2i16, MVT::v2i1, 4 },
2470 { ISD::ZERO_EXTEND, MVT::v2i16, MVT::v2i1, 5 },
2471 { ISD::SIGN_EXTEND, MVT::v4i16, MVT::v4i1, 4 },
2472 { ISD::ZERO_EXTEND, MVT::v4i16, MVT::v4i1, 5 },
2473 { ISD::SIGN_EXTEND, MVT::v8i16, MVT::v8i1, 4 },
2474 { ISD::ZERO_EXTEND, MVT::v8i16, MVT::v8i1, 5 },
2475 { ISD::SIGN_EXTEND, MVT::v16i16, MVT::v16i1, 10 },
2476 { ISD::ZERO_EXTEND, MVT::v16i16, MVT::v16i1, 12 },
2477
2478 { ISD::SIGN_EXTEND, MVT::v2i32, MVT::v2i1, 1 }, // vpternlogd
2479 { ISD::ZERO_EXTEND, MVT::v2i32, MVT::v2i1, 2 }, // vpternlogd+psrld
2480 { ISD::SIGN_EXTEND, MVT::v4i32, MVT::v4i1, 1 }, // vpternlogd
2481 { ISD::ZERO_EXTEND, MVT::v4i32, MVT::v4i1, 2 }, // vpternlogd+psrld
2482 { ISD::SIGN_EXTEND, MVT::v8i32, MVT::v8i1, 1 }, // vpternlogd
2483 { ISD::ZERO_EXTEND, MVT::v8i32, MVT::v8i1, 2 }, // vpternlogd+psrld
2484 { ISD::SIGN_EXTEND, MVT::v2i64, MVT::v2i1, 1 }, // vpternlogq
2485 { ISD::ZERO_EXTEND, MVT::v2i64, MVT::v2i1, 2 }, // vpternlogq+psrlq
2486 { ISD::SIGN_EXTEND, MVT::v4i64, MVT::v4i1, 1 }, // vpternlogq
2487 { ISD::ZERO_EXTEND, MVT::v4i64, MVT::v4i1, 2 }, // vpternlogq+psrlq
2488
2489 { ISD::SIGN_EXTEND, MVT::v4i64, MVT::v16i8, 1 },
2490 { ISD::ZERO_EXTEND, MVT::v4i64, MVT::v16i8, 1 },
2491 { ISD::SIGN_EXTEND, MVT::v8i32, MVT::v16i8, 1 },
2492 { ISD::ZERO_EXTEND, MVT::v8i32, MVT::v16i8, 1 },
2493 { ISD::SIGN_EXTEND, MVT::v16i16, MVT::v16i8, 1 },
2494 { ISD::ZERO_EXTEND, MVT::v16i16, MVT::v16i8, 1 },
2495 { ISD::SIGN_EXTEND, MVT::v4i64, MVT::v8i16, 1 },
2496 { ISD::ZERO_EXTEND, MVT::v4i64, MVT::v8i16, 1 },
2497 { ISD::SIGN_EXTEND, MVT::v8i32, MVT::v8i16, 1 },
2498 { ISD::ZERO_EXTEND, MVT::v8i32, MVT::v8i16, 1 },
2499 { ISD::SIGN_EXTEND, MVT::v4i64, MVT::v4i32, 1 },
2500 { ISD::ZERO_EXTEND, MVT::v4i64, MVT::v4i32, 1 },
2501
2502 { ISD::SINT_TO_FP, MVT::v2f64, MVT::v16i8, 1 },
2503 { ISD::SINT_TO_FP, MVT::v8f32, MVT::v16i8, 1 },
2504 { ISD::SINT_TO_FP, MVT::v2f64, MVT::v8i16, 1 },
2505 { ISD::SINT_TO_FP, MVT::v8f32, MVT::v8i16, 1 },
2506
2507 { ISD::UINT_TO_FP, MVT::f32, MVT::i64, 1 },
2508 { ISD::UINT_TO_FP, MVT::f64, MVT::i64, 1 },
2509 { ISD::UINT_TO_FP, MVT::v2f64, MVT::v16i8, 1 },
2510 { ISD::UINT_TO_FP, MVT::v8f32, MVT::v16i8, 1 },
2511 { ISD::UINT_TO_FP, MVT::v2f64, MVT::v8i16, 1 },
2512 { ISD::UINT_TO_FP, MVT::v8f32, MVT::v8i16, 1 },
2513 { ISD::UINT_TO_FP, MVT::v2f32, MVT::v2i32, 1 },
2514 { ISD::UINT_TO_FP, MVT::v4f32, MVT::v4i32, 1 },
2515 { ISD::UINT_TO_FP, MVT::v4f64, MVT::v4i32, 1 },
2516 { ISD::UINT_TO_FP, MVT::v8f32, MVT::v8i32, 1 },
2517 { ISD::UINT_TO_FP, MVT::v2f32, MVT::v2i64, 5 },
2518 { ISD::UINT_TO_FP, MVT::v2f64, MVT::v2i64, 5 },
2519 { ISD::UINT_TO_FP, MVT::v4f64, MVT::v4i64, 5 },
2520
2521 { ISD::FP_TO_SINT, MVT::v16i8, MVT::v8f32, 2 },
2522 { ISD::FP_TO_SINT, MVT::v16i8, MVT::v16f32, 2 },
2523 { ISD::FP_TO_SINT, MVT::v32i8, MVT::v32f32, 5 },
2524
2525 { ISD::FP_TO_UINT, MVT::i64, MVT::f32, 1 },
2526 { ISD::FP_TO_UINT, MVT::i64, MVT::f64, 1 },
2527 { ISD::FP_TO_UINT, MVT::v4i32, MVT::v4f32, 1 },
2528 { ISD::FP_TO_UINT, MVT::v4i32, MVT::v2f64, 1 },
2529 { ISD::FP_TO_UINT, MVT::v4i32, MVT::v4f64, 1 },
2530 { ISD::FP_TO_UINT, MVT::v8i32, MVT::v8f32, 1 },
2531 { ISD::FP_TO_UINT, MVT::v8i32, MVT::v8f64, 1 },
2532 };
2533
2534 static const TypeConversionCostTblEntry AVX2ConversionTbl[] = {
2535 { ISD::SIGN_EXTEND, MVT::v4i64, MVT::v4i1, 3 },
2536 { ISD::ZERO_EXTEND, MVT::v4i64, MVT::v4i1, 3 },
2537 { ISD::SIGN_EXTEND, MVT::v8i32, MVT::v8i1, 3 },
2538 { ISD::ZERO_EXTEND, MVT::v8i32, MVT::v8i1, 3 },
2539 { ISD::SIGN_EXTEND, MVT::v16i16, MVT::v16i1, 1 },
2540 { ISD::ZERO_EXTEND, MVT::v16i16, MVT::v16i1, 1 },
2541
2542 { ISD::SIGN_EXTEND, MVT::v4i64, MVT::v16i8, 2 },
2543 { ISD::ZERO_EXTEND, MVT::v4i64, MVT::v16i8, 2 },
2544 { ISD::SIGN_EXTEND, MVT::v8i32, MVT::v16i8, 2 },
2545 { ISD::ZERO_EXTEND, MVT::v8i32, MVT::v16i8, 2 },
2546 { ISD::SIGN_EXTEND, MVT::v16i16, MVT::v16i8, 2 },
2547 { ISD::ZERO_EXTEND, MVT::v16i16, MVT::v16i8, 2 },
2548 { ISD::SIGN_EXTEND, MVT::v4i64, MVT::v8i16, 2 },
2549 { ISD::ZERO_EXTEND, MVT::v4i64, MVT::v8i16, 2 },
2550 { ISD::SIGN_EXTEND, MVT::v8i32, MVT::v8i16, 2 },
2551 { ISD::ZERO_EXTEND, MVT::v8i32, MVT::v8i16, 2 },
2552 { ISD::ZERO_EXTEND, MVT::v16i32, MVT::v16i16, 3 },
2553 { ISD::SIGN_EXTEND, MVT::v16i32, MVT::v16i16, 3 },
2554 { ISD::SIGN_EXTEND, MVT::v4i64, MVT::v4i32, 2 },
2555 { ISD::ZERO_EXTEND, MVT::v4i64, MVT::v4i32, 2 },
2556
2557 { ISD::TRUNCATE, MVT::v8i1, MVT::v8i32, 2 },
2558
2559 { ISD::TRUNCATE, MVT::v16i16, MVT::v16i32, 4 },
2560 { ISD::TRUNCATE, MVT::v16i8, MVT::v16i32, 4 },
2561 { ISD::TRUNCATE, MVT::v16i8, MVT::v8i16, 1 },
2562 { ISD::TRUNCATE, MVT::v16i8, MVT::v4i32, 1 },
2563 { ISD::TRUNCATE, MVT::v16i8, MVT::v2i64, 1 },
2564 { ISD::TRUNCATE, MVT::v16i8, MVT::v8i32, 4 },
2565 { ISD::TRUNCATE, MVT::v16i8, MVT::v4i64, 4 },
2566 { ISD::TRUNCATE, MVT::v8i16, MVT::v4i32, 1 },
2567 { ISD::TRUNCATE, MVT::v8i16, MVT::v2i64, 1 },
2568 { ISD::TRUNCATE, MVT::v8i16, MVT::v4i64, 5 },
2569 { ISD::TRUNCATE, MVT::v4i32, MVT::v4i64, 1 },
2570 { ISD::TRUNCATE, MVT::v8i16, MVT::v8i32, 2 },
2571
2572 { ISD::FP_EXTEND, MVT::v8f64, MVT::v8f32, 3 },
2573 { ISD::FP_ROUND, MVT::v8f32, MVT::v8f64, 3 },
2574
2575 { ISD::FP_TO_SINT, MVT::v16i16, MVT::v8f32, 1 },
2576 { ISD::FP_TO_SINT, MVT::v4i32, MVT::v4f64, 1 },
2577 { ISD::FP_TO_SINT, MVT::v8i32, MVT::v8f32, 1 },
2578 { ISD::FP_TO_SINT, MVT::v8i32, MVT::v8f64, 3 },
2579
2580 { ISD::FP_TO_UINT, MVT::i64, MVT::f32, 3 },
2581 { ISD::FP_TO_UINT, MVT::i64, MVT::f64, 3 },
2582 { ISD::FP_TO_UINT, MVT::v16i16, MVT::v8f32, 1 },
2583 { ISD::FP_TO_UINT, MVT::v4i32, MVT::v4f32, 3 },
2584 { ISD::FP_TO_UINT, MVT::v4i32, MVT::v2f64, 4 },
2585 { ISD::FP_TO_UINT, MVT::v4i32, MVT::v4f64, 4 },
2586 { ISD::FP_TO_UINT, MVT::v8i32, MVT::v8f32, 3 },
2587 { ISD::FP_TO_UINT, MVT::v8i32, MVT::v4f64, 4 },
2588
2589 { ISD::SINT_TO_FP, MVT::v2f64, MVT::v16i8, 2 },
2590 { ISD::SINT_TO_FP, MVT::v8f32, MVT::v16i8, 2 },
2591 { ISD::SINT_TO_FP, MVT::v2f64, MVT::v8i16, 2 },
2592 { ISD::SINT_TO_FP, MVT::v8f32, MVT::v8i16, 2 },
2593 { ISD::SINT_TO_FP, MVT::v4f64, MVT::v4i32, 1 },
2594 { ISD::SINT_TO_FP, MVT::v8f32, MVT::v8i32, 1 },
2595 { ISD::SINT_TO_FP, MVT::v8f64, MVT::v8i32, 3 },
2596
2597 { ISD::UINT_TO_FP, MVT::v2f64, MVT::v16i8, 2 },
2598 { ISD::UINT_TO_FP, MVT::v8f32, MVT::v16i8, 2 },
2599 { ISD::UINT_TO_FP, MVT::v2f64, MVT::v8i16, 2 },
2600 { ISD::UINT_TO_FP, MVT::v8f32, MVT::v8i16, 2 },
2601 { ISD::UINT_TO_FP, MVT::v2f32, MVT::v2i32, 2 },
2602 { ISD::UINT_TO_FP, MVT::v2f64, MVT::v2i32, 1 },
2603 { ISD::UINT_TO_FP, MVT::v4f32, MVT::v4i32, 2 },
2604 { ISD::UINT_TO_FP, MVT::v4f64, MVT::v4i32, 2 },
2605 { ISD::UINT_TO_FP, MVT::v8f32, MVT::v8i32, 2 },
2606 { ISD::UINT_TO_FP, MVT::v8f64, MVT::v8i32, 4 },
2607 };
2608
2609 static const TypeConversionCostTblEntry AVXConversionTbl[] = {
2610 { ISD::SIGN_EXTEND, MVT::v4i64, MVT::v4i1, 6 },
2611 { ISD::ZERO_EXTEND, MVT::v4i64, MVT::v4i1, 4 },
2612 { ISD::SIGN_EXTEND, MVT::v8i32, MVT::v8i1, 7 },
2613 { ISD::ZERO_EXTEND, MVT::v8i32, MVT::v8i1, 4 },
2614 { ISD::SIGN_EXTEND, MVT::v16i16, MVT::v16i1, 4 },
2615 { ISD::ZERO_EXTEND, MVT::v16i16, MVT::v16i1, 4 },
2616
2617 { ISD::SIGN_EXTEND, MVT::v4i64, MVT::v16i8, 3 },
2618 { ISD::ZERO_EXTEND, MVT::v4i64, MVT::v16i8, 3 },
2619 { ISD::SIGN_EXTEND, MVT::v8i32, MVT::v16i8, 3 },
2620 { ISD::ZERO_EXTEND, MVT::v8i32, MVT::v16i8, 3 },
2621 { ISD::SIGN_EXTEND, MVT::v16i16, MVT::v16i8, 3 },
2622 { ISD::ZERO_EXTEND, MVT::v16i16, MVT::v16i8, 3 },
2623 { ISD::SIGN_EXTEND, MVT::v4i64, MVT::v8i16, 3 },
2624 { ISD::ZERO_EXTEND, MVT::v4i64, MVT::v8i16, 3 },
2625 { ISD::SIGN_EXTEND, MVT::v8i32, MVT::v8i16, 3 },
2626 { ISD::ZERO_EXTEND, MVT::v8i32, MVT::v8i16, 3 },
2627 { ISD::SIGN_EXTEND, MVT::v4i64, MVT::v4i32, 3 },
2628 { ISD::ZERO_EXTEND, MVT::v4i64, MVT::v4i32, 3 },
2629
2630 { ISD::TRUNCATE, MVT::v4i1, MVT::v4i64, 4 },
2631 { ISD::TRUNCATE, MVT::v8i1, MVT::v8i32, 5 },
2632 { ISD::TRUNCATE, MVT::v16i1, MVT::v16i16, 4 },
2633 { ISD::TRUNCATE, MVT::v8i1, MVT::v8i64, 9 },
2634 { ISD::TRUNCATE, MVT::v16i1, MVT::v16i64, 11 },
2635
2636 { ISD::TRUNCATE, MVT::v16i16, MVT::v16i32, 6 },
2637 { ISD::TRUNCATE, MVT::v16i8, MVT::v16i32, 6 },
2638 { ISD::TRUNCATE, MVT::v16i8, MVT::v16i16, 2 }, // and+extract+packuswb
2639 { ISD::TRUNCATE, MVT::v16i8, MVT::v8i32, 5 },
2640 { ISD::TRUNCATE, MVT::v8i16, MVT::v8i32, 5 },
2641 { ISD::TRUNCATE, MVT::v16i8, MVT::v4i64, 5 },
2642 { ISD::TRUNCATE, MVT::v8i16, MVT::v4i64, 3 }, // and+extract+2*packusdw
2643 { ISD::TRUNCATE, MVT::v4i32, MVT::v4i64, 2 },
2644
2645 { ISD::SINT_TO_FP, MVT::v4f32, MVT::v4i1, 3 },
2646 { ISD::SINT_TO_FP, MVT::v4f64, MVT::v4i1, 3 },
2647 { ISD::SINT_TO_FP, MVT::v8f32, MVT::v8i1, 8 },
2648 { ISD::SINT_TO_FP, MVT::v8f32, MVT::v16i8, 4 },
2649 { ISD::SINT_TO_FP, MVT::v4f64, MVT::v16i8, 2 },
2650 { ISD::SINT_TO_FP, MVT::v8f32, MVT::v8i16, 4 },
2651 { ISD::SINT_TO_FP, MVT::v4f64, MVT::v8i16, 2 },
2652 { ISD::SINT_TO_FP, MVT::v4f64, MVT::v4i32, 2 },
2653 { ISD::SINT_TO_FP, MVT::v8f32, MVT::v8i32, 2 },
2654 { ISD::SINT_TO_FP, MVT::v8f64, MVT::v8i32, 4 },
2655 { ISD::SINT_TO_FP, MVT::v4f32, MVT::v2i64, 5 },
2656 { ISD::SINT_TO_FP, MVT::v4f32, MVT::v4i64, 8 },
2657
2658 { ISD::UINT_TO_FP, MVT::v4f32, MVT::v4i1, 7 },
2659 { ISD::UINT_TO_FP, MVT::v4f64, MVT::v4i1, 7 },
2660 { ISD::UINT_TO_FP, MVT::v8f32, MVT::v8i1, 6 },
2661 { ISD::UINT_TO_FP, MVT::v8f32, MVT::v16i8, 4 },
2662 { ISD::UINT_TO_FP, MVT::v4f64, MVT::v16i8, 2 },
2663 { ISD::UINT_TO_FP, MVT::v8f32, MVT::v8i16, 4 },
2664 { ISD::UINT_TO_FP, MVT::v4f64, MVT::v8i16, 2 },
2665 { ISD::UINT_TO_FP, MVT::v2f32, MVT::v2i32, 4 },
2666 { ISD::UINT_TO_FP, MVT::v2f64, MVT::v2i32, 4 },
2667 { ISD::UINT_TO_FP, MVT::v4f32, MVT::v4i32, 5 },
2668 { ISD::UINT_TO_FP, MVT::v4f64, MVT::v4i32, 6 },
2669 { ISD::UINT_TO_FP, MVT::v8f32, MVT::v8i32, 8 },
2670 { ISD::UINT_TO_FP, MVT::v8f64, MVT::v8i32, 10 },
2671 { ISD::UINT_TO_FP, MVT::v2f32, MVT::v2i64, 10 },
2672 { ISD::UINT_TO_FP, MVT::v4f32, MVT::v4i64, 18 },
2673 { ISD::UINT_TO_FP, MVT::v2f64, MVT::v2i64, 5 },
2674 { ISD::UINT_TO_FP, MVT::v4f64, MVT::v4i64, 10 },
2675
2676 { ISD::FP_TO_SINT, MVT::v16i8, MVT::v8f32, 2 },
2677 { ISD::FP_TO_SINT, MVT::v16i8, MVT::v4f64, 2 },
2678 { ISD::FP_TO_SINT, MVT::v32i8, MVT::v8f32, 2 },
2679 { ISD::FP_TO_SINT, MVT::v32i8, MVT::v4f64, 2 },
2680 { ISD::FP_TO_SINT, MVT::v8i16, MVT::v8f32, 2 },
2681 { ISD::FP_TO_SINT, MVT::v8i16, MVT::v4f64, 2 },
2682 { ISD::FP_TO_SINT, MVT::v16i16, MVT::v8f32, 2 },
2683 { ISD::FP_TO_SINT, MVT::v16i16, MVT::v4f64, 2 },
2684 { ISD::FP_TO_SINT, MVT::v4i32, MVT::v4f64, 2 },
2685 { ISD::FP_TO_SINT, MVT::v8i32, MVT::v8f32, 2 },
2686 { ISD::FP_TO_SINT, MVT::v8i32, MVT::v8f64, 5 },
2687
2688 { ISD::FP_TO_UINT, MVT::v16i8, MVT::v8f32, 2 },
2689 { ISD::FP_TO_UINT, MVT::v16i8, MVT::v4f64, 2 },
2690 { ISD::FP_TO_UINT, MVT::v32i8, MVT::v8f32, 2 },
2691 { ISD::FP_TO_UINT, MVT::v32i8, MVT::v4f64, 2 },
2692 { ISD::FP_TO_UINT, MVT::v8i16, MVT::v8f32, 2 },
2693 { ISD::FP_TO_UINT, MVT::v8i16, MVT::v4f64, 2 },
2694 { ISD::FP_TO_UINT, MVT::v16i16, MVT::v8f32, 2 },
2695 { ISD::FP_TO_UINT, MVT::v16i16, MVT::v4f64, 2 },
2696 { ISD::FP_TO_UINT, MVT::v4i32, MVT::v4f32, 3 },
2697 { ISD::FP_TO_UINT, MVT::v4i32, MVT::v2f64, 4 },
2698 { ISD::FP_TO_UINT, MVT::v4i32, MVT::v4f64, 6 },
2699 { ISD::FP_TO_UINT, MVT::v8i32, MVT::v8f32, 7 },
2700 { ISD::FP_TO_UINT, MVT::v8i32, MVT::v4f64, 7 },
2701
2702 { ISD::FP_EXTEND, MVT::v4f64, MVT::v4f32, 1 },
2703 { ISD::FP_ROUND, MVT::v4f32, MVT::v4f64, 1 },
2704 };
2705
2706 static const TypeConversionCostTblEntry SSE41ConversionTbl[] = {
2707 { ISD::ZERO_EXTEND, MVT::v2i64, MVT::v16i8, 1 },
2708 { ISD::SIGN_EXTEND, MVT::v2i64, MVT::v16i8, 1 },
2709 { ISD::ZERO_EXTEND, MVT::v4i32, MVT::v16i8, 1 },
2710 { ISD::SIGN_EXTEND, MVT::v4i32, MVT::v16i8, 1 },
2711 { ISD::ZERO_EXTEND, MVT::v8i16, MVT::v16i8, 1 },
2712 { ISD::SIGN_EXTEND, MVT::v8i16, MVT::v16i8, 1 },
2713 { ISD::ZERO_EXTEND, MVT::v2i64, MVT::v8i16, 1 },
2714 { ISD::SIGN_EXTEND, MVT::v2i64, MVT::v8i16, 1 },
2715 { ISD::ZERO_EXTEND, MVT::v4i32, MVT::v8i16, 1 },
2716 { ISD::SIGN_EXTEND, MVT::v4i32, MVT::v8i16, 1 },
2717 { ISD::ZERO_EXTEND, MVT::v2i64, MVT::v4i32, 1 },
2718 { ISD::SIGN_EXTEND, MVT::v2i64, MVT::v4i32, 1 },
2719
2720 // These truncates end up widening elements.
2721 { ISD::TRUNCATE, MVT::v2i1, MVT::v2i8, 1 }, // PMOVZXBQ
2722 { ISD::TRUNCATE, MVT::v2i1, MVT::v2i16, 1 }, // PMOVZXWQ
2723 { ISD::TRUNCATE, MVT::v4i1, MVT::v4i8, 1 }, // PMOVZXBD
2724
2725 { ISD::TRUNCATE, MVT::v16i8, MVT::v4i32, 2 },
2726 { ISD::TRUNCATE, MVT::v8i16, MVT::v4i32, 2 },
2727 { ISD::TRUNCATE, MVT::v16i8, MVT::v2i64, 2 },
2728
2729 { ISD::SINT_TO_FP, MVT::f32, MVT::i32, 1 },
2730 { ISD::SINT_TO_FP, MVT::f64, MVT::i32, 1 },
2731 { ISD::SINT_TO_FP, MVT::f32, MVT::i64, 1 },
2732 { ISD::SINT_TO_FP, MVT::f64, MVT::i64, 1 },
2733 { ISD::SINT_TO_FP, MVT::v4f32, MVT::v16i8, 1 },
2734 { ISD::SINT_TO_FP, MVT::v2f64, MVT::v16i8, 1 },
2735 { ISD::SINT_TO_FP, MVT::v4f32, MVT::v8i16, 1 },
2736 { ISD::SINT_TO_FP, MVT::v2f64, MVT::v8i16, 1 },
2737 { ISD::SINT_TO_FP, MVT::v4f32, MVT::v4i32, 1 },
2738 { ISD::SINT_TO_FP, MVT::v2f64, MVT::v4i32, 1 },
2739 { ISD::SINT_TO_FP, MVT::v4f64, MVT::v4i32, 2 },
2740
2741 { ISD::UINT_TO_FP, MVT::f32, MVT::i32, 1 },
2742 { ISD::UINT_TO_FP, MVT::f64, MVT::i32, 1 },
2743 { ISD::UINT_TO_FP, MVT::f32, MVT::i64, 4 },
2744 { ISD::UINT_TO_FP, MVT::f64, MVT::i64, 4 },
2745 { ISD::UINT_TO_FP, MVT::v4f32, MVT::v16i8, 1 },
2746 { ISD::UINT_TO_FP, MVT::v2f64, MVT::v16i8, 1 },
2747 { ISD::UINT_TO_FP, MVT::v4f32, MVT::v8i16, 1 },
2748 { ISD::UINT_TO_FP, MVT::v2f64, MVT::v8i16, 1 },
2749 { ISD::UINT_TO_FP, MVT::v2f32, MVT::v2i32, 3 },
2750 { ISD::UINT_TO_FP, MVT::v4f32, MVT::v4i32, 3 },
2751 { ISD::UINT_TO_FP, MVT::v2f64, MVT::v4i32, 2 },
2752 { ISD::UINT_TO_FP, MVT::v4f32, MVT::v2i64, 12 },
2753 { ISD::UINT_TO_FP, MVT::v4f32, MVT::v4i64, 22 },
2754 { ISD::UINT_TO_FP, MVT::v2f64, MVT::v2i64, 4 },
2755
2756 { ISD::FP_TO_SINT, MVT::i32, MVT::f32, 1 },
2757 { ISD::FP_TO_SINT, MVT::i64, MVT::f32, 1 },
2758 { ISD::FP_TO_SINT, MVT::i32, MVT::f64, 1 },
2759 { ISD::FP_TO_SINT, MVT::i64, MVT::f64, 1 },
2760 { ISD::FP_TO_SINT, MVT::v16i8, MVT::v4f32, 2 },
2761 { ISD::FP_TO_SINT, MVT::v16i8, MVT::v2f64, 2 },
2762 { ISD::FP_TO_SINT, MVT::v8i16, MVT::v4f32, 1 },
2763 { ISD::FP_TO_SINT, MVT::v8i16, MVT::v2f64, 1 },
2764 { ISD::FP_TO_SINT, MVT::v4i32, MVT::v4f32, 1 },
2765 { ISD::FP_TO_SINT, MVT::v4i32, MVT::v2f64, 1 },
2766
2767 { ISD::FP_TO_UINT, MVT::i32, MVT::f32, 1 },
2768 { ISD::FP_TO_UINT, MVT::i64, MVT::f32, 4 },
2769 { ISD::FP_TO_UINT, MVT::i32, MVT::f64, 1 },
2770 { ISD::FP_TO_UINT, MVT::i64, MVT::f64, 4 },
2771 { ISD::FP_TO_UINT, MVT::v16i8, MVT::v4f32, 2 },
2772 { ISD::FP_TO_UINT, MVT::v16i8, MVT::v2f64, 2 },
2773 { ISD::FP_TO_UINT, MVT::v8i16, MVT::v4f32, 1 },
2774 { ISD::FP_TO_UINT, MVT::v8i16, MVT::v2f64, 1 },
2775 { ISD::FP_TO_UINT, MVT::v4i32, MVT::v4f32, 4 },
2776 { ISD::FP_TO_UINT, MVT::v4i32, MVT::v2f64, 4 },
2777 };
2778
2779 static const TypeConversionCostTblEntry SSE2ConversionTbl[] = {
2780 // These are somewhat magic numbers, justified by comparing the
2781 // output of llvm-mca for our various supported scheduler models
2782 // and basing the values on the worst-case scenario.
2783 { ISD::SINT_TO_FP, MVT::f32, MVT::i32, 3 },
2784 { ISD::SINT_TO_FP, MVT::f64, MVT::i32, 3 },
2785 { ISD::SINT_TO_FP, MVT::f32, MVT::i64, 3 },
2786 { ISD::SINT_TO_FP, MVT::f64, MVT::i64, 3 },
2787 { ISD::SINT_TO_FP, MVT::v4f32, MVT::v16i8, 3 },
2788 { ISD::SINT_TO_FP, MVT::v2f64, MVT::v16i8, 4 },
2789 { ISD::SINT_TO_FP, MVT::v4f32, MVT::v8i16, 3 },
2790 { ISD::SINT_TO_FP, MVT::v2f64, MVT::v8i16, 4 },
2791 { ISD::SINT_TO_FP, MVT::v4f32, MVT::v4i32, 3 },
2792 { ISD::SINT_TO_FP, MVT::v2f64, MVT::v4i32, 4 },
2793 { ISD::SINT_TO_FP, MVT::v4f32, MVT::v2i64, 8 },
2794 { ISD::SINT_TO_FP, MVT::v2f64, MVT::v2i64, 8 },
2795
2796 { ISD::UINT_TO_FP, MVT::f32, MVT::i32, 3 },
2797 { ISD::UINT_TO_FP, MVT::f64, MVT::i32, 3 },
2798 { ISD::UINT_TO_FP, MVT::f32, MVT::i64, 8 },
2799 { ISD::UINT_TO_FP, MVT::f64, MVT::i64, 9 },
2800 { ISD::UINT_TO_FP, MVT::v2f64, MVT::v16i8, 4 },
2801 { ISD::UINT_TO_FP, MVT::v4f32, MVT::v16i8, 4 },
2802 { ISD::UINT_TO_FP, MVT::v4f32, MVT::v8i16, 4 },
2803 { ISD::UINT_TO_FP, MVT::v2f64, MVT::v8i16, 4 },
2804 { ISD::UINT_TO_FP, MVT::v2f32, MVT::v2i32, 7 },
2805 { ISD::UINT_TO_FP, MVT::v2f64, MVT::v4i32, 7 },
2806 { ISD::UINT_TO_FP, MVT::v4f32, MVT::v4i32, 5 },
2807 { ISD::UINT_TO_FP, MVT::v2f64, MVT::v2i64, 15 },
2808 { ISD::UINT_TO_FP, MVT::v4f32, MVT::v2i64, 18 },
2809
2810 { ISD::FP_TO_SINT, MVT::i32, MVT::f32, 4 },
2811 { ISD::FP_TO_SINT, MVT::i64, MVT::f32, 4 },
2812 { ISD::FP_TO_SINT, MVT::i32, MVT::f64, 4 },
2813 { ISD::FP_TO_SINT, MVT::i64, MVT::f64, 4 },
2814 { ISD::FP_TO_SINT, MVT::v16i8, MVT::v4f32, 6 },
2815 { ISD::FP_TO_SINT, MVT::v16i8, MVT::v2f64, 6 },
2816 { ISD::FP_TO_SINT, MVT::v8i16, MVT::v4f32, 5 },
2817 { ISD::FP_TO_SINT, MVT::v8i16, MVT::v2f64, 5 },
2818 { ISD::FP_TO_SINT, MVT::v4i32, MVT::v4f32, 4 },
2819 { ISD::FP_TO_SINT, MVT::v4i32, MVT::v2f64, 4 },
2820
2821 { ISD::FP_TO_UINT, MVT::i32, MVT::f32, 4 },
2822 { ISD::FP_TO_UINT, MVT::i64, MVT::f32, 4 },
2823 { ISD::FP_TO_UINT, MVT::i32, MVT::f64, 4 },
2824 { ISD::FP_TO_UINT, MVT::i64, MVT::f64, 15 },
2825 { ISD::FP_TO_UINT, MVT::v16i8, MVT::v4f32, 6 },
2826 { ISD::FP_TO_UINT, MVT::v16i8, MVT::v2f64, 6 },
2827 { ISD::FP_TO_UINT, MVT::v8i16, MVT::v4f32, 5 },
2828 { ISD::FP_TO_UINT, MVT::v8i16, MVT::v2f64, 5 },
2829 { ISD::FP_TO_UINT, MVT::v4i32, MVT::v4f32, 8 },
2830 { ISD::FP_TO_UINT, MVT::v4i32, MVT::v2f64, 8 },
2831
2832 { ISD::ZERO_EXTEND, MVT::v2i64, MVT::v16i8, 4 },
2833 { ISD::SIGN_EXTEND, MVT::v2i64, MVT::v16i8, 4 },
2834 { ISD::ZERO_EXTEND, MVT::v4i32, MVT::v16i8, 2 },
2835 { ISD::SIGN_EXTEND, MVT::v4i32, MVT::v16i8, 3 },
2836 { ISD::ZERO_EXTEND, MVT::v8i16, MVT::v16i8, 1 },
2837 { ISD::SIGN_EXTEND, MVT::v8i16, MVT::v16i8, 2 },
2838 { ISD::ZERO_EXTEND, MVT::v2i64, MVT::v8i16, 2 },
2839 { ISD::SIGN_EXTEND, MVT::v2i64, MVT::v8i16, 3 },
2840 { ISD::ZERO_EXTEND, MVT::v4i32, MVT::v8i16, 1 },
2841 { ISD::SIGN_EXTEND, MVT::v4i32, MVT::v8i16, 2 },
2842 { ISD::ZERO_EXTEND, MVT::v2i64, MVT::v4i32, 1 },
2843 { ISD::SIGN_EXTEND, MVT::v2i64, MVT::v4i32, 2 },
2844
2845 // These truncates are really widening elements.
2846 { ISD::TRUNCATE, MVT::v2i1, MVT::v2i32, 1 }, // PSHUFD
2847 { ISD::TRUNCATE, MVT::v2i1, MVT::v2i16, 2 }, // PUNPCKLWD+DQ
2848 { ISD::TRUNCATE, MVT::v2i1, MVT::v2i8, 3 }, // PUNPCKLBW+WD+PSHUFD
2849 { ISD::TRUNCATE, MVT::v4i1, MVT::v4i16, 1 }, // PUNPCKLWD
2850 { ISD::TRUNCATE, MVT::v4i1, MVT::v4i8, 2 }, // PUNPCKLBW+WD
2851 { ISD::TRUNCATE, MVT::v8i1, MVT::v8i8, 1 }, // PUNPCKLBW
2852
2853 { ISD::TRUNCATE, MVT::v16i8, MVT::v8i16, 2 }, // PAND+PACKUSWB
2854 { ISD::TRUNCATE, MVT::v16i8, MVT::v16i16, 3 },
2855 { ISD::TRUNCATE, MVT::v16i8, MVT::v4i32, 3 }, // PAND+2*PACKUSWB
2856 { ISD::TRUNCATE, MVT::v16i8, MVT::v16i32, 7 },
2857 { ISD::TRUNCATE, MVT::v2i16, MVT::v2i32, 1 },
2858 { ISD::TRUNCATE, MVT::v8i16, MVT::v4i32, 3 },
2859 { ISD::TRUNCATE, MVT::v8i16, MVT::v8i32, 5 },
2860 { ISD::TRUNCATE, MVT::v16i16, MVT::v16i32, 10 },
2861 { ISD::TRUNCATE, MVT::v16i8, MVT::v2i64, 4 }, // PAND+3*PACKUSWB
2862 { ISD::TRUNCATE, MVT::v8i16, MVT::v2i64, 2 }, // PSHUFD+PSHUFLW
2863 { ISD::TRUNCATE, MVT::v4i32, MVT::v2i64, 1 }, // PSHUFD
2864 };
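// [editor's note, not in the original source] A minimal sketch of how the
// conversion tables above are consumed, assuming the CostTable.h-style helper
// used below: entries are matched linearly on (ISD opcode, destination MVT,
// source MVT) and the first hit wins, which is why the per-ISA tables are
// queried from the most specific feature set down to SSE2. "Entry" and
// "lookupConvert" here are hypothetical, simplified names:
//
//   struct Entry { int ISD; MVT Dst; MVT Src; unsigned Cost; };
//   const Entry *lookupConvert(ArrayRef<Entry> Tbl, int ISD, MVT Dst, MVT Src) {
//     for (const Entry &E : Tbl)          // linear scan, first match wins
//       if (E.ISD == ISD && E.Dst == Dst && E.Src == Src)
//         return &E;
//     return nullptr;                     // caller falls through to an older table
//   }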
2865
2866 // Attempt to map directly to (simple) MVT types to let us match custom entries.
2867 EVT SrcTy = TLI->getValueType(DL, Src);
2868 EVT DstTy = TLI->getValueType(DL, Dst);
2869
2870 // The function getSimpleVT only handles simple value types.
2871 if (SrcTy.isSimple() && DstTy.isSimple()) {
2872 MVT SimpleSrcTy = SrcTy.getSimpleVT();
2873 MVT SimpleDstTy = DstTy.getSimpleVT();
2874
2875 if (ST->useAVX512Regs()) {
2876 if (ST->hasBWI())
2877 if (const auto *Entry = ConvertCostTableLookup(
2878 AVX512BWConversionTbl, ISD, SimpleDstTy, SimpleSrcTy))
2879 return AdjustCost(Entry->Cost);
2880
2881 if (ST->hasDQI())
2882 if (const auto *Entry = ConvertCostTableLookup(
2883 AVX512DQConversionTbl, ISD, SimpleDstTy, SimpleSrcTy))
2884 return AdjustCost(Entry->Cost);
2885
2886 if (ST->hasAVX512())
2887 if (const auto *Entry = ConvertCostTableLookup(
2888 AVX512FConversionTbl, ISD, SimpleDstTy, SimpleSrcTy))
2889 return AdjustCost(Entry->Cost);
2890 }
2891
2892 if (ST->hasBWI())
2893 if (const auto *Entry = ConvertCostTableLookup(
2894 AVX512BWVLConversionTbl, ISD, SimpleDstTy, SimpleSrcTy))
2895 return AdjustCost(Entry->Cost);
2896
2897 if (ST->hasDQI())
2898 if (const auto *Entry = ConvertCostTableLookup(
2899 AVX512DQVLConversionTbl, ISD, SimpleDstTy, SimpleSrcTy))
2900 return AdjustCost(Entry->Cost);
2901
2902 if (ST->hasAVX512())
2903 if (const auto *Entry = ConvertCostTableLookup(AVX512VLConversionTbl, ISD,
2904 SimpleDstTy, SimpleSrcTy))
2905 return AdjustCost(Entry->Cost);
2906
2907 if (ST->hasAVX2()) {
2908 if (const auto *Entry = ConvertCostTableLookup(AVX2ConversionTbl, ISD,
2909 SimpleDstTy, SimpleSrcTy))
2910 return AdjustCost(Entry->Cost);
2911 }
2912
2913 if (ST->hasAVX()) {
2914 if (const auto *Entry = ConvertCostTableLookup(AVXConversionTbl, ISD,
2915 SimpleDstTy, SimpleSrcTy))
2916 return AdjustCost(Entry->Cost);
2917 }
2918
2919 if (ST->hasSSE41()) {
2920 if (const auto *Entry = ConvertCostTableLookup(SSE41ConversionTbl, ISD,
2921 SimpleDstTy, SimpleSrcTy))
2922 return AdjustCost(Entry->Cost);
2923 }
2924
2925 if (ST->hasSSE2()) {
2926 if (const auto *Entry = ConvertCostTableLookup(SSE2ConversionTbl, ISD,
2927 SimpleDstTy, SimpleSrcTy))
2928 return AdjustCost(Entry->Cost);
2929 }
2930 }
2931
2932 // Fall back to legalized types.
2933 std::pair<InstructionCost, MVT> LTSrc = getTypeLegalizationCost(Src);
2934 std::pair<InstructionCost, MVT> LTDest = getTypeLegalizationCost(Dst);
2935
2936 // If we're truncating to the same legalized type, just assume it's free.
2937 if (ISD == ISD::TRUNCATE && LTSrc.second == LTDest.second)
2938 return TTI::TCC_Free;
2939
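// [editor's note, not in the original source] Each legalized-type table hit
// below is scaled by std::max(LTSrc.first, LTDest.first), the larger of the
// two legalization split counts. Illustrative arithmetic with assumed values:
// a source that legalizes into 2 registers and a destination that legalizes
// into 1, with a table cost of 3, is reported as max(2, 1) * 3 = 6.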
2940 if (ST->useAVX512Regs()) {
2941 if (ST->hasBWI())
2942 if (const auto *Entry = ConvertCostTableLookup(
2943 AVX512BWConversionTbl, ISD, LTDest.second, LTSrc.second))
2944 return AdjustCost(std::max(LTSrc.first, LTDest.first) * Entry->Cost);
2945
2946 if (ST->hasDQI())
2947 if (const auto *Entry = ConvertCostTableLookup(
2948 AVX512DQConversionTbl, ISD, LTDest.second, LTSrc.second))
2949 return AdjustCost(std::max(LTSrc.first, LTDest.first) * Entry->Cost);
2950
2951 if (ST->hasAVX512())
2952 if (const auto *Entry = ConvertCostTableLookup(
2953 AVX512FConversionTbl, ISD, LTDest.second, LTSrc.second))
2954 return AdjustCost(std::max(LTSrc.first, LTDest.first) * Entry->Cost);
2955 }
2956
2957 if (ST->hasBWI())
2958 if (const auto *Entry = ConvertCostTableLookup(AVX512BWVLConversionTbl, ISD,
2959 LTDest.second, LTSrc.second))
2960 return AdjustCost(std::max(LTSrc.first, LTDest.first) * Entry->Cost);
2961
2962 if (ST->hasDQI())
2963 if (const auto *Entry = ConvertCostTableLookup(AVX512DQVLConversionTbl, ISD,
2964 LTDest.second, LTSrc.second))
2965 return AdjustCost(std::max(LTSrc.first, LTDest.first) * Entry->Cost);
2966
2967 if (ST->hasAVX512())
2968 if (const auto *Entry = ConvertCostTableLookup(AVX512VLConversionTbl, ISD,
2969 LTDest.second, LTSrc.second))
2970 return AdjustCost(std::max(LTSrc.first, LTDest.first) * Entry->Cost);
2971
2972 if (ST->hasAVX2())
2973 if (const auto *Entry = ConvertCostTableLookup(AVX2ConversionTbl, ISD,
2974 LTDest.second, LTSrc.second))
2975 return AdjustCost(std::max(LTSrc.first, LTDest.first) * Entry->Cost);
2976
2977 if (ST->hasAVX())
2978 if (const auto *Entry = ConvertCostTableLookup(AVXConversionTbl, ISD,
2979 LTDest.second, LTSrc.second))
2980 return AdjustCost(std::max(LTSrc.first, LTDest.first) * Entry->Cost);
2981
2982 if (ST->hasSSE41())
2983 if (const auto *Entry = ConvertCostTableLookup(SSE41ConversionTbl, ISD,
2984 LTDest.second, LTSrc.second))
2985 return AdjustCost(std::max(LTSrc.first, LTDest.first) * Entry->Cost);
2986
2987 if (ST->hasSSE2())
2988 if (const auto *Entry = ConvertCostTableLookup(SSE2ConversionTbl, ISD,
2989 LTDest.second, LTSrc.second))
2990 return AdjustCost(std::max(LTSrc.first, LTDest.first) * Entry->Cost);
2991
2992 // Fallback: for i8/i16 sitofp/uitofp cases we need to extend to i32 first,
2993 // then convert via sitofp.
2994 if ((ISD == ISD::SINT_TO_FP || ISD == ISD::UINT_TO_FP) &&
2995 1 < Src->getScalarSizeInBits() && Src->getScalarSizeInBits() < 32) {
2996 Type *ExtSrc = Src->getWithNewBitWidth(32);
2997 unsigned ExtOpc =
2998 (ISD == ISD::SINT_TO_FP) ? Instruction::SExt : Instruction::ZExt;
2999
3000 // For scalar loads the extend would be free.
3001 InstructionCost ExtCost = 0;
3002 if (!(Src->isIntegerTy() && I && isa<LoadInst>(I->getOperand(0))))
3003 ExtCost = getCastInstrCost(ExtOpc, ExtSrc, Src, CCH, CostKind);
3004
3005 return ExtCost + getCastInstrCost(Instruction::SIToFP, Dst, ExtSrc,
3006 TTI::CastContextHint::None, CostKind);
3007 }
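// [editor's note, not in the original source] Worked example for the block
// above, with assumed types: costing sitofp <8 x i16> -> <8 x float> becomes
//   ExtCost  = cost(sext <8 x i16> -> <8 x i32>)    // 0 for a scalar load
//   ConvCost = cost(sitofp <8 x i32> -> <8 x float>)
// and the total is ExtCost + ConvCost. UINT_TO_FP widens with ZExt instead,
// but still prices the conversion as SIToFP: the zero-extended i32 value is
// always non-negative, so the signed convert covers it.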
3008
3009 // Fallback: for i8/i16 fptosi/fptoui cases we compute an fptosi to i32 and
3010 // then truncate the result.
3011 if ((ISD == ISD::FP_TO_SINT || ISD == ISD::FP_TO_UINT) &&
3012 1 < Dst->getScalarSizeInBits() && Dst->getScalarSizeInBits() < 32) {
3013 Type *TruncDst = Dst->getWithNewBitWidth(32);
3014 return getCastInstrCost(Instruction::FPToSI, TruncDst, Src, CCH, CostKind) +
3015 getCastInstrCost(Instruction::Trunc, Dst, TruncDst,
3016 TTI::CastContextHint::None, CostKind);
3017 }
3018
3019 return AdjustCost(
3020 BaseT::getCastInstrCost(Opcode, Dst, Src, CCH, CostKind, I));
3021}
3022
3023InstructionCost X86TTIImpl::getCmpSelInstrCost(unsigned Opcode, Type *ValTy,
3024 Type *CondTy,
3025 CmpInst::Predicate VecPred,
3026 TTI::TargetCostKind CostKind,
3027 const Instruction *I) {
3028 // Early out if this type isn't scalar/vector integer/float.
3029 if (!(ValTy->isIntOrIntVectorTy() || ValTy->isFPOrFPVectorTy()))
3030 return BaseT::getCmpSelInstrCost(Opcode, ValTy, CondTy, VecPred, CostKind,
3031 I);
3032
3033 // Legalize the type.
3034 std::pair<InstructionCost, MVT> LT = getTypeLegalizationCost(ValTy);
3035
3036 MVT MTy = LT.second;
3037
3038 int ISD = TLI->InstructionOpcodeToISD(Opcode);
3039 assert(ISD && "Invalid opcode");
3040
3041 InstructionCost ExtraCost = 0;
3042 if (Opcode == Instruction::ICmp || Opcode == Instruction::FCmp) {
3043 // Some vector comparison predicates cost extra instructions.
3044 // TODO: Should we invert this and assume worst case cmp costs
3045 // and reduce for particular predicates?
3046 if (MTy.isVector() &&
3047 !((ST->hasXOP() && (!ST->hasAVX2() || MTy.is128BitVector())) ||
3048 (ST->hasAVX512() && 32 <= MTy.getScalarSizeInBits()) ||
3049 ST->hasBWI())) {
3050 // Fall back to I if a specific predicate wasn't specified.
3051 CmpInst::Predicate Pred = VecPred;
3052 if (I && (Pred == CmpInst::BAD_ICMP_PREDICATE ||
3053 Pred == CmpInst::BAD_FCMP_PREDICATE))
3054 Pred = cast<CmpInst>(I)->getPredicate();
3055
3056 switch (Pred) {
3057 case CmpInst::Predicate::ICMP_NE:
3058 // xor(cmpeq(x,y),-1)
3059 ExtraCost = 1;
3060 break;
3061 case CmpInst::Predicate::ICMP_SGE:
3062 case CmpInst::Predicate::ICMP_SLE:
3063 // xor(cmpgt(x,y),-1)
3064 ExtraCost = 1;
3065 break;
3066 case CmpInst::Predicate::ICMP_ULT:
3067 case CmpInst::Predicate::ICMP_UGT:
3068 // cmpgt(xor(x,signbit),xor(y,signbit))
3069 // xor(cmpeq(pmaxu(x,y),x),-1)
3070 ExtraCost = 2;
3071 break;
3072 case CmpInst::Predicate::ICMP_ULE:
3073 case CmpInst::Predicate::ICMP_UGE:
3074 if ((ST->hasSSE41() && MTy.getScalarSizeInBits() == 32) ||
3075 (ST->hasSSE2() && MTy.getScalarSizeInBits() < 32)) {
3076 // cmpeq(psubus(x,y),0)
3077 // cmpeq(pminu(x,y),x)
3078 ExtraCost = 1;
3079 } else {
3080 // xor(cmpgt(xor(x,signbit),xor(y,signbit)),-1)
3081 ExtraCost = 3;
3082 }
3083 break;
3084 case CmpInst::Predicate::FCMP_ONE:
3085 case CmpInst::Predicate::FCMP_UEQ:
3086 // Without AVX we need to expand FCMP_ONE/FCMP_UEQ cases.
3087 // Use the FCMP_UEQ expansion; FCMP_ONE should cost the same.
3088 if (CondTy && !ST->hasAVX())
3089 return getCmpSelInstrCost(Opcode, ValTy, CondTy,
3090 CmpInst::Predicate::FCMP_UNO, CostKind) +
3091 getCmpSelInstrCost(Opcode, ValTy, CondTy,
3092 CmpInst::Predicate::FCMP_OEQ, CostKind) +
3093 getArithmeticInstrCost(Instruction::Or, CondTy, CostKind);
3094
3095 break;
3096 case CmpInst::Predicate::BAD_ICMP_PREDICATE:
3097 case CmpInst::Predicate::BAD_FCMP_PREDICATE:
3098 // Assume worst case scenario and add the maximum extra cost.
3099 ExtraCost = 3;
3100 break;
3101 default:
3102 break;
3103 }
3104 }
3105 }
3106
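// [editor's note, not in the original source] In the CostKindTblEntry tables
// below, the four-element initializers are per-cost-kind values. Assuming the
// CostKindCosts layout used by this file, they read left to right as
//   { reciprocal throughput, latency, code size, size-and-latency }
// and are selected via Entry->Cost[CostKind] in the lookups that follow, so
// { 4, 4, 1, 3 } means throughput 4, latency 4, code size 1, size+latency 3.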
3107 static const CostKindTblEntry SLMCostTbl[] = {
3108 // slm pcmpeq/pcmpgt throughput is 2
3109 { ISD::SETCC, MVT::v2i64, { 2, 5, 1, 2 } },
3110 // slm pblendvb/blendvpd/blendvps throughput is 4
3111 { ISD::SELECT, MVT::v2f64, { 4, 4, 1, 3 } }, // vblendvpd
3112 { ISD::SELECT, MVT::v4f32, { 4, 4, 1, 3 } }, // vblendvps
3113 { ISD::SELECT, MVT::v2i64, { 4, 4, 1, 3 } }, // pblendvb
3114 { ISD::SELECT, MVT::v8i32, { 4, 4, 1, 3 } }, // pblendvb
3115 { ISD::SELECT, MVT::v8i16, { 4, 4, 1, 3 } }, // pblendvb
3116 { ISD::SELECT, MVT::v16i8, { 4, 4, 1, 3 } }, // pblendvb
3117 };
3118
3119 static const CostKindTblEntry AVX512BWCostTbl[] = {
3120 { ISD::SETCC, MVT::v32i16, { 1, 1, 1, 1 } },
3121 { ISD::SETCC, MVT::v16i16, { 1, 1, 1, 1 } },
3122 { ISD::SETCC, MVT::v64i8, { 1, 1, 1, 1 } },
3123 { ISD::SETCC, MVT::v32i8, { 1, 1, 1, 1 } },
3124
3125 { ISD::SELECT, MVT::v32i16, { 1, 1, 1, 1 } },
3126 { ISD::SELECT, MVT::v64i8, { 1, 1, 1, 1 } },
3127 };
3128
3129 static const CostKindTblEntry AVX512CostTbl[] = {
3130 { ISD::SETCC, MVT::v8f64, { 1, 4, 1, 1 } },
3131 { ISD::SETCC, MVT::v4f64, { 1, 4, 1, 1 } },
3132 { ISD::SETCC, MVT::v16f32, { 1, 4, 1, 1 } },
3133 { ISD::SETCC, MVT::v8f32, { 1, 4, 1, 1 } },
3134
3135 { ISD::SETCC, MVT::v8i64, { 1, 1, 1, 1 } },
3136 { ISD::SETCC, MVT::v4i64, { 1, 1, 1, 1 } },
3137 { ISD::SETCC, MVT::v2i64, { 1, 1, 1, 1 } },
3138 { ISD::SETCC, MVT::v16i32, { 1, 1, 1, 1 } },
3139 { ISD::SETCC, MVT::v8i32, { 1, 1, 1, 1 } },
3140 { ISD::SETCC, MVT::v32i16, { 3, 7, 5, 5 } },
3141 { ISD::SETCC, MVT::v64i8, { 3, 7, 5, 5 } },
3142
3143 { ISD::SELECT, MVT::v8i64, { 1, 1, 1, 1 } },
3144 { ISD::SELECT, MVT::v4i64, { 1, 1, 1, 1 } },
3145 { ISD::SELECT, MVT::v2i64, { 1, 1, 1, 1 } },
3146 { ISD::SELECT, MVT::v16i32, { 1, 1, 1, 1 } },
3147 { ISD::SELECT, MVT::v8i32, { 1, 1, 1, 1 } },
3148 { ISD::SELECT, MVT::v4i32, { 1, 1, 1, 1 } },
3149 { ISD::SELECT, MVT::v8f64, { 1, 1, 1, 1 } },
3150 { ISD::SELECT, MVT::v4f64, { 1, 1, 1, 1 } },
3151 { ISD::SELECT, MVT::v2f64, { 1, 1, 1, 1 } },
3152 { ISD::SELECT, MVT::f64, { 1, 1, 1, 1 } },
3153 { ISD::SELECT, MVT::v16f32, { 1, 1, 1, 1 } },
3154 { ISD::SELECT, MVT::v8f32, { 1, 1, 1, 1 } },
3155 { ISD::SELECT, MVT::v4f32, { 1, 1, 1, 1 } },
3156 { ISD::SELECT, MVT::f32, { 1, 1, 1, 1 } },
3157
3158 { ISD::SELECT, MVT::v32i16, { 2, 2, 4, 4 } },
3159 { ISD::SELECT, MVT::v16i16, { 1, 1, 1, 1 } },
3160 { ISD::SELECT, MVT::v8i16, { 1, 1, 1, 1 } },
3161 { ISD::SELECT, MVT::v64i8, { 2, 2, 4, 4 } },
3162 { ISD::SELECT, MVT::v32i8, { 1, 1, 1, 1 } },
3163 { ISD::SELECT, MVT::v16i8, { 1, 1, 1, 1 } },
3164 };
3165
3166 static const CostKindTblEntry AVX2CostTbl[] = {
3167 { ISD::SETCC, MVT::v4f64, { 1, 4, 1, 2 } },
3168 { ISD::SETCC, MVT::v2f64, { 1, 4, 1, 1 } },
3169 { ISD::SETCC, MVT::f64, { 1, 4, 1, 1 } },
3170 { ISD::SETCC, MVT::v8f32, { 1, 4, 1, 2 } },
3171 { ISD::SETCC, MVT::v4f32, { 1, 4, 1, 1 } },
3172 { ISD::SETCC, MVT::f32, { 1, 4, 1, 1 } },
3173
3174 { ISD::SETCC, MVT::v4i64, { 1, 1, 1, 2 } },
3175 { ISD::SETCC, MVT::v8i32, { 1, 1, 1, 2 } },
3176 { ISD::SETCC, MVT::v16i16, { 1, 1, 1, 2 } },
3177 { ISD::SETCC, MVT::v32i8, { 1, 1, 1, 2 } },
3178
3179 { ISD::SELECT, MVT::v4f64, { 2, 2, 1, 2 } }, // vblendvpd
3180 { ISD::SELECT, MVT::v8f32, { 2, 2, 1, 2 } }, // vblendvps
3181 { ISD::SELECT, MVT::v4i64, { 2, 2, 1, 2 } }, // pblendvb
3182 { ISD::SELECT, MVT::v8i32, { 2, 2, 1, 2 } }, // pblendvb
3183 { ISD::SELECT, MVT::v16i16, { 2, 2, 1, 2 } }, // pblendvb
3184 { ISD::SELECT, MVT::v32i8, { 2, 2, 1, 2 } }, // pblendvb
3185 };
3186
3187 static const CostKindTblEntry XOPCostTbl[] = {
3188 { ISD::SETCC, MVT::v4i64, { 4, 2, 5, 6 } },
3189 { ISD::SETCC, MVT::v2i64, { 1, 1, 1, 1 } },
3190 };
3191
3192 static const CostKindTblEntry AVX1CostTbl[] = {
3193 { ISD::SETCC, MVT::v4f64, { 2, 3, 1, 2 } },
3194 { ISD::SETCC, MVT::v2f64, { 1, 3, 1, 1 } },
3195 { ISD::SETCC, MVT::f64, { 1, 3, 1, 1 } },
3196 { ISD::SETCC, MVT::v8f32, { 2, 3, 1, 2 } },
3197 { ISD::SETCC, MVT::v4f32, { 1, 3, 1, 1 } },
3198 { ISD::SETCC, MVT::f32, { 1, 3, 1, 1 } },
3199
3200 // AVX1 does not support 8-wide integer compare.
3201 { ISD::SETCC, MVT::v4i64, { 4, 2, 5, 6 } },
3202 { ISD::SETCC, MVT::v8i32, { 4, 2, 5, 6 } },
3203 { ISD::SETCC, MVT::v16i16, { 4, 2, 5, 6 } },
3204 { ISD::SETCC, MVT::v32i8, { 4, 2, 5, 6 } },
3205
3206 { ISD::SELECT, MVT::v4f64, { 3, 3, 1, 2 } }, // vblendvpd
3207 { ISD::SELECT, MVT::v8f32, { 3, 3, 1, 2 } }, // vblendvps
3208 { ISD::SELECT, MVT::v4i64, { 3, 3, 1, 2 } }, // vblendvpd
3209 { ISD::SELECT, MVT::v8i32, { 3, 3, 1, 2 } }, // vblendvps
3210 { ISD::SELECT, MVT::v16i16, { 3, 3, 3, 3 } }, // vandps + vandnps + vorps
3211 { ISD::SELECT, MVT::v32i8, { 3, 3, 3, 3 } }, // vandps + vandnps + vorps
3212 };
3213
3214 static const CostKindTblEntry SSE42CostTbl[] = {
3215 { ISD::SETCC, MVT::v2i64, { 1, 2, 1, 2 } },
3216 };
3217
3218 static const CostKindTblEntry SSE41CostTbl[] = {
3219 { ISD::SETCC, MVT::v2f64, { 1, 5, 1, 1 } },
3220 { ISD::SETCC, MVT::v4f32, { 1, 5, 1, 1 } },
3221
3222 { ISD::SELECT, MVT::v2f64, { 2, 2, 1, 2 } }, // blendvpd
3223 { ISD::SELECT, MVT::f64, { 2, 2, 1, 2 } }, // blendvpd
3224 { ISD::SELECT, MVT::v4f32, { 2, 2, 1, 2 } }, // blendvps
3225 { ISD::SELECT, MVT::f32, { 2, 2, 1, 2 } }, // blendvps
3226 { ISD::SELECT, MVT::v2i64, { 2, 2, 1, 2 } }, // pblendvb
3227 { ISD::SELECT, MVT::v4i32, { 2, 2, 1, 2 } }, // pblendvb
3228 { ISD::SELECT, MVT::v8i16, { 2, 2, 1, 2 } }, // pblendvb
3229 { ISD::SELECT, MVT::v16i8, { 2, 2, 1, 2 } }, // pblendvb
3230 };
3231
3232 static const CostKindTblEntry SSE2CostTbl[] = {
3233 { ISD::SETCC, MVT::v2f64, { 2, 5, 1, 1 } },
3234 { ISD::SETCC, MVT::f64, { 1, 5, 1, 1 } },
3235
3236 { ISD::SETCC, MVT::v2i64, { 5, 4, 5, 5 } }, // pcmpeqd/pcmpgtd expansion
3237 { ISD::SETCC, MVT::v4i32, { 1, 1, 1, 1 } },
3238 { ISD::SETCC, MVT::v8i16, { 1, 1, 1, 1 } },
3239 { ISD::SETCC, MVT::v16i8, { 1, 1, 1, 1 } },
3240
3241 { ISD::SELECT, MVT::v2f64, { 2, 2, 3, 3 } }, // andpd + andnpd + orpd
3242 { ISD::SELECT, MVT::f64, { 2, 2, 3, 3 } }, // andpd + andnpd + orpd
3243 { ISD::SELECT, MVT::v2i64, { 2, 2, 3, 3 } }, // pand + pandn + por
3244 { ISD::SELECT, MVT::v4i32, { 2, 2, 3, 3 } }, // pand + pandn + por
3245 { ISD::SELECT, MVT::v8i16, { 2, 2, 3, 3 } }, // pand + pandn + por
3246 { ISD::SELECT, MVT::v16i8, { 2, 2, 3, 3 } }, // pand + pandn + por
3247 };
3248
3249 static const CostKindTblEntry SSE1CostTbl[] = {
3250 { ISD::SETCC, MVT::v4f32, { 2, 5, 1, 1 } },
3251 { ISD::SETCC, MVT::f32, { 1, 5, 1, 1 } },
3252
3253 { ISD::SELECT, MVT::v4f32, { 2, 2, 3, 3 } }, // andps + andnps + orps
3254 { ISD::SELECT, MVT::f32, { 2, 2, 3, 3 } }, // andps + andnps + orps
3255 };
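// [editor's note, not in the original source] The 3-op SELECT rows above
// ("andps + andnps + orps", "pand + pandn + por") are the classic bitwise
// blend used before blendv instructions existed. A sketch, assuming an
// all-ones/all-zeros per-lane mask m:
//   r = (m & a) | (~m & b);   // lane from a where m is set, else from b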
3256
3257 if (ST->useSLMArithCosts())
3258 if (const auto *Entry = CostTableLookup(SLMCostTbl, ISD, MTy))
3259 if (auto KindCost = Entry->Cost[CostKind])
3260 return LT.first * (ExtraCost + KindCost.value());
3261
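// [editor's note, not in the original source] The same pattern repeats for
// each ISA level below:
//   total = LT.first * (ExtraCost + per-kind table cost)
// where LT.first is the legalization split count. Illustrative arithmetic:
// an ICMP_ULE compare of v8i32 with only SSE2 splits into 2 x v4i32
// (LT.first = 2), takes ExtraCost = 3 for the predicate expansion, and the
// SETCC v4i32 throughput entry is 1, giving 2 * (3 + 1) = 8.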
3262 if (ST->hasBWI())
3263 if (const auto *Entry = CostTableLookup(AVX512BWCostTbl, ISD, MTy))
3264 if (auto KindCost = Entry->Cost[CostKind])
3265 return LT.first * (ExtraCost + KindCost.value());
3266
3267 if (ST->hasAVX512())
3268 if (const auto *Entry = CostTableLookup(AVX512CostTbl, ISD, MTy))
3269 if (auto KindCost = Entry->Cost[CostKind])
3270 return LT.first * (ExtraCost + KindCost.value());
3271
3272 if (ST->hasAVX2())
3273 if (const auto *Entry = CostTableLookup(AVX2CostTbl, ISD, MTy))
3274 if (auto KindCost = Entry->Cost[CostKind])
3275 return LT.first * (ExtraCost + KindCost.value());
3276
3277 if (ST->hasXOP())
3278 if (const auto *Entry = CostTableLookup(XOPCostTbl, ISD, MTy))
3279 if (auto KindCost = Entry->Cost[CostKind])
3280 return LT.first * (ExtraCost + KindCost.value());
3281
3282 if (ST->hasAVX())
3283 if (const auto *Entry = CostTableLookup(AVX1CostTbl, ISD, MTy))
3284 if (auto KindCost = Entry->Cost[CostKind])
3285 return LT.first * (ExtraCost + KindCost.value());
3286
3287 if (ST->hasSSE42())
3288 if (const auto *Entry = CostTableLookup(SSE42CostTbl, ISD, MTy))
3289 if (auto KindCost = Entry->Cost[CostKind])
3290 return LT.first * (ExtraCost + KindCost.value());
3291
3292 if (ST->hasSSE41())
3293 if (const auto *Entry = CostTableLookup(SSE41CostTbl, ISD, MTy))
3294 if (auto KindCost = Entry->Cost[CostKind])
3295 return LT.first * (ExtraCost + KindCost.value());
3296
3297 if (ST->hasSSE2())
3298 if (const auto *Entry = CostTableLookup(SSE2CostTbl, ISD, MTy))
3299 if (auto KindCost = Entry->Cost[CostKind])
3300 return LT.first * (ExtraCost + KindCost.value());
3301
3302 if (ST->hasSSE1())
3303 if (const auto *Entry = CostTableLookup(SSE1CostTbl, ISD, MTy))
3304 if (auto KindCost = Entry->Cost[CostKind])
3305 return LT.first * (ExtraCost + KindCost.value());
3306
3307 // Assume a 3cy latency for fp select ops.
3308 if (CostKind == TTI::TCK_Latency && Opcode == Instruction::Select)
3309 if (ValTy->getScalarType()->isFloatingPointTy())
3310 return 3;
3311
3312 return BaseT::getCmpSelInstrCost(Opcode, ValTy, CondTy, VecPred, CostKind, I);
3313}
3314
3315unsigned X86TTIImpl::getAtomicMemIntrinsicMaxElementSize() const { return 16; }
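// [editor's note, not in the original source] The 16-byte cap above plausibly
// corresponds to the widest x86 atomic operand (e.g. cmpxchg16b); that
// rationale is an assumption, not something stated in the source.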
3316
3317InstructionCost
3318X86TTIImpl::getIntrinsicInstrCost(const IntrinsicCostAttributes &ICA,
3319 TTI::TargetCostKind CostKind) {
3320 // Costs should match the codegen from:
3321 // BITREVERSE: llvm\test\CodeGen\X86\vector-bitreverse.ll
3322 // BSWAP: llvm\test\CodeGen\X86\bswap-vector.ll
3323 // CTLZ: llvm\test\CodeGen\X86\vector-lzcnt-*.ll
3324 // CTPOP: llvm\test\CodeGen\X86\vector-popcnt-*.ll
3325 // CTTZ: llvm\test\CodeGen\X86\vector-tzcnt-*.ll
3326
3327 // TODO: Overflow intrinsics (*ADDO, *SUBO, *MULO) with vector types are not
3328 // specialized in these tables yet.
3329 static const CostKindTblEntry AVX512VBMI2CostTbl[] = {
3330 { ISD::FSHL, MVT::v8i64, { 1, 1, 1, 1 } },
3331 { ISD::FSHL, MVT::v4i64, { 1, 1, 1, 1 } },
3332 { ISD::FSHL, MVT::v2i64, { 1, 1, 1, 1 } },
3333 { ISD::FSHL, MVT::v16i32, { 1, 1, 1, 1 } },
3334 { ISD::FSHL, MVT::v8i32, { 1, 1, 1, 1 } },
3335 { ISD::FSHL, MVT::v4i32, { 1, 1, 1, 1 } },
3336 { ISD::FSHL, MVT::v32i16, { 1, 1, 1, 1 } },
3337 { ISD::FSHL, MVT::v16i16, { 1, 1, 1, 1 } },
3338 { ISD::FSHL, MVT::v8i16, { 1, 1, 1, 1 } },
3339 { ISD::ROTL, MVT::v32i16, { 1, 1, 1, 1 } },
3340 { ISD::ROTL, MVT::v16i16, { 1, 1, 1, 1 } },
3341 { ISD::ROTL, MVT::v8i16, { 1, 1, 1, 1 } },
3342 { ISD::ROTR, MVT::v32i16, { 1, 1, 1, 1 } },
3343 { ISD::ROTR, MVT::v16i16, { 1, 1, 1, 1 } },
3344 { ISD::ROTR, MVT::v8i16, { 1, 1, 1, 1 } },
3345 };
3346 static const CostKindTblEntry AVX512BITALGCostTbl[] = {
3347 { ISD::CTPOP, MVT::v32i16, { 1, 1, 1, 1 } },
3348 { ISD::CTPOP, MVT::v64i8, { 1, 1, 1, 1 } },
3349 { ISD::CTPOP, MVT::v16i16, { 1, 1, 1, 1 } },
3350 { ISD::CTPOP, MVT::v32i8, { 1, 1, 1, 1 } },
3351 { ISD::CTPOP, MVT::v8i16, { 1, 1, 1, 1 } },
3352 { ISD::CTPOP, MVT::v16i8, { 1, 1, 1, 1 } },
3353 };
3354 static const CostKindTblEntry AVX512VPOPCNTDQCostTbl[] = {
3355 { ISD::CTPOP, MVT::v8i64, { 1, 1, 1, 1 } },
3356 { ISD::CTPOP, MVT::v16i32, { 1, 1, 1, 1 } },
3357 { ISD::CTPOP, MVT::v4i64, { 1, 1, 1, 1 } },
3358 { ISD::CTPOP, MVT::v8i32, { 1, 1, 1, 1 } },
3359 { ISD::CTPOP, MVT::v2i64, { 1, 1, 1, 1 } },
3360 { ISD::CTPOP, MVT::v4i32, { 1, 1, 1, 1 } },
3361 };
3362 static const CostKindTblEntry AVX512CDCostTbl[] = {
3363 { ISD::CTLZ, MVT::v8i64, { 1, 5, 1, 1 } },
3364 { ISD::CTLZ, MVT::v16i32, { 1, 5, 1, 1 } },
3365 { ISD::CTLZ, MVT::v32i16, { 18, 27, 23, 27 } },
3366 { ISD::CTLZ, MVT::v64i8, { 3, 16, 9, 11 } },
3367 { ISD::CTLZ, MVT::v4i64, { 1, 5, 1, 1 } },
3368 { ISD::CTLZ, MVT::v8i32, { 1, 5, 1, 1 } },
3369 { ISD::CTLZ, MVT::v16i16, { 8, 19, 11, 13 } },
3370 { ISD::CTLZ, MVT::v32i8, { 2, 11, 9, 10 } },
3371 { ISD::CTLZ, MVT::v2i64, { 1, 5, 1, 1 } },
3372 { ISD::CTLZ, MVT::v4i32, { 1, 5, 1, 1 } },
3373 { ISD::CTLZ, MVT::v8i16, { 3, 15, 4, 6 } },
3374 { ISD::CTLZ, MVT::v16i8, { 2, 10, 9, 10 } },
3375
3376 { ISD::CTTZ, MVT::v8i64, { 2, 8, 6, 7 } },
3377 { ISD::CTTZ, MVT::v16i32, { 2, 8, 6, 7 } },
3378 { ISD::CTTZ, MVT::v4i64, { 1, 8, 6, 6 } },
3379 { ISD::CTTZ, MVT::v8i32, { 1, 8, 6, 6 } },
3380 { ISD::CTTZ, MVT::v2i64, { 1, 8, 6, 6 } },
3381 { ISD::CTTZ, MVT::v4i32, { 1, 8, 6, 6 } },
3382 };
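// [editor's note, not in the original source] With AVX512CD the 32/64-bit
// CTLZ rows above cost ~1 because a native lane-wise leading-zero count
// (vplzcnt) exists for dword/qword lanes; the CTTZ rows presumably reflect
// the usual rewrite in terms of ctlz, e.g. for lane width W:
//   cttz(x) = (W - 1) - ctlz(x & -x)   // x & -x isolates the lowest set bit
// which accounts for the extra negate/and/subtract in the CTTZ costs.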
3383 static const CostKindTblEntry AVX512BWCostTbl[] = {
3384 { ISD::ABS, MVT::v32i16, { 1, 1, 1, 1 } },
3385 { ISD::ABS, MVT::v64i8, { 1, 1, 1, 1 } },
3386 { ISD::BITREVERSE, MVT::v8i64, { 3 } },
3387 { ISD::BITREVERSE, MVT::v16i32, { 3 } },
3388 { ISD::BITREVERSE, MVT::v32i16, { 3 } },
3389 { ISD::BITREVERSE, MVT::v64i8, { 2 } },
3390 { ISD::BSWAP, MVT::v8i64, { 1 } },
3391 { ISD::BSWAP, MVT::v16i32, { 1 } },
3392 { ISD::BSWAP, MVT::v32i16, { 1 } },
3393 { ISD::CTLZ, MVT::v8i64, { 8, 22, 23, 23 } },
3394 { ISD::CTLZ, MVT::v16i32, { 8, 23, 25, 25 } },
3395 { ISD::CTLZ, MVT::v32i16, { 4, 15, 15, 16 } },
3396 { ISD::CTLZ, MVT::v64i8, { 3, 12, 10, 9 } },
3397 { ISD::CTPOP, MVT::v2i64, { 3, 7, 10, 10 } },
3398 { ISD::CTPOP, MVT::v4i64, { 3, 7, 10, 10 } },
3399 { ISD::CTPOP, MVT::v8i64, { 3, 8, 10, 12 } },
3400 { ISD::CTPOP, MVT::v4i32, { 7, 11, 14, 14 } },
3401 { ISD::CTPOP, MVT::v8i32, { 7, 11, 14, 14 } },
3402 { ISD::CTPOP, MVT::v16i32, { 7, 12, 14, 16 } },
3403 { ISD::CTPOP, MVT::v8i16, { 2, 7, 11, 11 } },
3404 { ISD::CTPOP, MVT::v16i16, { 2, 7, 11, 11 } },
3405 { ISD::CTPOP, MVT::v32i16, { 3, 7, 11, 13 } },
3406 { ISD::CTPOP, MVT::v16i8, { 2, 4, 8, 8 } },
3407 { ISD::CTPOP, MVT::v32i8, { 2, 4, 8, 8 } },
3408 { ISD::CTPOP, MVT::v64i8, { 2, 5, 8, 10 } },
3409 { ISD::CTTZ, MVT::v8i16, { 3, 9, 14, 14 } },
3410 { ISD::CTTZ, MVT::v16i16, { 3, 9, 14, 14 } },
3411 { ISD::CTTZ, MVT::v32i16, { 3, 10, 14, 16 } },
3412 { ISD::CTTZ, MVT::v16i8, { 2, 6, 11, 11 } },
3413 { ISD::CTTZ, MVT::v32i8, { 2, 6, 11, 11 } },
3414 { ISD::CTTZ, MVT::v64i8, { 3, 7, 11, 13 } },
3415 { ISD::ROTL, MVT::v32i16, { 2, 8, 6, 8 } },
3416 { ISD::ROTL, MVT::v16i16, { 2, 8, 6, 7 } },
3417 { ISD::ROTL, MVT::v8i16, { 2, 7, 6, 7 } },
3418 { ISD::ROTL, MVT::v64i8, { 5, 6, 11, 12 } },
3419 { ISD::ROTL, MVT::v32i8, { 5, 15, 7, 10 } },
3420 { ISD::ROTL, MVT::v16i8, { 5, 15, 7, 10 } },
3421 { ISD::ROTR, MVT::v32i16, { 2, 8, 6, 8 } },
3422 { ISD::ROTR, MVT::v16i16, { 2, 8, 6, 7 } },
3423 { ISD::ROTR, MVT::v8i16, { 2, 7, 6, 7 } },
3424 { ISD::ROTR, MVT::v64i8, { 5, 6, 12, 14 } },
3425 { ISD::ROTR, MVT::v32i8, { 5, 14, 6, 9 } },
3426 { ISD::ROTR, MVT::v16i8, { 5, 14, 6, 9 } },
3427 { ISD::SADDSAT, MVT::v32i16, { 1 } },
3428 { ISD::SADDSAT, MVT::v64i8, { 1 } },
3429 { ISD::SMAX, MVT::v32i16, { 1, 1, 1, 1 } },
3430 { ISD::SMAX, MVT::v64i8, { 1, 1, 1, 1 } },
3431 { ISD::SMIN, MVT::v32i16, { 1, 1, 1, 1 } },
3432 { ISD::SMIN, MVT::v64i8, { 1, 1, 1, 1 } },
3433 { ISD::SSUBSAT, MVT::v32i16, { 1 } },
3434 { ISD::SSUBSAT, MVT::v64i8, { 1 } },
3435 { ISD::UADDSAT, MVT::v32i16, { 1 } },
3436 { ISD::UADDSAT, MVT::v64i8, { 1 } },
3437 { ISD::UMAX, MVT::v32i16, { 1, 1, 1, 1 } },
3438 { ISD::UMAX, MVT::v64i8, { 1, 1, 1, 1 } },
3439 { ISD::UMIN, MVT::v32i16, { 1, 1, 1, 1 } },
3440 { ISD::UMIN, MVT::v64i8, { 1, 1, 1, 1 } },
3441 { ISD::USUBSAT, MVT::v32i16, { 1 } },
3442 { ISD::USUBSAT, MVT::v64i8, { 1 } },
3443 };
3444 static const CostKindTblEntry AVX512CostTbl[] = {
3445 { ISD::ABS, MVT::v8i64, { 1, 1, 1, 1 } },
3446 { ISD::ABS, MVT::v4i64, { 1, 1, 1, 1 } },
3447 { ISD::ABS, MVT::v2i64, { 1, 1, 1, 1 } },
3448 { ISD::ABS, MVT::v16i32, { 1, 1, 1, 1 } },
3449 { ISD::ABS, MVT::v8i32, { 1, 1, 1, 1 } },
3450 { ISD::ABS, MVT::v32i16, { 2, 7, 4, 4 } },
3451 { ISD::ABS, MVT::v16i16, { 1, 1, 1, 1 } },
3452 { ISD::ABS, MVT::v64i8, { 2, 7, 4, 4 } },
3453 { ISD::ABS, MVT::v32i8, { 1, 1, 1, 1 } },
3454 { ISD::BITREVERSE, MVT::v8i64, { 36 } },
3455 { ISD::BITREVERSE, MVT::v16i32, { 24 } },
3456 { ISD::BITREVERSE, MVT::v32i16, { 10 } },
3457 { ISD::BITREVERSE, MVT::v64i8, { 10 } },
3458 { ISD::BSWAP, MVT::v8i64, { 4 } },
3459 { ISD::BSWAP, MVT::v16i32, { 4 } },
3460 { ISD::BSWAP, MVT::v32i16, { 4 } },
3461 { ISD::CTLZ, MVT::v8i64, { 10, 28, 32, 32 } },
3462 { ISD::CTLZ, MVT::v16i32, { 12, 30, 38, 38 } },
3463 { ISD::CTLZ, MVT::v32i16, { 8, 15, 29, 29 } },
3464 { ISD::CTLZ, MVT::v64i8, { 6, 11, 19, 19 } },
3465 { ISD::CTPOP, MVT::v8i64, { 16, 16, 19, 19 } },
3466 { ISD::CTPOP, MVT::v16i32, { 24, 19, 27, 27 } },
3467 { ISD::CTPOP, MVT::v32i16, { 18, 15, 22, 22 } },
3468 { ISD::CTPOP, MVT::v64i8, { 12, 11, 16, 16 } },
3469 { ISD::CTTZ, MVT::v8i64, { 2, 8, 6, 7 } },
3470 { ISD::CTTZ, MVT::v16i32, { 2, 8, 6, 7 } },
3471 { ISD::CTTZ, MVT::v32i16, { 7, 17, 27, 27 } },
3472 { ISD::CTTZ, MVT::v64i8, { 6, 13, 21, 21 } },
3473 { ISD::ROTL, MVT::v8i64, { 1, 1, 1, 1 } },
3474 { ISD::ROTL, MVT::v4i64, { 1, 1, 1, 1 } },
3475 { ISD::ROTL, MVT::v2i64, { 1, 1, 1, 1 } },
3476 { ISD::ROTL, MVT::v16i32, { 1, 1, 1, 1 } },
3477 { ISD::ROTL, MVT::v8i32, { 1, 1, 1, 1 } },
3478 { ISD::ROTL, MVT::v4i32, { 1, 1, 1, 1 } },
3479 { ISD::ROTR, MVT::v8i64, { 1, 1, 1, 1 } },
3480 { ISD::ROTR, MVT::v4i64, { 1, 1, 1, 1 } },
3481 { ISD::ROTR, MVT::v2i64, { 1, 1, 1, 1 } },
3482 { ISD::ROTR, MVT::v16i32, { 1, 1, 1, 1 } },
3483 { ISD::ROTR, MVT::v8i32, { 1, 1, 1, 1 } },
3484 { ISD::ROTR, MVT::v4i32, { 1, 1, 1, 1 } },
3485 { ISD::SMAX, MVT::v8i64, { 1, 3, 1, 1 } },
3486 { ISD::SMAX, MVT::v16i32, { 1, 1, 1, 1 } },
3487 { ISD::SMAX, MVT::v32i16, { 3, 7, 5, 5 } },
3488 { ISD::SMAX, MVT::v64i8, { 3, 7, 5, 5 } },
3489 { ISD::SMAX, MVT::v4i64, { 1, 3, 1, 1 } },
3490 { ISD::SMAX, MVT::v2i64, { 1, 3, 1, 1 } },
3491 { ISD::SMIN, MVT::v8i64, { 1, 3, 1, 1 } },
3492 { ISD::SMIN, MVT::v16i32, { 1, 1, 1, 1 } },
3493 { ISD::SMIN, MVT::v32i16, { 3, 7, 5, 5 } },
3494 { ISD::SMIN, MVT::v64i8, { 3, 7, 5, 5 } },
3495 { ISD::SMIN, MVT::v4i64, { 1, 3, 1, 1 } },
3496 { ISD::SMIN, MVT::v2i64, { 1, 3, 1, 1 } },
3497 { ISD::UMAX, MVT::v8i64, { 1, 3, 1, 1 } },
3498 { ISD::UMAX, MVT::v16i32, { 1, 1, 1, 1 } },
3499 { ISD::UMAX, MVT::v32i16, { 3, 7, 5, 5 } },
3500 { ISD::UMAX, MVT::v64i8, { 3, 7, 5, 5 } },
3501 { ISD::UMAX, MVT::v4i64, { 1, 3, 1, 1 } },
3502 { ISD::UMAX, MVT::v2i64, { 1, 3, 1, 1 } },
3503 { ISD::UMIN, MVT::v8i64, { 1, 3, 1, 1 } },
3504 { ISD::UMIN, MVT::v16i32, { 1, 1, 1, 1 } },
3505 { ISD::UMIN, MVT::v32i16, { 3, 7, 5, 5 } },
3506 { ISD::UMIN, MVT::v64i8, { 3, 7, 5, 5 } },
3507 { ISD::UMIN, MVT::v4i64, { 1, 3, 1, 1 } },
3508 { ISD::UMIN, MVT::v2i64, { 1, 3, 1, 1 } },
3509 { ISD::USUBSAT, MVT::v16i32, { 2 } }, // pmaxud + psubd
3510 { ISD::USUBSAT, MVT::v2i64, { 2 } }, // pmaxuq + psubq
3511 { ISD::USUBSAT, MVT::v4i64, { 2 } }, // pmaxuq + psubq
3512 { ISD::USUBSAT, MVT::v8i64, { 2 } }, // pmaxuq + psubq
3513 { ISD::UADDSAT, MVT::v16i32, { 3 } }, // not + pminud + paddd
3514 { ISD::UADDSAT, MVT::v2i64, { 3 } }, // not + pminuq + paddq
3515 { ISD::UADDSAT, MVT::v4i64, { 3 } }, // not + pminuq + paddq
3516 { ISD::UADDSAT, MVT::v8i64, { 3 } }, // not + pminuq + paddq
3517 { ISD::SADDSAT, MVT::v32i16, { 2 } },
3518 { ISD::SADDSAT, MVT::v64i8, { 2 } },
3519 { ISD::SSUBSAT, MVT::v32i16, { 2 } },
3520 { ISD::SSUBSAT, MVT::v64i8, { 2 } },
3521 { ISD::UADDSAT, MVT::v32i16, { 2 } },
3522 { ISD::UADDSAT, MVT::v64i8, { 2 } },
3523 { ISD::USUBSAT, MVT::v32i16, { 2 } },
3524 { ISD::USUBSAT, MVT::v64i8, { 2 } },
3525 { ISD::FMAXNUM, MVT::f32, { 2 } },
3526 { ISD::FMAXNUM, MVT::v4f32, { 2 } },
3527 { ISD::FMAXNUM, MVT::v8f32, { 2 } },
3528 { ISD::FMAXNUM, MVT::v16f32, { 2 } },
3529 { ISD::FMAXNUM, MVT::f64, { 2 } },
3530 { ISD::FMAXNUM, MVT::v2f64, { 2 } },
3531 { ISD::FMAXNUM, MVT::v4f64, { 2 } },
3532 { ISD::FMAXNUM, MVT::v8f64, { 2 } },
3533 { ISD::FSQRT, MVT::f32, { 3, 12, 1, 1 } }, // Skylake from http://www.agner.org/
3534 { ISD::FSQRT, MVT::v4f32, { 3, 12, 1, 1 } }, // Skylake from http://www.agner.org/
3535 { ISD::FSQRT, MVT::v8f32, { 6, 12, 1, 1 } }, // Skylake from http://www.agner.org/
3536 { ISD::FSQRT, MVT::v16f32, { 12, 20, 1, 3 } }, // Skylake from http://www.agner.org/
3537 { ISD::FSQRT, MVT::f64, { 6, 18, 1, 1 } }, // Skylake from http://www.agner.org/
3538 { ISD::FSQRT, MVT::v2f64, { 6, 18, 1, 1 } }, // Skylake from http://www.agner.org/
3539 { ISD::FSQRT, MVT::v4f64, { 12, 18, 1, 1 } }, // Skylake from http://www.agner.org/
3540 { ISD::FSQRT, MVT::v8f64, { 24, 32, 1, 3 } }, // Skylake from http://www.agner.org/
3541 };
3542 static const CostKindTblEntry XOPCostTbl[] = {
3543 { ISD::BITREVERSE, MVT::v4i64, { 4 } },
3544 { ISD::BITREVERSE, MVT::v8i32, { 4 } },
3545 { ISD::BITREVERSE, MVT::v16i16, { 4 } },
3546 { ISD::BITREVERSE, MVT::v32i8, { 4 } },
3547 { ISD::BITREVERSE, MVT::v2i64, { 1 } },
3548 { ISD::BITREVERSE, MVT::v4i32, { 1 } },
3549 { ISD::BITREVERSE, MVT::v8i16, { 1 } },
3550 { ISD::BITREVERSE, MVT::v16i8, { 1 } },
3551 { ISD::BITREVERSE, MVT::i64, { 3 } },
3552 { ISD::BITREVERSE, MVT::i32, { 3 } },
3553 { ISD::BITREVERSE, MVT::i16, { 3 } },
3554 { ISD::BITREVERSE, MVT::i8, { 3 } },
3555 // XOP: ROTL = VPROT(X,Y), ROTR = VPROT(X,SUB(0,Y))
3556 { ISD::ROTL, MVT::v4i64, { 4, 7, 5, 6 } },
3557 { ISD::ROTL, MVT::v8i32, { 4, 7, 5, 6 } },
3558 { ISD::ROTL, MVT::v16i16, { 4, 7, 5, 6 } },
3559 { ISD::ROTL, MVT::v32i8, { 4, 7, 5, 6 } },
3560 { ISD::ROTL, MVT::v2i64, { 1, 3, 1, 1 } },
3561 { ISD::ROTL, MVT::v4i32, { 1, 3, 1, 1 } },
3562 { ISD::ROTL, MVT::v8i16, { 1, 3, 1, 1 } },
3563 { ISD::ROTL, MVT::v16i8, { 1, 3, 1, 1 } },
3564 { ISD::ROTR, MVT::v4i64, { 4, 7, 8, 9 } },
3565 { ISD::ROTR, MVT::v8i32, { 4, 7, 8, 9 } },
3566 { ISD::ROTR, MVT::v16i16, { 4, 7, 8, 9 } },
3567 { ISD::ROTR, MVT::v32i8, { 4, 7, 8, 9 } },
3568 { ISD::ROTR, MVT::v2i64, { 1, 3, 3, 3 } },
3569 { ISD::ROTR, MVT::v4i32, { 1, 3, 3, 3 } },
3570 { ISD::ROTR, MVT::v8i16, { 1, 3, 3, 3 } },
3571 { ISD::ROTR, MVT::v16i8, { 1, 3, 3, 3 } }
3572 };
3573 static const CostKindTblEntry AVX2CostTbl[] = {
3574 { ISD::ABS, MVT::v2i64, { 2, 4, 3, 5 } }, // VBLENDVPD(X,VPSUBQ(0,X),X)
3575 { ISD::ABS, MVT::v4i64, { 2, 4, 3, 5 } }, // VBLENDVPD(X,VPSUBQ(0,X),X)
3576 { ISD::ABS, MVT::v4i32, { 1, 1, 1, 1 } },
3577 { ISD::ABS, MVT::v8i32, { 1, 1, 1, 2 } },
3578 { ISD::ABS, MVT::v8i16, { 1, 1, 1, 1 } },
3579 { ISD::ABS, MVT::v16i16, { 1, 1, 1, 2 } },
3580 { ISD::ABS, MVT::v16i8, { 1, 1, 1, 1 } },
3581 { ISD::ABS, MVT::v32i8, { 1, 1, 1, 2 } },
3582 { ISD::BITREVERSE, MVT::v2i64, { 3 } },
3583 { ISD::BITREVERSE, MVT::v4i64, { 3 } },
3584 { ISD::BITREVERSE, MVT::v4i32, { 3 } },
3585 { ISD::BITREVERSE, MVT::v8i32, { 3 } },
3586 { ISD::BITREVERSE, MVT::v8i16, { 3 } },
3587 { ISD::BITREVERSE, MVT::v16i16, { 3 } },
3588 { ISD::BITREVERSE, MVT::v16i8, { 3 } },
3589 { ISD::BITREVERSE, MVT::v32i8, { 3 } },
3590 { ISD::BSWAP, MVT::v4i64, { 1 } },
3591 { ISD::BSWAP, MVT::v8i32, { 1 } },
3592 { ISD::BSWAP, MVT::v16i16, { 1 } },
3593 { ISD::CTLZ, MVT::v2i64, { 7, 18, 24, 25 } },
3594 { ISD::CTLZ, MVT::v4i64, { 14, 18, 24, 44 } },
3595 { ISD::CTLZ, MVT::v4i32, { 5, 16, 19, 20 } },
3596 { ISD::CTLZ, MVT::v8i32, { 10, 16, 19, 34 } },
3597 { ISD::CTLZ, MVT::v8i16, { 4, 13, 14, 15 } },
3598 { ISD::CTLZ, MVT::v16i16, { 6, 14, 14, 24 } },
3599 { ISD::CTLZ, MVT::v16i8, { 3, 12, 9, 10 } },
3600 { ISD::CTLZ, MVT::v32i8, { 4, 12, 9, 14 } },
3601 { ISD::CTPOP, MVT::v2i64, { 3, 9, 10, 10 } },
3602 { ISD::CTPOP, MVT::v4i64, { 4, 9, 10, 14 } },
3603 { ISD::CTPOP, MVT::v4i32, { 7, 12, 14, 14 } },
3604 { ISD::CTPOP, MVT::v8i32, { 7, 12, 14, 18 } },
3605 { ISD::CTPOP, MVT::v8i16, { 3, 7, 11, 11 } },
3606 { ISD::CTPOP, MVT::v16i16, { 6, 8, 11, 18 } },
3607 { ISD::CTPOP, MVT::v16i8, { 2, 5, 8, 8 } },
3608 { ISD::CTPOP, MVT::v32i8, { 3, 5, 8, 12 } },
3609 { ISD::CTTZ, MVT::v2i64, { 4, 11, 13, 13 } },
3610 { ISD::CTTZ, MVT::v4i64, { 5, 11, 13, 20 } },
3611 { ISD::CTTZ, MVT::v4i32, { 7, 14, 17, 17 } },
3612 { ISD::CTTZ, MVT::v8i32, { 7, 15, 17, 24 } },
3613 { ISD::CTTZ, MVT::v8i16, { 4, 9, 14, 14 } },
3614 { ISD::CTTZ, MVT::v16i16, { 6, 9, 14, 24 } },
3615 { ISD::CTTZ, MVT::v16i8, { 3, 7, 11, 11 } },
3616 { ISD::CTTZ, MVT::v32i8, { 5, 7, 11, 18 } },
3617 { ISD::SADDSAT, MVT::v16i16, { 1 } },
3618 { ISD::SADDSAT, MVT::v32i8, { 1 } },
3619 { ISD::SMAX, MVT::v2i64, { 2, 7, 2, 3 } },
3620 { ISD::SMAX, MVT::v4i64, { 2, 7, 2, 3 } },
3621 { ISD::SMAX, MVT::v8i32, { 1, 1, 1, 2 } },
3622 { ISD::SMAX, MVT::v16i16, { 1, 1, 1, 2 } },
3623 { ISD::SMAX, MVT::v32i8, { 1, 1, 1, 2 } },
3624 { ISD::SMIN, MVT::v2i64, { 2, 7, 2, 3 } },
3625 { ISD::SMIN, MVT::v4i64, { 2, 7, 2, 3 } },
3626 { ISD::SMIN, MVT::v8i32, { 1, 1, 1, 2 } },
3627 { ISD::SMIN, MVT::v16i16, { 1, 1, 1, 2 } },
3628 { ISD::SMIN, MVT::v32i8, { 1, 1, 1, 2 } },
3629 { ISD::SSUBSAT, MVT::v16i16, { 1 } },
3630 { ISD::SSUBSAT, MVT::v32i8, { 1 } },
3631 { ISD::UADDSAT, MVT::v16i16, { 1 } },
3632 { ISD::UADDSAT, MVT::v32i8, { 1 } },
3633 { ISD::UADDSAT, MVT::v8i32, { 3 } }, // not + pminud + paddd
3634 { ISD::UMAX, MVT::v2i64, { 2, 8, 5, 6 } },
3635 { ISD::UMAX, MVT::v4i64, { 2, 8, 5, 8 } },
3636 { ISD::UMAX, MVT::v8i32, { 1, 1, 1, 2 } },
3637 { ISD::UMAX, MVT::v16i16, { 1, 1, 1, 2 } },
3638 { ISD::UMAX, MVT::v32i8, { 1, 1, 1, 2 } },
3639 { ISD::UMIN, MVT::v2i64, { 2, 8, 5, 6 } },
3640 { ISD::UMIN, MVT::v4i64, { 2, 8, 5, 8 } },
3641 { ISD::UMIN, MVT::v8i32, { 1, 1, 1, 2 } },
3642 { ISD::UMIN, MVT::v16i16, { 1, 1, 1, 2 } },
3643 { ISD::UMIN, MVT::v32i8, { 1, 1, 1, 2 } },
3644 { ISD::USUBSAT, MVT::v16i16, { 1 } },
3645 { ISD::USUBSAT, MVT::v32i8, { 1 } },
3646 { ISD::USUBSAT, MVT::v8i32, { 2 } }, // pmaxud + psubd
3647 { ISD::FMAXNUM, MVT::v8f32, { 3 } }, // MAXPS + CMPUNORDPS + BLENDVPS
3648 { ISD::FMAXNUM, MVT::v4f64, { 3 } }, // MAXPD + CMPUNORDPD + BLENDVPD
3649 { ISD::FSQRT, MVT::f32, { 7, 15, 1, 1 } }, // vsqrtss
3650 { ISD::FSQRT, MVT::v4f32, { 7, 15, 1, 1 } }, // vsqrtps
3651 { ISD::FSQRT, MVT::v8f32, { 14, 21, 1, 3 } }, // vsqrtps
3652 { ISD::FSQRT, MVT::f64, { 14, 21, 1, 1 } }, // vsqrtsd
3653 { ISD::FSQRT, MVT::v2f64, { 14, 21, 1, 1 } }, // vsqrtpd
3654 { ISD::FSQRT, MVT::v4f64, { 28, 35, 1, 3 } }, // vsqrtpd
3655 };
3656 static const CostKindTblEntry AVX1CostTbl[] = {
3657 { ISD::ABS, MVT::v4i64, { 6, 8, 6, 12 } }, // VBLENDVPD(X,VPSUBQ(0,X),X)
3658 { ISD::ABS, MVT::v8i32, { 3, 6, 4, 5 } },
3659 { ISD::ABS, MVT::v16i16, { 3, 6, 4, 5 } },
3660 { ISD::ABS, MVT::v32i8, { 3, 6, 4, 5 } },
3661 { ISD::BITREVERSE, MVT::v4i64, { 12 } }, // 2 x 128-bit Op + extract/insert
3662 { ISD::BITREVERSE, MVT::v8i32, { 12 } }, // 2 x 128-bit Op + extract/insert
3663 { ISD::BITREVERSE, MVT::v16i16, { 12 } }, // 2 x 128-bit Op + extract/insert
3664 { ISD::BITREVERSE, MVT::v32i8, { 12 } }, // 2 x 128-bit Op + extract/insert
3665 { ISD::BSWAP, MVT::v4i64, { 4 } },
3666 { ISD::BSWAP, MVT::v8i32, { 4 } },
3667 { ISD::BSWAP, MVT::v16i16, { 4 } },
3668 { ISD::CTLZ, MVT::v4i64, { 29, 33, 49, 58 } }, // 2 x 128-bit Op + extract/insert
3669 { ISD::CTLZ, MVT::v2i64, { 14, 24, 24, 28 } },
3670 { ISD::CTLZ, MVT::v8i32, { 24, 28, 39, 48 } }, // 2 x 128-bit Op + extract/insert
3671 { ISD::CTLZ, MVT::v4i32, { 12, 20, 19, 23 } },
3672 { ISD::CTLZ, MVT::v16i16, { 19, 22, 29, 38 } }, // 2 x 128-bit Op + extract/insert
3673 { ISD::CTLZ, MVT::v8i16, { 9, 16, 14, 18 } },
3674 { ISD::CTLZ, MVT::v32i8, { 14, 15, 19, 28 } }, // 2 x 128-bit Op + extract/insert
3675 { ISD::CTLZ, MVT::v16i8, { 7, 12, 9, 13 } },
3676 { ISD::CTPOP, MVT::v4i64, { 14, 18, 19, 28 } }, // 2 x 128-bit Op + extract/insert
3677 { ISD::CTPOP, MVT::v2i64, { 7, 14, 10, 14 } },
3678 { ISD::CTPOP, MVT::v8i32, { 18, 24, 27, 36 } }, // 2 x 128-bit Op + extract/insert
3679 { ISD::CTPOP, MVT::v4i32, { 9, 20, 14, 18 } },
3680 { ISD::CTPOP, MVT::v16i16, { 16, 21, 22, 31 } }, // 2 x 128-bit Op + extract/insert
3681 { ISD::CTPOP, MVT::v8i16, { 8, 18, 11, 15 } },
3682 { ISD::CTPOP, MVT::v32i8, { 13, 15, 16, 25 } }, // 2 x 128-bit Op + extract/insert
3683 { ISD::CTPOP, MVT::v16i8, { 6, 12, 8, 12 } },
3684 { ISD::CTTZ, MVT::v4i64, { 17, 22, 24, 33 } }, // 2 x 128-bit Op + extract/insert
3685 { ISD::CTTZ, MVT::v2i64, { 9, 19, 13, 17 } },
3686 { ISD::CTTZ, MVT::v8i32, { 21, 27, 32, 41 } }, // 2 x 128-bit Op + extract/insert
3687 { ISD::CTTZ, MVT::v4i32, { 11, 24, 17, 21 } },
3688 { ISD::CTTZ, MVT::v16i16, { 18, 24, 27, 36 } }, // 2 x 128-bit Op + extract/insert
3689 { ISD::CTTZ, MVT::v8i16, { 9, 21, 14, 18 } },
3690 { ISD::CTTZ, MVT::v32i8, { 15, 18, 21, 30 } }, // 2 x 128-bit Op + extract/insert
3691 { ISD::CTTZ, MVT::v16i8, { 8, 16, 11, 15 } },
3692 { ISD::SADDSAT, MVT::v16i16, { 4 } }, // 2 x 128-bit Op + extract/insert
3693 { ISD::SADDSAT, MVT::v32i8, { 4 } }, // 2 x 128-bit Op + extract/insert
3694 { ISD::SMAX, MVT::v4i64, { 6, 9, 6, 12 } }, // 2 x 128-bit Op + extract/insert
3695 { ISD::SMAX, MVT::v2i64, { 3, 7, 2, 4 } },
3696 { ISD::SMAX, MVT::v8i32, { 4, 6, 5, 6 } }, // 2 x 128-bit Op + extract/insert
3697 { ISD::SMAX, MVT::v16i16, { 4, 6, 5, 6 } }, // 2 x 128-bit Op + extract/insert
3698 { ISD::SMAX, MVT::v32i8, { 4, 6, 5, 6 } }, // 2 x 128-bit Op + extract/insert
3699 { ISD::SMIN, MVT::v4i64, { 6, 9, 6, 12 } }, // 2 x 128-bit Op + extract/insert
3700 { ISD::SMIN, MVT::v2i64, { 3, 7, 2, 3 } },
3701 { ISD::SMIN, MVT::v8i32, { 4, 6, 5, 6 } }, // 2 x 128-bit Op + extract/insert
3702 { ISD::SMIN, MVT::v16i16, { 4, 6, 5, 6 } }, // 2 x 128-bit Op + extract/insert
3703 { ISD::SMIN, MVT::v32i8, { 4, 6, 5, 6 } }, // 2 x 128-bit Op + extract/insert
3704 { ISD::SSUBSAT, MVT::v16i16, { 4 } }, // 2 x 128-bit Op + extract/insert
3705 { ISD::SSUBSAT, MVT::v32i8, { 4 } }, // 2 x 128-bit Op + extract/insert
3706 { ISD::UADDSAT, MVT::v16i16, { 4 } }, // 2 x 128-bit Op + extract/insert
3707 { ISD::UADDSAT, MVT::v32i8, { 4 } }, // 2 x 128-bit Op + extract/insert
3708 { ISD::UADDSAT, MVT::v8i32, { 8 } }, // 2 x 128-bit Op + extract/insert
3709 { ISD::UMAX, MVT::v4i64, { 9, 10, 11, 17 } }, // 2 x 128-bit Op + extract/insert
3710 { ISD::UMAX, MVT::v2i64, { 4, 8, 5, 7 } },
3711 { ISD::UMAX, MVT::v8i32, { 4, 6, 5, 6 } }, // 2 x 128-bit Op + extract/insert
3712 { ISD::UMAX, MVT::v16i16, { 4, 6, 5, 6 } }, // 2 x 128-bit Op + extract/insert
3713 { ISD::UMAX, MVT::v32i8, { 4, 6, 5, 6 } }, // 2 x 128-bit Op + extract/insert
3714 { ISD::UMIN, MVT::v4i64, { 9, 10, 11, 17 } }, // 2 x 128-bit Op + extract/insert
3715 { ISD::UMIN, MVT::v2i64, { 4, 8, 5, 7 } },
3716 { ISD::UMIN, MVT::v8i32, { 4, 6, 5, 6 } }, // 2 x 128-bit Op + extract/insert
3717 { ISD::UMIN, MVT::v16i16, { 4, 6, 5, 6 } }, // 2 x 128-bit Op + extract/insert
3718 { ISD::UMIN, MVT::v32i8, { 4, 6, 5, 6 } }, // 2 x 128-bit Op + extract/insert
3719 { ISD::USUBSAT, MVT::v16i16, { 4 } }, // 2 x 128-bit Op + extract/insert
3720 { ISD::USUBSAT, MVT::v32i8, { 4 } }, // 2 x 128-bit Op + extract/insert
3721 { ISD::USUBSAT, MVT::v8i32, { 6 } }, // 2 x 128-bit Op + extract/insert
3722 { ISD::FMAXNUM, MVT::f32, { 3 } }, // MAXSS + CMPUNORDSS + BLENDVPS
3723 { ISD::FMAXNUM, MVT::v4f32, { 3 } }, // MAXPS + CMPUNORDPS + BLENDVPS
3724 { ISD::FMAXNUM, MVT::v8f32, { 5 } }, // MAXPS + CMPUNORDPS + BLENDVPS + ?
3725 { ISD::FMAXNUM, MVT::f64, { 3 } }, // MAXSD + CMPUNORDSD + BLENDVPD
3726 { ISD::FMAXNUM, MVT::v2f64, { 3 } }, // MAXPD + CMPUNORDPD + BLENDVPD
3727 { ISD::FMAXNUM, MVT::v4f64, { 5 } }, // MAXPD + CMPUNORDPD + BLENDVPD + ?
3728 { ISD::FSQRT, MVT::f32, { 21, 21, 1, 1 } }, // vsqrtss
3729 { ISD::FSQRT, MVT::v4f32, { 21, 21, 1, 1 } }, // vsqrtps
3730 { ISD::FSQRT, MVT::v8f32, { 42, 42, 1, 3 } }, // vsqrtps
3731 { ISD::FSQRT, MVT::f64, { 27, 27, 1, 1 } }, // vsqrtsd
3732 { ISD::FSQRT, MVT::v2f64, { 27, 27, 1, 1 } }, // vsqrtpd
3733 { ISD::FSQRT, MVT::v4f64, { 54, 54, 1, 3 } }, // vsqrtpd
3734 };
3735 static const CostKindTblEntry GLMCostTbl[] = {
3736 { ISD::FSQRT, MVT::f32, { 19, 20, 1, 1 } }, // sqrtss
3737 { ISD::FSQRT, MVT::v4f32, { 37, 41, 1, 5 } }, // sqrtps
3738 { ISD::FSQRT, MVT::f64, { 34, 35, 1, 1 } }, // sqrtsd
3739 { ISD::FSQRT, MVT::v2f64, { 67, 71, 1, 5 } }, // sqrtpd
3740 };
3741 static const CostKindTblEntry SLMCostTbl[] = {
3742 { ISD::FSQRT, MVT::f32, { 20, 20, 1, 1 } }, // sqrtss
3743 { ISD::FSQRT, MVT::v4f32, { 40, 41, 1, 5 } }, // sqrtps
3744 { ISD::FSQRT, MVT::f64, { 35, 35, 1, 1 } }, // sqrtsd
3745 { ISD::FSQRT, MVT::v2f64, { 70, 71, 1, 5 } }, // sqrtpd
3746 };
3747 static const CostKindTblEntry SSE42CostTbl[] = {
3748 { ISD::USUBSAT, MVT::v4i32, { 2 } }, // pmaxud + psubd
3749 { ISD::UADDSAT, MVT::v4i32, { 3 } }, // not + pminud + paddd
3750 { ISD::FSQRT, MVT::f32, { 18, 18, 1, 1 } }, // Nehalem from http://www.agner.org/
3751 { ISD::FSQRT, MVT::v4f32, { 18, 18, 1, 1 } }, // Nehalem from http://www.agner.org/
3752 };
3753 static const CostKindTblEntry SSE41CostTbl[] = {
3754 { ISD::ABS, MVT::v2i64, { 3, 4, 3, 5 } }, // BLENDVPD(X,PSUBQ(0,X),X)
3755 { ISD::SMAX, MVT::v2i64, { 3, 7, 2, 3 } },
3756 { ISD::SMAX, MVT::v4i32, { 1, 1, 1, 1 } },
3757 { ISD::SMAX, MVT::v16i8, { 1, 1, 1, 1 } },
3758 { ISD::SMIN, MVT::v2i64, { 3, 7, 2, 3 } },
3759 { ISD::SMIN, MVT::v4i32, { 1, 1, 1, 1 } },
3760 { ISD::SMIN, MVT::v16i8, { 1, 1, 1, 1 } },
3761 { ISD::UMAX, MVT::v2i64, { 2, 11, 6, 7 } },
3762 { ISD::UMAX, MVT::v4i32, { 1, 1, 1, 1 } },
3763 { ISD::UMAX, MVT::v8i16, { 1, 1, 1, 1 } },
3764 { ISD::UMIN, MVT::v2i64, { 2, 11, 6, 7 } },
3765 { ISD::UMIN, MVT::v4i32, { 1, 1, 1, 1 } },
3766 { ISD::UMIN, MVT::v8i16, { 1, 1, 1, 1 } },
3767 };
3768 static const CostKindTblEntry SSSE3CostTbl[] = {
3769 { ISD::ABS, MVT::v4i32, { 1, 2, 1, 1 } },
3770 { ISD::ABS, MVT::v8i16, { 1, 2, 1, 1 } },
3771 { ISD::ABS, MVT::v16i8, { 1, 2, 1, 1 } },
3772 { ISD::BITREVERSE, MVT::v2i64, { 5 } },
3773 { ISD::BITREVERSE, MVT::v4i32, { 5 } },
3774 { ISD::BITREVERSE, MVT::v8i16, { 5 } },
3775 { ISD::BITREVERSE, MVT::v16i8, { 5 } },
3776 { ISD::BSWAP, MVT::v2i64, { 1 } },
3777 { ISD::BSWAP, MVT::v4i32, { 1 } },
3778 { ISD::BSWAP, MVT::v8i16, { 1 } },
3779 { ISD::CTLZ, MVT::v2i64, { 18, 28, 28, 35 } },
3780 { ISD::CTLZ, MVT::v4i32, { 15, 20, 22, 28 } },
3781 { ISD::CTLZ, MVT::v8i16, { 13, 17, 16, 22 } },
3782 { ISD::CTLZ, MVT::v16i8, { 11, 15, 10, 16 } },
3783 { ISD::CTPOP, MVT::v2i64, { 13, 19, 12, 18 } },
3784 { ISD::CTPOP, MVT::v4i32, { 18, 24, 16, 22 } },
3785 { ISD::CTPOP, MVT::v8i16, { 13, 18, 14, 20 } },
3786 { ISD::CTPOP, MVT::v16i8, { 11, 12, 10, 16 } },
3787 { ISD::CTTZ, MVT::v2i64, { 13, 25, 15, 22 } },
3788 { ISD::CTTZ, MVT::v4i32, { 18, 26, 19, 25 } },
3789 { ISD::CTTZ, MVT::v8i16, { 13, 20, 17, 23 } },
3790 { ISD::CTTZ, MVT::v16i8, { 11, 16, 13, 19 } }
3791 };
3792 static const CostKindTblEntry SSE2CostTbl[] = {
3793 { ISD::ABS, MVT::v2i64, { 3, 6, 5, 5 } },
3794 { ISD::ABS, MVT::v4i32, { 1, 4, 4, 4 } },
3795 { ISD::ABS, MVT::v8i16, { 1, 2, 3, 3 } },
3796 { ISD::ABS, MVT::v16i8, { 1, 2, 3, 3 } },
3797 { ISD::BITREVERSE, MVT::v2i64, { 29 } },
3798 { ISD::BITREVERSE, MVT::v4i32, { 27 } },
3799 { ISD::BITREVERSE, MVT::v8i16, { 27 } },
3800 { ISD::BITREVERSE, MVT::v16i8, { 20 } },
3801 { ISD::BSWAP, MVT::v2i64, { 7 } },
3802 { ISD::BSWAP, MVT::v4i32, { 7 } },
3803 { ISD::BSWAP, MVT::v8i16, { 7 } },
3804 { ISD::CTLZ, MVT::v2i64, { 10, 45, 36, 38 } },
3805 { ISD::CTLZ, MVT::v4i32, { 10, 45, 38, 40 } },
3806 { ISD::CTLZ, MVT::v8i16, { 9, 38, 32, 34 } },
3807 { ISD::CTLZ, MVT::v16i8, { 8, 39, 29, 32 } },
3808 { ISD::CTPOP, MVT::v2i64, { 12, 26, 16, 18 } },
3809 { ISD::CTPOP, MVT::v4i32, { 15, 29, 21, 23 } },
3810 { ISD::CTPOP, MVT::v8i16, { 13, 25, 18, 20 } },
3811 { ISD::CTPOP, MVT::v16i8, { 10, 21, 14, 16 } },
3812 { ISD::CTTZ, MVT::v2i64, { 14, 28, 19, 21 } },
3813 { ISD::CTTZ, MVT::v4i32, { 18, 31, 24, 26 } },
3814 { ISD::CTTZ, MVT::v8i16, { 16, 27, 21, 23 } },
3815 { ISD::CTTZ, MVT::v16i8, { 13, 23, 17, 19 } },
3816 { ISD::SADDSAT, MVT::v8i16, { 1 } },
3817 { ISD::SADDSAT, MVT::v16i8, { 1 } },
3818 { ISD::SMAX, MVT::v2i64, { 4, 8, 15, 15 } },
3819 { ISD::SMAX, MVT::v4i32, { 2, 4, 5, 5 } },
3820 { ISD::SMAX, MVT::v8i16, { 1, 1, 1, 1 } },
3821 { ISD::SMAX, MVT::v16i8, { 2, 4, 5, 5 } },
3822 { ISD::SMIN, MVT::v2i64, { 4, 8, 15, 15 } },
3823 { ISD::SMIN, MVT::v4i32, { 2, 4, 5, 5 } },
3824 { ISD::SMIN, MVT::v8i16, { 1, 1, 1, 1 } },
3825 { ISD::SMIN, MVT::v16i8, { 2, 4, 5, 5 } },
3826 { ISD::SSUBSAT, MVT::v8i16, { 1 } },
3827 { ISD::SSUBSAT, MVT::v16i8, { 1 } },
3828 { ISD::UADDSAT, MVT::v8i16, { 1 } },
3829 { ISD::UADDSAT, MVT::v16i8, { 1 } },
3830 { ISD::UMAX, MVT::v2i64, { 4, 8, 15, 15 } },
3831 { ISD::UMAX, MVT::v4i32, { 2, 5, 8, 8 } },
3832 { ISD::UMAX, MVT::v8i16, { 1, 3, 3, 3 } },
3833 { ISD::UMAX, MVT::v16i8, { 1, 1, 1, 1 } },
3834 { ISD::UMIN, MVT::v2i64, { 4, 8, 15, 15 } },
3835 { ISD::UMIN, MVT::v4i32, { 2, 5, 8, 8 } },
3836 { ISD::UMIN, MVT::v8i16, { 1, 3, 3, 3 } },
3837 { ISD::UMIN, MVT::v16i8, { 1, 1, 1, 1 } },
3838 { ISD::USUBSAT, MVT::v8i16, { 1 } },
3839 { ISD::USUBSAT, MVT::v16i8, { 1 } },
3840 { ISD::FMAXNUM, MVT::f64, { 4 } },
3841 { ISD::FMAXNUM, MVT::v2f64, { 4 } },
3842 { ISD::FSQRT, MVT::f64, { 32, 32, 1, 1 } }, // Nehalem from http://www.agner.org/
3843 { ISD::FSQRT, MVT::v2f64, { 32, 32, 1, 1 } }, // Nehalem from http://www.agner.org/
3844 };
3845 static const CostKindTblEntry SSE1CostTbl[] = {
3846 { ISD::FMAXNUM, MVT::f32, { 4 } },
3847 { ISD::FMAXNUM, MVT::v4f32, { 4 } },
3848 { ISD::FSQRT, MVT::f32, { 28, 30, 1, 2 } }, // Pentium III from http://www.agner.org/
3849 { ISD::FSQRT, MVT::v4f32, { 56, 56, 1, 2 } }, // Pentium III from http://www.agner.org/
3850 };
3851 static const CostKindTblEntry BMI64CostTbl[] = { // 64-bit targets
3852 { ISD::CTTZ, MVT::i64, { 1 } },
3853 };
3854 static const CostKindTblEntry BMI32CostTbl[] = { // 32 or 64-bit targets
3855 { ISD::CTTZ, MVT::i32, { 1 } },
3856 { ISD::CTTZ, MVT::i16, { 1 } },
3857 { ISD::CTTZ, MVT::i8, { 1 } },
3858 };
3859 static const CostKindTblEntry LZCNT64CostTbl[] = { // 64-bit targets
3860 { ISD::CTLZ, MVT::i64, { 1 } },
3861 };
3862 static const CostKindTblEntry LZCNT32CostTbl[] = { // 32 or 64-bit targets
3863 { ISD::CTLZ, MVT::i32, { 1 } },
3864 { ISD::CTLZ, MVT::i16, { 2 } },
3865 { ISD::CTLZ, MVT::i8, { 2 } },
3866 };
3867 static const CostKindTblEntry POPCNT64CostTbl[] = { // 64-bit targets
3868 { ISD::CTPOP, MVT::i64, { 1, 1, 1, 1 } }, // popcnt
3869 };
3870 static const CostKindTblEntry POPCNT32CostTbl[] = { // 32 or 64-bit targets
3871 { ISD::CTPOP, MVT::i32, { 1, 1, 1, 1 } }, // popcnt
3872 { ISD::CTPOP, MVT::i16, { 1, 1, 2, 2 } }, // popcnt(zext())
3873 { ISD::CTPOP, MVT::i8, { 1, 1, 2, 2 } }, // popcnt(zext())
3874 };
3875 static const CostKindTblEntry X64CostTbl[] = { // 64-bit targets
3876 { ISD::ABS, MVT::i64, { 1, 2, 3, 4 } }, // SUB+CMOV
3877 { ISD::BITREVERSE, MVT::i64, { 14 } },
3878 { ISD::BSWAP, MVT::i64, { 1 } },
3879 { ISD::CTLZ, MVT::i64, { 4 } }, // BSR+XOR or BSR+XOR+CMOV
3880 { ISD::CTLZ_ZERO_UNDEF, MVT::i64,{ 1, 1, 1, 1 } }, // BSR+XOR
3881 { ISD::CTTZ, MVT::i64, { 3 } }, // TEST+BSF+CMOV/BRANCH
3882 { ISD::CTTZ_ZERO_UNDEF, MVT::i64,{ 1, 1, 1, 1 } }, // BSF
3883 { ISD::CTPOP, MVT::i64, { 10, 6, 19, 19 } },
3884 { ISD::ROTL, MVT::i64, { 2, 3, 1, 3 } },
3885 { ISD::ROTR, MVT::i64, { 2, 3, 1, 3 } },
3886 { ISD::FSHL, MVT::i64, { 4, 4, 1, 4 } },
3887 { ISD::SMAX, MVT::i64, { 1, 3, 2, 3 } },
3888 { ISD::SMIN, MVT::i64, { 1, 3, 2, 3 } },
3889 { ISD::UMAX, MVT::i64, { 1, 3, 2, 3 } },
3890 { ISD::UMIN, MVT::i64, { 1, 3, 2, 3 } },
3891 { ISD::SADDO, MVT::i64, { 1 } },
3892 { ISD::UADDO, MVT::i64, { 1 } },
3893 { ISD::UMULO, MVT::i64, { 2 } }, // mulq + seto
3894 };
3895 static const CostKindTblEntry X86CostTbl[] = { // 32 or 64-bit targets
3896 { ISD::ABS, MVT::i32, { 1, 2, 3, 4 } }, // SUB+XOR+SRA or SUB+CMOV
3897 { ISD::ABS, MVT::i16, { 2, 2, 3, 4 } }, // SUB+XOR+SRA or SUB+CMOV
3898 { ISD::ABS, MVT::i8, { 2, 4, 4, 4 } }, // SUB+XOR+SRA
3899 { ISD::BITREVERSE, MVT::i32, { 14 } },
3900 { ISD::BITREVERSE, MVT::i16, { 14 } },
3901 { ISD::BITREVERSE, MVT::i8, { 11 } },
3902 { ISD::BSWAP, MVT::i32, { 1 } },
3903 { ISD::BSWAP, MVT::i16, { 1 } }, // ROL
3904 { ISD::CTLZ, MVT::i32, { 4 } }, // BSR+XOR or BSR+XOR+CMOV
3905 { ISD::CTLZ, MVT::i16, { 4 } }, // BSR+XOR or BSR+XOR+CMOV
3906 { ISD::CTLZ, MVT::i8, { 4 } }, // BSR+XOR or BSR+XOR+CMOV
3907 { ISD::CTLZ_ZERO_UNDEF, MVT::i32,{ 1, 1, 1, 1 } }, // BSR+XOR
3908 { ISD::CTLZ_ZERO_UNDEF, MVT::i16,{ 2, 2, 3, 3 } }, // BSR+XOR
3909 { ISD::CTLZ_ZERO_UNDEF, MVT::i8, { 2, 2, 3, 3 } }, // BSR+XOR
3910 { ISD::CTTZ, MVT::i32, { 3 } }, // TEST+BSF+CMOV/BRANCH
3911 { ISD::CTTZ, MVT::i16, { 3 } }, // TEST+BSF+CMOV/BRANCH
3912 { ISD::CTTZ, MVT::i8, { 3 } }, // TEST+BSF+CMOV/BRANCH
3913 { ISD::CTTZ_ZERO_UNDEF, MVT::i32,{ 1, 1, 1, 1 } }, // BSF
3914 { ISD::CTTZ_ZERO_UNDEF, MVT::i16,{ 2, 2, 1, 1 } }, // BSF
3915 { ISD::CTTZ_ZERO_UNDEF, MVT::i8, { 2, 2, 1, 1 } }, // BSF
3916 { ISD::CTPOP, MVT::i32, { 8, 7, 15, 15 } },
3917 { ISD::CTPOP, MVT::i16, { 9, 8, 17, 17 } },
3918 { ISD::CTPOP, MVT::i8, { 7, 6, 13, 13 } },
3919 { ISD::ROTL, MVT::i32, { 2, 3, 1, 3 } },
3920 { ISD::ROTL, MVT::i16, { 2, 3, 1, 3 } },
3921 { ISD::ROTL, MVT::i8, { 2, 3, 1, 3 } },
3922 { ISD::ROTR, MVT::i32, { 2, 3, 1, 3 } },
3923 { ISD::ROTR, MVT::i16, { 2, 3, 1, 3 } },
3924 { ISD::ROTR, MVT::i8, { 2, 3, 1, 3 } },
3925 { ISD::FSHL, MVT::i32, { 4, 4, 1, 4 } },
3926 { ISD::FSHL, MVT::i16, { 4, 4, 2, 5 } },
3927 { ISD::FSHL, MVT::i8, { 4, 4, 2, 5 } },
3928 { ISD::SMAX, MVT::i32, { 1, 2, 2, 3 } },
3929 { ISD::SMAX, MVT::i16, { 1, 4, 2, 4 } },
3930 { ISD::SMAX, MVT::i8, { 1, 4, 2, 4 } },
3931 { ISD::SMIN, MVT::i32, { 1, 2, 2, 3 } },
3932 { ISD::SMIN, MVT::i16, { 1, 4, 2, 4 } },
3933 { ISD::SMIN, MVT::i8, { 1, 4, 2, 4 } },
3934 { ISD::UMAX, MVT::i32, { 1, 2, 2, 3 } },
3935 { ISD::UMAX, MVT::i16, { 1, 4, 2, 4 } },
3936 { ISD::UMAX, MVT::i8, { 1, 4, 2, 4 } },
3937 { ISD::UMIN, MVT::i32, { 1, 2, 2, 3 } },
3938 { ISD::UMIN, MVT::i16, { 1, 4, 2, 4 } },
3939 { ISD::UMIN, MVT::i8, { 1, 4, 2, 4 } },
3940 { ISD::SADDO, MVT::i32, { 1 } },
3941 { ISD::SADDO, MVT::i16, { 1 } },
3942 { ISD::SADDO, MVT::i8, { 1 } },
3943 { ISD::UADDO, MVT::i32, { 1 } },
3944 { ISD::UADDO, MVT::i16, { 1 } },
3945 { ISD::UADDO, MVT::i8, { 1 } },
3946 { ISD::UMULO, MVT::i32, { 2 } }, // mul + seto
3947 { ISD::UMULO, MVT::i16, { 2 } },
3948 { ISD::UMULO, MVT::i8, { 2 } },
3949 };
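
Each table row carries up to four per-cost-kind values. Judging from how the lookups below consult Entry->Cost[CostKind], the assumed slot order is { recip-throughput, latency, code-size, size-and-latency }, and rows that list fewer values leave the remaining kinds unset, so a lookup for an unset kind falls through to a less specific table or to the base implementation. A minimal standalone model of that behavior (the struct and field order here are assumptions, not the exact LLVM definition):

    #include <optional>

    // Hypothetical model of a cost-kind row: unset slots make the lookup
    // miss, so the caller keeps searching less specific tables.
    struct KindCostsModel {
      std::optional<unsigned> Cost[4]; // {RecipThroughput, Latency, CodeSize, SizeAndLatency}
    };

    std::optional<unsigned> lookupKind(const KindCostsModel &Row, unsigned Kind) {
      return Row.Cost[Kind]; // std::nullopt => fall through
    }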
3950
3951 Type *RetTy = ICA.getReturnType();
3952 Type *OpTy = RetTy;
3953 Intrinsic::ID IID = ICA.getID();
3954 unsigned ISD = ISD::DELETED_NODE;
3955 switch (IID) {
3956 default:
3957 break;
3958 case Intrinsic::abs:
3959 ISD = ISD::ABS;
3960 break;
3961 case Intrinsic::bitreverse:
3962 ISD = ISD::BITREVERSE;
3963 break;
3964 case Intrinsic::bswap:
3965 ISD = ISD::BSWAP;
3966 break;
3967 case Intrinsic::ctlz:
3968 ISD = ISD::CTLZ;
3969 break;
3970 case Intrinsic::ctpop:
3971 ISD = ISD::CTPOP;
3972 break;
3973 case Intrinsic::cttz:
3974 ISD = ISD::CTTZ;
3975 break;
3976 case Intrinsic::fshl:
3977 ISD = ISD::FSHL;
3978 if (!ICA.isTypeBasedOnly()) {
3979 const SmallVectorImpl<const Value *> &Args = ICA.getArgs();
3980 if (Args[0] == Args[1])
3981 ISD = ISD::ROTL;
3982 }
3983 break;
3984 case Intrinsic::fshr:
3985 // FSHR has same costs so don't duplicate.
3986 ISD = ISD::FSHL;
3987 if (!ICA.isTypeBasedOnly()) {
3988 const SmallVectorImpl<const Value *> &Args = ICA.getArgs();
3989 if (Args[0] == Args[1])
3990 ISD = ISD::ROTR;
3991 }
3992 break;
3993 case Intrinsic::maxnum:
3994 case Intrinsic::minnum:
3995 // FMINNUM has same costs so don't duplicate.
3996 ISD = ISD::FMAXNUM;
3997 break;
3998 case Intrinsic::sadd_sat:
3999 ISD = ISD::SADDSAT;
4000 break;
4001 case Intrinsic::smax:
4002 ISD = ISD::SMAX;
4003 break;
4004 case Intrinsic::smin:
4005 ISD = ISD::SMIN;
4006 break;
4007 case Intrinsic::ssub_sat:
4008 ISD = ISD::SSUBSAT;
4009 break;
4010 case Intrinsic::uadd_sat:
4011 ISD = ISD::UADDSAT;
4012 break;
4013 case Intrinsic::umax:
4014 ISD = ISD::UMAX;
4015 break;
4016 case Intrinsic::umin:
4017 ISD = ISD::UMIN;
4018 break;
4019 case Intrinsic::usub_sat:
4020 ISD = ISD::USUBSAT;
4021 break;
4022 case Intrinsic::sqrt:
4023 ISD = ISD::FSQRT;
4024 break;
4025 case Intrinsic::sadd_with_overflow:
4026 case Intrinsic::ssub_with_overflow:
4027 // SSUBO has same costs so don't duplicate.
4028 ISD = ISD::SADDO;
4029 OpTy = RetTy->getContainedType(0);
4030 break;
4031 case Intrinsic::uadd_with_overflow:
4032 case Intrinsic::usub_with_overflow:
4033 // USUBO has same costs so don't duplicate.
4034 ISD = ISD::UADDO;
4035 OpTy = RetTy->getContainedType(0);
4036 break;
4037 case Intrinsic::umul_with_overflow:
4038 case Intrinsic::smul_with_overflow:
4039 // SMULO has same costs so don't duplicate.
4040 ISD = ISD::UMULO;
4041 OpTy = RetTy->getContainedType(0);
4042 break;
4043 }
4044
4045 if (ISD != ISD::DELETED_NODE) {
4046 // Legalize the type.
4047 std::pair<InstructionCost, MVT> LT = getTypeLegalizationCost(OpTy);
4048 MVT MTy = LT.second;
4049
4050 // Attempt to lookup cost.
4051 if (ISD == ISD::BITREVERSE && ST->hasGFNI() && ST->hasSSSE3() &&
4052 MTy.isVector()) {
4053 // With PSHUFB the code is very similar for all types. If we have integer
4054 // byte operations, we just need a GF2P8AFFINEQB for vXi8. For other types
4055 // we also need a PSHUFB.
4056 unsigned Cost = MTy.getVectorElementType() == MVT::i8 ? 1 : 2;
4057
4058 // Without byte operations, we need twice as many GF2P8AFFINEQB and PSHUFB
4059 // instructions. We also need an extract and an insert.
4060 if (!(MTy.is128BitVector() || (ST->hasAVX2() && MTy.is256BitVector()) ||
4061 (ST->hasBWI() && MTy.is512BitVector())))
4062 Cost = Cost * 2 + 2;
4063
4064 return LT.first * Cost;
4065 }
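
The GFNI rule above reduces to a small formula; a standalone sketch (hypothetical helper, mirroring the comments rather than the real TTI API):

    // GF2P8AFFINEQB alone handles vXi8; other element types add a PSHUFB.
    // An illegal vector width doubles the ops and pays an extract + insert.
    unsigned gfniBitReverseCost(bool ElemIsI8, bool WidthIsLegal, unsigned LTFirst) {
      unsigned Cost = ElemIsI8 ? 1 : 2;
      if (!WidthIsLegal)
        Cost = Cost * 2 + 2;
      return LTFirst * Cost; // e.g. v32i8 without AVX2: 1 * (1*2 + 2) = 4
    }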
4066
4067 // Without BMI/LZCNT see if we're only looking for a *_ZERO_UNDEF cost.
4068 if (((ISD == ISD::CTTZ && !ST->hasBMI()) ||
4069 (ISD == ISD::CTLZ && !ST->hasLZCNT())) &&
4070 !MTy.isVector() && !ICA.isTypeBasedOnly()) {
4071 const SmallVectorImpl<const Value *> &Args = ICA.getArgs();
4072 if (auto *Cst = dyn_cast<ConstantInt>(Args[1]))
4073 if (Cst->isAllOnesValue())
4074 ISD = ISD == ISD::CTTZ ? ISD::CTTZ_ZERO_UNDEF : ISD::CTLZ_ZERO_UNDEF;
4075 }
4076
4077 // FSQRT is a single instruction.
4078 if (ISD == ISD::FSQRT && CostKind == TTI::TCK_CodeSize)
4079 return LT.first;
4080
4081 auto adjustTableCost = [](int ISD, unsigned Cost,
4082 InstructionCost LegalizationCost,
4083 FastMathFlags FMF) {
4084 // If there are no NaNs to deal with, then these are reduced to a
4085 // single MIN** or MAX** instruction instead of the MIN/CMP/SELECT that we
4086 // assume is used in the non-fast case.
4087 if (ISD == ISD::FMAXNUM || ISD == ISD::FMINNUM) {
4088 if (FMF.noNaNs())
4089 return LegalizationCost * 1;
4090 }
4091 return LegalizationCost * (int)Cost;
4092 };
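
For example, v4f64 maxnum has an AVX table cost of 3 (MAXPD + CMPUNORDPD + BLENDVPD), but with the no-NaNs flag only the legalization factor is charged, since a bare MAXPD suffices. A standalone restatement of the lambda (plain ints in place of the LLVM cost types):

    // With nnan, FMAXNUM/FMINNUM keep only the legalization factor;
    // otherwise the table cost multiplies it.
    int adjustedMinMaxCost(bool NoNaNs, unsigned TableCost, int LegalizationCost) {
      return NoNaNs ? LegalizationCost : LegalizationCost * (int)TableCost;
    }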
4093
4094 if (ST->useGLMDivSqrtCosts())
4095 if (const auto *Entry = CostTableLookup(GLMCostTbl, ISD, MTy))
4096 if (auto KindCost = Entry->Cost[CostKind])
4097 return adjustTableCost(Entry->ISD, KindCost.value(), LT.first,
4098 ICA.getFlags());
4099
4100 if (ST->useSLMArithCosts())
4101 if (const auto *Entry = CostTableLookup(SLMCostTbl, ISD, MTy))
4102 if (auto KindCost = Entry->Cost[CostKind])
4103 return adjustTableCost(Entry->ISD, KindCost.value(), LT.first,
4104 ICA.getFlags());
4105
4106 if (ST->hasVBMI2())
4107 if (const auto *Entry = CostTableLookup(AVX512VBMI2CostTbl, ISD, MTy))
4108 if (auto KindCost = Entry->Cost[CostKind])
4109 return adjustTableCost(Entry->ISD, KindCost.value(), LT.first,
4110 ICA.getFlags());
4111
4112 if (ST->hasBITALG())
4113 if (const auto *Entry = CostTableLookup(AVX512BITALGCostTbl, ISD, MTy))
4114 if (auto KindCost = Entry->Cost[CostKind])
4115 return adjustTableCost(Entry->ISD, KindCost.value(), LT.first,
4116 ICA.getFlags());
4117
4118 if (ST->hasVPOPCNTDQ())
4119 if (const auto *Entry = CostTableLookup(AVX512VPOPCNTDQCostTbl, ISD, MTy))
4120 if (auto KindCost = Entry->Cost[CostKind])
4121 return adjustTableCost(Entry->ISD, KindCost.value(), LT.first,
4122 ICA.getFlags());
4123
4124 if (ST->hasCDI())
4125 if (const auto *Entry = CostTableLookup(AVX512CDCostTbl, ISD, MTy))
4126 if (auto KindCost = Entry->Cost[CostKind])
4127 return adjustTableCost(Entry->ISD, KindCost.value(), LT.first,
4128 ICA.getFlags());
4129
4130 if (ST->hasBWI())
4131 if (const auto *Entry = CostTableLookup(AVX512BWCostTbl, ISD, MTy))
4132 if (auto KindCost = Entry->Cost[CostKind])
4133 return adjustTableCost(Entry->ISD, KindCost.value(), LT.first,
4134 ICA.getFlags());
4135
4136 if (ST->hasAVX512())
4137 if (const auto *Entry = CostTableLookup(AVX512CostTbl, ISD, MTy))
4138 if (auto KindCost = Entry->Cost[CostKind])
4139 return adjustTableCost(Entry->ISD, KindCost.value(), LT.first,
4140 ICA.getFlags());
4141
4142 if (ST->hasXOP())
4143 if (const auto *Entry = CostTableLookup(XOPCostTbl, ISD, MTy))
4144 if (auto KindCost = Entry->Cost[CostKind])
4145 return adjustTableCost(Entry->ISD, KindCost.value(), LT.first,
4146 ICA.getFlags());
4147
4148 if (ST->hasAVX2())
4149 if (const auto *Entry = CostTableLookup(AVX2CostTbl, ISD, MTy))
4150 if (auto KindCost = Entry->Cost[CostKind])
4151 return adjustTableCost(Entry->ISD, KindCost.value(), LT.first,
4152 ICA.getFlags());
4153
4154 if (ST->hasAVX())
4155 if (const auto *Entry = CostTableLookup(AVX1CostTbl, ISD, MTy))
4156 if (auto KindCost = Entry->Cost[CostKind])
4157 return adjustTableCost(Entry->ISD, KindCost.value(), LT.first,
4158 ICA.getFlags());
4159
4160 if (ST->hasSSE42())
4161 if (const auto *Entry = CostTableLookup(SSE42CostTbl, ISD, MTy))
4162 if (auto KindCost = Entry->Cost[CostKind])
4163 return adjustTableCost(Entry->ISD, KindCost.value(), LT.first,
4164 ICA.getFlags());
4165
4166 if (ST->hasSSE41())
4167 if (const auto *Entry = CostTableLookup(SSE41CostTbl, ISD, MTy))
4168 if (auto KindCost = Entry->Cost[CostKind])
4169 return adjustTableCost(Entry->ISD, KindCost.value(), LT.first,
4170 ICA.getFlags());
4171
4172 if (ST->hasSSSE3())
4173 if (const auto *Entry = CostTableLookup(SSSE3CostTbl, ISD, MTy))
4174 if (auto KindCost = Entry->Cost[CostKind])
4175 return adjustTableCost(Entry->ISD, KindCost.value(), LT.first,
4176 ICA.getFlags());
4177
4178 if (ST->hasSSE2())
4179 if (const auto *Entry = CostTableLookup(SSE2CostTbl, ISD, MTy))
4180 if (auto KindCost = Entry->Cost[CostKind])
4181 return adjustTableCost(Entry->ISD, KindCost.value(), LT.first,
4182 ICA.getFlags());
4183
4184 if (ST->hasSSE1())
4185 if (const auto *Entry = CostTableLookup(SSE1CostTbl, ISD, MTy))
4186 if (auto KindCost = Entry->Cost[CostKind])
4187 return adjustTableCost(Entry->ISD, KindCost.value(), LT.first,
4188 ICA.getFlags());
4189
4190 if (ST->hasBMI()) {
4191 if (ST->is64Bit())
4192 if (const auto *Entry = CostTableLookup(BMI64CostTbl, ISD, MTy))
4193 if (auto KindCost = Entry->Cost[CostKind])
4194 return adjustTableCost(Entry->ISD, KindCost.value(), LT.first,
4195 ICA.getFlags());
4196
4197 if (const auto *Entry = CostTableLookup(BMI32CostTbl, ISD, MTy))
4198 if (auto KindCost = Entry->Cost[CostKind])
4199 return adjustTableCost(Entry->ISD, KindCost.value(), LT.first,
4200 ICA.getFlags());
4201 }
4202
4203 if (ST->hasLZCNT()) {
4204 if (ST->is64Bit())
4205 if (const auto *Entry = CostTableLookup(LZCNT64CostTbl, ISD, MTy))
4206 if (auto KindCost = Entry->Cost[CostKind])
4207 return adjustTableCost(Entry->ISD, KindCost.value(), LT.first,
4208 ICA.getFlags());
4209
4210 if (const auto *Entry = CostTableLookup(LZCNT32CostTbl, ISD, MTy))
4211 if (auto KindCost = Entry->Cost[CostKind])
4212 return adjustTableCost(Entry->ISD, KindCost.value(), LT.first,
4213 ICA.getFlags());
4214 }
4215
4216 if (ST->hasPOPCNT()) {
4217 if (ST->is64Bit())
4218 if (const auto *Entry = CostTableLookup(POPCNT64CostTbl, ISD, MTy))
4219 if (auto KindCost = Entry->Cost[CostKind])
4220 return adjustTableCost(Entry->ISD, KindCost.value(), LT.first,
4221 ICA.getFlags());
4222
4223 if (const auto *Entry = CostTableLookup(POPCNT32CostTbl, ISD, MTy))
4224 if (auto KindCost = Entry->Cost[CostKind])
4225 return adjustTableCost(Entry->ISD, KindCost.value(), LT.first,
4226 ICA.getFlags());
4227 }
4228
4229 if (ISD == ISD::BSWAP && ST->hasMOVBE() && ST->hasFastMOVBE()) {
4230 if (const Instruction *II = ICA.getInst()) {
4231 if (II->hasOneUse() && isa<StoreInst>(II->user_back()))
4232 return TTI::TCC_Free;
4233 if (auto *LI = dyn_cast<LoadInst>(II->getOperand(0))) {
4234 if (LI->hasOneUse())
4235 return TTI::TCC_Free;
4236 }
4237 }
4238 }
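
The TCC_Free results reflect that on fast-MOVBE targets a byte swap feeding a store, or fed by a single-use load, can fold into one MOVBE instruction. Roughly, in source terms (standard builtins; illustrative only, and the folding depends on the compiler and -mmovbe):

    #include <cstdint>

    // Each of these typically lowers to a single MOVBE on fast-MOVBE targets,
    // so the bswap itself adds nothing beyond the memory access.
    uint64_t loadBigEndian(const uint64_t *P) { return __builtin_bswap64(*P); }
    void storeBigEndian(uint64_t *P, uint64_t V) { *P = __builtin_bswap64(V); }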
4239
4240 if (ST->is64Bit())
4241 if (const auto *Entry = CostTableLookup(X64CostTbl, ISD, MTy))
4242 if (auto KindCost = Entry->Cost[CostKind])
4243 return adjustTableCost(Entry->ISD, KindCost.value(), LT.first,
4244 ICA.getFlags());
4245
4246 if (const auto *Entry = CostTableLookup(X86CostTbl, ISD, MTy))
4247 if (auto KindCost = Entry->Cost[CostKind])
4248 return adjustTableCost(Entry->ISD, KindCost.value(), LT.first,
4249 ICA.getFlags());
4250 }
4251
4252 return BaseT::getIntrinsicInstrCost(ICA, CostKind);
4253}
4254
4255InstructionCost X86TTIImpl::getVectorInstrCost(unsigned Opcode, Type *Val,
4256 unsigned Index) {
4257 static const CostTblEntry SLMCostTbl[] = {
4258 { ISD::EXTRACT_VECTOR_ELT, MVT::i8, 4 },
4259 { ISD::EXTRACT_VECTOR_ELT, MVT::i16, 4 },
4260 { ISD::EXTRACT_VECTOR_ELT, MVT::i32, 4 },
4261 { ISD::EXTRACT_VECTOR_ELT, MVT::i64, 7 }
4262 };
4263
4264 assert(Val->isVectorTy() && "This must be a vector type");
  41: Assuming the condition is true
  42: '?' condition is true
4265 Type *ScalarType = Val->getScalarType();
4266 InstructionCost RegisterFileMoveCost = 0;
4267 TTI::TargetCostKind CostKind = TTI::TargetCostKind::TCK_RecipThroughput;
4268
4269 // Non-immediate extraction/insertion can be handled as a sequence of
4270 // aliased loads+stores via the stack.
4271 if (Index == -1U && (Opcode == Instruction::ExtractElement ||
4272 Opcode == Instruction::InsertElement)) {
4273 // TODO: On some SSE41+ targets, we expand to cmp+splat+select patterns:
4274 // inselt N0, N1, N2 --> select (SplatN2 == {0,1,2...}) ? SplatN1 : N0.
4275
4276 // TODO: Move this to BasicTTIImpl.h? We'd need better gep + index handling.
4277 assert(isa<FixedVectorType>(Val) && "Fixed vector type expected");
4278 Align VecAlign = DL.getPrefTypeAlign(Val);
4279 Align SclAlign = DL.getPrefTypeAlign(ScalarType);
4280
4281 // Extract - store vector to stack, load scalar.
4282 if (Opcode == Instruction::ExtractElement) {
4283 return getMemoryOpCost(Instruction::Store, Val, VecAlign, 0, CostKind) +
4284 getMemoryOpCost(Instruction::Load, ScalarType, SclAlign, 0,
4285 CostKind);
4286 }
4287 // Insert - store vector to stack, store scalar, load vector.
4288 if (Opcode == Instruction::InsertElement) {
4289 return getMemoryOpCost(Instruction::Store, Val, VecAlign, 0, CostKind) +
4290 getMemoryOpCost(Instruction::Store, ScalarType, SclAlign, 0,
4291 CostKind) +
4292 getMemoryOpCost(Instruction::Load, Val, VecAlign, 0, CostKind);
4293 }
4294 }
4295
4296 if (Index != -1U && (Opcode == Instruction::ExtractElement ||
  42.1: 'Opcode' is equal to ExtractElement
4297 Opcode == Instruction::InsertElement)) {
4298 // Extraction of vXi1 elements are now efficiently handled by MOVMSK.
4299 if (Opcode == Instruction::ExtractElement &&
  42.2: 'Opcode' is equal to ExtractElement
4300 ScalarType->getScalarSizeInBits() == 1 &&
  43: Assuming the condition is false
4301 cast<FixedVectorType>(Val)->getNumElements() > 1)
4302 return 1;
4303
4304 // Legalize the type.
4305 std::pair<InstructionCost, MVT> LT = getTypeLegalizationCost(Val);
4306
4307 // This type is legalized to a scalar type.
4308 if (!LT.second.isVector())
  44: Assuming the condition is false
  45: Taking false branch
4309 return 0;
4310
4311 // The type may be split. Normalize the index to the new type.
4312 unsigned SizeInBits = LT.second.getSizeInBits();
4313 unsigned NumElts = LT.second.getVectorNumElements();
4314 unsigned SubNumElts = NumElts;
4315 Index = Index % NumElts;
4316
4317 // For >128-bit vectors, we need to extract higher 128-bit subvectors.
4318 // For inserts, we also need to insert the subvector back.
4319 if (SizeInBits > 128) {
  46: Assuming 'SizeInBits' is > 128
4320 assert((SizeInBits % 128) == 0 && "Illegal vector");
  47: Taking true branch
  48: Assuming the condition is true
  49: '?' condition is true
4321 unsigned NumSubVecs = SizeInBits / 128;
4322 SubNumElts = NumElts / NumSubVecs;
  50: Value assigned to 'SubNumElts'
4323 if (SubNumElts <= Index) {
  51: Assuming 'SubNumElts' is <= 'Index'
  52: Taking true branch
4324 RegisterFileMoveCost += (Opcode == Instruction::InsertElement ? 2 : 1);
  52.1: 'Opcode' is not equal to InsertElement
  53: '?' condition is false
4325 Index %= SubNumElts;
  54: Division by zero
4326 }
4327 }
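
The path the analyzer reports is easiest to see in isolation: when legalization yields fewer elements than 128-bit subvectors (NumElts < NumSubVecs), the integer division leaves SubNumElts == 0, `SubNumElts <= Index` is then trivially true, and the modulo at line 4325 divides by zero. A standalone model (hypothetical values; the guard shown is one possible fix, not necessarily the one the maintainers would choose):

    #include <cstdio>

    unsigned normalizeIndex(unsigned Index, unsigned SizeInBits, unsigned NumElts) {
      unsigned NumSubVecs = SizeInBits / 128;
      unsigned SubNumElts = NumElts / NumSubVecs; // 0 whenever NumElts < NumSubVecs
      if (SubNumElts <= Index) {
        if (SubNumElts == 0) // guard missing from the original code
          return 0;
        Index %= SubNumElts; // the line-4325 division-by-zero site when unguarded
      }
      return Index;
    }

    int main() {
      // A 256-bit legalized type with a single element reproduces the report.
      printf("%u\n", normalizeIndex(/*Index=*/1, /*SizeInBits=*/256, /*NumElts=*/1));
    }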
4328
4329 if (Index == 0) {
4330 // Floating point scalars are already located in index #0.
4331 // Many insertions to #0 can fold away for scalar fp-ops, so let's assume
4332 // true for all.
4333 if (ScalarType->isFloatingPointTy())
4334 return RegisterFileMoveCost;
4335
4336 // Assume movd/movq XMM -> GPR is relatively cheap on all targets.
4337 if (ScalarType->isIntegerTy() && Opcode == Instruction::ExtractElement)
4338 return 1 + RegisterFileMoveCost;
4339 }
4340
4341 int ISD = TLI->InstructionOpcodeToISD(Opcode);
4342 assert(ISD && "Unexpected vector opcode");
4343 MVT MScalarTy = LT.second.getScalarType();
4344 if (ST->useSLMArithCosts())
4345 if (auto *Entry = CostTableLookup(SLMCostTbl, ISD, MScalarTy))
4346 return Entry->Cost + RegisterFileMoveCost;
4347
4348 // Assume pinsr/pextr XMM <-> GPR is relatively cheap on all targets.
4349 if ((MScalarTy == MVT::i16 && ST->hasSSE2()) ||
4350 (MScalarTy.isInteger() && ST->hasSSE41()))
4351 return 1 + RegisterFileMoveCost;
4352
4353 // Assume insertps is relatively cheap on all targets.
4354 if (MScalarTy == MVT::f32 && ST->hasSSE41() &&
4355 Opcode == Instruction::InsertElement)
4356 return 1 + RegisterFileMoveCost;
4357
4358 // For extractions we just need to shuffle the element to index 0, which
4359 // should be very cheap (assume cost = 1). For insertions we need to shuffle
4360 // the elements to its destination. In both cases we must handle the
4361 // subvector move(s).
4362 // If the vector type is already less than 128-bits then don't reduce it.
4363 // TODO: Under what circumstances should we shuffle using the full width?
4364 InstructionCost ShuffleCost = 1;
4365 if (Opcode == Instruction::InsertElement) {
4366 auto *SubTy = cast<VectorType>(Val);
4367 EVT VT = TLI->getValueType(DL, Val);
4368 if (VT.getScalarType() != MScalarTy || VT.getSizeInBits() >= 128)
4369 SubTy = FixedVectorType::get(ScalarType, SubNumElts);
4370 ShuffleCost = getShuffleCost(TTI::SK_PermuteTwoSrc, SubTy, None, CostKind,
4371 0, SubTy);
4372 }
4373 int IntOrFpCost = ScalarType->isFloatingPointTy() ? 0 : 1;
4374 return ShuffleCost + IntOrFpCost + RegisterFileMoveCost;
4375 }
4376
4377 // Add to the base cost if we know that the extracted element of a vector is
4378 // destined to be moved to and used in the integer register file.
4379 if (Opcode == Instruction::ExtractElement && ScalarType->isPointerTy())
4380 RegisterFileMoveCost += 1;
4381
4382 return BaseT::getVectorInstrCost(Opcode, Val, Index) + RegisterFileMoveCost;
4383}
4384
4385InstructionCost X86TTIImpl::getScalarizationOverhead(VectorType *Ty,
4386 const APInt &DemandedElts,
4387 bool Insert,
4388 bool Extract) {
4389 assert(DemandedElts.getBitWidth() ==
4390            cast<FixedVectorType>(Ty)->getNumElements() &&
4391        "Vector size mismatch");
  20: 'Ty' is a 'CastReturnType'
  21: Assuming the condition is true
  22: '?' condition is true
4392
4393 std::pair<InstructionCost, MVT> LT = getTypeLegalizationCost(Ty);
4394 MVT MScalarTy = LT.second.getScalarType();
4395 unsigned SizeInBits = LT.second.getSizeInBits();
4396 TTI::TargetCostKind CostKind = TTI::TargetCostKind::TCK_RecipThroughput;
4397 InstructionCost Cost = 0;
4398
4399 // For insertions, an ISD::BUILD_VECTOR style vector initialization can be much
4400 // cheaper than an accumulation of ISD::INSERT_VECTOR_ELT.
4401 if (Insert) {
  22.1: 'Insert' is false
  23: Taking false branch
4402 if ((MScalarTy == MVT::i16 && ST->hasSSE2()) ||
4403 (MScalarTy.isInteger() && ST->hasSSE41()) ||
4404 (MScalarTy == MVT::f32 && ST->hasSSE41())) {
4405 // For types we can insert directly, insertion into 128-bit sub vectors is
4406 // cheap, followed by a cheap chain of concatenations.
4407 if (SizeInBits <= 128) {
4408 Cost +=
4409 BaseT::getScalarizationOverhead(Ty, DemandedElts, Insert, false);
4410 } else {
4411 // In each 128-lane, if at least one index is demanded but not all
4412 // indices are demanded and this 128-lane is not the first 128-lane of
4413 // the legalized-vector, then this 128-lane needs an extracti128; if in
4414 // each 128-lane, there is at least one demanded index, this 128-lane
4415 // needs an inserti128.
4416
4417 // The following cases will help you build a better understanding:
4418 // Assume we insert several elements into a v8i32 vector in avx2,
4419 // Case#1: inserting into 1st index needs vpinsrd + inserti128.
4420 // Case#2: inserting into 5th index needs extracti128 + vpinsrd +
4421 // inserti128.
4422 // Case#3: inserting into 4,5,6,7 index needs 4*vpinsrd + inserti128.
4423 const int CostValue = *LT.first.getValue();
4424 assert(CostValue >= 0 && "Negative cost!");
4425 unsigned Num128Lanes = SizeInBits / 128 * CostValue;
4426 unsigned NumElts = LT.second.getVectorNumElements() * CostValue;
4427 APInt WidenedDemandedElts = DemandedElts.zext(NumElts);
4428 unsigned Scale = NumElts / Num128Lanes;
4429 // We iterate each 128-lane, and check if we need a
4430 // extracti128/inserti128 for this 128-lane.
4431 for (unsigned I = 0; I < NumElts; I += Scale) {
4432 APInt Mask = WidenedDemandedElts.getBitsSet(NumElts, I, I + Scale);
4433 APInt MaskedDE = Mask & WidenedDemandedElts;
4434 unsigned Population = MaskedDE.countPopulation();
4435 Cost += (Population > 0 && Population != Scale &&
4436 I % LT.second.getVectorNumElements() != 0);
4437 Cost += Population > 0;
4438 }
4439 Cost += DemandedElts.countPopulation();
4440
4441 // For vXf32 cases, insertion into the 0'th index in each v4f32
4442 // 128-bit vector is free.
4443 // NOTE: This assumes legalization widens vXf32 vectors.
4444 if (MScalarTy == MVT::f32)
4445 for (unsigned i = 0, e = cast<FixedVectorType>(Ty)->getNumElements();
4446 i < e; i += 4)
4447 if (DemandedElts[i])
4448 Cost--;
4449 }
4450 } else if (LT.second.isVector()) {
4451 // Without fast insertion, we need to use MOVD/MOVQ to pass each demanded
4452 // integer element as a SCALAR_TO_VECTOR, then we build the vector as a
4453 // series of UNPCK followed by CONCAT_VECTORS - all of these can be
4454 // considered cheap.
4455 if (Ty->isIntOrIntVectorTy())
4456 Cost += DemandedElts.countPopulation();
4457
4458 // Get the smaller of the legalized or original pow2-extended number of
4459 // vector elements, which represents the number of unpacks we'll end up
4460 // performing.
4461 unsigned NumElts = LT.second.getVectorNumElements();
4462 unsigned Pow2Elts =
4463 PowerOf2Ceil(cast<FixedVectorType>(Ty)->getNumElements());
4464 Cost += (std::min<unsigned>(NumElts, Pow2Elts) - 1) * LT.first;
4465 }
4466 }
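
Tracing Case#2 through the lane loop confirms the comment: for a v8i32 on AVX2 with only element 5 demanded, the upper lane is partially demanded and is not lane 0, so the loop adds an extracti128 and an inserti128, and countPopulation() adds one vpinsrd. A standalone walk (simplified to a single legal vector, CostValue == 1):

    #include <cstdio>

    int main() {
      const unsigned NumElts = 8, Scale = 4; // v8i32 = two 128-bit v4i32 lanes
      bool Demanded[NumElts] = {false, false, false, false, false, true, false, false};
      unsigned Cost = 0, TotalDemanded = 0;
      for (unsigned I = 0; I < NumElts; I += Scale) {
        unsigned Pop = 0;
        for (unsigned J = I; J < I + Scale; ++J)
          Pop += Demanded[J];
        Cost += (Pop > 0 && Pop != Scale && I != 0); // extracti128 for a partial upper lane
        Cost += (Pop > 0);                           // inserti128 for any touched lane
        TotalDemanded += Pop;
      }
      Cost += TotalDemanded;                         // one vpinsrd per demanded element
      printf("%u\n", Cost);                          // 3: extracti128 + vpinsrd + inserti128
    }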
4467
4468 if (Extract) {
  23.1: 'Extract' is true
4469 // vXi1 can be efficiently extracted with MOVMSK.
4470 // TODO: AVX512 predicate mask handling.
4471 // NOTE: This doesn't work well for roundtrip scalarization.
4472 if (!Insert && Ty->getScalarSizeInBits() == 1 && !ST->hasAVX512()) {
  23.2: 'Insert' is false
  24: Assuming the condition is false
4473 unsigned NumElts = cast<FixedVectorType>(Ty)->getNumElements();
4474 unsigned MaxElts = ST->hasAVX2() ? 32 : 16;
4475 unsigned MOVMSKCost = (NumElts + MaxElts - 1) / MaxElts;
4476 return MOVMSKCost;
4477 }
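
The early return above is a ceiling division: each MOVMSK covers one legal mask chunk, 32 elements with AVX2 and 16 without. As a standalone helper (illustrative only):

    // e.g. v64i1 with AVX2: (64 + 31) / 32 == 2 MOVMSK instructions.
    unsigned movmskCost(unsigned NumElts, bool HasAVX2) {
      unsigned MaxElts = HasAVX2 ? 32 : 16;
      return (NumElts + MaxElts - 1) / MaxElts;
    }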
4478
4479 if (LT.second.isVector()) {
  25: Taking true branch
4480 int CostValue = *LT.first.getValue();
4481 assert(CostValue >= 0 && "Negative cost!");
  26: '?' condition is true
4482
4483 unsigned NumElts = LT.second.getVectorNumElements() * CostValue;
4484 assert(NumElts >= DemandedElts.getBitWidth() &&
4485        "Vector has been legalized to smaller element count");
  27: Assuming the condition is true
  28: '?' condition is true
4486
4487 // If we're extracting elements from a 128-bit subvector lane, we only need
4488 // to extract each lane once, not for every element.
4489 if (SizeInBits > 128) {
  29: Assuming 'SizeInBits' is > 128
4490 assert((SizeInBits % 128) == 0 && "Illegal vector");
  30: Taking true branch
  31: Assuming the condition is true
  32: '?' condition is true
4491 unsigned NumLegal128Lanes = SizeInBits / 128;
4492 unsigned Num128Lanes = NumLegal128Lanes * CostValue;
4493 APInt WidenedDemandedElts = DemandedElts.zext(NumElts);
4494 unsigned Scale = NumElts / Num128Lanes;
4495
4496 // Add cost for each demanded 128-bit subvector extraction.
4497 // Luckily this is a lot easier than for insertion.
4498 APInt DemandedUpper128Lanes =
4499 APIntOps::ScaleBitMask(WidenedDemandedElts, Num128Lanes);
4500 auto *Ty128 = FixedVectorType::get(Ty->getElementType(), Scale);
4501 for (unsigned I = 0; I != Num128Lanes; ++I)
  32.1: 'I' is not equal to 'Num128Lanes'
  33: Loop condition is true. Entering loop body
  35: Assuming 'I' is equal to 'Num128Lanes'
  36: Loop condition is false. Execution continues on line 4508
4502 if (DemandedUpper128Lanes[I])
  34: Taking false branch
4503 Cost += getShuffleCost(TTI::SK_ExtractSubvector, Ty, None, CostKind,
4504 I * Scale, Ty128);
4505
4506 // Add all the demanded element extractions together, but adjust the
4507 // index to use the equivalent of the bottom 128 bit lane.
4508 for (unsigned I = 0; I != NumElts; ++I)
  37: Assuming 'I' is not equal to 'NumElts'
  38: Loop condition is true. Entering loop body
4509 if (WidenedDemandedElts[I]) {
  39: Taking true branch
4510 unsigned Idx = I % Scale;
4511 Cost += getVectorInstrCost(Instruction::ExtractElement, Ty, Idx);
  40: Calling 'X86TTIImpl::getVectorInstrCost'
4512 }
4513
4514 return Cost;
4515 }
4516 }
4517
4518 // Fallback to default extraction.
4519 Cost += BaseT::getScalarizationOverhead(Ty, DemandedElts, false, Extract);
4520 }
4521
4522 return Cost;
4523}
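
The extract path charges one subvector extraction per demanded upper 128-bit lane, then one element extraction per demanded element with the index folded into the bottom lane. Worked for v8i32 with elements 0 and 5 demanded (simplified: extracting the low lane is taken as free and each element extract as cost 1):

    #include <cstdio>

    int main() {
      const unsigned NumElts = 8, Scale = 4, Num128Lanes = 2;
      bool Demanded[NumElts] = {true, false, false, false, false, true, false, false};
      unsigned Cost = 0;
      for (unsigned L = 1; L < Num128Lanes; ++L) { // lane 0 assumed free to extract
        bool Any = false;
        for (unsigned J = L * Scale; J < (L + 1) * Scale; ++J)
          Any |= Demanded[J];
        Cost += Any;                               // one extracti128 per touched upper lane
      }
      for (unsigned I = 0; I < NumElts; ++I)
        if (Demanded[I])
          Cost += 1;                               // element extract at index I % Scale
      printf("%u\n", Cost);                        // 3 for elements {0, 5}
    }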
4524
4525InstructionCost
4526X86TTIImpl::getReplicationShuffleCost(Type *EltTy, int ReplicationFactor,
4527 int VF, const APInt &DemandedDstElts,
4528 TTI::TargetCostKind CostKind) {
4529 const unsigned EltTyBits = DL.getTypeSizeInBits(EltTy);
4530 // We don't differentiate element types here, only element bit width.
4531 EltTy = IntegerType::getIntNTy(EltTy->getContext(), EltTyBits);
4532
4533 auto bailout = [&]() {
4534 return BaseT::getReplicationShuffleCost(EltTy, ReplicationFactor, VF,
4535 DemandedDstElts, CostKind);
4536 };
4537
4538 // For now, only deal with AVX512 cases.
4539 if (!ST->hasAVX512())
4540 return bailout();
4541
4542 // Do we have a native shuffle for this element type, or should we promote?
4543 unsigned PromEltTyBits = EltTyBits;
4544 switch (EltTyBits) {
4545 case 32:
4546 case 64:
4547 break; // AVX512F.
4548 case 16:
4549 if (!ST->hasBWI())
4550 PromEltTyBits = 32; // promote to i32, AVX512F.
4551 break; // AVX512BW
4552 case 8:
4553 if (!ST->hasVBMI())
4554 PromEltTyBits = 32; // promote to i32, AVX512F.
4555 break; // AVX512VBMI
4556 case 1:
4557 // There is no support for shuffling i1 elements. We *must* promote.
4558 if (ST->hasBWI()) {
4559 if (ST->hasVBMI())
4560 PromEltTyBits = 8; // promote to i8, AVX512VBMI.
4561 else
4562 PromEltTyBits = 16; // promote to i16, AVX512BW.
4563 break;
4564 }
4565 if (ST->hasDQI()) {
4566 PromEltTyBits = 32; // promote to i32, AVX512F.
4567 break;
4568 }
4569 return bailout();
4570 default:
4571 return bailout();
4572 }
4573 auto *PromEltTy = IntegerType::getIntNTy(EltTy->getContext(), PromEltTyBits);
4574
4575 auto *SrcVecTy = FixedVectorType::get(EltTy, VF);
4576 auto *PromSrcVecTy = FixedVectorType::get(PromEltTy, VF);
4577
4578 int NumDstElements = VF * ReplicationFactor;
4579 auto *PromDstVecTy = FixedVectorType::get(PromEltTy, NumDstElements);
4580 auto *DstVecTy = FixedVectorType::get(EltTy, NumDstElements);
4581
4582 // Legalize the types.
4583 MVT LegalSrcVecTy = getTypeLegalizationCost(SrcVecTy).second;
4584 MVT LegalPromSrcVecTy = getTypeLegalizationCost(PromSrcVecTy).second;
4585 MVT LegalPromDstVecTy = getTypeLegalizationCost(PromDstVecTy).second;
4586 MVT LegalDstVecTy = getTypeLegalizationCost(DstVecTy).second;
4587 // They should have legalized into vector types.
4588 if (!LegalSrcVecTy.isVector() || !LegalPromSrcVecTy.isVector() ||
4589 !LegalPromDstVecTy.isVector() || !LegalDstVecTy.isVector())
4590 return bailout();
4591
4592 if (PromEltTyBits != EltTyBits) {
4593 // If we have to perform the shuffle with wider elt type than our data type,
4594 // then we will first need to anyext (we don't care about the new bits)
4595 // the source elements, and then truncate Dst elements.
4596 InstructionCost PromotionCost;
4597 PromotionCost += getCastInstrCost(
4598 Instruction::SExt, /*Dst=*/PromSrcVecTy, /*Src=*/SrcVecTy,
4599 TargetTransformInfo::CastContextHint::None, CostKind);
4600 PromotionCost +=
4601 getCastInstrCost(Instruction::Trunc, /*Dst=*/DstVecTy,
4602 /*Src=*/PromDstVecTy,
4603 TargetTransformInfo::CastContextHint::None, CostKind);
4604 return PromotionCost + getReplicationShuffleCost(PromEltTy,
4605 ReplicationFactor, VF,
4606 DemandedDstElts, CostKind);
4607 }
4608
4609 assert(LegalSrcVecTy.getScalarSizeInBits() == EltTyBits &&
4610        LegalSrcVecTy.getScalarType() == LegalDstVecTy.getScalarType() &&
4611        "We expect that the legalization doesn't affect the element width, "
4612        "doesn't coalesce/split elements.");
4613
4614 unsigned NumEltsPerDstVec = LegalDstVecTy.getVectorNumElements();
4615 unsigned NumDstVectors =
4616 divideCeil(DstVecTy->getNumElements(), NumEltsPerDstVec);
4617
4618 auto *SingleDstVecTy = FixedVectorType::get(EltTy, NumEltsPerDstVec);
4619
4620 // Not all the produced Dst elements may be demanded. In our case,
4621 // given that a single Dst vector is formed by a single shuffle,
4622 // if none of the elements that would form a given Dst vector are demanded,
4623 // then we don't need that shuffle at all, so adjust the cost accordingly.
4624 APInt DemandedDstVectors = APIntOps::ScaleBitMask(
4625 DemandedDstElts.zext(NumDstVectors * NumEltsPerDstVec), NumDstVectors);
4626 unsigned NumDstVectorsDemanded = DemandedDstVectors.countPopulation();
4627
4628 InstructionCost SingleShuffleCost =
4629 getShuffleCost(TTI::SK_PermuteSingleSrc, SingleDstVecTy, /*Mask=*/None,
4630 CostKind, /*Index=*/0, /*SubTp=*/nullptr);
4631 return NumDstVectorsDemanded * SingleShuffleCost;
4632}
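
As a concrete illustration of the demanded-vector adjustment above, here is a minimal standalone sketch (plain C++ with hypothetical names, not the LLVM APInt API) of what the APIntOps::ScaleBitMask plus countPopulation pair computes: a destination vector is demanded iff any element that maps into it is demanded, and only demanded vectors pay for a shuffle.

  #include <vector>

  // Hypothetical helper: collapse a per-element demand mask into the number
  // of demanded destination vectors, given the element count of one vector.
  static unsigned countDemandedDstVectors(const std::vector<bool> &DemandedDstElts,
                                          unsigned NumEltsPerDstVec) {
    unsigned NumDstVectors =
        (DemandedDstElts.size() + NumEltsPerDstVec - 1) / NumEltsPerDstVec;
    unsigned NumDemanded = 0;
    for (unsigned V = 0; V != NumDstVectors; ++V) {
      bool AnyDemanded = false;
      for (unsigned E = 0; E != NumEltsPerDstVec; ++E) {
        unsigned Idx = V * NumEltsPerDstVec + E;
        if (Idx < DemandedDstElts.size() && DemandedDstElts[Idx])
          AnyDemanded = true;
      }
      NumDemanded += AnyDemanded; // one shuffle per demanded destination vector
    }
    return NumDemanded;
  }

The final cost is then NumDemanded * SingleShuffleCost; destination vectors with no demanded elements contribute nothing.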
4633
4634InstructionCost X86TTIImpl::getMemoryOpCost(unsigned Opcode, Type *Src,
4635 MaybeAlign Alignment,
4636 unsigned AddressSpace,
4637 TTI::TargetCostKind CostKind,
4638 TTI::OperandValueInfo OpInfo,
4639 const Instruction *I) {
4640 // TODO: Handle other cost kinds.
4641 if (CostKind != TTI::TCK_RecipThroughput) {
4642 if (auto *SI = dyn_cast_or_null<StoreInst>(I)) {
4643 // A store instruction with index and scale costs 2 uops.
4644 // Check the preceding GEP to identify non-const indices.
4645 if (auto *GEP = dyn_cast<GetElementPtrInst>(SI->getPointerOperand())) {
4646 if (!all_of(GEP->indices(), [](Value *V) { return isa<Constant>(V); }))
4647 return TTI::TCC_Basic * 2;
4648 }
4649 }
4650 return TTI::TCC_Basic;
4651 }
4652
4653 assert((Opcode == Instruction::Load || Opcode == Instruction::Store) &&
4654 "Invalid Opcode");
4655 // Type legalization can't handle structs
4656 if (TLI->getValueType(DL, Src, true) == MVT::Other)
4657 return BaseT::getMemoryOpCost(Opcode, Src, Alignment, AddressSpace,
4658 CostKind);
4659
4660 // Legalize the type.
4661 std::pair<InstructionCost, MVT> LT = getTypeLegalizationCost(Src);
4662
4663 auto *VTy = dyn_cast<FixedVectorType>(Src);
4664
4665 InstructionCost Cost = 0;
4666
4667 // Add a cost for constant load to vector.
4668 if (Opcode == Instruction::Store && OpInfo.isConstant())
4669 Cost += getMemoryOpCost(Instruction::Load, Src, DL.getABITypeAlign(Src),
4670 /*AddressSpace=*/0, CostKind);
4671
4672 // Handle the simple case of non-vectors.
4673 // NOTE: this assumes that legalization never creates a vector from scalars!
4674 if (!VTy || !LT.second.isVector()) {
4675 // Each load/store unit costs 1.
4676 return (LT.second.isFloatingPoint() ? Cost : 0) + LT.first * 1;
4677 }
4678
4679 bool IsLoad = Opcode == Instruction::Load;
4680
4681 Type *EltTy = VTy->getElementType();
4682
4683 const int EltTyBits = DL.getTypeSizeInBits(EltTy);
4684
4685 // Source of truth: how many elements were there in the original IR vector?
4686 const unsigned SrcNumElt = VTy->getNumElements();
4687
4688 // How far have we gotten?
4689 int NumEltRemaining = SrcNumElt;
4690 // Note that we intentionally capture by reference, since NumEltRemaining changes.
4691 auto NumEltDone = [&]() { return SrcNumElt - NumEltRemaining; };
4692
4693 const int MaxLegalOpSizeBytes = divideCeil(LT.second.getSizeInBits(), 8);
4694
4695 // Note that even if we can store 64 bits of an XMM, we still operate on XMM.
4696 const unsigned XMMBits = 128;
4697 if (XMMBits % EltTyBits != 0)
4698 // Vector size must be a multiple of the element size. I.e. no padding.
4699 return BaseT::getMemoryOpCost(Opcode, Src, Alignment, AddressSpace,
4700 CostKind);
4701 const int NumEltPerXMM = XMMBits / EltTyBits;
4702
4703 auto *XMMVecTy = FixedVectorType::get(EltTy, NumEltPerXMM);
4704
4705 for (int CurrOpSizeBytes = MaxLegalOpSizeBytes, SubVecEltsLeft = 0;
4706 NumEltRemaining > 0; CurrOpSizeBytes /= 2) {
4707 // How many elements would a single op deal with at once?
4708 if ((8 * CurrOpSizeBytes) % EltTyBits != 0)
4709 // Vector size must be a multiple of the element size. I.e. no padding.
4710 return BaseT::getMemoryOpCost(Opcode, Src, Alignment, AddressSpace,
4711 CostKind);
4712 int CurrNumEltPerOp = (8 * CurrOpSizeBytes) / EltTyBits;
4713
4714 assert(CurrOpSizeBytes > 0 && CurrNumEltPerOp > 0 && "How'd we get here?");
4715 assert((((NumEltRemaining * EltTyBits) < (2 * 8 * CurrOpSizeBytes)) ||
4716 (CurrOpSizeBytes == MaxLegalOpSizeBytes)) &&
4717 "Unless we haven't halved the op size yet, "
4718 "we have less than two op's sized units of work left.");
4719
4720 auto *CurrVecTy = CurrNumEltPerOp > NumEltPerXMM
4721 ? FixedVectorType::get(EltTy, CurrNumEltPerOp)
4722 : XMMVecTy;
4723
4724 assert(CurrVecTy->getNumElements() % CurrNumEltPerOp == 0 &&
4725 "After halving sizes, the vector elt count is no longer a multiple "
4726 "of number of elements per operation?");
4727 auto *CoalescedVecTy =
4728 CurrNumEltPerOp == 1
4729 ? CurrVecTy
4730 : FixedVectorType::get(
4731 IntegerType::get(Src->getContext(),
4732 EltTyBits * CurrNumEltPerOp),
4733 CurrVecTy->getNumElements() / CurrNumEltPerOp);
4734 assert(DL.getTypeSizeInBits(CoalescedVecTy) ==
4735 DL.getTypeSizeInBits(CurrVecTy) &&
4736 "coalescing elements doesn't change vector width.");
4737
4738 while (NumEltRemaining > 0) {
4739 assert(SubVecEltsLeft >= 0 && "Subreg element count overconsumption?");
4740
4741 // Can we use this vector size, as per the remaining element count?
4742 // Iff the vector is naturally aligned, we can do a wide load regardless.
4743 if (NumEltRemaining < CurrNumEltPerOp &&
4744 (!IsLoad || Alignment.valueOrOne() < CurrOpSizeBytes) &&
4745 CurrOpSizeBytes != 1)
4746 break; // Try smaller vector size.
4747
4748 bool Is0thSubVec = (NumEltDone() % LT.second.getVectorNumElements()) == 0;
4749
4750 // If we have fully processed the previous reg, we need to replenish it.
4751 if (SubVecEltsLeft == 0) {
4752 SubVecEltsLeft += CurrVecTy->getNumElements();
4753 // And that's free only for the 0'th subvector of a legalized vector.
4754 if (!Is0thSubVec)
4755 Cost += getShuffleCost(IsLoad ? TTI::ShuffleKind::SK_InsertSubvector
4756 : TTI::ShuffleKind::SK_ExtractSubvector,
4757 VTy, None, CostKind, NumEltDone(), CurrVecTy);
4758 }
4759
4760 // While we can directly load/store ZMM, YMM, and 64-bit halves of XMM,
4761 // for smaller widths (32/16/8) we have to insert/extract them separately.
4762 // Again, it's free for the 0'th subreg (if op is 32/64 bit wide,
4763 // but let's pretend that it is also true for 16/8 bit wide ops...)
4764 if (CurrOpSizeBytes <= 32 / 8 && !Is0thSubVec) {
4765 int NumEltDoneInCurrXMM = NumEltDone() % NumEltPerXMM;
4766 assert(NumEltDoneInCurrXMM % CurrNumEltPerOp == 0 && "");
4767 int CoalescedVecEltIdx = NumEltDoneInCurrXMM / CurrNumEltPerOp;
4768 APInt DemandedElts =
4769 APInt::getBitsSet(CoalescedVecTy->getNumElements(),
4770 CoalescedVecEltIdx, CoalescedVecEltIdx + 1);
4771 assert(DemandedElts.countPopulation() == 1 && "Inserting single value");
4772 Cost += getScalarizationOverhead(CoalescedVecTy, DemandedElts, IsLoad,
4773 !IsLoad);
4774 }
4775
4776 // This isn't exactly right. We're using slow unaligned 32-byte accesses
4777 // as a proxy for a double-pumped AVX memory interface such as on
4778 // Sandybridge.
4779 if (CurrOpSizeBytes == 32 && ST->isUnalignedMem32Slow())
4780 Cost += 2;
4781 else
4782 Cost += 1;
4783
4784 SubVecEltsLeft -= CurrNumEltPerOp;
4785 NumEltRemaining -= CurrNumEltPerOp;
4786 Alignment = commonAlignment(Alignment.valueOrOne(), CurrOpSizeBytes);
4787 }
4788 }
4789
4790 assert(NumEltRemaining <= 0 && "Should have processed all the elements.");
4791
4792 return Cost;
4793}
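
To see what the halving loop above computes in the common case, here is a simplified standalone model (a hypothetical helper; it deliberately ignores the subvector insert/extract, scalarization, and slow-unaligned-32-byte adjustments the real code adds) that just counts memory operations:

  #include <cassert>

  // Count load/store ops for NumElts elements of EltBits each, starting from
  // a widest legal op of MaxOpBytes and halving the op width each outer step.
  static int countMemOps(int NumElts, int EltBits, int MaxOpBytes) {
    int Ops = 0;
    for (int OpBytes = MaxOpBytes; NumElts > 0; OpBytes /= 2) {
      assert((8 * OpBytes) % EltBits == 0 && "vector width must have no padding");
      int EltsPerOp = (8 * OpBytes) / EltBits;
      while (NumElts >= EltsPerOp) { // use this width while it still fits
        Ops += 1;
        NumElts -= EltsPerOp;
      }
    }
    return Ops;
  }

  // Example: 12 x i32 with 32-byte (YMM) ops: one 32-byte op covers 8
  // elements, one 16-byte op covers the remaining 4, so
  // countMemOps(12, 32, 32) == 2.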
4794
4795InstructionCost
4796X86TTIImpl::getMaskedMemoryOpCost(unsigned Opcode, Type *SrcTy, Align Alignment,
4797 unsigned AddressSpace,
4798 TTI::TargetCostKind CostKind) {
4799 bool IsLoad = (Instruction::Load == Opcode);
     [14] Assuming 'Opcode' is not equal to Load
4800 bool IsStore = (Instruction::Store == Opcode);
     [15] Assuming 'Opcode' is equal to Store
4801
4802 auto *SrcVTy = dyn_cast<FixedVectorType>(SrcTy);
     [16] 'SrcTy' is a 'FixedVectorType'
4803 if (!SrcVTy)
     [16.1] 'SrcVTy' is non-null
     [17] Taking false branch
4804 // To calculate the scalar cost, take the regular cost without the mask.
4805 return getMemoryOpCost(Opcode, SrcTy, Alignment, AddressSpace, CostKind);
4806
4807 unsigned NumElem = SrcVTy->getNumElements();
4808 auto *MaskTy =
4809 FixedVectorType::get(Type::getInt8Ty(SrcVTy->getContext()), NumElem);
4810 if ((IsLoad && !isLegalMaskedLoad(SrcVTy, Alignment)) ||
     [17.1] 'IsLoad' is false
4811 (IsStore && !isLegalMaskedStore(SrcVTy, Alignment))) {
     [17.2] 'IsStore' is true
     [18] Taking true branch
4812 // Scalarization
4813 APInt DemandedElts = APInt::getAllOnes(NumElem);
4814 InstructionCost MaskSplitCost =
4815 getScalarizationOverhead(MaskTy, DemandedElts, false, true);
     [19] Calling 'X86TTIImpl::getScalarizationOverhead'
4816 InstructionCost ScalarCompareCost = getCmpSelInstrCost(
4817 Instruction::ICmp, Type::getInt8Ty(SrcVTy->getContext()), nullptr,
4818 CmpInst::BAD_ICMP_PREDICATE, CostKind);
4819 InstructionCost BranchCost = getCFInstrCost(Instruction::Br, CostKind);
4820 InstructionCost MaskCmpCost = NumElem * (BranchCost + ScalarCompareCost);
4821 InstructionCost ValueSplitCost =
4822 getScalarizationOverhead(SrcVTy, DemandedElts, IsLoad, IsStore);
4823 InstructionCost MemopCost =
4824 NumElem * BaseT::getMemoryOpCost(Opcode, SrcVTy->getScalarType(),
4825 Alignment, AddressSpace, CostKind);
4826 return MemopCost + ValueSplitCost + MaskSplitCost + MaskCmpCost;
4827 }
4828
4829 // Legalize the type.
4830 std::pair<InstructionCost, MVT> LT = getTypeLegalizationCost(SrcVTy);
4831 auto VT = TLI->getValueType(DL, SrcVTy);
4832 InstructionCost Cost = 0;
4833 if (VT.isSimple() && LT.second != VT.getSimpleVT() &&
4834 LT.second.getVectorNumElements() == NumElem)
4835 // Promotion requires extend/truncate for data and a shuffle for mask.
4836 Cost += getShuffleCost(TTI::SK_PermuteTwoSrc, SrcVTy, None, CostKind, 0,
4837 nullptr) +
4838 getShuffleCost(TTI::SK_PermuteTwoSrc, MaskTy, None, CostKind, 0,
4839 nullptr);
4840
4841 else if (LT.first * LT.second.getVectorNumElements() > NumElem) {
4842 auto *NewMaskTy = FixedVectorType::get(MaskTy->getElementType(),
4843 LT.second.getVectorNumElements());
4844 // Expanding requires filling the mask with zeroes.
4845 Cost += getShuffleCost(TTI::SK_InsertSubvector, NewMaskTy, None, CostKind,
4846 0, MaskTy);
4847 }
4848
4849 // Pre-AVX512: each maskmov load costs 2; each maskmov store costs ~8.
4850 if (!ST->hasAVX512())
4851 return Cost + LT.first * (IsLoad ? 2 : 8);
4852
4853 // AVX-512 masked load/store is cheaper
4854 return Cost + LT.first;
4855}
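
For the scalarization branch above, the result is the sum of four components. A minimal sketch of the formula, with placeholder costs standing in for the values the subtarget hooks return:

  // Scalarized masked load/store of NumElem lanes, schematically:
  //   MaskSplitCost  - extract NumElem mask lanes to scalars
  //   MaskCmpCost    - per lane, one scalar compare plus one branch
  //   ValueSplitCost - insert/extract NumElem data lanes
  //   MemopCost      - NumElem scalar loads/stores
  static long scalarizedMaskedMemCost(long NumElem, long MaskSplitCost,
                                      long CmpCost, long BranchCost,
                                      long ValueSplitCost, long ScalarMemCost) {
    long MaskCmpCost = NumElem * (BranchCost + CmpCost);
    long MemopCost = NumElem * ScalarMemCost;
    return MemopCost + ValueSplitCost + MaskSplitCost + MaskCmpCost;
  }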
4856
4857InstructionCost X86TTIImpl::getAddressComputationCost(Type *Ty,
4858 ScalarEvolution *SE,
4859 const SCEV *Ptr) {
4860 // Address computations in vectorized code with non-consecutive addresses will
4861 // likely result in more instructions compared to scalar code where the
4862 // computation can more often be merged into the index mode. The resulting
4863 // extra micro-ops can significantly decrease throughput.
4864 const unsigned NumVectorInstToHideOverhead = 10;
4865
4866 // Cost modeling of strided access computation is hidden by the indexing
4867 // modes of X86 regardless of the stride value. We don't believe that there
4868 // is a difference between constant strided access in general and a constant
4869 // stride whose value is less than or equal to 64.
4870 // Even in the case of (loop invariant) stride whose value is not known at
4871 // compile time, the address computation will not incur more than one extra
4872 // ADD instruction.
4873 if (Ty->isVectorTy() && SE && !ST->hasAVX2()) {
4874 // TODO: AVX2 is the current cut-off because we don't have correct
4875 // interleaving costs for prior ISA's.
4876 if (!BaseT::isStridedAccess(Ptr))
4877 return NumVectorInstToHideOverhead;
4878 if (!BaseT::getConstantStrideStep(SE, Ptr))
4879 return 1;
4880 }
4881
4882 return BaseT::getAddressComputationCost(Ty, SE, Ptr);
4883}
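
The heuristic above reduces to a three-way decision for vector types on pre-AVX2 subtargets (and also requires a ScalarEvolution instance in the real code). A condensed, hypothetical standalone sketch:

  // Pre-AVX2: non-strided vector address computation is assumed to cost about
  // ten extra micro-ops; an unknown but loop-invariant stride costs one ADD.
  // Constant strides fall through to the generic model.
  static int addressComputationCost(bool IsVectorTy, bool HasAVX2,
                                    bool IsStrided, bool HasConstStride,
                                    int GenericCost) {
    if (IsVectorTy && !HasAVX2) {
      if (!IsStrided)
        return 10; // NumVectorInstToHideOverhead
      if (!HasConstStride)
        return 1;  // a single extra ADD
    }
    return GenericCost;
  }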
4884
4885InstructionCost
4886X86TTIImpl::getArithmeticReductionCost(unsigned Opcode, VectorType *ValTy,
4887 Optional<FastMathFlags> FMF,
4888 TTI::TargetCostKind CostKind) {
4889 if (TTI::requiresOrderedReduction(FMF))
4890 return BaseT::getArithmeticReductionCost(Opcode, ValTy, FMF, CostKind);
4891
4892 // We use the Intel Architecture Code Analyzer (IACA) to measure the
4893 // throughput and use that as the cost.
4894
4895 static const CostTblEntry SLMCostTblNoPairWise[] = {
4896 { ISD::FADD, MVT::v2f64, 3 },
4897 { ISD::ADD, MVT::v2i64, 5 },
4898 };
4899
4900 static const CostTblEntry SSE2CostTblNoPairWise[] = {
4901 { ISD::FADD, MVT::v2f64, 2 },
4902 { ISD::FADD, MVT::v2f32, 2 },
4903 { ISD::FADD, MVT::v4f32, 4 },
4904 { ISD::ADD, MVT::v2i64, 2 }, // The data reported by the IACA tool is "1.6".
4905 { ISD::ADD, MVT::v2i32, 2 }, // FIXME: chosen to be less than v4i32
4906 { ISD::ADD, MVT::v4i32, 3 }, // The data reported by the IACA tool is "3.3".
4907 { ISD::ADD, MVT::v2i16, 2 }, // The data reported by the IACA tool is "4.3".
4908 { ISD::ADD, MVT::v4i16, 3 }, // The data reported by the IACA tool is "4.3".
4909 { ISD::ADD, MVT::v8i16, 4 }, // The data reported by the IACA tool is "4.3".
4910 { ISD::ADD, MVT::v2i8, 2 },
4911 { ISD::ADD, MVT::v4i8, 2 },
4912 { ISD::ADD, MVT::v8i8, 2 },
4913 { ISD::ADD, MVT::v16i8, 3 },
4914 };
4915
4916 static const CostTblEntry AVX1CostTblNoPairWise[] = {
4917 { ISD::FADD, MVT::v4f64, 3 },
4918 { ISD::FADD, MVT::v4f32, 3 },
4919 { ISD::FADD, MVT::v8f32, 4 },
4920 { ISD::ADD, MVT::v2i64, 1 }, // The data reported by the IACA tool is "1.5".
4921 { ISD::ADD, MVT::v4i64, 3 },
4922 { ISD::ADD, MVT::v8i32, 5 },
4923 { ISD::ADD, MVT::v16i16, 5 },
4924 { ISD::ADD, MVT::v32i8, 4 },
4925 };
4926
4927 int ISD = TLI->InstructionOpcodeToISD(Opcode);
4928 assert(ISD && "Invalid opcode");
4929
4930 // Before legalizing the type, give a chance to look up illegal narrow types
4931 // in the table.
4932 // FIXME: Is there a better way to do this?
4933 EVT VT = TLI->getValueType(DL, ValTy);
4934 if (VT.isSimple()) {
4935 MVT MTy = VT.getSimpleVT();
4936 if (ST->useSLMArithCosts())
4937 if (const auto *Entry = CostTableLookup(SLMCostTblNoPairWise, ISD, MTy))
4938 return Entry->Cost;
4939
4940 if (ST->hasAVX())
4941 if (const auto *Entry = CostTableLookup(AVX1CostTblNoPairWise, ISD, MTy))
4942 return Entry->Cost;
4943
4944 if (ST->hasSSE2())
4945 if (const auto *Entry = CostTableLookup(SSE2CostTblNoPairWise, ISD, MTy))
4946 return Entry->Cost;
4947 }
4948
4949 std::pair<InstructionCost, MVT> LT = getTypeLegalizationCost(ValTy);
4950
4951 MVT MTy = LT.second;
4952
4953 auto *ValVTy = cast<FixedVectorType>(ValTy);
4954
4955 // Special case: vXi8 mul reductions are performed as vXi16.
4956 if (ISD == ISD::MUL && MTy.getScalarType() == MVT::i8) {
4957 auto *WideSclTy = IntegerType::get(ValVTy->getContext(), 16);
4958 auto *WideVecTy = FixedVectorType::get(WideSclTy, ValVTy->getNumElements());
4959 return getCastInstrCost(Instruction::ZExt, WideVecTy, ValTy,
4960 TargetTransformInfo::CastContextHint::None,
4961 CostKind) +
4962 getArithmeticReductionCost(Opcode, WideVecTy, FMF, CostKind);
4963 }
4964
4965 InstructionCost ArithmeticCost = 0;
4966 if (LT.first != 1 && MTy.isVector() &&
4967 MTy.getVectorNumElements() < ValVTy->getNumElements()) {
4968 // Type needs to be split. We need LT.first - 1 arithmetic ops.
4969 auto *SingleOpTy = FixedVectorType::get(ValVTy->getElementType(),
4970 MTy.getVectorNumElements());
4971 ArithmeticCost = getArithmeticInstrCost(Opcode, SingleOpTy, CostKind);
4972 ArithmeticCost *= LT.first - 1;
4973 }
4974
4975 if (ST->useSLMArithCosts())
4976 if (const auto *Entry = CostTableLookup(SLMCostTblNoPairWise, ISD, MTy))
4977 return ArithmeticCost + Entry->Cost;
4978
4979 if (ST->hasAVX())
4980 if (const auto *Entry = CostTableLookup(AVX1CostTblNoPairWise, ISD, MTy))
4981 return ArithmeticCost + Entry->Cost;
4982
4983 if (ST->hasSSE2())
4984 if (const auto *Entry = CostTableLookup(SSE2CostTblNoPairWise, ISD, MTy))
4985 return ArithmeticCost + Entry->Cost;
4986
4987 // FIXME: These assume a naive kshift+binop lowering, which is probably
4988 // conservative in most cases.
4989 static const CostTblEntry AVX512BoolReduction[] = {
4990 { ISD::AND, MVT::v2i1, 3 },
4991 { ISD::AND, MVT::v4i1, 5 },
4992 { ISD::AND, MVT::v8i1, 7 },
4993 { ISD::AND, MVT::v16i1, 9 },
4994 { ISD::AND, MVT::v32i1, 11 },
4995 { ISD::AND, MVT::v64i1, 13 },
4996 { ISD::OR, MVT::v2i1, 3 },
4997 { ISD::OR, MVT::v4i1, 5 },
4998 { ISD::OR, MVT::v8i1, 7 },
4999 { ISD::OR, MVT::v16i1, 9 },
5000 { ISD::OR, MVT::v32i1, 11 },
5001 { ISD::OR, MVT::v64i1, 13 },
5002 };
5003
5004 static const CostTblEntry AVX2BoolReduction[] = {
5005 { ISD::AND, MVT::v16i16, 2 }, // vpmovmskb + cmp
5006 { ISD::AND, MVT::v32i8, 2 }, // vpmovmskb + cmp
5007 { ISD::OR, MVT::v16i16, 2 }, // vpmovmskb + cmp
5008 { ISD::OR, MVT::v32i8, 2 }, // vpmovmskb + cmp
5009 };
5010
5011 static const CostTblEntry AVX1BoolReduction[] = {
5012 { ISD::AND, MVT::v4i64, 2 }, // vmovmskpd + cmp
5013 { ISD::AND, MVT::v8i32, 2 }, // vmovmskps + cmp
5014 { ISD::AND, MVT::v16i16, 4 }, // vextractf128 + vpand + vpmovmskb + cmp
5015 { ISD::AND, MVT::v32i8, 4 }, // vextractf128 + vpand + vpmovmskb + cmp
5016 { ISD::OR, MVT::v4i64, 2 }, // vmovmskpd + cmp
5017 { ISD::OR, MVT::v8i32, 2 }, // vmovmskps + cmp
5018 { ISD::OR, MVT::v16i16, 4 }, // vextractf128 + vpor + vpmovmskb + cmp
5019 { ISD::OR, MVT::v32i8, 4 }, // vextractf128 + vpor + vpmovmskb + cmp
5020 };
5021
5022 static const CostTblEntry SSE2BoolReduction[] = {
5023 { ISD::AND, MVT::v2i64, 2 }, // movmskpd + cmp
5024 { ISD::AND, MVT::v4i32, 2 }, // movmskps + cmp
5025 { ISD::AND, MVT::v8i16, 2 }, // pmovmskb + cmp
5026 { ISD::AND, MVT::v16i8, 2 }, // pmovmskb + cmp
5027 { ISD::OR, MVT::v2i64, 2 }, // movmskpd + cmp
5028 { ISD::OR, MVT::v4i32, 2 }, // movmskps + cmp
5029 { ISD::OR, MVT::v8i16, 2 }, // pmovmskb + cmp
5030 { ISD::OR, MVT::v16i8, 2 }, // pmovmskb + cmp
5031 };
5032
5033 // Handle bool allof/anyof patterns.
5034 if (ValVTy->getElementType()->isIntegerTy(1)) {
5035 InstructionCost ArithmeticCost = 0;
5036 if (LT.first != 1 && MTy.isVector() &&
5037 MTy.getVectorNumElements() < ValVTy->getNumElements()) {
5038 // Type needs to be split. We need LT.first - 1 arithmetic ops.
5039 auto *SingleOpTy = FixedVectorType::get(ValVTy->getElementType(),
5040 MTy.getVectorNumElements());
5041 ArithmeticCost = getArithmeticInstrCost(Opcode, SingleOpTy, CostKind);
5042 ArithmeticCost *= LT.first - 1;
5043 }
5044
5045 if (ST->hasAVX512())
5046 if (const auto *Entry = CostTableLookup(AVX512BoolReduction, ISD, MTy))
5047 return ArithmeticCost + Entry->Cost;
5048 if (ST->hasAVX2())
5049 if (const auto *Entry = CostTableLookup(AVX2BoolReduction, ISD, MTy))
5050 return ArithmeticCost + Entry->Cost;
5051 if (ST->hasAVX())
5052 if (const auto *Entry = CostTableLookup(AVX1BoolReduction, ISD, MTy))
5053 return ArithmeticCost + Entry->Cost;
5054 if (ST->hasSSE2())
5055 if (const auto *Entry = CostTableLookup(SSE2BoolReduction, ISD, MTy))
5056 return ArithmeticCost + Entry->Cost;
5057
5058 return BaseT::getArithmeticReductionCost(Opcode, ValVTy, FMF, CostKind);
5059 }
5060
5061 unsigned NumVecElts = ValVTy->getNumElements();
5062 unsigned ScalarSize = ValVTy->getScalarSizeInBits();
5063
5064 // Special case power of 2 reductions where the scalar type isn't changed
5065 // by type legalization.
5066 if (!isPowerOf2_32(NumVecElts) || ScalarSize != MTy.getScalarSizeInBits())
5067 return BaseT::getArithmeticReductionCost(Opcode, ValVTy, FMF, CostKind);
5068
5069 InstructionCost ReductionCost = 0;
5070
5071 auto *Ty = ValVTy;
5072 if (LT.first != 1 && MTy.isVector() &&
5073 MTy.getVectorNumElements() < ValVTy->getNumElements()) {
5074 // Type needs to be split. We need LT.first - 1 arithmetic ops.
5075 Ty = FixedVectorType::get(ValVTy->getElementType(),
5076 MTy.getVectorNumElements());
5077 ReductionCost = getArithmeticInstrCost(Opcode, Ty, CostKind);
5078 ReductionCost *= LT.first - 1;
5079 NumVecElts = MTy.getVectorNumElements();
5080 }
5081
5082 // Now handle reduction with the legal type, taking into account size changes
5083 // at each level.
5084 while (NumVecElts > 1) {
5085 // Determine the size of the remaining vector we need to reduce.
5086 unsigned Size = NumVecElts * ScalarSize;
5087 NumVecElts /= 2;
5088 // If we're reducing from 256/512 bits, use an extract_subvector.
5089 if (Size > 128) {
5090 auto *SubTy = FixedVectorType::get(ValVTy->getElementType(), NumVecElts);
5091 ReductionCost += getShuffleCost(TTI::SK_ExtractSubvector, Ty, None,
5092 CostKind, NumVecElts, SubTy);
5093 Ty = SubTy;
5094 } else if (Size == 128) {
5095 // Reducing from 128 bits is a permute of v2f64/v2i64.
5096 FixedVectorType *ShufTy;
5097 if (ValVTy->isFloatingPointTy())
5098 ShufTy =
5099 FixedVectorType::get(Type::getDoubleTy(ValVTy->getContext()), 2);
5100 else
5101 ShufTy =
5102 FixedVectorType::get(Type::getInt64Ty(ValVTy->getContext()), 2);
5103 ReductionCost += getShuffleCost(TTI::SK_PermuteSingleSrc, ShufTy,
5104 None, CostKind, 0, nullptr);
5105 } else if (Size == 64) {
5106 // Reducing from 64 bits is a shuffle of v4f32/v4i32.
5107 FixedVectorType *ShufTy;
5108 if (ValVTy->isFloatingPointTy())
5109 ShufTy =
5110 FixedVectorType::get(Type::getFloatTy(ValVTy->getContext()), 4);
5111 else
5112 ShufTy =
5113 FixedVectorType::get(Type::getInt32Ty(ValVTy->getContext()), 4);
5114 ReductionCost += getShuffleCost(TTI::SK_PermuteSingleSrc, ShufTy,
5115 None, CostKind, 0, nullptr);
5116 } else {
5117 // Reducing from smaller size is a shift by immediate.
5118 auto *ShiftTy = FixedVectorType::get(
5119 Type::getIntNTy(ValVTy->getContext(), Size), 128 / Size);
5120 ReductionCost += getArithmeticInstrCost(
5121 Instruction::LShr, ShiftTy, CostKind,
5122 {TargetTransformInfo::OK_AnyValue, TargetTransformInfo::OP_None},
5123 {TargetTransformInfo::OK_UniformConstantValue, TargetTransformInfo::OP_None});
5124 }
5125
5126 // Add the arithmetic op for this level.
5127 ReductionCost += getArithmeticInstrCost(Opcode, Ty, CostKind);
5128 }
5129
5130 // Add the final extract element to the cost.
5131 return ReductionCost + getVectorInstrCost(Instruction::ExtractElement, Ty, 0);
5132}
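
The while-loop above is a log2 ladder: every level halves the live element count and pays one data-movement op (subvector extract, permute, or shift, depending on the remaining width) plus one arithmetic op, with a single extract-element at the end. A standalone model under those assumptions, where every cost parameter is a placeholder for the real table/hook values:

  #include <cassert>

  // Model the halving-reduction ladder for a power-of-two element count.
  static int reductionLadderCost(unsigned NumElts, unsigned ScalarBits,
                                 int ExtractSubVecCost, int PermuteCost,
                                 int ShiftCost, int ArithCost,
                                 int ExtractEltCost) {
    assert(NumElts && (NumElts & (NumElts - 1)) == 0 && "power of two expected");
    int Cost = 0;
    while (NumElts > 1) {
      unsigned SizeBits = NumElts * ScalarBits;
      NumElts /= 2;
      if (SizeBits > 128)
        Cost += ExtractSubVecCost; // pull the upper half out of YMM/ZMM
      else if (SizeBits == 128 || SizeBits == 64)
        Cost += PermuteCost;       // in-register lane permute/shuffle
      else
        Cost += ShiftCost;         // sub-64-bit: logical shift by immediate
      Cost += ArithCost;           // the add/mul/etc. at this level
    }
    return Cost + ExtractEltCost;  // final extract of element 0
  }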
5133
5134InstructionCost X86TTIImpl::getMinMaxCost(Type *Ty, Type *CondTy,
5135 bool IsUnsigned) {
5136 std::pair<InstructionCost, MVT> LT = getTypeLegalizationCost(Ty);
5137
5138 MVT MTy = LT.second;
5139
5140 int ISD;
5141 if (Ty->isIntOrIntVectorTy()) {
5142 ISD = IsUnsigned ? ISD::UMIN : ISD::SMIN;
5143 } else {
5144 assert(Ty->isFPOrFPVectorTy() &&
5145 "Expected floating point or integer vector type.");
5146 ISD = ISD::FMINNUM;
5147 }
5148
5149 static const CostTblEntry SSE1CostTbl[] = {
5150 {ISD::FMINNUM, MVT::v4f32, 1},
5151 };
5152
5153 static const CostTblEntry SSE2CostTbl[] = {
5154 {ISD::FMINNUM, MVT::v2f64, 1},
5155 {ISD::SMIN, MVT::v8i16, 1},
5156 {ISD::UMIN, MVT::v16i8, 1},
5157 };
5158
5159 static const CostTblEntry SSE41CostTbl[] = {
5160 {ISD::SMIN, MVT::v4i32, 1},
5161 {ISD::UMIN, MVT::v4i32, 1},
5162 {ISD::UMIN, MVT::v8i16, 1},
5163 {ISD::SMIN, MVT::v16i8, 1},
5164 };
5165
5166 static const CostTblEntry SSE42CostTbl[] = {
5167 {ISD::UMIN, MVT::v2i64, 3}, // xor+pcmpgtq+blendvpd
5168 };
5169
5170 static const CostTblEntry AVX1CostTbl[] = {
5171 {ISD::FMINNUM, MVT::v8f32, 1},
5172 {ISD::FMINNUM, MVT::v4f64, 1},
5173 {ISD::SMIN, MVT::v8i32, 3},
5174 {ISD::UMIN, MVT::v8i32, 3},
5175 {ISD::SMIN, MVT::v16i16, 3},
5176 {ISD::UMIN, MVT::v16i16, 3},
5177 {ISD::SMIN, MVT::v32i8, 3},
5178 {ISD::UMIN, MVT::v32i8, 3},
5179 };
5180
5181 static const CostTblEntry AVX2CostTbl[] = {
5182 {ISD::SMIN, MVT::v8i32, 1},
5183 {ISD::UMIN, MVT::v8i32, 1},
5184 {ISD::SMIN, MVT::v16i16, 1},
5185 {ISD::UMIN, MVT::v16i16, 1},
5186 {ISD::SMIN, MVT::v32i8, 1},
5187 {ISD::UMIN, MVT::v32i8, 1},
5188 };
5189
5190 static const CostTblEntry AVX512CostTbl[] = {
5191 {ISD::FMINNUM, MVT::v16f32, 1},
5192 {ISD::FMINNUM, MVT::v8f64, 1},
5193 {ISD::SMIN, MVT::v2i64, 1},
5194 {ISD::UMIN, MVT::v2i64, 1},
5195 {ISD::SMIN, MVT::v4i64, 1},
5196 {ISD::UMIN, MVT::v4i64, 1},
5197 {ISD::SMIN, MVT::v8i64, 1},
5198 {ISD::UMIN, MVT::v8i64, 1},
5199 {ISD::SMIN, MVT::v16i32, 1},
5200 {ISD::UMIN, MVT::v16i32, 1},
5201 };
5202
5203 static const CostTblEntry AVX512BWCostTbl[] = {
5204 {ISD::SMIN, MVT::v32i16, 1},
5205 {ISD::UMIN, MVT::v32i16, 1},
5206 {ISD::SMIN, MVT::v64i8, 1},
5207 {ISD::UMIN, MVT::v64i8, 1},
5208 };
5209
5210 // If we have a native MIN/MAX instruction for this type, use it.
5211 if (ST->hasBWI())
5212 if (const auto *Entry = CostTableLookup(AVX512BWCostTbl, ISD, MTy))
5213 return LT.first * Entry->Cost;
5214
5215 if (ST->hasAVX512())
5216 if (const auto *Entry = CostTableLookup(AVX512CostTbl, ISD, MTy))
5217 return LT.first * Entry->Cost;
5218
5219 if (ST->hasAVX2())
5220 if (const auto *Entry = CostTableLookup(AVX2CostTbl, ISD, MTy))
5221 return LT.first * Entry->Cost;
5222
5223 if (ST->hasAVX())
5224 if (const auto *Entry = CostTableLookup(AVX1CostTbl, ISD, MTy))
5225 return LT.first * Entry->Cost;
5226
5227 if (ST->hasSSE42())
5228 if (const auto *Entry = CostTableLookup(SSE42CostTbl, ISD, MTy))
5229 return LT.first * Entry->Cost;
5230
5231 if (ST->hasSSE41())
5232 if (const auto *Entry = CostTableLookup(SSE41CostTbl, ISD, MTy))
5233 return LT.first * Entry->Cost;
5234
5235 if (ST->hasSSE2())
5236 if (const auto *Entry = CostTableLookup(SSE2CostTbl, ISD, MTy))
5237 return LT.first * Entry->Cost;
5238
5239 if (ST->hasSSE1())
5240 if (const auto *Entry = CostTableLookup(SSE1CostTbl, ISD, MTy))
5241 return LT.first * Entry->Cost;
5242
5243 unsigned CmpOpcode;
5244 if (Ty->isFPOrFPVectorTy()) {
5245 CmpOpcode = Instruction::FCmp;
5246 } else {
5247 assert(Ty->isIntOrIntVectorTy() &&
5248 "expecting floating point or integer type for min/max reduction");
5249 CmpOpcode = Instruction::ICmp;
5250 }
5251
5252 TTI::TargetCostKind CostKind = TTI::TCK_RecipThroughput;
5253 // Otherwise fall back to cmp+select.
5254 InstructionCost Result =
5255 getCmpSelInstrCost(CmpOpcode, Ty, CondTy, CmpInst::BAD_ICMP_PREDICATE,
5256 CostKind) +
5257 getCmpSelInstrCost(Instruction::Select, Ty, CondTy,
5258 CmpInst::BAD_ICMP_PREDICATE, CostKind);
5259 return Result;
5260}
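
When none of the tables match, the fallback above models min/max as its generic two-instruction lowering. As a schematic sketch of that lowering and its cost (hypothetical helper, not the TTI interface):

  // Without a native pmin/pmax, min(x, y) lowers to a compare plus a select:
  //   %c = icmp ult <N x iK> %x, %y      ; or fcmp for FP
  //   %m = select <N x i1> %c, %x, %y
  // so the modeled cost is simply the sum of the two instruction costs.
  static int minMaxFallbackCost(int CmpCost, int SelectCost) {
    return CmpCost + SelectCost;
  }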
5261
5262InstructionCost
5263X86TTIImpl::getMinMaxReductionCost(VectorType *ValTy, VectorType *CondTy,
5264 bool IsUnsigned,
5265 TTI::TargetCostKind CostKind) {
5266 std::pair<InstructionCost, MVT> LT = getTypeLegalizationCost(ValTy);
5267
5268 MVT MTy = LT.second;
5269
5270 int ISD;
5271 if (ValTy->isIntOrIntVectorTy()) {
5272 ISD = IsUnsigned ? ISD::UMIN : ISD::SMIN;
5273 } else {
5274 assert(ValTy->isFPOrFPVectorTy() &&
5275 "Expected floating point or integer vector type.");
5276 ISD = ISD::FMINNUM;
5277 }
5278
5279 // We use the Intel Architecture Code Analyzer (IACA) to measure the
5280 // throughput and use that as the cost.
5281
5282 static const CostTblEntry SSE2CostTblNoPairWise[] = {
5283 {ISD::UMIN, MVT::v2i16, 5}, // need pxors to use pminsw/pmaxsw
5284 {ISD::UMIN, MVT::v4i16, 7}, // need pxors to use pminsw/pmaxsw
5285 {ISD::UMIN, MVT::v8i16, 9}, // need pxors to use pminsw/pmaxsw
5286 };
5287
5288 static const CostTblEntry SSE41CostTblNoPairWise[] = {
5289 {ISD::SMIN, MVT::v2i16, 3}, // same as sse2
5290 {ISD::SMIN, MVT::v4i16, 5}, // same as sse2
5291 {ISD::UMIN, MVT::v2i16, 5}, // same as sse2
5292 {ISD::UMIN, MVT::v4i16, 7}, // same as sse2
5293 {ISD::SMIN, MVT::v8i16, 4}, // phminposuw+xor
5294 {ISD::UMIN, MVT::v8i16, 4}, // FIXME: umin is cheaper than umax
5295 {ISD::SMIN, MVT::v2i8, 3}, // pminsb
5296 {ISD::SMIN, MVT::v4i8, 5}, // pminsb
5297 {ISD::SMIN, MVT::v8i8, 7}, // pminsb
5298 {ISD::SMIN, MVT::v16i8, 6},
5299 {ISD::UMIN, MVT::v2i8, 3}, // same as sse2
5300 {ISD::UMIN, MVT::v4i8, 5}, // same as sse2
5301 {ISD::UMIN, MVT::v8i8, 7}, // same as sse2
5302 {ISD::UMIN, MVT::v16i8, 6}, // FIXME: umin is cheaper than umax
5303 };
5304
5305 static const CostTblEntry AVX1CostTblNoPairWise[] = {
5306 {ISD::SMIN, MVT::v16i16, 6},
5307 {ISD::UMIN, MVT::v16i16, 6}, // FIXME: umin is cheaper than umax
5308 {ISD::SMIN, MVT::v32i8, 8},
5309 {ISD::UMIN, MVT::v32i8, 8},
5310 };
5311
5312 static const CostTblEntry AVX512BWCostTblNoPairWise[] = {
5313 {ISD::SMIN, MVT::v32i16, 8},
5314 {ISD::UMIN, MVT::v32i16, 8}, // FIXME: umin is cheaper than umax
5315 {ISD::SMIN, MVT::v64i8, 10},
5316 {ISD::UMIN, MVT::v64i8, 10},
5317 };
5318
5319 // Before legalizing the type, give a chance to look up illegal narrow types
5320 // in the table.
5321 // FIXME: Is there a better way to do this?
5322 EVT VT = TLI->getValueType(DL, ValTy);
5323 if (VT.isSimple()) {
5324 MVT MTy = VT.getSimpleVT();
5325 if (ST->hasBWI())
5326 if (const auto *Entry = CostTableLookup(AVX512BWCostTblNoPairWise, ISD, MTy))
5327 return Entry->Cost;
5328
5329 if (ST->hasAVX())
5330 if (const auto *Entry = CostTableLookup(AVX1CostTblNoPairWise, ISD, MTy))
5331 return Entry->Cost;
5332
5333 if (ST->hasSSE41())
5334 if (const auto *Entry = CostTableLookup(SSE41CostTblNoPairWise, ISD, MTy))
5335 return Entry->Cost;
5336
5337 if (ST->hasSSE2())
5338 if (const auto *Entry = CostTableLookup(SSE2CostTblNoPairWise, ISD, MTy))
5339 return Entry->Cost;
5340 }
5341
5342 auto *ValVTy = cast<FixedVectorType>(ValTy);
5343 unsigned NumVecElts = ValVTy->getNumElements();
5344
5345 auto *Ty = ValVTy;
5346 InstructionCost MinMaxCost = 0;
5347 if (LT.first != 1 && MTy.isVector() &&
5348 MTy.getVectorNumElements() < ValVTy->getNumElements()) {
5349 // Type needs to be split. We need LT.first - 1 operations.
5350 Ty = FixedVectorType::get(ValVTy->getElementType(),
5351 MTy.getVectorNumElements());
5352 auto *SubCondTy = FixedVectorType::get(CondTy->getElementType(),
5353 MTy.getVectorNumElements());
5354 MinMaxCost = getMinMaxCost(Ty, SubCondTy, IsUnsigned);
5355 MinMaxCost *= LT.first - 1;
5356 NumVecElts = MTy.getVectorNumElements();
5357 }
5358
5359 if (ST->hasBWI())
5360 if (const auto *Entry = CostTableLookup(AVX512BWCostTblNoPairWise, ISD, MTy))
5361 return MinMaxCost + Entry->Cost;
5362
5363 if (ST->hasAVX())
5364 if (const auto *Entry = CostTableLookup(AVX1CostTblNoPairWise, ISD, MTy))
5365 return MinMaxCost + Entry->Cost;
5366
5367 if (ST->hasSSE41())
5368 if (const auto *Entry = CostTableLookup(SSE41CostTblNoPairWise, ISD, MTy))
5369 return MinMaxCost + Entry->Cost;
5370
5371 if (ST->hasSSE2())
5372 if (const auto *Entry = CostTableLookup(SSE2CostTblNoPairWise, ISD, MTy))
5373 return MinMaxCost + Entry->Cost;
5374
5375 unsigned ScalarSize = ValTy->getScalarSizeInBits();
5376
5377 // Special case power of 2 reductions where the scalar type isn't changed
5378 // by type legalization.
5379 if (!isPowerOf2_32(ValVTy->getNumElements()) ||
5380 ScalarSize != MTy.getScalarSizeInBits())
5381 return BaseT::getMinMaxReductionCost(ValTy, CondTy, IsUnsigned, CostKind);
5382
5383 // Now handle reduction with the legal type, taking into account size changes
5384 // at each level.
5385 while (NumVecElts > 1) {
5386 // Determine the size of the remaining vector we need to reduce.
5387 unsigned Size = NumVecElts * ScalarSize;
5388 NumVecElts /= 2;
5389 // If we're reducing from 256/512 bits, use an extract_subvector.
5390 if (Size > 128) {
5391 auto *SubTy = FixedVectorType::get(ValVTy->getElementType(), NumVecElts);
5392 MinMaxCost += getShuffleCost(TTI::SK_ExtractSubvector, Ty, None, CostKind,
5393 NumVecElts, SubTy);
5394 Ty = SubTy;
5395 } else if (Size == 128) {
5396 // Reducing from 128 bits is a permute of v2f64/v2i64.
5397 VectorType *ShufTy;
5398 if (ValTy->isFloatingPointTy())
5399 ShufTy =
5400 FixedVectorType::get(Type::getDoubleTy(ValTy->getContext()), 2);
5401 else
5402 ShufTy = FixedVectorType::get(Type::getInt64Ty(ValTy->getContext()), 2);
5403 MinMaxCost += getShuffleCost(TTI::SK_PermuteSingleSrc, ShufTy, None,
5404 CostKind, 0, nullptr);
5405 } else if (Size == 64) {
5406 // Reducing from 64 bits is a shuffle of v4f32/v4i32.
5407 FixedVectorType *ShufTy;
5408 if (ValTy->isFloatingPointTy())
5409 ShufTy = FixedVectorType::get(Type::getFloatTy(ValTy->getContext()), 4);
5410 else
5411 ShufTy = FixedVectorType::get(Type::getInt32Ty(ValTy->getContext()), 4);
5412 MinMaxCost += getShuffleCost(TTI::SK_PermuteSingleSrc, ShufTy, None,
5413 CostKind, 0, nullptr);
5414 } else {
5415 // Reducing from smaller size is a shift by immediate.
5416 auto *ShiftTy = FixedVectorType::get(
5417 Type::getIntNTy(ValTy->getContext(), Size), 128 / Size);
5418 MinMaxCost += getArithmeticInstrCost(
5419 Instruction::LShr, ShiftTy, TTI::TCK_RecipThroughput,
5420 {TargetTransformInfo::OK_AnyValue, TargetTransformInfo::OP_None},
5421 {TargetTransformInfo::OK_UniformConstantValue, TargetTransformInfo::OP_None});
5422 }
5423
5424 // Add the arithmetic op for this level.
5425 auto *SubCondTy =
5426 FixedVectorType::get(CondTy->getElementType(), Ty->getNumElements());
5427 MinMaxCost += getMinMaxCost(Ty, SubCondTy, IsUnsigned);
5428 }
5429
5430 // Add the final extract element to the cost.
5431 return MinMaxCost + getVectorInstrCost(Instruction::ExtractElement, Ty, 0);
5432}
5433
5434/// Calculate the cost of materializing a 64-bit value. This helper
5435/// method might only calculate a fraction of a larger immediate. Therefore it
5436/// is valid to return a cost of ZERO.
5437InstructionCost X86TTIImpl::getIntImmCost(int64_t Val) {
5438 if (Val == 0)
5439 return TTI::TCC_Free;
5440
5441 if (isInt<32>(Val))
5442 return TTI::TCC_Basic;
5443
5444 return 2 * TTI::TCC_Basic;
5445}
5446
5447InstructionCost X86TTIImpl::getIntImmCost(const APInt &Imm, Type *Ty,
5448 TTI::TargetCostKind CostKind) {
5449 assert(Ty->isIntegerTy());
5450
5451 unsigned BitSize = Ty->getPrimitiveSizeInBits();
5452 if (BitSize == 0)
5453 return ~0U;
5454
5455 // Never hoist constants larger than 128 bits, because this might lead to
5456 // incorrect code generation or assertions in codegen.
5457 // FIXME: Create a cost model for types larger than i128 once the codegen
5458 // issues have been fixed.
5459 if (BitSize > 128)
5460 return TTI::TCC_Free;
5461
5462 if (Imm == 0)
5463 return TTI::TCC_Free;
5464
5465 // Sign-extend all constants to a multiple of 64-bit.
5466 APInt ImmVal = Imm;
5467 if (BitSize % 64 != 0)
5468 ImmVal = Imm.sext(alignTo(BitSize, 64));
5469
5470 // Split the constant into 64-bit chunks and calculate the cost for each
5471 // chunk.
5472 InstructionCost Cost = 0;
5473 for (unsigned ShiftVal = 0; ShiftVal < BitSize; ShiftVal += 64) {
5474 APInt Tmp = ImmVal.ashr(ShiftVal).sextOrTrunc(64);
5475 int64_t Val = Tmp.getSExtValue();
5476 Cost += getIntImmCost(Val);
5477 }
5478 // We need at least one instruction to materialize the constant.
5479 return std::max<InstructionCost>(1, Cost);
5480}
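
As a worked example of the chunking above: a 128-bit immediate is split into two sign-extended 64-bit chunks, each priced like the 64-bit helper earlier (free for zero, one instruction if it fits in a signed 32-bit value, two otherwise), with a floor of one instruction overall. A standalone sketch under those assumptions:

  #include <algorithm>
  #include <cstdint>

  // Mirrors the 64-bit materialization cost model above.
  static int imm64Cost(int64_t V) {
    if (V == 0)
      return 0; // free (the chunk needs no instruction)
    if (V >= INT32_MIN && V <= INT32_MAX)
      return 1; // fits a sign-extended 32-bit immediate
    return 2;   // needs e.g. movabs, or a mov plus a shift
  }

  // Cost of a 128-bit immediate split into low/high 64-bit chunks.
  static int imm128Cost(int64_t Lo, int64_t Hi) {
    int Cost = imm64Cost(Lo) + imm64Cost(Hi);
    return std::max(Cost, 1); // at least one instruction to materialize
  }

  // Example: imm128Cost(/*Lo=*/0, /*Hi=*/1) == 1, while a pattern where both
  // halves need movabs-style materialization costs 4.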
5481
5482InstructionCost X86TTIImpl::getIntImmCostInst(unsigned Opcode, unsigned Idx,
5483 const APInt &Imm, Type *Ty,
5484 TTI::TargetCostKind CostKind,
5485 Instruction *Inst) {
5486 assert(Ty->isIntegerTy());
5487
5488 unsigned BitSize = Ty->getPrimitiveSizeInBits();
5489 // There is no cost model for constants with a bit size of 0. Return TCC_Free
5490 // here, so that constant hoisting will ignore this constant.
5491 if (BitSize == 0)
5492 return TTI::TCC_Free;
5493
5494 unsigned ImmIdx = ~0U;
5495 switch (Opcode) {
5496 default:
5497 return TTI::TCC_Free;
5498 case Instruction::GetElementPtr:
5499 // Always hoist the base address of a GetElementPtr. This prevents the
5500 // creation of new constants for every base constant that gets constant
5501 // folded with the offset.
5502 if (Idx == 0)
5503 return 2 * TTI::TCC_Basic;
5504 return TTI::TCC_Free;
5505 case Instruction::Store:
5506 ImmIdx = 0;
5507 break;
5508 case Instruction::ICmp:
5509 // This is an imperfect hack to prevent constant hoisting of
5510 // compares that might be trying to check if a 64-bit value fits in
5511 // 32-bits. The backend can optimize these cases using a right shift by 32.
5512 // Ideally we would check the compare predicate here. There are also other
5513 // similar immediates the backend can use shifts for.
5514 if (Idx == 1 && Imm.getBitWidth() == 64) {
5515 uint64_t ImmVal = Imm.getZExtValue();
5516 if (ImmVal == 0x100000000ULL || ImmVal == 0xffffffff)
5517 return TTI::TCC_Free;
5518 }
5519 ImmIdx = 1;
5520 break;
5521 case Instruction::And:
5522 // We support 64-bit ANDs with immediates with 32-bits of leading zeroes
5523 // by using a 32-bit operation with implicit zero extension. Detect such
5524 // immediates here as the normal path expects bit 31 to be sign extended.
5525 if (Idx == 1 && Imm.getBitWidth() == 64 && isUInt<32>(Imm.getZExtValue()))
5526 return TTI::TCC_Free;
5527 ImmIdx = 1;
5528 break;
5529 case Instruction::Add:
5530 case Instruction::Sub:
5531 // For add/sub, we can use the opposite instruction for INT32_MIN.
5532 if (Idx == 1 && Imm.getBitWidth() == 64 && Imm.getZExtValue() == 0x80000000)
5533 return TTI::TCC_Free;
5534 ImmIdx = 1;
5535 break;
5536 case Instruction::UDiv:
5537 case Instruction::SDiv:
5538 case Instruction::URem:
5539 case Instruction::SRem:
5540 // Division by constant is typically expanded later into a different
5541 // instruction sequence. This completely changes the constants.
5542 // Report them as "free" to stop ConstantHoist from marking them as opaque.
5543 return TTI::TCC_Free;
5544 case Instruction::Mul:
5545 case Instruction::Or:
5546 case Instruction::Xor:
5547 ImmIdx = 1;
5548 break;
5549 // Always return TCC_Free for the shift value of a shift instruction.
5550 case Instruction::Shl:
5551 case Instruction::LShr:
5552 case Instruction::AShr:
5553 if (Idx == 1)
5554 return TTI::TCC_Free;
5555 break;
5556 case Instruction::Trunc:
5557 case Instruction::ZExt:
5558 case Instruction::SExt:
5559 case Instruction::IntToPtr:
5560 case Instruction::PtrToInt:
5561 case Instruction::BitCast:
5562 case Instruction::PHI:
5563 case Instr