Bug Summary

File: llvm/lib/Target/X86/X86TargetTransformInfo.cpp
Warning: line 3453, column 15
Division by zero
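For context, here is a minimal sketch of the kind of pattern the analyzer's core.DivideZero checker reports. It is illustrative only, is not the code at line 3453, and the function and variable names are hypothetical.

  // Illustrative only. On the path where NumElts <= 16, Scale is still 0 when
  // the division is reached, so the analyzer reports "Division by zero".
  static unsigned averageCost(unsigned TotalCost, unsigned NumElts) {
    unsigned Scale = 0;
    if (NumElts > 16)
      Scale = NumElts / 16;
    return TotalCost / Scale; // warning: Division by zero
  }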

Annotated Source Code


clang -cc1 -cc1 -triple x86_64-pc-linux-gnu -analyze -disable-free -disable-llvm-verifier -discard-value-names -main-file-name X86TargetTransformInfo.cpp -analyzer-store=region -analyzer-opt-analyze-nested-blocks -analyzer-checker=core -analyzer-checker=apiModeling -analyzer-checker=unix -analyzer-checker=deadcode -analyzer-checker=cplusplus -analyzer-checker=security.insecureAPI.UncheckedReturn -analyzer-checker=security.insecureAPI.getpw -analyzer-checker=security.insecureAPI.gets -analyzer-checker=security.insecureAPI.mktemp -analyzer-checker=security.insecureAPI.mkstemp -analyzer-checker=security.insecureAPI.vfork -analyzer-checker=nullability.NullPassedToNonnull -analyzer-checker=nullability.NullReturnedFromNonnull -analyzer-output plist -w -setup-static-analyzer -analyzer-config-compatibility-mode=true -mrelocation-model pic -pic-level 2 -mframe-pointer=none -fmath-errno -fno-rounding-math -mconstructor-aliases -munwind-tables -target-cpu x86-64 -tune-cpu generic -debugger-tuning=gdb -ffunction-sections -fdata-sections -fcoverage-compilation-dir=/build/llvm-toolchain-snapshot-14~++20210926122410+d23fd8ae8906/build-llvm -resource-dir /usr/lib/llvm-14/lib/clang/14.0.0 -D _DEBUG -D _GNU_SOURCE -D __STDC_CONSTANT_MACROS -D __STDC_FORMAT_MACROS -D __STDC_LIMIT_MACROS -I lib/Target/X86 -I /build/llvm-toolchain-snapshot-14~++20210926122410+d23fd8ae8906/llvm/lib/Target/X86 -I include -I /build/llvm-toolchain-snapshot-14~++20210926122410+d23fd8ae8906/llvm/include -D NDEBUG -U NDEBUG -internal-isystem /usr/lib/gcc/x86_64-linux-gnu/10/../../../../include/c++/10 -internal-isystem /usr/lib/gcc/x86_64-linux-gnu/10/../../../../include/x86_64-linux-gnu/c++/10 -internal-isystem /usr/lib/gcc/x86_64-linux-gnu/10/../../../../include/c++/10/backward -internal-isystem /usr/lib/llvm-14/lib/clang/14.0.0/include -internal-isystem /usr/local/include -internal-isystem /usr/lib/gcc/x86_64-linux-gnu/10/../../../../x86_64-linux-gnu/include -internal-externc-isystem /usr/include/x86_64-linux-gnu -internal-externc-isystem /include -internal-externc-isystem /usr/include -O2 -Wno-unused-command-line-argument -Wno-unknown-warning-option -Wno-unused-parameter -Wwrite-strings -Wno-missing-field-initializers -Wno-long-long -Wno-maybe-uninitialized -Wno-class-memaccess -Wno-redundant-move -Wno-pessimizing-move -Wno-noexcept-type -Wno-comment -std=c++14 -fdeprecated-macro -fdebug-compilation-dir=/build/llvm-toolchain-snapshot-14~++20210926122410+d23fd8ae8906/build-llvm -ferror-limit 19 -fvisibility hidden -fvisibility-inlines-hidden -fgnuc-version=4.2.1 -fcolor-diagnostics -vectorize-loops -vectorize-slp -analyzer-output=html -analyzer-config stable-report-filename=true -faddrsig -D__GCC_HAVE_DWARF2_CFI_ASM=1 -o /tmp/scan-build-2021-09-26-234817-15343-1 -x c++ /build/llvm-toolchain-snapshot-14~++20210926122410+d23fd8ae8906/llvm/lib/Target/X86/X86TargetTransformInfo.cpp

/build/llvm-toolchain-snapshot-14~++20210926122410+d23fd8ae8906/llvm/lib/Target/X86/X86TargetTransformInfo.cpp

1//===-- X86TargetTransformInfo.cpp - X86 specific TTI pass ----------------===//
2//
3// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
4// See https://llvm.org/LICENSE.txt for license information.
5// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
6//
7//===----------------------------------------------------------------------===//
8/// \file
9/// This file implements a TargetTransformInfo analysis pass specific to the
10/// X86 target machine. It uses the target's detailed information to provide
11/// more precise answers to certain TTI queries, while letting the target
12/// independent and default TTI implementations handle the rest.
13///
14//===----------------------------------------------------------------------===//
15/// About the Cost Model numbers used below, it's necessary to say the following:
16/// the numbers correspond to some "generic" X86 CPU rather than to a
17/// concrete CPU model. Usually the numbers correspond to the CPU where the feature
18/// first appeared. For example, if we do Subtarget.hasSSE42() in
19/// the lookups below, the cost is based on Nehalem as that was the first CPU
20/// to support that feature level and thus most likely has the worst case cost.
21/// Some examples of other technologies/CPUs:
22/// SSE 3 - Pentium4 / Athlon64
23/// SSE 4.1 - Penryn
24/// SSE 4.2 - Nehalem
25/// AVX - Sandy Bridge
26/// AVX2 - Haswell
27/// AVX-512 - Xeon Phi / Skylake
28/// And some examples of instruction target dependent costs (latency)
29///                      divss    sqrtss    rsqrtss
30///   AMD K7             11-16    19        3
31///   Piledriver         9-24     13-15     5
32///   Jaguar             14       16        2
33///   Pentium II,III     18       30        2
34///   Nehalem            7-14     7-18      3
35///   Haswell            10-13    11        5
36/// TODO: Develop and implement the target dependent cost model and
37/// specialize cost numbers for different Cost Model Targets such as throughput,
38/// code size, latency and uop count.
39//===----------------------------------------------------------------------===//
40
41#include "X86TargetTransformInfo.h"
42#include "llvm/Analysis/TargetTransformInfo.h"
43#include "llvm/CodeGen/BasicTTIImpl.h"
44#include "llvm/CodeGen/CostTable.h"
45#include "llvm/CodeGen/TargetLowering.h"
46#include "llvm/IR/IntrinsicInst.h"
47#include "llvm/Support/Debug.h"
48
49using namespace llvm;
50
51#define DEBUG_TYPE "x86tti"
52
53//===----------------------------------------------------------------------===//
54//
55// X86 cost model.
56//
57//===----------------------------------------------------------------------===//
58
59TargetTransformInfo::PopcntSupportKind
60X86TTIImpl::getPopcntSupport(unsigned TyWidth) {
61 assert(isPowerOf2_32(TyWidth) && "Ty width must be power of 2");
62 // TODO: Currently the __builtin_popcount() implementation using SSE3
63 // instructions is inefficient. Once the problem is fixed, we should
64 // call ST->hasSSE3() instead of ST->hasPOPCNT().
65 return ST->hasPOPCNT() ? TTI::PSK_FastHardware : TTI::PSK_Software;
66}
67
68llvm::Optional<unsigned> X86TTIImpl::getCacheSize(
69 TargetTransformInfo::CacheLevel Level) const {
70 switch (Level) {
71 case TargetTransformInfo::CacheLevel::L1D:
72 // - Penryn
73 // - Nehalem
74 // - Westmere
75 // - Sandy Bridge
76 // - Ivy Bridge
77 // - Haswell
78 // - Broadwell
79 // - Skylake
80 // - Kabylake
81 return 32 * 1024; // 32 KByte
82 case TargetTransformInfo::CacheLevel::L2D:
83 // - Penryn
84 // - Nehalem
85 // - Westmere
86 // - Sandy Bridge
87 // - Ivy Bridge
88 // - Haswell
89 // - Broadwell
90 // - Skylake
91 // - Kabylake
92 return 256 * 1024; // 256 KByte
93 }
94
95 llvm_unreachable("Unknown TargetTransformInfo::CacheLevel");
96}
97
98llvm::Optional<unsigned> X86TTIImpl::getCacheAssociativity(
99 TargetTransformInfo::CacheLevel Level) const {
100 // - Penryn
101 // - Nehalem
102 // - Westmere
103 // - Sandy Bridge
104 // - Ivy Bridge
105 // - Haswell
106 // - Broadwell
107 // - Skylake
108 // - Kabylake
109 switch (Level) {
110 case TargetTransformInfo::CacheLevel::L1D:
111 LLVM_FALLTHROUGH;
112 case TargetTransformInfo::CacheLevel::L2D:
113 return 8;
114 }
115
116 llvm_unreachable("Unknown TargetTransformInfo::CacheLevel");
117}
118
119unsigned X86TTIImpl::getNumberOfRegisters(unsigned ClassID) const {
120 bool Vector = (ClassID == 1);
121 if (Vector && !ST->hasSSE1())
122 return 0;
123
124 if (ST->is64Bit()) {
125 if (Vector && ST->hasAVX512())
126 return 32;
127 return 16;
128 }
129 return 8;
130}
131
132TypeSize
133X86TTIImpl::getRegisterBitWidth(TargetTransformInfo::RegisterKind K) const {
134 unsigned PreferVectorWidth = ST->getPreferVectorWidth();
135 switch (K) {
136 case TargetTransformInfo::RGK_Scalar:
137 return TypeSize::getFixed(ST->is64Bit() ? 64 : 32);
138 case TargetTransformInfo::RGK_FixedWidthVector:
139 if (ST->hasAVX512() && PreferVectorWidth >= 512)
140 return TypeSize::getFixed(512);
141 if (ST->hasAVX() && PreferVectorWidth >= 256)
142 return TypeSize::getFixed(256);
143 if (ST->hasSSE1() && PreferVectorWidth >= 128)
144 return TypeSize::getFixed(128);
145 return TypeSize::getFixed(0);
146 case TargetTransformInfo::RGK_ScalableVector:
147 return TypeSize::getScalable(0);
148 }
149
150 llvm_unreachable("Unsupported register kind");
151}
152
153unsigned X86TTIImpl::getLoadStoreVecRegBitWidth(unsigned) const {
154 return getRegisterBitWidth(TargetTransformInfo::RGK_FixedWidthVector)
155 .getFixedSize();
156}
157
158unsigned X86TTIImpl::getMaxInterleaveFactor(unsigned VF) {
159 // If the loop will not be vectorized, don't interleave the loop.
160 // Let the regular unroller unroll the loop, which saves the overflow
161 // check and memory check cost.
162 if (VF == 1)
163 return 1;
164
165 if (ST->isAtom())
166 return 1;
167
168 // Sandybridge and Haswell have multiple execution ports and pipelined
169 // vector units.
170 if (ST->hasAVX())
171 return 4;
172
173 return 2;
174}
175
176InstructionCost X86TTIImpl::getArithmeticInstrCost(
177 unsigned Opcode, Type *Ty, TTI::TargetCostKind CostKind,
178 TTI::OperandValueKind Op1Info, TTI::OperandValueKind Op2Info,
179 TTI::OperandValueProperties Opd1PropInfo,
180 TTI::OperandValueProperties Opd2PropInfo, ArrayRef<const Value *> Args,
181 const Instruction *CxtI) {
182 // TODO: Handle more cost kinds.
183 if (CostKind != TTI::TCK_RecipThroughput)
184 return BaseT::getArithmeticInstrCost(Opcode, Ty, CostKind, Op1Info,
185 Op2Info, Opd1PropInfo,
186 Opd2PropInfo, Args, CxtI);
187
188 // vXi8 multiplications are always promoted to vXi16.
189 if (Opcode == Instruction::Mul && Ty->isVectorTy() &&
190 Ty->getScalarSizeInBits() == 8) {
191 Type *WideVecTy =
192 VectorType::getExtendedElementVectorType(cast<VectorType>(Ty));
193 return getCastInstrCost(Instruction::ZExt, WideVecTy, Ty,
194 TargetTransformInfo::CastContextHint::None,
195 CostKind) +
196 getCastInstrCost(Instruction::Trunc, Ty, WideVecTy,
197 TargetTransformInfo::CastContextHint::None,
198 CostKind) +
199 getArithmeticInstrCost(Opcode, WideVecTy, CostKind, Op1Info, Op2Info,
200 Opd1PropInfo, Opd2PropInfo);
201 }
202
203 // Legalize the type.
204 std::pair<InstructionCost, MVT> LT = TLI->getTypeLegalizationCost(DL, Ty);
205
206 int ISD = TLI->InstructionOpcodeToISD(Opcode);
207 assert(ISD && "Invalid opcode");
208
209 if (ISD == ISD::MUL && Args.size() == 2 && LT.second.isVector() &&
210 LT.second.getScalarType() == MVT::i32) {
211 // Check if the operands can be represented as a smaller datatype.
212 bool Op1Signed = false, Op2Signed = false;
213 unsigned Op1MinSize = BaseT::minRequiredElementSize(Args[0], Op1Signed);
214 unsigned Op2MinSize = BaseT::minRequiredElementSize(Args[1], Op2Signed);
215 unsigned OpMinSize = std::max(Op1MinSize, Op2MinSize);
216
217 // If both are representable as i15 and at least one is constant,
218 // zero-extended, or sign-extended from vXi16 then we can treat this as
219 // PMADDWD which has the same costs as a vXi16 multiply.
220 if (OpMinSize <= 15 && !ST->isPMADDWDSlow()) {
221 bool Op1Constant =
222 isa<ConstantDataVector>(Args[0]) || isa<ConstantVector>(Args[0]);
223 bool Op2Constant =
224 isa<ConstantDataVector>(Args[1]) || isa<ConstantVector>(Args[1]);
225 bool Op1Sext16 = isa<SExtInst>(Args[0]) && Op1MinSize == 15;
226 bool Op2Sext16 = isa<SExtInst>(Args[1]) && Op2MinSize == 15;
227
228 bool IsZeroExtended = !Op1Signed || !Op2Signed;
229 bool IsConstant = Op1Constant || Op2Constant;
230 bool IsSext16 = Op1Sext16 || Op2Sext16;
231 if (IsConstant || IsZeroExtended || IsSext16)
232 LT.second =
233 MVT::getVectorVT(MVT::i16, 2 * LT.second.getVectorNumElements());
234 }
235 }
236
237 if ((ISD == ISD::SDIV || ISD == ISD::SREM || ISD == ISD::UDIV ||
238 ISD == ISD::UREM) &&
239 (Op2Info == TargetTransformInfo::OK_UniformConstantValue ||
240 Op2Info == TargetTransformInfo::OK_NonUniformConstantValue) &&
241 Opd2PropInfo == TargetTransformInfo::OP_PowerOf2) {
242 if (ISD == ISD::SDIV || ISD == ISD::SREM) {
243 // On X86, vector signed division by a power-of-two constant is
244 // normally expanded to the sequence SRA + SRL + ADD + SRA.
245 // The OperandValue properties may not be the same as those of the previous
246 // operation; conservatively assume OP_None.
247 InstructionCost Cost =
248 2 * getArithmeticInstrCost(Instruction::AShr, Ty, CostKind, Op1Info,
249 Op2Info, TargetTransformInfo::OP_None,
250 TargetTransformInfo::OP_None);
251 Cost += getArithmeticInstrCost(Instruction::LShr, Ty, CostKind, Op1Info,
252 Op2Info, TargetTransformInfo::OP_None,
253 TargetTransformInfo::OP_None);
254 Cost += getArithmeticInstrCost(Instruction::Add, Ty, CostKind, Op1Info,
255 Op2Info, TargetTransformInfo::OP_None,
256 TargetTransformInfo::OP_None);
257
258 if (ISD == ISD::SREM) {
259 // For SREM: (X % C) is the equivalent of (X - (X/C)*C)
260 Cost += getArithmeticInstrCost(Instruction::Mul, Ty, CostKind, Op1Info,
261 Op2Info);
262 Cost += getArithmeticInstrCost(Instruction::Sub, Ty, CostKind, Op1Info,
263 Op2Info);
264 }
265
266 return Cost;
267 }
268
269 // Vector unsigned division/remainder will be simplified to shifts/masks.
270 if (ISD == ISD::UDIV)
271 return getArithmeticInstrCost(Instruction::LShr, Ty, CostKind, Op1Info,
272 Op2Info, TargetTransformInfo::OP_None,
273 TargetTransformInfo::OP_None);
274 // UREM
275 return getArithmeticInstrCost(Instruction::And, Ty, CostKind, Op1Info,
276 Op2Info, TargetTransformInfo::OP_None,
277 TargetTransformInfo::OP_None);
278 }
279
280 static const CostTblEntry GLMCostTable[] = {
281 { ISD::FDIV, MVT::f32, 18 }, // divss
282 { ISD::FDIV, MVT::v4f32, 35 }, // divps
283 { ISD::FDIV, MVT::f64, 33 }, // divsd
284 { ISD::FDIV, MVT::v2f64, 65 }, // divpd
285 };
286
287 if (ST->useGLMDivSqrtCosts())
288 if (const auto *Entry = CostTableLookup(GLMCostTable, ISD,
289 LT.second))
290 return LT.first * Entry->Cost;
291
292 static const CostTblEntry SLMCostTable[] = {
293 { ISD::MUL, MVT::v4i32, 11 }, // pmulld
294 { ISD::MUL, MVT::v8i16, 2 }, // pmullw
295 { ISD::FMUL, MVT::f64, 2 }, // mulsd
296 { ISD::FMUL, MVT::v2f64, 4 }, // mulpd
297 { ISD::FMUL, MVT::v4f32, 2 }, // mulps
298 { ISD::FDIV, MVT::f32, 17 }, // divss
299 { ISD::FDIV, MVT::v4f32, 39 }, // divps
300 { ISD::FDIV, MVT::f64, 32 }, // divsd
301 { ISD::FDIV, MVT::v2f64, 69 }, // divpd
302 { ISD::FADD, MVT::v2f64, 2 }, // addpd
303 { ISD::FSUB, MVT::v2f64, 2 }, // subpd
304 // v2i64/v4i64 mul is custom lowered as a series of long:
305 // multiplies(3), shifts(3) and adds(2)
306 // slm muldq version throughput is 2 and addq throughput 4
307 // thus: 3X2 (muldq throughput) + 3X1 (shift throughput) +
308 // 2X4 (addq throughput) = 17
309 { ISD::MUL, MVT::v2i64, 17 },
310 // slm addq\subq throughput is 4
311 { ISD::ADD, MVT::v2i64, 4 },
312 { ISD::SUB, MVT::v2i64, 4 },
313 };
314
315 if (ST->isSLM()) {
316 if (Args.size() == 2 && ISD == ISD::MUL && LT.second == MVT::v4i32) {
317 // Check if the operands can be shrunk into a smaller datatype.
318 // TODO: Merge this into generic vXi32 MUL patterns above.
319 bool Op1Signed = false;
320 unsigned Op1MinSize = BaseT::minRequiredElementSize(Args[0], Op1Signed);
321 bool Op2Signed = false;
322 unsigned Op2MinSize = BaseT::minRequiredElementSize(Args[1], Op2Signed);
323
324 bool SignedMode = Op1Signed || Op2Signed;
325 unsigned OpMinSize = std::max(Op1MinSize, Op2MinSize);
326
327 if (OpMinSize <= 7)
328 return LT.first * 3; // pmullw/sext
329 if (!SignedMode && OpMinSize <= 8)
330 return LT.first * 3; // pmullw/zext
331 if (OpMinSize <= 15)
332 return LT.first * 5; // pmullw/pmulhw/pshuf
333 if (!SignedMode && OpMinSize <= 16)
334 return LT.first * 5; // pmullw/pmulhw/pshuf
335 }
336
337 if (const auto *Entry = CostTableLookup(SLMCostTable, ISD,
338 LT.second)) {
339 return LT.first * Entry->Cost;
340 }
341 }
342
343 static const CostTblEntry AVX512BWUniformConstCostTable[] = {
344 { ISD::SHL, MVT::v64i8, 2 }, // psllw + pand.
345 { ISD::SRL, MVT::v64i8, 2 }, // psrlw + pand.
346 { ISD::SRA, MVT::v64i8, 4 }, // psrlw, pand, pxor, psubb.
347 };
348
349 if (Op2Info == TargetTransformInfo::OK_UniformConstantValue &&
350 ST->hasBWI()) {
351 if (const auto *Entry = CostTableLookup(AVX512BWUniformConstCostTable, ISD,
352 LT.second))
353 return LT.first * Entry->Cost;
354 }
355
356 static const CostTblEntry AVX512UniformConstCostTable[] = {
357 { ISD::SRA, MVT::v2i64, 1 },
358 { ISD::SRA, MVT::v4i64, 1 },
359 { ISD::SRA, MVT::v8i64, 1 },
360
361 { ISD::SHL, MVT::v64i8, 4 }, // psllw + pand.
362 { ISD::SRL, MVT::v64i8, 4 }, // psrlw + pand.
363 { ISD::SRA, MVT::v64i8, 8 }, // psrlw, pand, pxor, psubb.
364
365 { ISD::SDIV, MVT::v16i32, 6 }, // pmuludq sequence
366 { ISD::SREM, MVT::v16i32, 8 }, // pmuludq+mul+sub sequence
367 { ISD::UDIV, MVT::v16i32, 5 }, // pmuludq sequence
368 { ISD::UREM, MVT::v16i32, 7 }, // pmuludq+mul+sub sequence
369 };
370
371 if (Op2Info == TargetTransformInfo::OK_UniformConstantValue &&
372 ST->hasAVX512()) {
373 if (const auto *Entry = CostTableLookup(AVX512UniformConstCostTable, ISD,
374 LT.second))
375 return LT.first * Entry->Cost;
376 }
377
378 static const CostTblEntry AVX2UniformConstCostTable[] = {
379 { ISD::SHL, MVT::v32i8, 2 }, // psllw + pand.
380 { ISD::SRL, MVT::v32i8, 2 }, // psrlw + pand.
381 { ISD::SRA, MVT::v32i8, 4 }, // psrlw, pand, pxor, psubb.
382
383 { ISD::SRA, MVT::v4i64, 4 }, // 2 x psrad + shuffle.
384
385 { ISD::SDIV, MVT::v8i32, 6 }, // pmuludq sequence
386 { ISD::SREM, MVT::v8i32, 8 }, // pmuludq+mul+sub sequence
387 { ISD::UDIV, MVT::v8i32, 5 }, // pmuludq sequence
388 { ISD::UREM, MVT::v8i32, 7 }, // pmuludq+mul+sub sequence
389 };
390
391 if (Op2Info == TargetTransformInfo::OK_UniformConstantValue &&
392 ST->hasAVX2()) {
393 if (const auto *Entry = CostTableLookup(AVX2UniformConstCostTable, ISD,
394 LT.second))
395 return LT.first * Entry->Cost;
396 }
397
398 static const CostTblEntry SSE2UniformConstCostTable[] = {
399 { ISD::SHL, MVT::v16i8, 2 }, // psllw + pand.
400 { ISD::SRL, MVT::v16i8, 2 }, // psrlw + pand.
401 { ISD::SRA, MVT::v16i8, 4 }, // psrlw, pand, pxor, psubb.
402
403 { ISD::SHL, MVT::v32i8, 4+2 }, // 2*(psllw + pand) + split.
404 { ISD::SRL, MVT::v32i8, 4+2 }, // 2*(psrlw + pand) + split.
405 { ISD::SRA, MVT::v32i8, 8+2 }, // 2*(psrlw, pand, pxor, psubb) + split.
406
407 { ISD::SDIV, MVT::v8i32, 12+2 }, // 2*pmuludq sequence + split.
408 { ISD::SREM, MVT::v8i32, 16+2 }, // 2*pmuludq+mul+sub sequence + split.
409 { ISD::SDIV, MVT::v4i32, 6 }, // pmuludq sequence
410 { ISD::SREM, MVT::v4i32, 8 }, // pmuludq+mul+sub sequence
411 { ISD::UDIV, MVT::v8i32, 10+2 }, // 2*pmuludq sequence + split.
412 { ISD::UREM, MVT::v8i32, 14+2 }, // 2*pmuludq+mul+sub sequence + split.
413 { ISD::UDIV, MVT::v4i32, 5 }, // pmuludq sequence
414 { ISD::UREM, MVT::v4i32, 7 }, // pmuludq+mul+sub sequence
415 };
416
417 // XOP has faster vXi8 shifts.
418 if (Op2Info == TargetTransformInfo::OK_UniformConstantValue &&
419 ST->hasSSE2() && !ST->hasXOP()) {
420 if (const auto *Entry =
421 CostTableLookup(SSE2UniformConstCostTable, ISD, LT.second))
422 return LT.first * Entry->Cost;
423 }
424
425 static const CostTblEntry AVX512BWConstCostTable[] = {
426 { ISD::SDIV, MVT::v64i8, 14 }, // 2*ext+2*pmulhw sequence
427 { ISD::SREM, MVT::v64i8, 16 }, // 2*ext+2*pmulhw+mul+sub sequence
428 { ISD::UDIV, MVT::v64i8, 14 }, // 2*ext+2*pmulhw sequence
429 { ISD::UREM, MVT::v64i8, 16 }, // 2*ext+2*pmulhw+mul+sub sequence
430 { ISD::SDIV, MVT::v32i16, 6 }, // vpmulhw sequence
431 { ISD::SREM, MVT::v32i16, 8 }, // vpmulhw+mul+sub sequence
432 { ISD::UDIV, MVT::v32i16, 6 }, // vpmulhuw sequence
433 { ISD::UREM, MVT::v32i16, 8 }, // vpmulhuw+mul+sub sequence
434 };
435
436 if ((Op2Info == TargetTransformInfo::OK_UniformConstantValue ||
437 Op2Info == TargetTransformInfo::OK_NonUniformConstantValue) &&
438 ST->hasBWI()) {
439 if (const auto *Entry =
440 CostTableLookup(AVX512BWConstCostTable, ISD, LT.second))
441 return LT.first * Entry->Cost;
442 }
443
444 static const CostTblEntry AVX512ConstCostTable[] = {
445 { ISD::SDIV, MVT::v16i32, 15 }, // vpmuldq sequence
446 { ISD::SREM, MVT::v16i32, 17 }, // vpmuldq+mul+sub sequence
447 { ISD::UDIV, MVT::v16i32, 15 }, // vpmuludq sequence
448 { ISD::UREM, MVT::v16i32, 17 }, // vpmuludq+mul+sub sequence
449 { ISD::SDIV, MVT::v64i8, 28 }, // 4*ext+4*pmulhw sequence
450 { ISD::SREM, MVT::v64i8, 32 }, // 4*ext+4*pmulhw+mul+sub sequence
451 { ISD::UDIV, MVT::v64i8, 28 }, // 4*ext+4*pmulhw sequence
452 { ISD::UREM, MVT::v64i8, 32 }, // 4*ext+4*pmulhw+mul+sub sequence
453 { ISD::SDIV, MVT::v32i16, 12 }, // 2*vpmulhw sequence
454 { ISD::SREM, MVT::v32i16, 16 }, // 2*vpmulhw+mul+sub sequence
455 { ISD::UDIV, MVT::v32i16, 12 }, // 2*vpmulhuw sequence
456 { ISD::UREM, MVT::v32i16, 16 }, // 2*vpmulhuw+mul+sub sequence
457 };
458
459 if ((Op2Info == TargetTransformInfo::OK_UniformConstantValue ||
460 Op2Info == TargetTransformInfo::OK_NonUniformConstantValue) &&
461 ST->hasAVX512()) {
462 if (const auto *Entry =
463 CostTableLookup(AVX512ConstCostTable, ISD, LT.second))
464 return LT.first * Entry->Cost;
465 }
466
467 static const CostTblEntry AVX2ConstCostTable[] = {
468 { ISD::SDIV, MVT::v32i8, 14 }, // 2*ext+2*pmulhw sequence
469 { ISD::SREM, MVT::v32i8, 16 }, // 2*ext+2*pmulhw+mul+sub sequence
470 { ISD::UDIV, MVT::v32i8, 14 }, // 2*ext+2*pmulhw sequence
471 { ISD::UREM, MVT::v32i8, 16 }, // 2*ext+2*pmulhw+mul+sub sequence
472 { ISD::SDIV, MVT::v16i16, 6 }, // vpmulhw sequence
473 { ISD::SREM, MVT::v16i16, 8 }, // vpmulhw+mul+sub sequence
474 { ISD::UDIV, MVT::v16i16, 6 }, // vpmulhuw sequence
475 { ISD::UREM, MVT::v16i16, 8 }, // vpmulhuw+mul+sub sequence
476 { ISD::SDIV, MVT::v8i32, 15 }, // vpmuldq sequence
477 { ISD::SREM, MVT::v8i32, 19 }, // vpmuldq+mul+sub sequence
478 { ISD::UDIV, MVT::v8i32, 15 }, // vpmuludq sequence
479 { ISD::UREM, MVT::v8i32, 19 }, // vpmuludq+mul+sub sequence
480 };
481
482 if ((Op2Info == TargetTransformInfo::OK_UniformConstantValue ||
483 Op2Info == TargetTransformInfo::OK_NonUniformConstantValue) &&
484 ST->hasAVX2()) {
485 if (const auto *Entry = CostTableLookup(AVX2ConstCostTable, ISD, LT.second))
486 return LT.first * Entry->Cost;
487 }
488
489 static const CostTblEntry SSE2ConstCostTable[] = {
490 { ISD::SDIV, MVT::v32i8, 28+2 }, // 4*ext+4*pmulhw sequence + split.
491 { ISD::SREM, MVT::v32i8, 32+2 }, // 4*ext+4*pmulhw+mul+sub sequence + split.
492 { ISD::SDIV, MVT::v16i8, 14 }, // 2*ext+2*pmulhw sequence
493 { ISD::SREM, MVT::v16i8, 16 }, // 2*ext+2*pmulhw+mul+sub sequence
494 { ISD::UDIV, MVT::v32i8, 28+2 }, // 4*ext+4*pmulhw sequence + split.
495 { ISD::UREM, MVT::v32i8, 32+2 }, // 4*ext+4*pmulhw+mul+sub sequence + split.
496 { ISD::UDIV, MVT::v16i8, 14 }, // 2*ext+2*pmulhw sequence
497 { ISD::UREM, MVT::v16i8, 16 }, // 2*ext+2*pmulhw+mul+sub sequence
498 { ISD::SDIV, MVT::v16i16, 12+2 }, // 2*pmulhw sequence + split.
499 { ISD::SREM, MVT::v16i16, 16+2 }, // 2*pmulhw+mul+sub sequence + split.
500 { ISD::SDIV, MVT::v8i16, 6 }, // pmulhw sequence
501 { ISD::SREM, MVT::v8i16, 8 }, // pmulhw+mul+sub sequence
502 { ISD::UDIV, MVT::v16i16, 12+2 }, // 2*pmulhuw sequence + split.
503 { ISD::UREM, MVT::v16i16, 16+2 }, // 2*pmulhuw+mul+sub sequence + split.
504 { ISD::UDIV, MVT::v8i16, 6 }, // pmulhuw sequence
505 { ISD::UREM, MVT::v8i16, 8 }, // pmulhuw+mul+sub sequence
506 { ISD::SDIV, MVT::v8i32, 38+2 }, // 2*pmuludq sequence + split.
507 { ISD::SREM, MVT::v8i32, 48+2 }, // 2*pmuludq+mul+sub sequence + split.
508 { ISD::SDIV, MVT::v4i32, 19 }, // pmuludq sequence
509 { ISD::SREM, MVT::v4i32, 24 }, // pmuludq+mul+sub sequence
510 { ISD::UDIV, MVT::v8i32, 30+2 }, // 2*pmuludq sequence + split.
511 { ISD::UREM, MVT::v8i32, 40+2 }, // 2*pmuludq+mul+sub sequence + split.
512 { ISD::UDIV, MVT::v4i32, 15 }, // pmuludq sequence
513 { ISD::UREM, MVT::v4i32, 20 }, // pmuludq+mul+sub sequence
514 };
515
516 if ((Op2Info == TargetTransformInfo::OK_UniformConstantValue ||
517 Op2Info == TargetTransformInfo::OK_NonUniformConstantValue) &&
518 ST->hasSSE2()) {
519 // pmuldq sequence.
520 if (ISD == ISD::SDIV && LT.second == MVT::v8i32 && ST->hasAVX())
521 return LT.first * 32;
522 if (ISD == ISD::SREM && LT.second == MVT::v8i32 && ST->hasAVX())
523 return LT.first * 38;
524 if (ISD == ISD::SDIV && LT.second == MVT::v4i32 && ST->hasSSE41())
525 return LT.first * 15;
526 if (ISD == ISD::SREM && LT.second == MVT::v4i32 && ST->hasSSE41())
527 return LT.first * 20;
528
529 if (const auto *Entry = CostTableLookup(SSE2ConstCostTable, ISD, LT.second))
530 return LT.first * Entry->Cost;
531 }
532
533 static const CostTblEntry AVX512BWShiftCostTable[] = {
534 { ISD::SHL, MVT::v16i8, 4 }, // extend/vpsllvw/pack sequence.
535 { ISD::SRL, MVT::v16i8, 4 }, // extend/vpsrlvw/pack sequence.
536 { ISD::SRA, MVT::v16i8, 4 }, // extend/vpsravw/pack sequence.
537 { ISD::SHL, MVT::v32i8, 4 }, // extend/vpsllvw/pack sequence.
538 { ISD::SRL, MVT::v32i8, 4 }, // extend/vpsrlvw/pack sequence.
539 { ISD::SRA, MVT::v32i8, 6 }, // extend/vpsravw/pack sequence.
540 { ISD::SHL, MVT::v64i8, 6 }, // extend/vpsllvw/pack sequence.
541 { ISD::SRL, MVT::v64i8, 7 }, // extend/vpsrlvw/pack sequence.
542 { ISD::SRA, MVT::v64i8, 15 }, // extend/vpsravw/pack sequence.
543
544 { ISD::SHL, MVT::v8i16, 1 }, // vpsllvw
545 { ISD::SRL, MVT::v8i16, 1 }, // vpsrlvw
546 { ISD::SRA, MVT::v8i16, 1 }, // vpsravw
547 { ISD::SHL, MVT::v16i16, 1 }, // vpsllvw
548 { ISD::SRL, MVT::v16i16, 1 }, // vpsrlvw
549 { ISD::SRA, MVT::v16i16, 1 }, // vpsravw
550 { ISD::SHL, MVT::v32i16, 1 }, // vpsllvw
551 { ISD::SRL, MVT::v32i16, 1 }, // vpsrlvw
552 { ISD::SRA, MVT::v32i16, 1 }, // vpsravw
553 };
554
555 if (ST->hasBWI())
556 if (const auto *Entry = CostTableLookup(AVX512BWShiftCostTable, ISD, LT.second))
557 return LT.first * Entry->Cost;
558
559 static const CostTblEntry AVX2UniformCostTable[] = {
560 // Uniform splats are cheaper for the following instructions.
561 { ISD::SHL, MVT::v16i16, 1 }, // psllw.
562 { ISD::SRL, MVT::v16i16, 1 }, // psrlw.
563 { ISD::SRA, MVT::v16i16, 1 }, // psraw.
564 { ISD::SHL, MVT::v32i16, 2 }, // 2*psllw.
565 { ISD::SRL, MVT::v32i16, 2 }, // 2*psrlw.
566 { ISD::SRA, MVT::v32i16, 2 }, // 2*psraw.
567
568 { ISD::SHL, MVT::v8i32, 1 }, // pslld
569 { ISD::SRL, MVT::v8i32, 1 }, // psrld
570 { ISD::SRA, MVT::v8i32, 1 }, // psrad
571 { ISD::SHL, MVT::v4i64, 1 }, // psllq
572 { ISD::SRL, MVT::v4i64, 1 }, // psrlq
573 };
574
575 if (ST->hasAVX2() &&
576 ((Op2Info == TargetTransformInfo::OK_UniformConstantValue) ||
577 (Op2Info == TargetTransformInfo::OK_UniformValue))) {
578 if (const auto *Entry =
579 CostTableLookup(AVX2UniformCostTable, ISD, LT.second))
580 return LT.first * Entry->Cost;
581 }
582
583 static const CostTblEntry SSE2UniformCostTable[] = {
584 // Uniform splats are cheaper for the following instructions.
585 { ISD::SHL, MVT::v8i16, 1 }, // psllw.
586 { ISD::SHL, MVT::v4i32, 1 }, // pslld
587 { ISD::SHL, MVT::v2i64, 1 }, // psllq.
588
589 { ISD::SRL, MVT::v8i16, 1 }, // psrlw.
590 { ISD::SRL, MVT::v4i32, 1 }, // psrld.
591 { ISD::SRL, MVT::v2i64, 1 }, // psrlq.
592
593 { ISD::SRA, MVT::v8i16, 1 }, // psraw.
594 { ISD::SRA, MVT::v4i32, 1 }, // psrad.
595 };
596
597 if (ST->hasSSE2() &&
598 ((Op2Info == TargetTransformInfo::OK_UniformConstantValue) ||
599 (Op2Info == TargetTransformInfo::OK_UniformValue))) {
600 if (const auto *Entry =
601 CostTableLookup(SSE2UniformCostTable, ISD, LT.second))
602 return LT.first * Entry->Cost;
603 }
604
605 static const CostTblEntry AVX512DQCostTable[] = {
606 { ISD::MUL, MVT::v2i64, 2 }, // pmullq
607 { ISD::MUL, MVT::v4i64, 2 }, // pmullq
608 { ISD::MUL, MVT::v8i64, 2 } // pmullq
609 };
610
611 // Look for AVX512DQ lowering tricks for custom cases.
612 if (ST->hasDQI())
613 if (const auto *Entry = CostTableLookup(AVX512DQCostTable, ISD, LT.second))
614 return LT.first * Entry->Cost;
615
616 static const CostTblEntry AVX512BWCostTable[] = {
617 { ISD::SHL, MVT::v64i8, 11 }, // vpblendvb sequence.
618 { ISD::SRL, MVT::v64i8, 11 }, // vpblendvb sequence.
619 { ISD::SRA, MVT::v64i8, 24 }, // vpblendvb sequence.
620 };
621
622 // Look for AVX512BW lowering tricks for custom cases.
623 if (ST->hasBWI())
624 if (const auto *Entry = CostTableLookup(AVX512BWCostTable, ISD, LT.second))
625 return LT.first * Entry->Cost;
626
627 static const CostTblEntry AVX512CostTable[] = {
628 { ISD::SHL, MVT::v4i32, 1 },
629 { ISD::SRL, MVT::v4i32, 1 },
630 { ISD::SRA, MVT::v4i32, 1 },
631 { ISD::SHL, MVT::v8i32, 1 },
632 { ISD::SRL, MVT::v8i32, 1 },
633 { ISD::SRA, MVT::v8i32, 1 },
634 { ISD::SHL, MVT::v16i32, 1 },
635 { ISD::SRL, MVT::v16i32, 1 },
636 { ISD::SRA, MVT::v16i32, 1 },
637
638 { ISD::SHL, MVT::v2i64, 1 },
639 { ISD::SRL, MVT::v2i64, 1 },
640 { ISD::SHL, MVT::v4i64, 1 },
641 { ISD::SRL, MVT::v4i64, 1 },
642 { ISD::SHL, MVT::v8i64, 1 },
643 { ISD::SRL, MVT::v8i64, 1 },
644
645 { ISD::SRA, MVT::v2i64, 1 },
646 { ISD::SRA, MVT::v4i64, 1 },
647 { ISD::SRA, MVT::v8i64, 1 },
648
649 { ISD::MUL, MVT::v16i32, 1 }, // pmulld (Skylake from agner.org)
650 { ISD::MUL, MVT::v8i32, 1 }, // pmulld (Skylake from agner.org)
651 { ISD::MUL, MVT::v4i32, 1 }, // pmulld (Skylake from agner.org)
652 { ISD::MUL, MVT::v8i64, 6 }, // 3*pmuludq/3*shift/2*add
653
654 { ISD::FNEG, MVT::v8f64, 1 }, // Skylake from http://www.agner.org/
655 { ISD::FADD, MVT::v8f64, 1 }, // Skylake from http://www.agner.org/
656 { ISD::FSUB, MVT::v8f64, 1 }, // Skylake from http://www.agner.org/
657 { ISD::FMUL, MVT::v8f64, 1 }, // Skylake from http://www.agner.org/
658 { ISD::FDIV, MVT::f64, 4 }, // Skylake from http://www.agner.org/
659 { ISD::FDIV, MVT::v2f64, 4 }, // Skylake from http://www.agner.org/
660 { ISD::FDIV, MVT::v4f64, 8 }, // Skylake from http://www.agner.org/
661 { ISD::FDIV, MVT::v8f64, 16 }, // Skylake from http://www.agner.org/
662
663 { ISD::FNEG, MVT::v16f32, 1 }, // Skylake from http://www.agner.org/
664 { ISD::FADD, MVT::v16f32, 1 }, // Skylake from http://www.agner.org/
665 { ISD::FSUB, MVT::v16f32, 1 }, // Skylake from http://www.agner.org/
666 { ISD::FMUL, MVT::v16f32, 1 }, // Skylake from http://www.agner.org/
667 { ISD::FDIV, MVT::f32, 3 }, // Skylake from http://www.agner.org/
668 { ISD::FDIV, MVT::v4f32, 3 }, // Skylake from http://www.agner.org/
669 { ISD::FDIV, MVT::v8f32, 5 }, // Skylake from http://www.agner.org/
670 { ISD::FDIV, MVT::v16f32, 10 }, // Skylake from http://www.agner.org/
671 };
672
673 if (ST->hasAVX512())
674 if (const auto *Entry = CostTableLookup(AVX512CostTable, ISD, LT.second))
675 return LT.first * Entry->Cost;
676
677 static const CostTblEntry AVX2ShiftCostTable[] = {
678 // Shifts on vXi64/vXi32 on AVX2 are legal even though we declare them
679 // custom so that we can detect the cases where the shift amount is a scalar.
680 { ISD::SHL, MVT::v4i32, 2 }, // vpsllvd (Haswell from agner.org)
681 { ISD::SRL, MVT::v4i32, 2 }, // vpsrlvd (Haswell from agner.org)
682 { ISD::SRA, MVT::v4i32, 2 }, // vpsravd (Haswell from agner.org)
683 { ISD::SHL, MVT::v8i32, 2 }, // vpsllvd (Haswell from agner.org)
684 { ISD::SRL, MVT::v8i32, 2 }, // vpsrlvd (Haswell from agner.org)
685 { ISD::SRA, MVT::v8i32, 2 }, // vpsravd (Haswell from agner.org)
686 { ISD::SHL, MVT::v2i64, 1 }, // vpsllvq (Haswell from agner.org)
687 { ISD::SRL, MVT::v2i64, 1 }, // vpsrlvq (Haswell from agner.org)
688 { ISD::SHL, MVT::v4i64, 1 }, // vpsllvq (Haswell from agner.org)
689 { ISD::SRL, MVT::v4i64, 1 }, // vpsrlvq (Haswell from agner.org)
690 };
691
692 if (ST->hasAVX512()) {
693 if (ISD == ISD::SHL && LT.second == MVT::v32i16 &&
694 (Op2Info == TargetTransformInfo::OK_UniformConstantValue ||
695 Op2Info == TargetTransformInfo::OK_NonUniformConstantValue))
696 // On AVX512, a packed v32i16 shift left by a constant build_vector
697 // is lowered into a vector multiply (vpmullw).
698 return getArithmeticInstrCost(Instruction::Mul, Ty, CostKind,
699 Op1Info, Op2Info,
700 TargetTransformInfo::OP_None,
701 TargetTransformInfo::OP_None);
702 }
703
704 // Look for AVX2 lowering tricks (XOP is always better at v4i32 shifts).
705 if (ST->hasAVX2() && !(ST->hasXOP() && LT.second == MVT::v4i32)) {
706 if (ISD == ISD::SHL && LT.second == MVT::v16i16 &&
707 (Op2Info == TargetTransformInfo::OK_UniformConstantValue ||
708 Op2Info == TargetTransformInfo::OK_NonUniformConstantValue))
709 // On AVX2, a packed v16i16 shift left by a constant build_vector
710 // is lowered into a vector multiply (vpmullw).
711 return getArithmeticInstrCost(Instruction::Mul, Ty, CostKind,
712 Op1Info, Op2Info,
713 TargetTransformInfo::OP_None,
714 TargetTransformInfo::OP_None);
715
716 if (const auto *Entry = CostTableLookup(AVX2ShiftCostTable, ISD, LT.second))
717 return LT.first * Entry->Cost;
718 }
719
720 static const CostTblEntry XOPShiftCostTable[] = {
721 // 128bit shifts take 1cy, but right shifts require negation beforehand.
722 { ISD::SHL, MVT::v16i8, 1 },
723 { ISD::SRL, MVT::v16i8, 2 },
724 { ISD::SRA, MVT::v16i8, 2 },
725 { ISD::SHL, MVT::v8i16, 1 },
726 { ISD::SRL, MVT::v8i16, 2 },
727 { ISD::SRA, MVT::v8i16, 2 },
728 { ISD::SHL, MVT::v4i32, 1 },
729 { ISD::SRL, MVT::v4i32, 2 },
730 { ISD::SRA, MVT::v4i32, 2 },
731 { ISD::SHL, MVT::v2i64, 1 },
732 { ISD::SRL, MVT::v2i64, 2 },
733 { ISD::SRA, MVT::v2i64, 2 },
734 // 256bit shifts require splitting if AVX2 didn't catch them above.
735 { ISD::SHL, MVT::v32i8, 2+2 },
736 { ISD::SRL, MVT::v32i8, 4+2 },
737 { ISD::SRA, MVT::v32i8, 4+2 },
738 { ISD::SHL, MVT::v16i16, 2+2 },
739 { ISD::SRL, MVT::v16i16, 4+2 },
740 { ISD::SRA, MVT::v16i16, 4+2 },
741 { ISD::SHL, MVT::v8i32, 2+2 },
742 { ISD::SRL, MVT::v8i32, 4+2 },
743 { ISD::SRA, MVT::v8i32, 4+2 },
744 { ISD::SHL, MVT::v4i64, 2+2 },
745 { ISD::SRL, MVT::v4i64, 4+2 },
746 { ISD::SRA, MVT::v4i64, 4+2 },
747 };
748
749 // Look for XOP lowering tricks.
750 if (ST->hasXOP()) {
751 // If the right shift is constant then we'll fold the negation so
752 // it's as cheap as a left shift.
753 int ShiftISD = ISD;
754 if ((ShiftISD == ISD::SRL || ShiftISD == ISD::SRA) &&
755 (Op2Info == TargetTransformInfo::OK_UniformConstantValue ||
756 Op2Info == TargetTransformInfo::OK_NonUniformConstantValue))
757 ShiftISD = ISD::SHL;
758 if (const auto *Entry =
759 CostTableLookup(XOPShiftCostTable, ShiftISD, LT.second))
760 return LT.first * Entry->Cost;
761 }
762
763 static const CostTblEntry SSE2UniformShiftCostTable[] = {
764 // Uniform splats are cheaper for the following instructions.
765 { ISD::SHL, MVT::v16i16, 2+2 }, // 2*psllw + split.
766 { ISD::SHL, MVT::v8i32, 2+2 }, // 2*pslld + split.
767 { ISD::SHL, MVT::v4i64, 2+2 }, // 2*psllq + split.
768
769 { ISD::SRL, MVT::v16i16, 2+2 }, // 2*psrlw + split.
770 { ISD::SRL, MVT::v8i32, 2+2 }, // 2*psrld + split.
771 { ISD::SRL, MVT::v4i64, 2+2 }, // 2*psrlq + split.
772
773 { ISD::SRA, MVT::v16i16, 2+2 }, // 2*psraw + split.
774 { ISD::SRA, MVT::v8i32, 2+2 }, // 2*psrad + split.
775 { ISD::SRA, MVT::v2i64, 4 }, // 2*psrad + shuffle.
776 { ISD::SRA, MVT::v4i64, 8+2 }, // 2*(2*psrad + shuffle) + split.
777 };
778
779 if (ST->hasSSE2() &&
780 ((Op2Info == TargetTransformInfo::OK_UniformConstantValue) ||
781 (Op2Info == TargetTransformInfo::OK_UniformValue))) {
782
783 // Handle AVX2 uniform v4i64 ISD::SRA, it's not worth a table.
784 if (ISD == ISD::SRA && LT.second == MVT::v4i64 && ST->hasAVX2())
785 return LT.first * 4; // 2*psrad + shuffle.
786
787 if (const auto *Entry =
788 CostTableLookup(SSE2UniformShiftCostTable, ISD, LT.second))
789 return LT.first * Entry->Cost;
790 }
791
792 if (ISD == ISD::SHL &&
793 Op2Info == TargetTransformInfo::OK_NonUniformConstantValue) {
794 MVT VT = LT.second;
795 // Vector shift left by non uniform constant can be lowered
796 // into vector multiply.
797 if (((VT == MVT::v8i16 || VT == MVT::v4i32) && ST->hasSSE2()) ||
798 ((VT == MVT::v16i16 || VT == MVT::v8i32) && ST->hasAVX()))
799 ISD = ISD::MUL;
800 }
801
802 static const CostTblEntry AVX2CostTable[] = {
803 { ISD::SHL, MVT::v16i8, 6 }, // vpblendvb sequence.
804 { ISD::SHL, MVT::v32i8, 6 }, // vpblendvb sequence.
805 { ISD::SHL, MVT::v64i8, 12 }, // 2*vpblendvb sequence.
806 { ISD::SHL, MVT::v8i16, 5 }, // extend/vpsrlvd/pack sequence.
807 { ISD::SHL, MVT::v16i16, 7 }, // extend/vpsrlvd/pack sequence.
808 { ISD::SHL, MVT::v32i16, 14 }, // 2*extend/vpsrlvd/pack sequence.
809
810 { ISD::SRL, MVT::v16i8, 6 }, // vpblendvb sequence.
811 { ISD::SRL, MVT::v32i8, 6 }, // vpblendvb sequence.
812 { ISD::SRL, MVT::v64i8, 12 }, // 2*vpblendvb sequence.
813 { ISD::SRL, MVT::v8i16, 5 }, // extend/vpsrlvd/pack sequence.
814 { ISD::SRL, MVT::v16i16, 7 }, // extend/vpsrlvd/pack sequence.
815 { ISD::SRL, MVT::v32i16, 14 }, // 2*extend/vpsrlvd/pack sequence.
816
817 { ISD::SRA, MVT::v16i8, 17 }, // vpblendvb sequence.
818 { ISD::SRA, MVT::v32i8, 17 }, // vpblendvb sequence.
819 { ISD::SRA, MVT::v64i8, 34 }, // 2*vpblendvb sequence.
820 { ISD::SRA, MVT::v8i16, 5 }, // extend/vpsravd/pack sequence.
821 { ISD::SRA, MVT::v16i16, 7 }, // extend/vpsravd/pack sequence.
822 { ISD::SRA, MVT::v32i16, 14 }, // 2*extend/vpsravd/pack sequence.
823 { ISD::SRA, MVT::v2i64, 2 }, // srl/xor/sub sequence.
824 { ISD::SRA, MVT::v4i64, 2 }, // srl/xor/sub sequence.
825
826 { ISD::SUB, MVT::v32i8, 1 }, // psubb
827 { ISD::ADD, MVT::v32i8, 1 }, // paddb
828 { ISD::SUB, MVT::v16i16, 1 }, // psubw
829 { ISD::ADD, MVT::v16i16, 1 }, // paddw
830 { ISD::SUB, MVT::v8i32, 1 }, // psubd
831 { ISD::ADD, MVT::v8i32, 1 }, // paddd
832 { ISD::SUB, MVT::v4i64, 1 }, // psubq
833 { ISD::ADD, MVT::v4i64, 1 }, // paddq
834
835 { ISD::MUL, MVT::v16i16, 1 }, // pmullw
836 { ISD::MUL, MVT::v8i32, 2 }, // pmulld (Haswell from agner.org)
837 { ISD::MUL, MVT::v4i64, 6 }, // 3*pmuludq/3*shift/2*add
838
839 { ISD::FNEG, MVT::v4f64, 1 }, // Haswell from http://www.agner.org/
840 { ISD::FNEG, MVT::v8f32, 1 }, // Haswell from http://www.agner.org/
841 { ISD::FADD, MVT::v4f64, 1 }, // Haswell from http://www.agner.org/
842 { ISD::FADD, MVT::v8f32, 1 }, // Haswell from http://www.agner.org/
843 { ISD::FSUB, MVT::v4f64, 1 }, // Haswell from http://www.agner.org/
844 { ISD::FSUB, MVT::v8f32, 1 }, // Haswell from http://www.agner.org/
845 { ISD::FMUL, MVT::f64, 1 }, // Haswell from http://www.agner.org/
846 { ISD::FMUL, MVT::v2f64, 1 }, // Haswell from http://www.agner.org/
847 { ISD::FMUL, MVT::v4f64, 1 }, // Haswell from http://www.agner.org/
848 { ISD::FMUL, MVT::v8f32, 1 }, // Haswell from http://www.agner.org/
849
850 { ISD::FDIV, MVT::f32, 7 }, // Haswell from http://www.agner.org/
851 { ISD::FDIV, MVT::v4f32, 7 }, // Haswell from http://www.agner.org/
852 { ISD::FDIV, MVT::v8f32, 14 }, // Haswell from http://www.agner.org/
853 { ISD::FDIV, MVT::f64, 14 }, // Haswell from http://www.agner.org/
854 { ISD::FDIV, MVT::v2f64, 14 }, // Haswell from http://www.agner.org/
855 { ISD::FDIV, MVT::v4f64, 28 }, // Haswell from http://www.agner.org/
856 };
857
858 // Look for AVX2 lowering tricks for custom cases.
859 if (ST->hasAVX2())
860 if (const auto *Entry = CostTableLookup(AVX2CostTable, ISD, LT.second))
861 return LT.first * Entry->Cost;
862
863 static const CostTblEntry AVX1CostTable[] = {
864 // We don't have to scalarize unsupported ops. We can issue two half-sized
865 // operations and we only need to extract the upper YMM half.
866 // Two ops + 1 extract + 1 insert = 4.
867 { ISD::MUL, MVT::v16i16, 4 },
868 { ISD::MUL, MVT::v8i32, 5 }, // BTVER2 from http://www.agner.org/
869 { ISD::MUL, MVT::v4i64, 12 },
870
871 { ISD::SUB, MVT::v32i8, 4 },
872 { ISD::ADD, MVT::v32i8, 4 },
873 { ISD::SUB, MVT::v16i16, 4 },
874 { ISD::ADD, MVT::v16i16, 4 },
875 { ISD::SUB, MVT::v8i32, 4 },
876 { ISD::ADD, MVT::v8i32, 4 },
877 { ISD::SUB, MVT::v4i64, 4 },
878 { ISD::ADD, MVT::v4i64, 4 },
879
880 { ISD::SHL, MVT::v32i8, 22 }, // pblendvb sequence + split.
881 { ISD::SHL, MVT::v8i16, 6 }, // pblendvb sequence.
882 { ISD::SHL, MVT::v16i16, 13 }, // pblendvb sequence + split.
883 { ISD::SHL, MVT::v4i32, 3 }, // pslld/paddd/cvttps2dq/pmulld
884 { ISD::SHL, MVT::v8i32, 9 }, // pslld/paddd/cvttps2dq/pmulld + split
885 { ISD::SHL, MVT::v2i64, 2 }, // Shift each lane + blend.
886 { ISD::SHL, MVT::v4i64, 6 }, // Shift each lane + blend + split.
887
888 { ISD::SRL, MVT::v32i8, 23 }, // pblendvb sequence + split.
889 { ISD::SRL, MVT::v16i16, 28 }, // pblendvb sequence + split.
890 { ISD::SRL, MVT::v4i32, 6 }, // Shift each lane + blend.
891 { ISD::SRL, MVT::v8i32, 14 }, // Shift each lane + blend + split.
892 { ISD::SRL, MVT::v2i64, 2 }, // Shift each lane + blend.
893 { ISD::SRL, MVT::v4i64, 6 }, // Shift each lane + blend + split.
894
895 { ISD::SRA, MVT::v32i8, 44 }, // pblendvb sequence + split.
896 { ISD::SRA, MVT::v16i16, 28 }, // pblendvb sequence + split.
897 { ISD::SRA, MVT::v4i32, 6 }, // Shift each lane + blend.
898 { ISD::SRA, MVT::v8i32, 14 }, // Shift each lane + blend + split.
899 { ISD::SRA, MVT::v2i64, 5 }, // Shift each lane + blend.
900 { ISD::SRA, MVT::v4i64, 12 }, // Shift each lane + blend + split.
901
902 { ISD::FNEG, MVT::v4f64, 2 }, // BTVER2 from http://www.agner.org/
903 { ISD::FNEG, MVT::v8f32, 2 }, // BTVER2 from http://www.agner.org/
904
905 { ISD::FMUL, MVT::f64, 2 }, // BTVER2 from http://www.agner.org/
906 { ISD::FMUL, MVT::v2f64, 2 }, // BTVER2 from http://www.agner.org/
907 { ISD::FMUL, MVT::v4f64, 4 }, // BTVER2 from http://www.agner.org/
908
909 { ISD::FDIV, MVT::f32, 14 }, // SNB from http://www.agner.org/
910 { ISD::FDIV, MVT::v4f32, 14 }, // SNB from http://www.agner.org/
911 { ISD::FDIV, MVT::v8f32, 28 }, // SNB from http://www.agner.org/
912 { ISD::FDIV, MVT::f64, 22 }, // SNB from http://www.agner.org/
913 { ISD::FDIV, MVT::v2f64, 22 }, // SNB from http://www.agner.org/
914 { ISD::FDIV, MVT::v4f64, 44 }, // SNB from http://www.agner.org/
915 };
916
917 if (ST->hasAVX())
918 if (const auto *Entry = CostTableLookup(AVX1CostTable, ISD, LT.second))
919 return LT.first * Entry->Cost;
920
921 static const CostTblEntry SSE42CostTable[] = {
922 { ISD::FADD, MVT::f64, 1 }, // Nehalem from http://www.agner.org/
923 { ISD::FADD, MVT::f32, 1 }, // Nehalem from http://www.agner.org/
924 { ISD::FADD, MVT::v2f64, 1 }, // Nehalem from http://www.agner.org/
925 { ISD::FADD, MVT::v4f32, 1 }, // Nehalem from http://www.agner.org/
926
927 { ISD::FSUB, MVT::f64, 1 }, // Nehalem from http://www.agner.org/
928 { ISD::FSUB, MVT::f32 , 1 }, // Nehalem from http://www.agner.org/
929 { ISD::FSUB, MVT::v2f64, 1 }, // Nehalem from http://www.agner.org/
930 { ISD::FSUB, MVT::v4f32, 1 }, // Nehalem from http://www.agner.org/
931
932 { ISD::FMUL, MVT::f64, 1 }, // Nehalem from http://www.agner.org/
933 { ISD::FMUL, MVT::f32, 1 }, // Nehalem from http://www.agner.org/
934 { ISD::FMUL, MVT::v2f64, 1 }, // Nehalem from http://www.agner.org/
935 { ISD::FMUL, MVT::v4f32, 1 }, // Nehalem from http://www.agner.org/
936
937 { ISD::FDIV, MVT::f32, 14 }, // Nehalem from http://www.agner.org/
938 { ISD::FDIV, MVT::v4f32, 14 }, // Nehalem from http://www.agner.org/
939 { ISD::FDIV, MVT::f64, 22 }, // Nehalem from http://www.agner.org/
940 { ISD::FDIV, MVT::v2f64, 22 }, // Nehalem from http://www.agner.org/
941
942 { ISD::MUL, MVT::v2i64, 6 } // 3*pmuludq/3*shift/2*add
943 };
944
945 if (ST->hasSSE42())
946 if (const auto *Entry = CostTableLookup(SSE42CostTable, ISD, LT.second))
947 return LT.first * Entry->Cost;
948
949 static const CostTblEntry SSE41CostTable[] = {
950 { ISD::SHL, MVT::v16i8, 10 }, // pblendvb sequence.
951 { ISD::SHL, MVT::v8i16, 11 }, // pblendvb sequence.
952 { ISD::SHL, MVT::v4i32, 4 }, // pslld/paddd/cvttps2dq/pmulld
953
954 { ISD::SRL, MVT::v16i8, 11 }, // pblendvb sequence.
955 { ISD::SRL, MVT::v8i16, 13 }, // pblendvb sequence.
956 { ISD::SRL, MVT::v4i32, 16 }, // Shift each lane + blend.
957
958 { ISD::SRA, MVT::v16i8, 21 }, // pblendvb sequence.
959 { ISD::SRA, MVT::v8i16, 13 }, // pblendvb sequence.
960
961 { ISD::MUL, MVT::v4i32, 2 } // pmulld (Nehalem from agner.org)
962 };
963
964 if (ST->hasSSE41())
965 if (const auto *Entry = CostTableLookup(SSE41CostTable, ISD, LT.second))
966 return LT.first * Entry->Cost;
967
968 static const CostTblEntry SSE2CostTable[] = {
969 // We don't correctly identify costs of casts because they are marked as
970 // custom.
971 { ISD::SHL, MVT::v16i8, 13 }, // cmpgtb sequence.
972 { ISD::SHL, MVT::v8i16, 25 }, // cmpgtw sequence.
973 { ISD::SHL, MVT::v4i32, 16 }, // pslld/paddd/cvttps2dq/pmuludq.
974 { ISD::SHL, MVT::v2i64, 4 }, // splat+shuffle sequence.
975
976 { ISD::SRL, MVT::v16i8, 14 }, // cmpgtb sequence.
977 { ISD::SRL, MVT::v8i16, 16 }, // cmpgtw sequence.
978 { ISD::SRL, MVT::v4i32, 12 }, // Shift each lane + blend.
979 { ISD::SRL, MVT::v2i64, 4 }, // splat+shuffle sequence.
980
981 { ISD::SRA, MVT::v16i8, 27 }, // unpacked cmpgtb sequence.
982 { ISD::SRA, MVT::v8i16, 16 }, // cmpgtw sequence.
983 { ISD::SRA, MVT::v4i32, 12 }, // Shift each lane + blend.
984 { ISD::SRA, MVT::v2i64, 8 }, // srl/xor/sub splat+shuffle sequence.
985
986 { ISD::MUL, MVT::v8i16, 1 }, // pmullw
987 { ISD::MUL, MVT::v4i32, 6 }, // 3*pmuludq/4*shuffle
988 { ISD::MUL, MVT::v2i64, 8 }, // 3*pmuludq/3*shift/2*add
989
990 { ISD::FDIV, MVT::f32, 23 }, // Pentium IV from http://www.agner.org/
991 { ISD::FDIV, MVT::v4f32, 39 }, // Pentium IV from http://www.agner.org/
992 { ISD::FDIV, MVT::f64, 38 }, // Pentium IV from http://www.agner.org/
993 { ISD::FDIV, MVT::v2f64, 69 }, // Pentium IV from http://www.agner.org/
994
995 { ISD::FNEG, MVT::f32, 1 }, // Pentium IV from http://www.agner.org/
996 { ISD::FNEG, MVT::f64, 1 }, // Pentium IV from http://www.agner.org/
997 { ISD::FNEG, MVT::v4f32, 1 }, // Pentium IV from http://www.agner.org/
998 { ISD::FNEG, MVT::v2f64, 1 }, // Pentium IV from http://www.agner.org/
999
1000 { ISD::FADD, MVT::f32, 2 }, // Pentium IV from http://www.agner.org/
1001 { ISD::FADD, MVT::f64, 2 }, // Pentium IV from http://www.agner.org/
1002
1003 { ISD::FSUB, MVT::f32, 2 }, // Pentium IV from http://www.agner.org/
1004 { ISD::FSUB, MVT::f64, 2 }, // Pentium IV from http://www.agner.org/
1005 };
1006
1007 if (ST->hasSSE2())
1008 if (const auto *Entry = CostTableLookup(SSE2CostTable, ISD, LT.second))
1009 return LT.first * Entry->Cost;
1010
1011 static const CostTblEntry SSE1CostTable[] = {
1012 { ISD::FDIV, MVT::f32, 17 }, // Pentium III from http://www.agner.org/
1013 { ISD::FDIV, MVT::v4f32, 34 }, // Pentium III from http://www.agner.org/
1014
1015 { ISD::FNEG, MVT::f32, 2 }, // Pentium III from http://www.agner.org/
1016 { ISD::FNEG, MVT::v4f32, 2 }, // Pentium III from http://www.agner.org/
1017
1018 { ISD::FADD, MVT::f32, 1 }, // Pentium III from http://www.agner.org/
1019 { ISD::FADD, MVT::v4f32, 2 }, // Pentium III from http://www.agner.org/
1020
1021 { ISD::FSUB, MVT::f32, 1 }, // Pentium III from http://www.agner.org/
1022 { ISD::FSUB, MVT::v4f32, 2 }, // Pentium III from http://www.agner.org/
1023 };
1024
1025 if (ST->hasSSE1())
1026 if (const auto *Entry = CostTableLookup(SSE1CostTable, ISD, LT.second))
1027 return LT.first * Entry->Cost;
1028
1029 static const CostTblEntry X64CostTbl[] = { // 64-bit targets
1030 { ISD::ADD, MVT::i64, 1 }, // Core (Merom) from http://www.agner.org/
1031 { ISD::SUB, MVT::i64, 1 }, // Core (Merom) from http://www.agner.org/
1032 { ISD::MUL, MVT::i64, 2 }, // Nehalem from http://www.agner.org/
1033 };
1034
1035 if (ST->is64Bit())
1036 if (const auto *Entry = CostTableLookup(X64CostTbl, ISD, LT.second))
1037 return LT.first * Entry->Cost;
1038
1039 static const CostTblEntry X86CostTbl[] = { // 32 or 64-bit targets
1040 { ISD::ADD, MVT::i8, 1 }, // Pentium III from http://www.agner.org/
1041 { ISD::ADD, MVT::i16, 1 }, // Pentium III from http://www.agner.org/
1042 { ISD::ADD, MVT::i32, 1 }, // Pentium III from http://www.agner.org/
1043
1044 { ISD::SUB, MVT::i8, 1 }, // Pentium III from http://www.agner.org/
1045 { ISD::SUB, MVT::i16, 1 }, // Pentium III from http://www.agner.org/
1046 { ISD::SUB, MVT::i32, 1 }, // Pentium III from http://www.agner.org/
1047 };
1048
1049 if (const auto *Entry = CostTableLookup(X86CostTbl, ISD, LT.second))
1050 return LT.first * Entry->Cost;
1051
1052 // It is not a good idea to vectorize division. We have to scalarize it and
1053 // in the process we will often end up having to spill regular
1054 // registers. The overhead of division is going to dominate most kernels
1055 // anyway, so try hard to prevent vectorization of division - it is
1056 // generally a bad idea. Assume somewhat arbitrarily that we have to be able
1057 // to hide "20 cycles" for each lane.
1058 if (LT.second.isVector() && (ISD == ISD::SDIV || ISD == ISD::SREM ||
1059 ISD == ISD::UDIV || ISD == ISD::UREM)) {
1060 InstructionCost ScalarCost = getArithmeticInstrCost(
1061 Opcode, Ty->getScalarType(), CostKind, Op1Info, Op2Info,
1062 TargetTransformInfo::OP_None, TargetTransformInfo::OP_None);
1063 return 20 * LT.first * LT.second.getVectorNumElements() * ScalarCost;
1064 }
1065
1066 // Fallback to the default implementation.
1067 return BaseT::getArithmeticInstrCost(Opcode, Ty, CostKind, Op1Info, Op2Info);
1068}
1069
1070InstructionCost X86TTIImpl::getShuffleCost(TTI::ShuffleKind Kind,
1071 VectorType *BaseTp,
1072 ArrayRef<int> Mask, int Index,
1073 VectorType *SubTp) {
1074 // 64-bit packed float vectors (v2f32) are widened to type v4f32.
1075 // 64-bit packed integer vectors (v2i32) are widened to type v4i32.
1076 std::pair<InstructionCost, MVT> LT = TLI->getTypeLegalizationCost(DL, BaseTp);
1077
1078 Kind = improveShuffleKindFromMask(Kind, Mask);
1079 // Treat Transpose as 2-op shuffles - there's no difference in lowering.
1080 if (Kind == TTI::SK_Transpose)
1081 Kind = TTI::SK_PermuteTwoSrc;
1082
1083 // For Broadcasts we are splatting the first element from the first input
1084 // register, so we only need to reference that input and all the output
1085 // registers are the same.
1086 if (Kind == TTI::SK_Broadcast)
1087 LT.first = 1;
1088
1089 // Subvector extractions are free if they start at the beginning of a
1090 // vector and cheap if the subvectors are aligned.
1091 if (Kind == TTI::SK_ExtractSubvector && LT.second.isVector()) {
1092 int NumElts = LT.second.getVectorNumElements();
1093 if ((Index % NumElts) == 0)
1094 return 0;
1095 std::pair<InstructionCost, MVT> SubLT =
1096 TLI->getTypeLegalizationCost(DL, SubTp);
1097 if (SubLT.second.isVector()) {
1098 int NumSubElts = SubLT.second.getVectorNumElements();
1099 if ((Index % NumSubElts) == 0 && (NumElts % NumSubElts) == 0)
1100 return SubLT.first;
1101 // Handle some cases for widening legalization. For now we only handle
1102 // cases where the original subvector was naturally aligned and evenly
1103 // fit in its legalized subvector type.
1104 // FIXME: Remove some of the alignment restrictions.
1105 // FIXME: We can use permq for 64-bit or larger extracts from 256-bit
1106 // vectors.
1107 int OrigSubElts = cast<FixedVectorType>(SubTp)->getNumElements();
1108 if (NumSubElts > OrigSubElts && (Index % OrigSubElts) == 0 &&
1109 (NumSubElts % OrigSubElts) == 0 &&
1110 LT.second.getVectorElementType() ==
1111 SubLT.second.getVectorElementType() &&
1112 LT.second.getVectorElementType().getSizeInBits() ==
1113 BaseTp->getElementType()->getPrimitiveSizeInBits()) {
1114 assert(NumElts >= NumSubElts && NumElts > OrigSubElts &&
1115        "Unexpected number of elements!");
1116 auto *VecTy = FixedVectorType::get(BaseTp->getElementType(),
1117 LT.second.getVectorNumElements());
1118 auto *SubTy = FixedVectorType::get(BaseTp->getElementType(),
1119 SubLT.second.getVectorNumElements());
1120 int ExtractIndex = alignDown((Index % NumElts), NumSubElts);
1121 InstructionCost ExtractCost = getShuffleCost(
1122 TTI::SK_ExtractSubvector, VecTy, None, ExtractIndex, SubTy);
1123
1124 // If the original size is 32-bits or more, we can use pshufd. Otherwise
1125 // if we have SSSE3 we can use pshufb.
1126 if (SubTp->getPrimitiveSizeInBits() >= 32 || ST->hasSSSE3())
1127 return ExtractCost + 1; // pshufd or pshufb
1128
1129 assert(SubTp->getPrimitiveSizeInBits() == 16 &&
1130        "Unexpected vector size");
1131
1132 return ExtractCost + 2; // worst case pshufhw + pshufd
1133 }
1134 }
1135 }
1136
1137 // Subvector insertions are cheap if the subvectors are aligned.
1138 // Note that in general, the insertion starting at the beginning of a vector
1139 // isn't free, because we need to preserve the rest of the wide vector.
1140 if (Kind == TTI::SK_InsertSubvector && LT.second.isVector()) {
1141 int NumElts = LT.second.getVectorNumElements();
1142 std::pair<InstructionCost, MVT> SubLT =
1143 TLI->getTypeLegalizationCost(DL, SubTp);
1144 if (SubLT.second.isVector()) {
1145 int NumSubElts = SubLT.second.getVectorNumElements();
1146 if ((Index % NumSubElts) == 0 && (NumElts % NumSubElts) == 0)
1147 return SubLT.first;
1148 }
1149
1150 // If the insertion isn't aligned, treat it like a 2-op shuffle.
1151 Kind = TTI::SK_PermuteTwoSrc;
1152 }
1153
1154 // Handle some common (illegal) sub-vector types as they are often very cheap
1155 // to shuffle even on targets without PSHUFB.
1156 EVT VT = TLI->getValueType(DL, BaseTp);
1157 if (VT.isSimple() && VT.isVector() && VT.getSizeInBits() < 128 &&
1158 !ST->hasSSSE3()) {
1159 static const CostTblEntry SSE2SubVectorShuffleTbl[] = {
1160 {TTI::SK_Broadcast, MVT::v4i16, 1}, // pshuflw
1161 {TTI::SK_Broadcast, MVT::v2i16, 1}, // pshuflw
1162 {TTI::SK_Broadcast, MVT::v8i8, 2}, // punpck/pshuflw
1163 {TTI::SK_Broadcast, MVT::v4i8, 2}, // punpck/pshuflw
1164 {TTI::SK_Broadcast, MVT::v2i8, 1}, // punpck
1165
1166 {TTI::SK_Reverse, MVT::v4i16, 1}, // pshuflw
1167 {TTI::SK_Reverse, MVT::v2i16, 1}, // pshuflw
1168 {TTI::SK_Reverse, MVT::v4i8, 3}, // punpck/pshuflw/packus
1169 {TTI::SK_Reverse, MVT::v2i8, 1}, // punpck
1170
1171 {TTI::SK_PermuteTwoSrc, MVT::v4i16, 2}, // punpck/pshuflw
1172 {TTI::SK_PermuteTwoSrc, MVT::v2i16, 2}, // punpck/pshuflw
1173 {TTI::SK_PermuteTwoSrc, MVT::v8i8, 7}, // punpck/pshuflw
1174 {TTI::SK_PermuteTwoSrc, MVT::v4i8, 4}, // punpck/pshuflw
1175 {TTI::SK_PermuteTwoSrc, MVT::v2i8, 2}, // punpck
1176
1177 {TTI::SK_PermuteSingleSrc, MVT::v4i16, 1}, // pshuflw
1178 {TTI::SK_PermuteSingleSrc, MVT::v2i16, 1}, // pshuflw
1179 {TTI::SK_PermuteSingleSrc, MVT::v8i8, 5}, // punpck/pshuflw
1180 {TTI::SK_PermuteSingleSrc, MVT::v4i8, 3}, // punpck/pshuflw
1181 {TTI::SK_PermuteSingleSrc, MVT::v2i8, 1}, // punpck
1182 };
1183
1184 if (ST->hasSSE2())
1185 if (const auto *Entry =
1186 CostTableLookup(SSE2SubVectorShuffleTbl, Kind, VT.getSimpleVT()))
1187 return Entry->Cost;
1188 }
1189
1190  // We are going to permute multiple sources and the result will be in multiple
1191  // destinations. Provide an accurate cost only for splits where the element
1192  // type remains the same.
1193 if (Kind == TTI::SK_PermuteSingleSrc && LT.first != 1) {
1194 MVT LegalVT = LT.second;
1195 if (LegalVT.isVector() &&
1196 LegalVT.getVectorElementType().getSizeInBits() ==
1197 BaseTp->getElementType()->getPrimitiveSizeInBits() &&
1198 LegalVT.getVectorNumElements() <
1199 cast<FixedVectorType>(BaseTp)->getNumElements()) {
1200
1201 unsigned VecTySize = DL.getTypeStoreSize(BaseTp);
1202 unsigned LegalVTSize = LegalVT.getStoreSize();
1203 // Number of source vectors after legalization:
1204 unsigned NumOfSrcs = (VecTySize + LegalVTSize - 1) / LegalVTSize;
1205 // Number of destination vectors after legalization:
1206 InstructionCost NumOfDests = LT.first;
1207
1208 auto *SingleOpTy = FixedVectorType::get(BaseTp->getElementType(),
1209 LegalVT.getVectorNumElements());
1210
1211 InstructionCost NumOfShuffles = (NumOfSrcs - 1) * NumOfDests;
1212 return NumOfShuffles * getShuffleCost(TTI::SK_PermuteTwoSrc, SingleOpTy,
1213 None, 0, nullptr);
1214 }
1215
1216 return BaseT::getShuffleCost(Kind, BaseTp, Mask, Index, SubTp);
1217 }
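  // In the split single-source case above, each of the NumOfDests (== LT.first)
  // destination registers is assumed to need one legal-width two-source shuffle
  // per additional source register, i.e. (NumOfSrcs - 1) * NumOfDests shuffles
  // of SingleOpTy in total.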
1218
1219 // For 2-input shuffles, we must account for splitting the 2 inputs into many.
1220 if (Kind == TTI::SK_PermuteTwoSrc && LT.first != 1) {
1221 // We assume that source and destination have the same vector type.
1222 InstructionCost NumOfDests = LT.first;
1223 InstructionCost NumOfShufflesPerDest = LT.first * 2 - 1;
1224 LT.first = NumOfDests * NumOfShufflesPerDest;
1225 }
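  // For example, if the two-source shuffle type splits into two legal registers
  // (LT.first == 2), it is charged as 2 * (2 * 2 - 1) = 6 legal-width shuffles;
  // the per-entry costs from the tables below are then scaled by this factor.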
1226
1227 static const CostTblEntry AVX512FP16ShuffleTbl[] = {
1228 {TTI::SK_Broadcast, MVT::v32f16, 1}, // vpbroadcastw
1229 {TTI::SK_Broadcast, MVT::v16f16, 1}, // vpbroadcastw
1230 {TTI::SK_Broadcast, MVT::v8f16, 1}, // vpbroadcastw
1231
1232 {TTI::SK_Reverse, MVT::v32f16, 2}, // vpermw
1233 {TTI::SK_Reverse, MVT::v16f16, 2}, // vpermw
1234 {TTI::SK_Reverse, MVT::v8f16, 1}, // vpshufb
1235
1236 {TTI::SK_PermuteSingleSrc, MVT::v32f16, 2}, // vpermw
1237 {TTI::SK_PermuteSingleSrc, MVT::v16f16, 2}, // vpermw
1238 {TTI::SK_PermuteSingleSrc, MVT::v8f16, 1}, // vpshufb
1239
1240 {TTI::SK_PermuteTwoSrc, MVT::v32f16, 2}, // vpermt2w
1241 {TTI::SK_PermuteTwoSrc, MVT::v16f16, 2}, // vpermt2w
1242 {TTI::SK_PermuteTwoSrc, MVT::v8f16, 2} // vpermt2w
1243 };
1244
1245 if (!ST->useSoftFloat() && ST->hasFP16())
1246 if (const auto *Entry =
1247 CostTableLookup(AVX512FP16ShuffleTbl, Kind, LT.second))
1248 return LT.first * Entry->Cost;
1249
1250 static const CostTblEntry AVX512VBMIShuffleTbl[] = {
1251 {TTI::SK_Reverse, MVT::v64i8, 1}, // vpermb
1252 {TTI::SK_Reverse, MVT::v32i8, 1}, // vpermb
1253
1254 {TTI::SK_PermuteSingleSrc, MVT::v64i8, 1}, // vpermb
1255 {TTI::SK_PermuteSingleSrc, MVT::v32i8, 1}, // vpermb
1256
1257 {TTI::SK_PermuteTwoSrc, MVT::v64i8, 2}, // vpermt2b
1258 {TTI::SK_PermuteTwoSrc, MVT::v32i8, 2}, // vpermt2b
1259 {TTI::SK_PermuteTwoSrc, MVT::v16i8, 2} // vpermt2b
1260 };
1261
1262 if (ST->hasVBMI())
1263 if (const auto *Entry =
1264 CostTableLookup(AVX512VBMIShuffleTbl, Kind, LT.second))
1265 return LT.first * Entry->Cost;
1266
1267 static const CostTblEntry AVX512BWShuffleTbl[] = {
1268 {TTI::SK_Broadcast, MVT::v32i16, 1}, // vpbroadcastw
1269 {TTI::SK_Broadcast, MVT::v64i8, 1}, // vpbroadcastb
1270
1271 {TTI::SK_Reverse, MVT::v32i16, 2}, // vpermw
1272 {TTI::SK_Reverse, MVT::v16i16, 2}, // vpermw
1273 {TTI::SK_Reverse, MVT::v64i8, 2}, // pshufb + vshufi64x2
1274
1275 {TTI::SK_PermuteSingleSrc, MVT::v32i16, 2}, // vpermw
1276 {TTI::SK_PermuteSingleSrc, MVT::v16i16, 2}, // vpermw
1277 {TTI::SK_PermuteSingleSrc, MVT::v64i8, 8}, // extend to v32i16
1278
1279 {TTI::SK_PermuteTwoSrc, MVT::v32i16, 2}, // vpermt2w
1280 {TTI::SK_PermuteTwoSrc, MVT::v16i16, 2}, // vpermt2w
1281 {TTI::SK_PermuteTwoSrc, MVT::v8i16, 2}, // vpermt2w
1282 {TTI::SK_PermuteTwoSrc, MVT::v64i8, 19}, // 6 * v32i8 + 1
1283
1284 {TTI::SK_Select, MVT::v32i16, 1}, // vblendmw
1285 {TTI::SK_Select, MVT::v64i8, 1}, // vblendmb
1286 };
1287
1288 if (ST->hasBWI())
1289 if (const auto *Entry =
1290 CostTableLookup(AVX512BWShuffleTbl, Kind, LT.second))
1291 return LT.first * Entry->Cost;
1292
1293 static const CostTblEntry AVX512ShuffleTbl[] = {
1294 {TTI::SK_Broadcast, MVT::v8f64, 1}, // vbroadcastpd
1295 {TTI::SK_Broadcast, MVT::v16f32, 1}, // vbroadcastps
1296 {TTI::SK_Broadcast, MVT::v8i64, 1}, // vpbroadcastq
1297 {TTI::SK_Broadcast, MVT::v16i32, 1}, // vpbroadcastd
1298 {TTI::SK_Broadcast, MVT::v32i16, 1}, // vpbroadcastw
1299 {TTI::SK_Broadcast, MVT::v64i8, 1}, // vpbroadcastb
1300
1301 {TTI::SK_Reverse, MVT::v8f64, 1}, // vpermpd
1302 {TTI::SK_Reverse, MVT::v16f32, 1}, // vpermps
1303 {TTI::SK_Reverse, MVT::v8i64, 1}, // vpermq
1304 {TTI::SK_Reverse, MVT::v16i32, 1}, // vpermd
1305 {TTI::SK_Reverse, MVT::v32i16, 7}, // per mca
1306 {TTI::SK_Reverse, MVT::v64i8, 7}, // per mca
1307
1308 {TTI::SK_PermuteSingleSrc, MVT::v8f64, 1}, // vpermpd
1309 {TTI::SK_PermuteSingleSrc, MVT::v4f64, 1}, // vpermpd
1310 {TTI::SK_PermuteSingleSrc, MVT::v2f64, 1}, // vpermpd
1311 {TTI::SK_PermuteSingleSrc, MVT::v16f32, 1}, // vpermps
1312 {TTI::SK_PermuteSingleSrc, MVT::v8f32, 1}, // vpermps
1313 {TTI::SK_PermuteSingleSrc, MVT::v4f32, 1}, // vpermps
1314 {TTI::SK_PermuteSingleSrc, MVT::v8i64, 1}, // vpermq
1315 {TTI::SK_PermuteSingleSrc, MVT::v4i64, 1}, // vpermq
1316 {TTI::SK_PermuteSingleSrc, MVT::v2i64, 1}, // vpermq
1317 {TTI::SK_PermuteSingleSrc, MVT::v16i32, 1}, // vpermd
1318 {TTI::SK_PermuteSingleSrc, MVT::v8i32, 1}, // vpermd
1319 {TTI::SK_PermuteSingleSrc, MVT::v4i32, 1}, // vpermd
1320 {TTI::SK_PermuteSingleSrc, MVT::v16i8, 1}, // pshufb
1321
1322 {TTI::SK_PermuteTwoSrc, MVT::v8f64, 1}, // vpermt2pd
1323 {TTI::SK_PermuteTwoSrc, MVT::v16f32, 1}, // vpermt2ps
1324 {TTI::SK_PermuteTwoSrc, MVT::v8i64, 1}, // vpermt2q
1325 {TTI::SK_PermuteTwoSrc, MVT::v16i32, 1}, // vpermt2d
1326 {TTI::SK_PermuteTwoSrc, MVT::v4f64, 1}, // vpermt2pd
1327 {TTI::SK_PermuteTwoSrc, MVT::v8f32, 1}, // vpermt2ps
1328 {TTI::SK_PermuteTwoSrc, MVT::v4i64, 1}, // vpermt2q
1329 {TTI::SK_PermuteTwoSrc, MVT::v8i32, 1}, // vpermt2d
1330 {TTI::SK_PermuteTwoSrc, MVT::v2f64, 1}, // vpermt2pd
1331 {TTI::SK_PermuteTwoSrc, MVT::v4f32, 1}, // vpermt2ps
1332 {TTI::SK_PermuteTwoSrc, MVT::v2i64, 1}, // vpermt2q
1333 {TTI::SK_PermuteTwoSrc, MVT::v4i32, 1}, // vpermt2d
1334
1335 // FIXME: This just applies the type legalization cost rules above
1336 // assuming these completely split.
1337 {TTI::SK_PermuteSingleSrc, MVT::v32i16, 14},
1338 {TTI::SK_PermuteSingleSrc, MVT::v64i8, 14},
1339 {TTI::SK_PermuteTwoSrc, MVT::v32i16, 42},
1340 {TTI::SK_PermuteTwoSrc, MVT::v64i8, 42},
1341
1342 {TTI::SK_Select, MVT::v32i16, 1}, // vpternlogq
1343 {TTI::SK_Select, MVT::v64i8, 1}, // vpternlogq
1344 {TTI::SK_Select, MVT::v8f64, 1}, // vblendmpd
1345 {TTI::SK_Select, MVT::v16f32, 1}, // vblendmps
1346 {TTI::SK_Select, MVT::v8i64, 1}, // vblendmq
1347 {TTI::SK_Select, MVT::v16i32, 1}, // vblendmd
1348 };
1349
1350 if (ST->hasAVX512())
1351 if (const auto *Entry = CostTableLookup(AVX512ShuffleTbl, Kind, LT.second))
1352 return LT.first * Entry->Cost;
1353
1354 static const CostTblEntry AVX2ShuffleTbl[] = {
1355 {TTI::SK_Broadcast, MVT::v4f64, 1}, // vbroadcastpd
1356 {TTI::SK_Broadcast, MVT::v8f32, 1}, // vbroadcastps
1357 {TTI::SK_Broadcast, MVT::v4i64, 1}, // vpbroadcastq
1358 {TTI::SK_Broadcast, MVT::v8i32, 1}, // vpbroadcastd
1359 {TTI::SK_Broadcast, MVT::v16i16, 1}, // vpbroadcastw
1360 {TTI::SK_Broadcast, MVT::v32i8, 1}, // vpbroadcastb
1361
1362 {TTI::SK_Reverse, MVT::v4f64, 1}, // vpermpd
1363 {TTI::SK_Reverse, MVT::v8f32, 1}, // vpermps
1364 {TTI::SK_Reverse, MVT::v4i64, 1}, // vpermq
1365 {TTI::SK_Reverse, MVT::v8i32, 1}, // vpermd
1366 {TTI::SK_Reverse, MVT::v16i16, 2}, // vperm2i128 + pshufb
1367 {TTI::SK_Reverse, MVT::v32i8, 2}, // vperm2i128 + pshufb
1368
1369 {TTI::SK_Select, MVT::v16i16, 1}, // vpblendvb
1370 {TTI::SK_Select, MVT::v32i8, 1}, // vpblendvb
1371
1372 {TTI::SK_PermuteSingleSrc, MVT::v4f64, 1}, // vpermpd
1373 {TTI::SK_PermuteSingleSrc, MVT::v8f32, 1}, // vpermps
1374 {TTI::SK_PermuteSingleSrc, MVT::v4i64, 1}, // vpermq
1375 {TTI::SK_PermuteSingleSrc, MVT::v8i32, 1}, // vpermd
1376 {TTI::SK_PermuteSingleSrc, MVT::v16i16, 4}, // vperm2i128 + 2*vpshufb
1377 // + vpblendvb
1378 {TTI::SK_PermuteSingleSrc, MVT::v32i8, 4}, // vperm2i128 + 2*vpshufb
1379 // + vpblendvb
1380
1381 {TTI::SK_PermuteTwoSrc, MVT::v4f64, 3}, // 2*vpermpd + vblendpd
1382 {TTI::SK_PermuteTwoSrc, MVT::v8f32, 3}, // 2*vpermps + vblendps
1383 {TTI::SK_PermuteTwoSrc, MVT::v4i64, 3}, // 2*vpermq + vpblendd
1384 {TTI::SK_PermuteTwoSrc, MVT::v8i32, 3}, // 2*vpermd + vpblendd
1385 {TTI::SK_PermuteTwoSrc, MVT::v16i16, 7}, // 2*vperm2i128 + 4*vpshufb
1386 // + vpblendvb
1387 {TTI::SK_PermuteTwoSrc, MVT::v32i8, 7}, // 2*vperm2i128 + 4*vpshufb
1388 // + vpblendvb
1389 };
1390
1391 if (ST->hasAVX2())
1392 if (const auto *Entry = CostTableLookup(AVX2ShuffleTbl, Kind, LT.second))
1393 return LT.first * Entry->Cost;
1394
1395 static const CostTblEntry XOPShuffleTbl[] = {
1396 {TTI::SK_PermuteSingleSrc, MVT::v4f64, 2}, // vperm2f128 + vpermil2pd
1397 {TTI::SK_PermuteSingleSrc, MVT::v8f32, 2}, // vperm2f128 + vpermil2ps
1398 {TTI::SK_PermuteSingleSrc, MVT::v4i64, 2}, // vperm2f128 + vpermil2pd
1399 {TTI::SK_PermuteSingleSrc, MVT::v8i32, 2}, // vperm2f128 + vpermil2ps
1400 {TTI::SK_PermuteSingleSrc, MVT::v16i16, 4}, // vextractf128 + 2*vpperm
1401 // + vinsertf128
1402 {TTI::SK_PermuteSingleSrc, MVT::v32i8, 4}, // vextractf128 + 2*vpperm
1403 // + vinsertf128
1404
1405 {TTI::SK_PermuteTwoSrc, MVT::v16i16, 9}, // 2*vextractf128 + 6*vpperm
1406 // + vinsertf128
1407 {TTI::SK_PermuteTwoSrc, MVT::v8i16, 1}, // vpperm
1408 {TTI::SK_PermuteTwoSrc, MVT::v32i8, 9}, // 2*vextractf128 + 6*vpperm
1409 // + vinsertf128
1410 {TTI::SK_PermuteTwoSrc, MVT::v16i8, 1}, // vpperm
1411 };
1412
1413 if (ST->hasXOP())
1414 if (const auto *Entry = CostTableLookup(XOPShuffleTbl, Kind, LT.second))
1415 return LT.first * Entry->Cost;
1416
1417 static const CostTblEntry AVX1ShuffleTbl[] = {
1418 {TTI::SK_Broadcast, MVT::v4f64, 2}, // vperm2f128 + vpermilpd
1419 {TTI::SK_Broadcast, MVT::v8f32, 2}, // vperm2f128 + vpermilps
1420 {TTI::SK_Broadcast, MVT::v4i64, 2}, // vperm2f128 + vpermilpd
1421 {TTI::SK_Broadcast, MVT::v8i32, 2}, // vperm2f128 + vpermilps
1422 {TTI::SK_Broadcast, MVT::v16i16, 3}, // vpshuflw + vpshufd + vinsertf128
1423 {TTI::SK_Broadcast, MVT::v32i8, 2}, // vpshufb + vinsertf128
1424
1425 {TTI::SK_Reverse, MVT::v4f64, 2}, // vperm2f128 + vpermilpd
1426 {TTI::SK_Reverse, MVT::v8f32, 2}, // vperm2f128 + vpermilps
1427 {TTI::SK_Reverse, MVT::v4i64, 2}, // vperm2f128 + vpermilpd
1428 {TTI::SK_Reverse, MVT::v8i32, 2}, // vperm2f128 + vpermilps
1429 {TTI::SK_Reverse, MVT::v16i16, 4}, // vextractf128 + 2*pshufb
1430 // + vinsertf128
1431 {TTI::SK_Reverse, MVT::v32i8, 4}, // vextractf128 + 2*pshufb
1432 // + vinsertf128
1433
1434 {TTI::SK_Select, MVT::v4i64, 1}, // vblendpd
1435 {TTI::SK_Select, MVT::v4f64, 1}, // vblendpd
1436 {TTI::SK_Select, MVT::v8i32, 1}, // vblendps
1437 {TTI::SK_Select, MVT::v8f32, 1}, // vblendps
1438 {TTI::SK_Select, MVT::v16i16, 3}, // vpand + vpandn + vpor
1439 {TTI::SK_Select, MVT::v32i8, 3}, // vpand + vpandn + vpor
1440
1441 {TTI::SK_PermuteSingleSrc, MVT::v4f64, 2}, // vperm2f128 + vshufpd
1442 {TTI::SK_PermuteSingleSrc, MVT::v4i64, 2}, // vperm2f128 + vshufpd
1443 {TTI::SK_PermuteSingleSrc, MVT::v8f32, 4}, // 2*vperm2f128 + 2*vshufps
1444 {TTI::SK_PermuteSingleSrc, MVT::v8i32, 4}, // 2*vperm2f128 + 2*vshufps
1445 {TTI::SK_PermuteSingleSrc, MVT::v16i16, 8}, // vextractf128 + 4*pshufb
1446 // + 2*por + vinsertf128
1447 {TTI::SK_PermuteSingleSrc, MVT::v32i8, 8}, // vextractf128 + 4*pshufb
1448 // + 2*por + vinsertf128
1449
1450 {TTI::SK_PermuteTwoSrc, MVT::v4f64, 3}, // 2*vperm2f128 + vshufpd
1451 {TTI::SK_PermuteTwoSrc, MVT::v4i64, 3}, // 2*vperm2f128 + vshufpd
1452 {TTI::SK_PermuteTwoSrc, MVT::v8f32, 4}, // 2*vperm2f128 + 2*vshufps
1453 {TTI::SK_PermuteTwoSrc, MVT::v8i32, 4}, // 2*vperm2f128 + 2*vshufps
1454 {TTI::SK_PermuteTwoSrc, MVT::v16i16, 15}, // 2*vextractf128 + 8*pshufb
1455 // + 4*por + vinsertf128
1456 {TTI::SK_PermuteTwoSrc, MVT::v32i8, 15}, // 2*vextractf128 + 8*pshufb
1457 // + 4*por + vinsertf128
1458 };
1459
1460 if (ST->hasAVX())
1461 if (const auto *Entry = CostTableLookup(AVX1ShuffleTbl, Kind, LT.second))
1462 return LT.first * Entry->Cost;
1463
1464 static const CostTblEntry SSE41ShuffleTbl[] = {
1465 {TTI::SK_Select, MVT::v2i64, 1}, // pblendw
1466 {TTI::SK_Select, MVT::v2f64, 1}, // movsd
1467 {TTI::SK_Select, MVT::v4i32, 1}, // pblendw
1468 {TTI::SK_Select, MVT::v4f32, 1}, // blendps
1469 {TTI::SK_Select, MVT::v8i16, 1}, // pblendw
1470 {TTI::SK_Select, MVT::v16i8, 1} // pblendvb
1471 };
1472
1473 if (ST->hasSSE41())
1474 if (const auto *Entry = CostTableLookup(SSE41ShuffleTbl, Kind, LT.second))
1475 return LT.first * Entry->Cost;
1476
1477 static const CostTblEntry SSSE3ShuffleTbl[] = {
1478 {TTI::SK_Broadcast, MVT::v8i16, 1}, // pshufb
1479 {TTI::SK_Broadcast, MVT::v16i8, 1}, // pshufb
1480
1481 {TTI::SK_Reverse, MVT::v8i16, 1}, // pshufb
1482 {TTI::SK_Reverse, MVT::v16i8, 1}, // pshufb
1483
1484 {TTI::SK_Select, MVT::v8i16, 3}, // 2*pshufb + por
1485 {TTI::SK_Select, MVT::v16i8, 3}, // 2*pshufb + por
1486
1487 {TTI::SK_PermuteSingleSrc, MVT::v8i16, 1}, // pshufb
1488 {TTI::SK_PermuteSingleSrc, MVT::v16i8, 1}, // pshufb
1489
1490 {TTI::SK_PermuteTwoSrc, MVT::v8i16, 3}, // 2*pshufb + por
1491 {TTI::SK_PermuteTwoSrc, MVT::v16i8, 3}, // 2*pshufb + por
1492 };
1493
1494 if (ST->hasSSSE3())
1495 if (const auto *Entry = CostTableLookup(SSSE3ShuffleTbl, Kind, LT.second))
1496 return LT.first * Entry->Cost;
1497
1498 static const CostTblEntry SSE2ShuffleTbl[] = {
1499 {TTI::SK_Broadcast, MVT::v2f64, 1}, // shufpd
1500 {TTI::SK_Broadcast, MVT::v2i64, 1}, // pshufd
1501 {TTI::SK_Broadcast, MVT::v4i32, 1}, // pshufd
1502 {TTI::SK_Broadcast, MVT::v8i16, 2}, // pshuflw + pshufd
1503 {TTI::SK_Broadcast, MVT::v16i8, 3}, // unpck + pshuflw + pshufd
1504
1505 {TTI::SK_Reverse, MVT::v2f64, 1}, // shufpd
1506 {TTI::SK_Reverse, MVT::v2i64, 1}, // pshufd
1507 {TTI::SK_Reverse, MVT::v4i32, 1}, // pshufd
1508 {TTI::SK_Reverse, MVT::v8i16, 3}, // pshuflw + pshufhw + pshufd
1509 {TTI::SK_Reverse, MVT::v16i8, 9}, // 2*pshuflw + 2*pshufhw
1510 // + 2*pshufd + 2*unpck + packus
1511
1512 {TTI::SK_Select, MVT::v2i64, 1}, // movsd
1513 {TTI::SK_Select, MVT::v2f64, 1}, // movsd
1514 {TTI::SK_Select, MVT::v4i32, 2}, // 2*shufps
1515 {TTI::SK_Select, MVT::v8i16, 3}, // pand + pandn + por
1516 {TTI::SK_Select, MVT::v16i8, 3}, // pand + pandn + por
1517
1518 {TTI::SK_PermuteSingleSrc, MVT::v2f64, 1}, // shufpd
1519 {TTI::SK_PermuteSingleSrc, MVT::v2i64, 1}, // pshufd
1520 {TTI::SK_PermuteSingleSrc, MVT::v4i32, 1}, // pshufd
1521 {TTI::SK_PermuteSingleSrc, MVT::v8i16, 5}, // 2*pshuflw + 2*pshufhw
1522 // + pshufd/unpck
1523 { TTI::SK_PermuteSingleSrc, MVT::v16i8, 10 }, // 2*pshuflw + 2*pshufhw
1524 // + 2*pshufd + 2*unpck + 2*packus
1525
1526 { TTI::SK_PermuteTwoSrc, MVT::v2f64, 1 }, // shufpd
1527 { TTI::SK_PermuteTwoSrc, MVT::v2i64, 1 }, // shufpd
1528 { TTI::SK_PermuteTwoSrc, MVT::v4i32, 2 }, // 2*{unpck,movsd,pshufd}
1529 { TTI::SK_PermuteTwoSrc, MVT::v8i16, 8 }, // blend+permute
1530 { TTI::SK_PermuteTwoSrc, MVT::v16i8, 13 }, // blend+permute
1531 };
1532
1533 if (ST->hasSSE2())
1534 if (const auto *Entry = CostTableLookup(SSE2ShuffleTbl, Kind, LT.second))
1535 return LT.first * Entry->Cost;
1536
1537 static const CostTblEntry SSE1ShuffleTbl[] = {
1538 { TTI::SK_Broadcast, MVT::v4f32, 1 }, // shufps
1539 { TTI::SK_Reverse, MVT::v4f32, 1 }, // shufps
1540 { TTI::SK_Select, MVT::v4f32, 2 }, // 2*shufps
1541 { TTI::SK_PermuteSingleSrc, MVT::v4f32, 1 }, // shufps
1542 { TTI::SK_PermuteTwoSrc, MVT::v4f32, 2 }, // 2*shufps
1543 };
1544
1545 if (ST->hasSSE1())
1546 if (const auto *Entry = CostTableLookup(SSE1ShuffleTbl, Kind, LT.second))
1547 return LT.first * Entry->Cost;
1548
1549 return BaseT::getShuffleCost(Kind, BaseTp, Mask, Index, SubTp);
1550}
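// Note that the shuffle cost tables above are consulted from the most specific
// feature set (AVX512FP16, VBMI, BWI, AVX-512, AVX2, XOP, AVX, SSE4.1, SSSE3,
// SSE2, SSE1) down to the least specific, so the first table containing the
// legalized type decides the cost before falling back to the base implementation.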
1551
1552InstructionCost X86TTIImpl::getCastInstrCost(unsigned Opcode, Type *Dst,
1553 Type *Src,
1554 TTI::CastContextHint CCH,
1555 TTI::TargetCostKind CostKind,
1556 const Instruction *I) {
1557 int ISD = TLI->InstructionOpcodeToISD(Opcode);
1558  assert(ISD && "Invalid opcode");
1559
1560 // TODO: Allow non-throughput costs that aren't binary.
1561 auto AdjustCost = [&CostKind](InstructionCost Cost) -> InstructionCost {
1562 if (CostKind != TTI::TCK_RecipThroughput)
1563 return Cost == 0 ? 0 : 1;
1564 return Cost;
1565 };
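  // AdjustCost collapses table costs to a 0/1 answer for non-throughput cost
  // kinds (code size, latency), since the tables below are calibrated for
  // reciprocal throughput only.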
1566
1567  // The cost tables include both specific, custom (non-legal) src/dst type
1568  // conversions and generic, legalized types. We test for custom conversions
1569  // first, before falling back to legalization.
1570  // FIXME: Need a better design of the cost table to handle non-simple types and
1571  // the potentially massive number of combinations (elem_num x src_type x dst_type).
1572 static const TypeConversionCostTblEntry AVX512BWConversionTbl[] {
1573 { ISD::SIGN_EXTEND, MVT::v32i16, MVT::v32i8, 1 },
1574 { ISD::ZERO_EXTEND, MVT::v32i16, MVT::v32i8, 1 },
1575
1576 // Mask sign extend has an instruction.
1577 { ISD::SIGN_EXTEND, MVT::v2i8, MVT::v2i1, 1 },
1578 { ISD::SIGN_EXTEND, MVT::v2i16, MVT::v2i1, 1 },
1579 { ISD::SIGN_EXTEND, MVT::v4i8, MVT::v4i1, 1 },
1580 { ISD::SIGN_EXTEND, MVT::v4i16, MVT::v4i1, 1 },
1581 { ISD::SIGN_EXTEND, MVT::v8i8, MVT::v8i1, 1 },
1582 { ISD::SIGN_EXTEND, MVT::v8i16, MVT::v8i1, 1 },
1583 { ISD::SIGN_EXTEND, MVT::v16i8, MVT::v16i1, 1 },
1584 { ISD::SIGN_EXTEND, MVT::v16i16, MVT::v16i1, 1 },
1585 { ISD::SIGN_EXTEND, MVT::v32i8, MVT::v32i1, 1 },
1586 { ISD::SIGN_EXTEND, MVT::v32i16, MVT::v32i1, 1 },
1587 { ISD::SIGN_EXTEND, MVT::v64i8, MVT::v64i1, 1 },
1588
1589 // Mask zero extend is a sext + shift.
1590 { ISD::ZERO_EXTEND, MVT::v2i8, MVT::v2i1, 2 },
1591 { ISD::ZERO_EXTEND, MVT::v2i16, MVT::v2i1, 2 },
1592 { ISD::ZERO_EXTEND, MVT::v4i8, MVT::v4i1, 2 },
1593 { ISD::ZERO_EXTEND, MVT::v4i16, MVT::v4i1, 2 },
1594 { ISD::ZERO_EXTEND, MVT::v8i8, MVT::v8i1, 2 },
1595 { ISD::ZERO_EXTEND, MVT::v8i16, MVT::v8i1, 2 },
1596 { ISD::ZERO_EXTEND, MVT::v16i8, MVT::v16i1, 2 },
1597 { ISD::ZERO_EXTEND, MVT::v16i16, MVT::v16i1, 2 },
1598 { ISD::ZERO_EXTEND, MVT::v32i8, MVT::v32i1, 2 },
1599 { ISD::ZERO_EXTEND, MVT::v32i16, MVT::v32i1, 2 },
1600 { ISD::ZERO_EXTEND, MVT::v64i8, MVT::v64i1, 2 },
1601
1602 { ISD::TRUNCATE, MVT::v32i8, MVT::v32i16, 2 },
1603 { ISD::TRUNCATE, MVT::v16i8, MVT::v16i16, 2 }, // widen to zmm
1604 { ISD::TRUNCATE, MVT::v2i1, MVT::v2i8, 2 }, // widen to zmm
1605 { ISD::TRUNCATE, MVT::v2i1, MVT::v2i16, 2 }, // widen to zmm
1606 { ISD::TRUNCATE, MVT::v2i8, MVT::v2i16, 2 }, // vpmovwb
1607 { ISD::TRUNCATE, MVT::v4i1, MVT::v4i8, 2 }, // widen to zmm
1608 { ISD::TRUNCATE, MVT::v4i1, MVT::v4i16, 2 }, // widen to zmm
1609 { ISD::TRUNCATE, MVT::v4i8, MVT::v4i16, 2 }, // vpmovwb
1610 { ISD::TRUNCATE, MVT::v8i1, MVT::v8i8, 2 }, // widen to zmm
1611 { ISD::TRUNCATE, MVT::v8i1, MVT::v8i16, 2 }, // widen to zmm
1612 { ISD::TRUNCATE, MVT::v8i8, MVT::v8i16, 2 }, // vpmovwb
1613 { ISD::TRUNCATE, MVT::v16i1, MVT::v16i8, 2 }, // widen to zmm
1614 { ISD::TRUNCATE, MVT::v16i1, MVT::v16i16, 2 }, // widen to zmm
1615 { ISD::TRUNCATE, MVT::v32i1, MVT::v32i8, 2 }, // widen to zmm
1616 { ISD::TRUNCATE, MVT::v32i1, MVT::v32i16, 2 },
1617 { ISD::TRUNCATE, MVT::v64i1, MVT::v64i8, 2 },
1618 };
1619
1620 static const TypeConversionCostTblEntry AVX512DQConversionTbl[] = {
1621 { ISD::SINT_TO_FP, MVT::v8f32, MVT::v8i64, 1 },
1622 { ISD::SINT_TO_FP, MVT::v8f64, MVT::v8i64, 1 },
1623
1624 { ISD::UINT_TO_FP, MVT::v8f32, MVT::v8i64, 1 },
1625 { ISD::UINT_TO_FP, MVT::v8f64, MVT::v8i64, 1 },
1626
1627 { ISD::FP_TO_SINT, MVT::v8i64, MVT::v8f32, 1 },
1628 { ISD::FP_TO_SINT, MVT::v8i64, MVT::v8f64, 1 },
1629
1630 { ISD::FP_TO_UINT, MVT::v8i64, MVT::v8f32, 1 },
1631 { ISD::FP_TO_UINT, MVT::v8i64, MVT::v8f64, 1 },
1632 };
1633
1634 // TODO: For AVX512DQ + AVX512VL, we also have cheap casts for 128-bit and
1635 // 256-bit wide vectors.
1636
1637 static const TypeConversionCostTblEntry AVX512FConversionTbl[] = {
1638 { ISD::FP_EXTEND, MVT::v8f64, MVT::v8f32, 1 },
1639 { ISD::FP_EXTEND, MVT::v8f64, MVT::v16f32, 3 },
1640 { ISD::FP_ROUND, MVT::v8f32, MVT::v8f64, 1 },
1641
1642 { ISD::TRUNCATE, MVT::v2i1, MVT::v2i8, 3 }, // sext+vpslld+vptestmd
1643 { ISD::TRUNCATE, MVT::v4i1, MVT::v4i8, 3 }, // sext+vpslld+vptestmd
1644 { ISD::TRUNCATE, MVT::v8i1, MVT::v8i8, 3 }, // sext+vpslld+vptestmd
1645 { ISD::TRUNCATE, MVT::v16i1, MVT::v16i8, 3 }, // sext+vpslld+vptestmd
1646 { ISD::TRUNCATE, MVT::v2i1, MVT::v2i16, 3 }, // sext+vpsllq+vptestmq
1647 { ISD::TRUNCATE, MVT::v4i1, MVT::v4i16, 3 }, // sext+vpsllq+vptestmq
1648 { ISD::TRUNCATE, MVT::v8i1, MVT::v8i16, 3 }, // sext+vpsllq+vptestmq
1649 { ISD::TRUNCATE, MVT::v16i1, MVT::v16i16, 3 }, // sext+vpslld+vptestmd
1650 { ISD::TRUNCATE, MVT::v2i1, MVT::v2i32, 2 }, // zmm vpslld+vptestmd
1651 { ISD::TRUNCATE, MVT::v4i1, MVT::v4i32, 2 }, // zmm vpslld+vptestmd
1652 { ISD::TRUNCATE, MVT::v8i1, MVT::v8i32, 2 }, // zmm vpslld+vptestmd
1653 { ISD::TRUNCATE, MVT::v16i1, MVT::v16i32, 2 }, // vpslld+vptestmd
1654 { ISD::TRUNCATE, MVT::v2i1, MVT::v2i64, 2 }, // zmm vpsllq+vptestmq
1655 { ISD::TRUNCATE, MVT::v4i1, MVT::v4i64, 2 }, // zmm vpsllq+vptestmq
1656 { ISD::TRUNCATE, MVT::v8i1, MVT::v8i64, 2 }, // vpsllq+vptestmq
1657 { ISD::TRUNCATE, MVT::v2i8, MVT::v2i32, 2 }, // vpmovdb
1658 { ISD::TRUNCATE, MVT::v4i8, MVT::v4i32, 2 }, // vpmovdb
1659 { ISD::TRUNCATE, MVT::v16i8, MVT::v16i32, 2 }, // vpmovdb
1660 { ISD::TRUNCATE, MVT::v16i16, MVT::v16i32, 2 }, // vpmovdb
1661 { ISD::TRUNCATE, MVT::v2i8, MVT::v2i64, 2 }, // vpmovqb
1662 { ISD::TRUNCATE, MVT::v2i16, MVT::v2i64, 1 }, // vpshufb
1663 { ISD::TRUNCATE, MVT::v8i8, MVT::v8i64, 2 }, // vpmovqb
1664 { ISD::TRUNCATE, MVT::v8i16, MVT::v8i64, 2 }, // vpmovqw
1665 { ISD::TRUNCATE, MVT::v8i32, MVT::v8i64, 1 }, // vpmovqd
1666 { ISD::TRUNCATE, MVT::v4i32, MVT::v4i64, 1 }, // zmm vpmovqd
1667    { ISD::TRUNCATE,  MVT::v16i8, MVT::v16i64, 5 }, // 2*vpmovqd+concat+vpmovdb
1668
1669 { ISD::TRUNCATE, MVT::v16i8, MVT::v16i16, 3 }, // extend to v16i32
1670 { ISD::TRUNCATE, MVT::v32i8, MVT::v32i16, 8 },
1671
1672 // Sign extend is zmm vpternlogd+vptruncdb.
1673 // Zero extend is zmm broadcast load+vptruncdw.
1674 { ISD::SIGN_EXTEND, MVT::v2i8, MVT::v2i1, 3 },
1675 { ISD::ZERO_EXTEND, MVT::v2i8, MVT::v2i1, 4 },
1676 { ISD::SIGN_EXTEND, MVT::v4i8, MVT::v4i1, 3 },
1677 { ISD::ZERO_EXTEND, MVT::v4i8, MVT::v4i1, 4 },
1678 { ISD::SIGN_EXTEND, MVT::v8i8, MVT::v8i1, 3 },
1679 { ISD::ZERO_EXTEND, MVT::v8i8, MVT::v8i1, 4 },
1680 { ISD::SIGN_EXTEND, MVT::v16i8, MVT::v16i1, 3 },
1681 { ISD::ZERO_EXTEND, MVT::v16i8, MVT::v16i1, 4 },
1682
1683 // Sign extend is zmm vpternlogd+vptruncdw.
1684 // Zero extend is zmm vpternlogd+vptruncdw+vpsrlw.
1685 { ISD::SIGN_EXTEND, MVT::v2i16, MVT::v2i1, 3 },
1686 { ISD::ZERO_EXTEND, MVT::v2i16, MVT::v2i1, 4 },
1687 { ISD::SIGN_EXTEND, MVT::v4i16, MVT::v4i1, 3 },
1688 { ISD::ZERO_EXTEND, MVT::v4i16, MVT::v4i1, 4 },
1689 { ISD::SIGN_EXTEND, MVT::v8i16, MVT::v8i1, 3 },
1690 { ISD::ZERO_EXTEND, MVT::v8i16, MVT::v8i1, 4 },
1691 { ISD::SIGN_EXTEND, MVT::v16i16, MVT::v16i1, 3 },
1692 { ISD::ZERO_EXTEND, MVT::v16i16, MVT::v16i1, 4 },
1693
1694 { ISD::SIGN_EXTEND, MVT::v2i32, MVT::v2i1, 1 }, // zmm vpternlogd
1695 { ISD::ZERO_EXTEND, MVT::v2i32, MVT::v2i1, 2 }, // zmm vpternlogd+psrld
1696 { ISD::SIGN_EXTEND, MVT::v4i32, MVT::v4i1, 1 }, // zmm vpternlogd
1697 { ISD::ZERO_EXTEND, MVT::v4i32, MVT::v4i1, 2 }, // zmm vpternlogd+psrld
1698 { ISD::SIGN_EXTEND, MVT::v8i32, MVT::v8i1, 1 }, // zmm vpternlogd
1699 { ISD::ZERO_EXTEND, MVT::v8i32, MVT::v8i1, 2 }, // zmm vpternlogd+psrld
1700 { ISD::SIGN_EXTEND, MVT::v2i64, MVT::v2i1, 1 }, // zmm vpternlogq
1701 { ISD::ZERO_EXTEND, MVT::v2i64, MVT::v2i1, 2 }, // zmm vpternlogq+psrlq
1702 { ISD::SIGN_EXTEND, MVT::v4i64, MVT::v4i1, 1 }, // zmm vpternlogq
1703 { ISD::ZERO_EXTEND, MVT::v4i64, MVT::v4i1, 2 }, // zmm vpternlogq+psrlq
1704
1705 { ISD::SIGN_EXTEND, MVT::v16i32, MVT::v16i1, 1 }, // vpternlogd
1706 { ISD::ZERO_EXTEND, MVT::v16i32, MVT::v16i1, 2 }, // vpternlogd+psrld
1707 { ISD::SIGN_EXTEND, MVT::v8i64, MVT::v8i1, 1 }, // vpternlogq
1708 { ISD::ZERO_EXTEND, MVT::v8i64, MVT::v8i1, 2 }, // vpternlogq+psrlq
1709
1710 { ISD::SIGN_EXTEND, MVT::v16i32, MVT::v16i8, 1 },
1711 { ISD::ZERO_EXTEND, MVT::v16i32, MVT::v16i8, 1 },
1712 { ISD::SIGN_EXTEND, MVT::v16i32, MVT::v16i16, 1 },
1713 { ISD::ZERO_EXTEND, MVT::v16i32, MVT::v16i16, 1 },
1714 { ISD::SIGN_EXTEND, MVT::v8i64, MVT::v8i8, 1 },
1715 { ISD::ZERO_EXTEND, MVT::v8i64, MVT::v8i8, 1 },
1716 { ISD::SIGN_EXTEND, MVT::v8i64, MVT::v8i16, 1 },
1717 { ISD::ZERO_EXTEND, MVT::v8i64, MVT::v8i16, 1 },
1718 { ISD::SIGN_EXTEND, MVT::v8i64, MVT::v8i32, 1 },
1719 { ISD::ZERO_EXTEND, MVT::v8i64, MVT::v8i32, 1 },
1720
1721 { ISD::SIGN_EXTEND, MVT::v32i16, MVT::v32i8, 3 }, // FIXME: May not be right
1722 { ISD::ZERO_EXTEND, MVT::v32i16, MVT::v32i8, 3 }, // FIXME: May not be right
1723
1724 { ISD::SINT_TO_FP, MVT::v8f64, MVT::v8i1, 4 },
1725 { ISD::SINT_TO_FP, MVT::v16f32, MVT::v16i1, 3 },
1726 { ISD::SINT_TO_FP, MVT::v8f64, MVT::v16i8, 2 },
1727 { ISD::SINT_TO_FP, MVT::v16f32, MVT::v16i8, 1 },
1728 { ISD::SINT_TO_FP, MVT::v8f64, MVT::v8i16, 2 },
1729 { ISD::SINT_TO_FP, MVT::v16f32, MVT::v16i16, 1 },
1730 { ISD::SINT_TO_FP, MVT::v8f64, MVT::v8i32, 1 },
1731 { ISD::SINT_TO_FP, MVT::v16f32, MVT::v16i32, 1 },
1732
1733 { ISD::UINT_TO_FP, MVT::v8f64, MVT::v8i1, 4 },
1734 { ISD::UINT_TO_FP, MVT::v16f32, MVT::v16i1, 3 },
1735 { ISD::UINT_TO_FP, MVT::v8f64, MVT::v16i8, 2 },
1736 { ISD::UINT_TO_FP, MVT::v16f32, MVT::v16i8, 1 },
1737 { ISD::UINT_TO_FP, MVT::v8f64, MVT::v8i16, 2 },
1738 { ISD::UINT_TO_FP, MVT::v16f32, MVT::v16i16, 1 },
1739 { ISD::UINT_TO_FP, MVT::v8f64, MVT::v8i32, 1 },
1740 { ISD::UINT_TO_FP, MVT::v16f32, MVT::v16i32, 1 },
1741 { ISD::UINT_TO_FP, MVT::v8f32, MVT::v8i64, 26 },
1742 { ISD::UINT_TO_FP, MVT::v8f64, MVT::v8i64, 5 },
1743
1744 { ISD::FP_TO_SINT, MVT::v16i8, MVT::v16f32, 2 },
1745 { ISD::FP_TO_SINT, MVT::v16i8, MVT::v16f64, 7 },
1746    { ISD::FP_TO_SINT,  MVT::v32i8,  MVT::v32f64, 15 },
1747    { ISD::FP_TO_SINT,  MVT::v64i8,  MVT::v64f32, 11 },
1748    { ISD::FP_TO_SINT,  MVT::v64i8,  MVT::v64f64, 31 },
1749    { ISD::FP_TO_SINT,  MVT::v8i16,  MVT::v8f64,  3 },
1750    { ISD::FP_TO_SINT,  MVT::v16i16, MVT::v16f64, 7 },
1751    { ISD::FP_TO_SINT,  MVT::v32i16, MVT::v32f32, 5 },
1752    { ISD::FP_TO_SINT,  MVT::v32i16, MVT::v32f64, 15 },
1753 { ISD::FP_TO_SINT, MVT::v8i32, MVT::v8f64, 1 },
1754 { ISD::FP_TO_SINT, MVT::v16i32, MVT::v16f64, 3 },
1755
1756 { ISD::FP_TO_UINT, MVT::v8i32, MVT::v8f64, 1 },
1757 { ISD::FP_TO_UINT, MVT::v8i16, MVT::v8f64, 3 },
1758 { ISD::FP_TO_UINT, MVT::v8i8, MVT::v8f64, 3 },
1759 { ISD::FP_TO_UINT, MVT::v16i32, MVT::v16f32, 1 },
1760 { ISD::FP_TO_UINT, MVT::v16i16, MVT::v16f32, 3 },
1761 { ISD::FP_TO_UINT, MVT::v16i8, MVT::v16f32, 3 },
1762 };
1763
1764 static const TypeConversionCostTblEntry AVX512BWVLConversionTbl[] {
1765 // Mask sign extend has an instruction.
1766 { ISD::SIGN_EXTEND, MVT::v2i8, MVT::v2i1, 1 },
1767 { ISD::SIGN_EXTEND, MVT::v2i16, MVT::v2i1, 1 },
1768 { ISD::SIGN_EXTEND, MVT::v4i8, MVT::v4i1, 1 },
1769 { ISD::SIGN_EXTEND, MVT::v4i16, MVT::v4i1, 1 },
1770 { ISD::SIGN_EXTEND, MVT::v8i8, MVT::v8i1, 1 },
1771 { ISD::SIGN_EXTEND, MVT::v8i16, MVT::v8i1, 1 },
1772 { ISD::SIGN_EXTEND, MVT::v16i8, MVT::v16i1, 1 },
1773 { ISD::SIGN_EXTEND, MVT::v16i16, MVT::v16i1, 1 },
1774 { ISD::SIGN_EXTEND, MVT::v32i8, MVT::v32i1, 1 },
1775
1776 // Mask zero extend is a sext + shift.
1777 { ISD::ZERO_EXTEND, MVT::v2i8, MVT::v2i1, 2 },
1778 { ISD::ZERO_EXTEND, MVT::v2i16, MVT::v2i1, 2 },
1779 { ISD::ZERO_EXTEND, MVT::v4i8, MVT::v4i1, 2 },
1780 { ISD::ZERO_EXTEND, MVT::v4i16, MVT::v4i1, 2 },
1781 { ISD::ZERO_EXTEND, MVT::v8i8, MVT::v8i1, 2 },
1782 { ISD::ZERO_EXTEND, MVT::v8i16, MVT::v8i1, 2 },
1783 { ISD::ZERO_EXTEND, MVT::v16i8, MVT::v16i1, 2 },
1784 { ISD::ZERO_EXTEND, MVT::v16i16, MVT::v16i1, 2 },
1785 { ISD::ZERO_EXTEND, MVT::v32i8, MVT::v32i1, 2 },
1786
1787 { ISD::TRUNCATE, MVT::v16i8, MVT::v16i16, 2 },
1788 { ISD::TRUNCATE, MVT::v2i1, MVT::v2i8, 2 }, // vpsllw+vptestmb
1789 { ISD::TRUNCATE, MVT::v2i1, MVT::v2i16, 2 }, // vpsllw+vptestmw
1790 { ISD::TRUNCATE, MVT::v4i1, MVT::v4i8, 2 }, // vpsllw+vptestmb
1791 { ISD::TRUNCATE, MVT::v4i1, MVT::v4i16, 2 }, // vpsllw+vptestmw
1792 { ISD::TRUNCATE, MVT::v8i1, MVT::v8i8, 2 }, // vpsllw+vptestmb
1793 { ISD::TRUNCATE, MVT::v8i1, MVT::v8i16, 2 }, // vpsllw+vptestmw
1794 { ISD::TRUNCATE, MVT::v16i1, MVT::v16i8, 2 }, // vpsllw+vptestmb
1795 { ISD::TRUNCATE, MVT::v16i1, MVT::v16i16, 2 }, // vpsllw+vptestmw
1796 { ISD::TRUNCATE, MVT::v32i1, MVT::v32i8, 2 }, // vpsllw+vptestmb
1797 };
1798
1799 static const TypeConversionCostTblEntry AVX512DQVLConversionTbl[] = {
1800 { ISD::SINT_TO_FP, MVT::v2f32, MVT::v2i64, 1 },
1801 { ISD::SINT_TO_FP, MVT::v2f64, MVT::v2i64, 1 },
1802 { ISD::SINT_TO_FP, MVT::v4f32, MVT::v4i64, 1 },
1803 { ISD::SINT_TO_FP, MVT::v4f64, MVT::v4i64, 1 },
1804
1805 { ISD::UINT_TO_FP, MVT::v2f32, MVT::v2i64, 1 },
1806 { ISD::UINT_TO_FP, MVT::v2f64, MVT::v2i64, 1 },
1807 { ISD::UINT_TO_FP, MVT::v4f32, MVT::v4i64, 1 },
1808 { ISD::UINT_TO_FP, MVT::v4f64, MVT::v4i64, 1 },
1809
1810 { ISD::FP_TO_SINT, MVT::v2i64, MVT::v4f32, 1 },
1811 { ISD::FP_TO_SINT, MVT::v4i64, MVT::v4f32, 1 },
1812 { ISD::FP_TO_SINT, MVT::v2i64, MVT::v2f64, 1 },
1813 { ISD::FP_TO_SINT, MVT::v4i64, MVT::v4f64, 1 },
1814
1815 { ISD::FP_TO_UINT, MVT::v2i64, MVT::v4f32, 1 },
1816 { ISD::FP_TO_UINT, MVT::v4i64, MVT::v4f32, 1 },
1817 { ISD::FP_TO_UINT, MVT::v2i64, MVT::v2f64, 1 },
1818 { ISD::FP_TO_UINT, MVT::v4i64, MVT::v4f64, 1 },
1819 };
1820
1821 static const TypeConversionCostTblEntry AVX512VLConversionTbl[] = {
1822 { ISD::TRUNCATE, MVT::v2i1, MVT::v2i8, 3 }, // sext+vpslld+vptestmd
1823 { ISD::TRUNCATE, MVT::v4i1, MVT::v4i8, 3 }, // sext+vpslld+vptestmd
1824 { ISD::TRUNCATE, MVT::v8i1, MVT::v8i8, 3 }, // sext+vpslld+vptestmd
1825 { ISD::TRUNCATE, MVT::v16i1, MVT::v16i8, 8 }, // split+2*v8i8
1826 { ISD::TRUNCATE, MVT::v2i1, MVT::v2i16, 3 }, // sext+vpsllq+vptestmq
1827 { ISD::TRUNCATE, MVT::v4i1, MVT::v4i16, 3 }, // sext+vpsllq+vptestmq
1828 { ISD::TRUNCATE, MVT::v8i1, MVT::v8i16, 3 }, // sext+vpsllq+vptestmq
1829 { ISD::TRUNCATE, MVT::v16i1, MVT::v16i16, 8 }, // split+2*v8i16
1830 { ISD::TRUNCATE, MVT::v2i1, MVT::v2i32, 2 }, // vpslld+vptestmd
1831 { ISD::TRUNCATE, MVT::v4i1, MVT::v4i32, 2 }, // vpslld+vptestmd
1832 { ISD::TRUNCATE, MVT::v8i1, MVT::v8i32, 2 }, // vpslld+vptestmd
1833 { ISD::TRUNCATE, MVT::v2i1, MVT::v2i64, 2 }, // vpsllq+vptestmq
1834 { ISD::TRUNCATE, MVT::v4i1, MVT::v4i64, 2 }, // vpsllq+vptestmq
1835 { ISD::TRUNCATE, MVT::v4i32, MVT::v4i64, 1 }, // vpmovqd
1836 { ISD::TRUNCATE, MVT::v4i8, MVT::v4i64, 2 }, // vpmovqb
1837 { ISD::TRUNCATE, MVT::v4i16, MVT::v4i64, 2 }, // vpmovqw
1838 { ISD::TRUNCATE, MVT::v8i8, MVT::v8i32, 2 }, // vpmovwb
1839
1840 // sign extend is vpcmpeq+maskedmove+vpmovdw+vpacksswb
1841 // zero extend is vpcmpeq+maskedmove+vpmovdw+vpsrlw+vpackuswb
1842 { ISD::SIGN_EXTEND, MVT::v2i8, MVT::v2i1, 5 },
1843 { ISD::ZERO_EXTEND, MVT::v2i8, MVT::v2i1, 6 },
1844 { ISD::SIGN_EXTEND, MVT::v4i8, MVT::v4i1, 5 },
1845 { ISD::ZERO_EXTEND, MVT::v4i8, MVT::v4i1, 6 },
1846 { ISD::SIGN_EXTEND, MVT::v8i8, MVT::v8i1, 5 },
1847 { ISD::ZERO_EXTEND, MVT::v8i8, MVT::v8i1, 6 },
1848 { ISD::SIGN_EXTEND, MVT::v16i8, MVT::v16i1, 10 },
1849 { ISD::ZERO_EXTEND, MVT::v16i8, MVT::v16i1, 12 },
1850
1851 // sign extend is vpcmpeq+maskedmove+vpmovdw
1852 // zero extend is vpcmpeq+maskedmove+vpmovdw+vpsrlw
1853 { ISD::SIGN_EXTEND, MVT::v2i16, MVT::v2i1, 4 },
1854 { ISD::ZERO_EXTEND, MVT::v2i16, MVT::v2i1, 5 },
1855 { ISD::SIGN_EXTEND, MVT::v4i16, MVT::v4i1, 4 },
1856 { ISD::ZERO_EXTEND, MVT::v4i16, MVT::v4i1, 5 },
1857 { ISD::SIGN_EXTEND, MVT::v8i16, MVT::v8i1, 4 },
1858 { ISD::ZERO_EXTEND, MVT::v8i16, MVT::v8i1, 5 },
1859 { ISD::SIGN_EXTEND, MVT::v16i16, MVT::v16i1, 10 },
1860 { ISD::ZERO_EXTEND, MVT::v16i16, MVT::v16i1, 12 },
1861
1862 { ISD::SIGN_EXTEND, MVT::v2i32, MVT::v2i1, 1 }, // vpternlogd
1863 { ISD::ZERO_EXTEND, MVT::v2i32, MVT::v2i1, 2 }, // vpternlogd+psrld
1864 { ISD::SIGN_EXTEND, MVT::v4i32, MVT::v4i1, 1 }, // vpternlogd
1865 { ISD::ZERO_EXTEND, MVT::v4i32, MVT::v4i1, 2 }, // vpternlogd+psrld
1866 { ISD::SIGN_EXTEND, MVT::v8i32, MVT::v8i1, 1 }, // vpternlogd
1867 { ISD::ZERO_EXTEND, MVT::v8i32, MVT::v8i1, 2 }, // vpternlogd+psrld
1868 { ISD::SIGN_EXTEND, MVT::v2i64, MVT::v2i1, 1 }, // vpternlogq
1869 { ISD::ZERO_EXTEND, MVT::v2i64, MVT::v2i1, 2 }, // vpternlogq+psrlq
1870 { ISD::SIGN_EXTEND, MVT::v4i64, MVT::v4i1, 1 }, // vpternlogq
1871 { ISD::ZERO_EXTEND, MVT::v4i64, MVT::v4i1, 2 }, // vpternlogq+psrlq
1872
1873 { ISD::SIGN_EXTEND, MVT::v4i64, MVT::v16i8, 1 },
1874 { ISD::ZERO_EXTEND, MVT::v4i64, MVT::v16i8, 1 },
1875 { ISD::SIGN_EXTEND, MVT::v8i32, MVT::v16i8, 1 },
1876 { ISD::ZERO_EXTEND, MVT::v8i32, MVT::v16i8, 1 },
1877 { ISD::SIGN_EXTEND, MVT::v16i16, MVT::v16i8, 1 },
1878 { ISD::ZERO_EXTEND, MVT::v16i16, MVT::v16i8, 1 },
1879 { ISD::SIGN_EXTEND, MVT::v4i64, MVT::v8i16, 1 },
1880 { ISD::ZERO_EXTEND, MVT::v4i64, MVT::v8i16, 1 },
1881 { ISD::SIGN_EXTEND, MVT::v8i32, MVT::v8i16, 1 },
1882 { ISD::ZERO_EXTEND, MVT::v8i32, MVT::v8i16, 1 },
1883 { ISD::SIGN_EXTEND, MVT::v4i64, MVT::v4i32, 1 },
1884 { ISD::ZERO_EXTEND, MVT::v4i64, MVT::v4i32, 1 },
1885
1886 { ISD::SINT_TO_FP, MVT::v2f64, MVT::v16i8, 1 },
1887 { ISD::SINT_TO_FP, MVT::v8f32, MVT::v16i8, 1 },
1888 { ISD::SINT_TO_FP, MVT::v2f64, MVT::v8i16, 1 },
1889 { ISD::SINT_TO_FP, MVT::v8f32, MVT::v8i16, 1 },
1890
1891 { ISD::UINT_TO_FP, MVT::f32, MVT::i64, 1 },
1892 { ISD::UINT_TO_FP, MVT::f64, MVT::i64, 1 },
1893 { ISD::UINT_TO_FP, MVT::v2f64, MVT::v16i8, 1 },
1894 { ISD::UINT_TO_FP, MVT::v8f32, MVT::v16i8, 1 },
1895 { ISD::UINT_TO_FP, MVT::v2f64, MVT::v8i16, 1 },
1896 { ISD::UINT_TO_FP, MVT::v8f32, MVT::v8i16, 1 },
1897 { ISD::UINT_TO_FP, MVT::v2f32, MVT::v2i32, 1 },
1898 { ISD::UINT_TO_FP, MVT::v4f32, MVT::v4i32, 1 },
1899 { ISD::UINT_TO_FP, MVT::v4f64, MVT::v4i32, 1 },
1900 { ISD::UINT_TO_FP, MVT::v8f32, MVT::v8i32, 1 },
1901 { ISD::UINT_TO_FP, MVT::v2f32, MVT::v2i64, 5 },
1902 { ISD::UINT_TO_FP, MVT::v2f64, MVT::v2i64, 5 },
1903 { ISD::UINT_TO_FP, MVT::v4f64, MVT::v4i64, 5 },
1904
1905 { ISD::FP_TO_SINT, MVT::v16i8, MVT::v8f32, 2 },
1906 { ISD::FP_TO_SINT, MVT::v16i8, MVT::v16f32, 2 },
1907 { ISD::FP_TO_SINT, MVT::v32i8, MVT::v32f32, 5 },
1908
1909 { ISD::FP_TO_UINT, MVT::i64, MVT::f32, 1 },
1910 { ISD::FP_TO_UINT, MVT::i64, MVT::f64, 1 },
1911 { ISD::FP_TO_UINT, MVT::v4i32, MVT::v4f32, 1 },
1912 { ISD::FP_TO_UINT, MVT::v4i32, MVT::v2f64, 1 },
1913 { ISD::FP_TO_UINT, MVT::v4i32, MVT::v4f64, 1 },
1914 { ISD::FP_TO_UINT, MVT::v8i32, MVT::v8f32, 1 },
1915 { ISD::FP_TO_UINT, MVT::v8i32, MVT::v8f64, 1 },
1916 };
1917
1918 static const TypeConversionCostTblEntry AVX2ConversionTbl[] = {
1919 { ISD::SIGN_EXTEND, MVT::v4i64, MVT::v4i1, 3 },
1920 { ISD::ZERO_EXTEND, MVT::v4i64, MVT::v4i1, 3 },
1921 { ISD::SIGN_EXTEND, MVT::v8i32, MVT::v8i1, 3 },
1922 { ISD::ZERO_EXTEND, MVT::v8i32, MVT::v8i1, 3 },
1923 { ISD::SIGN_EXTEND, MVT::v16i16, MVT::v16i1, 1 },
1924 { ISD::ZERO_EXTEND, MVT::v16i16, MVT::v16i1, 1 },
1925
1926 { ISD::SIGN_EXTEND, MVT::v4i64, MVT::v16i8, 2 },
1927 { ISD::ZERO_EXTEND, MVT::v4i64, MVT::v16i8, 2 },
1928 { ISD::SIGN_EXTEND, MVT::v8i32, MVT::v16i8, 2 },
1929 { ISD::ZERO_EXTEND, MVT::v8i32, MVT::v16i8, 2 },
1930 { ISD::SIGN_EXTEND, MVT::v16i16, MVT::v16i8, 2 },
1931 { ISD::ZERO_EXTEND, MVT::v16i16, MVT::v16i8, 2 },
1932 { ISD::SIGN_EXTEND, MVT::v4i64, MVT::v8i16, 2 },
1933 { ISD::ZERO_EXTEND, MVT::v4i64, MVT::v8i16, 2 },
1934 { ISD::SIGN_EXTEND, MVT::v8i32, MVT::v8i16, 2 },
1935 { ISD::ZERO_EXTEND, MVT::v8i32, MVT::v8i16, 2 },
1936 { ISD::ZERO_EXTEND, MVT::v16i32, MVT::v16i16, 3 },
1937 { ISD::SIGN_EXTEND, MVT::v16i32, MVT::v16i16, 3 },
1938 { ISD::SIGN_EXTEND, MVT::v4i64, MVT::v4i32, 2 },
1939 { ISD::ZERO_EXTEND, MVT::v4i64, MVT::v4i32, 2 },
1940
1941 { ISD::TRUNCATE, MVT::v8i1, MVT::v8i32, 2 },
1942
1943 { ISD::TRUNCATE, MVT::v16i8, MVT::v8i16, 1 },
1944 { ISD::TRUNCATE, MVT::v16i8, MVT::v4i32, 1 },
1945 { ISD::TRUNCATE, MVT::v16i8, MVT::v2i64, 1 },
1946 { ISD::TRUNCATE, MVT::v16i8, MVT::v8i32, 4 },
1947 { ISD::TRUNCATE, MVT::v16i8, MVT::v4i64, 4 },
1948 { ISD::TRUNCATE, MVT::v8i16, MVT::v4i32, 1 },
1949 { ISD::TRUNCATE, MVT::v8i16, MVT::v2i64, 1 },
1950 { ISD::TRUNCATE, MVT::v8i16, MVT::v4i64, 5 },
1951 { ISD::TRUNCATE, MVT::v4i32, MVT::v4i64, 1 },
1952 { ISD::TRUNCATE, MVT::v8i16, MVT::v8i32, 2 },
1953
1954 { ISD::FP_EXTEND, MVT::v8f64, MVT::v8f32, 3 },
1955 { ISD::FP_ROUND, MVT::v8f32, MVT::v8f64, 3 },
1956
1957 { ISD::FP_TO_SINT, MVT::v16i16, MVT::v8f32, 1 },
1958 { ISD::FP_TO_SINT, MVT::v4i32, MVT::v4f64, 1 },
1959 { ISD::FP_TO_SINT, MVT::v8i32, MVT::v8f32, 1 },
1960 { ISD::FP_TO_SINT, MVT::v8i32, MVT::v8f64, 3 },
1961
1962 { ISD::FP_TO_UINT, MVT::i64, MVT::f32, 3 },
1963 { ISD::FP_TO_UINT, MVT::i64, MVT::f64, 3 },
1964 { ISD::FP_TO_UINT, MVT::v16i16, MVT::v8f32, 1 },
1965 { ISD::FP_TO_UINT, MVT::v4i32, MVT::v4f32, 3 },
1966 { ISD::FP_TO_UINT, MVT::v4i32, MVT::v2f64, 4 },
1967 { ISD::FP_TO_UINT, MVT::v4i32, MVT::v4f64, 4 },
1968 { ISD::FP_TO_UINT, MVT::v8i32, MVT::v8f32, 3 },
1969 { ISD::FP_TO_UINT, MVT::v8i32, MVT::v4f64, 4 },
1970
1971 { ISD::SINT_TO_FP, MVT::v2f64, MVT::v16i8, 2 },
1972 { ISD::SINT_TO_FP, MVT::v8f32, MVT::v16i8, 2 },
1973 { ISD::SINT_TO_FP, MVT::v2f64, MVT::v8i16, 2 },
1974 { ISD::SINT_TO_FP, MVT::v8f32, MVT::v8i16, 2 },
1975 { ISD::SINT_TO_FP, MVT::v4f64, MVT::v4i32, 1 },
1976 { ISD::SINT_TO_FP, MVT::v8f32, MVT::v8i32, 1 },
1977 { ISD::SINT_TO_FP, MVT::v8f64, MVT::v8i32, 3 },
1978
1979 { ISD::UINT_TO_FP, MVT::v2f64, MVT::v16i8, 2 },
1980 { ISD::UINT_TO_FP, MVT::v8f32, MVT::v16i8, 2 },
1981 { ISD::UINT_TO_FP, MVT::v2f64, MVT::v8i16, 2 },
1982 { ISD::UINT_TO_FP, MVT::v8f32, MVT::v8i16, 2 },
1983 { ISD::UINT_TO_FP, MVT::v2f32, MVT::v2i32, 2 },
1984 { ISD::UINT_TO_FP, MVT::v2f64, MVT::v2i32, 1 },
1985 { ISD::UINT_TO_FP, MVT::v4f32, MVT::v4i32, 2 },
1986 { ISD::UINT_TO_FP, MVT::v4f64, MVT::v4i32, 2 },
1987 { ISD::UINT_TO_FP, MVT::v8f32, MVT::v8i32, 2 },
1988 { ISD::UINT_TO_FP, MVT::v8f64, MVT::v8i32, 4 },
1989 };
1990
1991 static const TypeConversionCostTblEntry AVXConversionTbl[] = {
1992 { ISD::SIGN_EXTEND, MVT::v4i64, MVT::v4i1, 6 },
1993 { ISD::ZERO_EXTEND, MVT::v4i64, MVT::v4i1, 4 },
1994 { ISD::SIGN_EXTEND, MVT::v8i32, MVT::v8i1, 7 },
1995 { ISD::ZERO_EXTEND, MVT::v8i32, MVT::v8i1, 4 },
1996 { ISD::SIGN_EXTEND, MVT::v16i16, MVT::v16i1, 4 },
1997 { ISD::ZERO_EXTEND, MVT::v16i16, MVT::v16i1, 4 },
1998
1999 { ISD::SIGN_EXTEND, MVT::v4i64, MVT::v16i8, 3 },
2000 { ISD::ZERO_EXTEND, MVT::v4i64, MVT::v16i8, 3 },
2001 { ISD::SIGN_EXTEND, MVT::v8i32, MVT::v16i8, 3 },
2002 { ISD::ZERO_EXTEND, MVT::v8i32, MVT::v16i8, 3 },
2003 { ISD::SIGN_EXTEND, MVT::v16i16, MVT::v16i8, 3 },
2004 { ISD::ZERO_EXTEND, MVT::v16i16, MVT::v16i8, 3 },
2005 { ISD::SIGN_EXTEND, MVT::v4i64, MVT::v8i16, 3 },
2006 { ISD::ZERO_EXTEND, MVT::v4i64, MVT::v8i16, 3 },
2007 { ISD::SIGN_EXTEND, MVT::v8i32, MVT::v8i16, 3 },
2008 { ISD::ZERO_EXTEND, MVT::v8i32, MVT::v8i16, 3 },
2009 { ISD::SIGN_EXTEND, MVT::v4i64, MVT::v4i32, 3 },
2010 { ISD::ZERO_EXTEND, MVT::v4i64, MVT::v4i32, 3 },
2011
2012 { ISD::TRUNCATE, MVT::v4i1, MVT::v4i64, 4 },
2013 { ISD::TRUNCATE, MVT::v8i1, MVT::v8i32, 5 },
2014 { ISD::TRUNCATE, MVT::v16i1, MVT::v16i16, 4 },
2015 { ISD::TRUNCATE, MVT::v8i1, MVT::v8i64, 9 },
2016 { ISD::TRUNCATE, MVT::v16i1, MVT::v16i64, 11 },
2017
2018 { ISD::TRUNCATE, MVT::v16i8, MVT::v16i16, 2 }, // and+extract+packuswb
2019 { ISD::TRUNCATE, MVT::v16i8, MVT::v8i32, 5 },
2020 { ISD::TRUNCATE, MVT::v8i16, MVT::v8i32, 5 },
2021 { ISD::TRUNCATE, MVT::v16i8, MVT::v4i64, 5 },
2022 { ISD::TRUNCATE, MVT::v8i16, MVT::v4i64, 3 }, // and+extract+2*packusdw
2023 { ISD::TRUNCATE, MVT::v4i32, MVT::v4i64, 2 },
2024
2025 { ISD::SINT_TO_FP, MVT::v4f32, MVT::v4i1, 3 },
2026 { ISD::SINT_TO_FP, MVT::v4f64, MVT::v4i1, 3 },
2027 { ISD::SINT_TO_FP, MVT::v8f32, MVT::v8i1, 8 },
2028 { ISD::SINT_TO_FP, MVT::v8f32, MVT::v16i8, 4 },
2029 { ISD::SINT_TO_FP, MVT::v4f64, MVT::v16i8, 2 },
2030 { ISD::SINT_TO_FP, MVT::v8f32, MVT::v8i16, 4 },
2031 { ISD::SINT_TO_FP, MVT::v4f64, MVT::v8i16, 2 },
2032 { ISD::SINT_TO_FP, MVT::v4f64, MVT::v4i32, 2 },
2033 { ISD::SINT_TO_FP, MVT::v8f32, MVT::v8i32, 2 },
2034 { ISD::SINT_TO_FP, MVT::v8f64, MVT::v8i32, 4 },
2035 { ISD::SINT_TO_FP, MVT::v4f32, MVT::v2i64, 5 },
2036 { ISD::SINT_TO_FP, MVT::v4f32, MVT::v4i64, 8 },
2037
2038 { ISD::UINT_TO_FP, MVT::v4f32, MVT::v4i1, 7 },
2039 { ISD::UINT_TO_FP, MVT::v4f64, MVT::v4i1, 7 },
2040 { ISD::UINT_TO_FP, MVT::v8f32, MVT::v8i1, 6 },
2041 { ISD::UINT_TO_FP, MVT::v8f32, MVT::v16i8, 4 },
2042 { ISD::UINT_TO_FP, MVT::v4f64, MVT::v16i8, 2 },
2043 { ISD::UINT_TO_FP, MVT::v8f32, MVT::v8i16, 4 },
2044 { ISD::UINT_TO_FP, MVT::v4f64, MVT::v8i16, 2 },
2045 { ISD::UINT_TO_FP, MVT::v2f32, MVT::v2i32, 4 },
2046 { ISD::UINT_TO_FP, MVT::v2f64, MVT::v2i32, 4 },
2047 { ISD::UINT_TO_FP, MVT::v4f32, MVT::v4i32, 5 },
2048 { ISD::UINT_TO_FP, MVT::v4f64, MVT::v4i32, 6 },
2049 { ISD::UINT_TO_FP, MVT::v8f32, MVT::v8i32, 8 },
2050 { ISD::UINT_TO_FP, MVT::v8f64, MVT::v8i32, 10 },
2051 { ISD::UINT_TO_FP, MVT::v2f32, MVT::v2i64, 10 },
2052 { ISD::UINT_TO_FP, MVT::v4f32, MVT::v4i64, 18 },
2053 { ISD::UINT_TO_FP, MVT::v2f64, MVT::v2i64, 5 },
2054 { ISD::UINT_TO_FP, MVT::v4f64, MVT::v4i64, 10 },
2055
2056 { ISD::FP_TO_SINT, MVT::v16i8, MVT::v8f32, 2 },
2057 { ISD::FP_TO_SINT, MVT::v16i8, MVT::v4f64, 2 },
2058 { ISD::FP_TO_SINT, MVT::v32i8, MVT::v8f32, 2 },
2059 { ISD::FP_TO_SINT, MVT::v32i8, MVT::v4f64, 2 },
2060 { ISD::FP_TO_SINT, MVT::v8i16, MVT::v8f32, 2 },
2061 { ISD::FP_TO_SINT, MVT::v8i16, MVT::v4f64, 2 },
2062 { ISD::FP_TO_SINT, MVT::v16i16, MVT::v8f32, 2 },
2063 { ISD::FP_TO_SINT, MVT::v16i16, MVT::v4f64, 2 },
2064 { ISD::FP_TO_SINT, MVT::v4i32, MVT::v4f64, 2 },
2065 { ISD::FP_TO_SINT, MVT::v8i32, MVT::v8f32, 2 },
2066 { ISD::FP_TO_SINT, MVT::v8i32, MVT::v8f64, 5 },
2067
2068 { ISD::FP_TO_UINT, MVT::v16i8, MVT::v8f32, 2 },
2069 { ISD::FP_TO_UINT, MVT::v16i8, MVT::v4f64, 2 },
2070 { ISD::FP_TO_UINT, MVT::v32i8, MVT::v8f32, 2 },
2071 { ISD::FP_TO_UINT, MVT::v32i8, MVT::v4f64, 2 },
2072 { ISD::FP_TO_UINT, MVT::v8i16, MVT::v8f32, 2 },
2073 { ISD::FP_TO_UINT, MVT::v8i16, MVT::v4f64, 2 },
2074 { ISD::FP_TO_UINT, MVT::v16i16, MVT::v8f32, 2 },
2075 { ISD::FP_TO_UINT, MVT::v16i16, MVT::v4f64, 2 },
2076 { ISD::FP_TO_UINT, MVT::v4i32, MVT::v4f32, 3 },
2077 { ISD::FP_TO_UINT, MVT::v4i32, MVT::v2f64, 4 },
2078 { ISD::FP_TO_UINT, MVT::v4i32, MVT::v4f64, 6 },
2079 { ISD::FP_TO_UINT, MVT::v8i32, MVT::v8f32, 7 },
2080 { ISD::FP_TO_UINT, MVT::v8i32, MVT::v4f64, 7 },
2081
2082 { ISD::FP_EXTEND, MVT::v4f64, MVT::v4f32, 1 },
2083 { ISD::FP_ROUND, MVT::v4f32, MVT::v4f64, 1 },
2084 };
2085
2086 static const TypeConversionCostTblEntry SSE41ConversionTbl[] = {
2087 { ISD::ZERO_EXTEND, MVT::v2i64, MVT::v16i8, 1 },
2088 { ISD::SIGN_EXTEND, MVT::v2i64, MVT::v16i8, 1 },
2089 { ISD::ZERO_EXTEND, MVT::v4i32, MVT::v16i8, 1 },
2090 { ISD::SIGN_EXTEND, MVT::v4i32, MVT::v16i8, 1 },
2091 { ISD::ZERO_EXTEND, MVT::v8i16, MVT::v16i8, 1 },
2092 { ISD::SIGN_EXTEND, MVT::v8i16, MVT::v16i8, 1 },
2093 { ISD::ZERO_EXTEND, MVT::v2i64, MVT::v8i16, 1 },
2094 { ISD::SIGN_EXTEND, MVT::v2i64, MVT::v8i16, 1 },
2095 { ISD::ZERO_EXTEND, MVT::v4i32, MVT::v8i16, 1 },
2096 { ISD::SIGN_EXTEND, MVT::v4i32, MVT::v8i16, 1 },
2097 { ISD::ZERO_EXTEND, MVT::v2i64, MVT::v4i32, 1 },
2098 { ISD::SIGN_EXTEND, MVT::v2i64, MVT::v4i32, 1 },
2099
2100 // These truncates end up widening elements.
2101 { ISD::TRUNCATE, MVT::v2i1, MVT::v2i8, 1 }, // PMOVXZBQ
2102 { ISD::TRUNCATE, MVT::v2i1, MVT::v2i16, 1 }, // PMOVXZWQ
2103 { ISD::TRUNCATE, MVT::v4i1, MVT::v4i8, 1 }, // PMOVXZBD
2104
2105 { ISD::TRUNCATE, MVT::v16i8, MVT::v4i32, 2 },
2106 { ISD::TRUNCATE, MVT::v8i16, MVT::v4i32, 2 },
2107 { ISD::TRUNCATE, MVT::v16i8, MVT::v2i64, 2 },
2108
2109 { ISD::SINT_TO_FP, MVT::f32, MVT::i32, 1 },
2110 { ISD::SINT_TO_FP, MVT::f64, MVT::i32, 1 },
2111 { ISD::SINT_TO_FP, MVT::f32, MVT::i64, 1 },
2112 { ISD::SINT_TO_FP, MVT::f64, MVT::i64, 1 },
2113 { ISD::SINT_TO_FP, MVT::v4f32, MVT::v16i8, 1 },
2114 { ISD::SINT_TO_FP, MVT::v2f64, MVT::v16i8, 1 },
2115 { ISD::SINT_TO_FP, MVT::v4f32, MVT::v8i16, 1 },
2116 { ISD::SINT_TO_FP, MVT::v2f64, MVT::v8i16, 1 },
2117 { ISD::SINT_TO_FP, MVT::v4f32, MVT::v4i32, 1 },
2118 { ISD::SINT_TO_FP, MVT::v2f64, MVT::v4i32, 1 },
2119 { ISD::SINT_TO_FP, MVT::v4f64, MVT::v4i32, 2 },
2120
2121 { ISD::UINT_TO_FP, MVT::f32, MVT::i32, 1 },
2122 { ISD::UINT_TO_FP, MVT::f64, MVT::i32, 1 },
2123 { ISD::UINT_TO_FP, MVT::f32, MVT::i64, 4 },
2124 { ISD::UINT_TO_FP, MVT::f64, MVT::i64, 4 },
2125 { ISD::UINT_TO_FP, MVT::v4f32, MVT::v16i8, 1 },
2126 { ISD::UINT_TO_FP, MVT::v2f64, MVT::v16i8, 1 },
2127 { ISD::UINT_TO_FP, MVT::v4f32, MVT::v8i16, 1 },
2128 { ISD::UINT_TO_FP, MVT::v2f64, MVT::v8i16, 1 },
2129 { ISD::UINT_TO_FP, MVT::v2f32, MVT::v2i32, 3 },
2130 { ISD::UINT_TO_FP, MVT::v4f32, MVT::v4i32, 3 },
2131 { ISD::UINT_TO_FP, MVT::v2f64, MVT::v4i32, 2 },
2132 { ISD::UINT_TO_FP, MVT::v4f32, MVT::v2i64, 12 },
2133 { ISD::UINT_TO_FP, MVT::v4f32, MVT::v4i64, 22 },
2134 { ISD::UINT_TO_FP, MVT::v2f64, MVT::v2i64, 4 },
2135
2136 { ISD::FP_TO_SINT, MVT::i32, MVT::f32, 1 },
2137 { ISD::FP_TO_SINT, MVT::i64, MVT::f32, 1 },
2138 { ISD::FP_TO_SINT, MVT::i32, MVT::f64, 1 },
2139 { ISD::FP_TO_SINT, MVT::i64, MVT::f64, 1 },
2140 { ISD::FP_TO_SINT, MVT::v16i8, MVT::v4f32, 2 },
2141 { ISD::FP_TO_SINT, MVT::v16i8, MVT::v2f64, 2 },
2142 { ISD::FP_TO_SINT, MVT::v8i16, MVT::v4f32, 1 },
2143 { ISD::FP_TO_SINT, MVT::v8i16, MVT::v2f64, 1 },
2144 { ISD::FP_TO_SINT, MVT::v4i32, MVT::v4f32, 1 },
2145 { ISD::FP_TO_SINT, MVT::v4i32, MVT::v2f64, 1 },
2146
2147 { ISD::FP_TO_UINT, MVT::i32, MVT::f32, 1 },
2148 { ISD::FP_TO_UINT, MVT::i64, MVT::f32, 4 },
2149 { ISD::FP_TO_UINT, MVT::i32, MVT::f64, 1 },
2150 { ISD::FP_TO_UINT, MVT::i64, MVT::f64, 4 },
2151 { ISD::FP_TO_UINT, MVT::v16i8, MVT::v4f32, 2 },
2152 { ISD::FP_TO_UINT, MVT::v16i8, MVT::v2f64, 2 },
2153 { ISD::FP_TO_UINT, MVT::v8i16, MVT::v4f32, 1 },
2154 { ISD::FP_TO_UINT, MVT::v8i16, MVT::v2f64, 1 },
2155 { ISD::FP_TO_UINT, MVT::v4i32, MVT::v4f32, 4 },
2156 { ISD::FP_TO_UINT, MVT::v4i32, MVT::v2f64, 4 },
2157 };
2158
2159 static const TypeConversionCostTblEntry SSE2ConversionTbl[] = {
2160 // These are somewhat magic numbers justified by comparing the
2161 // output of llvm-mca for our various supported scheduler models
2162 // and basing it off the worst case scenario.
2163 { ISD::SINT_TO_FP, MVT::f32, MVT::i32, 3 },
2164 { ISD::SINT_TO_FP, MVT::f64, MVT::i32, 3 },
2165 { ISD::SINT_TO_FP, MVT::f32, MVT::i64, 3 },
2166 { ISD::SINT_TO_FP, MVT::f64, MVT::i64, 3 },
2167 { ISD::SINT_TO_FP, MVT::v4f32, MVT::v16i8, 3 },
2168 { ISD::SINT_TO_FP, MVT::v2f64, MVT::v16i8, 4 },
2169 { ISD::SINT_TO_FP, MVT::v4f32, MVT::v8i16, 3 },
2170 { ISD::SINT_TO_FP, MVT::v2f64, MVT::v8i16, 4 },
2171 { ISD::SINT_TO_FP, MVT::v4f32, MVT::v4i32, 3 },
2172 { ISD::SINT_TO_FP, MVT::v2f64, MVT::v4i32, 4 },
2173 { ISD::SINT_TO_FP, MVT::v4f32, MVT::v2i64, 8 },
2174 { ISD::SINT_TO_FP, MVT::v2f64, MVT::v2i64, 8 },
2175
2176 { ISD::UINT_TO_FP, MVT::f32, MVT::i32, 3 },
2177 { ISD::UINT_TO_FP, MVT::f64, MVT::i32, 3 },
2178 { ISD::UINT_TO_FP, MVT::f32, MVT::i64, 8 },
2179 { ISD::UINT_TO_FP, MVT::f64, MVT::i64, 9 },
2180 { ISD::UINT_TO_FP, MVT::v2f64, MVT::v16i8, 4 },
2181 { ISD::UINT_TO_FP, MVT::v4f32, MVT::v16i8, 4 },
2182 { ISD::UINT_TO_FP, MVT::v4f32, MVT::v8i16, 4 },
2183 { ISD::UINT_TO_FP, MVT::v2f64, MVT::v8i16, 4 },
2184 { ISD::UINT_TO_FP, MVT::v2f32, MVT::v2i32, 7 },
2185 { ISD::UINT_TO_FP, MVT::v2f64, MVT::v4i32, 7 },
2186 { ISD::UINT_TO_FP, MVT::v4f32, MVT::v4i32, 5 },
2187 { ISD::UINT_TO_FP, MVT::v2f64, MVT::v2i64, 15 },
2188 { ISD::UINT_TO_FP, MVT::v4f32, MVT::v2i64, 18 },
2189
2190 { ISD::FP_TO_SINT, MVT::i32, MVT::f32, 4 },
2191 { ISD::FP_TO_SINT, MVT::i64, MVT::f32, 4 },
2192 { ISD::FP_TO_SINT, MVT::i32, MVT::f64, 4 },
2193 { ISD::FP_TO_SINT, MVT::i64, MVT::f64, 4 },
2194 { ISD::FP_TO_SINT, MVT::v16i8, MVT::v4f32, 6 },
2195 { ISD::FP_TO_SINT, MVT::v16i8, MVT::v2f64, 6 },
2196 { ISD::FP_TO_SINT, MVT::v8i16, MVT::v4f32, 5 },
2197 { ISD::FP_TO_SINT, MVT::v8i16, MVT::v2f64, 5 },
2198 { ISD::FP_TO_SINT, MVT::v4i32, MVT::v4f32, 4 },
2199 { ISD::FP_TO_SINT, MVT::v4i32, MVT::v2f64, 4 },
2200
2201 { ISD::FP_TO_UINT, MVT::i32, MVT::f32, 4 },
2202 { ISD::FP_TO_UINT, MVT::i64, MVT::f32, 4 },
2203 { ISD::FP_TO_UINT, MVT::i32, MVT::f64, 4 },
2204 { ISD::FP_TO_UINT, MVT::i64, MVT::f64, 15 },
2205 { ISD::FP_TO_UINT, MVT::v16i8, MVT::v4f32, 6 },
2206 { ISD::FP_TO_UINT, MVT::v16i8, MVT::v2f64, 6 },
2207 { ISD::FP_TO_UINT, MVT::v8i16, MVT::v4f32, 5 },
2208 { ISD::FP_TO_UINT, MVT::v8i16, MVT::v2f64, 5 },
2209 { ISD::FP_TO_UINT, MVT::v4i32, MVT::v4f32, 8 },
2210 { ISD::FP_TO_UINT, MVT::v4i32, MVT::v2f64, 8 },
2211
2212 { ISD::ZERO_EXTEND, MVT::v2i64, MVT::v16i8, 4 },
2213 { ISD::SIGN_EXTEND, MVT::v2i64, MVT::v16i8, 4 },
2214 { ISD::ZERO_EXTEND, MVT::v4i32, MVT::v16i8, 2 },
2215 { ISD::SIGN_EXTEND, MVT::v4i32, MVT::v16i8, 3 },
2216 { ISD::ZERO_EXTEND, MVT::v8i16, MVT::v16i8, 1 },
2217 { ISD::SIGN_EXTEND, MVT::v8i16, MVT::v16i8, 2 },
2218 { ISD::ZERO_EXTEND, MVT::v2i64, MVT::v8i16, 2 },
2219 { ISD::SIGN_EXTEND, MVT::v2i64, MVT::v8i16, 3 },
2220 { ISD::ZERO_EXTEND, MVT::v4i32, MVT::v8i16, 1 },
2221 { ISD::SIGN_EXTEND, MVT::v4i32, MVT::v8i16, 2 },
2222 { ISD::ZERO_EXTEND, MVT::v2i64, MVT::v4i32, 1 },
2223 { ISD::SIGN_EXTEND, MVT::v2i64, MVT::v4i32, 2 },
2224
2225 // These truncates are really widening elements.
2226 { ISD::TRUNCATE, MVT::v2i1, MVT::v2i32, 1 }, // PSHUFD
2227 { ISD::TRUNCATE, MVT::v2i1, MVT::v2i16, 2 }, // PUNPCKLWD+DQ
2228 { ISD::TRUNCATE, MVT::v2i1, MVT::v2i8, 3 }, // PUNPCKLBW+WD+PSHUFD
2229 { ISD::TRUNCATE, MVT::v4i1, MVT::v4i16, 1 }, // PUNPCKLWD
2230 { ISD::TRUNCATE, MVT::v4i1, MVT::v4i8, 2 }, // PUNPCKLBW+WD
2231 { ISD::TRUNCATE, MVT::v8i1, MVT::v8i8, 1 }, // PUNPCKLBW
2232
2233 { ISD::TRUNCATE, MVT::v16i8, MVT::v8i16, 2 }, // PAND+PACKUSWB
2234 { ISD::TRUNCATE, MVT::v16i8, MVT::v16i16, 3 },
2235 { ISD::TRUNCATE, MVT::v16i8, MVT::v4i32, 3 }, // PAND+2*PACKUSWB
2236 { ISD::TRUNCATE, MVT::v16i8, MVT::v16i32, 7 },
2237 { ISD::TRUNCATE, MVT::v2i16, MVT::v2i32, 1 },
2238 { ISD::TRUNCATE, MVT::v8i16, MVT::v4i32, 3 },
2239 { ISD::TRUNCATE, MVT::v8i16, MVT::v8i32, 5 },
2240    { ISD::TRUNCATE,    MVT::v16i16, MVT::v16i32, 10 },
2241 { ISD::TRUNCATE, MVT::v16i8, MVT::v2i64, 4 }, // PAND+3*PACKUSWB
2242 { ISD::TRUNCATE, MVT::v8i16, MVT::v2i64, 2 }, // PSHUFD+PSHUFLW
2243 { ISD::TRUNCATE, MVT::v4i32, MVT::v2i64, 1 }, // PSHUFD
2244 };
2245
2246 // Attempt to map directly to (simple) MVT types to let us match custom entries.
2247 EVT SrcTy = TLI->getValueType(DL, Src);
2248 EVT DstTy = TLI->getValueType(DL, Dst);
2249
2250 // The function getSimpleVT only handles simple value types.
2251 if (SrcTy.isSimple() && DstTy.isSimple()) {
2252 MVT SimpleSrcTy = SrcTy.getSimpleVT();
2253 MVT SimpleDstTy = DstTy.getSimpleVT();
2254
2255 if (ST->useAVX512Regs()) {
2256 if (ST->hasBWI())
2257 if (const auto *Entry = ConvertCostTableLookup(
2258 AVX512BWConversionTbl, ISD, SimpleDstTy, SimpleSrcTy))
2259 return AdjustCost(Entry->Cost);
2260
2261 if (ST->hasDQI())
2262 if (const auto *Entry = ConvertCostTableLookup(
2263 AVX512DQConversionTbl, ISD, SimpleDstTy, SimpleSrcTy))
2264 return AdjustCost(Entry->Cost);
2265
2266 if (ST->hasAVX512())
2267 if (const auto *Entry = ConvertCostTableLookup(
2268 AVX512FConversionTbl, ISD, SimpleDstTy, SimpleSrcTy))
2269 return AdjustCost(Entry->Cost);
2270 }
2271
2272 if (ST->hasBWI())
2273 if (const auto *Entry = ConvertCostTableLookup(
2274 AVX512BWVLConversionTbl, ISD, SimpleDstTy, SimpleSrcTy))
2275 return AdjustCost(Entry->Cost);
2276
2277 if (ST->hasDQI())
2278 if (const auto *Entry = ConvertCostTableLookup(
2279 AVX512DQVLConversionTbl, ISD, SimpleDstTy, SimpleSrcTy))
2280 return AdjustCost(Entry->Cost);
2281
2282 if (ST->hasAVX512())
2283 if (const auto *Entry = ConvertCostTableLookup(AVX512VLConversionTbl, ISD,
2284 SimpleDstTy, SimpleSrcTy))
2285 return AdjustCost(Entry->Cost);
2286
2287 if (ST->hasAVX2()) {
2288 if (const auto *Entry = ConvertCostTableLookup(AVX2ConversionTbl, ISD,
2289 SimpleDstTy, SimpleSrcTy))
2290 return AdjustCost(Entry->Cost);
2291 }
2292
2293 if (ST->hasAVX()) {
2294 if (const auto *Entry = ConvertCostTableLookup(AVXConversionTbl, ISD,
2295 SimpleDstTy, SimpleSrcTy))
2296 return AdjustCost(Entry->Cost);
2297 }
2298
2299 if (ST->hasSSE41()) {
2300 if (const auto *Entry = ConvertCostTableLookup(SSE41ConversionTbl, ISD,
2301 SimpleDstTy, SimpleSrcTy))
2302 return AdjustCost(Entry->Cost);
2303 }
2304
2305 if (ST->hasSSE2()) {
2306 if (const auto *Entry = ConvertCostTableLookup(SSE2ConversionTbl, ISD,
2307 SimpleDstTy, SimpleSrcTy))
2308 return AdjustCost(Entry->Cost);
2309 }
2310 }
2311
2312 // Fall back to legalized types.
2313 std::pair<InstructionCost, MVT> LTSrc = TLI->getTypeLegalizationCost(DL, Src);
2314 std::pair<InstructionCost, MVT> LTDest =
2315 TLI->getTypeLegalizationCost(DL, Dst);
2316
2317 if (ST->useAVX512Regs()) {
2318 if (ST->hasBWI())
2319 if (const auto *Entry = ConvertCostTableLookup(
2320 AVX512BWConversionTbl, ISD, LTDest.second, LTSrc.second))
2321 return AdjustCost(std::max(LTSrc.first, LTDest.first) * Entry->Cost);
2322
2323 if (ST->hasDQI())
2324 if (const auto *Entry = ConvertCostTableLookup(
2325 AVX512DQConversionTbl, ISD, LTDest.second, LTSrc.second))
2326 return AdjustCost(std::max(LTSrc.first, LTDest.first) * Entry->Cost);
2327
2328 if (ST->hasAVX512())
2329 if (const auto *Entry = ConvertCostTableLookup(
2330 AVX512FConversionTbl, ISD, LTDest.second, LTSrc.second))
2331 return AdjustCost(std::max(LTSrc.first, LTDest.first) * Entry->Cost);
2332 }
2333
2334 if (ST->hasBWI())
2335 if (const auto *Entry = ConvertCostTableLookup(AVX512BWVLConversionTbl, ISD,
2336 LTDest.second, LTSrc.second))
2337 return AdjustCost(std::max(LTSrc.first, LTDest.first) * Entry->Cost);
2338
2339 if (ST->hasDQI())
2340 if (const auto *Entry = ConvertCostTableLookup(AVX512DQVLConversionTbl, ISD,
2341 LTDest.second, LTSrc.second))
2342 return AdjustCost(std::max(LTSrc.first, LTDest.first) * Entry->Cost);
2343
2344 if (ST->hasAVX512())
2345 if (const auto *Entry = ConvertCostTableLookup(AVX512VLConversionTbl, ISD,
2346 LTDest.second, LTSrc.second))
2347 return AdjustCost(std::max(LTSrc.first, LTDest.first) * Entry->Cost);
2348
2349 if (ST->hasAVX2())
2350 if (const auto *Entry = ConvertCostTableLookup(AVX2ConversionTbl, ISD,
2351 LTDest.second, LTSrc.second))
2352 return AdjustCost(std::max(LTSrc.first, LTDest.first) * Entry->Cost);
2353
2354 if (ST->hasAVX())
2355 if (const auto *Entry = ConvertCostTableLookup(AVXConversionTbl, ISD,
2356 LTDest.second, LTSrc.second))
2357 return AdjustCost(std::max(LTSrc.first, LTDest.first) * Entry->Cost);
2358
2359 if (ST->hasSSE41())
2360 if (const auto *Entry = ConvertCostTableLookup(SSE41ConversionTbl, ISD,
2361 LTDest.second, LTSrc.second))
2362 return AdjustCost(std::max(LTSrc.first, LTDest.first) * Entry->Cost);
2363
2364 if (ST->hasSSE2())
2365 if (const auto *Entry = ConvertCostTableLookup(SSE2ConversionTbl, ISD,
2366 LTDest.second, LTSrc.second))
2367 return AdjustCost(std::max(LTSrc.first, LTDest.first) * Entry->Cost);
2368
2369  // Fallback: for i8/i16 sitofp/uitofp cases we need to extend to i32 first
2370  // and then use sitofp.
2371 if ((ISD == ISD::SINT_TO_FP || ISD == ISD::UINT_TO_FP) &&
2372 1 < Src->getScalarSizeInBits() && Src->getScalarSizeInBits() < 32) {
2373 Type *ExtSrc = Src->getWithNewBitWidth(32);
2374 unsigned ExtOpc =
2375 (ISD == ISD::SINT_TO_FP) ? Instruction::SExt : Instruction::ZExt;
2376
2377 // For scalar loads the extend would be free.
2378 InstructionCost ExtCost = 0;
2379 if (!(Src->isIntegerTy() && I && isa<LoadInst>(I->getOperand(0))))
2380 ExtCost = getCastInstrCost(ExtOpc, ExtSrc, Src, CCH, CostKind);
2381
2382 return ExtCost + getCastInstrCost(Instruction::SIToFP, Dst, ExtSrc,
2383 TTI::CastContextHint::None, CostKind);
2384 }
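  // For example, a uitofp from i16 is modelled here as a zext to i32 followed by
  // a sitofp from i32; the extend is treated as free when the scalar source is
  // fed directly by a load.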
2385
2386  // Fallback: for fptosi/fptoui i8/i16 cases we convert via fptosi to i32 and
2387  // then truncate down to the destination width.
2388 if ((ISD == ISD::FP_TO_SINT || ISD == ISD::FP_TO_UINT) &&
2389 1 < Dst->getScalarSizeInBits() && Dst->getScalarSizeInBits() < 32) {
2390 Type *TruncDst = Dst->getWithNewBitWidth(32);
2391 return getCastInstrCost(Instruction::FPToSI, TruncDst, Src, CCH, CostKind) +
2392 getCastInstrCost(Instruction::Trunc, Dst, TruncDst,
2393 TTI::CastContextHint::None, CostKind);
2394 }
2395
2396 return AdjustCost(
2397 BaseT::getCastInstrCost(Opcode, Dst, Src, CCH, CostKind, I));
2398}
2399
2400InstructionCost X86TTIImpl::getCmpSelInstrCost(unsigned Opcode, Type *ValTy,
2401 Type *CondTy,
2402 CmpInst::Predicate VecPred,
2403 TTI::TargetCostKind CostKind,
2404 const Instruction *I) {
2405 // TODO: Handle other cost kinds.
2406 if (CostKind != TTI::TCK_RecipThroughput)
2407 return BaseT::getCmpSelInstrCost(Opcode, ValTy, CondTy, VecPred, CostKind,
2408 I);
2409
2410 // Legalize the type.
2411 std::pair<InstructionCost, MVT> LT = TLI->getTypeLegalizationCost(DL, ValTy);
2412
2413 MVT MTy = LT.second;
2414
2415 int ISD = TLI->InstructionOpcodeToISD(Opcode);
2416 assert(ISD && "Invalid opcode");
2417
2418 unsigned ExtraCost = 0;
2419 if (I && (Opcode == Instruction::ICmp || Opcode == Instruction::FCmp)) {
2420 // Some vector comparison predicates cost extra instructions.
2421 if (MTy.isVector() &&
2422 !((ST->hasXOP() && (!ST->hasAVX2() || MTy.is128BitVector())) ||
2423 (ST->hasAVX512() && 32 <= MTy.getScalarSizeInBits()) ||
2424 ST->hasBWI())) {
2425 switch (cast<CmpInst>(I)->getPredicate()) {
2426 case CmpInst::Predicate::ICMP_NE:
2427 // xor(cmpeq(x,y),-1)
2428 ExtraCost = 1;
2429 break;
2430 case CmpInst::Predicate::ICMP_SGE:
2431 case CmpInst::Predicate::ICMP_SLE:
2432 // xor(cmpgt(x,y),-1)
2433 ExtraCost = 1;
2434 break;
2435 case CmpInst::Predicate::ICMP_ULT:
2436 case CmpInst::Predicate::ICMP_UGT:
2437 // cmpgt(xor(x,signbit),xor(y,signbit))
2438 // xor(cmpeq(pmaxu(x,y),x),-1)
2439 ExtraCost = 2;
2440 break;
2441 case CmpInst::Predicate::ICMP_ULE:
2442 case CmpInst::Predicate::ICMP_UGE:
2443 if ((ST->hasSSE41() && MTy.getScalarSizeInBits() == 32) ||
2444 (ST->hasSSE2() && MTy.getScalarSizeInBits() < 32)) {
2445 // cmpeq(psubus(x,y),0)
2446 // cmpeq(pminu(x,y),x)
2447 ExtraCost = 1;
2448 } else {
2449 // xor(cmpgt(xor(x,signbit),xor(y,signbit)),-1)
2450 ExtraCost = 3;
2451 }
2452 break;
2453 default:
2454 break;
2455 }
2456 }
2457 }
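// Illustrative example (subtarget assumed): an icmp ugt on <16 x i8> with
// only SSE2 has no native unsigned compare, so ExtraCost = 2 models the
// cmpgt(xor(x,signbit),xor(y,signbit)) expansion; it is added to the SSE2
// table cost of 1 for SETCC v16i8 and scaled by LT.first below.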
2458
2459 static const CostTblEntry SLMCostTbl[] = {
2460 // slm pcmpeq/pcmpgt throughput is 2
2461 { ISD::SETCC, MVT::v2i64, 2 },
2462 };
2463
2464 static const CostTblEntry AVX512BWCostTbl[] = {
2465 { ISD::SETCC, MVT::v32i16, 1 },
2466 { ISD::SETCC, MVT::v64i8, 1 },
2467
2468 { ISD::SELECT, MVT::v32i16, 1 },
2469 { ISD::SELECT, MVT::v64i8, 1 },
2470 };
2471
2472 static const CostTblEntry AVX512CostTbl[] = {
2473 { ISD::SETCC, MVT::v8i64, 1 },
2474 { ISD::SETCC, MVT::v16i32, 1 },
2475 { ISD::SETCC, MVT::v8f64, 1 },
2476 { ISD::SETCC, MVT::v16f32, 1 },
2477
2478 { ISD::SELECT, MVT::v8i64, 1 },
2479 { ISD::SELECT, MVT::v16i32, 1 },
2480 { ISD::SELECT, MVT::v8f64, 1 },
2481 { ISD::SELECT, MVT::v16f32, 1 },
2482
2483 { ISD::SETCC, MVT::v32i16, 2 }, // FIXME: should probably be 4
2484 { ISD::SETCC, MVT::v64i8, 2 }, // FIXME: should probably be 4
2485
2486 { ISD::SELECT, MVT::v32i16, 2 }, // FIXME: should be 3
2487 { ISD::SELECT, MVT::v64i8, 2 }, // FIXME: should be 3
2488 };
2489
2490 static const CostTblEntry AVX2CostTbl[] = {
2491 { ISD::SETCC, MVT::v4i64, 1 },
2492 { ISD::SETCC, MVT::v8i32, 1 },
2493 { ISD::SETCC, MVT::v16i16, 1 },
2494 { ISD::SETCC, MVT::v32i8, 1 },
2495
2496 { ISD::SELECT, MVT::v4i64, 1 }, // pblendvb
2497 { ISD::SELECT, MVT::v8i32, 1 }, // pblendvb
2498 { ISD::SELECT, MVT::v16i16, 1 }, // pblendvb
2499 { ISD::SELECT, MVT::v32i8, 1 }, // pblendvb
2500 };
2501
2502 static const CostTblEntry AVX1CostTbl[] = {
2503 { ISD::SETCC, MVT::v4f64, 1 },
2504 { ISD::SETCC, MVT::v8f32, 1 },
2505 // AVX1 does not support 8-wide integer compare.
2506 { ISD::SETCC, MVT::v4i64, 4 },
2507 { ISD::SETCC, MVT::v8i32, 4 },
2508 { ISD::SETCC, MVT::v16i16, 4 },
2509 { ISD::SETCC, MVT::v32i8, 4 },
2510
2511 { ISD::SELECT, MVT::v4f64, 1 }, // vblendvpd
2512 { ISD::SELECT, MVT::v8f32, 1 }, // vblendvps
2513 { ISD::SELECT, MVT::v4i64, 1 }, // vblendvpd
2514 { ISD::SELECT, MVT::v8i32, 1 }, // vblendvps
2515 { ISD::SELECT, MVT::v16i16, 3 }, // vandps + vandnps + vorps
2516 { ISD::SELECT, MVT::v32i8, 3 }, // vandps + vandnps + vorps
2517 };
2518
2519 static const CostTblEntry SSE42CostTbl[] = {
2520 { ISD::SETCC, MVT::v2f64, 1 },
2521 { ISD::SETCC, MVT::v4f32, 1 },
2522 { ISD::SETCC, MVT::v2i64, 1 },
2523 };
2524
2525 static const CostTblEntry SSE41CostTbl[] = {
2526 { ISD::SELECT, MVT::v2f64, 1 }, // blendvpd
2527 { ISD::SELECT, MVT::v4f32, 1 }, // blendvps
2528 { ISD::SELECT, MVT::v2i64, 1 }, // pblendvb
2529 { ISD::SELECT, MVT::v4i32, 1 }, // pblendvb
2530 { ISD::SELECT, MVT::v8i16, 1 }, // pblendvb
2531 { ISD::SELECT, MVT::v16i8, 1 }, // pblendvb
2532 };
2533
2534 static const CostTblEntry SSE2CostTbl[] = {
2535 { ISD::SETCC, MVT::v2f64, 2 },
2536 { ISD::SETCC, MVT::f64, 1 },
2537 { ISD::SETCC, MVT::v2i64, 8 },
2538 { ISD::SETCC, MVT::v4i32, 1 },
2539 { ISD::SETCC, MVT::v8i16, 1 },
2540 { ISD::SETCC, MVT::v16i8, 1 },
2541
2542 { ISD::SELECT, MVT::v2f64, 3 }, // andpd + andnpd + orpd
2543 { ISD::SELECT, MVT::v2i64, 3 }, // pand + pandn + por
2544 { ISD::SELECT, MVT::v4i32, 3 }, // pand + pandn + por
2545 { ISD::SELECT, MVT::v8i16, 3 }, // pand + pandn + por
2546 { ISD::SELECT, MVT::v16i8, 3 }, // pand + pandn + por
2547 };
2548
2549 static const CostTblEntry SSE1CostTbl[] = {
2550 { ISD::SETCC, MVT::v4f32, 2 },
2551 { ISD::SETCC, MVT::f32, 1 },
2552
2553 { ISD::SELECT, MVT::v4f32, 3 }, // andps + andnps + orps
2554 };
2555
2556 if (ST->isSLM())
2557 if (const auto *Entry = CostTableLookup(SLMCostTbl, ISD, MTy))
2558 return LT.first * (ExtraCost + Entry->Cost);
2559
2560 if (ST->hasBWI())
2561 if (const auto *Entry = CostTableLookup(AVX512BWCostTbl, ISD, MTy))
2562 return LT.first * (ExtraCost + Entry->Cost);
2563
2564 if (ST->hasAVX512())
2565 if (const auto *Entry = CostTableLookup(AVX512CostTbl, ISD, MTy))
2566 return LT.first * (ExtraCost + Entry->Cost);
2567
2568 if (ST->hasAVX2())
2569 if (const auto *Entry = CostTableLookup(AVX2CostTbl, ISD, MTy))
2570 return LT.first * (ExtraCost + Entry->Cost);
2571
2572 if (ST->hasAVX())
2573 if (const auto *Entry = CostTableLookup(AVX1CostTbl, ISD, MTy))
2574 return LT.first * (ExtraCost + Entry->Cost);
2575
2576 if (ST->hasSSE42())
2577 if (const auto *Entry = CostTableLookup(SSE42CostTbl, ISD, MTy))
2578 return LT.first * (ExtraCost + Entry->Cost);
2579
2580 if (ST->hasSSE41())
2581 if (const auto *Entry = CostTableLookup(SSE41CostTbl, ISD, MTy))
2582 return LT.first * (ExtraCost + Entry->Cost);
2583
2584 if (ST->hasSSE2())
2585 if (const auto *Entry = CostTableLookup(SSE2CostTbl, ISD, MTy))
2586 return LT.first * (ExtraCost + Entry->Cost);
2587
2588 if (ST->hasSSE1())
2589 if (const auto *Entry = CostTableLookup(SSE1CostTbl, ISD, MTy))
2590 return LT.first * (ExtraCost + Entry->Cost);
2591
2592 return BaseT::getCmpSelInstrCost(Opcode, ValTy, CondTy, VecPred, CostKind, I);
2593}
2594
2595unsigned X86TTIImpl::getAtomicMemIntrinsicMaxElementSize() const { return 16; }
2596
2597InstructionCost
2598X86TTIImpl::getTypeBasedIntrinsicInstrCost(const IntrinsicCostAttributes &ICA,
2599 TTI::TargetCostKind CostKind) {
2600
2601 // Costs should match the codegen from:
2602 // BITREVERSE: llvm\test\CodeGen\X86\vector-bitreverse.ll
2603 // BSWAP: llvm\test\CodeGen\X86\bswap-vector.ll
2604 // CTLZ: llvm\test\CodeGen\X86\vector-lzcnt-*.ll
2605 // CTPOP: llvm\test\CodeGen\X86\vector-popcnt-*.ll
2606 // CTTZ: llvm\test\CodeGen\X86\vector-tzcnt-*.ll
2607
2608 // TODO: Overflow intrinsics (*ADDO, *SUBO, *MULO) with vector types are not
2609 // specialized in these tables yet.
2610 static const CostTblEntry AVX512BITALGCostTbl[] = {
2611 { ISD::CTPOP, MVT::v32i16, 1 },
2612 { ISD::CTPOP, MVT::v64i8, 1 },
2613 { ISD::CTPOP, MVT::v16i16, 1 },
2614 { ISD::CTPOP, MVT::v32i8, 1 },
2615 { ISD::CTPOP, MVT::v8i16, 1 },
2616 { ISD::CTPOP, MVT::v16i8, 1 },
2617 };
2618 static const CostTblEntry AVX512VPOPCNTDQCostTbl[] = {
2619 { ISD::CTPOP, MVT::v8i64, 1 },
2620 { ISD::CTPOP, MVT::v16i32, 1 },
2621 { ISD::CTPOP, MVT::v4i64, 1 },
2622 { ISD::CTPOP, MVT::v8i32, 1 },
2623 { ISD::CTPOP, MVT::v2i64, 1 },
2624 { ISD::CTPOP, MVT::v4i32, 1 },
2625 };
2626 static const CostTblEntry AVX512CDCostTbl[] = {
2627 { ISD::CTLZ, MVT::v8i64, 1 },
2628 { ISD::CTLZ, MVT::v16i32, 1 },
2629 { ISD::CTLZ, MVT::v32i16, 8 },
2630 { ISD::CTLZ, MVT::v64i8, 20 },
2631 { ISD::CTLZ, MVT::v4i64, 1 },
2632 { ISD::CTLZ, MVT::v8i32, 1 },
2633 { ISD::CTLZ, MVT::v16i16, 4 },
2634 { ISD::CTLZ, MVT::v32i8, 10 },
2635 { ISD::CTLZ, MVT::v2i64, 1 },
2636 { ISD::CTLZ, MVT::v4i32, 1 },
2637 { ISD::CTLZ, MVT::v8i16, 4 },
2638 { ISD::CTLZ, MVT::v16i8, 4 },
2639 };
2640 static const CostTblEntry AVX512BWCostTbl[] = {
2641 { ISD::ABS, MVT::v32i16, 1 },
2642 { ISD::ABS, MVT::v64i8, 1 },
2643 { ISD::BITREVERSE, MVT::v8i64, 3 },
2644 { ISD::BITREVERSE, MVT::v16i32, 3 },
2645 { ISD::BITREVERSE, MVT::v32i16, 3 },
2646 { ISD::BITREVERSE, MVT::v64i8, 2 },
2647 { ISD::BSWAP, MVT::v8i64, 1 },
2648 { ISD::BSWAP, MVT::v16i32, 1 },
2649 { ISD::BSWAP, MVT::v32i16, 1 },
2650 { ISD::CTLZ, MVT::v8i64, 23 },
2651 { ISD::CTLZ, MVT::v16i32, 22 },
2652 { ISD::CTLZ, MVT::v32i16, 18 },
2653 { ISD::CTLZ, MVT::v64i8, 17 },
2654 { ISD::CTPOP, MVT::v8i64, 7 },
2655 { ISD::CTPOP, MVT::v16i32, 11 },
2656 { ISD::CTPOP, MVT::v32i16, 9 },
2657 { ISD::CTPOP, MVT::v64i8, 6 },
2658 { ISD::CTTZ, MVT::v8i64, 10 },
2659 { ISD::CTTZ, MVT::v16i32, 14 },
2660 { ISD::CTTZ, MVT::v32i16, 12 },
2661 { ISD::CTTZ, MVT::v64i8, 9 },
2662 { ISD::SADDSAT, MVT::v32i16, 1 },
2663 { ISD::SADDSAT, MVT::v64i8, 1 },
2664 { ISD::SMAX, MVT::v32i16, 1 },
2665 { ISD::SMAX, MVT::v64i8, 1 },
2666 { ISD::SMIN, MVT::v32i16, 1 },
2667 { ISD::SMIN, MVT::v64i8, 1 },
2668 { ISD::SSUBSAT, MVT::v32i16, 1 },
2669 { ISD::SSUBSAT, MVT::v64i8, 1 },
2670 { ISD::UADDSAT, MVT::v32i16, 1 },
2671 { ISD::UADDSAT, MVT::v64i8, 1 },
2672 { ISD::UMAX, MVT::v32i16, 1 },
2673 { ISD::UMAX, MVT::v64i8, 1 },
2674 { ISD::UMIN, MVT::v32i16, 1 },
2675 { ISD::UMIN, MVT::v64i8, 1 },
2676 { ISD::USUBSAT, MVT::v32i16, 1 },
2677 { ISD::USUBSAT, MVT::v64i8, 1 },
2678 };
2679 static const CostTblEntry AVX512CostTbl[] = {
2680 { ISD::ABS, MVT::v8i64, 1 },
2681 { ISD::ABS, MVT::v16i32, 1 },
2682 { ISD::ABS, MVT::v32i16, 2 }, // FIXME: include split
2683 { ISD::ABS, MVT::v64i8, 2 }, // FIXME: include split
2684 { ISD::ABS, MVT::v4i64, 1 },
2685 { ISD::ABS, MVT::v2i64, 1 },
2686 { ISD::BITREVERSE, MVT::v8i64, 36 },
2687 { ISD::BITREVERSE, MVT::v16i32, 24 },
2688 { ISD::BITREVERSE, MVT::v32i16, 10 },
2689 { ISD::BITREVERSE, MVT::v64i8, 10 },
2690 { ISD::BSWAP, MVT::v8i64, 4 },
2691 { ISD::BSWAP, MVT::v16i32, 4 },
2692 { ISD::BSWAP, MVT::v32i16, 4 },
2693 { ISD::CTLZ, MVT::v8i64, 29 },
2694 { ISD::CTLZ, MVT::v16i32, 35 },
2695 { ISD::CTLZ, MVT::v32i16, 28 },
2696 { ISD::CTLZ, MVT::v64i8, 18 },
2697 { ISD::CTPOP, MVT::v8i64, 16 },
2698 { ISD::CTPOP, MVT::v16i32, 24 },
2699 { ISD::CTPOP, MVT::v32i16, 18 },
2700 { ISD::CTPOP, MVT::v64i8, 12 },
2701 { ISD::CTTZ, MVT::v8i64, 20 },
2702 { ISD::CTTZ, MVT::v16i32, 28 },
2703 { ISD::CTTZ, MVT::v32i16, 24 },
2704 { ISD::CTTZ, MVT::v64i8, 18 },
2705 { ISD::SMAX, MVT::v8i64, 1 },
2706 { ISD::SMAX, MVT::v16i32, 1 },
2707 { ISD::SMAX, MVT::v32i16, 2 }, // FIXME: include split
2708 { ISD::SMAX, MVT::v64i8, 2 }, // FIXME: include split
2709 { ISD::SMAX, MVT::v4i64, 1 },
2710 { ISD::SMAX, MVT::v2i64, 1 },
2711 { ISD::SMIN, MVT::v8i64, 1 },
2712 { ISD::SMIN, MVT::v16i32, 1 },
2713 { ISD::SMIN, MVT::v32i16, 2 }, // FIXME: include split
2714 { ISD::SMIN, MVT::v64i8, 2 }, // FIXME: include split
2715 { ISD::SMIN, MVT::v4i64, 1 },
2716 { ISD::SMIN, MVT::v2i64, 1 },
2717 { ISD::UMAX, MVT::v8i64, 1 },
2718 { ISD::UMAX, MVT::v16i32, 1 },
2719 { ISD::UMAX, MVT::v32i16, 2 }, // FIXME: include split
2720 { ISD::UMAX, MVT::v64i8, 2 }, // FIXME: include split
2721 { ISD::UMAX, MVT::v4i64, 1 },
2722 { ISD::UMAX, MVT::v2i64, 1 },
2723 { ISD::UMIN, MVT::v8i64, 1 },
2724 { ISD::UMIN, MVT::v16i32, 1 },
2725 { ISD::UMIN, MVT::v32i16, 2 }, // FIXME: include split
2726 { ISD::UMIN, MVT::v64i8, 2 }, // FIXME: include split
2727 { ISD::UMIN, MVT::v4i64, 1 },
2728 { ISD::UMIN, MVT::v2i64, 1 },
2729 { ISD::USUBSAT, MVT::v16i32, 2 }, // pmaxud + psubd
2730 { ISD::USUBSAT, MVT::v2i64, 2 }, // pmaxuq + psubq
2731 { ISD::USUBSAT, MVT::v4i64, 2 }, // pmaxuq + psubq
2732 { ISD::USUBSAT, MVT::v8i64, 2 }, // pmaxuq + psubq
2733 { ISD::UADDSAT, MVT::v16i32, 3 }, // not + pminud + paddd
2734 { ISD::UADDSAT, MVT::v2i64, 3 }, // not + pminuq + paddq
2735 { ISD::UADDSAT, MVT::v4i64, 3 }, // not + pminuq + paddq
2736 { ISD::UADDSAT, MVT::v8i64, 3 }, // not + pminuq + paddq
2737 { ISD::SADDSAT, MVT::v32i16, 2 }, // FIXME: include split
2738 { ISD::SADDSAT, MVT::v64i8, 2 }, // FIXME: include split
2739 { ISD::SSUBSAT, MVT::v32i16, 2 }, // FIXME: include split
2740 { ISD::SSUBSAT, MVT::v64i8, 2 }, // FIXME: include split
2741 { ISD::UADDSAT, MVT::v32i16, 2 }, // FIXME: include split
2742 { ISD::UADDSAT, MVT::v64i8, 2 }, // FIXME: include split
2743 { ISD::USUBSAT, MVT::v32i16, 2 }, // FIXME: include split
2744 { ISD::USUBSAT, MVT::v64i8, 2 }, // FIXME: include split
2745 { ISD::FMAXNUM, MVT::f32, 2 },
2746 { ISD::FMAXNUM, MVT::v4f32, 2 },
2747 { ISD::FMAXNUM, MVT::v8f32, 2 },
2748 { ISD::FMAXNUM, MVT::v16f32, 2 },
2749 { ISD::FMAXNUM, MVT::f64, 2 },
2750 { ISD::FMAXNUM, MVT::v2f64, 2 },
2751 { ISD::FMAXNUM, MVT::v4f64, 2 },
2752 { ISD::FMAXNUM, MVT::v8f64, 2 },
2753 };
2754 static const CostTblEntry XOPCostTbl[] = {
2755 { ISD::BITREVERSE, MVT::v4i64, 4 },
2756 { ISD::BITREVERSE, MVT::v8i32, 4 },
2757 { ISD::BITREVERSE, MVT::v16i16, 4 },
2758 { ISD::BITREVERSE, MVT::v32i8, 4 },
2759 { ISD::BITREVERSE, MVT::v2i64, 1 },
2760 { ISD::BITREVERSE, MVT::v4i32, 1 },
2761 { ISD::BITREVERSE, MVT::v8i16, 1 },
2762 { ISD::BITREVERSE, MVT::v16i8, 1 },
2763 { ISD::BITREVERSE, MVT::i64, 3 },
2764 { ISD::BITREVERSE, MVT::i32, 3 },
2765 { ISD::BITREVERSE, MVT::i16, 3 },
2766 { ISD::BITREVERSE, MVT::i8, 3 }
2767 };
2768 static const CostTblEntry AVX2CostTbl[] = {
2769 { ISD::ABS, MVT::v4i64, 2 }, // VBLENDVPD(X,VPSUBQ(0,X),X)
2770 { ISD::ABS, MVT::v8i32, 1 },
2771 { ISD::ABS, MVT::v16i16, 1 },
2772 { ISD::ABS, MVT::v32i8, 1 },
2773 { ISD::BITREVERSE, MVT::v2i64, 3 },
2774 { ISD::BITREVERSE, MVT::v4i64, 3 },
2775 { ISD::BITREVERSE, MVT::v4i32, 3 },
2776 { ISD::BITREVERSE, MVT::v8i32, 3 },
2777 { ISD::BITREVERSE, MVT::v8i16, 3 },
2778 { ISD::BITREVERSE, MVT::v16i16, 3 },
2779 { ISD::BITREVERSE, MVT::v16i8, 3 },
2780 { ISD::BITREVERSE, MVT::v32i8, 3 },
2781 { ISD::BSWAP, MVT::v4i64, 1 },
2782 { ISD::BSWAP, MVT::v8i32, 1 },
2783 { ISD::BSWAP, MVT::v16i16, 1 },
2784 { ISD::CTLZ, MVT::v2i64, 7 },
2785 { ISD::CTLZ, MVT::v4i64, 7 },
2786 { ISD::CTLZ, MVT::v4i32, 5 },
2787 { ISD::CTLZ, MVT::v8i32, 5 },
2788 { ISD::CTLZ, MVT::v8i16, 4 },
2789 { ISD::CTLZ, MVT::v16i16, 4 },
2790 { ISD::CTLZ, MVT::v16i8, 3 },
2791 { ISD::CTLZ, MVT::v32i8, 3 },
2792 { ISD::CTPOP, MVT::v2i64, 3 },
2793 { ISD::CTPOP, MVT::v4i64, 3 },
2794 { ISD::CTPOP, MVT::v4i32, 7 },
2795 { ISD::CTPOP, MVT::v8i32, 7 },
2796 { ISD::CTPOP, MVT::v8i16, 3 },
2797 { ISD::CTPOP, MVT::v16i16, 3 },
2798 { ISD::CTPOP, MVT::v16i8, 2 },
2799 { ISD::CTPOP, MVT::v32i8, 2 },
2800 { ISD::CTTZ, MVT::v2i64, 4 },
2801 { ISD::CTTZ, MVT::v4i64, 4 },
2802 { ISD::CTTZ, MVT::v4i32, 7 },
2803 { ISD::CTTZ, MVT::v8i32, 7 },
2804 { ISD::CTTZ, MVT::v8i16, 4 },
2805 { ISD::CTTZ, MVT::v16i16, 4 },
2806 { ISD::CTTZ, MVT::v16i8, 3 },
2807 { ISD::CTTZ, MVT::v32i8, 3 },
2808 { ISD::SADDSAT, MVT::v16i16, 1 },
2809 { ISD::SADDSAT, MVT::v32i8, 1 },
2810 { ISD::SMAX, MVT::v8i32, 1 },
2811 { ISD::SMAX, MVT::v16i16, 1 },
2812 { ISD::SMAX, MVT::v32i8, 1 },
2813 { ISD::SMIN, MVT::v8i32, 1 },
2814 { ISD::SMIN, MVT::v16i16, 1 },
2815 { ISD::SMIN, MVT::v32i8, 1 },
2816 { ISD::SSUBSAT, MVT::v16i16, 1 },
2817 { ISD::SSUBSAT, MVT::v32i8, 1 },
2818 { ISD::UADDSAT, MVT::v16i16, 1 },
2819 { ISD::UADDSAT, MVT::v32i8, 1 },
2820 { ISD::UADDSAT, MVT::v8i32, 3 }, // not + pminud + paddd
2821 { ISD::UMAX, MVT::v8i32, 1 },
2822 { ISD::UMAX, MVT::v16i16, 1 },
2823 { ISD::UMAX, MVT::v32i8, 1 },
2824 { ISD::UMIN, MVT::v8i32, 1 },
2825 { ISD::UMIN, MVT::v16i16, 1 },
2826 { ISD::UMIN, MVT::v32i8, 1 },
2827 { ISD::USUBSAT, MVT::v16i16, 1 },
2828 { ISD::USUBSAT, MVT::v32i8, 1 },
2829 { ISD::USUBSAT, MVT::v8i32, 2 }, // pmaxud + psubd
2830 { ISD::FMAXNUM, MVT::v8f32, 3 }, // MAXPS + CMPUNORDPS + BLENDVPS
2831 { ISD::FMAXNUM, MVT::v4f64, 3 }, // MAXPD + CMPUNORDPD + BLENDVPD
2832 { ISD::FSQRT, MVT::f32, 7 }, // Haswell from http://www.agner.org/
2833 { ISD::FSQRT, MVT::v4f32, 7 }, // Haswell from http://www.agner.org/
2834 { ISD::FSQRT, MVT::v8f32, 14 }, // Haswell from http://www.agner.org/
2835 { ISD::FSQRT, MVT::f64, 14 }, // Haswell from http://www.agner.org/
2836 { ISD::FSQRT, MVT::v2f64, 14 }, // Haswell from http://www.agner.org/
2837 { ISD::FSQRT, MVT::v4f64, 28 }, // Haswell from http://www.agner.org/
2838 };
2839 static const CostTblEntry AVX1CostTbl[] = {
2840 { ISD::ABS, MVT::v4i64, 5 }, // VBLENDVPD(X,VPSUBQ(0,X),X)
2841 { ISD::ABS, MVT::v8i32, 3 },
2842 { ISD::ABS, MVT::v16i16, 3 },
2843 { ISD::ABS, MVT::v32i8, 3 },
2844 { ISD::BITREVERSE, MVT::v4i64, 12 }, // 2 x 128-bit Op + extract/insert
2845 { ISD::BITREVERSE, MVT::v8i32, 12 }, // 2 x 128-bit Op + extract/insert
2846 { ISD::BITREVERSE, MVT::v16i16, 12 }, // 2 x 128-bit Op + extract/insert
2847 { ISD::BITREVERSE, MVT::v32i8, 12 }, // 2 x 128-bit Op + extract/insert
2848 { ISD::BSWAP, MVT::v4i64, 4 },
2849 { ISD::BSWAP, MVT::v8i32, 4 },
2850 { ISD::BSWAP, MVT::v16i16, 4 },
2851 { ISD::CTLZ, MVT::v4i64, 48 }, // 2 x 128-bit Op + extract/insert
2852 { ISD::CTLZ, MVT::v8i32, 38 }, // 2 x 128-bit Op + extract/insert
2853 { ISD::CTLZ, MVT::v16i16, 30 }, // 2 x 128-bit Op + extract/insert
2854 { ISD::CTLZ, MVT::v32i8, 20 }, // 2 x 128-bit Op + extract/insert
2855 { ISD::CTPOP, MVT::v4i64, 16 }, // 2 x 128-bit Op + extract/insert
2856 { ISD::CTPOP, MVT::v8i32, 24 }, // 2 x 128-bit Op + extract/insert
2857 { ISD::CTPOP, MVT::v16i16, 20 }, // 2 x 128-bit Op + extract/insert
2858 { ISD::CTPOP, MVT::v32i8, 14 }, // 2 x 128-bit Op + extract/insert
2859 { ISD::CTTZ, MVT::v4i64, 22 }, // 2 x 128-bit Op + extract/insert
2860 { ISD::CTTZ, MVT::v8i32, 30 }, // 2 x 128-bit Op + extract/insert
2861 { ISD::CTTZ, MVT::v16i16, 26 }, // 2 x 128-bit Op + extract/insert
2862 { ISD::CTTZ, MVT::v32i8, 20 }, // 2 x 128-bit Op + extract/insert
2863 { ISD::SADDSAT, MVT::v16i16, 4 }, // 2 x 128-bit Op + extract/insert
2864 { ISD::SADDSAT, MVT::v32i8, 4 }, // 2 x 128-bit Op + extract/insert
2865 { ISD::SMAX, MVT::v8i32, 4 }, // 2 x 128-bit Op + extract/insert
2866 { ISD::SMAX, MVT::v16i16, 4 }, // 2 x 128-bit Op + extract/insert
2867 { ISD::SMAX, MVT::v32i8, 4 }, // 2 x 128-bit Op + extract/insert
2868 { ISD::SMIN, MVT::v8i32, 4 }, // 2 x 128-bit Op + extract/insert
2869 { ISD::SMIN, MVT::v16i16, 4 }, // 2 x 128-bit Op + extract/insert
2870 { ISD::SMIN, MVT::v32i8, 4 }, // 2 x 128-bit Op + extract/insert
2871 { ISD::SSUBSAT, MVT::v16i16, 4 }, // 2 x 128-bit Op + extract/insert
2872 { ISD::SSUBSAT, MVT::v32i8, 4 }, // 2 x 128-bit Op + extract/insert
2873 { ISD::UADDSAT, MVT::v16i16, 4 }, // 2 x 128-bit Op + extract/insert
2874 { ISD::UADDSAT, MVT::v32i8, 4 }, // 2 x 128-bit Op + extract/insert
2875 { ISD::UADDSAT, MVT::v8i32, 8 }, // 2 x 128-bit Op + extract/insert
2876 { ISD::UMAX, MVT::v8i32, 4 }, // 2 x 128-bit Op + extract/insert
2877 { ISD::UMAX, MVT::v16i16, 4 }, // 2 x 128-bit Op + extract/insert
2878 { ISD::UMAX, MVT::v32i8, 4 }, // 2 x 128-bit Op + extract/insert
2879 { ISD::UMIN, MVT::v8i32, 4 }, // 2 x 128-bit Op + extract/insert
2880 { ISD::UMIN, MVT::v16i16, 4 }, // 2 x 128-bit Op + extract/insert
2881 { ISD::UMIN, MVT::v32i8, 4 }, // 2 x 128-bit Op + extract/insert
2882 { ISD::USUBSAT, MVT::v16i16, 4 }, // 2 x 128-bit Op + extract/insert
2883 { ISD::USUBSAT, MVT::v32i8, 4 }, // 2 x 128-bit Op + extract/insert
2884 { ISD::USUBSAT, MVT::v8i32, 6 }, // 2 x 128-bit Op + extract/insert
2885 { ISD::FMAXNUM, MVT::f32, 3 }, // MAXSS + CMPUNORDSS + BLENDVPS
2886 { ISD::FMAXNUM, MVT::v4f32, 3 }, // MAXPS + CMPUNORDPS + BLENDVPS
2887 { ISD::FMAXNUM, MVT::v8f32, 5 }, // MAXPS + CMPUNORDPS + BLENDVPS + ?
2888 { ISD::FMAXNUM, MVT::f64, 3 }, // MAXSD + CMPUNORDSD + BLENDVPD
2889 { ISD::FMAXNUM, MVT::v2f64, 3 }, // MAXPD + CMPUNORDPD + BLENDVPD
2890 { ISD::FMAXNUM, MVT::v4f64, 5 }, // MAXPD + CMPUNORDPD + BLENDVPD + ?
2891 { ISD::FSQRT, MVT::f32, 14 }, // SNB from http://www.agner.org/
2892 { ISD::FSQRT, MVT::v4f32, 14 }, // SNB from http://www.agner.org/
2893 { ISD::FSQRT, MVT::v8f32, 28 }, // SNB from http://www.agner.org/
2894 { ISD::FSQRT, MVT::f64, 21 }, // SNB from http://www.agner.org/
2895 { ISD::FSQRT, MVT::v2f64, 21 }, // SNB from http://www.agner.org/
2896 { ISD::FSQRT, MVT::v4f64, 43 }, // SNB from http://www.agner.org/
2897 };
2898 static const CostTblEntry GLMCostTbl[] = {
2899 { ISD::FSQRT, MVT::f32, 19 }, // sqrtss
2900 { ISD::FSQRT, MVT::v4f32, 37 }, // sqrtps
2901 { ISD::FSQRT, MVT::f64, 34 }, // sqrtsd
2902 { ISD::FSQRT, MVT::v2f64, 67 }, // sqrtpd
2903 };
2904 static const CostTblEntry SLMCostTbl[] = {
2905 { ISD::FSQRT, MVT::f32, 20 }, // sqrtss
2906 { ISD::FSQRT, MVT::v4f32, 40 }, // sqrtps
2907 { ISD::FSQRT, MVT::f64, 35 }, // sqrtsd
2908 { ISD::FSQRT, MVT::v2f64, 70 }, // sqrtpd
2909 };
2910 static const CostTblEntry SSE42CostTbl[] = {
2911 { ISD::USUBSAT, MVT::v4i32, 2 }, // pmaxud + psubd
2912 { ISD::UADDSAT, MVT::v4i32, 3 }, // not + pminud + paddd
2913 { ISD::FSQRT, MVT::f32, 18 }, // Nehalem from http://www.agner.org/
2914 { ISD::FSQRT, MVT::v4f32, 18 }, // Nehalem from http://www.agner.org/
2915 };
2916 static const CostTblEntry SSE41CostTbl[] = {
2917 { ISD::ABS, MVT::v2i64, 2 }, // BLENDVPD(X,PSUBQ(0,X),X)
2918 { ISD::SMAX, MVT::v4i32, 1 },
2919 { ISD::SMAX, MVT::v16i8, 1 },
2920 { ISD::SMIN, MVT::v4i32, 1 },
2921 { ISD::SMIN, MVT::v16i8, 1 },
2922 { ISD::UMAX, MVT::v4i32, 1 },
2923 { ISD::UMAX, MVT::v8i16, 1 },
2924 { ISD::UMIN, MVT::v4i32, 1 },
2925 { ISD::UMIN, MVT::v8i16, 1 },
2926 };
2927 static const CostTblEntry SSSE3CostTbl[] = {
2928 { ISD::ABS, MVT::v4i32, 1 },
2929 { ISD::ABS, MVT::v8i16, 1 },
2930 { ISD::ABS, MVT::v16i8, 1 },
2931 { ISD::BITREVERSE, MVT::v2i64, 5 },
2932 { ISD::BITREVERSE, MVT::v4i32, 5 },
2933 { ISD::BITREVERSE, MVT::v8i16, 5 },
2934 { ISD::BITREVERSE, MVT::v16i8, 5 },
2935 { ISD::BSWAP, MVT::v2i64, 1 },
2936 { ISD::BSWAP, MVT::v4i32, 1 },
2937 { ISD::BSWAP, MVT::v8i16, 1 },
2938 { ISD::CTLZ, MVT::v2i64, 23 },
2939 { ISD::CTLZ, MVT::v4i32, 18 },
2940 { ISD::CTLZ, MVT::v8i16, 14 },
2941 { ISD::CTLZ, MVT::v16i8, 9 },
2942 { ISD::CTPOP, MVT::v2i64, 7 },
2943 { ISD::CTPOP, MVT::v4i32, 11 },
2944 { ISD::CTPOP, MVT::v8i16, 9 },
2945 { ISD::CTPOP, MVT::v16i8, 6 },
2946 { ISD::CTTZ, MVT::v2i64, 10 },
2947 { ISD::CTTZ, MVT::v4i32, 14 },
2948 { ISD::CTTZ, MVT::v8i16, 12 },
2949 { ISD::CTTZ, MVT::v16i8, 9 }
2950 };
2951 static const CostTblEntry SSE2CostTbl[] = {
2952 { ISD::ABS, MVT::v2i64, 4 },
2953 { ISD::ABS, MVT::v4i32, 3 },
2954 { ISD::ABS, MVT::v8i16, 2 },
2955 { ISD::ABS, MVT::v16i8, 2 },
2956 { ISD::BITREVERSE, MVT::v2i64, 29 },
2957 { ISD::BITREVERSE, MVT::v4i32, 27 },
2958 { ISD::BITREVERSE, MVT::v8i16, 27 },
2959 { ISD::BITREVERSE, MVT::v16i8, 20 },
2960 { ISD::BSWAP, MVT::v2i64, 7 },
2961 { ISD::BSWAP, MVT::v4i32, 7 },
2962 { ISD::BSWAP, MVT::v8i16, 7 },
2963 { ISD::CTLZ, MVT::v2i64, 25 },
2964 { ISD::CTLZ, MVT::v4i32, 26 },
2965 { ISD::CTLZ, MVT::v8i16, 20 },
2966 { ISD::CTLZ, MVT::v16i8, 17 },
2967 { ISD::CTPOP, MVT::v2i64, 12 },
2968 { ISD::CTPOP, MVT::v4i32, 15 },
2969 { ISD::CTPOP, MVT::v8i16, 13 },
2970 { ISD::CTPOP, MVT::v16i8, 10 },
2971 { ISD::CTTZ, MVT::v2i64, 14 },
2972 { ISD::CTTZ, MVT::v4i32, 18 },
2973 { ISD::CTTZ, MVT::v8i16, 16 },
2974 { ISD::CTTZ, MVT::v16i8, 13 },
2975 { ISD::SADDSAT, MVT::v8i16, 1 },
2976 { ISD::SADDSAT, MVT::v16i8, 1 },
2977 { ISD::SMAX, MVT::v8i16, 1 },
2978 { ISD::SMIN, MVT::v8i16, 1 },
2979 { ISD::SSUBSAT, MVT::v8i16, 1 },
2980 { ISD::SSUBSAT, MVT::v16i8, 1 },
2981 { ISD::UADDSAT, MVT::v8i16, 1 },
2982 { ISD::UADDSAT, MVT::v16i8, 1 },
2983 { ISD::UMAX, MVT::v8i16, 2 },
2984 { ISD::UMAX, MVT::v16i8, 1 },
2985 { ISD::UMIN, MVT::v8i16, 2 },
2986 { ISD::UMIN, MVT::v16i8, 1 },
2987 { ISD::USUBSAT, MVT::v8i16, 1 },
2988 { ISD::USUBSAT, MVT::v16i8, 1 },
2989 { ISD::FMAXNUM, MVT::f64, 4 },
2990 { ISD::FMAXNUM, MVT::v2f64, 4 },
2991 { ISD::FSQRT, MVT::f64, 32 }, // Nehalem from http://www.agner.org/
2992 { ISD::FSQRT, MVT::v2f64, 32 }, // Nehalem from http://www.agner.org/
2993 };
2994 static const CostTblEntry SSE1CostTbl[] = {
2995 { ISD::FMAXNUM, MVT::f32, 4 },
2996 { ISD::FMAXNUM, MVT::v4f32, 4 },
2997 { ISD::FSQRT, MVT::f32, 28 }, // Pentium III from http://www.agner.org/
2998 { ISD::FSQRT, MVT::v4f32, 56 }, // Pentium III from http://www.agner.org/
2999 };
3000 static const CostTblEntry BMI64CostTbl[] = { // 64-bit targets
3001 { ISD::CTTZ, MVT::i64, 1 },
3002 };
3003 static const CostTblEntry BMI32CostTbl[] = { // 32 or 64-bit targets
3004 { ISD::CTTZ, MVT::i32, 1 },
3005 { ISD::CTTZ, MVT::i16, 1 },
3006 { ISD::CTTZ, MVT::i8, 1 },
3007 };
3008 static const CostTblEntry LZCNT64CostTbl[] = { // 64-bit targets
3009 { ISD::CTLZ, MVT::i64, 1 },
3010 };
3011 static const CostTblEntry LZCNT32CostTbl[] = { // 32 or 64-bit targets
3012 { ISD::CTLZ, MVT::i32, 1 },
3013 { ISD::CTLZ, MVT::i16, 1 },
3014 { ISD::CTLZ, MVT::i8, 1 },
3015 };
3016 static const CostTblEntry POPCNT64CostTbl[] = { // 64-bit targets
3017 { ISD::CTPOP, MVT::i64, 1 },
3018 };
3019 static const CostTblEntry POPCNT32CostTbl[] = { // 32 or 64-bit targets
3020 { ISD::CTPOP, MVT::i32, 1 },
3021 { ISD::CTPOP, MVT::i16, 1 },
3022 { ISD::CTPOP, MVT::i8, 1 },
3023 };
3024 static const CostTblEntry X64CostTbl[] = { // 64-bit targets
3025 { ISD::ABS, MVT::i64, 2 }, // SUB+CMOV
3026 { ISD::BITREVERSE, MVT::i64, 14 },
3027 { ISD::BSWAP, MVT::i64, 1 },
3028 { ISD::CTLZ, MVT::i64, 4 }, // BSR+XOR or BSR+XOR+CMOV
3029 { ISD::CTTZ, MVT::i64, 3 }, // TEST+BSF+CMOV/BRANCH
3030 { ISD::CTPOP, MVT::i64, 10 },
3031 { ISD::SADDO, MVT::i64, 1 },
3032 { ISD::UADDO, MVT::i64, 1 },
3033 { ISD::UMULO, MVT::i64, 2 }, // mulq + seto
3034 };
3035 static const CostTblEntry X86CostTbl[] = { // 32 or 64-bit targets
3036 { ISD::ABS, MVT::i32, 2 }, // SUB+CMOV
3037 { ISD::ABS, MVT::i16, 2 }, // SUB+CMOV
3038 { ISD::BITREVERSE, MVT::i32, 14 },
3039 { ISD::BITREVERSE, MVT::i16, 14 },
3040 { ISD::BITREVERSE, MVT::i8, 11 },
3041 { ISD::BSWAP, MVT::i32, 1 },
3042 { ISD::BSWAP, MVT::i16, 1 }, // ROL
3043 { ISD::CTLZ, MVT::i32, 4 }, // BSR+XOR or BSR+XOR+CMOV
3044 { ISD::CTLZ, MVT::i16, 4 }, // BSR+XOR or BSR+XOR+CMOV
3045 { ISD::CTLZ, MVT::i8, 4 }, // BSR+XOR or BSR+XOR+CMOV
3046 { ISD::CTTZ, MVT::i32, 3 }, // TEST+BSF+CMOV/BRANCH
3047 { ISD::CTTZ, MVT::i16, 3 }, // TEST+BSF+CMOV/BRANCH
3048 { ISD::CTTZ, MVT::i8, 3 }, // TEST+BSF+CMOV/BRANCH
3049 { ISD::CTPOP, MVT::i32, 8 },
3050 { ISD::CTPOP, MVT::i16, 9 },
3051 { ISD::CTPOP, MVT::i8, 7 },
3052 { ISD::SADDO, MVT::i32, 1 },
3053 { ISD::SADDO, MVT::i16, 1 },
3054 { ISD::SADDO, MVT::i8, 1 },
3055 { ISD::UADDO, MVT::i32, 1 },
3056 { ISD::UADDO, MVT::i16, 1 },
3057 { ISD::UADDO, MVT::i8, 1 },
3058 { ISD::UMULO, MVT::i32, 2 }, // mul + seto
3059 { ISD::UMULO, MVT::i16, 2 },
3060 { ISD::UMULO, MVT::i8, 2 },
3061 };
3062
3063 Type *RetTy = ICA.getReturnType();
3064 Type *OpTy = RetTy;
3065 Intrinsic::ID IID = ICA.getID();
3066 unsigned ISD = ISD::DELETED_NODE;
3067 switch (IID) {
3068 default:
3069 break;
3070 case Intrinsic::abs:
3071 ISD = ISD::ABS;
3072 break;
3073 case Intrinsic::bitreverse:
3074 ISD = ISD::BITREVERSE;
3075 break;
3076 case Intrinsic::bswap:
3077 ISD = ISD::BSWAP;
3078 break;
3079 case Intrinsic::ctlz:
3080 ISD = ISD::CTLZ;
3081 break;
3082 case Intrinsic::ctpop:
3083 ISD = ISD::CTPOP;
3084 break;
3085 case Intrinsic::cttz:
3086 ISD = ISD::CTTZ;
3087 break;
3088 case Intrinsic::maxnum:
3089 case Intrinsic::minnum:
3090 // FMINNUM has same costs so don't duplicate.
3091 ISD = ISD::FMAXNUM;
3092 break;
3093 case Intrinsic::sadd_sat:
3094 ISD = ISD::SADDSAT;
3095 break;
3096 case Intrinsic::smax:
3097 ISD = ISD::SMAX;
3098 break;
3099 case Intrinsic::smin:
3100 ISD = ISD::SMIN;
3101 break;
3102 case Intrinsic::ssub_sat:
3103 ISD = ISD::SSUBSAT;
3104 break;
3105 case Intrinsic::uadd_sat:
3106 ISD = ISD::UADDSAT;
3107 break;
3108 case Intrinsic::umax:
3109 ISD = ISD::UMAX;
3110 break;
3111 case Intrinsic::umin:
3112 ISD = ISD::UMIN;
3113 break;
3114 case Intrinsic::usub_sat:
3115 ISD = ISD::USUBSAT;
3116 break;
3117 case Intrinsic::sqrt:
3118 ISD = ISD::FSQRT;
3119 break;
3120 case Intrinsic::sadd_with_overflow:
3121 case Intrinsic::ssub_with_overflow:
3122 // SSUBO has same costs so don't duplicate.
3123 ISD = ISD::SADDO;
3124 OpTy = RetTy->getContainedType(0);
3125 break;
3126 case Intrinsic::uadd_with_overflow:
3127 case Intrinsic::usub_with_overflow:
3128 // USUBO has same costs so don't duplicate.
3129 ISD = ISD::UADDO;
3130 OpTy = RetTy->getContainedType(0);
3131 break;
3132 case Intrinsic::umul_with_overflow:
3133 case Intrinsic::smul_with_overflow:
3134 // SMULO has same costs so don't duplicate.
3135 ISD = ISD::UMULO;
3136 OpTy = RetTy->getContainedType(0);
3137 break;
3138 }
3139
3140 if (ISD != ISD::DELETED_NODE) {
3141 // Legalize the type.
3142 std::pair<InstructionCost, MVT> LT = TLI->getTypeLegalizationCost(DL, OpTy);
3143 MVT MTy = LT.second;
3144
3145 // Attempt to lookup cost.
3146 if (ISD == ISD::BITREVERSE && ST->hasGFNI() && ST->hasSSSE3() &&
3147 MTy.isVector()) {
3148 // With PSHUFB the code is very similar for all types. If we have integer
3149 // byte operations, we just need a GF2P8AFFINEQB for vXi8. For other types
3150 // we also need a PSHUFB.
3151 unsigned Cost = MTy.getVectorElementType() == MVT::i8 ? 1 : 2;
3152
3153 // Without byte operations, we need twice as many GF2P8AFFINEQB and PSHUFB
3154 // instructions. We also need an extract and an insert.
3155 if (!(MTy.is128BitVector() || (ST->hasAVX2() && MTy.is256BitVector()) ||
3156 (ST->hasBWI() && MTy.is512BitVector())))
3157 Cost = Cost * 2 + 2;
3158
3159 return LT.first * Cost;
3160 }
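// Worked example for the GFNI path above (types assumed): bitreverse on
// <16 x i8> costs 1 (a single GF2P8AFFINEQB), while <4 x i32> costs 2
// (GF2P8AFFINEQB + PSHUFB); both are 128-bit, so no extract/insert doubling,
// and the result is scaled by LT.first.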
3161
3162 auto adjustTableCost = [](const CostTblEntry &Entry,
3163 InstructionCost LegalizationCost,
3164 FastMathFlags FMF) {
3165 // If there are no NANs to deal with, then these are reduced to a
3166 // single MIN** or MAX** instruction instead of the MIN/CMP/SELECT that we
3167 // assume is used in the non-fast case.
3168 if (Entry.ISD == ISD::FMAXNUM || Entry.ISD == ISD::FMINNUM) {
3169 if (FMF.noNaNs())
3170 return LegalizationCost * 1;
3171 }
3172 return LegalizationCost * (int)Entry.Cost;
3173 };
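// For instance (illustrative): maxnum/minnum on <4 x float> maps to
// ISD::FMAXNUM with a subtarget-dependent table cost of 2-4 (a
// MAXPS + CMPUNORDPS + BLENDVPS style sequence); if the call carries the
// nnan fast-math flag, adjustTableCost collapses that to LegalizationCost * 1,
// i.e. a single MAXPS/MINPS.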
3174
3175 if (ST->useGLMDivSqrtCosts())
3176 if (const auto *Entry = CostTableLookup(GLMCostTbl, ISD, MTy))
3177 return adjustTableCost(*Entry, LT.first, ICA.getFlags());
3178
3179 if (ST->isSLM())
3180 if (const auto *Entry = CostTableLookup(SLMCostTbl, ISD, MTy))
3181 return adjustTableCost(*Entry, LT.first, ICA.getFlags());
3182
3183 if (ST->hasBITALG())
3184 if (const auto *Entry = CostTableLookup(AVX512BITALGCostTbl, ISD, MTy))
3185 return adjustTableCost(*Entry, LT.first, ICA.getFlags());
3186
3187 if (ST->hasVPOPCNTDQ())
3188 if (const auto *Entry = CostTableLookup(AVX512VPOPCNTDQCostTbl, ISD, MTy))
3189 return adjustTableCost(*Entry, LT.first, ICA.getFlags());
3190
3191 if (ST->hasCDI())
3192 if (const auto *Entry = CostTableLookup(AVX512CDCostTbl, ISD, MTy))
3193 return adjustTableCost(*Entry, LT.first, ICA.getFlags());
3194
3195 if (ST->hasBWI())
3196 if (const auto *Entry = CostTableLookup(AVX512BWCostTbl, ISD, MTy))
3197 return adjustTableCost(*Entry, LT.first, ICA.getFlags());
3198
3199 if (ST->hasAVX512())
3200 if (const auto *Entry = CostTableLookup(AVX512CostTbl, ISD, MTy))
3201 return adjustTableCost(*Entry, LT.first, ICA.getFlags());
3202
3203 if (ST->hasXOP())
3204 if (const auto *Entry = CostTableLookup(XOPCostTbl, ISD, MTy))
3205 return adjustTableCost(*Entry, LT.first, ICA.getFlags());
3206
3207 if (ST->hasAVX2())
3208 if (const auto *Entry = CostTableLookup(AVX2CostTbl, ISD, MTy))
3209 return adjustTableCost(*Entry, LT.first, ICA.getFlags());
3210
3211 if (ST->hasAVX())
3212 if (const auto *Entry = CostTableLookup(AVX1CostTbl, ISD, MTy))
3213 return adjustTableCost(*Entry, LT.first, ICA.getFlags());
3214
3215 if (ST->hasSSE42())
3216 if (const auto *Entry = CostTableLookup(SSE42CostTbl, ISD, MTy))
3217 return adjustTableCost(*Entry, LT.first, ICA.getFlags());
3218
3219 if (ST->hasSSE41())
3220 if (const auto *Entry = CostTableLookup(SSE41CostTbl, ISD, MTy))
3221 return adjustTableCost(*Entry, LT.first, ICA.getFlags());
3222
3223 if (ST->hasSSSE3())
3224 if (const auto *Entry = CostTableLookup(SSSE3CostTbl, ISD, MTy))
3225 return adjustTableCost(*Entry, LT.first, ICA.getFlags());
3226
3227 if (ST->hasSSE2())
3228 if (const auto *Entry = CostTableLookup(SSE2CostTbl, ISD, MTy))
3229 return adjustTableCost(*Entry, LT.first, ICA.getFlags());
3230
3231 if (ST->hasSSE1())
3232 if (const auto *Entry = CostTableLookup(SSE1CostTbl, ISD, MTy))
3233 return adjustTableCost(*Entry, LT.first, ICA.getFlags());
3234
3235 if (ST->hasBMI()) {
3236 if (ST->is64Bit())
3237 if (const auto *Entry = CostTableLookup(BMI64CostTbl, ISD, MTy))
3238 return adjustTableCost(*Entry, LT.first, ICA.getFlags());
3239
3240 if (const auto *Entry = CostTableLookup(BMI32CostTbl, ISD, MTy))
3241 return adjustTableCost(*Entry, LT.first, ICA.getFlags());
3242 }
3243
3244 if (ST->hasLZCNT()) {
3245 if (ST->is64Bit())
3246 if (const auto *Entry = CostTableLookup(LZCNT64CostTbl, ISD, MTy))
3247 return adjustTableCost(*Entry, LT.first, ICA.getFlags());
3248
3249 if (const auto *Entry = CostTableLookup(LZCNT32CostTbl, ISD, MTy))
3250 return adjustTableCost(*Entry, LT.first, ICA.getFlags());
3251 }
3252
3253 if (ST->hasPOPCNT()) {
3254 if (ST->is64Bit())
3255 if (const auto *Entry = CostTableLookup(POPCNT64CostTbl, ISD, MTy))
3256 return adjustTableCost(*Entry, LT.first, ICA.getFlags());
3257
3258 if (const auto *Entry = CostTableLookup(POPCNT32CostTbl, ISD, MTy))
3259 return adjustTableCost(*Entry, LT.first, ICA.getFlags());
3260 }
3261
3262 if (ISD == ISD::BSWAP && ST->hasMOVBE() && ST->hasFastMOVBE()) {
3263 if (const Instruction *II = ICA.getInst()) {
3264 if (II->hasOneUse() && isa<StoreInst>(II->user_back()))
3265 return TTI::TCC_Free;
3266 if (auto *LI = dyn_cast<LoadInst>(II->getOperand(0))) {
3267 if (LI->hasOneUse())
3268 return TTI::TCC_Free;
3269 }
3270 }
3271 }
3272
3273 // TODO - add BMI (TZCNT) scalar handling
3274
3275 if (ST->is64Bit())
3276 if (const auto *Entry = CostTableLookup(X64CostTbl, ISD, MTy))
3277 return adjustTableCost(*Entry, LT.first, ICA.getFlags());
3278
3279 if (const auto *Entry = CostTableLookup(X86CostTbl, ISD, MTy))
3280 return adjustTableCost(*Entry, LT.first, ICA.getFlags());
3281 }
3282
3283 return BaseT::getIntrinsicInstrCost(ICA, CostKind);
3284}
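// A minimal usage sketch (hypothetical caller, names assumed): a client pass
// reaches the tables above through the generic TTI interface, e.g.
//   IntrinsicCostAttributes ICA(Intrinsic::ctpop, VecTy, {VecTy});
//   InstructionCost C = TTI.getIntrinsicInstrCost(ICA, TTI::TCK_RecipThroughput);
// On an AVX2 subtarget with VecTy = <8 x i32> this would resolve to the
// AVX2CostTbl entry { ISD::CTPOP, MVT::v8i32, 7 } scaled by LT.first.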
3285
3286InstructionCost
3287X86TTIImpl::getIntrinsicInstrCost(const IntrinsicCostAttributes &ICA,
3288 TTI::TargetCostKind CostKind) {
3289 if (ICA.isTypeBasedOnly())
3290 return getTypeBasedIntrinsicInstrCost(ICA, CostKind);
3291
3292 static const CostTblEntry AVX512CostTbl[] = {
3293 { ISD::ROTL, MVT::v8i64, 1 },
3294 { ISD::ROTL, MVT::v4i64, 1 },
3295 { ISD::ROTL, MVT::v2i64, 1 },
3296 { ISD::ROTL, MVT::v16i32, 1 },
3297 { ISD::ROTL, MVT::v8i32, 1 },
3298 { ISD::ROTL, MVT::v4i32, 1 },
3299 { ISD::ROTR, MVT::v8i64, 1 },
3300 { ISD::ROTR, MVT::v4i64, 1 },
3301 { ISD::ROTR, MVT::v2i64, 1 },
3302 { ISD::ROTR, MVT::v16i32, 1 },
3303 { ISD::ROTR, MVT::v8i32, 1 },
3304 { ISD::ROTR, MVT::v4i32, 1 }
3305 };
3306 // XOP: ROTL = VPROT(X,Y), ROTR = VPROT(X,SUB(0,Y))
3307 static const CostTblEntry XOPCostTbl[] = {
3308 { ISD::ROTL, MVT::v4i64, 4 },
3309 { ISD::ROTL, MVT::v8i32, 4 },
3310 { ISD::ROTL, MVT::v16i16, 4 },
3311 { ISD::ROTL, MVT::v32i8, 4 },
3312 { ISD::ROTL, MVT::v2i64, 1 },
3313 { ISD::ROTL, MVT::v4i32, 1 },
3314 { ISD::ROTL, MVT::v8i16, 1 },
3315 { ISD::ROTL, MVT::v16i8, 1 },
3316 { ISD::ROTR, MVT::v4i64, 6 },
3317 { ISD::ROTR, MVT::v8i32, 6 },
3318 { ISD::ROTR, MVT::v16i16, 6 },
3319 { ISD::ROTR, MVT::v32i8, 6 },
3320 { ISD::ROTR, MVT::v2i64, 2 },
3321 { ISD::ROTR, MVT::v4i32, 2 },
3322 { ISD::ROTR, MVT::v8i16, 2 },
3323 { ISD::ROTR, MVT::v16i8, 2 }
3324 };
3325 static const CostTblEntry X64CostTbl[] = { // 64-bit targets
3326 { ISD::ROTL, MVT::i64, 1 },
3327 { ISD::ROTR, MVT::i64, 1 },
3328 { ISD::FSHL, MVT::i64, 4 }
3329 };
3330 static const CostTblEntry X86CostTbl[] = { // 32 or 64-bit targets
3331 { ISD::ROTL, MVT::i32, 1 },
3332 { ISD::ROTL, MVT::i16, 1 },
3333 { ISD::ROTL, MVT::i8, 1 },
3334 { ISD::ROTR, MVT::i32, 1 },
3335 { ISD::ROTR, MVT::i16, 1 },
3336 { ISD::ROTR, MVT::i8, 1 },
3337 { ISD::FSHL, MVT::i32, 4 },
3338 { ISD::FSHL, MVT::i16, 4 },
3339 { ISD::FSHL, MVT::i8, 4 }
3340 };
3341
3342 Intrinsic::ID IID = ICA.getID();
3343 Type *RetTy = ICA.getReturnType();
3344 const SmallVectorImpl<const Value *> &Args = ICA.getArgs();
3345 unsigned ISD = ISD::DELETED_NODE;
3346 switch (IID) {
3347 default:
3348 break;
3349 case Intrinsic::fshl:
3350 ISD = ISD::FSHL;
3351 if (Args[0] == Args[1])
3352 ISD = ISD::ROTL;
3353 break;
3354 case Intrinsic::fshr:
3355 // FSHR has same costs so don't duplicate.
3356 ISD = ISD::FSHL;
3357 if (Args[0] == Args[1])
3358 ISD = ISD::ROTR;
3359 break;
3360 }
3361
3362 if (ISD != ISD::DELETED_NODE) {
3363 // Legalize the type.
3364 std::pair<InstructionCost, MVT> LT =
3365 TLI->getTypeLegalizationCost(DL, RetTy);
3366 MVT MTy = LT.second;
3367
3368 // Attempt to lookup cost.
3369 if (ST->hasAVX512())
3370 if (const auto *Entry = CostTableLookup(AVX512CostTbl, ISD, MTy))
3371 return LT.first * Entry->Cost;
3372
3373 if (ST->hasXOP())
3374 if (const auto *Entry = CostTableLookup(XOPCostTbl, ISD, MTy))
3375 return LT.first * Entry->Cost;
3376
3377 if (ST->is64Bit())
3378 if (const auto *Entry = CostTableLookup(X64CostTbl, ISD, MTy))
3379 return LT.first * Entry->Cost;
3380
3381 if (const auto *Entry = CostTableLookup(X86CostTbl, ISD, MTy))
3382 return LT.first * Entry->Cost;
3383 }
3384
3385 return BaseT::getIntrinsicInstrCost(ICA, CostKind);
3386}
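// Note (illustrative): the Args[0] == Args[1] checks above are what turn a
// funnel shift into a rotate. E.g. llvm.fshl.i32(%x, %x, %c) is costed as
// ISD::ROTL (1 in X86CostTbl), whereas llvm.fshl.i32(%x, %y, %c) stays
// ISD::FSHL (4).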
3387
3388InstructionCost X86TTIImpl::getVectorInstrCost(unsigned Opcode, Type *Val,
3389 unsigned Index) {
3390 static const CostTblEntry SLMCostTbl[] = {
3391 { ISD::EXTRACT_VECTOR_ELT, MVT::i8, 4 },
3392 { ISD::EXTRACT_VECTOR_ELT, MVT::i16, 4 },
3393 { ISD::EXTRACT_VECTOR_ELT, MVT::i32, 4 },
3394 { ISD::EXTRACT_VECTOR_ELT, MVT::i64, 7 }
3395 };
3396
3397 assert(Val->isVectorTy() && "This must be a vector type");
24
'?' condition is true
3398 Type *ScalarType = Val->getScalarType();
3399 int RegisterFileMoveCost = 0;
3400
3401 // Non-immediate extraction/insertion can be handled as a sequence of
3402 // aliased loads+stores via the stack.
3403 if (Index == -1U && (Opcode == Instruction::ExtractElement ||
3404 Opcode == Instruction::InsertElement)) {
3405 // TODO: On some SSE41+ targets, we expand to cmp+splat+select patterns:
3406 // inselt N0, N1, N2 --> select (SplatN2 == {0,1,2...}) ? SplatN1 : N0.
3407
3408 // TODO: Move this to BasicTTIImpl.h? We'd need better gep + index handling.
3409 assert(isa<FixedVectorType>(Val) && "Fixed vector type expected");
3410 Align VecAlign = DL.getPrefTypeAlign(Val);
3411 Align SclAlign = DL.getPrefTypeAlign(ScalarType);
3412
3413 // Extract - store vector to stack, load scalar.
3414 if (Opcode == Instruction::ExtractElement) {
3415 return getMemoryOpCost(Instruction::Store, Val, VecAlign, 0,
3416 TTI::TargetCostKind::TCK_RecipThroughput) +
3417 getMemoryOpCost(Instruction::Load, ScalarType, SclAlign, 0,
3418 TTI::TargetCostKind::TCK_RecipThroughput);
3419 }
3420 // Insert - store vector to stack, store scalar, load vector.
3421 if (Opcode == Instruction::InsertElement) {
3422 return getMemoryOpCost(Instruction::Store, Val, VecAlign, 0,
3423 TTI::TargetCostKind::TCK_RecipThroughput) +
3424 getMemoryOpCost(Instruction::Store, ScalarType, SclAlign, 0,
3425 TTI::TargetCostKind::TCK_RecipThroughput) +
3426 getMemoryOpCost(Instruction::Load, Val, VecAlign, 0,
3427 TTI::TargetCostKind::TCK_RecipThroughput);
3428 }
3429 }
3430
3431 if (Index != -1U && (Opcode == Instruction::ExtractElement ||
24.1
'Opcode' is equal to ExtractElement
3432 Opcode == Instruction::InsertElement)) {
3433 // Legalize the type.
3434 std::pair<InstructionCost, MVT> LT = TLI->getTypeLegalizationCost(DL, Val);
3435
3436 // This type is legalized to a scalar type.
3437 if (!LT.second.isVector())
25
Calling 'MVT::isVector'
29
Returning from 'MVT::isVector'
30
Taking false branch
3438 return 0;
3439
3440 // The type may be split. Normalize the index to the new type.
3441 unsigned NumElts = LT.second.getVectorNumElements();
3442 unsigned SubNumElts = NumElts;
3443 Index = Index % NumElts;
3444
3445 // For >128-bit vectors, we need to extract higher 128-bit subvectors.
3446 // For inserts, we also need to insert the subvector back.
3447 if (LT.second.getSizeInBits() > 128) {
31
Assuming the condition is true
3448 assert((LT.second.getSizeInBits() % 128) == 0 && "Illegal vector");
32
Taking true branch
33
Assuming the condition is true
34
'?' condition is true
3449 unsigned NumSubVecs = LT.second.getSizeInBits() / 128;
3450 SubNumElts = NumElts / NumSubVecs;
35
Value assigned to 'SubNumElts'
3451 if (SubNumElts <= Index) {
36
Assuming 'SubNumElts' is <= 'Index'
37
Taking true branch
3452 RegisterFileMoveCost += (Opcode == Instruction::InsertElement ? 2 : 1);
37.1
'Opcode' is not equal to InsertElement
38
'?' condition is false
3453 Index %= SubNumElts;
39
Division by zero
3454 }
3455 }
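// How the division by zero above arises (analyzer reasoning, values assumed
// for illustration): nothing on this path proves NumElts >= NumSubVecs, so
// the analyzer admits
//   NumSubVecs = LT.second.getSizeInBits() / 128;  // >= 2 on this branch
//   SubNumElts = NumElts / NumSubVecs;             // e.g. 1 / 2 == 0
//   Index %= SubNumElts;                           // modulo by zero
// The legal >128-bit X86 vector types reached here do have at least
// NumSubVecs elements, but that bound is not visible to the analyzer.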
3456
3457 if (Index == 0) {
3458 // Floating point scalars are already located in index #0.
3459 // Many insertions to #0 can fold away for scalar fp-ops, so let's assume
3460 // true for all.
3461 if (ScalarType->isFloatingPointTy())
3462 return RegisterFileMoveCost;
3463
3464 // Assume movd/movq XMM -> GPR is relatively cheap on all targets.
3465 if (ScalarType->isIntegerTy() && Opcode == Instruction::ExtractElement)
3466 return 1 + RegisterFileMoveCost;
3467 }
3468
3469 int ISD = TLI->InstructionOpcodeToISD(Opcode);
3470 assert(ISD && "Unexpected vector opcode");
3471 MVT MScalarTy = LT.second.getScalarType();
3472 if (ST->isSLM())
3473 if (auto *Entry = CostTableLookup(SLMCostTbl, ISD, MScalarTy))
3474 return Entry->Cost + RegisterFileMoveCost;
3475
3476 // Assume pinsr/pextr XMM <-> GPR is relatively cheap on all targets.
3477 if ((MScalarTy == MVT::i16 && ST->hasSSE2()) ||
3478 (MScalarTy.isInteger() && ST->hasSSE41()))
3479 return 1 + RegisterFileMoveCost;
3480
3481 // Assume insertps is relatively cheap on all targets.
3482 if (MScalarTy == MVT::f32 && ST->hasSSE41() &&
3483 Opcode == Instruction::InsertElement)
3484 return 1 + RegisterFileMoveCost;
3485
3486 // For extractions we just need to shuffle the element to index 0, which
3487 // should be very cheap (assume cost = 1). For insertions we need to shuffle
3488 // the elements to its destination. In both cases we must handle the
3489 // subvector move(s).
3490 // If the vector type is already less than 128-bits then don't reduce it.
3491 // TODO: Under what circumstances should we shuffle using the full width?
3492 InstructionCost ShuffleCost = 1;
3493 if (Opcode == Instruction::InsertElement) {
3494 auto *SubTy = cast<VectorType>(Val);
3495 EVT VT = TLI->getValueType(DL, Val);
3496 if (VT.getScalarType() != MScalarTy || VT.getSizeInBits() >= 128)
3497 SubTy = FixedVectorType::get(ScalarType, SubNumElts);
3498 ShuffleCost =
3499 getShuffleCost(TTI::SK_PermuteTwoSrc, SubTy, None, 0, SubTy);
3500 }
3501 int IntOrFpCost = ScalarType->isFloatingPointTy() ? 0 : 1;
3502 return ShuffleCost + IntOrFpCost + RegisterFileMoveCost;
3503 }
3504
3505 // Add to the base cost if we know that the extracted element of a vector is
3506 // destined to be moved to and used in the integer register file.
3507 if (Opcode == Instruction::ExtractElement && ScalarType->isPointerTy())
3508 RegisterFileMoveCost += 1;
3509
3510 return BaseT::getVectorInstrCost(Opcode, Val, Index) + RegisterFileMoveCost;
3511}
3512
3513InstructionCost X86TTIImpl::getScalarizationOverhead(VectorType *Ty,
3514 const APInt &DemandedElts,
3515 bool Insert,
3516 bool Extract) {
3517 InstructionCost Cost = 0;
3518
3519 // For insertions, an ISD::BUILD_VECTOR style vector initialization can be
3520 // much cheaper than an accumulation of ISD::INSERT_VECTOR_ELT.
3521 if (Insert) {
3522 std::pair<InstructionCost, MVT> LT = TLI->getTypeLegalizationCost(DL, Ty);
3523 MVT MScalarTy = LT.second.getScalarType();
3524
3525 if ((MScalarTy == MVT::i16 && ST->hasSSE2()) ||
3526 (MScalarTy.isInteger() && ST->hasSSE41()) ||
3527 (MScalarTy == MVT::f32 && ST->hasSSE41())) {
3528 // For types we can insert directly, insertion into 128-bit sub vectors is
3529 // cheap, followed by a cheap chain of concatenations.
3530 if (LT.second.getSizeInBits() <= 128) {
3531 Cost +=
3532 BaseT::getScalarizationOverhead(Ty, DemandedElts, Insert, false);
3533 } else {
3534 // In each 128-bit lane, if at least one index is demanded but not all
3535 // indices are demanded, and this 128-bit lane is not the first lane of
3536 // the legalized vector, then this lane needs an extracti128; if a
3537 // 128-bit lane has at least one demanded index, it also needs an
3538 // inserti128.
3539
3540 // The following cases will help you build a better understanding:
3541 // Assume we insert several elements into a v8i32 vector in avx2,
3542 // Case#1: inserting into the 1st index needs vpinsrd + inserti128.
3543 // Case#2: inserting into the 5th index needs extracti128 + vpinsrd +
3544 // inserti128.
3545 // Case#3: inserting into indices 4,5,6,7 needs 4*vpinsrd + inserti128.
3546 const int CostValue = *LT.first.getValue();
3547 assert(CostValue >= 0 && "Negative cost!");
3548 unsigned Num128Lanes = LT.second.getSizeInBits() / 128 * CostValue;
3549 unsigned NumElts = LT.second.getVectorNumElements() * CostValue;
3550 APInt WidenedDemandedElts = DemandedElts.zextOrSelf(NumElts);
3551 unsigned Scale = NumElts / Num128Lanes;
3552 // We iterate each 128-lane, and check if we need a
3553 // extracti128/inserti128 for this 128-lane.
3554 for (unsigned I = 0; I < NumElts; I += Scale) {
3555 APInt Mask = WidenedDemandedElts.getBitsSet(NumElts, I, I + Scale);
3556 APInt MaskedDE = Mask & WidenedDemandedElts;
3557 unsigned Population = MaskedDE.countPopulation();
3558 Cost += (Population > 0 && Population != Scale &&
3559 I % LT.second.getVectorNumElements() != 0);
3560 Cost += Population > 0;
3561 }
3562 Cost += DemandedElts.countPopulation();
3563
3564 // For vXf32 cases, insertion into the 0'th index in each v4f32
3565 // 128-bit vector is free.
3566 // NOTE: This assumes legalization widens vXf32 vectors.
3567 if (MScalarTy == MVT::f32)
3568 for (unsigned i = 0, e = cast<FixedVectorType>(Ty)->getNumElements();
3569 i < e; i += 4)
3570 if (DemandedElts[i])
3571 Cost--;
3572 }
3573 } else if (LT.second.isVector()) {
3574 // Without fast insertion, we need to use MOVD/MOVQ to pass each demanded
3575 // integer element as a SCALAR_TO_VECTOR, then we build the vector as a
3576 // series of UNPCK followed by CONCAT_VECTORS - all of these can be
3577 // considered cheap.
3578 if (Ty->isIntOrIntVectorTy())
3579 Cost += DemandedElts.countPopulation();
3580
3581 // Get the smaller of the legalized or original pow2-extended number of
3582 // vector elements, which represents the number of unpacks we'll end up
3583 // performing.
3584 unsigned NumElts = LT.second.getVectorNumElements();
3585 unsigned Pow2Elts =
3586 PowerOf2Ceil(cast<FixedVectorType>(Ty)->getNumElements());
3587 Cost += (std::min<unsigned>(NumElts, Pow2Elts) - 1) * LT.first;
3588 }
3589 }
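// Worked example (illustrative) matching Case#2 above: inserting only element
// 5 of a v8i32 on AVX2 splits into 128-bit lanes {0..3} and {4..7}; the
// second lane has Population == 1 (nonzero and != Scale == 4) and is not the
// first lane, so the loop adds 1 (extracti128) + 1 (inserti128), and
// countPopulation() adds 1 for the vpinsrd, for a total insert overhead of 3.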
3590
3591 // TODO: Use default extraction for now, but we should investigate extending this
3592 // to handle repeated subvector extraction.
3593 if (Extract)
3594 Cost += BaseT::getScalarizationOverhead(Ty, DemandedElts, false, Extract);
3595
3596 return Cost;
3597}
3598
3599InstructionCost X86TTIImpl::getMemoryOpCost(unsigned Opcode, Type *Src,
3600 MaybeAlign Alignment,
3601 unsigned AddressSpace,
3602 TTI::TargetCostKind CostKind,
3603 const Instruction *I) {
3604 // TODO: Handle other cost kinds.
3605 if (CostKind != TTI::TCK_RecipThroughput) {
3606 if (auto *SI = dyn_cast_or_null<StoreInst>(I)) {
3607 // Store instruction with index and scale costs 2 Uops.
3608 // Check the preceding GEP to identify non-const indices.
3609 if (auto *GEP = dyn_cast<GetElementPtrInst>(SI->getPointerOperand())) {
3610 if (!all_of(GEP->indices(), [](Value *V) { return isa<Constant>(V); }))
3611 return TTI::TCC_Basic * 2;
3612 }
3613 }
3614 return TTI::TCC_Basic;
3615 }
3616
3617 assert((Opcode == Instruction::Load || Opcode == Instruction::Store) &&
3618 "Invalid Opcode");
3619 // Type legalization can't handle structs
3620 if (TLI->getValueType(DL, Src, true) == MVT::Other)
3621 return BaseT::getMemoryOpCost(Opcode, Src, Alignment, AddressSpace,
3622 CostKind);
3623
3624 // Legalize the type.
3625 std::pair<InstructionCost, MVT> LT = TLI->getTypeLegalizationCost(DL, Src);
3626
3627 auto *VTy = dyn_cast<FixedVectorType>(Src);
3628
3629 // Handle the simple case of non-vectors.
3630 // NOTE: this assumes that legalization never creates vector from scalars!
3631 if (!VTy || !LT.second.isVector())
3632 // Each load/store unit costs 1.
3633 return LT.first * 1;
3634
3635 bool IsLoad = Opcode == Instruction::Load;
3636
3637 Type *EltTy = VTy->getElementType();
3638
3639 const int EltTyBits = DL.getTypeSizeInBits(EltTy);
3640
3641 InstructionCost Cost = 0;
3642
3643 // Source of truth: how many elements were there in the original IR vector?
3644 const unsigned SrcNumElt = VTy->getNumElements();
3645
3646 // How far have we gotten?
3647 int NumEltRemaining = SrcNumElt;
3648 // Note that we intentionally capture by reference, since NumEltRemaining changes.
3649 auto NumEltDone = [&]() { return SrcNumElt - NumEltRemaining; };
3650
3651 const int MaxLegalOpSizeBytes = divideCeil(LT.second.getSizeInBits(), 8);
3652
3653 // Note that even if we can store 64 bits of an XMM, we still operate on XMM.
3654 const unsigned XMMBits = 128;
3655 if (XMMBits % EltTyBits != 0)
3656 // Vector size must be a multiple of the element size. I.e. no padding.
3657 return BaseT::getMemoryOpCost(Opcode, Src, Alignment, AddressSpace,
3658 CostKind);
3659 const int NumEltPerXMM = XMMBits / EltTyBits;
3660
3661 auto *XMMVecTy = FixedVectorType::get(EltTy, NumEltPerXMM);
3662
3663 for (int CurrOpSizeBytes = MaxLegalOpSizeBytes, SubVecEltsLeft = 0;
3664 NumEltRemaining > 0; CurrOpSizeBytes /= 2) {
3665 // How many elements would a single op deal with at once?
3666 if ((8 * CurrOpSizeBytes) % EltTyBits != 0)
3667 // Vector size must be a multiple of the element size. I.e. no padding.
3668 return BaseT::getMemoryOpCost(Opcode, Src, Alignment, AddressSpace,
3669 CostKind);
3670 int CurrNumEltPerOp = (8 * CurrOpSizeBytes) / EltTyBits;
3671
3672 assert(CurrOpSizeBytes > 0 && CurrNumEltPerOp > 0 && "How'd we get here?");
3673 assert((((NumEltRemaining * EltTyBits) < (2 * 8 * CurrOpSizeBytes)) ||
3674 (CurrOpSizeBytes == MaxLegalOpSizeBytes)) &&
3675 "Unless we haven't halved the op size yet, "
3676 "we have less than two op's sized units of work left.");
3677
3678 auto *CurrVecTy = CurrNumEltPerOp > NumEltPerXMM
3679 ? FixedVectorType::get(EltTy, CurrNumEltPerOp)
3680 : XMMVecTy;
3681
3682 assert(CurrVecTy->getNumElements() % CurrNumEltPerOp == 0 &&
3683 "After halving sizes, the vector elt count is no longer a multiple "
3684 "of number of elements per operation?");
3685 auto *CoalescedVecTy =
3686 CurrNumEltPerOp == 1
3687 ? CurrVecTy
3688 : FixedVectorType::get(
3689 IntegerType::get(Src->getContext(),
3690 EltTyBits * CurrNumEltPerOp),
3691 CurrVecTy->getNumElements() / CurrNumEltPerOp);
3692 assert(DL.getTypeSizeInBits(CoalescedVecTy) ==
3693 DL.getTypeSizeInBits(CurrVecTy) &&
3694 "coalescing elements doesn't change vector width.");
3695
3696 while (NumEltRemaining > 0) {
3697 assert(SubVecEltsLeft >= 0 && "Subreg element count overconsumption?");
3698
3699 // Can we use this vector size, as per the remaining element count?
3700 // Iff the vector is naturally aligned, we can do a wide load regardless.
3701 if (NumEltRemaining < CurrNumEltPerOp &&
3702 (!IsLoad || Alignment.valueOrOne() < CurrOpSizeBytes) &&
3703 CurrOpSizeBytes != 1)
3704 break; // Try a smaller vector size.
3705
3706 bool Is0thSubVec = (NumEltDone() % LT.second.getVectorNumElements()) == 0;
3707
3708 // If we have fully processed the previous reg, we need to replenish it.
3709 if (SubVecEltsLeft == 0) {
3710 SubVecEltsLeft += CurrVecTy->getNumElements();
3711 // And that's free only for the 0'th subvector of a legalized vector.
3712 if (!Is0thSubVec)
3713 Cost += getShuffleCost(IsLoad ? TTI::ShuffleKind::SK_InsertSubvector
3714 : TTI::ShuffleKind::SK_ExtractSubvector,
3715 VTy, None, NumEltDone(), CurrVecTy);
3716 }
3717
3718 // While we can directly load/store ZMM, YMM, and 64-bit halves of XMM,
3719 // for smaller widths (32/16/8) we have to insert/extract them separately.
3720 // Again, it's free for the 0'th subreg (if op is 32/64 bit wide,
3721 // but let's pretend that it is also true for 16/8 bit wide ops...)
3722 if (CurrOpSizeBytes <= 32 / 8 && !Is0thSubVec) {
3723 int NumEltDoneInCurrXMM = NumEltDone() % NumEltPerXMM;
3724 assert(NumEltDoneInCurrXMM % CurrNumEltPerOp == 0 && "");
3725 int CoalescedVecEltIdx = NumEltDoneInCurrXMM / CurrNumEltPerOp;
3726 APInt DemandedElts =
3727 APInt::getBitsSet(CoalescedVecTy->getNumElements(),
3728 CoalescedVecEltIdx, CoalescedVecEltIdx + 1);
3729 assert(DemandedElts.countPopulation() == 1 && "Inserting single value");
3730 Cost += getScalarizationOverhead(CoalescedVecTy, DemandedElts, IsLoad,
3731 !IsLoad);
3732 }
3733
3734 // This isn't exactly right. We're using slow unaligned 32-byte accesses
3735 // as a proxy for a double-pumped AVX memory interface such as on
3736 // Sandybridge.
3737 if (CurrOpSizeBytes == 32 && ST->isUnalignedMem32Slow())
3738 Cost += 2;
3739 else
3740 Cost += 1;
3741
3742 SubVecEltsLeft -= CurrNumEltPerOp;
3743 NumEltRemaining -= CurrNumEltPerOp;
3744 Alignment = commonAlignment(Alignment.valueOrOne(), CurrOpSizeBytes);
3745 }
3746 }
3747
3748 assert(NumEltRemaining <= 0 && "Should have processed all the elements.");
3749
3750 return Cost;
3751}
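// Worked sketch (illustrative, not part of the original file): the loop above
// charges, for each memory op actually emitted, a base cost of 1 (or 2 when a
// 32-byte op hits a subtarget with slow unaligned 32-byte accesses), plus an
// insert/extract-subvector shuffle whenever a non-0'th part of the legalized
// vector has to be replenished, plus scalarization overhead when the op is
// 32 bits or narrower and therefore has to be moved in/out of an XMM lane
// separately. Roughly:
//
//   Cost = (per emitted op) (SlowUnaligned32ByteOp ? 2 : 1)
//        + (per non-0'th subvector replenished) subvector shuffle cost
//        + (per sub-32-bit non-0'th chunk) single-element scalarization cost
//
// where the shuffle and scalarization numbers come from the subtarget tables.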
3752
3753InstructionCost
3754X86TTIImpl::getMaskedMemoryOpCost(unsigned Opcode, Type *SrcTy, Align Alignment,
3755 unsigned AddressSpace,
3756 TTI::TargetCostKind CostKind) {
3757 bool IsLoad = (Instruction::Load == Opcode);
3758 bool IsStore = (Instruction::Store == Opcode);
3759
3760 auto *SrcVTy = dyn_cast<FixedVectorType>(SrcTy);
3761 if (!SrcVTy)
3762 // For a scalar type, take the regular (unmasked) memory cost.
3763 return getMemoryOpCost(Opcode, SrcTy, Alignment, AddressSpace, CostKind);
3764
3765 unsigned NumElem = SrcVTy->getNumElements();
3766 auto *MaskTy =
3767 FixedVectorType::get(Type::getInt8Ty(SrcVTy->getContext()), NumElem);
3768 if ((IsLoad && !isLegalMaskedLoad(SrcVTy, Alignment)) ||
3769 (IsStore && !isLegalMaskedStore(SrcVTy, Alignment))) {
3770 // Scalarization
3771 APInt DemandedElts = APInt::getAllOnes(NumElem);
3772 InstructionCost MaskSplitCost =
3773 getScalarizationOverhead(MaskTy, DemandedElts, false, true);
3774 InstructionCost ScalarCompareCost = getCmpSelInstrCost(
3775 Instruction::ICmp, Type::getInt8Ty(SrcVTy->getContext()), nullptr,
3776 CmpInst::BAD_ICMP_PREDICATE, CostKind);
3777 InstructionCost BranchCost = getCFInstrCost(Instruction::Br, CostKind);
3778 InstructionCost MaskCmpCost = NumElem * (BranchCost + ScalarCompareCost);
3779 InstructionCost ValueSplitCost =
3780 getScalarizationOverhead(SrcVTy, DemandedElts, IsLoad, IsStore);
3781 InstructionCost MemopCost =
3782 NumElem * BaseT::getMemoryOpCost(Opcode, SrcVTy->getScalarType(),
3783 Alignment, AddressSpace, CostKind);
3784 return MemopCost + ValueSplitCost + MaskSplitCost + MaskCmpCost;
3785 }
3786
3787 // Legalize the type.
3788 std::pair<InstructionCost, MVT> LT = TLI->getTypeLegalizationCost(DL, SrcVTy);
3789 auto VT = TLI->getValueType(DL, SrcVTy);
3790 InstructionCost Cost = 0;
3791 if (VT.isSimple() && LT.second != VT.getSimpleVT() &&
3792 LT.second.getVectorNumElements() == NumElem)
3793 // Promotion requires extend/truncate for data and a shuffle for mask.
3794 Cost += getShuffleCost(TTI::SK_PermuteTwoSrc, SrcVTy, None, 0, nullptr) +
3795 getShuffleCost(TTI::SK_PermuteTwoSrc, MaskTy, None, 0, nullptr);
3796
3797 else if (LT.first * LT.second.getVectorNumElements() > NumElem) {
3798 auto *NewMaskTy = FixedVectorType::get(MaskTy->getElementType(),
3799 LT.second.getVectorNumElements());
3800 // Expanding requires filling the mask with zeroes.
3801 Cost += getShuffleCost(TTI::SK_InsertSubvector, NewMaskTy, None, 0, MaskTy);
3802 }
3803
3804 // Pre-AVX512: each maskmov load costs ~2 and each maskmov store costs ~8.
3805 if (!ST->hasAVX512())
3806 return Cost + LT.first * (IsLoad ? 2 : 8);
3807
3808 // AVX-512 masked load/store is cheaper.
3809 return Cost + LT.first;
3810}
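// Worked example (a rough sketch, not from the original source): for a
// hypothetical v8f32 masked load on a subtarget where masked loads are not
// legal, the scalarization branch above yields
//
//   Cost = MaskSplitCost                         // extract the 8 mask bytes
//        + 8 * (BranchCost + ScalarCompareCost)  // test each mask lane and branch
//        + ValueSplitCost                        // insert 8 scalar loads into the result
//        + 8 * ScalarLoadCost;                   // the scalar loads themselves
//
// The individual terms come from getScalarizationOverhead, getCmpSelInstrCost,
// getCFInstrCost and the scalar getMemoryOpCost, so the absolute number is
// subtarget dependent.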
3811
3812InstructionCost X86TTIImpl::getAddressComputationCost(Type *Ty,
3813 ScalarEvolution *SE,
3814 const SCEV *Ptr) {
3815 // Address computations in vectorized code with non-consecutive addresses will
3816 // likely result in more instructions compared to scalar code where the
3817 // computation can more often be merged into the index mode. The resulting
3818 // extra micro-ops can significantly decrease throughput.
3819 const unsigned NumVectorInstToHideOverhead = 10;
3820
3821 // Cost modeling of Strided Access Computation is hidden by the indexing
3822 // modes of X86 regardless of the stride value. We don't believe that there
3823 // is a difference between constant strided access in general and a constant
3824 // stride value which is less than or equal to 64.
3825 // Even in the case of (loop invariant) stride whose value is not known at
3826 // compile time, the address computation will not incur more than one extra
3827 // ADD instruction.
3828 if (Ty->isVectorTy() && SE) {
3829 if (!BaseT::isStridedAccess(Ptr))
3830 return NumVectorInstToHideOverhead;
3831 if (!BaseT::getConstantStrideStep(SE, Ptr))
3832 return 1;
3833 }
3834
3835 return BaseT::getAddressComputationCost(Ty, SE, Ptr);
3836}
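// Worked example (illustrative): for a vector access with no identifiable
// stride, the function returns NumVectorInstToHideOverhead (10), modelling the
// extra address arithmetic that cannot be folded into an addressing mode; for
// a strided access whose (possibly loop-invariant) stride is not a
// compile-time constant, it returns 1, i.e. at most one extra ADD; everything
// else falls back to the generic BaseT cost.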
3837
3838InstructionCost
3839X86TTIImpl::getArithmeticReductionCost(unsigned Opcode, VectorType *ValTy,
3840 Optional<FastMathFlags> FMF,
3841 TTI::TargetCostKind CostKind) {
3842 if (TTI::requiresOrderedReduction(FMF))
3843 return BaseT::getArithmeticReductionCost(Opcode, ValTy, FMF, CostKind);
3844
3845 // We use the Intel Architecture Code Analyzer (IACA) to measure the
3846 // throughput and use that as the cost.
3847
3848 static const CostTblEntry SLMCostTblNoPairWise[] = {
3849 { ISD::FADD, MVT::v2f64, 3 },
3850 { ISD::ADD, MVT::v2i64, 5 },
3851 };
3852
3853 static const CostTblEntry SSE2CostTblNoPairWise[] = {
3854 { ISD::FADD, MVT::v2f64, 2 },
3855 { ISD::FADD, MVT::v2f32, 2 },
3856 { ISD::FADD, MVT::v4f32, 4 },
3857 { ISD::ADD, MVT::v2i64, 2 }, // The data reported by the IACA tool is "1.6".
3858 { ISD::ADD, MVT::v2i32, 2 }, // FIXME: chosen to be less than v4i32
3859 { ISD::ADD, MVT::v4i32, 3 }, // The data reported by the IACA tool is "3.3".
3860 { ISD::ADD, MVT::v2i16, 2 }, // The data reported by the IACA tool is "4.3".
3861 { ISD::ADD, MVT::v4i16, 3 }, // The data reported by the IACA tool is "4.3".
3862 { ISD::ADD, MVT::v8i16, 4 }, // The data reported by the IACA tool is "4.3".
3863 { ISD::ADD, MVT::v2i8, 2 },
3864 { ISD::ADD, MVT::v4i8, 2 },
3865 { ISD::ADD, MVT::v8i8, 2 },
3866 { ISD::ADD, MVT::v16i8, 3 },
3867 };
3868
3869 static const CostTblEntry AVX1CostTblNoPairWise[] = {
3870 { ISD::FADD, MVT::v4f64, 3 },
3871 { ISD::FADD, MVT::v4f32, 3 },
3872 { ISD::FADD, MVT::v8f32, 4 },
3873 { ISD::ADD, MVT::v2i64, 1 }, // The data reported by the IACA tool is "1.5".
3874 { ISD::ADD, MVT::v4i64, 3 },
3875 { ISD::ADD, MVT::v8i32, 5 },
3876 { ISD::ADD, MVT::v16i16, 5 },
3877 { ISD::ADD, MVT::v32i8, 4 },
3878 };
3879
3880 int ISD = TLI->InstructionOpcodeToISD(Opcode);
3881 assert(ISD && "Invalid opcode");
3882
3883 // Before legalizing the type, give a chance to look up illegal narrow types
3884 // in the table.
3885 // FIXME: Is there a better way to do this?
3886 EVT VT = TLI->getValueType(DL, ValTy);
3887 if (VT.isSimple()) {
3888 MVT MTy = VT.getSimpleVT();
3889 if (ST->isSLM())
3890 if (const auto *Entry = CostTableLookup(SLMCostTblNoPairWise, ISD, MTy))
3891 return Entry->Cost;
3892
3893 if (ST->hasAVX())
3894 if (const auto *Entry = CostTableLookup(AVX1CostTblNoPairWise, ISD, MTy))
3895 return Entry->Cost;
3896
3897 if (ST->hasSSE2())
3898 if (const auto *Entry = CostTableLookup(SSE2CostTblNoPairWise, ISD, MTy))
3899 return Entry->Cost;
3900 }
3901
3902 std::pair<InstructionCost, MVT> LT = TLI->getTypeLegalizationCost(DL, ValTy);
3903
3904 MVT MTy = LT.second;
3905
3906 auto *ValVTy = cast<FixedVectorType>(ValTy);
3907
3908 // Special case: vXi8 mul reductions are performed as vXi16.
3909 if (ISD == ISD::MUL && MTy.getScalarType() == MVT::i8) {
3910 auto *WideSclTy = IntegerType::get(ValVTy->getContext(), 16);
3911 auto *WideVecTy = FixedVectorType::get(WideSclTy, ValVTy->getNumElements());
3912 return getCastInstrCost(Instruction::ZExt, WideVecTy, ValTy,
3913 TargetTransformInfo::CastContextHint::None,
3914 CostKind) +
3915 getArithmeticReductionCost(Opcode, WideVecTy, FMF, CostKind);
3916 }
3917
3918 InstructionCost ArithmeticCost = 0;
3919 if (LT.first != 1 && MTy.isVector() &&
3920 MTy.getVectorNumElements() < ValVTy->getNumElements()) {
3921 // Type needs to be split. We need LT.first - 1 arithmetic ops.
3922 auto *SingleOpTy = FixedVectorType::get(ValVTy->getElementType(),
3923 MTy.getVectorNumElements());
3924 ArithmeticCost = getArithmeticInstrCost(Opcode, SingleOpTy, CostKind);
3925 ArithmeticCost *= LT.first - 1;
3926 }
3927
3928 if (ST->isSLM())
3929 if (const auto *Entry = CostTableLookup(SLMCostTblNoPairWise, ISD, MTy))
3930 return ArithmeticCost + Entry->Cost;
3931
3932 if (ST->hasAVX())
3933 if (const auto *Entry = CostTableLookup(AVX1CostTblNoPairWise, ISD, MTy))
3934 return ArithmeticCost + Entry->Cost;
3935
3936 if (ST->hasSSE2())
3937 if (const auto *Entry = CostTableLookup(SSE2CostTblNoPairWise, ISD, MTy))
3938 return ArithmeticCost + Entry->Cost;
3939
3940 // FIXME: These assume a naive kshift+binop lowering, which is probably
3941 // conservative in most cases.
3942 static const CostTblEntry AVX512BoolReduction[] = {
3943 { ISD::AND, MVT::v2i1, 3 },
3944 { ISD::AND, MVT::v4i1, 5 },
3945 { ISD::AND, MVT::v8i1, 7 },
3946 { ISD::AND, MVT::v16i1, 9 },
3947 { ISD::AND, MVT::v32i1, 11 },
3948 { ISD::AND, MVT::v64i1, 13 },
3949 { ISD::OR, MVT::v2i1, 3 },
3950 { ISD::OR, MVT::v4i1, 5 },
3951 { ISD::OR, MVT::v8i1, 7 },
3952 { ISD::OR, MVT::v16i1, 9 },
3953 { ISD::OR, MVT::v32i1, 11 },
3954 { ISD::OR, MVT::v64i1, 13 },
3955 };
3956
3957 static const CostTblEntry AVX2BoolReduction[] = {
3958 { ISD::AND, MVT::v16i16, 2 }, // vpmovmskb + cmp
3959 { ISD::AND, MVT::v32i8, 2 }, // vpmovmskb + cmp
3960 { ISD::OR, MVT::v16i16, 2 }, // vpmovmskb + cmp
3961 { ISD::OR, MVT::v32i8, 2 }, // vpmovmskb + cmp
3962 };
3963
3964 static const CostTblEntry AVX1BoolReduction[] = {
3965 { ISD::AND, MVT::v4i64, 2 }, // vmovmskpd + cmp
3966 { ISD::AND, MVT::v8i32, 2 }, // vmovmskps + cmp
3967 { ISD::AND, MVT::v16i16, 4 }, // vextractf128 + vpand + vpmovmskb + cmp
3968 { ISD::AND, MVT::v32i8, 4 }, // vextractf128 + vpand + vpmovmskb + cmp
3969 { ISD::OR, MVT::v4i64, 2 }, // vmovmskpd + cmp
3970 { ISD::OR, MVT::v8i32, 2 }, // vmovmskps + cmp
3971 { ISD::OR, MVT::v16i16, 4 }, // vextractf128 + vpor + vpmovmskb + cmp
3972 { ISD::OR, MVT::v32i8, 4 }, // vextractf128 + vpor + vpmovmskb + cmp
3973 };
3974
3975 static const CostTblEntry SSE2BoolReduction[] = {
3976 { ISD::AND, MVT::v2i64, 2 }, // movmskpd + cmp
3977 { ISD::AND, MVT::v4i32, 2 }, // movmskps + cmp
3978 { ISD::AND, MVT::v8i16, 2 }, // pmovmskb + cmp
3979 { ISD::AND, MVT::v16i8, 2 }, // pmovmskb + cmp
3980 { ISD::OR, MVT::v2i64, 2 }, // movmskpd + cmp
3981 { ISD::OR, MVT::v4i32, 2 }, // movmskps + cmp
3982 { ISD::OR, MVT::v8i16, 2 }, // pmovmskb + cmp
3983 { ISD::OR, MVT::v16i8, 2 }, // pmovmskb + cmp
3984 };
3985
3986 // Handle bool allof/anyof patterns.
3987 if (ValVTy->getElementType()->isIntegerTy(1)) {
3988 InstructionCost ArithmeticCost = 0;
3989 if (LT.first != 1 && MTy.isVector() &&
3990 MTy.getVectorNumElements() < ValVTy->getNumElements()) {
3991 // Type needs to be split. We need LT.first - 1 arithmetic ops.
3992 auto *SingleOpTy = FixedVectorType::get(ValVTy->getElementType(),
3993 MTy.getVectorNumElements());
3994 ArithmeticCost = getArithmeticInstrCost(Opcode, SingleOpTy, CostKind);
3995 ArithmeticCost *= LT.first - 1;
3996 }
3997
3998 if (ST->hasAVX512())
3999 if (const auto *Entry = CostTableLookup(AVX512BoolReduction, ISD, MTy))
4000 return ArithmeticCost + Entry->Cost;
4001 if (ST->hasAVX2())
4002 if (const auto *Entry = CostTableLookup(AVX2BoolReduction, ISD, MTy))
4003 return ArithmeticCost + Entry->Cost;
4004 if (ST->hasAVX())
4005 if (const auto *Entry = CostTableLookup(AVX1BoolReduction, ISD, MTy))
4006 return ArithmeticCost + Entry->Cost;
4007 if (ST->hasSSE2())
4008 if (const auto *Entry = CostTableLookup(SSE2BoolReduction, ISD, MTy))
4009 return ArithmeticCost + Entry->Cost;
4010
4011 return BaseT::getArithmeticReductionCost(Opcode, ValVTy, FMF, CostKind);
4012 }
4013
4014 unsigned NumVecElts = ValVTy->getNumElements();
4015 unsigned ScalarSize = ValVTy->getScalarSizeInBits();
4016
4017 // Special case power of 2 reductions where the scalar type isn't changed
4018 // by type legalization.
4019 if (!isPowerOf2_32(NumVecElts) || ScalarSize != MTy.getScalarSizeInBits())
4020 return BaseT::getArithmeticReductionCost(Opcode, ValVTy, FMF, CostKind);
4021
4022 InstructionCost ReductionCost = 0;
4023
4024 auto *Ty = ValVTy;
4025 if (LT.first != 1 && MTy.isVector() &&
4026 MTy.getVectorNumElements() < ValVTy->getNumElements()) {
4027 // Type needs to be split. We need LT.first - 1 arithmetic ops.
4028 Ty = FixedVectorType::get(ValVTy->getElementType(),
4029 MTy.getVectorNumElements());
4030 ReductionCost = getArithmeticInstrCost(Opcode, Ty, CostKind);
4031 ReductionCost *= LT.first - 1;
4032 NumVecElts = MTy.getVectorNumElements();
4033 }
4034
4035 // Now handle reduction with the legal type, taking into account size changes
4036 // at each level.
4037 while (NumVecElts > 1) {
4038 // Determine the size of the remaining vector we need to reduce.
4039 unsigned Size = NumVecElts * ScalarSize;
4040 NumVecElts /= 2;
4041 // If we're reducing from 256/512 bits, use an extract_subvector.
4042 if (Size > 128) {
4043 auto *SubTy = FixedVectorType::get(ValVTy->getElementType(), NumVecElts);
4044 ReductionCost +=
4045 getShuffleCost(TTI::SK_ExtractSubvector, Ty, None, NumVecElts, SubTy);
4046 Ty = SubTy;
4047 } else if (Size == 128) {
4048 // Reducing from 128 bits is a permute of v2f64/v2i64.
4049 FixedVectorType *ShufTy;
4050 if (ValVTy->isFloatingPointTy())
4051 ShufTy =
4052 FixedVectorType::get(Type::getDoubleTy(ValVTy->getContext()), 2);
4053 else
4054 ShufTy =
4055 FixedVectorType::get(Type::getInt64Ty(ValVTy->getContext()), 2);
4056 ReductionCost +=
4057 getShuffleCost(TTI::SK_PermuteSingleSrc, ShufTy, None, 0, nullptr);
4058 } else if (Size == 64) {
4059 // Reducing from 64 bits is a shuffle of v4f32/v4i32.
4060 FixedVectorType *ShufTy;
4061 if (ValVTy->isFloatingPointTy())
4062 ShufTy =
4063 FixedVectorType::get(Type::getFloatTy(ValVTy->getContext()), 4);
4064 else
4065 ShufTy =
4066 FixedVectorType::get(Type::getInt32Ty(ValVTy->getContext()), 4);
4067 ReductionCost +=
4068 getShuffleCost(TTI::SK_PermuteSingleSrc, ShufTy, None, 0, nullptr);
4069 } else {
4070 // Reducing from smaller size is a shift by immediate.
4071 auto *ShiftTy = FixedVectorType::get(
4072 Type::getIntNTy(ValVTy->getContext(), Size), 128 / Size);
4073 ReductionCost += getArithmeticInstrCost(
4074 Instruction::LShr, ShiftTy, CostKind,
4075 TargetTransformInfo::OK_AnyValue,
4076 TargetTransformInfo::OK_UniformConstantValue,
4077 TargetTransformInfo::OP_None, TargetTransformInfo::OP_None);
4078 }
4079
4080 // Add the arithmetic op for this level.
4081 ReductionCost += getArithmeticInstrCost(Opcode, Ty, CostKind);
4082 }
4083
4084 // Add the final extract element to the cost.
4085 return ReductionCost + getVectorInstrCost(Instruction::ExtractElement, Ty, 0);
4086}
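// Worked trace (a sketch, not from the original source): a v8i16 multiply
// reduction on plain SSE2 is not covered by the tables above, so it goes
// through the halving loop:
//
//   8 -> 4 elts: Size = 128 -> v2i64 permute + v8i16 mul
//   4 -> 2 elts: Size =  64 -> v4i32 shuffle + v8i16 mul
//   2 -> 1 elt : Size =  32 -> v4i32 logical shift by immediate + v8i16 mul
//   plus one final extractelement of lane 0,
//
// i.e. three shuffles/shifts, three arithmetic ops and an extract, each priced
// by the corresponding cost hooks.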
4087
4088InstructionCost X86TTIImpl::getMinMaxCost(Type *Ty, Type *CondTy,
4089 bool IsUnsigned) {
4090 std::pair<InstructionCost, MVT> LT = TLI->getTypeLegalizationCost(DL, Ty);
4091
4092 MVT MTy = LT.second;
4093
4094 int ISD;
4095 if (Ty->isIntOrIntVectorTy()) {
4096 ISD = IsUnsigned ? ISD::UMIN : ISD::SMIN;
4097 } else {
4098 assert(Ty->isFPOrFPVectorTy() &&
4099 "Expected floating point or integer vector type.");
4100 ISD = ISD::FMINNUM;
4101 }
4102
4103 static const CostTblEntry SSE1CostTbl[] = {
4104 {ISD::FMINNUM, MVT::v4f32, 1},
4105 };
4106
4107 static const CostTblEntry SSE2CostTbl[] = {
4108 {ISD::FMINNUM, MVT::v2f64, 1},
4109 {ISD::SMIN, MVT::v8i16, 1},
4110 {ISD::UMIN, MVT::v16i8, 1},
4111 };
4112
4113 static const CostTblEntry SSE41CostTbl[] = {
4114 {ISD::SMIN, MVT::v4i32, 1},
4115 {ISD::UMIN, MVT::v4i32, 1},
4116 {ISD::UMIN, MVT::v8i16, 1},
4117 {ISD::SMIN, MVT::v16i8, 1},
4118 };
4119
4120 static const CostTblEntry SSE42CostTbl[] = {
4121 {ISD::UMIN, MVT::v2i64, 3}, // xor+pcmpgtq+blendvpd
4122 };
4123
4124 static const CostTblEntry AVX1CostTbl[] = {
4125 {ISD::FMINNUM, MVT::v8f32, 1},
4126 {ISD::FMINNUM, MVT::v4f64, 1},
4127 {ISD::SMIN, MVT::v8i32, 3},
4128 {ISD::UMIN, MVT::v8i32, 3},
4129 {ISD::SMIN, MVT::v16i16, 3},
4130 {ISD::UMIN, MVT::v16i16, 3},
4131 {ISD::SMIN, MVT::v32i8, 3},
4132 {ISD::UMIN, MVT::v32i8, 3},
4133 };
4134
4135 static const CostTblEntry AVX2CostTbl[] = {
4136 {ISD::SMIN, MVT::v8i32, 1},
4137 {ISD::UMIN, MVT::v8i32, 1},
4138 {ISD::SMIN, MVT::v16i16, 1},
4139 {ISD::UMIN, MVT::v16i16, 1},
4140 {ISD::SMIN, MVT::v32i8, 1},
4141 {ISD::UMIN, MVT::v32i8, 1},
4142 };
4143
4144 static const CostTblEntry AVX512CostTbl[] = {
4145 {ISD::FMINNUM, MVT::v16f32, 1},
4146 {ISD::FMINNUM, MVT::v8f64, 1},
4147 {ISD::SMIN, MVT::v2i64, 1},
4148 {ISD::UMIN, MVT::v2i64, 1},
4149 {ISD::SMIN, MVT::v4i64, 1},
4150 {ISD::UMIN, MVT::v4i64, 1},
4151 {ISD::SMIN, MVT::v8i64, 1},
4152 {ISD::UMIN, MVT::v8i64, 1},
4153 {ISD::SMIN, MVT::v16i32, 1},
4154 {ISD::UMIN, MVT::v16i32, 1},
4155 };
4156
4157 static const CostTblEntry AVX512BWCostTbl[] = {
4158 {ISD::SMIN, MVT::v32i16, 1},
4159 {ISD::UMIN, MVT::v32i16, 1},
4160 {ISD::SMIN, MVT::v64i8, 1},
4161 {ISD::UMIN, MVT::v64i8, 1},
4162 };
4163
4164 // If we have a native MIN/MAX instruction for this type, use it.
4165 if (ST->hasBWI())
4166 if (const auto *Entry = CostTableLookup(AVX512BWCostTbl, ISD, MTy))
4167 return LT.first * Entry->Cost;
4168
4169 if (ST->hasAVX512())
4170 if (const auto *Entry = CostTableLookup(AVX512CostTbl, ISD, MTy))
4171 return LT.first * Entry->Cost;
4172
4173 if (ST->hasAVX2())
4174 if (const auto *Entry = CostTableLookup(AVX2CostTbl, ISD, MTy))
4175 return LT.first * Entry->Cost;
4176
4177 if (ST->hasAVX())
4178 if (const auto *Entry = CostTableLookup(AVX1CostTbl, ISD, MTy))
4179 return LT.first * Entry->Cost;
4180
4181 if (ST->hasSSE42())
4182 if (const auto *Entry = CostTableLookup(SSE42CostTbl, ISD, MTy))
4183 return LT.first * Entry->Cost;
4184
4185 if (ST->hasSSE41())
4186 if (const auto *Entry = CostTableLookup(SSE41CostTbl, ISD, MTy))
4187 return LT.first * Entry->Cost;
4188
4189 if (ST->hasSSE2())
4190 if (const auto *Entry = CostTableLookup(SSE2CostTbl, ISD, MTy))
4191 return LT.first * Entry->Cost;
4192
4193 if (ST->hasSSE1())
4194 if (const auto *Entry = CostTableLookup(SSE1CostTbl, ISD, MTy))
4195 return LT.first * Entry->Cost;
4196
4197 unsigned CmpOpcode;
4198 if (Ty->isFPOrFPVectorTy()) {
4199 CmpOpcode = Instruction::FCmp;
4200 } else {
4201 assert(Ty->isIntOrIntVectorTy() &&
4202 "expecting floating point or integer type for min/max reduction");
4203 CmpOpcode = Instruction::ICmp;
4204 }
4205
4206 TTI::TargetCostKind CostKind = TTI::TCK_RecipThroughput;
4207 // Otherwise fall back to cmp+select.
4208 InstructionCost Result =
4209 getCmpSelInstrCost(CmpOpcode, Ty, CondTy, CmpInst::BAD_ICMP_PREDICATE,
4210 CostKind) +
4211 getCmpSelInstrCost(Instruction::Select, Ty, CondTy,
4212 CmpInst::BAD_ICMP_PREDICATE, CostKind);
4213 return Result;
4214}
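// Worked example (illustrative): a umin on a type with no native instruction,
// e.g. v2i64 on plain SSE2, misses every table above and falls through to the
// cmp+select expansion, so its cost is
//
//   getCmpSelInstrCost(ICmp,   Ty, CondTy) +
//   getCmpSelInstrCost(Select, Ty, CondTy)
//
// with no further LT.first scaling applied on this fallback path.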
4215
4216InstructionCost
4217X86TTIImpl::getMinMaxReductionCost(VectorType *ValTy, VectorType *CondTy,
4218 bool IsUnsigned,
4219 TTI::TargetCostKind CostKind) {
4220 std::pair<InstructionCost, MVT> LT = TLI->getTypeLegalizationCost(DL, ValTy);
4221
4222 MVT MTy = LT.second;
4223
4224 int ISD;
4225 if (ValTy->isIntOrIntVectorTy()) {
4226 ISD = IsUnsigned ? ISD::UMIN : ISD::SMIN;
4227 } else {
4228 assert(ValTy->isFPOrFPVectorTy() &&
4229 "Expected floating point or integer vector type.");
4230 ISD = ISD::FMINNUM;
4231 }
4232
4233 // We use the Intel Architecture Code Analyzer (IACA) to measure the
4234 // throughput and use that as the cost.
4235
4236 static const CostTblEntry SSE2CostTblNoPairWise[] = {
4237 {ISD::UMIN, MVT::v2i16, 5}, // need pxors to use pminsw/pmaxsw
4238 {ISD::UMIN, MVT::v4i16, 7}, // need pxors to use pminsw/pmaxsw
4239 {ISD::UMIN, MVT::v8i16, 9}, // need pxors to use pminsw/pmaxsw
4240 };
4241
4242 static const CostTblEntry SSE41CostTblNoPairWise[] = {
4243 {ISD::SMIN, MVT::v2i16, 3}, // same as sse2
4244 {ISD::SMIN, MVT::v4i16, 5}, // same as sse2
4245 {ISD::UMIN, MVT::v2i16, 5}, // same as sse2
4246 {ISD::UMIN, MVT::v4i16, 7}, // same as sse2
4247 {ISD::SMIN, MVT::v8i16, 4}, // phminposuw+xor
4248 {ISD::UMIN, MVT::v8i16, 4}, // FIXME: umin is cheaper than umax
4249 {ISD::SMIN, MVT::v2i8, 3}, // pminsb
4250 {ISD::SMIN, MVT::v4i8, 5}, // pminsb
4251 {ISD::SMIN, MVT::v8i8, 7}, // pminsb
4252 {ISD::SMIN, MVT::v16i8, 6},
4253 {ISD::UMIN, MVT::v2i8, 3}, // same as sse2
4254 {ISD::UMIN, MVT::v4i8, 5}, // same as sse2
4255 {ISD::UMIN, MVT::v8i8, 7}, // same as sse2
4256 {ISD::UMIN, MVT::v16i8, 6}, // FIXME: umin is cheaper than umax
4257 };
4258
4259 static const CostTblEntry AVX1CostTblNoPairWise[] = {
4260 {ISD::SMIN, MVT::v16i16, 6},
4261 {ISD::UMIN, MVT::v16i16, 6}, // FIXME: umin is cheaper than umax
4262 {ISD::SMIN, MVT::v32i8, 8},
4263 {ISD::UMIN, MVT::v32i8, 8},
4264 };
4265
4266 static const CostTblEntry AVX512BWCostTblNoPairWise[] = {
4267 {ISD::SMIN, MVT::v32i16, 8},
4268 {ISD::UMIN, MVT::v32i16, 8}, // FIXME: umin is cheaper than umax
4269 {ISD::SMIN, MVT::v64i8, 10},
4270 {ISD::UMIN, MVT::v64i8, 10},
4271 };
4272
4273 // Before legalizing the type, give a chance to look up illegal narrow types
4274 // in the table.
4275 // FIXME: Is there a better way to do this?
4276 EVT VT = TLI->getValueType(DL, ValTy);
4277 if (VT.isSimple()) {
4278 MVT MTy = VT.getSimpleVT();
4279 if (ST->hasBWI())
4280 if (const auto *Entry = CostTableLookup(AVX512BWCostTblNoPairWise, ISD, MTy))
4281 return Entry->Cost;
4282
4283 if (ST->hasAVX())
4284 if (const auto *Entry = CostTableLookup(AVX1CostTblNoPairWise, ISD, MTy))
4285 return Entry->Cost;
4286
4287 if (ST->hasSSE41())
4288 if (const auto *Entry = CostTableLookup(SSE41CostTblNoPairWise, ISD, MTy))
4289 return Entry->Cost;
4290
4291 if (ST->hasSSE2())
4292 if (const auto *Entry = CostTableLookup(SSE2CostTblNoPairWise, ISD, MTy))
4293 return Entry->Cost;
4294 }
4295
4296 auto *ValVTy = cast<FixedVectorType>(ValTy);
4297 unsigned NumVecElts = ValVTy->getNumElements();
4298
4299 auto *Ty = ValVTy;
4300 InstructionCost MinMaxCost = 0;
4301 if (LT.first != 1 && MTy.isVector() &&
4302 MTy.getVectorNumElements() < ValVTy->getNumElements()) {
4303 // Type needs to be split. We need LT.first - 1 operations.
4304 Ty = FixedVectorType::get(ValVTy->getElementType(),
4305 MTy.getVectorNumElements());
4306 auto *SubCondTy = FixedVectorType::get(CondTy->getElementType(),
4307 MTy.getVectorNumElements());
4308 MinMaxCost = getMinMaxCost(Ty, SubCondTy, IsUnsigned);
4309 MinMaxCost *= LT.first - 1;
4310 NumVecElts = MTy.getVectorNumElements();
4311 }
4312
4313 if (ST->hasBWI())
4314 if (const auto *Entry = CostTableLookup(AVX512BWCostTblNoPairWise, ISD, MTy))
4315 return MinMaxCost + Entry->Cost;
4316
4317 if (ST->hasAVX())
4318 if (const auto *Entry = CostTableLookup(AVX1CostTblNoPairWise, ISD, MTy))
4319 return MinMaxCost + Entry->Cost;
4320
4321 if (ST->hasSSE41())
4322 if (const auto *Entry = CostTableLookup(SSE41CostTblNoPairWise, ISD, MTy))
4323 return MinMaxCost + Entry->Cost;
4324
4325 if (ST->hasSSE2())
4326 if (const auto *Entry = CostTableLookup(SSE2CostTblNoPairWise, ISD, MTy))
4327 return MinMaxCost + Entry->Cost;
4328
4329 unsigned ScalarSize = ValTy->getScalarSizeInBits();
4330
4331 // Special case power of 2 reductions where the scalar type isn't changed
4332 // by type legalization.
4333 if (!isPowerOf2_32(ValVTy->getNumElements()) ||
4334 ScalarSize != MTy.getScalarSizeInBits())
4335 return BaseT::getMinMaxReductionCost(ValTy, CondTy, IsUnsigned, CostKind);
4336
4337 // Now handle reduction with the legal type, taking into account size changes
4338 // at each level.
4339 while (NumVecElts > 1) {
4340 // Determine the size of the remaining vector we need to reduce.
4341 unsigned Size = NumVecElts * ScalarSize;
4342 NumVecElts /= 2;
4343 // If we're reducing from 256/512 bits, use an extract_subvector.
4344 if (Size > 128) {
4345 auto *SubTy = FixedVectorType::get(ValVTy->getElementType(), NumVecElts);
4346 MinMaxCost +=
4347 getShuffleCost(TTI::SK_ExtractSubvector, Ty, None, NumVecElts, SubTy);
4348 Ty = SubTy;
4349 } else if (Size == 128) {
4350 // Reducing from 128 bits is a permute of v2f64/v2i64.
4351 VectorType *ShufTy;
4352 if (ValTy->isFloatingPointTy())
4353 ShufTy =
4354 FixedVectorType::get(Type::getDoubleTy(ValTy->getContext()), 2);
4355 else
4356 ShufTy = FixedVectorType::get(Type::getInt64Ty(ValTy->getContext()), 2);
4357 MinMaxCost +=
4358 getShuffleCost(TTI::SK_PermuteSingleSrc, ShufTy, None, 0, nullptr);
4359 } else if (Size == 64) {
4360 // Reducing from 64 bits is a shuffle of v4f32/v4i32.
4361 FixedVectorType *ShufTy;
4362 if (ValTy->isFloatingPointTy())
4363 ShufTy = FixedVectorType::get(Type::getFloatTy(ValTy->getContext()), 4);
4364 else
4365 ShufTy = FixedVectorType::get(Type::getInt32Ty(ValTy->getContext()), 4);
4366 MinMaxCost +=
4367 getShuffleCost(TTI::SK_PermuteSingleSrc, ShufTy, None, 0, nullptr);
4368 } else {
4369 // Reducing from smaller size is a shift by immediate.
4370 auto *ShiftTy = FixedVectorType::get(
4371 Type::getIntNTy(ValTy->getContext(), Size), 128 / Size);
4372 MinMaxCost += getArithmeticInstrCost(
4373 Instruction::LShr, ShiftTy, TTI::TCK_RecipThroughput,
4374 TargetTransformInfo::OK_AnyValue,
4375 TargetTransformInfo::OK_UniformConstantValue,
4376 TargetTransformInfo::OP_None, TargetTransformInfo::OP_None);
4377 }
4378
4379 // Add the arithmetic op for this level.
4380 auto *SubCondTy =
4381 FixedVectorType::get(CondTy->getElementType(), Ty->getNumElements());
4382 MinMaxCost += getMinMaxCost(Ty, SubCondTy, IsUnsigned);
4383 }
4384
4385 // Add the final extract element to the cost.
4386 return MinMaxCost + getVectorInstrCost(Instruction::ExtractElement, Ty, 0);
4387}
4388
4389/// Calculate the cost of materializing a 64-bit value. This helper
4390/// method might only calculate a fraction of a larger immediate. Therefore it
4391/// is valid to return a cost of ZERO.
4392InstructionCost X86TTIImpl::getIntImmCost(int64_t Val) {
4393 if (Val == 0)
4394 return TTI::TCC_Free;
4395
4396 if (isInt<32>(Val))
4397 return TTI::TCC_Basic;
4398
4399 return 2 * TTI::TCC_Basic;
4400}
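// Worked example (illustrative): materializing 0 is free (it is typically a
// register zeroing idiom), any value that fits in a sign-extended 32-bit
// immediate costs one basic unit, and a full 64-bit constant such as
// 0x123456789ABCDEF0 costs 2 * TCC_Basic, reflecting the more expensive
// 64-bit immediate encoding (movabsq).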
4401
4402InstructionCost X86TTIImpl::getIntImmCost(const APInt &Imm, Type *Ty,
4403 TTI::TargetCostKind CostKind) {
4404 assert(Ty->isIntegerTy());
4405
4406 unsigned BitSize = Ty->getPrimitiveSizeInBits();
4407 if (BitSize == 0)
4408 return ~0U;
4409
4410 // Never hoist constants larger than 128bit, because this might lead to
4411 // incorrect code generation or assertions in codegen.
4412 // FIXME: Create a cost model for types larger than i128 once the codegen
4413 // issues have been fixed.
4414 if (BitSize > 128)
4415 return TTI::TCC_Free;
4416
4417 if (Imm == 0)
4418 return TTI::TCC_Free;
4419
4420 // Sign-extend all constants to a multiple of 64-bit.
4421 APInt ImmVal = Imm;
4422 if (BitSize % 64 != 0)
4423 ImmVal = Imm.sext(alignTo(BitSize, 64));
4424
4425 // Split the constant into 64-bit chunks and calculate the cost for each
4426 // chunk.
4427 InstructionCost Cost = 0;
4428 for (unsigned ShiftVal = 0; ShiftVal < BitSize; ShiftVal += 64) {
4429 APInt Tmp = ImmVal.ashr(ShiftVal).sextOrTrunc(64);
4430 int64_t Val = Tmp.getSExtValue();
4431 Cost += getIntImmCost(Val);
4432 }
4433 // We need at least one instruction to materialize the constant.
4434 return std::max<InstructionCost>(1, Cost);
4435}
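// Worked example (a sketch): an i128 constant is sign-extended to a multiple
// of 64 bits and then costed in 64-bit chunks. For the 128-bit constant with
// only bit 64 set, the low chunk is 0 (free) and the high chunk is 1
// (TCC_Basic), giving a total of 1; the std::max at the end guarantees a
// nonzero constant always costs at least one instruction.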
4436
4437InstructionCost X86TTIImpl::getIntImmCostInst(unsigned Opcode, unsigned Idx,
4438 const APInt &Imm, Type *Ty,
4439 TTI::TargetCostKind CostKind,
4440 Instruction *Inst) {
4441 assert(Ty->isIntegerTy());
4442
4443 unsigned BitSize = Ty->getPrimitiveSizeInBits();
4444 // There is no cost model for constants with a bit size of 0. Return TCC_Free
4445 // here, so that constant hoisting will ignore this constant.
4446 if (BitSize == 0)
4447 return TTI::TCC_Free;
4448
4449 unsigned ImmIdx = ~0U;
4450 switch (Opcode) {
4451 default:
4452 return TTI::TCC_Free;
4453 case Instruction::GetElementPtr:
4454 // Always hoist the base address of a GetElementPtr. This prevents the
4455 // creation of new constants for every base constant that gets constant
4456 // folded with the offset.
4457 if (Idx == 0)
4458 return 2 * TTI::TCC_Basic;
4459 return TTI::TCC_Free;
4460 case Instruction::Store:
4461 ImmIdx = 0;
4462 break;
4463 case Instruction::ICmp:
4464 // This is an imperfect hack to prevent constant hoisting of
4465 // compares that might be trying to check if a 64-bit value fits in
4466 // 32-bits. The backend can optimize these cases using a right shift by 32.
4467 // Ideally we would check the compare predicate here. There are also other
4468 // similar immediates the backend can use shifts for.
4469 if (Idx == 1 && Imm.getBitWidth() == 64) {
4470 uint64_t ImmVal = Imm.getZExtValue();
4471 if (ImmVal == 0x100000000ULL || ImmVal == 0xffffffff)
4472 return TTI::TCC_Free;
4473 }
4474 ImmIdx = 1;
4475 break;
4476 case Instruction::And:
4477 // We support 64-bit ANDs with immediates with 32-bits of leading zeroes
4478 // by using a 32-bit operation with implicit zero extension. Detect such
4479 // immediates here as the normal path expects bit 31 to be sign extended.
4480 if (Idx == 1 && Imm.getBitWidth() == 64 && isUInt<32>(Imm.getZExtValue()))
4481 return TTI::TCC_Free;
4482 ImmIdx = 1;
4483 break;
4484 case Instruction::Add:
4485 case Instruction::Sub:
4486 // For add/sub, we can use the opposite instruction for INT32_MIN.
4487 if (Idx == 1 && Imm.getBitWidth() == 64 && Imm.getZExtValue() == 0x80000000)
4488 return TTI::TCC_Free;
4489 ImmIdx = 1;
4490 break;
4491 case Instruction::UDiv:
4492 case Instruction::SDiv:
4493 case Instruction::URem:
4494 case Instruction::SRem:
4495 // Division by constant is typically expanded later into a different
4496 // instruction sequence. This completely changes the constants.
4497 // Report them as "free" to stop ConstantHoist from marking them as opaque.
4498 return TTI::TCC_Free;
4499 case Instruction::Mul:
4500 case Instruction::Or:
4501 case Instruction::Xor:
4502 ImmIdx = 1;
4503 break;
4504 // Always return TCC_Free for the shift value of a shift instruction.
4505 case Instruction::Shl:
4506 case Instruction::LShr:
4507 case Instruction::AShr:
4508 if (Idx == 1)
4509 return TTI::TCC_Free;
4510 break;
4511 case Instruction::Trunc:
4512 case Instruction::ZExt:
4513 case Instruction::SExt:
4514 case Instruction::IntToPtr:
4515 case Instruction::PtrToInt:
4516 case Instruction::BitCast:
4517 case Instruction::PHI:
4518 case Instruction::Call:
4519 case Instruction::Select:
4520 case Instruction::Ret:
4521 case Instruction::Load:
4522 break;
4523 }
4524
4525 if (Idx == ImmIdx) {
4526 int NumConstants = divideCeil(BitSize, 64);
4527 InstructionCost Cost = X86TTIImpl::getIntImmCost(Imm, Ty, CostKind);
4528 return (Cost <= NumConstants * TTI::TCC_Basic)
4529 ? static_cast<int>(TTI::TCC_Free)
4530 : Cost;
4531 }
4532
4533 return X86TTIImpl::getIntImmCost(Imm, Ty, CostKind);
4534}
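// Worked example (illustrative): for an `and i64 %x, 0xFFFFFFFF` the
// immediate is reported as TCC_Free because the backend can use a 32-bit AND
// with implicit zero extension, whereas `and i64 %x, 0xFFFFFFFF00000000`
// takes the generic path and is priced via getIntImmCost, so constant
// hoisting may decide to keep that immediate in a register.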
4535
4536InstructionCost X86TTIImpl::getIntImmCostIntrin(Intrinsic::ID IID, unsigned Idx,
4537 const APInt &Imm, Type *Ty,
4538 TTI::TargetCostKind CostKind) {
4539 assert(Ty->isIntegerTy());
4540
4541 unsigned BitSize = Ty->getPrimitiveSizeInBits();
4542 // There is no cost model for constants with a bit size of 0. Return TCC_Free
4543 // here, so that constant hoisting will ignore this constant.
4544 if (BitSize == 0)
4545 return TTI::TCC_Free;
4546
4547 switch (IID) {
4548 default:
4549 return TTI::TCC_Free;
4550 case Intrinsic::sadd_with_overflow:
4551 case Intrinsic::uadd_with_overflow:
4552 case Intrinsic::ssub_with_overflow:
4553 case Intrinsic::usub_with_overflow:
4554 case Intrinsic::smul_with_overflow:
4555 case Intrinsic::umul_with_overflow:
4556 if ((Idx == 1) && Imm.getBitWidth() <= 64 && isInt<32>(Imm.getSExtValue()))
4557 return TTI::TCC_Free;
4558 break;
4559 case Intrinsic::experimental_stackmap:
4560 if ((Idx < 2) || (Imm.getBitWidth() <= 64 && isInt<64>(Imm.getSExtValue())))
4561 return TTI::TCC_Free;
4562 break;
4563 case Intrinsic::experimental_patchpoint_void:
4564 case Intrinsic::experimental_patchpoint_i64:
4565 if ((Idx < 4) || (Imm.getBitWidth() <= 64 && isInt<64>(Imm.getSExtValue())))
4566 return TTI::TCC_Free;
4567 break;
4568 }
4569 return X86TTIImpl::getIntImmCost(Imm, Ty, CostKind);
4570}
4571
4572InstructionCost X86TTIImpl::getCFInstrCost(unsigned Opcode,
4573 TTI::TargetCostKind CostKind,
4574 const Instruction *I) {
4575 if (CostKind != TTI::TCK_RecipThroughput)
4576 return Opcode == Instruction::PHI ? 0 : 1;
4577 // Branches are assumed to be predicted.
4578 return 0;
4579}
4580
4581int X86TTIImpl::getGatherOverhead() const {
4582 // Some CPUs have more overhead for gather. The specified overhead is relative
4583 // to the Load operation. "2" is the number provided by Intel architects. This
4584 // parameter is used for cost estimation of Gather Op and comparison with
4585 // other alternatives.
4586 // TODO: Remove the explicit hasAVX512()? That would mean we would only
4587 // enable gather with a -march.
4588 if (ST->hasAVX512() || (ST->hasAVX2() && ST->hasFastGather()))
4589 return 2;
4590
4591 return 1024;
4592}
4593
4594int X86TTIImpl::getScatterOverhead() const {
4595 if (ST->hasAVX512())
4596 return 2;
4597
4598 return 1024;
4599}
4600
4601 // Return an average cost of a Gather / Scatter instruction; this may be improved later.
4602// FIXME: Add TargetCostKind support.
4603InstructionCost X86TTIImpl::getGSVectorCost(unsigned Opcode, Type *SrcVTy,
4604 const Value *Ptr, Align Alignment,
4605 unsigned AddressSpace) {
4606
4607 assert(isa<VectorType>(SrcVTy) && "Unexpected type in getGSVectorCost");
4608 unsigned VF = cast<FixedVectorType>(SrcVTy)->getNumElements();
4609
4610 // Try to reduce index size from 64 bit (default for GEP)
4611 // to 32. It is essential for VF 16. If the index can't be reduced to 32, the
4612 // operation will use 16 x 64 indices which do not fit in a zmm and need
4613 // to be split. Also check that the base pointer is the same for all lanes,
4614 // and that there's at most one variable index.
4615 auto getIndexSizeInBits = [](const Value *Ptr, const DataLayout &DL) {
4616 unsigned IndexSize = DL.getPointerSizeInBits();
4617 const GetElementPtrInst *GEP = dyn_cast<GetElementPtrInst>(Ptr);
4618 if (IndexSize < 64 || !GEP)
4619 return IndexSize;
4620
4621 unsigned NumOfVarIndices = 0;
4622 const Value *Ptrs = GEP->getPointerOperand();
4623 if (Ptrs->getType()->isVectorTy() && !getSplatValue(Ptrs))
4624 return IndexSize;
4625 for (unsigned i = 1; i < GEP->getNumOperands(); ++i) {
4626 if (isa<Constant>(GEP->getOperand(i)))
4627 continue;
4628 Type *IndxTy = GEP->getOperand(i)->getType();
4629 if (auto *IndexVTy = dyn_cast<VectorType>(IndxTy))
4630 IndxTy = IndexVTy->getElementType();
4631 if ((IndxTy->getPrimitiveSizeInBits() == 64 &&
4632 !isa<SExtInst>(GEP->getOperand(i))) ||
4633 ++NumOfVarIndices > 1)
4634 return IndexSize; // 64
4635 }
4636 return (unsigned)32;
4637 };
4638
4639 // Trying to reduce IndexSize to 32 bits for vector 16.
4640 // By default the IndexSize is equal to pointer size.
4641 unsigned IndexSize = (ST->hasAVX512() && VF >= 16)
4642 ? getIndexSizeInBits(Ptr, DL)
4643 : DL.getPointerSizeInBits();
4644
4645 auto *IndexVTy = FixedVectorType::get(
4646 IntegerType::get(SrcVTy->getContext(), IndexSize), VF);
4647 std::pair<InstructionCost, MVT> IdxsLT =
4648 TLI->getTypeLegalizationCost(DL, IndexVTy);
4649 std::pair<InstructionCost, MVT> SrcLT =
4650 TLI->getTypeLegalizationCost(DL, SrcVTy);
4651 InstructionCost::CostType SplitFactor =
4652 *std::max(IdxsLT.first, SrcLT.first).getValue();
4653 if (SplitFactor > 1) {
4654 // Handle splitting of vector of pointers
4655 auto *SplitSrcTy =
4656 FixedVectorType::get(SrcVTy->getScalarType(), VF / SplitFactor);
4657 return SplitFactor * getGSVectorCost(Opcode, SplitSrcTy, Ptr, Alignment,
4658 AddressSpace);
4659 }
4660
4661 // The gather / scatter cost is given by Intel architects. It is a rough
4662 // number since we are looking at one instruction at a time.
4663 const int GSOverhead = (Opcode == Instruction::Load)
4664 ? getGatherOverhead()
4665 : getScatterOverhead();
4666 return GSOverhead + VF * getMemoryOpCost(Opcode, SrcVTy->getScalarType(),
4667 MaybeAlign(Alignment), AddressSpace,
4668 TTI::TCK_RecipThroughput);
4669}
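// Worked example (a sketch, not from the original source): a 16-lane gather
// of i32 data on an AVX-512 subtarget. If the GEP feeding the pointer has a
// uniform base and its single variable index is (sign-extended from) 32 bits,
// IndexSize is narrowed to 32, the 16 x i32 indices fit in one zmm, and the
// charge is the gather overhead (2) plus 16 times the scalar load cost. With
// 64-bit indices the 16 x i64 index vector must be split, so the whole cost
// is computed recursively on two 8-lane halves and doubled.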
4670
4671/// Return the cost of full scalarization of gather / scatter operation.
4672///
4673/// Opcode - Load or Store instruction.
4674/// SrcVTy - The type of the data vector that should be gathered or scattered.
4675/// VariableMask - The mask is non-constant at compile time.
4676/// Alignment - Alignment for one element.
4677/// AddressSpace - pointer[s] address space.
4678///
4679/// FIXME: Add TargetCostKind support.
4680InstructionCost X86TTIImpl::getGSScalarCost(unsigned Opcode, Type *SrcVTy,
4681 bool VariableMask, Align Alignment,
4682 unsigned AddressSpace) {
4683 unsigned VF = cast<FixedVectorType>(SrcVTy)->getNumElements();
4684 APInt DemandedElts = APInt::getAllOnes(VF);
4685 TTI::TargetCostKind CostKind = TTI::TCK_RecipThroughput;
4686
4687 InstructionCost MaskUnpackCost = 0;
4688 if (VariableMask) {
4689 auto *MaskTy =
4690 FixedVectorType::get(Type::getInt1Ty(SrcVTy->getContext()), VF);
4691 MaskUnpackCost =
4692 getScalarizationOverhead(MaskTy, DemandedElts, false, true);
4693 InstructionCost ScalarCompareCost = getCmpSelInstrCost(
4694 Instruction::ICmp, Type::getInt1Ty(SrcVTy->getContext()), nullptr,
4695 CmpInst::BAD_ICMP_PREDICATE, CostKind);
4696 InstructionCost BranchCost = getCFInstrCost(Instruction::Br, CostKind);
4697 MaskUnpackCost += VF * (BranchCost + ScalarCompareCost);
4698 }
4699
4700 // The cost of the scalar loads/stores.
4701 InstructionCost MemoryOpCost =
4702 VF * getMemoryOpCost(Opcode, SrcVTy->getScalarType(),
4703 MaybeAlign(Alignment), AddressSpace, CostKind);
4704
4705 InstructionCost InsertExtractCost = 0;
4706 if (Opcode == Instruction::Load)
4707 for (unsigned i = 0; i < VF; ++i)
4708 // Add the cost of inserting each scalar load into the vector
4709 InsertExtractCost +=
4710 getVectorInstrCost(Instruction::InsertElement, SrcVTy, i);
4711 else
4712 for (unsigned i = 0; i < VF; ++i)
4713 // Add the cost of extracting each element out of the data vector
4714 InsertExtractCost +=
4715 getVectorInstrCost(Instruction::ExtractElement, SrcVTy, i);
4716
4717 return MemoryOpCost + MaskUnpackCost + InsertExtractCost;
4718}
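// Worked example (illustrative): fully scalarizing a VF = 4 gather with a
// variable mask is charged as
//
//   getScalarizationOverhead(mask) + 4 * (BranchCost + ScalarCompareCost)
//   + 4 * ScalarLoadCost        // one scalar load per lane
//   + 4 * InsertElementCost     // build the result vector
//
// with the insert costs replaced by extract costs for a scatter (store).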
4719
4720/// Calculate the cost of Gather / Scatter operation
4721InstructionCost X86TTIImpl::getGatherScatterOpCost(
4722 unsigned Opcode, Type *SrcVTy, const Value *Ptr, bool VariableMask,
4723 Align Alignment, TTI::TargetCostKind CostKind,
4724 const Instruction *I = nullptr) {
4725 if (CostKind != TTI::TCK_RecipThroughput) {
4726 if ((Opcode == Instruction::Load &&
4727 isLegalMaskedGather(SrcVTy, Align(Alignment))) ||
4728 (Opcode == Instruction::Store &&
4729 isLegalMaskedScatter(SrcVTy, Align(Alignment))))
4730 return 1;
4731 return BaseT::getGatherScatterOpCost(Opcode, SrcVTy, Ptr, VariableMask,
4732 Alignment, CostKind, I);
4733 }
4734
4735 assert(SrcVTy->isVectorTy() && "Unexpected data type for Gather/Scatter");
4736 PointerType *PtrTy = dyn_cast<PointerType>(Ptr->getType());
4737 if (!PtrTy && Ptr->getType()->isVectorTy())
4738 PtrTy = dyn_cast<PointerType>(
4739 cast<VectorType>(Ptr->getType())->getElementType());
4740 assert(PtrTy && "Unexpected type for Ptr argument");
4741 unsigned AddressSpace = PtrTy->getAddressSpace();
4742
4743 if ((Opcode == Instruction::Load &&
4744 !isLegalMaskedGather(SrcVTy, Align(Alignment))) ||
4745 (Opcode == Instruction::Store &&
4746 !isLegalMaskedScatter(SrcVTy, Align(Alignment))))
4747 return getGSScalarCost(Opcode, SrcVTy, VariableMask, Alignment,
4748 AddressSpace);
4749
4750 return getGSVectorCost(Opcode, SrcVTy, Ptr, Alignment, AddressSpace);
4751}
4752
4753bool X86TTIImpl::isLSRCostLess(TargetTransformInfo::LSRCost &C1,
4754 TargetTransformInfo::LSRCost &C2) {
4755 // The X86-specific part here is "instruction count is the 1st priority".
4756 return std::tie(C1.Insns, C1.NumRegs, C1.AddRecCost,
4757 C1.NumIVMuls, C1.NumBaseAdds,
4758 C1.ScaleCost, C1.ImmCost, C1.SetupCost) <
4759 std::tie(C2.Insns, C2.NumRegs, C2.AddRecCost,
4760 C2.NumIVMuls, C2.NumBaseAdds,
4761 C2.ScaleCost, C2.ImmCost, C2.SetupCost);
4762}
4763
4764bool X86TTIImpl::canMacroFuseCmp() {
4765 return ST->hasMacroFusion() || ST->hasBranchFusion();
4766}
4767
4768bool X86TTIImpl::isLegalMaskedLoad(Type *DataTy, Align Alignment) {
4769 if (!ST->hasAVX())
4770 return false;
4771
4772 // The backend can't handle a single element vector.
4773 if (isa<VectorType>(DataTy) &&
4774 cast<FixedVectorType>(DataTy)->getNumElements() == 1)
4775 return false;
4776 Type *ScalarTy = DataTy->getScalarType();
4777
4778 if (ScalarTy->isPointerTy())
4779 return true;
4780
4781 if (ScalarTy->isFloatTy() || ScalarTy->isDoubleTy())
4782 return true;
4783
4784 if (ScalarTy->isHalfTy() && ST->hasBWI() && ST->hasFP16())
4785 return true;
4786
4787 if (!ScalarTy->isIntegerTy())
4788 return false;
4789
4790 unsigned IntWidth = ScalarTy->getIntegerBitWidth();
4791 return IntWidth == 32 || IntWidth == 64 ||
4792 ((IntWidth == 8 || IntWidth == 16) && ST->hasBWI());
4793}
4794
4795bool X86TTIImpl::isLegalMaskedStore(Type *DataType, Align Alignment) {
4796 return isLegalMaskedLoad(DataType, Alignment);
4797}
4798
4799bool X86TTIImpl::isLegalNTLoad(Type *DataType, Align Alignment) {
4800 unsigned DataSize = DL.getTypeStoreSize(DataType);
4801 // The only supported nontemporal loads are for aligned vectors of 16 or 32
4802 // bytes. Note that 32-byte nontemporal vector loads are supported by AVX2
4803 // (the equivalent stores only require AVX).
4804 if (Alignment >= DataSize && (DataSize == 16 || DataSize == 32))
4805 return DataSize == 16 ? ST->hasSSE1() : ST->hasAVX2();
4806
4807 return false;
4808}
4809
4810bool X86TTIImpl::isLegalNTStore(Type *DataType, Align Alignment) {
4811 unsigned DataSize = DL.getTypeStoreSize(DataType);
4812
4813 // SSE4A supports nontemporal stores of float and double at arbitrary
4814 // alignment.
4815 if (ST->hasSSE4A() && (DataType->isFloatTy() || DataType->isDoubleTy()))
4816 return true;
4817
4818 // Besides the SSE4A subtarget exception above, only aligned stores are
4820 // available nontemporally on any other subtarget. And only stores with a size
4820 // of 4..32 bytes (powers of 2, only) are permitted.
4821 if (Alignment < DataSize || DataSize < 4 || DataSize > 32 ||
4822 !isPowerOf2_32(DataSize))
4823 return false;
4824
4825 // 32-byte vector nontemporal stores are supported by AVX (the equivalent
4826 // loads require AVX2).
4827 if (DataSize == 32)
4828 return ST->hasAVX();
4829 if (DataSize == 16)
4830 return ST->hasSSE1();
4831 return true;
4832}
4833
4834bool X86TTIImpl::isLegalMaskedExpandLoad(Type *DataTy) {
4835 if (!isa<VectorType>(DataTy))
4836 return false;
4837
4838 if (!ST->hasAVX512())
4839 return false;
4840
4841 // The backend can't handle a single element vector.
4842 if (cast<FixedVectorType>(DataTy)->getNumElements() == 1)
4843 return false;
4844
4845 Type *ScalarTy = cast<VectorType>(DataTy)->getElementType();
4846
4847 if (ScalarTy->isFloatTy() || ScalarTy->isDoubleTy())
4848 return true;
4849
4850 if (!ScalarTy->isIntegerTy())
4851 return false;
4852
4853 unsigned IntWidth = ScalarTy->getIntegerBitWidth();
4854 return IntWidth == 32 || IntWidth == 64 ||
4855 ((IntWidth == 8 || IntWidth == 16) && ST->hasVBMI2());
4856}
4857
4858bool X86TTIImpl::isLegalMaskedCompressStore(Type *DataTy) {
4859 return isLegalMaskedExpandLoad(DataTy);
4860}
4861
4862bool X86TTIImpl::isLegalMaskedGather(Type *DataTy, Align Alignment) {
4863 // Some CPUs have better gather performance than others.
4864 // TODO: Remove the explicit ST->hasAVX512()? That would mean we would only
4865 // enable gather with a -march.
4866 if (!(ST->hasAVX512() || (ST->hasFastGather() && ST->hasAVX2())))
4867 return false;
4868
4869 // This function is called now in two cases: from the Loop Vectorizer
4870 // and from the Scalarizer.
4871 // When the Loop Vectorizer asks about legality of the feature,
4872 // the vectorization factor is not calculated yet. The Loop Vectorizer
4873 // sends a scalar type and the decision is based on the width of the
4874 // scalar element.
4875 // Later on, the cost model will estimate usage of this intrinsic based on
4876 // the vector type.
4877 // The Scalarizer asks again about legality. It sends a vector type.
4878 // In this case we can reject non-power-of-2 vectors.
4879 // We also reject single element vectors as the type legalizer can't
4880 // scalarize it.
4881 if (auto *DataVTy = dyn_cast<FixedVectorType>(DataTy)) {
4882 unsigned NumElts = DataVTy->getNumElements();
4883 if (NumElts == 1)
4884 return false;
4885 // Gather / Scatter for vector 2 is not profitable on KNL / SKX
4886 // Vector-4 of gather/scatter instruction does not exist on KNL.
4887 // We can extend it to 8 elements, but zeroing upper bits of
4888 // the mask vector will add more instructions. Right now we give the scalar
4889 // cost of vector-4 for KNL. TODO: Check, maybe the gather/scatter
4890 // instruction is better in the VariableMask case.
4891 if (ST->hasAVX512() && (NumElts == 2 || (NumElts == 4 && !ST->hasVLX())))
4892 return false;
4893 }
4894 Type *ScalarTy = DataTy->getScalarType();
4895 if (ScalarTy->isPointerTy())
4896 return true;
4897
4898 if (ScalarTy->isFloatTy() || ScalarTy->isDoubleTy())
4899 return true;
4900
4901 if (!ScalarTy->isIntegerTy())
4902 return false;
4903
4904 unsigned IntWidth = ScalarTy->getIntegerBitWidth();
4905 return IntWidth == 32 || IntWidth == 64;
4906}
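// Illustrative consequences of the rules above (assuming the stated
// subtargets): on AVX-512, 2-element gathers and, without VLX, 4-element
// gathers are rejected as unprofitable, while 32/64-bit integer, pointer,
// float and double elements at wider factors are accepted; narrow integer
// elements (i8/i16) are never gathered.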
4907
4908bool X86TTIImpl::isLegalMaskedScatter(Type *DataType, Align Alignment) {
4909 // AVX2 doesn't support scatter
4910 if (!ST->hasAVX512())
4911 return false;
4912 return isLegalMaskedGather(DataType, Alignment);
4913}
4914
4915bool X86TTIImpl::hasDivRemOp(Type *DataType, bool IsSigned) {
4916 EVT VT = TLI->getValueType(DL, DataType);
4917 return TLI->isOperationLegal(IsSigned ? ISD::SDIVREM : ISD::UDIVREM, VT);
4918}
4919
4920bool X86TTIImpl::isFCmpOrdCheaperThanFCmpZero(Type *Ty) {
4921 return false;
4922}
4923
4924bool X86TTIImpl::areInlineCompatible(const Function *Caller,
4925 const Function *Callee) const {
4926 const TargetMachine &TM = getTLI()->getTargetMachine();
4927
4928 // Work this as a subsetting of subtarget features.
4929 const FeatureBitset &CallerBits =
4930 TM.getSubtargetImpl(*Caller)->getFeatureBits();
4931 const FeatureBitset &CalleeBits =
4932 TM.getSubtargetImpl(*Callee)->getFeatureBits();
4933
4934 FeatureBitset RealCallerBits = CallerBits & ~InlineFeatureIgnoreList;
4935 FeatureBitset RealCalleeBits = CalleeBits & ~InlineFeatureIgnoreList;
4936 return (RealCallerBits & RealCalleeBits) == RealCalleeBits;
4937}
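// ---------------------------------------------------------------------------
// [Illustrative sketch, not LLVM's FeatureBitset] areInlineCompatible above
// reduces to "callee features are a subset of caller features" once the
// ignore-list bits are masked off. The 8-bit masks below are hypothetical.
#include <bitset>
#include <cstdio>

int main() {
  std::bitset<8> CallerBits("01101110"); // hypothetical caller features
  std::bitset<8> CalleeBits("00101100"); // hypothetical callee features
  std::bitset<8> IgnoreList("00000001"); // hypothetical ignore-list

  std::bitset<8> RealCaller = CallerBits & ~IgnoreList;
  std::bitset<8> RealCallee = CalleeBits & ~IgnoreList;
  bool Compatible = (RealCaller & RealCallee) == RealCallee;
  std::printf("callee is a feature subset of caller: %d\n", Compatible); // 1
}
// ---------------------------------------------------------------------------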
4938
4939bool X86TTIImpl::areFunctionArgsABICompatible(
4940 const Function *Caller, const Function *Callee,
4941 SmallPtrSetImpl<Argument *> &Args) const {
4942 if (!BaseT::areFunctionArgsABICompatible(Caller, Callee, Args))
4943 return false;
4944
4945 // If we get here, we know the target features match. If one function
4946 // considers 512-bit vectors legal and the other does not, consider them
4947 // incompatible.
4948 const TargetMachine &TM = getTLI()->getTargetMachine();
4949
4950 if (TM.getSubtarget<X86Subtarget>(*Caller).useAVX512Regs() ==
4951 TM.getSubtarget<X86Subtarget>(*Callee).useAVX512Regs())
4952 return true;
4953
4954 // Consider the arguments compatible if they aren't vectors or aggregates.
4955 // FIXME: Look at the size of vectors.
4956 // FIXME: Look at the element types of aggregates to see if there are vectors.
4957 // FIXME: The API of this function seems intended to allow arguments
4958 // to be removed from the set, but the caller doesn't check if the set
4959 // becomes empty so that may not work in practice.
4960 return llvm::none_of(Args, [](Argument *A) {
4961 auto *EltTy = cast<PointerType>(A->getType())->getElementType();
4962 return EltTy->isVectorTy() || EltTy->isAggregateType();
4963 });
4964}
4965
4966X86TTIImpl::TTI::MemCmpExpansionOptions
4967X86TTIImpl::enableMemCmpExpansion(bool OptSize, bool IsZeroCmp) const {
4968 TTI::MemCmpExpansionOptions Options;
4969 Options.MaxNumLoads = TLI->getMaxExpandSizeMemcmp(OptSize);
4970 Options.NumLoadsPerBlock = 2;
4971 // All GPR and vector loads can be unaligned.
4972 Options.AllowOverlappingLoads = true;
4973 if (IsZeroCmp) {
4974 // Only enable vector loads for equality comparison. Right now the vector
4975 // version is not as fast for three way compare (see #33329).
4976 const unsigned PreferredWidth = ST->getPreferVectorWidth();
4977 if (PreferredWidth >= 512 && ST->hasAVX512()) Options.LoadSizes.push_back(64);
4978 if (PreferredWidth >= 256 && ST->hasAVX()) Options.LoadSizes.push_back(32);
4979 if (PreferredWidth >= 128 && ST->hasSSE2()) Options.LoadSizes.push_back(16);
4980 }
4981 if (ST->is64Bit()) {
4982 Options.LoadSizes.push_back(8);
4983 }
4984 Options.LoadSizes.push_back(4);
4985 Options.LoadSizes.push_back(2);
4986 Options.LoadSizes.push_back(1);
4987 return Options;
4988}
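// ---------------------------------------------------------------------------
// [Illustrative sketch, not the MemCmpExpansion pass] Shows how the LoadSizes
// populated above for a 64-bit SSE2 target ({16, 8, 4, 2, 1}) can cover a
// memcmp length when overlapping loads are ignored; the length 31 is a
// hypothetical example.
#include <cstdio>
#include <vector>

int main() {
  const std::vector<unsigned> LoadSizes = {16, 8, 4, 2, 1};
  unsigned Remaining = 31, NumLoads = 0;
  for (unsigned Size : LoadSizes)
    while (Remaining >= Size) {
      Remaining -= Size;
      ++NumLoads;
    }
  // 31 = 16 + 8 + 4 + 2 + 1 -> 5 loads. With AllowOverlappingLoads the
  // expansion can instead cover 31 bytes with two overlapping 16-byte loads.
  std::printf("loads needed for 31 bytes: %u\n", NumLoads);
}
// ---------------------------------------------------------------------------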
4989
4990bool X86TTIImpl::enableInterleavedAccessVectorization() {
4991 // TODO: We expect this to be beneficial regardless of arch,
4992 // but there are currently some unexplained performance artifacts on Atom.
4993 // As a temporary solution, disable on Atom.
4994 return !(ST->isAtom());
4995}
4996
4997// Get estimation for interleaved load/store operations for AVX2.
4998// \p Factor is the interleaved-access factor (stride) - number of
4999// (interleaved) elements in the group.
5000// \p Indices contains the indices for a strided load: when the
5001// interleaved load has gaps they indicate which elements are used.
5002// If Indices is empty (or if the number of indices is equal to the size
5003// of the interleaved-access as given in \p Factor) the access has no gaps.
5004//
5005// As opposed to AVX-512, AVX2 does not have generic shuffles that allow
5006// computing the cost using a generic formula as a function of generic
5007// shuffles. We therefore use a lookup table instead, filled according to
5008// the instruction sequences that codegen currently generates.
5009InstructionCost X86TTIImpl::getInterleavedMemoryOpCostAVX2(
5010 unsigned Opcode, FixedVectorType *VecTy, unsigned Factor,
5011 ArrayRef<unsigned> Indices, Align Alignment, unsigned AddressSpace,
5012 TTI::TargetCostKind CostKind, bool UseMaskForCond, bool UseMaskForGaps) {
5013
5014 if (UseMaskForCond || UseMaskForGaps)
4
Assuming 'UseMaskForCond' is false
5
Assuming 'UseMaskForGaps' is false
5015 return BaseT::getInterleavedMemoryOpCost(Opcode, VecTy, Factor, Indices,
5016 Alignment, AddressSpace, CostKind,
5017 UseMaskForCond, UseMaskForGaps);
5018
5019 // We currently support only fully-interleaved groups, with no gaps.
5020 // TODO: Support also strided loads (interleaved-groups with gaps).
5021 if (Indices.size() && Indices.size() != Factor)
6
Assuming the condition is true
7
Assuming the condition is true
8
Taking true branch
5022 return BaseT::getInterleavedMemoryOpCost(Opcode, VecTy, Factor, Indices,
9
Calling 'BasicTTIImplBase::getInterleavedMemoryOpCost'
5023 Alignment, AddressSpace, CostKind);
5024
5025 // VecTy for interleave memop is <VF*Factor x Elt>.
5026 // So, for VF=4, Interleave Factor = 3, Element type = i32 we have
5027 // VecTy = <12 x i32>.
5028 MVT LegalVT = getTLI()->getTypeLegalizationCost(DL, VecTy).second;
5029
5030 // This function can be called with VecTy=<6xi128>, Factor=3, in which case
5031 // the VF=2, while v2i128 is an unsupported MVT vector type
5032 // (see MachineValueType.h::getVectorVT()).
5033 if (!LegalVT.isVector())
5034 return BaseT::getInterleavedMemoryOpCost(Opcode, VecTy, Factor, Indices,
5035 Alignment, AddressSpace, CostKind);
5036
5037 unsigned VF = VecTy->getNumElements() / Factor;
5038 Type *ScalarTy = VecTy->getElementType();
5039 // Deduplicate entries, model floats/pointers as appropriately-sized integers.
5040 if (!ScalarTy->isIntegerTy())
5041 ScalarTy =
5042 Type::getIntNTy(ScalarTy->getContext(), DL.getTypeSizeInBits(ScalarTy));
5043
5044 // Get the cost of all the memory operations.
5045 InstructionCost MemOpCosts = getMemoryOpCost(
5046 Opcode, VecTy, MaybeAlign(Alignment), AddressSpace, CostKind);
5047
5048 auto *VT = FixedVectorType::get(ScalarTy, VF);
5049 EVT ETy = TLI->getValueType(DL, VT);
5050 if (!ETy.isSimple())
5051 return BaseT::getInterleavedMemoryOpCost(Opcode, VecTy, Factor, Indices,
5052 Alignment, AddressSpace, CostKind);
5053
5054 // TODO: Complete for other data-types and strides.
5055 // Each combination of Stride, element bit width and VF results in a different
5056 // sequence; The cost tables are therefore accessed with:
5057 // Factor (stride) and VectorType=VFxiN.
5058 // The Cost accounts only for the shuffle sequence;
5059 // The cost of the loads/stores is accounted for separately.
5060 //
5061 static const CostTblEntry AVX2InterleavedLoadTbl[] = {
5062 {2, MVT::v4i64, 6}, // (load 8i64 and) deinterleave into 2 x 4i64
5063
5064 {3, MVT::v2i8, 10}, // (load 6i8 and) deinterleave into 3 x 2i8
5065 {3, MVT::v4i8, 4}, // (load 12i8 and) deinterleave into 3 x 4i8
5066 {3, MVT::v8i8, 9}, // (load 24i8 and) deinterleave into 3 x 8i8
5067 {3, MVT::v16i8, 11}, // (load 48i8 and) deinterleave into 3 x 16i8
5068 {3, MVT::v32i8, 13}, // (load 96i8 and) deinterleave into 3 x 32i8
5069
5070 {3, MVT::v8i32, 17}, // (load 24i32 and) deinterleave into 3 x 8i32
5071
5072 {4, MVT::v2i8, 12}, // (load 8i8 and) deinterleave into 4 x 2i8
5073 {4, MVT::v4i8, 4}, // (load 16i8 and) deinterleave into 4 x 4i8
5074 {4, MVT::v8i8, 20}, // (load 32i8 and) deinterleave into 4 x 8i8
5075 {4, MVT::v16i8, 39}, // (load 64i8 and) deinterleave into 4 x 16i8
5076 {4, MVT::v32i8, 80}, // (load 128i8 and) deinterleave into 4 x 32i8
5077
5078 {8, MVT::v8i32, 40} // (load 64i32 and) deinterleave into 8 x 8i32
5079 };
5080
5081 static const CostTblEntry AVX2InterleavedStoreTbl[] = {
5082 {2, MVT::v4i64, 6}, // interleave 2 x 4i64 into 8i64 (and store)
5083
5084 {3, MVT::v2i8, 7}, // interleave 3 x 2i8 into 6i8 (and store)
5085 {3, MVT::v4i8, 8}, // interleave 3 x 4i8 into 12i8 (and store)
5086 {3, MVT::v8i8, 11}, // interleave 3 x 8i8 into 24i8 (and store)
5087 {3, MVT::v16i8, 11}, // interleave 3 x 16i8 into 48i8 (and store)
5088 {3, MVT::v32i8, 13}, // interleave 3 x 32i8 into 96i8 (and store)
5089
5090 {4, MVT::v2i8, 12}, // interleave 4 x 2i8 into 8i8 (and store)
5091 {4, MVT::v4i8, 9}, // interleave 4 x 4i8 into 16i8 (and store)
5092 {4, MVT::v8i8, 10}, // interleave 4 x 8i8 into 32i8 (and store)
5093 {4, MVT::v16i8, 10}, // interleave 4 x 16i8 into 64i8 (and store)
5094 {4, MVT::v32i8, 12} // interleave 4 x 32i8 into 128i8 (and store)
5095 };
5096
5097 if (Opcode == Instruction::Load) {
5098 if (const auto *Entry =
5099 CostTableLookup(AVX2InterleavedLoadTbl, Factor, ETy.getSimpleVT()))
5100 return MemOpCosts + Entry->Cost;
5101 } else {
5102    assert(Opcode == Instruction::Store &&
5103           "Expected Store Instruction at this point");
5104 if (const auto *Entry =
5105 CostTableLookup(AVX2InterleavedStoreTbl, Factor, ETy.getSimpleVT()))
5106 return MemOpCosts + Entry->Cost;
5107 }
5108
5109 return BaseT::getInterleavedMemoryOpCost(Opcode, VecTy, Factor, Indices,
5110 Alignment, AddressSpace, CostKind);
5111}
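// ---------------------------------------------------------------------------
// [Illustrative sketch, not LLVM's CostTableLookup] The table-based costing
// above for a stride-3 interleaved load of <24 x i32> on AVX2: Factor = 3 and
// VF = 8 select the {3, v8i32, 17} entry, which is added to the plain memory
// cost. The memory-op cost below is a placeholder value.
#include <cstdio>
#include <cstring>

struct Entry { unsigned Factor; const char *VT; unsigned Cost; };

static const Entry AVX2InterleavedLoadTbl[] = {
    {3, "v8i8", 9}, {3, "v16i8", 11}, {3, "v8i32", 17}, {8, "v8i32", 40}};

int main() {
  const unsigned Factor = 3;
  const char *VT = "v8i32"; // <24 x i32> with Factor = 3 gives VF = 8
  const unsigned MemOpCosts = 2; // placeholder for getMemoryOpCost(<24 x i32>)
  for (const Entry &E : AVX2InterleavedLoadTbl)
    if (E.Factor == Factor && std::strcmp(E.VT, VT) == 0) {
      std::printf("interleaved load cost: %u\n", MemOpCosts + E.Cost); // 19
      return 0;
    }
  std::printf("no entry: fall back to the generic implementation\n");
}
// ---------------------------------------------------------------------------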
5112
5113// Get estimation for interleaved load/store operations and strided load.
5114// \p Indices contains indices for strided load.
5115// \p Factor - the factor of interleaving.
5116// AVX-512 provides 3-src shuffles that significantly reduces the cost.
5117InstructionCost X86TTIImpl::getInterleavedMemoryOpCostAVX512(
5118 unsigned Opcode, FixedVectorType *VecTy, unsigned Factor,
5119 ArrayRef<unsigned> Indices, Align Alignment, unsigned AddressSpace,
5120 TTI::TargetCostKind CostKind, bool UseMaskForCond, bool UseMaskForGaps) {
5121
5122 if (UseMaskForCond || UseMaskForGaps)
5123 return BaseT::getInterleavedMemoryOpCost(Opcode, VecTy, Factor, Indices,
5124 Alignment, AddressSpace, CostKind,
5125 UseMaskForCond, UseMaskForGaps);
5126
5127 // VecTy for interleave memop is <VF*Factor x Elt>.
5128 // So, for VF=4, Interleave Factor = 3, Element type = i32 we have
5129 // VecTy = <12 x i32>.
5130
5131 // Calculate the number of memory operations (NumOfMemOps) required
5132 // to load/store the VecTy.
5133 MVT LegalVT = getTLI()->getTypeLegalizationCost(DL, VecTy).second;
5134 unsigned VecTySize = DL.getTypeStoreSize(VecTy);
5135 unsigned LegalVTSize = LegalVT.getStoreSize();
5136 unsigned NumOfMemOps = (VecTySize + LegalVTSize - 1) / LegalVTSize;
5137
5138 // Get the cost of one memory operation.
5139 auto *SingleMemOpTy = FixedVectorType::get(VecTy->getElementType(),
5140 LegalVT.getVectorNumElements());
5141 InstructionCost MemOpCost = getMemoryOpCost(
5142 Opcode, SingleMemOpTy, MaybeAlign(Alignment), AddressSpace, CostKind);
5143
5144 unsigned VF = VecTy->getNumElements() / Factor;
5145 MVT VT = MVT::getVectorVT(MVT::getVT(VecTy->getScalarType()), VF);
5146
5147 if (Opcode == Instruction::Load) {
5148 // The tables (AVX512InterleavedLoadTbl and AVX512InterleavedStoreTbl)
5149 // contain the cost of the optimized shuffle sequence that the
5150 // X86InterleavedAccess pass will generate.
5151 // The costs of loads and stores are computed separately from the table.
5152
5153 // X86InterleavedAccess supports only the following interleaved-access groups.
5154 static const CostTblEntry AVX512InterleavedLoadTbl[] = {
5155 {3, MVT::v16i8, 12}, //(load 48i8 and) deinterleave into 3 x 16i8
5156 {3, MVT::v32i8, 14}, //(load 96i8 and) deinterleave into 3 x 32i8
5157 {3, MVT::v64i8, 22}, //(load 192i8 and) deinterleave into 3 x 64i8
5158 };
5159
5160 if (const auto *Entry =
5161 CostTableLookup(AVX512InterleavedLoadTbl, Factor, VT))
5162 return NumOfMemOps * MemOpCost + Entry->Cost;
5163 // If an entry does not exist, fall back to the default implementation.
5164
5165 // Kind of shuffle depends on number of loaded values.
5166 // If we load the entire data in one register, we can use a 1-src shuffle.
5167 // Otherwise, we'll merge 2 sources in each operation.
5168 TTI::ShuffleKind ShuffleKind =
5169 (NumOfMemOps > 1) ? TTI::SK_PermuteTwoSrc : TTI::SK_PermuteSingleSrc;
5170
5171 InstructionCost ShuffleCost =
5172 getShuffleCost(ShuffleKind, SingleMemOpTy, None, 0, nullptr);
5173
5174 unsigned NumOfLoadsInInterleaveGrp =
5175 Indices.size() ? Indices.size() : Factor;
5176 auto *ResultTy = FixedVectorType::get(VecTy->getElementType(),
5177 VecTy->getNumElements() / Factor);
5178 InstructionCost NumOfResults =
5179 getTLI()->getTypeLegalizationCost(DL, ResultTy).first *
5180 NumOfLoadsInInterleaveGrp;
5181
5182 // About half of the loads may be folded into shuffles when we have only
5183 // one result. If we have more than one result, we do not fold loads at all.
5184 unsigned NumOfUnfoldedLoads =
5185 NumOfResults > 1 ? NumOfMemOps : NumOfMemOps / 2;
5186
5187 // Get a number of shuffle operations per result.
5188 unsigned NumOfShufflesPerResult =
5189 std::max((unsigned)1, (unsigned)(NumOfMemOps - 1));
5190
5191 // The SK_PermuteTwoSrc shuffle clobbers one of the source operands.
5192 // When we have more than one destination, we need additional instructions
5193 // to keep the sources.
5194 InstructionCost NumOfMoves = 0;
5195 if (NumOfResults > 1 && ShuffleKind == TTI::SK_PermuteTwoSrc)
5196 NumOfMoves = NumOfResults * NumOfShufflesPerResult / 2;
5197
5198 InstructionCost Cost = NumOfResults * NumOfShufflesPerResult * ShuffleCost +
5199 NumOfUnfoldedLoads * MemOpCost + NumOfMoves;
5200
5201 return Cost;
5202 }
5203
5204 // Store.
5205   assert(Opcode == Instruction::Store &&
5206          "Expected Store Instruction at this point");
5207 // X86InterleavedAccess supports only the following interleaved-access groups.
5208 static const CostTblEntry AVX512InterleavedStoreTbl[] = {
5209 {3, MVT::v16i8, 12}, // interleave 3 x 16i8 into 48i8 (and store)
5210 {3, MVT::v32i8, 14}, // interleave 3 x 32i8 into 96i8 (and store)
5211 {3, MVT::v64i8, 26}, // interleave 3 x 64i8 into 192i8 (and store)
5212
5213 {4, MVT::v8i8, 10}, // interleave 4 x 8i8 into 32i8 (and store)
5214 {4, MVT::v16i8, 11}, // interleave 4 x 16i8 into 64i8 (and store)
5215 {4, MVT::v32i8, 14}, // interleave 4 x 32i8 into 128i8 (and store)
5216 {4, MVT::v64i8, 24} // interleave 4 x 64i8 into 256i8 (and store)
5217 };
5218
5219 if (const auto *Entry =
5220 CostTableLookup(AVX512InterleavedStoreTbl, Factor, VT))
5221 return NumOfMemOps * MemOpCost + Entry->Cost;
5222 // If an entry does not exist, fall back to the default implementation.
5223
5224 // There are no strided stores at the moment, and a store can't be folded
5225 // into a shuffle.
5226 unsigned NumOfSources = Factor; // The number of values to be merged.
5227 InstructionCost ShuffleCost =
5228 getShuffleCost(TTI::SK_PermuteTwoSrc, SingleMemOpTy, None, 0, nullptr);
5229 unsigned NumOfShufflesPerStore = NumOfSources - 1;
5230
5231 // The SK_PermuteTwoSrc shuffle clobbers one of the source operands.
5232 // We need additional instructions to keep the sources.
5233 unsigned NumOfMoves = NumOfMemOps * NumOfShufflesPerStore / 2;
5234 InstructionCost Cost =
5235 NumOfMemOps * (MemOpCost + NumOfShufflesPerStore * ShuffleCost) +
5236 NumOfMoves;
5237 return Cost;
5238}
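// ---------------------------------------------------------------------------
// [Illustrative sketch, not part of the original source] The generic AVX-512
// interleaved-store formula above, evaluated for a hypothetical Factor = 4
// store that legalizes into two memory operations; the per-operation costs
// are placeholder values.
#include <cstdio>

int main() {
  const unsigned Factor = 4;
  const unsigned NumOfMemOps = 2;  // assumed ceil(VecTySize / LegalVTSize)
  const unsigned MemOpCost = 1;    // placeholder getMemoryOpCost result
  const unsigned ShuffleCost = 1;  // placeholder SK_PermuteTwoSrc cost
  const unsigned NumOfShufflesPerStore = Factor - 1;
  const unsigned NumOfMoves = NumOfMemOps * NumOfShufflesPerStore / 2;
  const unsigned Cost =
      NumOfMemOps * (MemOpCost + NumOfShufflesPerStore * ShuffleCost) +
      NumOfMoves;
  std::printf("interleaved store cost: %u\n", Cost); // 2 * (1 + 3 * 1) + 3 = 11
}
// ---------------------------------------------------------------------------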
5239
5240InstructionCost X86TTIImpl::getInterleavedMemoryOpCost(
5241 unsigned Opcode, Type *VecTy, unsigned Factor, ArrayRef<unsigned> Indices,
5242 Align Alignment, unsigned AddressSpace, TTI::TargetCostKind CostKind,
5243 bool UseMaskForCond, bool UseMaskForGaps) {
5244 auto isSupportedOnAVX512 = [&](Type *VecTy, bool HasBW) {
5245 Type *EltTy = cast<VectorType>(VecTy)->getElementType();
5246 if (EltTy->isFloatTy() || EltTy->isDoubleTy() || EltTy->isIntegerTy(64) ||
5247 EltTy->isIntegerTy(32) || EltTy->isPointerTy())
5248 return true;
5249 if (EltTy->isIntegerTy(16) || EltTy->isIntegerTy(8) ||
5250 (!ST->useSoftFloat() && ST->hasFP16() && EltTy->isHalfTy()))
5251 return HasBW;
5252 return false;
5253 };
5254 if (ST->hasAVX512() && isSupportedOnAVX512(VecTy, ST->hasBWI()))
5255 return getInterleavedMemoryOpCostAVX512(
5256 Opcode, cast<FixedVectorType>(VecTy), Factor, Indices, Alignment,
5257 AddressSpace, CostKind, UseMaskForCond, UseMaskForGaps);
5258 if (ST->hasAVX2())
1
Taking true branch
5259 return getInterleavedMemoryOpCostAVX2(
3
Calling 'X86TTIImpl::getInterleavedMemoryOpCostAVX2'
5260 Opcode, cast<FixedVectorType>(VecTy), Factor, Indices, Alignment,
2
'VecTy' is a 'FixedVectorType'
5261 AddressSpace, CostKind, UseMaskForCond, UseMaskForGaps);
5262
5263 return BaseT::getInterleavedMemoryOpCost(Opcode, VecTy, Factor, Indices,
5264 Alignment, AddressSpace, CostKind,
5265 UseMaskForCond, UseMaskForGaps);
5266}

/build/llvm-toolchain-snapshot-14~++20210926122410+d23fd8ae8906/llvm/include/llvm/CodeGen/BasicTTIImpl.h

1//===- BasicTTIImpl.h -------------------------------------------*- C++ -*-===//
2//
3// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
4// See https://llvm.org/LICENSE.txt for license information.
5// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
6//
7//===----------------------------------------------------------------------===//
8//
9/// \file
10/// This file provides a helper that implements much of the TTI interface in
11/// terms of the target-independent code generator and TargetLowering
12/// interfaces.
13//
14//===----------------------------------------------------------------------===//
15
16#ifndef LLVM_CODEGEN_BASICTTIIMPL_H
17#define LLVM_CODEGEN_BASICTTIIMPL_H
18
19#include "llvm/ADT/APInt.h"
20#include "llvm/ADT/ArrayRef.h"
21#include "llvm/ADT/BitVector.h"
22#include "llvm/ADT/SmallPtrSet.h"
23#include "llvm/ADT/SmallVector.h"
24#include "llvm/Analysis/LoopInfo.h"
25#include "llvm/Analysis/OptimizationRemarkEmitter.h"
26#include "llvm/Analysis/TargetTransformInfo.h"
27#include "llvm/Analysis/TargetTransformInfoImpl.h"
28#include "llvm/CodeGen/ISDOpcodes.h"
29#include "llvm/CodeGen/TargetLowering.h"
30#include "llvm/CodeGen/TargetSubtargetInfo.h"
31#include "llvm/CodeGen/ValueTypes.h"
32#include "llvm/IR/BasicBlock.h"
33#include "llvm/IR/Constant.h"
34#include "llvm/IR/Constants.h"
35#include "llvm/IR/DataLayout.h"
36#include "llvm/IR/DerivedTypes.h"
37#include "llvm/IR/InstrTypes.h"
38#include "llvm/IR/Instruction.h"
39#include "llvm/IR/Instructions.h"
40#include "llvm/IR/Intrinsics.h"
41#include "llvm/IR/Operator.h"
42#include "llvm/IR/Type.h"
43#include "llvm/IR/Value.h"
44#include "llvm/Support/Casting.h"
45#include "llvm/Support/CommandLine.h"
46#include "llvm/Support/ErrorHandling.h"
47#include "llvm/Support/MachineValueType.h"
48#include "llvm/Support/MathExtras.h"
49#include "llvm/Target/TargetMachine.h"
50#include <algorithm>
51#include <cassert>
52#include <cstdint>
53#include <limits>
54#include <utility>
55
56namespace llvm {
57
58class Function;
59class GlobalValue;
60class LLVMContext;
61class ScalarEvolution;
62class SCEV;
63class TargetMachine;
64
65extern cl::opt<unsigned> PartialUnrollingThreshold;
66
67/// Base class which can be used to help build a TTI implementation.
68///
69/// This class provides as much implementation of the TTI interface as is
70/// possible using the target independent parts of the code generator.
71///
72/// In order to subclass it, your class must implement a getST() method to
73/// return the subtarget, and a getTLI() method to return the target lowering.
74/// We need these methods implemented in the derived class so that this class
75/// doesn't have to duplicate storage for them.
76template <typename T>
77class BasicTTIImplBase : public TargetTransformInfoImplCRTPBase<T> {
78private:
79 using BaseT = TargetTransformInfoImplCRTPBase<T>;
80 using TTI = TargetTransformInfo;
81
82 /// Helper function to access this as a T.
83 T *thisT() { return static_cast<T *>(this); }
84
85 /// Estimate a cost of Broadcast as an extract and sequence of insert
86 /// operations.
87 InstructionCost getBroadcastShuffleOverhead(FixedVectorType *VTy) {
88 InstructionCost Cost = 0;
89 // Broadcast cost is equal to the cost of extracting the zero'th element
90 // plus the cost of inserting it into every element of the result vector.
91 Cost += thisT()->getVectorInstrCost(Instruction::ExtractElement, VTy, 0);
92
93 for (int i = 0, e = VTy->getNumElements(); i < e; ++i) {
94 Cost += thisT()->getVectorInstrCost(Instruction::InsertElement, VTy, i);
95 }
96 return Cost;
97 }
98
99 /// Estimate a cost of shuffle as a sequence of extract and insert
100 /// operations.
101 InstructionCost getPermuteShuffleOverhead(FixedVectorType *VTy) {
102 InstructionCost Cost = 0;
103 // Shuffle cost is equal to the cost of extracting elements from its
104 // arguments plus the cost of inserting them into the result vector.
105
106 // e.g. <4 x float> has a mask of <0,5,2,7>, i.e. we need to extract from
107 // index 0 of the first vector, index 1 of the second vector, index 2 of the
108 // first vector and finally index 3 of the second vector, and insert them at
109 // indices <0,1,2,3> of the result vector.
110 for (int i = 0, e = VTy->getNumElements(); i < e; ++i) {
111 Cost += thisT()->getVectorInstrCost(Instruction::InsertElement, VTy, i);
112 Cost += thisT()->getVectorInstrCost(Instruction::ExtractElement, VTy, i);
113 }
114 return Cost;
115 }
116
117 /// Estimate a cost of subvector extraction as a sequence of extract and
118 /// insert operations.
119 InstructionCost getExtractSubvectorOverhead(VectorType *VTy, int Index,
120 FixedVectorType *SubVTy) {
121    assert(VTy && SubVTy &&
122           "Can only extract subvectors from vectors");
123 int NumSubElts = SubVTy->getNumElements();
124    assert((!isa<FixedVectorType>(VTy) ||
125            (Index + NumSubElts) <=
126                (int)cast<FixedVectorType>(VTy)->getNumElements()) &&
127           "SK_ExtractSubvector index out of range");
128
129 InstructionCost Cost = 0;
130 // Subvector extraction cost is equal to the cost of extracting elements from
131 // the source type plus the cost of inserting them into the result vector
132 // type.
133 for (int i = 0; i != NumSubElts; ++i) {
134 Cost += thisT()->getVectorInstrCost(Instruction::ExtractElement, VTy,
135 i + Index);
136 Cost +=
137 thisT()->getVectorInstrCost(Instruction::InsertElement, SubVTy, i);
138 }
139 return Cost;
140 }
141
142 /// Estimate a cost of subvector insertion as a sequence of extract and
143 /// insert operations.
144 InstructionCost getInsertSubvectorOverhead(VectorType *VTy, int Index,
145 FixedVectorType *SubVTy) {
146    assert(VTy && SubVTy &&
147           "Can only insert subvectors into vectors");
148 int NumSubElts = SubVTy->getNumElements();
149    assert((!isa<FixedVectorType>(VTy) ||
150            (Index + NumSubElts) <=
151                (int)cast<FixedVectorType>(VTy)->getNumElements()) &&
152           "SK_InsertSubvector index out of range");
153
154 InstructionCost Cost = 0;
155 // Subvector insertion cost is equal to the cost of extracting elements from
156 // the source type plus the cost of inserting them into the result vector
157 // type.
158 for (int i = 0; i != NumSubElts; ++i) {
159 Cost +=
160 thisT()->getVectorInstrCost(Instruction::ExtractElement, SubVTy, i);
161 Cost += thisT()->getVectorInstrCost(Instruction::InsertElement, VTy,
162 i + Index);
163 }
164 return Cost;
165 }
166
167 /// Local query method delegates up to T which *must* implement this!
168 const TargetSubtargetInfo *getST() const {
169 return static_cast<const T *>(this)->getST();
170 }
171
172 /// Local query method delegates up to T which *must* implement this!
173 const TargetLoweringBase *getTLI() const {
174 return static_cast<const T *>(this)->getTLI();
175 }
176
177 static ISD::MemIndexedMode getISDIndexedMode(TTI::MemIndexedMode M) {
178 switch (M) {
179 case TTI::MIM_Unindexed:
180 return ISD::UNINDEXED;
181 case TTI::MIM_PreInc:
182 return ISD::PRE_INC;
183 case TTI::MIM_PreDec:
184 return ISD::PRE_DEC;
185 case TTI::MIM_PostInc:
186 return ISD::POST_INC;
187 case TTI::MIM_PostDec:
188 return ISD::POST_DEC;
189 }
190    llvm_unreachable("Unexpected MemIndexedMode");
191 }
192
193 InstructionCost getCommonMaskedMemoryOpCost(unsigned Opcode, Type *DataTy,
194 Align Alignment,
195 bool VariableMask,
196 bool IsGatherScatter,
197 TTI::TargetCostKind CostKind) {
198 auto *VT = cast<FixedVectorType>(DataTy);
199 // Assume the target does not have support for gather/scatter operations
200 // and provide a rough estimate.
201 //
202 // First, compute the cost of the individual memory operations.
203 InstructionCost AddrExtractCost =
204 IsGatherScatter
205 ? getVectorInstrCost(Instruction::ExtractElement,
206 FixedVectorType::get(
207 PointerType::get(VT->getElementType(), 0),
208 VT->getNumElements()),
209 -1)
210 : 0;
211 InstructionCost LoadCost =
212 VT->getNumElements() *
213 (AddrExtractCost +
214 getMemoryOpCost(Opcode, VT->getElementType(), Alignment, 0, CostKind));
215
216 // Next, compute the cost of packing the result in a vector.
217 InstructionCost PackingCost = getScalarizationOverhead(
218 VT, Opcode != Instruction::Store, Opcode == Instruction::Store);
219
220 InstructionCost ConditionalCost = 0;
221 if (VariableMask) {
222 // Compute the cost of conditionally executing the memory operations with
223 // variable masks. This includes extracting the individual conditions,
224 // branches, and PHIs to combine the results.
225 // NOTE: Estimating the cost of conditionally executing the memory
226 // operations accurately is quite difficult and the current solution
227 // provides a very rough estimate only.
228 ConditionalCost =
229 VT->getNumElements() *
230 (getVectorInstrCost(
231 Instruction::ExtractElement,
232 FixedVectorType::get(Type::getInt1Ty(DataTy->getContext()),
233 VT->getNumElements()),
234 -1) +
235 getCFInstrCost(Instruction::Br, CostKind) +
236 getCFInstrCost(Instruction::PHI, CostKind));
237 }
238
239 return LoadCost + PackingCost + ConditionalCost;
240 }
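// ---------------------------------------------------------------------------
// [Illustrative sketch, not part of BasicTTIImpl.h] The rough scalarization
// estimate above, evaluated for a hypothetical 4-element masked gather with a
// variable mask; the per-instruction costs are placeholder values.
#include <cstdio>

int main() {
  const unsigned NumElts = 4;
  const unsigned AddrExtractCost = 1, ScalarLoadCost = 1; // placeholders
  const unsigned PackingCost = 4;  // placeholder scalarization overhead
  const unsigned ExtractCondCost = 1, BrCost = 1, PhiCost = 1;

  const unsigned LoadCost = NumElts * (AddrExtractCost + ScalarLoadCost);
  const unsigned ConditionalCost =
      NumElts * (ExtractCondCost + BrCost + PhiCost);
  // 8 (loads + address extracts) + 4 (packing) + 12 (mask handling) = 24.
  std::printf("estimated cost: %u\n", LoadCost + PackingCost + ConditionalCost);
}
// ---------------------------------------------------------------------------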
241
242protected:
243 explicit BasicTTIImplBase(const TargetMachine *TM, const DataLayout &DL)
244 : BaseT(DL) {}
245 virtual ~BasicTTIImplBase() = default;
246
247 using TargetTransformInfoImplBase::DL;
248
249public:
250 /// \name Scalar TTI Implementations
251 /// @{
252 bool allowsMisalignedMemoryAccesses(LLVMContext &Context, unsigned BitWidth,
253 unsigned AddressSpace, Align Alignment,
254 bool *Fast) const {
255 EVT E = EVT::getIntegerVT(Context, BitWidth);
256 return getTLI()->allowsMisalignedMemoryAccesses(
257 E, AddressSpace, Alignment, MachineMemOperand::MONone, Fast);
258 }
259
260 bool hasBranchDivergence() { return false; }
261
262 bool useGPUDivergenceAnalysis() { return false; }
263
264 bool isSourceOfDivergence(const Value *V) { return false; }
265
266 bool isAlwaysUniform(const Value *V) { return false; }
267
268 unsigned getFlatAddressSpace() {
269 // Return an invalid address space.
270 return -1;
271 }
272
273 bool collectFlatAddressOperands(SmallVectorImpl<int> &OpIndexes,
274 Intrinsic::ID IID) const {
275 return false;
276 }
277
278 bool isNoopAddrSpaceCast(unsigned FromAS, unsigned ToAS) const {
279 return getTLI()->getTargetMachine().isNoopAddrSpaceCast(FromAS, ToAS);
280 }
281
282 unsigned getAssumedAddrSpace(const Value *V) const {
283 return getTLI()->getTargetMachine().getAssumedAddrSpace(V);
284 }
285
286 Value *rewriteIntrinsicWithAddressSpace(IntrinsicInst *II, Value *OldV,
287 Value *NewV) const {
288 return nullptr;
289 }
290
291 bool isLegalAddImmediate(int64_t imm) {
292 return getTLI()->isLegalAddImmediate(imm);
293 }
294
295 bool isLegalICmpImmediate(int64_t imm) {
296 return getTLI()->isLegalICmpImmediate(imm);
297 }
298
299 bool isLegalAddressingMode(Type *Ty, GlobalValue *BaseGV, int64_t BaseOffset,
300 bool HasBaseReg, int64_t Scale,
301 unsigned AddrSpace, Instruction *I = nullptr) {
302 TargetLoweringBase::AddrMode AM;
303 AM.BaseGV = BaseGV;
304 AM.BaseOffs = BaseOffset;
305 AM.HasBaseReg = HasBaseReg;
306 AM.Scale = Scale;
307 return getTLI()->isLegalAddressingMode(DL, AM, Ty, AddrSpace, I);
308 }
309
310 bool isIndexedLoadLegal(TTI::MemIndexedMode M, Type *Ty,
311 const DataLayout &DL) const {
312 EVT VT = getTLI()->getValueType(DL, Ty);
313 return getTLI()->isIndexedLoadLegal(getISDIndexedMode(M), VT);
314 }
315
316 bool isIndexedStoreLegal(TTI::MemIndexedMode M, Type *Ty,
317 const DataLayout &DL) const {
318 EVT VT = getTLI()->getValueType(DL, Ty);
319 return getTLI()->isIndexedStoreLegal(getISDIndexedMode(M), VT);
320 }
321
322 bool isLSRCostLess(TTI::LSRCost C1, TTI::LSRCost C2) {
323 return TargetTransformInfoImplBase::isLSRCostLess(C1, C2);
324 }
325
326 bool isNumRegsMajorCostOfLSR() {
327 return TargetTransformInfoImplBase::isNumRegsMajorCostOfLSR();
328 }
329
330 bool isProfitableLSRChainElement(Instruction *I) {
331 return TargetTransformInfoImplBase::isProfitableLSRChainElement(I);
332 }
333
334 InstructionCost getScalingFactorCost(Type *Ty, GlobalValue *BaseGV,
335 int64_t BaseOffset, bool HasBaseReg,
336 int64_t Scale, unsigned AddrSpace) {
337 TargetLoweringBase::AddrMode AM;
338 AM.BaseGV = BaseGV;
339 AM.BaseOffs = BaseOffset;
340 AM.HasBaseReg = HasBaseReg;
341 AM.Scale = Scale;
342 return getTLI()->getScalingFactorCost(DL, AM, Ty, AddrSpace);
343 }
344
345 bool isTruncateFree(Type *Ty1, Type *Ty2) {
346 return getTLI()->isTruncateFree(Ty1, Ty2);
347 }
348
349 bool isProfitableToHoist(Instruction *I) {
350 return getTLI()->isProfitableToHoist(I);
351 }
352
353 bool useAA() const { return getST()->useAA(); }
354
355 bool isTypeLegal(Type *Ty) {
356 EVT VT = getTLI()->getValueType(DL, Ty);
357 return getTLI()->isTypeLegal(VT);
358 }
359
360 InstructionCost getRegUsageForType(Type *Ty) {
361 InstructionCost Val = getTLI()->getTypeLegalizationCost(DL, Ty).first;
362    assert(Val >= 0 && "Negative cost!");
363 return Val;
364 }
365
366 InstructionCost getGEPCost(Type *PointeeType, const Value *Ptr,
367 ArrayRef<const Value *> Operands,
368 TTI::TargetCostKind CostKind) {
369 return BaseT::getGEPCost(PointeeType, Ptr, Operands, CostKind);
370 }
371
372 unsigned getEstimatedNumberOfCaseClusters(const SwitchInst &SI,
373 unsigned &JumpTableSize,
374 ProfileSummaryInfo *PSI,
375 BlockFrequencyInfo *BFI) {
376 /// Try to find the estimated number of clusters. Note that the number of
377 /// clusters identified in this function could be different from the actual
378 /// numbers found in lowering. This function ignores switches that are
379 /// lowered with a mix of jump table / bit test / BTree. This function was
380 /// initially intended to be used when estimating the cost of a switch in
381 /// the inline cost heuristic, but it's a generic cost model to be used in other
382 /// places (e.g., in loop unrolling).
383 unsigned N = SI.getNumCases();
384 const TargetLoweringBase *TLI = getTLI();
385 const DataLayout &DL = this->getDataLayout();
386
387 JumpTableSize = 0;
388 bool IsJTAllowed = TLI->areJTsAllowed(SI.getParent()->getParent());
389
390 // Early exit if both a jump table and bit test are not allowed.
391 if (N < 1 || (!IsJTAllowed && DL.getIndexSizeInBits(0u) < N))
392 return N;
393
394 APInt MaxCaseVal = SI.case_begin()->getCaseValue()->getValue();
395 APInt MinCaseVal = MaxCaseVal;
396 for (auto CI : SI.cases()) {
397 const APInt &CaseVal = CI.getCaseValue()->getValue();
398 if (CaseVal.sgt(MaxCaseVal))
399 MaxCaseVal = CaseVal;
400 if (CaseVal.slt(MinCaseVal))
401 MinCaseVal = CaseVal;
402 }
403
404 // Check if suitable for a bit test
405 if (N <= DL.getIndexSizeInBits(0u)) {
406 SmallPtrSet<const BasicBlock *, 4> Dests;
407 for (auto I : SI.cases())
408 Dests.insert(I.getCaseSuccessor());
409
410 if (TLI->isSuitableForBitTests(Dests.size(), N, MinCaseVal, MaxCaseVal,
411 DL))
412 return 1;
413 }
414
415 // Check if suitable for a jump table.
416 if (IsJTAllowed) {
417 if (N < 2 || N < TLI->getMinimumJumpTableEntries())
418 return N;
419 uint64_t Range =
420 (MaxCaseVal - MinCaseVal)
421 .getLimitedValue(std::numeric_limits<uint64_t>::max() - 1) + 1;
422 // Check whether a range of clusters is dense enough for a jump table
423 if (TLI->isSuitableForJumpTable(&SI, N, Range, PSI, BFI)) {
424 JumpTableSize = Range;
425 return 1;
426 }
427 }
428 return N;
429 }
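// ---------------------------------------------------------------------------
// [Illustrative sketch, not part of BasicTTIImpl.h] The jump-table range
// computed above, for a hypothetical switch with case values {2, 5, 9, 40};
// whether that range is dense enough is then decided by
// isSuitableForJumpTable.
#include <algorithm>
#include <cstdio>
#include <vector>

int main() {
  const std::vector<long> Cases = {2, 5, 9, 40};
  const long MinCaseVal = *std::min_element(Cases.begin(), Cases.end());
  const long MaxCaseVal = *std::max_element(Cases.begin(), Cases.end());
  const unsigned long long Range =
      static_cast<unsigned long long>(MaxCaseVal - MinCaseVal) + 1;
  std::printf("N = %zu cases, jump-table range = %llu\n", Cases.size(),
              Range); // N = 4, range = 39
}
// ---------------------------------------------------------------------------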
430
431 bool shouldBuildLookupTables() {
432 const TargetLoweringBase *TLI = getTLI();
433 return TLI->isOperationLegalOrCustom(ISD::BR_JT, MVT::Other) ||
434 TLI->isOperationLegalOrCustom(ISD::BRIND, MVT::Other);
435 }
436
437 bool shouldBuildRelLookupTables() const {
438 const TargetMachine &TM = getTLI()->getTargetMachine();
439 // If non-PIC mode, do not generate a relative lookup table.
440 if (!TM.isPositionIndependent())
441 return false;
442
443 /// Relative lookup table entries consist of 32-bit offsets.
444 /// Do not generate relative lookup tables for large code models
445 /// in 64-bit architectures where 32-bit offsets might not be enough.
446 if (TM.getCodeModel() == CodeModel::Medium ||
447 TM.getCodeModel() == CodeModel::Large)
448 return false;
449
450 Triple TargetTriple = TM.getTargetTriple();
451 if (!TargetTriple.isArch64Bit())
452 return false;
453
454 // TODO: Triggers issues on aarch64 on darwin, so temporarily disable it
455 // there.
456 if (TargetTriple.getArch() == Triple::aarch64 && TargetTriple.isOSDarwin())
457 return false;
458
459 return true;